| | | |
|---|---|---|
| author | Lorry Tar Creator <lorry-tar-importer@lorry> | 2015-10-15 09:45:50 +0000 |
| committer | Lorry Tar Creator <lorry-tar-importer@lorry> | 2015-10-15 09:45:50 +0000 |
| commit | e15dd966d523731101f70ccf768bba12435a0208 (patch) | |
| tree | ae9cb828a24ded2585a41af3f21411523b47897d /Source/JavaScriptCore/bytecode | |
| download | WebKitGtk-tarball-e15dd966d523731101f70ccf768bba12435a0208.tar.gz | |
webkitgtk-2.10.2
Diffstat (limited to 'Source/JavaScriptCore/bytecode')
127 files changed, 24180 insertions, 0 deletions
diff --git a/Source/JavaScriptCore/bytecode/ArrayAllocationProfile.cpp b/Source/JavaScriptCore/bytecode/ArrayAllocationProfile.cpp new file mode 100644 index 000000000..905b5bd3c --- /dev/null +++ b/Source/JavaScriptCore/bytecode/ArrayAllocationProfile.cpp @@ -0,0 +1,57 @@ +/* + * Copyright (C) 2012, 2013 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "ArrayAllocationProfile.h" + +#include "JSCInlines.h" + +namespace JSC { + +void ArrayAllocationProfile::updateIndexingType() +{ + // This is awkwardly racy but totally sound even when executed concurrently. The + // worst cases go something like this: + // + // - Two threads race to execute this code; one of them succeeds in updating the + // m_currentIndexingType and the other either updates it again, or sees a null + // m_lastArray; if it updates it again then at worst it will cause the profile + // to "forget" some array. That's still sound, since we don't promise that + // this profile is a reflection of any kind of truth. + // + // - A concurrent thread reads m_lastArray, but that array is now dead. While + // it's possible for that array to no longer be reachable, it cannot actually + // be freed, since we require the GC to wait until all concurrent JITing + // finishes. + + JSArray* lastArray = m_lastArray; + if (!lastArray) + return; + m_currentIndexingType = leastUpperBoundOfIndexingTypes(m_currentIndexingType, lastArray->indexingType()); + m_lastArray = 0; +} + +} // namespace JSC + diff --git a/Source/JavaScriptCore/bytecode/ArrayAllocationProfile.h b/Source/JavaScriptCore/bytecode/ArrayAllocationProfile.h new file mode 100644 index 000000000..f03763f70 --- /dev/null +++ b/Source/JavaScriptCore/bytecode/ArrayAllocationProfile.h @@ -0,0 +1,81 @@ +/* + * Copyright (C) 2012, 2013 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef ArrayAllocationProfile_h +#define ArrayAllocationProfile_h + +#include "IndexingType.h" +#include "JSArray.h" + +namespace JSC { + +class ArrayAllocationProfile { +public: + ArrayAllocationProfile() + : m_currentIndexingType(ArrayWithUndecided) + , m_lastArray(0) + { + } + + IndexingType selectIndexingType() + { + JSArray* lastArray = m_lastArray; + if (lastArray && UNLIKELY(lastArray->indexingType() != m_currentIndexingType)) + updateIndexingType(); + return m_currentIndexingType; + } + + JSArray* updateLastAllocation(JSArray* lastArray) + { + m_lastArray = lastArray; + return lastArray; + } + + JS_EXPORT_PRIVATE void updateIndexingType(); + + static IndexingType selectIndexingTypeFor(ArrayAllocationProfile* profile) + { + if (!profile) + return ArrayWithUndecided; + return profile->selectIndexingType(); + } + + static JSArray* updateLastAllocationFor(ArrayAllocationProfile* profile, JSArray* lastArray) + { + if (profile) + profile->updateLastAllocation(lastArray); + return lastArray; + } + +private: + + IndexingType m_currentIndexingType; + JSArray* m_lastArray; +}; + +} // namespace JSC + +#endif // ArrayAllocationProfile_h + diff --git a/Source/JavaScriptCore/bytecode/ArrayProfile.cpp b/Source/JavaScriptCore/bytecode/ArrayProfile.cpp new file mode 100644 index 000000000..b8ade2223 --- /dev/null +++ b/Source/JavaScriptCore/bytecode/ArrayProfile.cpp @@ -0,0 +1,178 @@ +/* + * Copyright (C) 2012, 2013 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. 
OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "ArrayProfile.h" + +#include "CodeBlock.h" +#include "JSCInlines.h" +#include <wtf/CommaPrinter.h> +#include <wtf/StringExtras.h> +#include <wtf/StringPrintStream.h> + +namespace JSC { + +void dumpArrayModes(PrintStream& out, ArrayModes arrayModes) +{ + if (!arrayModes) { + out.print("<empty>"); + return; + } + + if (arrayModes == ALL_ARRAY_MODES) { + out.print("TOP"); + return; + } + + CommaPrinter comma("|"); + if (arrayModes & asArrayModes(NonArray)) + out.print(comma, "NonArray"); + if (arrayModes & asArrayModes(NonArrayWithInt32)) + out.print(comma, "NonArrayWithInt32"); + if (arrayModes & asArrayModes(NonArrayWithDouble)) + out.print(comma, "NonArrayWithDouble"); + if (arrayModes & asArrayModes(NonArrayWithContiguous)) + out.print(comma, "NonArrayWithContiguous"); + if (arrayModes & asArrayModes(NonArrayWithArrayStorage)) + out.print(comma, "NonArrayWithArrayStorage"); + if (arrayModes & asArrayModes(NonArrayWithSlowPutArrayStorage)) + out.print(comma, "NonArrayWithSlowPutArrayStorage"); + if (arrayModes & asArrayModes(ArrayClass)) + out.print(comma, "ArrayClass"); + if (arrayModes & asArrayModes(ArrayWithUndecided)) + out.print(comma, "ArrayWithUndecided"); + if (arrayModes & asArrayModes(ArrayWithInt32)) + out.print(comma, "ArrayWithInt32"); + if (arrayModes & asArrayModes(ArrayWithDouble)) + out.print(comma, "ArrayWithDouble"); + if (arrayModes & asArrayModes(ArrayWithContiguous)) + out.print(comma, "ArrayWithContiguous"); + if (arrayModes & asArrayModes(ArrayWithArrayStorage)) + out.print(comma, "ArrayWithArrayStorage"); + if (arrayModes & asArrayModes(ArrayWithSlowPutArrayStorage)) + out.print(comma, "ArrayWithSlowPutArrayStorage"); + + if (arrayModes & Int8ArrayMode) + out.print(comma, "Int8ArrayMode"); + if (arrayModes & Int16ArrayMode) + out.print(comma, "Int16ArrayMode"); + if (arrayModes & Int32ArrayMode) + out.print(comma, "Int32ArrayMode"); + if (arrayModes & Uint8ArrayMode) + out.print(comma, "Uint8ArrayMode"); + if (arrayModes & Uint8ClampedArrayMode) + out.print(comma, "Uint8ClampedArrayMode"); + if (arrayModes & Uint16ArrayMode) + out.print(comma, "Uint16ArrayMode"); + if (arrayModes & Uint32ArrayMode) + out.print(comma, "Uint32ArrayMode"); + if (arrayModes & Float32ArrayMode) + out.print(comma, "Float32ArrayMode"); + if (arrayModes & Float64ArrayMode) + out.print(comma, "Float64ArrayMode"); +} + +void ArrayProfile::computeUpdatedPrediction(const ConcurrentJITLocker& locker, CodeBlock* codeBlock) +{ + if (!m_lastSeenStructureID) + return; + + Structure* lastSeenStructure = codeBlock->heap()->structureIDTable().get(m_lastSeenStructureID); + computeUpdatedPrediction(locker, codeBlock, lastSeenStructure); + m_lastSeenStructureID = 0; +} + +void ArrayProfile::computeUpdatedPrediction(const ConcurrentJITLocker&, CodeBlock* codeBlock, Structure* lastSeenStructure) +{ + m_observedArrayModes |= arrayModeFromStructure(lastSeenStructure); + + if (!m_didPerformFirstRunPruning + && hasTwoOrMoreBitsSet(m_observedArrayModes)) { + m_observedArrayModes 
= arrayModeFromStructure(lastSeenStructure); + m_didPerformFirstRunPruning = true; + } + + m_mayInterceptIndexedAccesses |= + lastSeenStructure->typeInfo().interceptsGetOwnPropertySlotByIndexEvenWhenLengthIsNotZero(); + JSGlobalObject* globalObject = codeBlock->globalObject(); + if (!globalObject->isOriginalArrayStructure(lastSeenStructure) + && !globalObject->isOriginalTypedArrayStructure(lastSeenStructure)) + m_usesOriginalArrayStructures = false; +} + +CString ArrayProfile::briefDescription(const ConcurrentJITLocker& locker, CodeBlock* codeBlock) +{ + computeUpdatedPrediction(locker, codeBlock); + return briefDescriptionWithoutUpdating(locker); +} + +CString ArrayProfile::briefDescriptionWithoutUpdating(const ConcurrentJITLocker&) +{ + StringPrintStream out; + + bool hasPrinted = false; + + if (m_observedArrayModes) { + if (hasPrinted) + out.print(", "); + out.print(ArrayModesDump(m_observedArrayModes)); + hasPrinted = true; + } + + if (m_mayStoreToHole) { + if (hasPrinted) + out.print(", "); + out.print("Hole"); + hasPrinted = true; + } + + if (m_outOfBounds) { + if (hasPrinted) + out.print(", "); + out.print("OutOfBounds"); + hasPrinted = true; + } + + if (m_mayInterceptIndexedAccesses) { + if (hasPrinted) + out.print(", "); + out.print("Intercept"); + hasPrinted = true; + } + + if (m_usesOriginalArrayStructures) { + if (hasPrinted) + out.print(", "); + out.print("Original"); + hasPrinted = true; + } + + UNUSED_PARAM(hasPrinted); + + return out.toCString(); +} + +} // namespace JSC + diff --git a/Source/JavaScriptCore/bytecode/ArrayProfile.h b/Source/JavaScriptCore/bytecode/ArrayProfile.h new file mode 100644 index 000000000..66b295da7 --- /dev/null +++ b/Source/JavaScriptCore/bytecode/ArrayProfile.h @@ -0,0 +1,255 @@ +/* + * Copyright (C) 2012, 2013 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef ArrayProfile_h +#define ArrayProfile_h + +#include "ConcurrentJITLock.h" +#include "JSArray.h" +#include "Structure.h" +#include <wtf/HashMap.h> +#include <wtf/SegmentedVector.h> + +namespace JSC { + +class CodeBlock; +class LLIntOffsetsExtractor; + +// This is a bitfield where each bit represents an type of array access that we have seen. +// There are 16 indexing types that use the lower bits. 
+// There are 9 typed array types taking the bits 16 to 25. +typedef unsigned ArrayModes; + +const ArrayModes Int8ArrayMode = 1 << 16; +const ArrayModes Int16ArrayMode = 1 << 17; +const ArrayModes Int32ArrayMode = 1 << 18; +const ArrayModes Uint8ArrayMode = 1 << 19; +const ArrayModes Uint8ClampedArrayMode = 1 << 20; +const ArrayModes Uint16ArrayMode = 1 << 21; +const ArrayModes Uint32ArrayMode = 1 << 22; +const ArrayModes Float32ArrayMode = 1 << 23; +const ArrayModes Float64ArrayMode = 1 << 24; + +#define asArrayModes(type) \ + (static_cast<unsigned>(1) << static_cast<unsigned>(type)) + +#define ALL_TYPED_ARRAY_MODES \ + (Int8ArrayMode \ + | Int16ArrayMode \ + | Int32ArrayMode \ + | Uint8ArrayMode \ + | Uint8ClampedArrayMode \ + | Uint16ArrayMode \ + | Uint32ArrayMode \ + | Float32ArrayMode \ + | Float64ArrayMode \ + ) + +#define ALL_NON_ARRAY_ARRAY_MODES \ + (asArrayModes(NonArray) \ + | asArrayModes(NonArrayWithInt32) \ + | asArrayModes(NonArrayWithDouble) \ + | asArrayModes(NonArrayWithContiguous) \ + | asArrayModes(NonArrayWithArrayStorage) \ + | asArrayModes(NonArrayWithSlowPutArrayStorage) \ + | ALL_TYPED_ARRAY_MODES) + +#define ALL_ARRAY_ARRAY_MODES \ + (asArrayModes(ArrayClass) \ + | asArrayModes(ArrayWithUndecided) \ + | asArrayModes(ArrayWithInt32) \ + | asArrayModes(ArrayWithDouble) \ + | asArrayModes(ArrayWithContiguous) \ + | asArrayModes(ArrayWithArrayStorage) \ + | asArrayModes(ArrayWithSlowPutArrayStorage)) + +#define ALL_ARRAY_MODES (ALL_NON_ARRAY_ARRAY_MODES | ALL_ARRAY_ARRAY_MODES) + +inline ArrayModes arrayModeFromStructure(Structure* structure) +{ + switch (structure->classInfo()->typedArrayStorageType) { + case TypeInt8: + return Int8ArrayMode; + case TypeUint8: + return Uint8ArrayMode; + case TypeUint8Clamped: + return Uint8ClampedArrayMode; + case TypeInt16: + return Int16ArrayMode; + case TypeUint16: + return Uint16ArrayMode; + case TypeInt32: + return Int32ArrayMode; + case TypeUint32: + return Uint32ArrayMode; + case TypeFloat32: + return Float32ArrayMode; + case TypeFloat64: + return Float64ArrayMode; + case TypeDataView: + case NotTypedArray: + break; + } + return asArrayModes(structure->indexingType()); +} + +void dumpArrayModes(PrintStream&, ArrayModes); +MAKE_PRINT_ADAPTOR(ArrayModesDump, ArrayModes, dumpArrayModes); + +inline bool mergeArrayModes(ArrayModes& left, ArrayModes right) +{ + ArrayModes newModes = left | right; + if (newModes == left) + return false; + left = newModes; + return true; +} + +inline bool arrayModesAreClearOrTop(ArrayModes modes) +{ + return !modes || modes == ALL_ARRAY_MODES; +} + +// Checks if proven is a subset of expected. 
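// A worked example of this check (hypothetical values, shown only for
// illustration): with proven = asArrayModes(ArrayWithInt32) and
// expected = asArrayModes(ArrayWithInt32) | asArrayModes(ArrayWithDouble),
// (expected | proven) == expected holds, so the check passes -- everything
// that was proven was already anticipated. Swapping the two arguments fails,
// because proven would then contribute the ArrayWithDouble bit that
// expected lacks.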
+inline bool arrayModesAlreadyChecked(ArrayModes proven, ArrayModes expected) +{ + return (expected | proven) == expected; +} + +inline bool arrayModesInclude(ArrayModes arrayModes, IndexingType shape) +{ + return !!(arrayModes & (asArrayModes(NonArray | shape) | asArrayModes(ArrayClass | shape))); +} + +inline bool shouldUseSlowPutArrayStorage(ArrayModes arrayModes) +{ + return arrayModesInclude(arrayModes, SlowPutArrayStorageShape); +} + +inline bool shouldUseFastArrayStorage(ArrayModes arrayModes) +{ + return arrayModesInclude(arrayModes, ArrayStorageShape); +} + +inline bool shouldUseContiguous(ArrayModes arrayModes) +{ + return arrayModesInclude(arrayModes, ContiguousShape); +} + +inline bool shouldUseDouble(ArrayModes arrayModes) +{ + return arrayModesInclude(arrayModes, DoubleShape); +} + +inline bool shouldUseInt32(ArrayModes arrayModes) +{ + return arrayModesInclude(arrayModes, Int32Shape); +} + +inline bool hasSeenArray(ArrayModes arrayModes) +{ + return arrayModes & ALL_ARRAY_ARRAY_MODES; +} + +inline bool hasSeenNonArray(ArrayModes arrayModes) +{ + return arrayModes & ALL_NON_ARRAY_ARRAY_MODES; +} + +class ArrayProfile { +public: + ArrayProfile() + : m_bytecodeOffset(std::numeric_limits<unsigned>::max()) + , m_lastSeenStructureID(0) + , m_mayStoreToHole(false) + , m_outOfBounds(false) + , m_mayInterceptIndexedAccesses(false) + , m_usesOriginalArrayStructures(true) + , m_didPerformFirstRunPruning(false) + , m_observedArrayModes(0) + { + } + + ArrayProfile(unsigned bytecodeOffset) + : m_bytecodeOffset(bytecodeOffset) + , m_lastSeenStructureID(0) + , m_mayStoreToHole(false) + , m_outOfBounds(false) + , m_mayInterceptIndexedAccesses(false) + , m_usesOriginalArrayStructures(true) + , m_didPerformFirstRunPruning(false) + , m_observedArrayModes(0) + { + } + + unsigned bytecodeOffset() const { return m_bytecodeOffset; } + + StructureID* addressOfLastSeenStructureID() { return &m_lastSeenStructureID; } + ArrayModes* addressOfArrayModes() { return &m_observedArrayModes; } + bool* addressOfMayStoreToHole() { return &m_mayStoreToHole; } + + void setOutOfBounds() { m_outOfBounds = true; } + bool* addressOfOutOfBounds() { return &m_outOfBounds; } + + void observeStructure(Structure* structure) + { + m_lastSeenStructureID = structure->id(); + } + + void computeUpdatedPrediction(const ConcurrentJITLocker&, CodeBlock*); + void computeUpdatedPrediction(const ConcurrentJITLocker&, CodeBlock*, Structure* lastSeenStructure); + + ArrayModes observedArrayModes(const ConcurrentJITLocker&) const { return m_observedArrayModes; } + bool mayInterceptIndexedAccesses(const ConcurrentJITLocker&) const { return m_mayInterceptIndexedAccesses; } + + bool mayStoreToHole(const ConcurrentJITLocker&) const { return m_mayStoreToHole; } + bool outOfBounds(const ConcurrentJITLocker&) const { return m_outOfBounds; } + + bool usesOriginalArrayStructures(const ConcurrentJITLocker&) const { return m_usesOriginalArrayStructures; } + + CString briefDescription(const ConcurrentJITLocker&, CodeBlock*); + CString briefDescriptionWithoutUpdating(const ConcurrentJITLocker&); + +private: + friend class LLIntOffsetsExtractor; + + static Structure* polymorphicStructure() { return static_cast<Structure*>(reinterpret_cast<void*>(1)); } + + unsigned m_bytecodeOffset; + StructureID m_lastSeenStructureID; + bool m_mayStoreToHole; // This flag may become overloaded to indicate other special cases that were encountered during array access, as it depends on indexing type. 
Since we currently have basically just one indexing type (two variants of ArrayStorage), this flag for now just means exactly what its name implies. + bool m_outOfBounds; + bool m_mayInterceptIndexedAccesses : 1; + bool m_usesOriginalArrayStructures : 1; + bool m_didPerformFirstRunPruning : 1; + ArrayModes m_observedArrayModes; +}; + +typedef SegmentedVector<ArrayProfile, 4> ArrayProfileVector; + +} // namespace JSC + +#endif // ArrayProfile_h + diff --git a/Source/JavaScriptCore/bytecode/ByValInfo.h b/Source/JavaScriptCore/bytecode/ByValInfo.h new file mode 100644 index 000000000..b46a40d07 --- /dev/null +++ b/Source/JavaScriptCore/bytecode/ByValInfo.h @@ -0,0 +1,253 @@ +/* + * Copyright (C) 2012, 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef ByValInfo_h +#define ByValInfo_h + +#include "ClassInfo.h" +#include "CodeLocation.h" +#include "CodeOrigin.h" +#include "IndexingType.h" +#include "JITStubRoutine.h" +#include "Structure.h" +#include "StructureStubInfo.h" + +namespace JSC { + +#if ENABLE(JIT) + +enum JITArrayMode { + JITInt32, + JITDouble, + JITContiguous, + JITArrayStorage, + JITDirectArguments, + JITScopedArguments, + JITInt8Array, + JITInt16Array, + JITInt32Array, + JITUint8Array, + JITUint8ClampedArray, + JITUint16Array, + JITUint32Array, + JITFloat32Array, + JITFloat64Array +}; + +inline bool isOptimizableIndexingType(IndexingType indexingType) +{ + switch (indexingType) { + case ALL_INT32_INDEXING_TYPES: + case ALL_DOUBLE_INDEXING_TYPES: + case ALL_CONTIGUOUS_INDEXING_TYPES: + case ARRAY_WITH_ARRAY_STORAGE_INDEXING_TYPES: + return true; + default: + return false; + } +} + +inline bool hasOptimizableIndexingForJSType(JSType type) +{ + switch (type) { + case DirectArgumentsType: + case ScopedArgumentsType: + return true; + default: + return false; + } +} + +inline bool hasOptimizableIndexingForClassInfo(const ClassInfo* classInfo) +{ + return isTypedView(classInfo->typedArrayStorageType); +} + +inline bool hasOptimizableIndexing(Structure* structure) +{ + return isOptimizableIndexingType(structure->indexingType()) + || hasOptimizableIndexingForJSType(structure->typeInfo().type()) + || hasOptimizableIndexingForClassInfo(structure->classInfo()); +} + +inline JITArrayMode jitArrayModeForIndexingType(IndexingType indexingType) +{ + switch (indexingType) { + case ALL_INT32_INDEXING_TYPES: + return JITInt32; + case ALL_DOUBLE_INDEXING_TYPES: + return JITDouble; + case ALL_CONTIGUOUS_INDEXING_TYPES: + return JITContiguous; + case ARRAY_WITH_ARRAY_STORAGE_INDEXING_TYPES: + return JITArrayStorage; + default: + CRASH(); + return JITContiguous; + } +} + +inline JITArrayMode jitArrayModeForJSType(JSType type) +{ + switch (type) { + case DirectArgumentsType: + return JITDirectArguments; + case ScopedArgumentsType: + return JITScopedArguments; + default: + RELEASE_ASSERT_NOT_REACHED(); + return JITContiguous; + } +} + +inline JITArrayMode jitArrayModeForClassInfo(const ClassInfo* classInfo) +{ + switch (classInfo->typedArrayStorageType) { + case TypeInt8: + return JITInt8Array; + case TypeInt16: + return JITInt16Array; + case TypeInt32: + return JITInt32Array; + case TypeUint8: + return JITUint8Array; + case TypeUint8Clamped: + return JITUint8ClampedArray; + case TypeUint16: + return JITUint16Array; + case TypeUint32: + return JITUint32Array; + case TypeFloat32: + return JITFloat32Array; + case TypeFloat64: + return JITFloat64Array; + default: + CRASH(); + return JITContiguous; + } +} + +inline bool jitArrayModePermitsPut(JITArrayMode mode) +{ + switch (mode) { + case JITDirectArguments: + case JITScopedArguments: + // We could support put_by_val on these at some point, but it's just not that profitable + // at the moment. 
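// For instance, a get_by_val on a ScopedArguments object can still take the
// optimized JITScopedArguments path, while the corresponding put_by_val is
// left on the generic path by the check below.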
+ return false; + default: + return true; + } +} + +inline TypedArrayType typedArrayTypeForJITArrayMode(JITArrayMode mode) +{ + switch (mode) { + case JITInt8Array: + return TypeInt8; + case JITInt16Array: + return TypeInt16; + case JITInt32Array: + return TypeInt32; + case JITUint8Array: + return TypeUint8; + case JITUint8ClampedArray: + return TypeUint8Clamped; + case JITUint16Array: + return TypeUint16; + case JITUint32Array: + return TypeUint32; + case JITFloat32Array: + return TypeFloat32; + case JITFloat64Array: + return TypeFloat64; + default: + CRASH(); + return NotTypedArray; + } +} + +inline JITArrayMode jitArrayModeForStructure(Structure* structure) +{ + if (isOptimizableIndexingType(structure->indexingType())) + return jitArrayModeForIndexingType(structure->indexingType()); + + if (hasOptimizableIndexingForJSType(structure->typeInfo().type())) + return jitArrayModeForJSType(structure->typeInfo().type()); + + ASSERT(hasOptimizableIndexingForClassInfo(structure->classInfo())); + return jitArrayModeForClassInfo(structure->classInfo()); +} + +struct ByValInfo { + ByValInfo() { } + + ByValInfo(unsigned bytecodeIndex, CodeLocationJump notIndexJump, CodeLocationJump badTypeJump, JITArrayMode arrayMode, ArrayProfile* arrayProfile, int16_t badTypeJumpToDone, int16_t badTypeJumpToNextHotPath, int16_t returnAddressToSlowPath) + : bytecodeIndex(bytecodeIndex) + , notIndexJump(notIndexJump) + , badTypeJump(badTypeJump) + , arrayMode(arrayMode) + , arrayProfile(arrayProfile) + , badTypeJumpToDone(badTypeJumpToDone) + , badTypeJumpToNextHotPath(badTypeJumpToNextHotPath) + , returnAddressToSlowPath(returnAddressToSlowPath) + , slowPathCount(0) + , stubInfo(nullptr) + , tookSlowPath(false) + { + } + + unsigned bytecodeIndex; + CodeLocationJump notIndexJump; + CodeLocationJump badTypeJump; + JITArrayMode arrayMode; // The array mode that was baked into the inline JIT code. + ArrayProfile* arrayProfile; + int16_t badTypeJumpToDone; + int16_t badTypeJumpToNextHotPath; + int16_t returnAddressToSlowPath; + unsigned slowPathCount; + RefPtr<JITStubRoutine> stubRoutine; + Identifier cachedId; + StructureStubInfo* stubInfo; + bool tookSlowPath; +}; + +inline unsigned getByValInfoBytecodeIndex(ByValInfo* info) +{ + return info->bytecodeIndex; +} + +typedef HashMap<CodeOrigin, ByValInfo*, CodeOriginApproximateHash> ByValInfoMap; + +#else // ENABLE(JIT) + +typedef HashMap<int, void*> ByValInfoMap; + +#endif // ENABLE(JIT) + +} // namespace JSC + +#endif // ByValInfo_h + diff --git a/Source/JavaScriptCore/bytecode/BytecodeBasicBlock.cpp b/Source/JavaScriptCore/bytecode/BytecodeBasicBlock.cpp new file mode 100644 index 000000000..54dfd168c --- /dev/null +++ b/Source/JavaScriptCore/bytecode/BytecodeBasicBlock.cpp @@ -0,0 +1,238 @@ +/* + * Copyright (C) 2013, 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. 
AND ITS CONTRIBUTORS ``AS IS'' + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "BytecodeBasicBlock.h" + +#include "CodeBlock.h" +#include "JSCInlines.h" +#include "PreciseJumpTargets.h" + +namespace JSC { + +void BytecodeBasicBlock::shrinkToFit() +{ + m_bytecodeOffsets.shrinkToFit(); + m_successors.shrinkToFit(); +} + +static bool isBranch(OpcodeID opcodeID) +{ + switch (opcodeID) { + case op_jmp: + case op_jtrue: + case op_jfalse: + case op_jeq_null: + case op_jneq_null: + case op_jneq_ptr: + case op_jless: + case op_jlesseq: + case op_jgreater: + case op_jgreatereq: + case op_jnless: + case op_jnlesseq: + case op_jngreater: + case op_jngreatereq: + case op_switch_imm: + case op_switch_char: + case op_switch_string: + case op_check_has_instance: + return true; + default: + return false; + } +} + +static bool isUnconditionalBranch(OpcodeID opcodeID) +{ + switch (opcodeID) { + case op_jmp: + return true; + default: + return false; + } +} + +static bool isTerminal(OpcodeID opcodeID) +{ + switch (opcodeID) { + case op_ret: + case op_end: + return true; + default: + return false; + } +} + +static bool isThrow(OpcodeID opcodeID) +{ + switch (opcodeID) { + case op_throw: + case op_throw_static_error: + return true; + default: + return false; + } +} + +static bool isJumpTarget(OpcodeID opcodeID, const Vector<unsigned, 32>& jumpTargets, unsigned bytecodeOffset) +{ + if (opcodeID == op_catch) + return true; + + return std::binary_search(jumpTargets.begin(), jumpTargets.end(), bytecodeOffset); +} + +static void linkBlocks(BytecodeBasicBlock* predecessor, BytecodeBasicBlock* successor) +{ + predecessor->addSuccessor(successor); +} + +void computeBytecodeBasicBlocks(CodeBlock* codeBlock, Vector<std::unique_ptr<BytecodeBasicBlock>>& basicBlocks) +{ + Vector<unsigned, 32> jumpTargets; + computePreciseJumpTargets(codeBlock, jumpTargets); + + // Create the entry and exit basic blocks. 
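// As a sketch (hypothetical bytecode stream, using the opcode lengths from
// BytecodeList.json):
//
//     0: op_enter            // leader: function entry
//     1: op_jtrue  -> 6      // branch; offset 4 becomes a leader
//     4: op_jmp    -> 8      // branch; offset 6 becomes a leader
//     6: op_inc              // jump target: leader
//     8: op_ret              // jump target: leader; terminal
//
// The leaders 0, 4, 6 and 8 yield four blocks, and the "+ 2" in the
// reservation below leaves room for the synthetic entry and exit blocks.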
+ basicBlocks.reserveCapacity(jumpTargets.size() + 2); + + auto entry = std::make_unique<BytecodeBasicBlock>(BytecodeBasicBlock::EntryBlock); + auto firstBlock = std::make_unique<BytecodeBasicBlock>(0, 0); + linkBlocks(entry.get(), firstBlock.get()); + + basicBlocks.append(WTF::move(entry)); + BytecodeBasicBlock* current = firstBlock.get(); + basicBlocks.append(WTF::move(firstBlock)); + + auto exit = std::make_unique<BytecodeBasicBlock>(BytecodeBasicBlock::ExitBlock); + + bool nextInstructionIsLeader = false; + + Interpreter* interpreter = codeBlock->vm()->interpreter; + Instruction* instructionsBegin = codeBlock->instructions().begin(); + unsigned instructionCount = codeBlock->instructions().size(); + for (unsigned bytecodeOffset = 0; bytecodeOffset < instructionCount;) { + OpcodeID opcodeID = interpreter->getOpcodeID(instructionsBegin[bytecodeOffset].u.opcode); + unsigned opcodeLength = opcodeLengths[opcodeID]; + + bool createdBlock = false; + // If the current bytecode is a jump target, then it's the leader of its own basic block. + if (isJumpTarget(opcodeID, jumpTargets, bytecodeOffset) || nextInstructionIsLeader) { + auto newBlock = std::make_unique<BytecodeBasicBlock>(bytecodeOffset, opcodeLength); + current = newBlock.get(); + basicBlocks.append(WTF::move(newBlock)); + createdBlock = true; + nextInstructionIsLeader = false; + bytecodeOffset += opcodeLength; + } + + // If the current bytecode is a branch or a return, then the next instruction is the leader of its own basic block. + if (isBranch(opcodeID) || isTerminal(opcodeID) || isThrow(opcodeID)) + nextInstructionIsLeader = true; + + if (createdBlock) + continue; + + // Otherwise, just add to the length of the current block. + current->addBytecodeLength(opcodeLength); + bytecodeOffset += opcodeLength; + } + + // Link basic blocks together. + for (unsigned i = 0; i < basicBlocks.size(); i++) { + BytecodeBasicBlock* block = basicBlocks[i].get(); + + if (block->isEntryBlock() || block->isExitBlock()) + continue; + + bool fallsThrough = true; + for (unsigned bytecodeOffset = block->leaderBytecodeOffset(); bytecodeOffset < block->leaderBytecodeOffset() + block->totalBytecodeLength();) { + const Instruction& currentInstruction = instructionsBegin[bytecodeOffset]; + OpcodeID opcodeID = interpreter->getOpcodeID(currentInstruction.u.opcode); + unsigned opcodeLength = opcodeLengths[opcodeID]; + // If we found a terminal bytecode, link to the exit block. + if (isTerminal(opcodeID)) { + ASSERT(bytecodeOffset + opcodeLength == block->leaderBytecodeOffset() + block->totalBytecodeLength()); + linkBlocks(block, exit.get()); + fallsThrough = false; + break; + } + + // If we found a throw, get the HandlerInfo for this instruction to see where we will jump. + // If there isn't one, treat this throw as a terminal. This is true even if we have a finally + // block because the finally block will create its own catch, which will generate a HandlerInfo. + if (isThrow(opcodeID)) { + ASSERT(bytecodeOffset + opcodeLength == block->leaderBytecodeOffset() + block->totalBytecodeLength()); + HandlerInfo* handler = codeBlock->handlerForBytecodeOffset(bytecodeOffset); + fallsThrough = false; + if (!handler) { + linkBlocks(block, exit.get()); + break; + } + for (unsigned i = 0; i < basicBlocks.size(); i++) { + BytecodeBasicBlock* otherBlock = basicBlocks[i].get(); + if (handler->target == otherBlock->leaderBytecodeOffset()) { + linkBlocks(block, otherBlock); + break; + } + } + break; + } + + // If we found a branch, link to the block(s) that we jump to. 
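// A conditional branch links to its jump target(s) here and, because
// fallsThrough stays true, also to the next block in program order once the
// inner loop finishes; an unconditional op_jmp links only to its target.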
+ if (isBranch(opcodeID)) { + ASSERT(bytecodeOffset + opcodeLength == block->leaderBytecodeOffset() + block->totalBytecodeLength()); + Vector<unsigned, 1> bytecodeOffsetsJumpedTo; + findJumpTargetsForBytecodeOffset(codeBlock, bytecodeOffset, bytecodeOffsetsJumpedTo); + + for (unsigned i = 0; i < basicBlocks.size(); i++) { + BytecodeBasicBlock* otherBlock = basicBlocks[i].get(); + if (bytecodeOffsetsJumpedTo.contains(otherBlock->leaderBytecodeOffset())) + linkBlocks(block, otherBlock); + } + + if (isUnconditionalBranch(opcodeID)) + fallsThrough = false; + + break; + } + bytecodeOffset += opcodeLength; + } + + // If we fall through then link to the next block in program order. + if (fallsThrough) { + ASSERT(i + 1 < basicBlocks.size()); + BytecodeBasicBlock* nextBlock = basicBlocks[i + 1].get(); + linkBlocks(block, nextBlock); + } + } + + basicBlocks.append(WTF::move(exit)); + + for (auto& basicBlock : basicBlocks) + basicBlock->shrinkToFit(); +} + +} // namespace JSC diff --git a/Source/JavaScriptCore/bytecode/BytecodeBasicBlock.h b/Source/JavaScriptCore/bytecode/BytecodeBasicBlock.h new file mode 100644 index 000000000..bd7d3ae9b --- /dev/null +++ b/Source/JavaScriptCore/bytecode/BytecodeBasicBlock.h @@ -0,0 +1,96 @@ +/* + * Copyright (C) 2013, 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef BytecodeBasicBlock_h +#define BytecodeBasicBlock_h + +#include <limits.h> +#include <wtf/FastBitVector.h> +#include <wtf/HashMap.h> +#include <wtf/RefCounted.h> +#include <wtf/Vector.h> + +namespace JSC { + +class CodeBlock; + +class BytecodeBasicBlock { + WTF_MAKE_FAST_ALLOCATED; +public: + enum SpecialBlockType { EntryBlock, ExitBlock }; + BytecodeBasicBlock(unsigned start, unsigned length); + BytecodeBasicBlock(SpecialBlockType); + void shrinkToFit(); + + bool isEntryBlock() { return !m_leaderBytecodeOffset && !m_totalBytecodeLength; } + bool isExitBlock() { return m_leaderBytecodeOffset == UINT_MAX && m_totalBytecodeLength == UINT_MAX; } + + unsigned leaderBytecodeOffset() { return m_leaderBytecodeOffset; } + unsigned totalBytecodeLength() { return m_totalBytecodeLength; } + + Vector<unsigned>& bytecodeOffsets() { return m_bytecodeOffsets; } + void addBytecodeLength(unsigned); + + Vector<BytecodeBasicBlock*>& successors() { return m_successors; } + void addSuccessor(BytecodeBasicBlock* block) { m_successors.append(block); } + + FastBitVector& in() { return m_in; } + FastBitVector& out() { return m_out; } + +private: + unsigned m_leaderBytecodeOffset; + unsigned m_totalBytecodeLength; + + Vector<unsigned> m_bytecodeOffsets; + Vector<BytecodeBasicBlock*> m_successors; + + FastBitVector m_in; + FastBitVector m_out; +}; + +void computeBytecodeBasicBlocks(CodeBlock*, Vector<std::unique_ptr<BytecodeBasicBlock>>&); + +inline BytecodeBasicBlock::BytecodeBasicBlock(unsigned start, unsigned length) + : m_leaderBytecodeOffset(start) + , m_totalBytecodeLength(length) +{ + m_bytecodeOffsets.append(m_leaderBytecodeOffset); +} + +inline BytecodeBasicBlock::BytecodeBasicBlock(BytecodeBasicBlock::SpecialBlockType blockType) + : m_leaderBytecodeOffset(blockType == BytecodeBasicBlock::EntryBlock ? 0 : UINT_MAX) + , m_totalBytecodeLength(blockType == BytecodeBasicBlock::EntryBlock ? 0 : UINT_MAX) +{ +} + +inline void BytecodeBasicBlock::addBytecodeLength(unsigned bytecodeLength) +{ + m_bytecodeOffsets.append(m_leaderBytecodeOffset + m_totalBytecodeLength); + m_totalBytecodeLength += bytecodeLength; +} + +} // namespace JSC + +#endif // BytecodeBasicBlock_h diff --git a/Source/JavaScriptCore/bytecode/BytecodeConventions.h b/Source/JavaScriptCore/bytecode/BytecodeConventions.h new file mode 100644 index 000000000..e375f263c --- /dev/null +++ b/Source/JavaScriptCore/bytecode/BytecodeConventions.h @@ -0,0 +1,36 @@ +/* + * Copyright (C) 2012 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. 
OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef BytecodeConventions_h +#define BytecodeConventions_h + +// Register numbers used in bytecode operations have different meaning according to their ranges: +// 0x80000000-0xFFFFFFFF Negative indices from the CallFrame pointer are entries in the call frame, see JSStack.h. +// 0x00000000-0x3FFFFFFF Forwards indices from the CallFrame pointer are local vars and temporaries with the function's callframe. +// 0x40000000-0x7FFFFFFF Positive indices from 0x40000000 specify entries in the constant pool on the CodeBlock. +static const int FirstConstantRegisterIndex = 0x40000000; + +#endif // BytecodeConventions_h + diff --git a/Source/JavaScriptCore/bytecode/BytecodeIntrinsicRegistry.cpp b/Source/JavaScriptCore/bytecode/BytecodeIntrinsicRegistry.cpp new file mode 100644 index 000000000..83f93c6dc --- /dev/null +++ b/Source/JavaScriptCore/bytecode/BytecodeIntrinsicRegistry.cpp @@ -0,0 +1,54 @@ +/* + * Copyright (C) 2015 Yusuke Suzuki <utatane.tea@gmail.com>. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "config.h" +#include "BytecodeIntrinsicRegistry.h" + +#include "CommonIdentifiers.h" +#include "Nodes.h" + +namespace JSC { + +#define INITIALISE_BYTECODE_INTRINSIC_NAMES_TO_SET(name) m_bytecodeIntrinsicMap.add(propertyNames.name##PrivateName.impl(), &BytecodeIntrinsicNode::emit_intrinsic_##name); + +BytecodeIntrinsicRegistry::BytecodeIntrinsicRegistry(const CommonIdentifiers& propertyNames) + : m_propertyNames(propertyNames) + , m_bytecodeIntrinsicMap() +{ + JSC_COMMON_BYTECODE_INTRINSICS_EACH_NAME(INITIALISE_BYTECODE_INTRINSIC_NAMES_TO_SET) +} + +BytecodeIntrinsicNode::EmitterType BytecodeIntrinsicRegistry::lookup(const Identifier& ident) const +{ + if (!m_propertyNames.isPrivateName(ident)) + return nullptr; + auto iterator = m_bytecodeIntrinsicMap.find(ident.impl()); + if (iterator == m_bytecodeIntrinsicMap.end()) + return nullptr; + return iterator->value; +} + +} // namespace JSC + diff --git a/Source/JavaScriptCore/bytecode/BytecodeIntrinsicRegistry.h b/Source/JavaScriptCore/bytecode/BytecodeIntrinsicRegistry.h new file mode 100644 index 000000000..87a578c16 --- /dev/null +++ b/Source/JavaScriptCore/bytecode/BytecodeIntrinsicRegistry.h @@ -0,0 +1,56 @@ +/* + * Copyright (C) 2015 Yusuke Suzuki <utatane.tea@gmail.com>. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef BytecodeIntrinsicRegistry_h +#define BytecodeIntrinsicRegistry_h + +#include "Identifier.h" +#include <wtf/HashTable.h> +#include <wtf/Noncopyable.h> + +namespace JSC { + +class CommonIdentifiers; +class BytecodeGenerator; +class BytecodeIntrinsicNode; +class RegisterID; + +class BytecodeIntrinsicRegistry { + WTF_MAKE_NONCOPYABLE(BytecodeIntrinsicRegistry); +public: + explicit BytecodeIntrinsicRegistry(const CommonIdentifiers&); + + typedef RegisterID* (BytecodeIntrinsicNode::* EmitterType)(BytecodeGenerator&, RegisterID*); + + EmitterType lookup(const Identifier&) const; + +private: + const CommonIdentifiers& m_propertyNames; + HashMap<RefPtr<UniquedStringImpl>, EmitterType, IdentifierRepHash> m_bytecodeIntrinsicMap; +}; + +} // namespace JSC + +#endif // BytecodeIntrinsicRegistry_h diff --git a/Source/JavaScriptCore/bytecode/BytecodeKills.h b/Source/JavaScriptCore/bytecode/BytecodeKills.h new file mode 100644 index 000000000..d073ded25 --- /dev/null +++ b/Source/JavaScriptCore/bytecode/BytecodeKills.h @@ -0,0 +1,181 @@ +/* + * Copyright (C) 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef BytecodeKills_h +#define BytecodeKills_h + +#include "CodeBlock.h" +#include <wtf/FastBitVector.h> + +namespace JSC { + +class BytecodeLivenessAnalysis; + +class BytecodeKills { + WTF_MAKE_FAST_ALLOCATED; +public: + BytecodeKills() + : m_codeBlock(nullptr) + { + } + + // By convention, we say that non-local operands are never killed. 
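// For example, querying an argument register always reports "not killed":
// the isLocal() check below filters out non-locals before the kill set is
// even consulted.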
+ bool operandIsKilled(unsigned bytecodeIndex, int operand) const + { + ASSERT_WITH_SECURITY_IMPLICATION(bytecodeIndex < m_codeBlock->instructions().size()); + VirtualRegister reg(operand); + if (reg.isLocal()) + return m_killSets[bytecodeIndex].contains(operand); + return false; + } + + bool operandIsKilled(Instruction* instruction, int operand) const + { + return operandIsKilled(instruction - m_codeBlock->instructions().begin(), operand); + } + + template<typename Functor> + void forEachOperandKilledAt(unsigned bytecodeIndex, const Functor& functor) const + { + ASSERT_WITH_SECURITY_IMPLICATION(bytecodeIndex < m_codeBlock->instructions().size()); + m_killSets[bytecodeIndex].forEachLocal( + [&] (unsigned local) { + functor(virtualRegisterForLocal(local)); + }); + } + + template<typename Functor> + void forEachOperandKilledAt(Instruction* pc, const Functor& functor) const + { + forEachOperandKilledAt(pc - m_codeBlock->instructions().begin(), functor); + } + +private: + friend class BytecodeLivenessAnalysis; + + class KillSet { + public: + KillSet() + : m_word(0) + { + } + + ~KillSet() + { + if (hasVector()) + delete vector(); + } + + void add(unsigned local) + { + if (isEmpty()) { + setOneItem(local); + return; + } + if (hasOneItem()) { + ASSERT(oneItem() != local); + Vector<unsigned>* vector = new Vector<unsigned>(); + vector->append(oneItem()); + vector->append(local); + setVector(vector); + return; + } + ASSERT(!vector()->contains(local)); + vector()->append(local); + } + + template<typename Functor> + void forEachLocal(const Functor& functor) + { + if (isEmpty()) + return; + if (hasOneItem()) { + functor(oneItem()); + return; + } + for (unsigned local : *vector()) + functor(local); + } + + bool contains(unsigned expectedLocal) + { + if (isEmpty()) + return false; + if (hasOneItem()) + return oneItem() == expectedLocal; + for (unsigned local : *vector()) { + if (local == expectedLocal) + return true; + } + return false; + } + + private: + bool isEmpty() const + { + return !m_word; + } + + bool hasOneItem() const + { + return m_word & 1; + } + + unsigned oneItem() const + { + return m_word >> 1; + } + + void setOneItem(unsigned value) + { + m_word = (value << 1) | 1; + } + + bool hasVector() const + { + return !isEmpty() && !hasOneItem(); + } + + Vector<unsigned>* vector() + { + return bitwise_cast<Vector<unsigned>*>(m_word); + } + + void setVector(Vector<unsigned>* value) + { + m_word = bitwise_cast<uintptr_t>(value); + } + + uintptr_t m_word; + }; + + CodeBlock* m_codeBlock; + std::unique_ptr<KillSet[]> m_killSets; +}; + +} // namespace JSC + +#endif // BytecodeKills_h + diff --git a/Source/JavaScriptCore/bytecode/BytecodeList.json b/Source/JavaScriptCore/bytecode/BytecodeList.json new file mode 100644 index 000000000..2cf753bb8 --- /dev/null +++ b/Source/JavaScriptCore/bytecode/BytecodeList.json @@ -0,0 +1,170 @@ +[ + { + "section" : "Bytecodes", "emitInHFile" : true, "emitInASMFile" : true, + "macroNameComponent" : "BYTECODE", "asmPrefix" : "llint_", + "bytecodes" : [ + { "name" : "op_enter", "length" : 1 }, + { "name" : "op_get_scope", "length" : 2 }, + { "name" : "op_create_direct_arguments", "length" : 2 }, + { "name" : "op_create_scoped_arguments", "length" : 3 }, + { "name" : "op_create_out_of_band_arguments", "length" : 2 }, + { "name" : "op_create_this", "length" : 5 }, + { "name" : "op_to_this", "length" : 4 }, + { "name" : "op_check_tdz", "length" : 2 }, + { "name" : "op_new_object", "length" : 4 }, + { "name" : "op_new_array", "length" : 5 }, + { "name" : 
"op_new_array_with_size", "length" : 4 }, + { "name" : "op_new_array_buffer", "length" : 5 }, + { "name" : "op_new_regexp", "length" : 3 }, + { "name" : "op_mov", "length" : 3 }, + { "name" : "op_not", "length" : 3 }, + { "name" : "op_eq", "length" : 4 }, + { "name" : "op_eq_null", "length" : 3 }, + { "name" : "op_neq", "length" : 4 }, + { "name" : "op_neq_null", "length" : 3 }, + { "name" : "op_stricteq", "length" : 4 }, + { "name" : "op_nstricteq", "length" : 4 }, + { "name" : "op_less", "length" : 4 }, + { "name" : "op_lesseq", "length" : 4 }, + { "name" : "op_greater", "length" : 4 }, + { "name" : "op_greatereq", "length" : 4 }, + { "name" : "op_inc", "length" : 2 }, + { "name" : "op_dec", "length" : 2 }, + { "name" : "op_to_number", "length" : 3 }, + { "name" : "op_to_string", "length" : 3 }, + { "name" : "op_negate", "length" : 3 }, + { "name" : "op_add", "length" : 5 }, + { "name" : "op_mul", "length" : 5 }, + { "name" : "op_div", "length" : 5 }, + { "name" : "op_mod", "length" : 4 }, + { "name" : "op_sub", "length" : 5 }, + { "name" : "op_lshift", "length" : 4 }, + { "name" : "op_rshift", "length" : 4 }, + { "name" : "op_urshift", "length" : 4 }, + { "name" : "op_unsigned", "length" : 3 }, + { "name" : "op_bitand", "length" : 5 }, + { "name" : "op_bitxor", "length" : 5 }, + { "name" : "op_bitor", "length" : 5 }, + { "name" : "op_check_has_instance", "length" : 5 }, + { "name" : "op_instanceof", "length" : 4 }, + { "name" : "op_typeof", "length" : 3 }, + { "name" : "op_is_undefined", "length" : 3 }, + { "name" : "op_is_boolean", "length" : 3 }, + { "name" : "op_is_number", "length" : 3 }, + { "name" : "op_is_string", "length" : 3 }, + { "name" : "op_is_object", "length" : 3 }, + { "name" : "op_is_object_or_null", "length" : 3 }, + { "name" : "op_is_function", "length" : 3 }, + { "name" : "op_in", "length" : 4 }, + { "name" : "op_get_by_id", "length" : 9 }, + { "name" : "op_get_by_id_out_of_line", "length" : 9 }, + { "name" : "op_get_array_length", "length" : 9 }, + { "name" : "op_put_by_id", "length" : 9 }, + { "name" : "op_put_by_id_out_of_line", "length" : 9 }, + { "name" : "op_put_by_id_transition_direct", "length" : 9 }, + { "name" : "op_put_by_id_transition_direct_out_of_line", "length" : 9 }, + { "name" : "op_put_by_id_transition_normal", "length" : 9 }, + { "name" : "op_put_by_id_transition_normal_out_of_line", "length" : 9 }, + { "name" : "op_del_by_id", "length" : 4 }, + { "name" : "op_get_by_val", "length" : 6 }, + { "name" : "op_put_by_val", "length" : 5 }, + { "name" : "op_put_by_val_direct", "length" : 5 }, + { "name" : "op_del_by_val", "length" : 4 }, + { "name" : "op_put_by_index", "length" : 4 }, + { "name" : "op_put_getter_by_id", "length" : 4 }, + { "name" : "op_put_setter_by_id", "length" : 4 }, + { "name" : "op_put_getter_setter", "length" : 5 }, + { "name" : "op_jmp", "length" : 2 }, + { "name" : "op_jtrue", "length" : 3 }, + { "name" : "op_jfalse", "length" : 3 }, + { "name" : "op_jeq_null", "length" : 3 }, + { "name" : "op_jneq_null", "length" : 3 }, + { "name" : "op_jneq_ptr", "length" : 4 }, + { "name" : "op_jless", "length" : 4 }, + { "name" : "op_jlesseq", "length" : 4 }, + { "name" : "op_jgreater", "length" : 4 }, + { "name" : "op_jgreatereq", "length" : 4 }, + { "name" : "op_jnless", "length" : 4 }, + { "name" : "op_jnlesseq", "length" : 4 }, + { "name" : "op_jngreater", "length" : 4 }, + { "name" : "op_jngreatereq", "length" : 4 }, + { "name" : "op_loop_hint", "length" : 1 }, + { "name" : "op_switch_imm", "length" : 4 }, + { "name" : "op_switch_char", 
"length" : 4 }, + { "name" : "op_switch_string", "length" : 4 }, + { "name" : "op_new_func", "length" : 4 }, + { "name" : "op_new_func_exp", "length" : 4 }, + { "name" : "op_call", "length" : 9 }, + { "name" : "op_call_eval", "length" : 9 }, + { "name" : "op_call_varargs", "length" : 9 }, + { "name" : "op_ret", "length" : 2 }, + { "name" : "op_construct", "length" : 9 }, + { "name" : "op_construct_varargs", "length" : 9 }, + { "name" : "op_strcat", "length" : 4 }, + { "name" : "op_to_primitive", "length" : 3 }, + { "name" : "op_resolve_scope", "length" : 7 }, + { "name" : "op_get_from_scope", "length" : 8 }, + { "name" : "op_put_to_scope", "length" : 7 }, + { "name" : "op_get_from_arguments", "length" : 5 }, + { "name" : "op_put_to_arguments", "length" : 4 }, + { "name" : "op_push_with_scope", "length" : 4 }, + { "name" : "op_create_lexical_environment", "length" : 5 }, + { "name" : "op_get_parent_scope", "length" : 3 }, + { "name" : "op_catch", "length" : 3 }, + { "name" : "op_throw", "length" : 2 }, + { "name" : "op_throw_static_error", "length" : 3 }, + { "name" : "op_debug", "length" : 3 }, + { "name" : "op_profile_will_call", "length" : 2 }, + { "name" : "op_profile_did_call", "length" : 2 }, + { "name" : "op_end", "length" : 2 }, + { "name" : "op_profile_type", "length" : 6 }, + { "name" : "op_profile_control_flow", "length" : 2 }, + { "name" : "op_get_enumerable_length", "length" : 3 }, + { "name" : "op_has_indexed_property", "length" : 5 }, + { "name" : "op_has_structure_property", "length" : 5 }, + { "name" : "op_has_generic_property", "length" : 4 }, + { "name" : "op_get_direct_pname", "length" : 7 }, + { "name" : "op_get_property_enumerator", "length" : 3 }, + { "name" : "op_enumerator_structure_pname", "length" : 4 }, + { "name" : "op_enumerator_generic_pname", "length" : 4 }, + { "name" : "op_to_index_string", "length" : 3 } + ] + }, + { + "section" : "CLoopHelpers", "emitInHFile" : true, "emitInASMFile" : false, "defaultLength" : 1, + "macroNameComponent" : "CLOOP_BYTECODE_HELPER", + "bytecodes" : [ + { "name" : "llint_entry" }, + { "name" : "getHostCallReturnValue" }, + { "name" : "llint_return_to_host" }, + { "name" : "llint_vm_entry_to_javascript" }, + { "name" : "llint_vm_entry_to_native" }, + { "name" : "llint_cloop_did_return_from_js_1" }, + { "name" : "llint_cloop_did_return_from_js_2" }, + { "name" : "llint_cloop_did_return_from_js_3" }, + { "name" : "llint_cloop_did_return_from_js_4" }, + { "name" : "llint_cloop_did_return_from_js_5" }, + { "name" : "llint_cloop_did_return_from_js_6" }, + { "name" : "llint_cloop_did_return_from_js_7" }, + { "name" : "llint_cloop_did_return_from_js_8" } + ] + }, + { + "section" : "NativeHelpers", "emitInHFile" : true, "emitInASMFile" : true, "defaultLength" : 1, + "macroNameComponent" : "BYTECODE_HELPER", + "bytecodes" : [ + { "name" : "llint_program_prologue" }, + { "name" : "llint_eval_prologue" }, + { "name" : "llint_function_for_call_prologue" }, + { "name" : "llint_function_for_construct_prologue" }, + { "name" : "llint_function_for_call_arity_check" }, + { "name" : "llint_function_for_construct_arity_check" }, + { "name" : "llint_generic_return_point" }, + { "name" : "llint_throw_from_slow_path_trampoline" }, + { "name" : "llint_throw_during_call_trampoline" }, + { "name" : "llint_native_call_trampoline" }, + { "name" : "llint_native_construct_trampoline" }, + { "name" : "handleUncaughtException" } + ] + } +] diff --git a/Source/JavaScriptCore/bytecode/BytecodeLivenessAnalysis.cpp 
b/Source/JavaScriptCore/bytecode/BytecodeLivenessAnalysis.cpp new file mode 100644 index 000000000..c77abeaa2 --- /dev/null +++ b/Source/JavaScriptCore/bytecode/BytecodeLivenessAnalysis.cpp @@ -0,0 +1,348 @@ +/* + * Copyright (C) 2013, 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "BytecodeLivenessAnalysis.h" + +#include "BytecodeKills.h" +#include "BytecodeLivenessAnalysisInlines.h" +#include "BytecodeUseDef.h" +#include "CodeBlock.h" +#include "FullBytecodeLiveness.h" +#include "PreciseJumpTargets.h" + +namespace JSC { + +BytecodeLivenessAnalysis::BytecodeLivenessAnalysis(CodeBlock* codeBlock) + : m_codeBlock(codeBlock) +{ + ASSERT(m_codeBlock); + compute(); +} + +static bool isValidRegisterForLiveness(CodeBlock* codeBlock, int operand) +{ + if (codeBlock->isConstantRegisterIndex(operand)) + return false; + + VirtualRegister virtualReg(operand); + return virtualReg.isLocal(); +} + +static unsigned getLeaderOffsetForBasicBlock(std::unique_ptr<BytecodeBasicBlock>* basicBlock) +{ + return (*basicBlock)->leaderBytecodeOffset(); +} + +static BytecodeBasicBlock* findBasicBlockWithLeaderOffset(Vector<std::unique_ptr<BytecodeBasicBlock>>& basicBlocks, unsigned leaderOffset) +{ + return (*tryBinarySearch<std::unique_ptr<BytecodeBasicBlock>, unsigned>(basicBlocks, basicBlocks.size(), leaderOffset, getLeaderOffsetForBasicBlock)).get(); +} + +static bool blockContainsBytecodeOffset(BytecodeBasicBlock* block, unsigned bytecodeOffset) +{ + unsigned leaderOffset = block->leaderBytecodeOffset(); + return bytecodeOffset >= leaderOffset && bytecodeOffset < leaderOffset + block->totalBytecodeLength(); +} + +static BytecodeBasicBlock* findBasicBlockForBytecodeOffset(Vector<std::unique_ptr<BytecodeBasicBlock>>& basicBlocks, unsigned bytecodeOffset) +{ +/* + for (unsigned i = 0; i < basicBlocks.size(); i++) { + if (blockContainsBytecodeOffset(basicBlocks[i].get(), bytecodeOffset)) + return basicBlocks[i].get(); + } + return 0; +*/ + std::unique_ptr<BytecodeBasicBlock>* basicBlock = approximateBinarySearch<std::unique_ptr<BytecodeBasicBlock>, unsigned>( + basicBlocks, basicBlocks.size(), bytecodeOffset, getLeaderOffsetForBasicBlock); + // We found the block we were looking for. 
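+    // approximateBinarySearch() only guarantees a block whose leader offset is near the
+    // target, so the containing block is either the returned block or one of its immediate
+    // neighbors. A worked example with hypothetical numbers (not taken from real bytecode):
+    // given blocks with leader offsets {0, 10, 25} and lengths {10, 15, 30}, a query for
+    // offset 22 may return the block whose leader is 25; since 22 < 25, the containing
+    // block is its left neighbor, the one with leader 10. The three cases below handle the
+    // exact hit, the left neighbor, and the right neighbor.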
+    if (blockContainsBytecodeOffset((*basicBlock).get(), bytecodeOffset))
+        return (*basicBlock).get();
+
+    // Basic block is to the left of the returned block.
+    if (bytecodeOffset < (*basicBlock)->leaderBytecodeOffset()) {
+        ASSERT(basicBlock - 1 >= basicBlocks.data());
+        ASSERT(blockContainsBytecodeOffset(basicBlock[-1].get(), bytecodeOffset));
+        return basicBlock[-1].get();
+    }
+
+    // Basic block is to the right of the returned block.
+    ASSERT(&basicBlock[1] <= &basicBlocks.last());
+    ASSERT(blockContainsBytecodeOffset(basicBlock[1].get(), bytecodeOffset));
+    return basicBlock[1].get();
+}
+
+// Simplified interface to bytecode use/def, which determines defs first and then uses, and includes
+// exception handlers in the uses.
+template<typename UseFunctor, typename DefFunctor>
+static void stepOverInstruction(CodeBlock* codeBlock, Vector<std::unique_ptr<BytecodeBasicBlock>>& basicBlocks, unsigned bytecodeOffset, const UseFunctor& use, const DefFunctor& def)
+{
+    // This abstractly executes the instruction in reverse. Instructions logically first use operands and
+    // then define operands. This logical ordering is necessary for operations that use and def the same
+    // operand, like:
+    //
+    //     op_add loc1, loc1, loc2
+    //
+    // The use of loc1 happens before the def of loc1. That's a semantic requirement, since the add
+    // operation cannot travel forward in time to read the value that it will produce after reading that
+    // value. Since we are executing in reverse, this means that we must do defs before uses (the reverse
+    // of uses before defs).
+    //
+    // Since this is a liveness analysis, this ordering ends up being particularly important: if we did
+    // uses before defs, then the add operation above would appear to not have loc1 live, since we'd
+    // first add it to the out set (the use), and then we'd remove it (the def).
+
+    computeDefsForBytecodeOffset(
+        codeBlock, bytecodeOffset,
+        [&] (CodeBlock* codeBlock, Instruction*, OpcodeID, int operand) {
+            if (isValidRegisterForLiveness(codeBlock, operand))
+                def(VirtualRegister(operand).toLocal());
+        });
+
+    computeUsesForBytecodeOffset(
+        codeBlock, bytecodeOffset,
+        [&] (CodeBlock* codeBlock, Instruction*, OpcodeID, int operand) {
+            if (isValidRegisterForLiveness(codeBlock, operand))
+                use(VirtualRegister(operand).toLocal());
+        });
+
+    // If we have an exception handler, we want the live-in variables of the
+    // exception handler block to be included in the live-in of this particular bytecode.
+    if (HandlerInfo* handler = codeBlock->handlerForBytecodeOffset(bytecodeOffset)) {
+        BytecodeBasicBlock* handlerBlock = findBasicBlockWithLeaderOffset(basicBlocks, handler->target);
+        ASSERT(handlerBlock);
+        handlerBlock->in().forEachSetBit(use);
+    }
+}
+
+static void stepOverInstruction(CodeBlock* codeBlock, Vector<std::unique_ptr<BytecodeBasicBlock>>& basicBlocks, unsigned bytecodeOffset, FastBitVector& out)
+{
+    stepOverInstruction(
+        codeBlock, basicBlocks, bytecodeOffset,
+        [&] (unsigned bitIndex) {
+            // This is the use functor, so we set the bit.
+            out.set(bitIndex);
+        },
+        [&] (unsigned bitIndex) {
+            // This is the def functor, so we clear the bit.
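+            // (Clearing on def and setting on use, while walking the block backwards,
+            // is the classic backward-liveness transfer function, computed in place on
+            // "out":
+            //     in(instr) = (out(instr) - defs(instr)) | uses(instr).)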
+ out.clear(bitIndex); + }); +} + +static void computeLocalLivenessForBytecodeOffset(CodeBlock* codeBlock, BytecodeBasicBlock* block, Vector<std::unique_ptr<BytecodeBasicBlock>>& basicBlocks, unsigned targetOffset, FastBitVector& result) +{ + ASSERT(!block->isExitBlock()); + ASSERT(!block->isEntryBlock()); + + FastBitVector out = block->out(); + + for (int i = block->bytecodeOffsets().size() - 1; i >= 0; i--) { + unsigned bytecodeOffset = block->bytecodeOffsets()[i]; + if (targetOffset > bytecodeOffset) + break; + + stepOverInstruction(codeBlock, basicBlocks, bytecodeOffset, out); + } + + result.set(out); +} + +static void computeLocalLivenessForBlock(CodeBlock* codeBlock, BytecodeBasicBlock* block, Vector<std::unique_ptr<BytecodeBasicBlock>>& basicBlocks) +{ + if (block->isExitBlock() || block->isEntryBlock()) + return; + computeLocalLivenessForBytecodeOffset(codeBlock, block, basicBlocks, block->leaderBytecodeOffset(), block->in()); +} + +void BytecodeLivenessAnalysis::runLivenessFixpoint() +{ + UnlinkedCodeBlock* unlinkedCodeBlock = m_codeBlock->unlinkedCodeBlock(); + unsigned numberOfVariables = unlinkedCodeBlock->m_numCalleeRegisters; + + for (unsigned i = 0; i < m_basicBlocks.size(); i++) { + BytecodeBasicBlock* block = m_basicBlocks[i].get(); + block->in().resize(numberOfVariables); + block->out().resize(numberOfVariables); + } + + bool changed; + m_basicBlocks.last()->in().clearAll(); + m_basicBlocks.last()->out().clearAll(); + FastBitVector newOut; + newOut.resize(m_basicBlocks.last()->out().numBits()); + do { + changed = false; + for (unsigned i = m_basicBlocks.size() - 1; i--;) { + BytecodeBasicBlock* block = m_basicBlocks[i].get(); + newOut.clearAll(); + for (unsigned j = 0; j < block->successors().size(); j++) + newOut.merge(block->successors()[j]->in()); + bool outDidChange = block->out().setAndCheck(newOut); + computeLocalLivenessForBlock(m_codeBlock, block, m_basicBlocks); + changed |= outDidChange; + } + } while (changed); +} + +void BytecodeLivenessAnalysis::getLivenessInfoAtBytecodeOffset(unsigned bytecodeOffset, FastBitVector& result) +{ + BytecodeBasicBlock* block = findBasicBlockForBytecodeOffset(m_basicBlocks, bytecodeOffset); + ASSERT(block); + ASSERT(!block->isEntryBlock()); + ASSERT(!block->isExitBlock()); + result.resize(block->out().numBits()); + computeLocalLivenessForBytecodeOffset(m_codeBlock, block, m_basicBlocks, bytecodeOffset, result); +} + +bool BytecodeLivenessAnalysis::operandIsLiveAtBytecodeOffset(int operand, unsigned bytecodeOffset) +{ + if (operandIsAlwaysLive(operand)) + return true; + FastBitVector result; + getLivenessInfoAtBytecodeOffset(bytecodeOffset, result); + return operandThatIsNotAlwaysLiveIsLive(result, operand); +} + +FastBitVector BytecodeLivenessAnalysis::getLivenessInfoAtBytecodeOffset(unsigned bytecodeOffset) +{ + FastBitVector out; + getLivenessInfoAtBytecodeOffset(bytecodeOffset, out); + return out; +} + +void BytecodeLivenessAnalysis::computeFullLiveness(FullBytecodeLiveness& result) +{ + FastBitVector out; + + result.m_map.resize(m_codeBlock->instructions().size()); + + for (unsigned i = m_basicBlocks.size(); i--;) { + BytecodeBasicBlock* block = m_basicBlocks[i].get(); + if (block->isEntryBlock() || block->isExitBlock()) + continue; + + out = block->out(); + + for (unsigned i = block->bytecodeOffsets().size(); i--;) { + unsigned bytecodeOffset = block->bytecodeOffsets()[i]; + stepOverInstruction(m_codeBlock, m_basicBlocks, bytecodeOffset, out); + result.m_map[bytecodeOffset] = out; + } + } +} + +void 
BytecodeLivenessAnalysis::computeKills(BytecodeKills& result) +{ + FastBitVector out; + + result.m_codeBlock = m_codeBlock; + result.m_killSets = std::make_unique<BytecodeKills::KillSet[]>(m_codeBlock->instructions().size()); + + for (unsigned i = m_basicBlocks.size(); i--;) { + BytecodeBasicBlock* block = m_basicBlocks[i].get(); + if (block->isEntryBlock() || block->isExitBlock()) + continue; + + out = block->out(); + + for (unsigned i = block->bytecodeOffsets().size(); i--;) { + unsigned bytecodeOffset = block->bytecodeOffsets()[i]; + stepOverInstruction( + m_codeBlock, m_basicBlocks, bytecodeOffset, + [&] (unsigned index) { + // This is for uses. + if (out.get(index)) + return; + result.m_killSets[bytecodeOffset].add(index); + out.set(index); + }, + [&] (unsigned index) { + // This is for defs. + out.clear(index); + }); + } + } +} + +void BytecodeLivenessAnalysis::dumpResults() +{ + Interpreter* interpreter = m_codeBlock->vm()->interpreter; + Instruction* instructionsBegin = m_codeBlock->instructions().begin(); + for (unsigned i = 0; i < m_basicBlocks.size(); i++) { + BytecodeBasicBlock* block = m_basicBlocks[i].get(); + dataLogF("\nBytecode basic block %u: %p (offset: %u, length: %u)\n", i, block, block->leaderBytecodeOffset(), block->totalBytecodeLength()); + dataLogF("Successors: "); + for (unsigned j = 0; j < block->successors().size(); j++) { + BytecodeBasicBlock* successor = block->successors()[j]; + dataLogF("%p ", successor); + } + dataLogF("\n"); + if (block->isEntryBlock()) { + dataLogF("Entry block %p\n", block); + continue; + } + if (block->isExitBlock()) { + dataLogF("Exit block: %p\n", block); + continue; + } + for (unsigned bytecodeOffset = block->leaderBytecodeOffset(); bytecodeOffset < block->leaderBytecodeOffset() + block->totalBytecodeLength();) { + const Instruction* currentInstruction = &instructionsBegin[bytecodeOffset]; + + dataLogF("Live variables: "); + FastBitVector liveBefore = getLivenessInfoAtBytecodeOffset(bytecodeOffset); + for (unsigned j = 0; j < liveBefore.numBits(); j++) { + if (liveBefore.get(j)) + dataLogF("%u ", j); + } + dataLogF("\n"); + m_codeBlock->dumpBytecode(WTF::dataFile(), m_codeBlock->globalObject()->globalExec(), instructionsBegin, currentInstruction); + + OpcodeID opcodeID = interpreter->getOpcodeID(instructionsBegin[bytecodeOffset].u.opcode); + unsigned opcodeLength = opcodeLengths[opcodeID]; + bytecodeOffset += opcodeLength; + } + + dataLogF("Live variables: "); + FastBitVector liveAfter = block->out(); + for (unsigned j = 0; j < liveAfter.numBits(); j++) { + if (liveAfter.get(j)) + dataLogF("%u ", j); + } + dataLogF("\n"); + } +} + +void BytecodeLivenessAnalysis::compute() +{ + computeBytecodeBasicBlocks(m_codeBlock, m_basicBlocks); + ASSERT(m_basicBlocks.size()); + runLivenessFixpoint(); + + if (Options::dumpBytecodeLivenessResults()) + dumpResults(); +} + +} // namespace JSC diff --git a/Source/JavaScriptCore/bytecode/BytecodeLivenessAnalysis.h b/Source/JavaScriptCore/bytecode/BytecodeLivenessAnalysis.h new file mode 100644 index 000000000..ece16f21f --- /dev/null +++ b/Source/JavaScriptCore/bytecode/BytecodeLivenessAnalysis.h @@ -0,0 +1,69 @@ +/* + * Copyright (C) 2013, 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef BytecodeLivenessAnalysis_h +#define BytecodeLivenessAnalysis_h + +#include "BytecodeBasicBlock.h" +#include <wtf/FastBitVector.h> +#include <wtf/HashMap.h> +#include <wtf/Vector.h> + +namespace JSC { + +class BytecodeKills; +class CodeBlock; +class FullBytecodeLiveness; + +class BytecodeLivenessAnalysis { + WTF_MAKE_FAST_ALLOCATED; + WTF_MAKE_NONCOPYABLE(BytecodeLivenessAnalysis); +public: + BytecodeLivenessAnalysis(CodeBlock*); + + bool operandIsLiveAtBytecodeOffset(int operand, unsigned bytecodeOffset); + FastBitVector getLivenessInfoAtBytecodeOffset(unsigned bytecodeOffset); + + void computeFullLiveness(FullBytecodeLiveness& result); + void computeKills(BytecodeKills& result); + +private: + void compute(); + void runLivenessFixpoint(); + void dumpResults(); + + void getLivenessInfoAtBytecodeOffset(unsigned bytecodeOffset, FastBitVector&); + + CodeBlock* m_codeBlock; + Vector<std::unique_ptr<BytecodeBasicBlock>> m_basicBlocks; +}; + +inline bool operandIsAlwaysLive(int operand); +inline bool operandThatIsNotAlwaysLiveIsLive(const FastBitVector& out, int operand); +inline bool operandIsLive(const FastBitVector& out, int operand); + +} // namespace JSC + +#endif // BytecodeLivenessAnalysis_h diff --git a/Source/JavaScriptCore/bytecode/BytecodeLivenessAnalysisInlines.h b/Source/JavaScriptCore/bytecode/BytecodeLivenessAnalysisInlines.h new file mode 100644 index 000000000..9b5c755fc --- /dev/null +++ b/Source/JavaScriptCore/bytecode/BytecodeLivenessAnalysisInlines.h @@ -0,0 +1,56 @@ +/* + * Copyright (C) 2013, 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. 
OR ITS CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef BytecodeLivenessAnalysisInlines_h +#define BytecodeLivenessAnalysisInlines_h + +#include "BytecodeLivenessAnalysis.h" +#include "CodeBlock.h" +#include "Operations.h" + +namespace JSC { + +inline bool operandIsAlwaysLive(int operand) +{ + return !VirtualRegister(operand).isLocal(); +} + +inline bool operandThatIsNotAlwaysLiveIsLive(const FastBitVector& out, int operand) +{ + unsigned local = VirtualRegister(operand).toLocal(); + if (local >= out.numBits()) + return false; + return out.get(local); +} + +inline bool operandIsLive(const FastBitVector& out, int operand) +{ + return operandIsAlwaysLive(operand) || operandThatIsNotAlwaysLiveIsLive(out, operand); +} + +} // namespace JSC + +#endif // BytecodeLivenessAnalysisInlines_h + diff --git a/Source/JavaScriptCore/bytecode/BytecodeUseDef.h b/Source/JavaScriptCore/bytecode/BytecodeUseDef.h new file mode 100644 index 000000000..0383a0fbd --- /dev/null +++ b/Source/JavaScriptCore/bytecode/BytecodeUseDef.h @@ -0,0 +1,386 @@ +/* + * Copyright (C) 2013, 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef BytecodeUseDef_h +#define BytecodeUseDef_h + +#include "CodeBlock.h" + +namespace JSC { + +template<typename Functor> +void computeUsesForBytecodeOffset( + CodeBlock* codeBlock, unsigned bytecodeOffset, const Functor& functor) +{ + Interpreter* interpreter = codeBlock->vm()->interpreter; + Instruction* instructionsBegin = codeBlock->instructions().begin(); + Instruction* instruction = &instructionsBegin[bytecodeOffset]; + OpcodeID opcodeID = interpreter->getOpcodeID(instruction->u.opcode); + switch (opcodeID) { + // No uses. 
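+    // The functor is invoked once per operand slot that the instruction reads. The groups
+    // below are keyed purely by operand layout: for example, op_add is laid out roughly as
+    // { opcode, dst, lhs, rhs, ... } (length 5 per the opcode table), so it falls in the
+    // group that reports instruction[2] and instruction[3] as uses, while its dst slot,
+    // instruction[1], is reported by computeDefsForBytecodeOffset() instead. The first
+    // group, directly below, covers opcodes whose operands are all immediates or metadata
+    // and therefore read no registers at all.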
+ case op_new_regexp: + case op_new_array_buffer: + case op_throw_static_error: + case op_debug: + case op_jneq_ptr: + case op_loop_hint: + case op_jmp: + case op_new_object: + case op_enter: + case op_catch: + case op_profile_control_flow: + case op_create_direct_arguments: + case op_create_out_of_band_arguments: + return; + case op_get_scope: + case op_to_this: + case op_check_tdz: + case op_profile_will_call: + case op_profile_did_call: + case op_profile_type: + case op_throw: + case op_end: + case op_ret: + case op_jtrue: + case op_jfalse: + case op_jeq_null: + case op_jneq_null: + case op_dec: + case op_inc: { + functor(codeBlock, instruction, opcodeID, instruction[1].u.operand); + return; + } + case op_jlesseq: + case op_jgreater: + case op_jgreatereq: + case op_jnless: + case op_jnlesseq: + case op_jngreater: + case op_jngreatereq: + case op_jless: { + functor(codeBlock, instruction, opcodeID, instruction[1].u.operand); + functor(codeBlock, instruction, opcodeID, instruction[2].u.operand); + return; + } + case op_put_by_val_direct: + case op_put_by_val: { + functor(codeBlock, instruction, opcodeID, instruction[1].u.operand); + functor(codeBlock, instruction, opcodeID, instruction[2].u.operand); + functor(codeBlock, instruction, opcodeID, instruction[3].u.operand); + return; + } + case op_put_by_index: + case op_put_by_id_transition_direct: + case op_put_by_id_transition_direct_out_of_line: + case op_put_by_id_transition_normal: + case op_put_by_id_transition_normal_out_of_line: + case op_put_by_id_out_of_line: + case op_put_by_id: + case op_put_getter_by_id: + case op_put_setter_by_id: + case op_put_to_scope: + case op_put_to_arguments: { + functor(codeBlock, instruction, opcodeID, instruction[1].u.operand); + functor(codeBlock, instruction, opcodeID, instruction[3].u.operand); + return; + } + case op_put_getter_setter: { + functor(codeBlock, instruction, opcodeID, instruction[1].u.operand); + functor(codeBlock, instruction, opcodeID, instruction[3].u.operand); + functor(codeBlock, instruction, opcodeID, instruction[4].u.operand); + return; + } + case op_get_property_enumerator: + case op_get_enumerable_length: + case op_new_func_exp: + case op_to_index_string: + case op_create_lexical_environment: + case op_resolve_scope: + case op_get_from_scope: + case op_to_primitive: + case op_get_by_id: + case op_get_by_id_out_of_line: + case op_get_array_length: + case op_typeof: + case op_is_undefined: + case op_is_boolean: + case op_is_number: + case op_is_string: + case op_is_object: + case op_is_object_or_null: + case op_is_function: + case op_to_number: + case op_to_string: + case op_negate: + case op_neq_null: + case op_eq_null: + case op_not: + case op_mov: + case op_new_array_with_size: + case op_create_this: + case op_del_by_id: + case op_unsigned: + case op_new_func: + case op_get_parent_scope: + case op_create_scoped_arguments: + case op_get_from_arguments: { + functor(codeBlock, instruction, opcodeID, instruction[2].u.operand); + return; + } + case op_has_generic_property: + case op_has_indexed_property: + case op_enumerator_structure_pname: + case op_enumerator_generic_pname: + case op_get_by_val: + case op_in: + case op_instanceof: + case op_check_has_instance: + case op_add: + case op_mul: + case op_div: + case op_mod: + case op_sub: + case op_lshift: + case op_rshift: + case op_urshift: + case op_bitand: + case op_bitxor: + case op_bitor: + case op_less: + case op_lesseq: + case op_greater: + case op_greatereq: + case op_nstricteq: + case op_stricteq: + case op_neq: + case 
op_eq: + case op_push_with_scope: + case op_del_by_val: { + functor(codeBlock, instruction, opcodeID, instruction[2].u.operand); + functor(codeBlock, instruction, opcodeID, instruction[3].u.operand); + return; + } + case op_has_structure_property: + case op_construct_varargs: + case op_call_varargs: { + functor(codeBlock, instruction, opcodeID, instruction[2].u.operand); + functor(codeBlock, instruction, opcodeID, instruction[3].u.operand); + functor(codeBlock, instruction, opcodeID, instruction[4].u.operand); + return; + } + case op_get_direct_pname: { + functor(codeBlock, instruction, opcodeID, instruction[2].u.operand); + functor(codeBlock, instruction, opcodeID, instruction[3].u.operand); + functor(codeBlock, instruction, opcodeID, instruction[4].u.operand); + functor(codeBlock, instruction, opcodeID, instruction[5].u.operand); + return; + } + case op_switch_string: + case op_switch_char: + case op_switch_imm: { + functor(codeBlock, instruction, opcodeID, instruction[3].u.operand); + return; + } + case op_new_array: + case op_strcat: { + int base = instruction[2].u.operand; + int count = instruction[3].u.operand; + for (int i = 0; i < count; i++) + functor(codeBlock, instruction, opcodeID, base - i); + return; + } + case op_construct: + case op_call_eval: + case op_call: { + functor(codeBlock, instruction, opcodeID, instruction[2].u.operand); + int argCount = instruction[3].u.operand; + int registerOffset = -instruction[4].u.operand; + int lastArg = registerOffset + CallFrame::thisArgumentOffset(); + for (int i = 0; i < argCount; i++) + functor(codeBlock, instruction, opcodeID, lastArg + i); + return; + } + default: + RELEASE_ASSERT_NOT_REACHED(); + break; + } +} + +template<typename Functor> +void computeDefsForBytecodeOffset(CodeBlock* codeBlock, unsigned bytecodeOffset, const Functor& functor) +{ + Interpreter* interpreter = codeBlock->vm()->interpreter; + Instruction* instructionsBegin = codeBlock->instructions().begin(); + Instruction* instruction = &instructionsBegin[bytecodeOffset]; + OpcodeID opcodeID = interpreter->getOpcodeID(instruction->u.opcode); + switch (opcodeID) { + // These don't define anything. + case op_put_to_scope: + case op_end: + case op_profile_will_call: + case op_profile_did_call: + case op_throw: + case op_throw_static_error: + case op_debug: + case op_ret: + case op_jmp: + case op_jtrue: + case op_jfalse: + case op_jeq_null: + case op_jneq_null: + case op_jneq_ptr: + case op_jless: + case op_jlesseq: + case op_jgreater: + case op_jgreatereq: + case op_jnless: + case op_jnlesseq: + case op_jngreater: + case op_jngreatereq: + case op_loop_hint: + case op_switch_imm: + case op_switch_char: + case op_switch_string: + case op_put_by_id: + case op_put_by_id_out_of_line: + case op_put_by_id_transition_direct: + case op_put_by_id_transition_direct_out_of_line: + case op_put_by_id_transition_normal: + case op_put_by_id_transition_normal_out_of_line: + case op_put_getter_by_id: + case op_put_setter_by_id: + case op_put_getter_setter: + case op_put_by_val: + case op_put_by_val_direct: + case op_put_by_index: + case op_profile_type: + case op_profile_control_flow: + case op_put_to_arguments: +#define LLINT_HELPER_OPCODES(opcode, length) case opcode: + FOR_EACH_LLINT_OPCODE_EXTENSION(LLINT_HELPER_OPCODES); +#undef LLINT_HELPER_OPCODES + return; + // These all have a single destination for the first argument. 
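+    // For every opcode in this group the destination register lives in instruction[1]; for
+    // example, for "op_add dst, lhs, rhs" the functor is called once with dst. Together with
+    // computeUsesForBytecodeOffset() above, this is what lets stepOverInstruction() in
+    // BytecodeLivenessAnalysis.cpp walk an instruction backwards: clear the def bit for
+    // instruction[1], then set the use bits.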
+ case op_to_index_string: + case op_get_enumerable_length: + case op_has_indexed_property: + case op_has_structure_property: + case op_has_generic_property: + case op_get_direct_pname: + case op_get_property_enumerator: + case op_enumerator_structure_pname: + case op_enumerator_generic_pname: + case op_get_parent_scope: + case op_push_with_scope: + case op_create_lexical_environment: + case op_resolve_scope: + case op_strcat: + case op_to_primitive: + case op_create_this: + case op_new_array: + case op_new_array_buffer: + case op_new_array_with_size: + case op_new_regexp: + case op_new_func: + case op_new_func_exp: + case op_call_varargs: + case op_construct_varargs: + case op_get_from_scope: + case op_call: + case op_call_eval: + case op_construct: + case op_get_by_id: + case op_get_by_id_out_of_line: + case op_get_array_length: + case op_check_has_instance: + case op_instanceof: + case op_get_by_val: + case op_typeof: + case op_is_undefined: + case op_is_boolean: + case op_is_number: + case op_is_string: + case op_is_object: + case op_is_object_or_null: + case op_is_function: + case op_in: + case op_to_number: + case op_to_string: + case op_negate: + case op_add: + case op_mul: + case op_div: + case op_mod: + case op_sub: + case op_lshift: + case op_rshift: + case op_urshift: + case op_bitand: + case op_bitxor: + case op_bitor: + case op_inc: + case op_dec: + case op_eq: + case op_neq: + case op_stricteq: + case op_nstricteq: + case op_less: + case op_lesseq: + case op_greater: + case op_greatereq: + case op_neq_null: + case op_eq_null: + case op_not: + case op_mov: + case op_new_object: + case op_to_this: + case op_check_tdz: + case op_get_scope: + case op_create_direct_arguments: + case op_create_scoped_arguments: + case op_create_out_of_band_arguments: + case op_del_by_id: + case op_del_by_val: + case op_unsigned: + case op_get_from_arguments: { + functor(codeBlock, instruction, opcodeID, instruction[1].u.operand); + return; + } + case op_catch: { + functor(codeBlock, instruction, opcodeID, instruction[1].u.operand); + functor(codeBlock, instruction, opcodeID, instruction[2].u.operand); + return; + } + case op_enter: { + for (unsigned i = codeBlock->m_numVars; i--;) + functor(codeBlock, instruction, opcodeID, virtualRegisterForLocal(i).offset()); + return; + } } +} + +} // namespace JSC + +#endif // BytecodeUseDef_h + diff --git a/Source/JavaScriptCore/bytecode/CallEdge.cpp b/Source/JavaScriptCore/bytecode/CallEdge.cpp new file mode 100644 index 000000000..dffff6dfd --- /dev/null +++ b/Source/JavaScriptCore/bytecode/CallEdge.cpp @@ -0,0 +1,37 @@ +/* + * Copyright (C) 2014 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. 
OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "CallEdge.h" + +namespace JSC { + +void CallEdge::dump(PrintStream& out) const +{ + out.print("<", m_callee, ", count: ", m_count, ">"); +} + +} // namespace JSC + diff --git a/Source/JavaScriptCore/bytecode/CallEdge.h b/Source/JavaScriptCore/bytecode/CallEdge.h new file mode 100644 index 000000000..304520951 --- /dev/null +++ b/Source/JavaScriptCore/bytecode/CallEdge.h @@ -0,0 +1,71 @@ +/* + * Copyright (C) 2014, 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef CallEdge_h +#define CallEdge_h + +#include "CallVariant.h" + +namespace JSC { + +class CallEdge { +public: + CallEdge(); + CallEdge(CallVariant, uint32_t); + + bool operator!() const { return !m_callee; } + + CallVariant callee() const { return m_callee; } + uint32_t count() const { return m_count; } + + CallEdge despecifiedClosure() const + { + return CallEdge(m_callee.despecifiedClosure(), m_count); + } + + void dump(PrintStream&) const; + +private: + CallVariant m_callee; + uint32_t m_count; +}; + +inline CallEdge::CallEdge(CallVariant callee, uint32_t count) + : m_callee(callee) + , m_count(count) +{ +} + +inline CallEdge::CallEdge() + : CallEdge(CallVariant(), 0) +{ +} + +typedef Vector<CallEdge, 1> CallEdgeList; + +} // namespace JSC + +#endif // CallEdge_h + diff --git a/Source/JavaScriptCore/bytecode/CallLinkInfo.cpp b/Source/JavaScriptCore/bytecode/CallLinkInfo.cpp new file mode 100644 index 000000000..7292f7364 --- /dev/null +++ b/Source/JavaScriptCore/bytecode/CallLinkInfo.cpp @@ -0,0 +1,106 @@ +/* + * Copyright (C) 2012, 2013, 2014 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "CallLinkInfo.h" + +#include "DFGOperations.h" +#include "DFGThunks.h" +#include "JSCInlines.h" +#include "Repatch.h" +#include "RepatchBuffer.h" +#include <wtf/ListDump.h> +#include <wtf/NeverDestroyed.h> + +#if ENABLE(JIT) +namespace JSC { + +void CallLinkInfo::clearStub() +{ + if (!stub()) + return; + + m_stub->clearCallNodesFor(this); + m_stub = nullptr; +} + +void CallLinkInfo::unlink(RepatchBuffer& repatchBuffer) +{ + if (!isLinked()) { + // We could be called even if we're not linked anymore because of how polymorphic calls + // work. Each callsite within the polymorphic call stub may separately ask us to unlink(). + RELEASE_ASSERT(!isOnList()); + return; + } + + unlinkFor(repatchBuffer, *this); + + // It will be on a list if the callee has a code block. + if (isOnList()) + remove(); +} + +void CallLinkInfo::visitWeak(RepatchBuffer& repatchBuffer) +{ + auto handleSpecificCallee = [&] (JSFunction* callee) { + if (Heap::isMarked(callee->executable())) + m_hasSeenClosure = true; + else + m_clearedByGC = true; + }; + + if (isLinked()) { + if (stub()) { + if (!stub()->visitWeak(repatchBuffer)) { + if (Options::verboseOSR()) { + dataLog( + "Clearing closure call from ", *repatchBuffer.codeBlock(), " to ", + listDump(stub()->variants()), ", stub routine ", RawPointer(stub()), + ".\n"); + } + unlink(repatchBuffer); + m_clearedByGC = true; + } + } else if (!Heap::isMarked(m_callee.get())) { + if (Options::verboseOSR()) { + dataLog( + "Clearing call from ", *repatchBuffer.codeBlock(), " to ", + RawPointer(m_callee.get()), " (", + m_callee.get()->executable()->hashFor(specializationKind()), + ").\n"); + } + handleSpecificCallee(m_callee.get()); + unlink(repatchBuffer); + } + } + if (haveLastSeenCallee() && !Heap::isMarked(lastSeenCallee())) { + handleSpecificCallee(lastSeenCallee()); + clearLastSeenCallee(); + } +} + +} // namespace JSC +#endif // ENABLE(JIT) + diff --git a/Source/JavaScriptCore/bytecode/CallLinkInfo.h b/Source/JavaScriptCore/bytecode/CallLinkInfo.h new file mode 100644 index 000000000..061c6f83e --- /dev/null +++ b/Source/JavaScriptCore/bytecode/CallLinkInfo.h @@ -0,0 +1,317 @@ +/* + * Copyright (C) 2012, 2014, 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef CallLinkInfo_h +#define CallLinkInfo_h + +#include "CodeLocation.h" +#include "CodeSpecializationKind.h" +#include "JITWriteBarrier.h" +#include "JSFunction.h" +#include "Opcode.h" +#include "PolymorphicCallStubRoutine.h" +#include "WriteBarrier.h" +#include <wtf/SentinelLinkedList.h> + +namespace JSC { + +#if ENABLE(JIT) + +class RepatchBuffer; + +class CallLinkInfo : public BasicRawSentinelNode<CallLinkInfo> { +public: + enum CallType { None, Call, CallVarargs, Construct, ConstructVarargs }; + static CallType callTypeFor(OpcodeID opcodeID) + { + if (opcodeID == op_call || opcodeID == op_call_eval) + return Call; + if (opcodeID == op_construct) + return Construct; + if (opcodeID == op_construct_varargs) + return ConstructVarargs; + ASSERT(opcodeID == op_call_varargs); + return CallVarargs; + } + + CallLinkInfo() + : m_registerPreservationMode(static_cast<unsigned>(RegisterPreservationNotRequired)) + , m_hasSeenShouldRepatch(false) + , m_hasSeenClosure(false) + , m_clearedByGC(false) + , m_callType(None) + , m_maxNumArguments(0) + , m_slowPathCount(0) + { + } + + ~CallLinkInfo() + { + clearStub(); + + if (isOnList()) + remove(); + } + + static CodeSpecializationKind specializationKindFor(CallType callType) + { + return specializationFromIsConstruct(callType == Construct || callType == ConstructVarargs); + } + CodeSpecializationKind specializationKind() const + { + return specializationKindFor(static_cast<CallType>(m_callType)); + } + + RegisterPreservationMode registerPreservationMode() const + { + return static_cast<RegisterPreservationMode>(m_registerPreservationMode); + } + + bool isLinked() { return m_stub || m_callee; } + void unlink(RepatchBuffer&); + + void setUpCall(CallType callType, CodeOrigin codeOrigin, unsigned calleeGPR) + { + m_callType = callType; + m_codeOrigin = codeOrigin; + m_calleeGPR = calleeGPR; + } + + void setCallLocations(CodeLocationNearCall callReturnLocation, CodeLocationDataLabelPtr hotPathBegin, + CodeLocationNearCall hotPathOther) + { + m_callReturnLocation = callReturnLocation; + m_hotPathBegin = hotPathBegin; + m_hotPathOther = hotPathOther; + } + + void setUpCallFromFTL(CallType callType, CodeOrigin codeOrigin, + CodeLocationNearCall callReturnLocation, CodeLocationDataLabelPtr hotPathBegin, + CodeLocationNearCall hotPathOther, unsigned calleeGPR) + { + m_registerPreservationMode = static_cast<unsigned>(MustPreserveRegisters); + m_callType = 
callType; + m_codeOrigin = codeOrigin; + m_callReturnLocation = callReturnLocation; + m_hotPathBegin = hotPathBegin; + m_hotPathOther = hotPathOther; + m_calleeGPR = calleeGPR; + } + + CodeLocationNearCall callReturnLocation() + { + return m_callReturnLocation; + } + + CodeLocationDataLabelPtr hotPathBegin() + { + return m_hotPathBegin; + } + + CodeLocationNearCall hotPathOther() + { + return m_hotPathOther; + } + + void setCallee(VM& vm, CodeLocationDataLabelPtr location, JSCell* owner, JSFunction* callee) + { + m_callee.set(vm, location, owner, callee); + } + + void clearCallee() + { + m_callee.clear(); + } + + JSFunction* callee() + { + return m_callee.get(); + } + + void setLastSeenCallee(VM& vm, const JSCell* owner, JSFunction* callee) + { + m_lastSeenCallee.set(vm, owner, callee); + } + + void clearLastSeenCallee() + { + m_lastSeenCallee.clear(); + } + + JSFunction* lastSeenCallee() + { + return m_lastSeenCallee.get(); + } + + bool haveLastSeenCallee() + { + return !!m_lastSeenCallee; + } + + void setStub(PassRefPtr<PolymorphicCallStubRoutine> newStub) + { + clearStub(); + m_stub = newStub; + } + + void clearStub(); + + PolymorphicCallStubRoutine* stub() + { + return m_stub.get(); + } + + void setSlowStub(PassRefPtr<JITStubRoutine> newSlowStub) + { + m_slowStub = newSlowStub; + } + + void clearSlowStub() + { + m_slowStub = nullptr; + } + + JITStubRoutine* slowStub() + { + return m_slowStub.get(); + } + + bool seenOnce() + { + return m_hasSeenShouldRepatch; + } + + void clearSeen() + { + m_hasSeenShouldRepatch = false; + } + + void setSeen() + { + m_hasSeenShouldRepatch = true; + } + + bool hasSeenClosure() + { + return m_hasSeenClosure; + } + + void setHasSeenClosure() + { + m_hasSeenClosure = true; + } + + bool clearedByGC() + { + return m_clearedByGC; + } + + void setCallType(CallType callType) + { + m_callType = callType; + } + + CallType callType() + { + return static_cast<CallType>(m_callType); + } + + uint8_t* addressOfMaxNumArguments() + { + return &m_maxNumArguments; + } + + uint8_t maxNumArguments() + { + return m_maxNumArguments; + } + + static ptrdiff_t offsetOfSlowPathCount() + { + return OBJECT_OFFSETOF(CallLinkInfo, m_slowPathCount); + } + + void setCalleeGPR(unsigned calleeGPR) + { + m_calleeGPR = calleeGPR; + } + + unsigned calleeGPR() + { + return m_calleeGPR; + } + + uint32_t slowPathCount() + { + return m_slowPathCount; + } + + void setCodeOrigin(CodeOrigin codeOrigin) + { + m_codeOrigin = codeOrigin; + } + + CodeOrigin codeOrigin() + { + return m_codeOrigin; + } + + void visitWeak(RepatchBuffer&); + +private: + CodeLocationNearCall m_callReturnLocation; + CodeLocationDataLabelPtr m_hotPathBegin; + CodeLocationNearCall m_hotPathOther; + JITWriteBarrier<JSFunction> m_callee; + WriteBarrier<JSFunction> m_lastSeenCallee; + RefPtr<PolymorphicCallStubRoutine> m_stub; + RefPtr<JITStubRoutine> m_slowStub; + unsigned m_registerPreservationMode : 1; // Real type is RegisterPreservationMode + bool m_hasSeenShouldRepatch : 1; + bool m_hasSeenClosure : 1; + bool m_clearedByGC : 1; + unsigned m_callType : 4; // CallType + unsigned m_calleeGPR : 8; + uint8_t m_maxNumArguments; // Only used for varargs calls. 
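+    // A note on the bit-fields above, inferred from the declarations rather than any
+    // documented guarantee: CallType has five enumerators, so it fits comfortably in the
+    // 4 bits of m_callType, and the boolean flags take 1 bit each. Packing them, together
+    // with the 8-bit m_calleeGPR, into a single word keeps CallLinkInfo compact, which
+    // matters because a CodeBlock carries one CallLinkInfo per JIT call site.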
+ uint32_t m_slowPathCount; + CodeOrigin m_codeOrigin; +}; + +inline CodeOrigin getCallLinkInfoCodeOrigin(CallLinkInfo& callLinkInfo) +{ + return callLinkInfo.codeOrigin(); +} + +typedef HashMap<CodeOrigin, CallLinkInfo*, CodeOriginApproximateHash> CallLinkInfoMap; + +#else // ENABLE(JIT) + +typedef HashMap<int, void*> CallLinkInfoMap; + +#endif // ENABLE(JIT) + +} // namespace JSC + +#endif // CallLinkInfo_h diff --git a/Source/JavaScriptCore/bytecode/CallLinkStatus.cpp b/Source/JavaScriptCore/bytecode/CallLinkStatus.cpp new file mode 100644 index 000000000..103a7f2b5 --- /dev/null +++ b/Source/JavaScriptCore/bytecode/CallLinkStatus.cpp @@ -0,0 +1,342 @@ +/* + * Copyright (C) 2012-2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "CallLinkStatus.h" + +#include "CallLinkInfo.h" +#include "CodeBlock.h" +#include "DFGJITCode.h" +#include "LLIntCallLinkInfo.h" +#include "JSCInlines.h" +#include <wtf/CommaPrinter.h> +#include <wtf/ListDump.h> + +namespace JSC { + +static const bool verbose = false; + +CallLinkStatus::CallLinkStatus(JSValue value) + : m_couldTakeSlowPath(false) + , m_isProved(false) +{ + if (!value || !value.isCell()) { + m_couldTakeSlowPath = true; + return; + } + + m_variants.append(CallVariant(value.asCell())); +} + +CallLinkStatus CallLinkStatus::computeFromLLInt(const ConcurrentJITLocker& locker, CodeBlock* profiledBlock, unsigned bytecodeIndex) +{ + UNUSED_PARAM(profiledBlock); + UNUSED_PARAM(bytecodeIndex); +#if ENABLE(DFG_JIT) + if (profiledBlock->hasExitSite(locker, DFG::FrequentExitSite(bytecodeIndex, BadCell))) { + // We could force this to be a closure call, but instead we'll just assume that it + // takes slow path. 
+        return takesSlowPath();
+    }
+#else
+    UNUSED_PARAM(locker);
+#endif
+
+    VM& vm = *profiledBlock->vm();
+
+    Instruction* instruction = profiledBlock->instructions().begin() + bytecodeIndex;
+    OpcodeID op = vm.interpreter->getOpcodeID(instruction[0].u.opcode);
+    if (op != op_call && op != op_construct)
+        return CallLinkStatus();
+
+    LLIntCallLinkInfo* callLinkInfo = instruction[5].u.callLinkInfo;
+
+    return CallLinkStatus(callLinkInfo->lastSeenCallee.get());
+}
+
+CallLinkStatus CallLinkStatus::computeFor(
+    CodeBlock* profiledBlock, unsigned bytecodeIndex, const CallLinkInfoMap& map)
+{
+    ConcurrentJITLocker locker(profiledBlock->m_lock);
+
+    UNUSED_PARAM(profiledBlock);
+    UNUSED_PARAM(bytecodeIndex);
+    UNUSED_PARAM(map);
+#if ENABLE(DFG_JIT)
+    ExitSiteData exitSiteData = computeExitSiteData(locker, profiledBlock, bytecodeIndex);
+
+    CallLinkInfo* callLinkInfo = map.get(CodeOrigin(bytecodeIndex));
+    if (!callLinkInfo) {
+        if (exitSiteData.m_takesSlowPath)
+            return takesSlowPath();
+        return computeFromLLInt(locker, profiledBlock, bytecodeIndex);
+    }
+
+    return computeFor(locker, profiledBlock, *callLinkInfo, exitSiteData);
+#else
+    return CallLinkStatus();
+#endif
+}
+
+CallLinkStatus::ExitSiteData CallLinkStatus::computeExitSiteData(
+    const ConcurrentJITLocker& locker, CodeBlock* profiledBlock, unsigned bytecodeIndex)
+{
+    ExitSiteData exitSiteData;
+
+#if ENABLE(DFG_JIT)
+    exitSiteData.m_takesSlowPath =
+        profiledBlock->hasExitSite(locker, DFG::FrequentExitSite(bytecodeIndex, BadType))
+        || profiledBlock->hasExitSite(locker, DFG::FrequentExitSite(bytecodeIndex, BadExecutable));
+    exitSiteData.m_badFunction =
+        profiledBlock->hasExitSite(locker, DFG::FrequentExitSite(bytecodeIndex, BadCell));
+#else
+    UNUSED_PARAM(locker);
+    UNUSED_PARAM(profiledBlock);
+    UNUSED_PARAM(bytecodeIndex);
+#endif
+
+    return exitSiteData;
+}
+
+#if ENABLE(JIT)
+CallLinkStatus CallLinkStatus::computeFor(
+    const ConcurrentJITLocker& locker, CodeBlock* profiledBlock, CallLinkInfo& callLinkInfo)
+{
+    // We don't really need this, but anytime we have to debug this code, it becomes indispensable.
+    UNUSED_PARAM(profiledBlock);
+
+    CallLinkStatus result = computeFromCallLinkInfo(locker, callLinkInfo);
+    result.m_maxNumArguments = callLinkInfo.maxNumArguments();
+    return result;
+}
+
+CallLinkStatus CallLinkStatus::computeFromCallLinkInfo(
+    const ConcurrentJITLocker&, CallLinkInfo& callLinkInfo)
+{
+    if (callLinkInfo.clearedByGC())
+        return takesSlowPath();
+
+    // Note that despite requiring that the locker is held, this code is racy with respect
+    // to the CallLinkInfo: it may get cleared while this code runs! This is because
+    // CallLinkInfo::unlink() may be called from a different CodeBlock than the one that owns
+    // the CallLinkInfo and currently we save space by not having CallLinkInfos know who owns
+    // them. So, there is no way for either the caller of CallLinkInfo::unlink() or unlink()
+    // itself to figure out which lock to lock.
+    //
+    // Fortunately, that doesn't matter. The only things we ask of CallLinkInfo - the slow
+    // path count, the stub, and the target - can all be asked racily. Stubs and targets can
+    // only be deleted at next GC, so if we load a non-null one, then it must contain data
+    // that is still marginally valid (i.e. the pointers ain't stale). This kind of raciness
+    // is probably OK for now.
+
+    // PolymorphicCallStubRoutine is a GCAwareJITStubRoutine, so if non-null, it will stay alive
+    // until next GC even if the CallLinkInfo is concurrently cleared.
Also, the variants list is + // never mutated after the PolymorphicCallStubRoutine is instantiated. We have some conservative + // fencing in place to make sure that we see the variants list after construction. + if (PolymorphicCallStubRoutine* stub = callLinkInfo.stub()) { + WTF::loadLoadFence(); + + CallEdgeList edges = stub->edges(); + + // Now that we've loaded the edges list, there are no further concurrency concerns. We will + // just manipulate and prune this list to our liking - mostly removing entries that are too + // infrequent and ensuring that it's sorted in descending order of frequency. + + RELEASE_ASSERT(edges.size()); + + std::sort( + edges.begin(), edges.end(), + [] (CallEdge a, CallEdge b) { + return a.count() > b.count(); + }); + RELEASE_ASSERT(edges.first().count() >= edges.last().count()); + + double totalCallsToKnown = 0; + double totalCallsToUnknown = callLinkInfo.slowPathCount(); + CallVariantList variants; + for (size_t i = 0; i < edges.size(); ++i) { + CallEdge edge = edges[i]; + // If the call is at the tail of the distribution, then we don't optimize it and we + // treat it as if it was a call to something unknown. We define the tail as being either + // a call that doesn't belong to the N most frequent callees (N = + // maxPolymorphicCallVariantsForInlining) or that has a total call count that is too + // small. + if (i >= Options::maxPolymorphicCallVariantsForInlining() + || edge.count() < Options::frequentCallThreshold()) + totalCallsToUnknown += edge.count(); + else { + totalCallsToKnown += edge.count(); + variants.append(edge.callee()); + } + } + + // Bail if we didn't find any calls that qualified. + RELEASE_ASSERT(!!totalCallsToKnown == !!variants.size()); + if (variants.isEmpty()) + return takesSlowPath(); + + // We require that the distribution of callees is skewed towards a handful of common ones. + if (totalCallsToKnown / totalCallsToUnknown < Options::minimumCallToKnownRate()) + return takesSlowPath(); + + RELEASE_ASSERT(totalCallsToKnown); + RELEASE_ASSERT(variants.size()); + + CallLinkStatus result; + result.m_variants = variants; + result.m_couldTakeSlowPath = !!totalCallsToUnknown; + return result; + } + + CallLinkStatus result; + + if (JSFunction* target = callLinkInfo.lastSeenCallee()) { + CallVariant variant(target); + if (callLinkInfo.hasSeenClosure()) + variant = variant.despecifiedClosure(); + result.m_variants.append(variant); + } + + result.m_couldTakeSlowPath = !!callLinkInfo.slowPathCount(); + + return result; +} + +CallLinkStatus CallLinkStatus::computeFor( + const ConcurrentJITLocker& locker, CodeBlock* profiledBlock, CallLinkInfo& callLinkInfo, + ExitSiteData exitSiteData) +{ + CallLinkStatus result = computeFor(locker, profiledBlock, callLinkInfo); + if (exitSiteData.m_badFunction) + result.makeClosureCall(); + if (exitSiteData.m_takesSlowPath) + result.m_couldTakeSlowPath = true; + + return result; +} +#endif + +void CallLinkStatus::computeDFGStatuses( + CodeBlock* dfgCodeBlock, CallLinkStatus::ContextMap& map) +{ +#if ENABLE(DFG_JIT) + RELEASE_ASSERT(dfgCodeBlock->jitType() == JITCode::DFGJIT); + CodeBlock* baselineCodeBlock = dfgCodeBlock->alternative(); + for (auto iter = dfgCodeBlock->callLinkInfosBegin(); !!iter; ++iter) { + CallLinkInfo& info = **iter; + CodeOrigin codeOrigin = info.codeOrigin(); + + // Check if we had already previously made a terrible mistake in the FTL for this + // code origin. Note that this is approximate because we could have a monovariant + // inline in the FTL that ended up failing. 
We should fix that at some point by + // having data structures to track the context of frequent exits. This is currently + // challenging because it would require creating a CodeOrigin-based database in + // baseline CodeBlocks, but those CodeBlocks don't really have a place to put the + // InlineCallFrames. + CodeBlock* currentBaseline = + baselineCodeBlockForOriginAndBaselineCodeBlock(codeOrigin, baselineCodeBlock); + ExitSiteData exitSiteData; + { + ConcurrentJITLocker locker(currentBaseline->m_lock); + exitSiteData = computeExitSiteData( + locker, currentBaseline, codeOrigin.bytecodeIndex); + } + + { + ConcurrentJITLocker locker(dfgCodeBlock->m_lock); + map.add(info.codeOrigin(), computeFor(locker, dfgCodeBlock, info, exitSiteData)); + } + } +#else + UNUSED_PARAM(dfgCodeBlock); +#endif // ENABLE(DFG_JIT) + + if (verbose) { + dataLog("Context map:\n"); + ContextMap::iterator iter = map.begin(); + ContextMap::iterator end = map.end(); + for (; iter != end; ++iter) { + dataLog(" ", iter->key, ":\n"); + dataLog(" ", iter->value, "\n"); + } + } +} + +CallLinkStatus CallLinkStatus::computeFor( + CodeBlock* profiledBlock, CodeOrigin codeOrigin, + const CallLinkInfoMap& baselineMap, const CallLinkStatus::ContextMap& dfgMap) +{ + auto iter = dfgMap.find(codeOrigin); + if (iter != dfgMap.end()) + return iter->value; + + return computeFor(profiledBlock, codeOrigin.bytecodeIndex, baselineMap); +} + +void CallLinkStatus::setProvenConstantCallee(CallVariant variant) +{ + m_variants = CallVariantList{ variant }; + m_couldTakeSlowPath = false; + m_isProved = true; +} + +bool CallLinkStatus::isClosureCall() const +{ + for (unsigned i = m_variants.size(); i--;) { + if (m_variants[i].isClosureCall()) + return true; + } + return false; +} + +void CallLinkStatus::makeClosureCall() +{ + m_variants = despecifiedVariantList(m_variants); +} + +void CallLinkStatus::dump(PrintStream& out) const +{ + if (!isSet()) { + out.print("Not Set"); + return; + } + + CommaPrinter comma; + + if (m_isProved) + out.print(comma, "Statically Proved"); + + if (m_couldTakeSlowPath) + out.print(comma, "Could Take Slow Path"); + + if (!m_variants.isEmpty()) + out.print(comma, listDump(m_variants)); + + if (m_maxNumArguments) + out.print(comma, "maxNumArguments = ", m_maxNumArguments); +} + +} // namespace JSC + diff --git a/Source/JavaScriptCore/bytecode/CallLinkStatus.h b/Source/JavaScriptCore/bytecode/CallLinkStatus.h new file mode 100644 index 000000000..271d8b6e1 --- /dev/null +++ b/Source/JavaScriptCore/bytecode/CallLinkStatus.h @@ -0,0 +1,145 @@ +/* + * Copyright (C) 2012-2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. 
OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef CallLinkStatus_h +#define CallLinkStatus_h + +#include "CallLinkInfo.h" +#include "CallVariant.h" +#include "CodeOrigin.h" +#include "CodeSpecializationKind.h" +#include "ConcurrentJITLock.h" +#include "ExitingJITType.h" +#include "Intrinsic.h" +#include "JSCJSValue.h" + +namespace JSC { + +class CodeBlock; +class ExecutableBase; +class InternalFunction; +class JSFunction; +class Structure; +class CallLinkInfo; + +class CallLinkStatus { + WTF_MAKE_FAST_ALLOCATED; +public: + CallLinkStatus() + : m_couldTakeSlowPath(false) + , m_isProved(false) + { + } + + static CallLinkStatus takesSlowPath() + { + CallLinkStatus result; + result.m_couldTakeSlowPath = true; + return result; + } + + explicit CallLinkStatus(JSValue); + + CallLinkStatus(CallVariant variant) + : m_variants(1, variant) + , m_couldTakeSlowPath(false) + , m_isProved(false) + { + } + + static CallLinkStatus computeFor( + CodeBlock*, unsigned bytecodeIndex, const CallLinkInfoMap&); + + struct ExitSiteData { + ExitSiteData() + : m_takesSlowPath(false) + , m_badFunction(false) + { + } + + bool m_takesSlowPath; + bool m_badFunction; + }; + static ExitSiteData computeExitSiteData(const ConcurrentJITLocker&, CodeBlock*, unsigned bytecodeIndex); + +#if ENABLE(JIT) + // Computes the status assuming that we never took slow path and never previously + // exited. + static CallLinkStatus computeFor(const ConcurrentJITLocker&, CodeBlock*, CallLinkInfo&); + static CallLinkStatus computeFor( + const ConcurrentJITLocker&, CodeBlock*, CallLinkInfo&, ExitSiteData); +#endif + + typedef HashMap<CodeOrigin, CallLinkStatus, CodeOriginApproximateHash> ContextMap; + + // Computes all of the statuses of the DFG code block. Doesn't include statuses that had + // no information. Currently we use this when compiling FTL code, to enable polyvariant + // inlining. + static void computeDFGStatuses(CodeBlock* dfgCodeBlock, ContextMap&); + + // Helper that first consults the ContextMap and then does computeFor(). + static CallLinkStatus computeFor( + CodeBlock*, CodeOrigin, const CallLinkInfoMap&, const ContextMap&); + + void setProvenConstantCallee(CallVariant); + + bool isSet() const { return !m_variants.isEmpty() || m_couldTakeSlowPath; } + + bool operator!() const { return !isSet(); } + + bool couldTakeSlowPath() const { return m_couldTakeSlowPath; } + + CallVariantList variants() const { return m_variants; } + unsigned size() const { return m_variants.size(); } + CallVariant at(unsigned i) const { return m_variants[i]; } + CallVariant operator[](unsigned i) const { return at(i); } + bool isProved() const { return m_isProved; } + bool canOptimize() const { return !m_variants.isEmpty(); } + + bool isClosureCall() const; // Returns true if any callee is a closure call. 
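    // ----------------------------------------------------------------------------------------
    // Editor's note: an illustrative sketch, not part of the original file. It shows roughly
    // how a compiler tier might consume a CallLinkStatus; emitDirectCallTo(),
    // emitSwitchOnCallee() and emitGenericCall() are hypothetical placeholders, not real JSC
    // functions:
    //
    //     CallLinkStatus status = CallLinkStatus::computeFor(profiledBlock, bytecodeIndex, map);
    //     if (!status.canOptimize())
    //         return emitGenericCall(); // no profiled variants survived pruning
    //     if (status.size() == 1 && !status.couldTakeSlowPath())
    //         return emitDirectCallTo(status[0]); // monomorphic: direct (possibly inlined) call
    //     // polymorphic: switch on the callee, keeping a generic fallback if the profile
    //     // also saw calls that did not match any retained variant
    //     emitSwitchOnCallee(status.variants(), status.couldTakeSlowPath());
    //
    // To make the variant pruning in CallLinkStatus.cpp above concrete: given edge counts
    // {A: 1000, B: 600, C: 3}, a slow path count of 10, frequentCallThreshold = 100 (a made-up
    // value) and maxPolymorphicCallVariantsForInlining >= 2, C is folded into the "unknown"
    // tally, so the status carries variants [A, B] with couldTakeSlowPath set, provided
    // 1600 / 13 clears minimumCallToKnownRate.
    // ----------------------------------------------------------------------------------------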
+ + unsigned maxNumArguments() const { return m_maxNumArguments; } + + void dump(PrintStream&) const; + +private: + void makeClosureCall(); + + static CallLinkStatus computeFromLLInt(const ConcurrentJITLocker&, CodeBlock*, unsigned bytecodeIndex); +#if ENABLE(JIT) + static CallLinkStatus computeFromCallLinkInfo( + const ConcurrentJITLocker&, CallLinkInfo&); +#endif + + CallVariantList m_variants; + bool m_couldTakeSlowPath; + bool m_isProved; + unsigned m_maxNumArguments; +}; + +} // namespace JSC + +#endif // CallLinkStatus_h + diff --git a/Source/JavaScriptCore/bytecode/CallReturnOffsetToBytecodeOffset.h b/Source/JavaScriptCore/bytecode/CallReturnOffsetToBytecodeOffset.h new file mode 100644 index 000000000..496738f09 --- /dev/null +++ b/Source/JavaScriptCore/bytecode/CallReturnOffsetToBytecodeOffset.h @@ -0,0 +1,58 @@ +/* + * Copyright (C) 2012 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef CallReturnOffsetToBytecodeOffset_h +#define CallReturnOffsetToBytecodeOffset_h + +namespace JSC { + +#if ENABLE(JIT) +// This structure is used to map from a call return location +// (given as an offset in bytes into the JIT code) back to +// the bytecode index of the corresponding bytecode operation. +// This is then used to look up the corresponding handler. +// FIXME: This should be made inlining aware! Currently it isn't +// because we never inline code that has exception handlers. +struct CallReturnOffsetToBytecodeOffset { + CallReturnOffsetToBytecodeOffset(unsigned callReturnOffset, unsigned bytecodeOffset) + : callReturnOffset(callReturnOffset) + , bytecodeOffset(bytecodeOffset) + { + } + + unsigned callReturnOffset; + unsigned bytecodeOffset; +}; + +inline unsigned getCallReturnOffset(CallReturnOffsetToBytecodeOffset* pc) +{ + return pc->callReturnOffset; +} +#endif + +} // namespace JSC + +#endif // CallReturnOffsetToBytecodeOffset_h + diff --git a/Source/JavaScriptCore/bytecode/CallVariant.cpp b/Source/JavaScriptCore/bytecode/CallVariant.cpp new file mode 100644 index 000000000..9745dde2b --- /dev/null +++ b/Source/JavaScriptCore/bytecode/CallVariant.cpp @@ -0,0 +1,97 @@ +/* + * Copyright (C) 2014, 2015 Apple Inc. All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "CallVariant.h" + +#include "JSCInlines.h" +#include <wtf/ListDump.h> + +namespace JSC { + +void CallVariant::dump(PrintStream& out) const +{ + if (!*this) { + out.print("null"); + return; + } + + if (InternalFunction* internalFunction = this->internalFunction()) { + out.print("InternalFunction: ", JSValue(internalFunction)); + return; + } + + if (JSFunction* function = this->function()) { + out.print("(Function: ", JSValue(function), "; Executable: ", *executable(), ")"); + return; + } + + out.print("Executable: ", *executable()); +} + +CallVariantList variantListWithVariant(const CallVariantList& list, CallVariant variantToAdd) +{ + ASSERT(variantToAdd); + CallVariantList result; + for (CallVariant variant : list) { + ASSERT(variant); + if (!!variantToAdd) { + if (variant == variantToAdd) + variantToAdd = CallVariant(); + else if (variant.despecifiedClosure() == variantToAdd.despecifiedClosure()) { + variant = variant.despecifiedClosure(); + variantToAdd = CallVariant(); + } + } + result.append(variant); + } + if (!!variantToAdd) + result.append(variantToAdd); + + if (!ASSERT_DISABLED) { + for (unsigned i = 0; i < result.size(); ++i) { + for (unsigned j = i + 1; j < result.size(); ++j) { + if (result[i] != result[j]) + continue; + + dataLog("variantListWithVariant(", listDump(list), ", ", variantToAdd, ") failed: got duplicates in result: ", listDump(result), "\n"); + RELEASE_ASSERT_NOT_REACHED(); + } + } + } + + return result; +} + +CallVariantList despecifiedVariantList(const CallVariantList& list) +{ + CallVariantList result; + for (CallVariant variant : list) + result = variantListWithVariant(result, variant.despecifiedClosure()); + return result; +} + +} // namespace JSC + diff --git a/Source/JavaScriptCore/bytecode/CallVariant.h b/Source/JavaScriptCore/bytecode/CallVariant.h new file mode 100644 index 000000000..2514f72b8 --- /dev/null +++ b/Source/JavaScriptCore/bytecode/CallVariant.h @@ -0,0 +1,203 @@ +/* + * Copyright (C) 2014, 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef CallVariant_h
+#define CallVariant_h
+
+#include "Executable.h"
+#include "JSCell.h"
+#include "JSFunction.h"
+
+namespace JSC {
+
+// The CallVariant class is meant to encapsulate a callee in a way that is useful for call linking
+// and inlining. Because JavaScript has closures, and because JSC implements the notion of internal
+// non-function objects that nevertheless provide call traps, the call machinery wants to see a
+// callee in one of the following four forms:
+//
+// JSFunction callee: This means that we expect the callsite to always call a particular function
+// instance, that is associated with a particular lexical environment. This pinpoints not
+// just the code that will be called (i.e. the executable) but also the scope within which
+// the code runs.
+//
+// Executable callee: This corresponds to a call to a closure. In this case, we know that the
+// callsite will call a JSFunction, but we do not know which particular JSFunction. We do know
+// what code will be called - i.e. we know the executable.
+//
+// InternalFunction callee: JSC supports a special kind of native function that supports bizarre
+// semantics. These are always singletons. If we know that the callee is an InternalFunction
+// then we know both the code that will be called and the scope; in fact the "scope" is really
+// just the InternalFunction itself.
+//
+// Something else: It's possible to call all manner of rubbish in JavaScript. This implicitly supports
+// bizarre object callees, but it can't really tell you anything interesting about them other
+// than the fact that they don't fall into any of the above categories.
+//
+// This class serves as a kind of union over these four things. It does so by just holding a
+// JSCell*. We determine which of the modes it's in by doing type checks on the cell. Note that we
+// cannot use WriteBarrier<> here because this gets used inside the compiler.
+
+class CallVariant {
+public:
+ explicit CallVariant(JSCell* callee = nullptr)
+ : m_callee(callee)
+ {
+ }
+
+ CallVariant(WTF::HashTableDeletedValueType)
+ : m_callee(deletedToken())
+ {
+ }
+
+ bool operator!() const { return !m_callee; }
+
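    // ----------------------------------------------------------------------------------------
    // Editor's note: an illustrative sketch, not part of the original file. Two closures made
    // from the same function literal compare unequal as CallVariants, but despecify (see
    // despecifiedClosure() below) to the same executable-based variant:
    //
    //     JSFunction* f = ...; // first instantiation of a function literal
    //     JSFunction* g = ...; // second instantiation, different lexical environment
    //     ASSERT(CallVariant(f) != CallVariant(g));
    //     ASSERT(CallVariant(f).despecifiedClosure() == CallVariant(g).despecifiedClosure());
    //
    // This is what lets variantListWithVariant() (CallVariant.cpp above) merge a call site's
    // many closures into one closure-call variant instead of letting the variant list grow
    // without bound.
    // ----------------------------------------------------------------------------------------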
+ // If this variant refers to a function, return a variant that refers to its executable instead.
+ ALWAYS_INLINE CallVariant despecifiedClosure() const
+ {
+ if (m_callee->type() == JSFunctionType)
+ return CallVariant(jsCast<JSFunction*>(m_callee)->executable());
+ return *this;
+ }
+
+ JSCell* rawCalleeCell() const { return m_callee; }
+
+ InternalFunction* internalFunction() const
+ {
+ return jsDynamicCast<InternalFunction*>(m_callee);
+ }
+
+ JSFunction* function() const
+ {
+ return jsDynamicCast<JSFunction*>(m_callee);
+ }
+
+ bool isClosureCall() const { return !!jsDynamicCast<ExecutableBase*>(m_callee); }
+
+ ExecutableBase* executable() const
+ {
+ if (JSFunction* function = this->function())
+ return function->executable();
+ return jsDynamicCast<ExecutableBase*>(m_callee);
+ }
+
+ JSCell* nonExecutableCallee() const
+ {
+ RELEASE_ASSERT(!isClosureCall());
+ return m_callee;
+ }
+
+ Intrinsic intrinsicFor(CodeSpecializationKind kind) const
+ {
+ if (ExecutableBase* executable = this->executable())
+ return executable->intrinsicFor(kind);
+ return NoIntrinsic;
+ }
+
+ FunctionExecutable* functionExecutable() const
+ {
+ if (ExecutableBase* executable = this->executable())
+ return jsDynamicCast<FunctionExecutable*>(executable);
+ return nullptr;
+ }
+
+ void dump(PrintStream& out) const;
+
+ bool isHashTableDeletedValue() const
+ {
+ return m_callee == deletedToken();
+ }
+
+ bool operator==(const CallVariant& other) const
+ {
+ return m_callee == other.m_callee;
+ }
+
+ bool operator!=(const CallVariant& other) const
+ {
+ return !(*this == other);
+ }
+
+ bool operator<(const CallVariant& other) const
+ {
+ return m_callee < other.m_callee;
+ }
+
+ bool operator>(const CallVariant& other) const
+ {
+ return other < *this;
+ }
+
+ bool operator<=(const CallVariant& other) const
+ {
+ return !(*this > other);
+ }
+
+ bool operator>=(const CallVariant& other) const
+ {
+ return other <= *this;
+ }
+
+ unsigned hash() const
+ {
+ return WTF::PtrHash<JSCell*>::hash(m_callee);
+ }
+
+private:
+ static JSCell* deletedToken() { return bitwise_cast<JSCell*>(static_cast<uintptr_t>(1)); }
+
+ JSCell* m_callee;
+};
+
+struct CallVariantHash {
+ static unsigned hash(const CallVariant& key) { return key.hash(); }
+ static bool equal(const CallVariant& a, const CallVariant& b) { return a == b; }
+ static const bool safeToCompareToEmptyOrDeleted = true;
+};
+
+typedef Vector<CallVariant, 1> CallVariantList;
+
+// Returns a new variant list by attempting to either append the given variant or merge it with one
+// of the variants we already have by despecifying closures.
+CallVariantList variantListWithVariant(const CallVariantList&, CallVariant);
+
+// Returns a new list where every element is despecified, and the list is deduplicated.
+CallVariantList despecifiedVariantList(const CallVariantList&);
+
+} // namespace JSC
+
+namespace WTF {
+
+template<typename T> struct DefaultHash;
+template<> struct DefaultHash<JSC::CallVariant> {
+ typedef JSC::CallVariantHash Hash;
+};
+
+template<typename T> struct HashTraits;
+template<> struct HashTraits<JSC::CallVariant> : SimpleClassHashTraits<JSC::CallVariant> { };
+
+} // namespace WTF
+
+#endif // CallVariant_h
+
diff --git a/Source/JavaScriptCore/bytecode/CodeBlock.cpp b/Source/JavaScriptCore/bytecode/CodeBlock.cpp
new file mode 100644
index 000000000..394974eaa
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/CodeBlock.cpp
@@ -0,0 +1,3976 @@
+/*
+ * Copyright (C) 2008-2010, 2012-2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2008 Cameron Zwarich <cwzwarich@uwaterloo.ca> + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Apple Inc. ("Apple") nor the names of + * its contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "CodeBlock.h" + +#include "BasicBlockLocation.h" +#include "BytecodeGenerator.h" +#include "BytecodeUseDef.h" +#include "CallLinkStatus.h" +#include "DFGCapabilities.h" +#include "DFGCommon.h" +#include "DFGDriver.h" +#include "DFGJITCode.h" +#include "DFGWorklist.h" +#include "Debugger.h" +#include "FunctionExecutableDump.h" +#include "Interpreter.h" +#include "JIT.h" +#include "JITStubs.h" +#include "JSCJSValue.h" +#include "JSFunction.h" +#include "JSLexicalEnvironment.h" +#include "LLIntEntrypoint.h" +#include "LowLevelInterpreter.h" +#include "JSCInlines.h" +#include "PolymorphicGetByIdList.h" +#include "PolymorphicPutByIdList.h" +#include "ProfilerDatabase.h" +#include "ReduceWhitespace.h" +#include "Repatch.h" +#include "RepatchBuffer.h" +#include "SlotVisitorInlines.h" +#include "StackVisitor.h" +#include "TypeLocationCache.h" +#include "TypeProfiler.h" +#include "UnlinkedInstructionStream.h" +#include <wtf/BagToHashMap.h> +#include <wtf/CommaPrinter.h> +#include <wtf/StringExtras.h> +#include <wtf/StringPrintStream.h> +#include <wtf/text/UniquedStringImpl.h> + +#if ENABLE(DFG_JIT) +#include "DFGOperations.h" +#endif + +#if ENABLE(FTL_JIT) +#include "FTLJITCode.h" +#endif + +namespace JSC { + +CString CodeBlock::inferredName() const +{ + switch (codeType()) { + case GlobalCode: + return "<global>"; + case EvalCode: + return "<eval>"; + case FunctionCode: + return jsCast<FunctionExecutable*>(ownerExecutable())->inferredName().utf8(); + default: + CRASH(); + return CString("", 0); + } +} + +bool CodeBlock::hasHash() const +{ + return !!m_hash; +} + +bool CodeBlock::isSafeToComputeHash() const +{ + return !isCompilationThread(); +} + +CodeBlockHash CodeBlock::hash() const +{ + if (!m_hash) { + RELEASE_ASSERT(isSafeToComputeHash()); + m_hash = CodeBlockHash(ownerExecutable()->source(), specializationKind()); + } + return m_hash; +} + +CString CodeBlock::sourceCodeForTools() const +{ + if (codeType() != FunctionCode) + 
return ownerExecutable()->source().toUTF8(); + + SourceProvider* provider = source(); + FunctionExecutable* executable = jsCast<FunctionExecutable*>(ownerExecutable()); + UnlinkedFunctionExecutable* unlinked = executable->unlinkedExecutable(); + unsigned unlinkedStartOffset = unlinked->startOffset(); + unsigned linkedStartOffset = executable->source().startOffset(); + int delta = linkedStartOffset - unlinkedStartOffset; + unsigned rangeStart = delta + unlinked->unlinkedFunctionNameStart(); + unsigned rangeEnd = delta + unlinked->startOffset() + unlinked->sourceLength(); + return toCString( + "function ", + provider->source().impl()->utf8ForRange(rangeStart, rangeEnd - rangeStart)); +} + +CString CodeBlock::sourceCodeOnOneLine() const +{ + return reduceWhitespace(sourceCodeForTools()); +} + +CString CodeBlock::hashAsStringIfPossible() const +{ + if (hasHash() || isSafeToComputeHash()) + return toCString(hash()); + return "<no-hash>"; +} + +void CodeBlock::dumpAssumingJITType(PrintStream& out, JITCode::JITType jitType) const +{ + out.print(inferredName(), "#", hashAsStringIfPossible()); + out.print(":[", RawPointer(this), "->"); + if (!!m_alternative) + out.print(RawPointer(m_alternative.get()), "->"); + out.print(RawPointer(ownerExecutable()), ", ", jitType, codeType()); + + if (codeType() == FunctionCode) + out.print(specializationKind()); + out.print(", ", instructionCount()); + if (this->jitType() == JITCode::BaselineJIT && m_shouldAlwaysBeInlined) + out.print(" (ShouldAlwaysBeInlined)"); + if (ownerExecutable()->neverInline()) + out.print(" (NeverInline)"); + if (ownerExecutable()->didTryToEnterInLoop()) + out.print(" (DidTryToEnterInLoop)"); + if (ownerExecutable()->isStrictMode()) + out.print(" (StrictMode)"); + if (this->jitType() == JITCode::BaselineJIT && m_didFailFTLCompilation) + out.print(" (FTLFail)"); + if (this->jitType() == JITCode::BaselineJIT && m_hasBeenCompiledWithFTL) + out.print(" (HadFTLReplacement)"); + out.print("]"); +} + +void CodeBlock::dump(PrintStream& out) const +{ + dumpAssumingJITType(out, jitType()); +} + +static CString idName(int id0, const Identifier& ident) +{ + return toCString(ident.impl(), "(@id", id0, ")"); +} + +CString CodeBlock::registerName(int r) const +{ + if (isConstantRegisterIndex(r)) + return constantName(r); + + return toCString(VirtualRegister(r)); +} + +CString CodeBlock::constantName(int index) const +{ + JSValue value = getConstant(index); + return toCString(value, "(", VirtualRegister(index), ")"); +} + +static CString regexpToSourceString(RegExp* regExp) +{ + char postfix[5] = { '/', 0, 0, 0, 0 }; + int index = 1; + if (regExp->global()) + postfix[index++] = 'g'; + if (regExp->ignoreCase()) + postfix[index++] = 'i'; + if (regExp->multiline()) + postfix[index] = 'm'; + + return toCString("/", regExp->pattern().impl(), postfix); +} + +static CString regexpName(int re, RegExp* regexp) +{ + return toCString(regexpToSourceString(regexp), "(@re", re, ")"); +} + +NEVER_INLINE static const char* debugHookName(int debugHookID) +{ + switch (static_cast<DebugHookID>(debugHookID)) { + case DidEnterCallFrame: + return "didEnterCallFrame"; + case WillLeaveCallFrame: + return "willLeaveCallFrame"; + case WillExecuteStatement: + return "willExecuteStatement"; + case WillExecuteProgram: + return "willExecuteProgram"; + case DidExecuteProgram: + return "didExecuteProgram"; + case DidReachBreakpoint: + return "didReachBreakpoint"; + } + + RELEASE_ASSERT_NOT_REACHED(); + return ""; +} + +void CodeBlock::printUnaryOp(PrintStream& out, ExecState* exec, 
int location, const Instruction*& it, const char* op) +{ + int r0 = (++it)->u.operand; + int r1 = (++it)->u.operand; + + printLocationAndOp(out, exec, location, it, op); + out.printf("%s, %s", registerName(r0).data(), registerName(r1).data()); +} + +void CodeBlock::printBinaryOp(PrintStream& out, ExecState* exec, int location, const Instruction*& it, const char* op) +{ + int r0 = (++it)->u.operand; + int r1 = (++it)->u.operand; + int r2 = (++it)->u.operand; + printLocationAndOp(out, exec, location, it, op); + out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data()); +} + +void CodeBlock::printConditionalJump(PrintStream& out, ExecState* exec, const Instruction*, const Instruction*& it, int location, const char* op) +{ + int r0 = (++it)->u.operand; + int offset = (++it)->u.operand; + printLocationAndOp(out, exec, location, it, op); + out.printf("%s, %d(->%d)", registerName(r0).data(), offset, location + offset); +} + +void CodeBlock::printGetByIdOp(PrintStream& out, ExecState* exec, int location, const Instruction*& it) +{ + const char* op; + switch (exec->interpreter()->getOpcodeID(it->u.opcode)) { + case op_get_by_id: + op = "get_by_id"; + break; + case op_get_by_id_out_of_line: + op = "get_by_id_out_of_line"; + break; + case op_get_array_length: + op = "array_length"; + break; + default: + RELEASE_ASSERT_NOT_REACHED(); +#if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE) + op = 0; +#endif + } + int r0 = (++it)->u.operand; + int r1 = (++it)->u.operand; + int id0 = (++it)->u.operand; + printLocationAndOp(out, exec, location, it, op); + out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), idName(id0, identifier(id0)).data()); + it += 4; // Increment up to the value profiler. +} + +static void dumpStructure(PrintStream& out, const char* name, Structure* structure, const Identifier& ident) +{ + if (!structure) + return; + + out.printf("%s = %p", name, structure); + + PropertyOffset offset = structure->getConcurrently(ident.impl()); + if (offset != invalidOffset) + out.printf(" (offset = %d)", offset); +} + +static void dumpChain(PrintStream& out, StructureChain* chain, const Identifier& ident) +{ + out.printf("chain = %p: [", chain); + bool first = true; + for (WriteBarrier<Structure>* currentStructure = chain->head(); + *currentStructure; + ++currentStructure) { + if (first) + first = false; + else + out.printf(", "); + dumpStructure(out, "struct", currentStructure->get(), ident); + } + out.printf("]"); +} + +void CodeBlock::printGetByIdCacheStatus(PrintStream& out, ExecState* exec, int location, const StubInfoMap& map) +{ + Instruction* instruction = instructions().begin() + location; + + const Identifier& ident = identifier(instruction[3].u.operand); + + UNUSED_PARAM(ident); // tell the compiler to shut up in certain platform configurations. 
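    // ----------------------------------------------------------------------------------------
    // Editor's note: an illustrative summary, not part of the original file. The instruction
    // stream layout that this function and printGetByIdOp() assume for op_get_by_id and its
    // variants, per the slots they read, is:
    //
    //     instruction[0]    opcode (op_get_by_id / op_get_by_id_out_of_line / op_get_array_length)
    //     instruction[1]    dst register
    //     instruction[2]    base register
    //     instruction[3]    identifier index of the property name
    //     instruction[4]    Structure cached by the LLInt fast path
    //     instruction[5..7] further inline-cache metadata, not inspected by the dumper
    //     instruction[8]    value profile (reached via the "it += 4" in printGetByIdOp())
    // ----------------------------------------------------------------------------------------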
+ + if (exec->interpreter()->getOpcodeID(instruction[0].u.opcode) == op_get_array_length) + out.printf(" llint(array_length)"); + else if (Structure* structure = instruction[4].u.structure.get()) { + out.printf(" llint("); + dumpStructure(out, "struct", structure, ident); + out.printf(")"); + } + +#if ENABLE(JIT) + if (StructureStubInfo* stubPtr = map.get(CodeOrigin(location))) { + StructureStubInfo& stubInfo = *stubPtr; + if (stubInfo.resetByGC) + out.print(" (Reset By GC)"); + + if (stubInfo.seen) { + out.printf(" jit("); + + Structure* baseStructure = 0; + Structure* prototypeStructure = 0; + PolymorphicGetByIdList* list = 0; + + switch (stubInfo.accessType) { + case access_get_by_id_self: + out.printf("self"); + baseStructure = stubInfo.u.getByIdSelf.baseObjectStructure.get(); + break; + case access_get_by_id_list: + out.printf("list"); + list = stubInfo.u.getByIdList.list; + break; + case access_unset: + out.printf("unset"); + break; + default: + RELEASE_ASSERT_NOT_REACHED(); + break; + } + + if (baseStructure) { + out.printf(", "); + dumpStructure(out, "struct", baseStructure, ident); + } + + if (prototypeStructure) { + out.printf(", "); + dumpStructure(out, "prototypeStruct", baseStructure, ident); + } + + if (list) { + out.printf(", list = %p: [", list); + for (unsigned i = 0; i < list->size(); ++i) { + if (i) + out.printf(", "); + out.printf("("); + dumpStructure(out, "base", list->at(i).structure(), ident); + if (!list->at(i).conditionSet().isEmpty()) { + out.printf(", "); + out.print(list->at(i).conditionSet()); + } + out.printf(")"); + } + out.printf("]"); + } + out.printf(")"); + } + } +#else + UNUSED_PARAM(map); +#endif +} + +void CodeBlock::printPutByIdCacheStatus(PrintStream& out, ExecState* exec, int location, const StubInfoMap& map) +{ + Instruction* instruction = instructions().begin() + location; + + const Identifier& ident = identifier(instruction[2].u.operand); + + UNUSED_PARAM(ident); // tell the compiler to shut up in certain platform configurations. 
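    // ----------------------------------------------------------------------------------------
    // Editor's note: an illustrative summary, not part of the original file. The layout that
    // this function and printPutByIdOp() assume for op_put_by_id and its transition variants,
    // per the slots read below, is:
    //
    //     instruction[0] opcode
    //     instruction[1] base register
    //     instruction[2] identifier index of the property name
    //     instruction[3] value register
    //     instruction[4] old (or self) Structure cached by the LLInt
    //     instruction[5] inline-cache metadata, not inspected by the dumper (likely the offset)
    //     instruction[6] new Structure, used by the transition variants
    //     instruction[7] StructureChain validating the prototype chain for transitions
    //     instruction[8] further metadata, not inspected by the dumper
    // ----------------------------------------------------------------------------------------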
+ + if (Structure* structure = instruction[4].u.structure.get()) { + switch (exec->interpreter()->getOpcodeID(instruction[0].u.opcode)) { + case op_put_by_id: + case op_put_by_id_out_of_line: + out.print(" llint("); + dumpStructure(out, "struct", structure, ident); + out.print(")"); + break; + + case op_put_by_id_transition_direct: + case op_put_by_id_transition_normal: + case op_put_by_id_transition_direct_out_of_line: + case op_put_by_id_transition_normal_out_of_line: + out.print(" llint("); + dumpStructure(out, "prev", structure, ident); + out.print(", "); + dumpStructure(out, "next", instruction[6].u.structure.get(), ident); + if (StructureChain* chain = instruction[7].u.structureChain.get()) { + out.print(", "); + dumpChain(out, chain, ident); + } + out.print(")"); + break; + + default: + out.print(" llint(unknown)"); + break; + } + } + +#if ENABLE(JIT) + if (StructureStubInfo* stubPtr = map.get(CodeOrigin(location))) { + StructureStubInfo& stubInfo = *stubPtr; + if (stubInfo.resetByGC) + out.print(" (Reset By GC)"); + + if (stubInfo.seen) { + out.printf(" jit("); + + switch (stubInfo.accessType) { + case access_put_by_id_replace: + out.print("replace, "); + dumpStructure(out, "struct", stubInfo.u.putByIdReplace.baseObjectStructure.get(), ident); + break; + case access_put_by_id_transition_normal: + case access_put_by_id_transition_direct: + out.print("transition, "); + dumpStructure(out, "prev", stubInfo.u.putByIdTransition.previousStructure.get(), ident); + out.print(", "); + dumpStructure(out, "next", stubInfo.u.putByIdTransition.structure.get(), ident); + if (stubInfo.u.putByIdTransition.rawConditionSet) + out.print(", ", ObjectPropertyConditionSet::fromRawPointer(stubInfo.u.putByIdTransition.rawConditionSet)); + break; + case access_put_by_id_list: { + out.printf("list = ["); + PolymorphicPutByIdList* list = stubInfo.u.putByIdList.list; + CommaPrinter comma; + for (unsigned i = 0; i < list->size(); ++i) { + out.print(comma, "("); + const PutByIdAccess& access = list->at(i); + + if (access.isReplace()) { + out.print("replace, "); + dumpStructure(out, "struct", access.oldStructure(), ident); + } else if (access.isSetter()) { + out.print("setter, "); + dumpStructure(out, "struct", access.oldStructure(), ident); + } else if (access.isCustom()) { + out.print("custom, "); + dumpStructure(out, "struct", access.oldStructure(), ident); + } else if (access.isTransition()) { + out.print("transition, "); + dumpStructure(out, "prev", access.oldStructure(), ident); + out.print(", "); + dumpStructure(out, "next", access.newStructure(), ident); + if (!access.conditionSet().isEmpty()) + out.print(", ", access.conditionSet()); + } else + out.print("unknown"); + + out.print(")"); + } + out.print("]"); + break; + } + case access_unset: + out.printf("unset"); + break; + default: + RELEASE_ASSERT_NOT_REACHED(); + break; + } + out.printf(")"); + } + } +#else + UNUSED_PARAM(map); +#endif +} + +void CodeBlock::printCallOp(PrintStream& out, ExecState* exec, int location, const Instruction*& it, const char* op, CacheDumpMode cacheDumpMode, bool& hasPrintedProfiling, const CallLinkInfoMap& map) +{ + int dst = (++it)->u.operand; + int func = (++it)->u.operand; + int argCount = (++it)->u.operand; + int registerOffset = (++it)->u.operand; + printLocationAndOp(out, exec, location, it, op); + out.printf("%s, %s, %d, %d", registerName(dst).data(), registerName(func).data(), argCount, registerOffset); + if (cacheDumpMode == DumpCaches) { + LLIntCallLinkInfo* callLinkInfo = it[1].u.callLinkInfo; + if 
(callLinkInfo->lastSeenCallee) { + out.printf( + " llint(%p, exec %p)", + callLinkInfo->lastSeenCallee.get(), + callLinkInfo->lastSeenCallee->executable()); + } +#if ENABLE(JIT) + if (CallLinkInfo* info = map.get(CodeOrigin(location))) { + JSFunction* target = info->lastSeenCallee(); + if (target) + out.printf(" jit(%p, exec %p)", target, target->executable()); + } + + if (jitType() != JITCode::FTLJIT) + out.print(" status(", CallLinkStatus::computeFor(this, location, map), ")"); +#else + UNUSED_PARAM(map); +#endif + } + ++it; + ++it; + dumpArrayProfiling(out, it, hasPrintedProfiling); + dumpValueProfiling(out, it, hasPrintedProfiling); +} + +void CodeBlock::printPutByIdOp(PrintStream& out, ExecState* exec, int location, const Instruction*& it, const char* op) +{ + int r0 = (++it)->u.operand; + int id0 = (++it)->u.operand; + int r1 = (++it)->u.operand; + printLocationAndOp(out, exec, location, it, op); + out.printf("%s, %s, %s", registerName(r0).data(), idName(id0, identifier(id0)).data(), registerName(r1).data()); + it += 5; +} + +void CodeBlock::dumpSource() +{ + dumpSource(WTF::dataFile()); +} + +void CodeBlock::dumpSource(PrintStream& out) +{ + ScriptExecutable* executable = ownerExecutable(); + if (executable->isFunctionExecutable()) { + FunctionExecutable* functionExecutable = reinterpret_cast<FunctionExecutable*>(executable); + String source = functionExecutable->source().provider()->getRange( + functionExecutable->parametersStartOffset(), + functionExecutable->typeProfilingEndOffset() + 1); // Type profiling end offset is the character before the '}'. + + out.print("function ", inferredName(), source); + return; + } + out.print(executable->source().toString()); +} + +void CodeBlock::dumpBytecode() +{ + dumpBytecode(WTF::dataFile()); +} + +void CodeBlock::dumpBytecode(PrintStream& out) +{ + // We only use the ExecState* for things that don't actually lead to JS execution, + // like converting a JSString to a String. Hence the globalExec is appropriate. 
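    // Editor's note: an illustrative aside, not part of the original file. The stream is
    // variable-length, so the counting loop below (and the dump loop after it) advances by
    // each opcode's full length rather than by one slot. The walk, in miniature:
    //
    //     for (size_t i = 0; i < instructions().size(); ) {
    //         OpcodeID op = exec->interpreter()->getOpcodeID(instructions()[i].u.opcode);
    //         // ... operands for this opcode live at instructions()[i + 1 ...] ...
    //         i += opcodeLengths[op]; // step over the opcode and all of its operands
    //     }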
+ ExecState* exec = m_globalObject->globalExec(); + + size_t instructionCount = 0; + + for (size_t i = 0; i < instructions().size(); i += opcodeLengths[exec->interpreter()->getOpcodeID(instructions()[i].u.opcode)]) + ++instructionCount; + + out.print(*this); + out.printf( + ": %lu m_instructions; %lu bytes; %d parameter(s); %d callee register(s); %d variable(s)", + static_cast<unsigned long>(instructions().size()), + static_cast<unsigned long>(instructions().size() * sizeof(Instruction)), + m_numParameters, m_numCalleeRegisters, m_numVars); + if (needsActivation() && codeType() == FunctionCode) + out.printf("; lexical environment in r%d", activationRegister().offset()); + out.printf("\n"); + + StubInfoMap stubInfos; + CallLinkInfoMap callLinkInfos; + getStubInfoMap(stubInfos); + getCallLinkInfoMap(callLinkInfos); + + const Instruction* begin = instructions().begin(); + const Instruction* end = instructions().end(); + for (const Instruction* it = begin; it != end; ++it) + dumpBytecode(out, exec, begin, it, stubInfos, callLinkInfos); + + if (numberOfIdentifiers()) { + out.printf("\nIdentifiers:\n"); + size_t i = 0; + do { + out.printf(" id%u = %s\n", static_cast<unsigned>(i), identifier(i).string().utf8().data()); + ++i; + } while (i != numberOfIdentifiers()); + } + + if (!m_constantRegisters.isEmpty()) { + out.printf("\nConstants:\n"); + size_t i = 0; + do { + const char* sourceCodeRepresentationDescription = nullptr; + switch (m_constantsSourceCodeRepresentation[i]) { + case SourceCodeRepresentation::Double: + sourceCodeRepresentationDescription = ": in source as double"; + break; + case SourceCodeRepresentation::Integer: + sourceCodeRepresentationDescription = ": in source as integer"; + break; + case SourceCodeRepresentation::Other: + sourceCodeRepresentationDescription = ""; + break; + } + out.printf(" k%u = %s%s\n", static_cast<unsigned>(i), toCString(m_constantRegisters[i].get()).data(), sourceCodeRepresentationDescription); + ++i; + } while (i < m_constantRegisters.size()); + } + + if (size_t count = m_unlinkedCode->numberOfRegExps()) { + out.printf("\nm_regexps:\n"); + size_t i = 0; + do { + out.printf(" re%u = %s\n", static_cast<unsigned>(i), regexpToSourceString(m_unlinkedCode->regexp(i)).data()); + ++i; + } while (i < count); + } + + if (m_rareData && !m_rareData->m_exceptionHandlers.isEmpty()) { + out.printf("\nException Handlers:\n"); + unsigned i = 0; + do { + HandlerInfo& handler = m_rareData->m_exceptionHandlers[i]; + out.printf("\t %d: { start: [%4d] end: [%4d] target: [%4d] } %s\n", + i + 1, handler.start, handler.end, handler.target, handler.typeName()); + ++i; + } while (i < m_rareData->m_exceptionHandlers.size()); + } + + if (m_rareData && !m_rareData->m_switchJumpTables.isEmpty()) { + out.printf("Switch Jump Tables:\n"); + unsigned i = 0; + do { + out.printf(" %1d = {\n", i); + int entry = 0; + Vector<int32_t>::const_iterator end = m_rareData->m_switchJumpTables[i].branchOffsets.end(); + for (Vector<int32_t>::const_iterator iter = m_rareData->m_switchJumpTables[i].branchOffsets.begin(); iter != end; ++iter, ++entry) { + if (!*iter) + continue; + out.printf("\t\t%4d => %04d\n", entry + m_rareData->m_switchJumpTables[i].min, *iter); + } + out.printf(" }\n"); + ++i; + } while (i < m_rareData->m_switchJumpTables.size()); + } + + if (m_rareData && !m_rareData->m_stringSwitchJumpTables.isEmpty()) { + out.printf("\nString Switch Jump Tables:\n"); + unsigned i = 0; + do { + out.printf(" %1d = {\n", i); + StringJumpTable::StringOffsetTable::const_iterator end = 
m_rareData->m_stringSwitchJumpTables[i].offsetTable.end(); + for (StringJumpTable::StringOffsetTable::const_iterator iter = m_rareData->m_stringSwitchJumpTables[i].offsetTable.begin(); iter != end; ++iter) + out.printf("\t\t\"%s\" => %04d\n", iter->key->utf8().data(), iter->value.branchOffset); + out.printf(" }\n"); + ++i; + } while (i < m_rareData->m_stringSwitchJumpTables.size()); + } + + out.printf("\n"); +} + +void CodeBlock::beginDumpProfiling(PrintStream& out, bool& hasPrintedProfiling) +{ + if (hasPrintedProfiling) { + out.print("; "); + return; + } + + out.print(" "); + hasPrintedProfiling = true; +} + +void CodeBlock::dumpValueProfiling(PrintStream& out, const Instruction*& it, bool& hasPrintedProfiling) +{ + ConcurrentJITLocker locker(m_lock); + + ++it; + CString description = it->u.profile->briefDescription(locker); + if (!description.length()) + return; + beginDumpProfiling(out, hasPrintedProfiling); + out.print(description); +} + +void CodeBlock::dumpArrayProfiling(PrintStream& out, const Instruction*& it, bool& hasPrintedProfiling) +{ + ConcurrentJITLocker locker(m_lock); + + ++it; + if (!it->u.arrayProfile) + return; + CString description = it->u.arrayProfile->briefDescription(locker, this); + if (!description.length()) + return; + beginDumpProfiling(out, hasPrintedProfiling); + out.print(description); +} + +void CodeBlock::dumpRareCaseProfile(PrintStream& out, const char* name, RareCaseProfile* profile, bool& hasPrintedProfiling) +{ + if (!profile || !profile->m_counter) + return; + + beginDumpProfiling(out, hasPrintedProfiling); + out.print(name, profile->m_counter); +} + +void CodeBlock::printLocationAndOp(PrintStream& out, ExecState*, int location, const Instruction*&, const char* op) +{ + out.printf("[%4d] %-17s ", location, op); +} + +void CodeBlock::printLocationOpAndRegisterOperand(PrintStream& out, ExecState* exec, int location, const Instruction*& it, const char* op, int operand) +{ + printLocationAndOp(out, exec, location, it, op); + out.printf("%s", registerName(operand).data()); +} + +void CodeBlock::dumpBytecode( + PrintStream& out, ExecState* exec, const Instruction* begin, const Instruction*& it, + const StubInfoMap& stubInfos, const CallLinkInfoMap& callLinkInfos) +{ + int location = it - begin; + bool hasPrintedProfiling = false; + OpcodeID opcode = exec->interpreter()->getOpcodeID(it->u.opcode); + switch (opcode) { + case op_enter: { + printLocationAndOp(out, exec, location, it, "enter"); + break; + } + case op_get_scope: { + int r0 = (++it)->u.operand; + printLocationOpAndRegisterOperand(out, exec, location, it, "get_scope", r0); + break; + } + case op_create_direct_arguments: { + int r0 = (++it)->u.operand; + printLocationAndOp(out, exec, location, it, "create_direct_arguments"); + out.printf("%s", registerName(r0).data()); + break; + } + case op_create_scoped_arguments: { + int r0 = (++it)->u.operand; + int r1 = (++it)->u.operand; + printLocationAndOp(out, exec, location, it, "create_scoped_arguments"); + out.printf("%s, %s", registerName(r0).data(), registerName(r1).data()); + break; + } + case op_create_out_of_band_arguments: { + int r0 = (++it)->u.operand; + printLocationAndOp(out, exec, location, it, "create_out_of_band_arguments"); + out.printf("%s", registerName(r0).data()); + break; + } + case op_create_this: { + int r0 = (++it)->u.operand; + int r1 = (++it)->u.operand; + unsigned inferredInlineCapacity = (++it)->u.operand; + unsigned cachedFunction = (++it)->u.operand; + printLocationAndOp(out, exec, location, it, "create_this"); + 
out.printf("%s, %s, %u, %u", registerName(r0).data(), registerName(r1).data(), inferredInlineCapacity, cachedFunction); + break; + } + case op_to_this: { + int r0 = (++it)->u.operand; + printLocationOpAndRegisterOperand(out, exec, location, it, "to_this", r0); + Structure* structure = (++it)->u.structure.get(); + if (structure) + out.print(", cache(struct = ", RawPointer(structure), ")"); + out.print(", ", (++it)->u.toThisStatus); + break; + } + case op_check_tdz: { + int r0 = (++it)->u.operand; + printLocationOpAndRegisterOperand(out, exec, location, it, "op_check_tdz", r0); + break; + } + case op_new_object: { + int r0 = (++it)->u.operand; + unsigned inferredInlineCapacity = (++it)->u.operand; + printLocationAndOp(out, exec, location, it, "new_object"); + out.printf("%s, %u", registerName(r0).data(), inferredInlineCapacity); + ++it; // Skip object allocation profile. + break; + } + case op_new_array: { + int dst = (++it)->u.operand; + int argv = (++it)->u.operand; + int argc = (++it)->u.operand; + printLocationAndOp(out, exec, location, it, "new_array"); + out.printf("%s, %s, %d", registerName(dst).data(), registerName(argv).data(), argc); + ++it; // Skip array allocation profile. + break; + } + case op_new_array_with_size: { + int dst = (++it)->u.operand; + int length = (++it)->u.operand; + printLocationAndOp(out, exec, location, it, "new_array_with_size"); + out.printf("%s, %s", registerName(dst).data(), registerName(length).data()); + ++it; // Skip array allocation profile. + break; + } + case op_new_array_buffer: { + int dst = (++it)->u.operand; + int argv = (++it)->u.operand; + int argc = (++it)->u.operand; + printLocationAndOp(out, exec, location, it, "new_array_buffer"); + out.printf("%s, %d, %d", registerName(dst).data(), argv, argc); + ++it; // Skip array allocation profile. 
+ break;
+ }
+ case op_new_regexp: {
+ int r0 = (++it)->u.operand;
+ int re0 = (++it)->u.operand;
+ printLocationAndOp(out, exec, location, it, "new_regexp");
+ out.printf("%s, ", registerName(r0).data());
+ if (re0 >= 0 && re0 < (int)m_unlinkedCode->numberOfRegExps())
+ out.printf("%s", regexpName(re0, regexp(re0)).data());
+ else
+ out.printf("bad_regexp(%d)", re0);
+ break;
+ }
+ case op_mov: {
+ int r0 = (++it)->u.operand;
+ int r1 = (++it)->u.operand;
+ printLocationAndOp(out, exec, location, it, "mov");
+ out.printf("%s, %s", registerName(r0).data(), registerName(r1).data());
+ break;
+ }
+ case op_profile_type: {
+ int r0 = (++it)->u.operand;
+ ++it;
+ ++it;
+ ++it;
+ ++it;
+ printLocationAndOp(out, exec, location, it, "op_profile_type");
+ out.printf("%s", registerName(r0).data());
+ break;
+ }
+ case op_profile_control_flow: {
+ BasicBlockLocation* basicBlockLocation = (++it)->u.basicBlockLocation;
+ printLocationAndOp(out, exec, location, it, "profile_control_flow");
+ out.printf("[%d, %d]", basicBlockLocation->startOffset(), basicBlockLocation->endOffset());
+ break;
+ }
+ case op_not: {
+ printUnaryOp(out, exec, location, it, "not");
+ break;
+ }
+ case op_eq: {
+ printBinaryOp(out, exec, location, it, "eq");
+ break;
+ }
+ case op_eq_null: {
+ printUnaryOp(out, exec, location, it, "eq_null");
+ break;
+ }
+ case op_neq: {
+ printBinaryOp(out, exec, location, it, "neq");
+ break;
+ }
+ case op_neq_null: {
+ printUnaryOp(out, exec, location, it, "neq_null");
+ break;
+ }
+ case op_stricteq: {
+ printBinaryOp(out, exec, location, it, "stricteq");
+ break;
+ }
+ case op_nstricteq: {
+ printBinaryOp(out, exec, location, it, "nstricteq");
+ break;
+ }
+ case op_less: {
+ printBinaryOp(out, exec, location, it, "less");
+ break;
+ }
+ case op_lesseq: {
+ printBinaryOp(out, exec, location, it, "lesseq");
+ break;
+ }
+ case op_greater: {
+ printBinaryOp(out, exec, location, it, "greater");
+ break;
+ }
+ case op_greatereq: {
+ printBinaryOp(out, exec, location, it, "greatereq");
+ break;
+ }
+ case op_inc: {
+ int r0 = (++it)->u.operand;
+ printLocationOpAndRegisterOperand(out, exec, location, it, "inc", r0);
+ break;
+ }
+ case op_dec: {
+ int r0 = (++it)->u.operand;
+ printLocationOpAndRegisterOperand(out, exec, location, it, "dec", r0);
+ break;
+ }
+ case op_to_number: {
+ printUnaryOp(out, exec, location, it, "to_number");
+ break;
+ }
+ case op_to_string: {
+ printUnaryOp(out, exec, location, it, "to_string");
+ break;
+ }
+ case op_negate: {
+ printUnaryOp(out, exec, location, it, "negate");
+ break;
+ }
+ case op_add: {
+ printBinaryOp(out, exec, location, it, "add");
+ ++it;
+ break;
+ }
+ case op_mul: {
+ printBinaryOp(out, exec, location, it, "mul");
+ ++it;
+ break;
+ }
+ case op_div: {
+ printBinaryOp(out, exec, location, it, "div");
+ ++it;
+ break;
+ }
+ case op_mod: {
+ printBinaryOp(out, exec, location, it, "mod");
+ break;
+ }
+ case op_sub: {
+ printBinaryOp(out, exec, location, it, "sub");
+ ++it;
+ break;
+ }
+ case op_lshift: {
+ printBinaryOp(out, exec, location, it, "lshift");
+ break;
+ }
+ case op_rshift: {
+ printBinaryOp(out, exec, location, it, "rshift");
+ break;
+ }
+ case op_urshift: {
+ printBinaryOp(out, exec, location, it, "urshift");
+ break;
+ }
+ case op_bitand: {
+ printBinaryOp(out, exec, location, it, "bitand");
+ ++it;
+ break;
+ }
+ case op_bitxor: {
+ printBinaryOp(out, exec, location, it, "bitxor");
+ ++it;
+ break;
+ }
+ case op_bitor: {
+ printBinaryOp(out, exec, location, it, "bitor");
+ ++it;
+ break;
+ }
+ case op_check_has_instance: {
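        // Editor's note: an explanatory aside, not part of the original file. "value
        // instanceof C" compiles to a check_has_instance / instanceof pair: when the
        // constructor needs non-default [[HasInstance]] handling, check_has_instance computes
        // the answer itself and its offset operand jumps past the op_instanceof that follows;
        // otherwise op_instanceof performs the ordinary prototype-chain walk.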
int r0 = (++it)->u.operand; + int r1 = (++it)->u.operand; + int r2 = (++it)->u.operand; + int offset = (++it)->u.operand; + printLocationAndOp(out, exec, location, it, "check_has_instance"); + out.printf("%s, %s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), registerName(r2).data(), offset, location + offset); + break; + } + case op_instanceof: { + int r0 = (++it)->u.operand; + int r1 = (++it)->u.operand; + int r2 = (++it)->u.operand; + printLocationAndOp(out, exec, location, it, "instanceof"); + out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data()); + break; + } + case op_unsigned: { + printUnaryOp(out, exec, location, it, "unsigned"); + break; + } + case op_typeof: { + printUnaryOp(out, exec, location, it, "typeof"); + break; + } + case op_is_undefined: { + printUnaryOp(out, exec, location, it, "is_undefined"); + break; + } + case op_is_boolean: { + printUnaryOp(out, exec, location, it, "is_boolean"); + break; + } + case op_is_number: { + printUnaryOp(out, exec, location, it, "is_number"); + break; + } + case op_is_string: { + printUnaryOp(out, exec, location, it, "is_string"); + break; + } + case op_is_object: { + printUnaryOp(out, exec, location, it, "is_object"); + break; + } + case op_is_object_or_null: { + printUnaryOp(out, exec, location, it, "is_object_or_null"); + break; + } + case op_is_function: { + printUnaryOp(out, exec, location, it, "is_function"); + break; + } + case op_in: { + printBinaryOp(out, exec, location, it, "in"); + break; + } + case op_get_by_id: + case op_get_by_id_out_of_line: + case op_get_array_length: { + printGetByIdOp(out, exec, location, it); + printGetByIdCacheStatus(out, exec, location, stubInfos); + dumpValueProfiling(out, it, hasPrintedProfiling); + break; + } + case op_put_by_id: { + printPutByIdOp(out, exec, location, it, "put_by_id"); + printPutByIdCacheStatus(out, exec, location, stubInfos); + break; + } + case op_put_by_id_out_of_line: { + printPutByIdOp(out, exec, location, it, "put_by_id_out_of_line"); + printPutByIdCacheStatus(out, exec, location, stubInfos); + break; + } + case op_put_by_id_transition_direct: { + printPutByIdOp(out, exec, location, it, "put_by_id_transition_direct"); + printPutByIdCacheStatus(out, exec, location, stubInfos); + break; + } + case op_put_by_id_transition_direct_out_of_line: { + printPutByIdOp(out, exec, location, it, "put_by_id_transition_direct_out_of_line"); + printPutByIdCacheStatus(out, exec, location, stubInfos); + break; + } + case op_put_by_id_transition_normal: { + printPutByIdOp(out, exec, location, it, "put_by_id_transition_normal"); + printPutByIdCacheStatus(out, exec, location, stubInfos); + break; + } + case op_put_by_id_transition_normal_out_of_line: { + printPutByIdOp(out, exec, location, it, "put_by_id_transition_normal_out_of_line"); + printPutByIdCacheStatus(out, exec, location, stubInfos); + break; + } + case op_put_getter_by_id: { + int r0 = (++it)->u.operand; + int id0 = (++it)->u.operand; + int r1 = (++it)->u.operand; + printLocationAndOp(out, exec, location, it, "put_getter_by_id"); + out.printf("%s, %s, %s", registerName(r0).data(), idName(id0, identifier(id0)).data(), registerName(r1).data()); + break; + } + case op_put_setter_by_id: { + int r0 = (++it)->u.operand; + int id0 = (++it)->u.operand; + int r1 = (++it)->u.operand; + printLocationAndOp(out, exec, location, it, "put_setter_by_id"); + out.printf("%s, %s, %s", registerName(r0).data(), idName(id0, identifier(id0)).data(), registerName(r1).data()); + break; + } + case 
op_put_getter_setter: { + int r0 = (++it)->u.operand; + int id0 = (++it)->u.operand; + int r1 = (++it)->u.operand; + int r2 = (++it)->u.operand; + printLocationAndOp(out, exec, location, it, "put_getter_setter"); + out.printf("%s, %s, %s, %s", registerName(r0).data(), idName(id0, identifier(id0)).data(), registerName(r1).data(), registerName(r2).data()); + break; + } + case op_del_by_id: { + int r0 = (++it)->u.operand; + int r1 = (++it)->u.operand; + int id0 = (++it)->u.operand; + printLocationAndOp(out, exec, location, it, "del_by_id"); + out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), idName(id0, identifier(id0)).data()); + break; + } + case op_get_by_val: { + int r0 = (++it)->u.operand; + int r1 = (++it)->u.operand; + int r2 = (++it)->u.operand; + printLocationAndOp(out, exec, location, it, "get_by_val"); + out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data()); + dumpArrayProfiling(out, it, hasPrintedProfiling); + dumpValueProfiling(out, it, hasPrintedProfiling); + break; + } + case op_put_by_val: { + int r0 = (++it)->u.operand; + int r1 = (++it)->u.operand; + int r2 = (++it)->u.operand; + printLocationAndOp(out, exec, location, it, "put_by_val"); + out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data()); + dumpArrayProfiling(out, it, hasPrintedProfiling); + break; + } + case op_put_by_val_direct: { + int r0 = (++it)->u.operand; + int r1 = (++it)->u.operand; + int r2 = (++it)->u.operand; + printLocationAndOp(out, exec, location, it, "put_by_val_direct"); + out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data()); + dumpArrayProfiling(out, it, hasPrintedProfiling); + break; + } + case op_del_by_val: { + int r0 = (++it)->u.operand; + int r1 = (++it)->u.operand; + int r2 = (++it)->u.operand; + printLocationAndOp(out, exec, location, it, "del_by_val"); + out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data()); + break; + } + case op_put_by_index: { + int r0 = (++it)->u.operand; + unsigned n0 = (++it)->u.operand; + int r1 = (++it)->u.operand; + printLocationAndOp(out, exec, location, it, "put_by_index"); + out.printf("%s, %u, %s", registerName(r0).data(), n0, registerName(r1).data()); + break; + } + case op_jmp: { + int offset = (++it)->u.operand; + printLocationAndOp(out, exec, location, it, "jmp"); + out.printf("%d(->%d)", offset, location + offset); + break; + } + case op_jtrue: { + printConditionalJump(out, exec, begin, it, location, "jtrue"); + break; + } + case op_jfalse: { + printConditionalJump(out, exec, begin, it, location, "jfalse"); + break; + } + case op_jeq_null: { + printConditionalJump(out, exec, begin, it, location, "jeq_null"); + break; + } + case op_jneq_null: { + printConditionalJump(out, exec, begin, it, location, "jneq_null"); + break; + } + case op_jneq_ptr: { + int r0 = (++it)->u.operand; + Special::Pointer pointer = (++it)->u.specialPointer; + int offset = (++it)->u.operand; + printLocationAndOp(out, exec, location, it, "jneq_ptr"); + out.printf("%s, %d (%p), %d(->%d)", registerName(r0).data(), pointer, m_globalObject->actualPointerFor(pointer), offset, location + offset); + break; + } + case op_jless: { + int r0 = (++it)->u.operand; + int r1 = (++it)->u.operand; + int offset = (++it)->u.operand; + printLocationAndOp(out, exec, location, it, "jless"); + out.printf("%s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), offset, location + offset); + 
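        // Editor's note: an explanatory aside, not part of the original file. The jless/jnless
        // family exists because the bytecode generator fuses a comparison with the branch that
        // consumes it: "if (a < b) ..." emits a single op_jnless to the not-taken label rather
        // than op_less followed by op_jfalse.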
break; + } + case op_jlesseq: { + int r0 = (++it)->u.operand; + int r1 = (++it)->u.operand; + int offset = (++it)->u.operand; + printLocationAndOp(out, exec, location, it, "jlesseq"); + out.printf("%s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), offset, location + offset); + break; + } + case op_jgreater: { + int r0 = (++it)->u.operand; + int r1 = (++it)->u.operand; + int offset = (++it)->u.operand; + printLocationAndOp(out, exec, location, it, "jgreater"); + out.printf("%s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), offset, location + offset); + break; + } + case op_jgreatereq: { + int r0 = (++it)->u.operand; + int r1 = (++it)->u.operand; + int offset = (++it)->u.operand; + printLocationAndOp(out, exec, location, it, "jgreatereq"); + out.printf("%s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), offset, location + offset); + break; + } + case op_jnless: { + int r0 = (++it)->u.operand; + int r1 = (++it)->u.operand; + int offset = (++it)->u.operand; + printLocationAndOp(out, exec, location, it, "jnless"); + out.printf("%s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), offset, location + offset); + break; + } + case op_jnlesseq: { + int r0 = (++it)->u.operand; + int r1 = (++it)->u.operand; + int offset = (++it)->u.operand; + printLocationAndOp(out, exec, location, it, "jnlesseq"); + out.printf("%s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), offset, location + offset); + break; + } + case op_jngreater: { + int r0 = (++it)->u.operand; + int r1 = (++it)->u.operand; + int offset = (++it)->u.operand; + printLocationAndOp(out, exec, location, it, "jngreater"); + out.printf("%s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), offset, location + offset); + break; + } + case op_jngreatereq: { + int r0 = (++it)->u.operand; + int r1 = (++it)->u.operand; + int offset = (++it)->u.operand; + printLocationAndOp(out, exec, location, it, "jngreatereq"); + out.printf("%s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), offset, location + offset); + break; + } + case op_loop_hint: { + printLocationAndOp(out, exec, location, it, "loop_hint"); + break; + } + case op_switch_imm: { + int tableIndex = (++it)->u.operand; + int defaultTarget = (++it)->u.operand; + int scrutineeRegister = (++it)->u.operand; + printLocationAndOp(out, exec, location, it, "switch_imm"); + out.printf("%d, %d(->%d), %s", tableIndex, defaultTarget, location + defaultTarget, registerName(scrutineeRegister).data()); + break; + } + case op_switch_char: { + int tableIndex = (++it)->u.operand; + int defaultTarget = (++it)->u.operand; + int scrutineeRegister = (++it)->u.operand; + printLocationAndOp(out, exec, location, it, "switch_char"); + out.printf("%d, %d(->%d), %s", tableIndex, defaultTarget, location + defaultTarget, registerName(scrutineeRegister).data()); + break; + } + case op_switch_string: { + int tableIndex = (++it)->u.operand; + int defaultTarget = (++it)->u.operand; + int scrutineeRegister = (++it)->u.operand; + printLocationAndOp(out, exec, location, it, "switch_string"); + out.printf("%d, %d(->%d), %s", tableIndex, defaultTarget, location + defaultTarget, registerName(scrutineeRegister).data()); + break; + } + case op_new_func: { + int r0 = (++it)->u.operand; + int r1 = (++it)->u.operand; + int f0 = (++it)->u.operand; + printLocationAndOp(out, exec, location, it, "new_func"); + out.printf("%s, %s, f%d", registerName(r0).data(), registerName(r1).data(), f0); + break; + } + case op_new_func_exp: { 
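        // Editor's note: an explanatory aside, not part of the original file. For new_func and
        // new_func_exp, the "f%d" operand printed below indexes the code block's table of
        // FunctionExecutables recorded at parse time, and the second register operand names the
        // scope that the freshly created JSFunction will close over.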
+ int r0 = (++it)->u.operand; + int r1 = (++it)->u.operand; + int f0 = (++it)->u.operand; + printLocationAndOp(out, exec, location, it, "new_func_exp"); + out.printf("%s, %s, f%d", registerName(r0).data(), registerName(r1).data(), f0); + break; + } + case op_call: { + printCallOp(out, exec, location, it, "call", DumpCaches, hasPrintedProfiling, callLinkInfos); + break; + } + case op_call_eval: { + printCallOp(out, exec, location, it, "call_eval", DontDumpCaches, hasPrintedProfiling, callLinkInfos); + break; + } + + case op_construct_varargs: + case op_call_varargs: { + int result = (++it)->u.operand; + int callee = (++it)->u.operand; + int thisValue = (++it)->u.operand; + int arguments = (++it)->u.operand; + int firstFreeRegister = (++it)->u.operand; + int varArgOffset = (++it)->u.operand; + ++it; + printLocationAndOp(out, exec, location, it, opcode == op_call_varargs ? "call_varargs" : "construct_varargs"); + out.printf("%s, %s, %s, %s, %d, %d", registerName(result).data(), registerName(callee).data(), registerName(thisValue).data(), registerName(arguments).data(), firstFreeRegister, varArgOffset); + dumpValueProfiling(out, it, hasPrintedProfiling); + break; + } + + case op_ret: { + int r0 = (++it)->u.operand; + printLocationOpAndRegisterOperand(out, exec, location, it, "ret", r0); + break; + } + case op_construct: { + printCallOp(out, exec, location, it, "construct", DumpCaches, hasPrintedProfiling, callLinkInfos); + break; + } + case op_strcat: { + int r0 = (++it)->u.operand; + int r1 = (++it)->u.operand; + int count = (++it)->u.operand; + printLocationAndOp(out, exec, location, it, "strcat"); + out.printf("%s, %s, %d", registerName(r0).data(), registerName(r1).data(), count); + break; + } + case op_to_primitive: { + int r0 = (++it)->u.operand; + int r1 = (++it)->u.operand; + printLocationAndOp(out, exec, location, it, "to_primitive"); + out.printf("%s, %s", registerName(r0).data(), registerName(r1).data()); + break; + } + case op_get_enumerable_length: { + int dst = it[1].u.operand; + int base = it[2].u.operand; + printLocationAndOp(out, exec, location, it, "op_get_enumerable_length"); + out.printf("%s, %s", registerName(dst).data(), registerName(base).data()); + it += OPCODE_LENGTH(op_get_enumerable_length) - 1; + break; + } + case op_has_indexed_property: { + int dst = it[1].u.operand; + int base = it[2].u.operand; + int propertyName = it[3].u.operand; + ArrayProfile* arrayProfile = it[4].u.arrayProfile; + printLocationAndOp(out, exec, location, it, "op_has_indexed_property"); + out.printf("%s, %s, %s, %p", registerName(dst).data(), registerName(base).data(), registerName(propertyName).data(), arrayProfile); + it += OPCODE_LENGTH(op_has_indexed_property) - 1; + break; + } + case op_has_structure_property: { + int dst = it[1].u.operand; + int base = it[2].u.operand; + int propertyName = it[3].u.operand; + int enumerator = it[4].u.operand; + printLocationAndOp(out, exec, location, it, "op_has_structure_property"); + out.printf("%s, %s, %s, %s", registerName(dst).data(), registerName(base).data(), registerName(propertyName).data(), registerName(enumerator).data()); + it += OPCODE_LENGTH(op_has_structure_property) - 1; + break; + } + case op_has_generic_property: { + int dst = it[1].u.operand; + int base = it[2].u.operand; + int propertyName = it[3].u.operand; + printLocationAndOp(out, exec, location, it, "op_has_generic_property"); + out.printf("%s, %s, %s", registerName(dst).data(), registerName(base).data(), registerName(propertyName).data()); + it += 
OPCODE_LENGTH(op_has_generic_property) - 1; + break; + } + case op_get_direct_pname: { + int dst = it[1].u.operand; + int base = it[2].u.operand; + int propertyName = it[3].u.operand; + int index = it[4].u.operand; + int enumerator = it[5].u.operand; + ValueProfile* profile = it[6].u.profile; + printLocationAndOp(out, exec, location, it, "op_get_direct_pname"); + out.printf("%s, %s, %s, %s, %s, %p", registerName(dst).data(), registerName(base).data(), registerName(propertyName).data(), registerName(index).data(), registerName(enumerator).data(), profile); + it += OPCODE_LENGTH(op_get_direct_pname) - 1; + break; + + } + case op_get_property_enumerator: { + int dst = it[1].u.operand; + int base = it[2].u.operand; + printLocationAndOp(out, exec, location, it, "op_get_property_enumerator"); + out.printf("%s, %s", registerName(dst).data(), registerName(base).data()); + it += OPCODE_LENGTH(op_get_property_enumerator) - 1; + break; + } + case op_enumerator_structure_pname: { + int dst = it[1].u.operand; + int enumerator = it[2].u.operand; + int index = it[3].u.operand; + printLocationAndOp(out, exec, location, it, "op_enumerator_structure_pname"); + out.printf("%s, %s, %s", registerName(dst).data(), registerName(enumerator).data(), registerName(index).data()); + it += OPCODE_LENGTH(op_enumerator_structure_pname) - 1; + break; + } + case op_enumerator_generic_pname: { + int dst = it[1].u.operand; + int enumerator = it[2].u.operand; + int index = it[3].u.operand; + printLocationAndOp(out, exec, location, it, "op_enumerator_generic_pname"); + out.printf("%s, %s, %s", registerName(dst).data(), registerName(enumerator).data(), registerName(index).data()); + it += OPCODE_LENGTH(op_enumerator_generic_pname) - 1; + break; + } + case op_to_index_string: { + int dst = it[1].u.operand; + int index = it[2].u.operand; + printLocationAndOp(out, exec, location, it, "op_to_index_string"); + out.printf("%s, %s", registerName(dst).data(), registerName(index).data()); + it += OPCODE_LENGTH(op_to_index_string) - 1; + break; + } + case op_push_with_scope: { + int dst = (++it)->u.operand; + int newScope = (++it)->u.operand; + int currentScope = (++it)->u.operand; + printLocationAndOp(out, exec, location, it, "push_with_scope"); + out.printf("%s, %s, %s", registerName(dst).data(), registerName(newScope).data(), registerName(currentScope).data()); + break; + } + case op_get_parent_scope: { + int dst = (++it)->u.operand; + int parentScope = (++it)->u.operand; + printLocationAndOp(out, exec, location, it, "get_parent_scope"); + out.printf("%s, %s", registerName(dst).data(), registerName(parentScope).data()); + break; + } + case op_create_lexical_environment: { + int dst = (++it)->u.operand; + int scope = (++it)->u.operand; + int symbolTable = (++it)->u.operand; + int initialValue = (++it)->u.operand; + printLocationAndOp(out, exec, location, it, "create_lexical_environment"); + out.printf("%s, %s, %s, %s", + registerName(dst).data(), registerName(scope).data(), registerName(symbolTable).data(), registerName(initialValue).data()); + break; + } + case op_catch: { + int r0 = (++it)->u.operand; + int r1 = (++it)->u.operand; + printLocationAndOp(out, exec, location, it, "catch"); + out.printf("%s, %s", registerName(r0).data(), registerName(r1).data()); + break; + } + case op_throw: { + int r0 = (++it)->u.operand; + printLocationOpAndRegisterOperand(out, exec, location, it, "throw", r0); + break; + } + case op_throw_static_error: { + int k0 = (++it)->u.operand; + int k1 = (++it)->u.operand; + printLocationAndOp(out, exec, 
location, it, "throw_static_error"); + out.printf("%s, %s", constantName(k0).data(), k1 ? "true" : "false"); + break; + } + case op_debug: { + int debugHookID = (++it)->u.operand; + int hasBreakpointFlag = (++it)->u.operand; + printLocationAndOp(out, exec, location, it, "debug"); + out.printf("%s %d", debugHookName(debugHookID), hasBreakpointFlag); + break; + } + case op_profile_will_call: { + int function = (++it)->u.operand; + printLocationOpAndRegisterOperand(out, exec, location, it, "profile_will_call", function); + break; + } + case op_profile_did_call: { + int function = (++it)->u.operand; + printLocationOpAndRegisterOperand(out, exec, location, it, "profile_did_call", function); + break; + } + case op_end: { + int r0 = (++it)->u.operand; + printLocationOpAndRegisterOperand(out, exec, location, it, "end", r0); + break; + } + case op_resolve_scope: { + int r0 = (++it)->u.operand; + int scope = (++it)->u.operand; + int id0 = (++it)->u.operand; + ResolveModeAndType modeAndType = ResolveModeAndType((++it)->u.operand); + int depth = (++it)->u.operand; + printLocationAndOp(out, exec, location, it, "resolve_scope"); + out.printf("%s, %s, %s, %u<%s|%s>, %d", registerName(r0).data(), registerName(scope).data(), idName(id0, identifier(id0)).data(), + modeAndType.operand(), resolveModeName(modeAndType.mode()), resolveTypeName(modeAndType.type()), + depth); + ++it; + break; + } + case op_get_from_scope: { + int r0 = (++it)->u.operand; + int r1 = (++it)->u.operand; + int id0 = (++it)->u.operand; + ResolveModeAndType modeAndType = ResolveModeAndType((++it)->u.operand); + ++it; // Structure + int operand = (++it)->u.operand; // Operand + printLocationAndOp(out, exec, location, it, "get_from_scope"); + out.print(registerName(r0), ", ", registerName(r1)); + if (static_cast<unsigned>(id0) == UINT_MAX) + out.print(", anonymous"); + else + out.print(", ", idName(id0, identifier(id0))); + out.print(", ", modeAndType.operand(), "<", resolveModeName(modeAndType.mode()), "|", resolveTypeName(modeAndType.type()), ">, ", operand); + dumpValueProfiling(out, it, hasPrintedProfiling); + break; + } + case op_put_to_scope: { + int r0 = (++it)->u.operand; + int id0 = (++it)->u.operand; + int r1 = (++it)->u.operand; + ResolveModeAndType modeAndType = ResolveModeAndType((++it)->u.operand); + ++it; // Structure + int operand = (++it)->u.operand; // Operand + printLocationAndOp(out, exec, location, it, "put_to_scope"); + out.print(registerName(r0)); + if (static_cast<unsigned>(id0) == UINT_MAX) + out.print(", anonymous"); + else + out.print(", ", idName(id0, identifier(id0))); + out.print(", ", registerName(r1), ", ", modeAndType.operand(), "<", resolveModeName(modeAndType.mode()), "|", resolveTypeName(modeAndType.type()), ">, <structure>, ", operand); + break; + } + case op_get_from_arguments: { + int r0 = (++it)->u.operand; + int r1 = (++it)->u.operand; + int offset = (++it)->u.operand; + printLocationAndOp(out, exec, location, it, "get_from_arguments"); + out.printf("%s, %s, %d", registerName(r0).data(), registerName(r1).data(), offset); + dumpValueProfiling(out, it, hasPrintedProfiling); + break; + } + case op_put_to_arguments: { + int r0 = (++it)->u.operand; + int offset = (++it)->u.operand; + int r1 = (++it)->u.operand; + printLocationAndOp(out, exec, location, it, "put_to_arguments"); + out.printf("%s, %d, %s", registerName(r0).data(), offset, registerName(r1).data()); + break; + } + default: + RELEASE_ASSERT_NOT_REACHED(); + } + + dumpRareCaseProfile(out, "rare case: ", 
rareCaseProfileForBytecodeOffset(location), hasPrintedProfiling); + dumpRareCaseProfile(out, "special fast case: ", specialFastCaseProfileForBytecodeOffset(location), hasPrintedProfiling); + +#if ENABLE(DFG_JIT) + Vector<DFG::FrequentExitSite> exitSites = exitProfile().exitSitesFor(location); + if (!exitSites.isEmpty()) { + out.print(" !! frequent exits: "); + CommaPrinter comma; + for (unsigned i = 0; i < exitSites.size(); ++i) + out.print(comma, exitSites[i].kind(), " ", exitSites[i].jitType()); + } +#else // ENABLE(DFG_JIT) + UNUSED_PARAM(location); +#endif // ENABLE(DFG_JIT) + out.print("\n"); +} + +void CodeBlock::dumpBytecode( + PrintStream& out, unsigned bytecodeOffset, + const StubInfoMap& stubInfos, const CallLinkInfoMap& callLinkInfos) +{ + ExecState* exec = m_globalObject->globalExec(); + const Instruction* it = instructions().begin() + bytecodeOffset; + dumpBytecode(out, exec, instructions().begin(), it, stubInfos, callLinkInfos); +} + +#define FOR_EACH_MEMBER_VECTOR(macro) \ + macro(instructions) \ + macro(callLinkInfos) \ + macro(linkedCallerList) \ + macro(identifiers) \ + macro(functionExpressions) \ + macro(constantRegisters) + +#define FOR_EACH_MEMBER_VECTOR_RARE_DATA(macro) \ + macro(regexps) \ + macro(functions) \ + macro(exceptionHandlers) \ + macro(switchJumpTables) \ + macro(stringSwitchJumpTables) \ + macro(evalCodeCache) \ + macro(expressionInfo) \ + macro(lineInfo) \ + macro(callReturnIndexVector) + +template<typename T> +static size_t sizeInBytes(const Vector<T>& vector) +{ + return vector.capacity() * sizeof(T); +} + +namespace { + +class PutToScopeFireDetail : public FireDetail { +public: + PutToScopeFireDetail(CodeBlock* codeBlock, const Identifier& ident) + : m_codeBlock(codeBlock) + , m_ident(ident) + { + } + + virtual void dump(PrintStream& out) const override + { + out.print("Linking put_to_scope in ", FunctionExecutableDump(jsCast<FunctionExecutable*>(m_codeBlock->ownerExecutable())), " for ", m_ident); + } + +private: + CodeBlock* m_codeBlock; + const Identifier& m_ident; +}; + +} // anonymous namespace + +CodeBlock::CodeBlock(CopyParsedBlockTag, CodeBlock& other) + : m_globalObject(other.m_globalObject) + , m_heap(other.m_heap) + , m_numCalleeRegisters(other.m_numCalleeRegisters) + , m_numVars(other.m_numVars) + , m_isConstructor(other.m_isConstructor) + , m_shouldAlwaysBeInlined(true) + , m_didFailFTLCompilation(false) + , m_hasBeenCompiledWithFTL(false) + , m_unlinkedCode(*other.m_vm, other.m_ownerExecutable.get(), other.m_unlinkedCode.get()) + , m_hasDebuggerStatement(false) + , m_steppingMode(SteppingModeDisabled) + , m_numBreakpoints(0) + , m_ownerExecutable(*other.m_vm, other.m_ownerExecutable.get(), other.m_ownerExecutable.get()) + , m_vm(other.m_vm) + , m_instructions(other.m_instructions) + , m_thisRegister(other.m_thisRegister) + , m_scopeRegister(other.m_scopeRegister) + , m_lexicalEnvironmentRegister(other.m_lexicalEnvironmentRegister) + , m_isStrictMode(other.m_isStrictMode) + , m_needsActivation(other.m_needsActivation) + , m_mayBeExecuting(false) + , m_source(other.m_source) + , m_sourceOffset(other.m_sourceOffset) + , m_firstLineColumnOffset(other.m_firstLineColumnOffset) + , m_codeType(other.m_codeType) + , m_constantRegisters(other.m_constantRegisters) + , m_constantsSourceCodeRepresentation(other.m_constantsSourceCodeRepresentation) + , m_functionDecls(other.m_functionDecls) + , m_functionExprs(other.m_functionExprs) + , m_osrExitCounter(0) + , m_optimizationDelayCounter(0) + , m_reoptimizationRetryCounter(0) + , m_hash(other.m_hash) 
+#if ENABLE(JIT) + , m_capabilityLevelState(DFG::CapabilityLevelNotSet) +#endif +{ + m_visitAggregateHasBeenCalled.store(false, std::memory_order_relaxed); + + ASSERT(m_heap->isDeferred()); + ASSERT(m_scopeRegister.isLocal()); + + setNumParameters(other.numParameters()); + optimizeAfterWarmUp(); + jitAfterWarmUp(); + + if (other.m_rareData) { + createRareDataIfNecessary(); + + m_rareData->m_exceptionHandlers = other.m_rareData->m_exceptionHandlers; + m_rareData->m_constantBuffers = other.m_rareData->m_constantBuffers; + m_rareData->m_switchJumpTables = other.m_rareData->m_switchJumpTables; + m_rareData->m_stringSwitchJumpTables = other.m_rareData->m_stringSwitchJumpTables; + } + + m_heap->m_codeBlocks.add(this); + m_heap->reportExtraMemoryAllocated(sizeof(CodeBlock)); +} + +CodeBlock::CodeBlock(ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlinkedCodeBlock, JSScope* scope, PassRefPtr<SourceProvider> sourceProvider, unsigned sourceOffset, unsigned firstLineColumnOffset) + : m_globalObject(scope->globalObject()->vm(), ownerExecutable, scope->globalObject()) + , m_heap(&m_globalObject->vm().heap) + , m_numCalleeRegisters(unlinkedCodeBlock->m_numCalleeRegisters) + , m_numVars(unlinkedCodeBlock->m_numVars) + , m_isConstructor(unlinkedCodeBlock->isConstructor()) + , m_shouldAlwaysBeInlined(true) + , m_didFailFTLCompilation(false) + , m_hasBeenCompiledWithFTL(false) + , m_unlinkedCode(m_globalObject->vm(), ownerExecutable, unlinkedCodeBlock) + , m_hasDebuggerStatement(false) + , m_steppingMode(SteppingModeDisabled) + , m_numBreakpoints(0) + , m_ownerExecutable(m_globalObject->vm(), ownerExecutable, ownerExecutable) + , m_vm(unlinkedCodeBlock->vm()) + , m_thisRegister(unlinkedCodeBlock->thisRegister()) + , m_scopeRegister(unlinkedCodeBlock->scopeRegister()) + , m_lexicalEnvironmentRegister(unlinkedCodeBlock->activationRegister()) + , m_isStrictMode(unlinkedCodeBlock->isStrictMode()) + , m_needsActivation(unlinkedCodeBlock->hasActivationRegister() && unlinkedCodeBlock->codeType() == FunctionCode) + , m_mayBeExecuting(false) + , m_source(sourceProvider) + , m_sourceOffset(sourceOffset) + , m_firstLineColumnOffset(firstLineColumnOffset) + , m_codeType(unlinkedCodeBlock->codeType()) + , m_osrExitCounter(0) + , m_optimizationDelayCounter(0) + , m_reoptimizationRetryCounter(0) +#if ENABLE(JIT) + , m_capabilityLevelState(DFG::CapabilityLevelNotSet) +#endif +{ + m_visitAggregateHasBeenCalled.store(false, std::memory_order_relaxed); + + ASSERT(m_heap->isDeferred()); + ASSERT(m_scopeRegister.isLocal()); + + ASSERT(m_source); + setNumParameters(unlinkedCodeBlock->numParameters()); + + if (vm()->typeProfiler() || vm()->controlFlowProfiler()) + vm()->functionHasExecutedCache()->removeUnexecutedRange(m_ownerExecutable->sourceID(), m_ownerExecutable->typeProfilingStartOffset(), m_ownerExecutable->typeProfilingEndOffset()); + + setConstantRegisters(unlinkedCodeBlock->constantRegisters(), unlinkedCodeBlock->constantsSourceCodeRepresentation()); + if (unlinkedCodeBlock->usesGlobalObject()) + m_constantRegisters[unlinkedCodeBlock->globalObjectRegister().toConstantIndex()].set(*m_vm, ownerExecutable, m_globalObject.get()); + + for (unsigned i = 0; i < LinkTimeConstantCount; i++) { + LinkTimeConstant type = static_cast<LinkTimeConstant>(i); + if (unsigned registerIndex = unlinkedCodeBlock->registerIndexForLinkTimeConstant(type)) + m_constantRegisters[registerIndex].set(*m_vm, ownerExecutable, m_globalObject->jsCellForLinkTimeConstant(type)); + } + + HashSet<int, WTF::IntHash<int>, 
WTF::UnsignedWithZeroKeyHashTraits<int>> clonedConstantSymbolTables; + { + HashSet<SymbolTable*> clonedSymbolTables; + for (unsigned i = 0; i < m_constantRegisters.size(); i++) { + if (m_constantRegisters[i].get().isEmpty()) + continue; + if (SymbolTable* symbolTable = jsDynamicCast<SymbolTable*>(m_constantRegisters[i].get())) { + RELEASE_ASSERT(clonedSymbolTables.add(symbolTable).isNewEntry); + if (m_vm->typeProfiler()) { + ConcurrentJITLocker locker(symbolTable->m_lock); + symbolTable->prepareForTypeProfiling(locker); + } + m_constantRegisters[i].set(*m_vm, ownerExecutable, symbolTable->cloneScopePart(*m_vm)); + clonedConstantSymbolTables.add(i + FirstConstantRegisterIndex); + } + } + } + + m_functionDecls.resizeToFit(unlinkedCodeBlock->numberOfFunctionDecls()); + for (size_t count = unlinkedCodeBlock->numberOfFunctionDecls(), i = 0; i < count; ++i) { + UnlinkedFunctionExecutable* unlinkedExecutable = unlinkedCodeBlock->functionDecl(i); + if (vm()->typeProfiler() || vm()->controlFlowProfiler()) + vm()->functionHasExecutedCache()->insertUnexecutedRange(m_ownerExecutable->sourceID(), unlinkedExecutable->typeProfilingStartOffset(), unlinkedExecutable->typeProfilingEndOffset()); + m_functionDecls[i].set(*m_vm, ownerExecutable, unlinkedExecutable->link(*m_vm, ownerExecutable->source())); + } + + m_functionExprs.resizeToFit(unlinkedCodeBlock->numberOfFunctionExprs()); + for (size_t count = unlinkedCodeBlock->numberOfFunctionExprs(), i = 0; i < count; ++i) { + UnlinkedFunctionExecutable* unlinkedExecutable = unlinkedCodeBlock->functionExpr(i); + if (vm()->typeProfiler() || vm()->controlFlowProfiler()) + vm()->functionHasExecutedCache()->insertUnexecutedRange(m_ownerExecutable->sourceID(), unlinkedExecutable->typeProfilingStartOffset(), unlinkedExecutable->typeProfilingEndOffset()); + m_functionExprs[i].set(*m_vm, ownerExecutable, unlinkedExecutable->link(*m_vm, ownerExecutable->source())); + } + + if (unlinkedCodeBlock->hasRareData()) { + createRareDataIfNecessary(); + if (size_t count = unlinkedCodeBlock->constantBufferCount()) { + m_rareData->m_constantBuffers.grow(count); + for (size_t i = 0; i < count; i++) { + const UnlinkedCodeBlock::ConstantBuffer& buffer = unlinkedCodeBlock->constantBuffer(i); + m_rareData->m_constantBuffers[i] = buffer; + } + } + if (size_t count = unlinkedCodeBlock->numberOfExceptionHandlers()) { + m_rareData->m_exceptionHandlers.resizeToFit(count); + for (size_t i = 0; i < count; i++) { + const UnlinkedHandlerInfo& unlinkedHandler = unlinkedCodeBlock->exceptionHandler(i); + HandlerInfo& handler = m_rareData->m_exceptionHandlers[i]; +#if ENABLE(JIT) + handler.initialize(unlinkedHandler, CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(LLInt::getCodePtr(op_catch)))); +#else + handler.initialize(unlinkedHandler); +#endif + } + } + + if (size_t count = unlinkedCodeBlock->numberOfStringSwitchJumpTables()) { + m_rareData->m_stringSwitchJumpTables.grow(count); + for (size_t i = 0; i < count; i++) { + UnlinkedStringJumpTable::StringOffsetTable::iterator ptr = unlinkedCodeBlock->stringSwitchJumpTable(i).offsetTable.begin(); + UnlinkedStringJumpTable::StringOffsetTable::iterator end = unlinkedCodeBlock->stringSwitchJumpTable(i).offsetTable.end(); + for (; ptr != end; ++ptr) { + OffsetLocation offset; + offset.branchOffset = ptr->value; + m_rareData->m_stringSwitchJumpTables[i].offsetTable.add(ptr->key, offset); + } + } + } + + if (size_t count = unlinkedCodeBlock->numberOfSwitchJumpTables()) { + m_rareData->m_switchJumpTables.grow(count); + for (size_t i = 
0; i < count; i++) { + UnlinkedSimpleJumpTable& sourceTable = unlinkedCodeBlock->switchJumpTable(i); + SimpleJumpTable& destTable = m_rareData->m_switchJumpTables[i]; + destTable.branchOffsets = sourceTable.branchOffsets; + destTable.min = sourceTable.min; + } + } + } + + // Allocate metadata buffers for the bytecode + if (size_t size = unlinkedCodeBlock->numberOfLLintCallLinkInfos()) + m_llintCallLinkInfos.resizeToFit(size); + if (size_t size = unlinkedCodeBlock->numberOfArrayProfiles()) + m_arrayProfiles.grow(size); + if (size_t size = unlinkedCodeBlock->numberOfArrayAllocationProfiles()) + m_arrayAllocationProfiles.resizeToFit(size); + if (size_t size = unlinkedCodeBlock->numberOfValueProfiles()) + m_valueProfiles.resizeToFit(size); + if (size_t size = unlinkedCodeBlock->numberOfObjectAllocationProfiles()) + m_objectAllocationProfiles.resizeToFit(size); + + // Copy and translate the UnlinkedInstructions + unsigned instructionCount = unlinkedCodeBlock->instructions().count(); + UnlinkedInstructionStream::Reader instructionReader(unlinkedCodeBlock->instructions()); + + RefCountedArray<Instruction> instructions(instructionCount); + + for (unsigned i = 0; !instructionReader.atEnd(); ) { + const UnlinkedInstruction* pc = instructionReader.next(); + + unsigned opLength = opcodeLength(pc[0].u.opcode); + + instructions[i] = vm()->interpreter->getOpcode(pc[0].u.opcode); + for (size_t j = 1; j < opLength; ++j) { + if (sizeof(int32_t) != sizeof(intptr_t)) + instructions[i + j].u.pointer = 0; + instructions[i + j].u.operand = pc[j].u.operand; + } + switch (pc[0].u.opcode) { + case op_has_indexed_property: { + int arrayProfileIndex = pc[opLength - 1].u.operand; + m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i); + + instructions[i + opLength - 1] = &m_arrayProfiles[arrayProfileIndex]; + break; + } + case op_call_varargs: + case op_construct_varargs: + case op_get_by_val: { + int arrayProfileIndex = pc[opLength - 2].u.operand; + m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i); + + instructions[i + opLength - 2] = &m_arrayProfiles[arrayProfileIndex]; + FALLTHROUGH; + } + case op_get_direct_pname: + case op_get_by_id: + case op_get_from_arguments: { + ValueProfile* profile = &m_valueProfiles[pc[opLength - 1].u.operand]; + ASSERT(profile->m_bytecodeOffset == -1); + profile->m_bytecodeOffset = i; + instructions[i + opLength - 1] = profile; + break; + } + case op_put_by_val: { + int arrayProfileIndex = pc[opLength - 1].u.operand; + m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i); + instructions[i + opLength - 1] = &m_arrayProfiles[arrayProfileIndex]; + break; + } + case op_put_by_val_direct: { + int arrayProfileIndex = pc[opLength - 1].u.operand; + m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i); + instructions[i + opLength - 1] = &m_arrayProfiles[arrayProfileIndex]; + break; + } + + case op_new_array: + case op_new_array_buffer: + case op_new_array_with_size: { + int arrayAllocationProfileIndex = pc[opLength - 1].u.operand; + instructions[i + opLength - 1] = &m_arrayAllocationProfiles[arrayAllocationProfileIndex]; + break; + } + case op_new_object: { + int objectAllocationProfileIndex = pc[opLength - 1].u.operand; + ObjectAllocationProfile* objectAllocationProfile = &m_objectAllocationProfiles[objectAllocationProfileIndex]; + int inferredInlineCapacity = pc[opLength - 2].u.operand; + + instructions[i + opLength - 1] = objectAllocationProfile; + objectAllocationProfile->initialize(*vm(), + m_ownerExecutable.get(), m_globalObject->objectPrototype(), inferredInlineCapacity); + break; + } 
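+ + // Calls get three pieces of metadata linked here: a ValueProfile for the call result in the last operand, an ArrayProfile in the second-to-last, and an LLIntCallLinkInfo in operand 5.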
+ + case op_call: + case op_call_eval: { + ValueProfile* profile = &m_valueProfiles[pc[opLength - 1].u.operand]; + ASSERT(profile->m_bytecodeOffset == -1); + profile->m_bytecodeOffset = i; + instructions[i + opLength - 1] = profile; + int arrayProfileIndex = pc[opLength - 2].u.operand; + m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i); + instructions[i + opLength - 2] = &m_arrayProfiles[arrayProfileIndex]; + instructions[i + 5] = &m_llintCallLinkInfos[pc[5].u.operand]; + break; + } + case op_construct: { + instructions[i + 5] = &m_llintCallLinkInfos[pc[5].u.operand]; + ValueProfile* profile = &m_valueProfiles[pc[opLength - 1].u.operand]; + ASSERT(profile->m_bytecodeOffset == -1); + profile->m_bytecodeOffset = i; + instructions[i + opLength - 1] = profile; + break; + } + case op_get_by_id_out_of_line: + case op_get_array_length: + CRASH(); + + case op_create_lexical_environment: { + int symbolTableIndex = pc[3].u.operand; + RELEASE_ASSERT(clonedConstantSymbolTables.contains(symbolTableIndex)); + break; + } + + case op_resolve_scope: { + const Identifier& ident = identifier(pc[3].u.operand); + ResolveType type = static_cast<ResolveType>(pc[4].u.operand); + RELEASE_ASSERT(type != LocalClosureVar); + int localScopeDepth = pc[5].u.operand; + + ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), localScopeDepth, scope, ident, Get, type); + instructions[i + 4].u.operand = op.type; + instructions[i + 5].u.operand = op.depth; + if (op.lexicalEnvironment) + instructions[i + 6].u.symbolTable.set(*vm(), ownerExecutable, op.lexicalEnvironment->symbolTable()); + else + instructions[i + 6].u.pointer = nullptr; + break; + } + + case op_get_from_scope: { + ValueProfile* profile = &m_valueProfiles[pc[opLength - 1].u.operand]; + ASSERT(profile->m_bytecodeOffset == -1); + profile->m_bytecodeOffset = i; + instructions[i + opLength - 1] = profile; + + // get_from_scope dst, scope, id, ResolveModeAndType, Structure, Operand + + int localScopeDepth = pc[5].u.operand; + instructions[i + 5].u.pointer = nullptr; + + ResolveModeAndType modeAndType = ResolveModeAndType(pc[4].u.operand); + if (modeAndType.type() == LocalClosureVar) { + instructions[i + 4] = ResolveModeAndType(modeAndType.mode(), ClosureVar).operand(); + break; + } + + const Identifier& ident = identifier(pc[3].u.operand); + ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), localScopeDepth, scope, ident, Get, modeAndType.type()); + + instructions[i + 4].u.operand = ResolveModeAndType(modeAndType.mode(), op.type).operand(); + if (op.type == GlobalVar || op.type == GlobalVarWithVarInjectionChecks) + instructions[i + 5].u.watchpointSet = op.watchpointSet; + else if (op.structure) + instructions[i + 5].u.structure.set(*vm(), ownerExecutable, op.structure); + instructions[i + 6].u.pointer = reinterpret_cast<void*>(op.operand); + break; + } + + case op_put_to_scope: { + // put_to_scope scope, id, value, ResolveModeAndType, Structure, Operand + ResolveModeAndType modeAndType = ResolveModeAndType(pc[4].u.operand); + if (modeAndType.type() == LocalClosureVar) { + // Only do watching if the property we're putting to is not anonymous. 
+ if (static_cast<unsigned>(pc[2].u.operand) != UINT_MAX) { + int symbolTableIndex = pc[5].u.operand; + RELEASE_ASSERT(clonedConstantSymbolTables.contains(symbolTableIndex)); + SymbolTable* symbolTable = jsCast<SymbolTable*>(getConstant(symbolTableIndex)); + const Identifier& ident = identifier(pc[2].u.operand); + ConcurrentJITLocker locker(symbolTable->m_lock); + auto iter = symbolTable->find(locker, ident.impl()); + RELEASE_ASSERT(iter != symbolTable->end(locker)); + iter->value.prepareToWatch(); + instructions[i + 5].u.watchpointSet = iter->value.watchpointSet(); + } else + instructions[i + 5].u.watchpointSet = nullptr; + break; + } + + const Identifier& ident = identifier(pc[2].u.operand); + int localScopeDepth = pc[5].u.operand; + instructions[i + 5].u.pointer = nullptr; + ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), localScopeDepth, scope, ident, Put, modeAndType.type()); + + instructions[i + 4].u.operand = ResolveModeAndType(modeAndType.mode(), op.type).operand(); + if (op.type == GlobalVar || op.type == GlobalVarWithVarInjectionChecks) + instructions[i + 5].u.watchpointSet = op.watchpointSet; + else if (op.type == ClosureVar || op.type == ClosureVarWithVarInjectionChecks) { + if (op.watchpointSet) + op.watchpointSet->invalidate(PutToScopeFireDetail(this, ident)); + } else if (op.structure) + instructions[i + 5].u.structure.set(*vm(), ownerExecutable, op.structure); + instructions[i + 6].u.pointer = reinterpret_cast<void*>(op.operand); + + break; + } + + case op_profile_type: { + RELEASE_ASSERT(vm()->typeProfiler()); + // The format of this instruction is: op_profile_type regToProfile, TypeLocation*, flag, identifier?, resolveType? + size_t instructionOffset = i + opLength - 1; + unsigned divotStart, divotEnd; + GlobalVariableID globalVariableID = 0; + RefPtr<TypeSet> globalTypeSet; + bool shouldAnalyze = m_unlinkedCode->typeProfilerExpressionInfoForBytecodeOffset(instructionOffset, divotStart, divotEnd); + VirtualRegister profileRegister(pc[1].u.operand); + ProfileTypeBytecodeFlag flag = static_cast<ProfileTypeBytecodeFlag>(pc[3].u.operand); + SymbolTable* symbolTable = nullptr; + + switch (flag) { + case ProfileTypeBytecodeClosureVar: { + const Identifier& ident = identifier(pc[4].u.operand); + int localScopeDepth = pc[2].u.operand; + ResolveType type = static_cast<ResolveType>(pc[5].u.operand); + // Even though type profiling may be profiling either a Get or a Put, we can always claim a Get because + // we're abstractly "reading" from a JSScope. + ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), localScopeDepth, scope, ident, Get, type); + + if (op.type == ClosureVar) + symbolTable = op.lexicalEnvironment->symbolTable(); + else if (op.type == GlobalVar) + symbolTable = m_globalObject.get()->symbolTable(); + + if (symbolTable) { + ConcurrentJITLocker locker(symbolTable->m_lock); + // If our parent scope was created while profiling was disabled, it will not have prepared for profiling yet.
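+ // (Assumed safe to call even if the table has already been prepared.)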
+ symbolTable->prepareForTypeProfiling(locker); + globalVariableID = symbolTable->uniqueIDForVariable(locker, ident.impl(), *vm()); + globalTypeSet = symbolTable->globalTypeSetForVariable(locker, ident.impl(), *vm()); + } else + globalVariableID = TypeProfilerNoGlobalIDExists; + + break; + } + case ProfileTypeBytecodeLocallyResolved: { + int symbolTableIndex = pc[2].u.operand; + RELEASE_ASSERT(clonedConstantSymbolTables.contains(symbolTableIndex)); + SymbolTable* symbolTable = jsCast<SymbolTable*>(getConstant(symbolTableIndex)); + const Identifier& ident = identifier(pc[4].u.operand); + ConcurrentJITLocker locker(symbolTable->m_lock); + // If our parent scope was created while profiling was disabled, it will not have prepared for profiling yet. + globalVariableID = symbolTable->uniqueIDForVariable(locker, ident.impl(), *vm()); + globalTypeSet = symbolTable->globalTypeSetForVariable(locker, ident.impl(), *vm()); + + break; + } + case ProfileTypeBytecodeDoesNotHaveGlobalID: + case ProfileTypeBytecodeFunctionArgument: { + globalVariableID = TypeProfilerNoGlobalIDExists; + break; + } + case ProfileTypeBytecodeFunctionReturnStatement: { + RELEASE_ASSERT(ownerExecutable->isFunctionExecutable()); + globalTypeSet = jsCast<FunctionExecutable*>(ownerExecutable)->returnStatementTypeSet(); + globalVariableID = TypeProfilerReturnStatement; + if (!shouldAnalyze) { + // Because a return statement can be added implicitly to return undefined at the end of a function, + // and these nodes don't emit expression ranges because they aren't in the actual source text of + // the user's program, give the type profiler some range to identify these return statements. + // Currently, the text offset that is used as identification is on the open brace of the function + // and is stored on TypeLocation's m_divotForFunctionOffsetIfReturnStatement member variable. + divotStart = divotEnd = m_sourceOffset; + shouldAnalyze = true; + } + break; + } + } + + std::pair<TypeLocation*, bool> locationPair = vm()->typeProfiler()->typeLocationCache()->getTypeLocation(globalVariableID, + m_ownerExecutable->sourceID(), divotStart, divotEnd, globalTypeSet, vm()); + TypeLocation* location = locationPair.first; + bool isNewLocation = locationPair.second; + + if (flag == ProfileTypeBytecodeFunctionReturnStatement) + location->m_divotForFunctionOffsetIfReturnStatement = m_sourceOffset; + + if (shouldAnalyze && isNewLocation) + vm()->typeProfiler()->insertNewLocation(location); + + instructions[i + 2].u.location = location; + break; + } + + case op_debug: { + if (pc[1].u.index == DidReachBreakpoint) + m_hasDebuggerStatement = true; + break; + } + + default: + break; + } + i += opLength; + } + + if (vm()->controlFlowProfiler()) + insertBasicBlockBoundariesForControlFlowProfiler(instructions); + + m_instructions = WTF::move(instructions); + + // Set optimization thresholds only after m_instructions is initialized, since these + // rely on the instruction count (and are in theory permitted to also inspect the + // instruction stream to more accurately assess the cost of tier-up). + optimizeAfterWarmUp(); + jitAfterWarmUp(); + + // If the concurrent thread will want the code block's hash, then compute it here + // synchronously.
+ if (Options::alwaysComputeHash()) + hash(); + + if (Options::dumpGeneratedBytecodes()) + dumpBytecode(); + + m_heap->m_codeBlocks.add(this); + m_heap->reportExtraMemoryAllocated(sizeof(CodeBlock) + m_instructions.size() * sizeof(Instruction)); +} + +CodeBlock::~CodeBlock() +{ + if (m_vm->m_perBytecodeProfiler) + m_vm->m_perBytecodeProfiler->notifyDestruction(this); + +#if ENABLE(VERBOSE_VALUE_PROFILE) + dumpValueProfiles(); +#endif + while (m_incomingLLIntCalls.begin() != m_incomingLLIntCalls.end()) + m_incomingLLIntCalls.begin()->remove(); +#if ENABLE(JIT) + // We may be destroyed before any CodeBlocks that refer to us are destroyed. + // Consider that two CodeBlocks become unreachable at the same time. There + // is no guarantee about the order in which the CodeBlocks are destroyed. + // So, if we don't remove incoming calls, and get destroyed before the + // CodeBlock(s) that have calls into us, then the CallLinkInfo vector's + // destructor will try to remove nodes from our (no longer valid) linked list. + while (m_incomingCalls.begin() != m_incomingCalls.end()) + m_incomingCalls.begin()->remove(); + while (m_incomingPolymorphicCalls.begin() != m_incomingPolymorphicCalls.end()) + m_incomingPolymorphicCalls.begin()->remove(); + + // Note that our outgoing calls will be removed from other CodeBlocks' + // m_incomingCalls linked lists through the execution of the ~CallLinkInfo + // destructors. + + for (Bag<StructureStubInfo>::iterator iter = m_stubInfos.begin(); !!iter; ++iter) + (*iter)->deref(); +#endif // ENABLE(JIT) +} + +void CodeBlock::setNumParameters(int newValue) +{ + m_numParameters = newValue; + + m_argumentValueProfiles.resizeToFit(newValue); +} + +void EvalCodeCache::visitAggregate(SlotVisitor& visitor) +{ + EvalCacheMap::iterator end = m_cacheMap.end(); + for (EvalCacheMap::iterator ptr = m_cacheMap.begin(); ptr != end; ++ptr) + visitor.append(&ptr->value); +} + +CodeBlock* CodeBlock::specialOSREntryBlockOrNull() +{ +#if ENABLE(FTL_JIT) + if (jitType() != JITCode::DFGJIT) + return 0; + DFG::JITCode* jitCode = m_jitCode->dfg(); + return jitCode->osrEntryBlock.get(); +#else // ENABLE(FTL_JIT) + return 0; +#endif // ENABLE(FTL_JIT) +} + +void CodeBlock::visitAggregate(SlotVisitor& visitor) +{ +#if ENABLE(PARALLEL_GC) + // I may be asked to scan myself more than once, and it may even happen concurrently. + // To this end, use an atomic operation to check (and set) if I've been called already. + // Only one thread may proceed past this point - whichever one wins the atomic set race. + bool setByMe = m_visitAggregateHasBeenCalled.compareExchangeStrong(false, true); + if (!setByMe) + return; +#endif // ENABLE(PARALLEL_GC) + + if (!!m_alternative) + m_alternative->visitAggregate(visitor); + + if (CodeBlock* otherBlock = specialOSREntryBlockOrNull()) + otherBlock->visitAggregate(visitor); + + visitor.reportExtraMemoryVisited(ownerExecutable(), sizeof(CodeBlock)); + if (m_jitCode) + visitor.reportExtraMemoryVisited(ownerExecutable(), m_jitCode->size()); + if (m_instructions.size()) { + // Divide by refCount() because m_instructions points to something that is shared + // by multiple CodeBlocks, and we only want to count it towards the heap size once. + // Having each CodeBlock report only its proportional share of the size is one way + // of accomplishing this. 
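+ // For example, an instruction stream shared by four CodeBlocks is reported at a quarter of its byte size by each block.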
+ visitor.reportExtraMemoryVisited(ownerExecutable(), m_instructions.size() * sizeof(Instruction) / m_instructions.refCount()); + } + + visitor.append(&m_unlinkedCode); + + // There are three things that may use unconditional finalizers: lazy bytecode freeing, + // inline cache clearing, and jettisoning. The probability of us wanting to do at + // least one of those things is quite close to 1. So we add one no matter what, + // and when it runs, it figures out whether it has any work to do. + visitor.addUnconditionalFinalizer(this); + + m_allTransitionsHaveBeenMarked = false; + + if (shouldImmediatelyAssumeLivenessDuringScan()) { + // This code block is live, so scan all references strongly and return. + stronglyVisitStrongReferences(visitor); + stronglyVisitWeakReferences(visitor); + propagateTransitions(visitor); + return; + } + + // There are two things that we use weak reference harvesters for: DFG fixpoint for + // jettisoning, and trying to find structures that would be live based on some + // inline cache. So it makes sense to register them regardless. + visitor.addWeakReferenceHarvester(this); + +#if ENABLE(DFG_JIT) + // We get here if we're live in the sense that our owner executable is live, + // but we're not yet live for sure in another sense: we may yet decide that this + // code block should be jettisoned based on its outgoing weak references being + // stale. Set a flag to indicate that we're still assuming that we're dead, and + // perform one round of determining if we're live. The GC may determine, based on + // either us marking additional objects, or by other objects being marked for + // other reasons, that this iteration should run again; it will notify us of this + // decision by calling harvestWeakReferences(). + + m_jitCode->dfgCommon()->livenessHasBeenProved = false; + + propagateTransitions(visitor); + determineLiveness(visitor); +#else // ENABLE(DFG_JIT) + RELEASE_ASSERT_NOT_REACHED(); +#endif // ENABLE(DFG_JIT) +} + +bool CodeBlock::shouldImmediatelyAssumeLivenessDuringScan() +{ +#if ENABLE(DFG_JIT) + // Interpreter and Baseline JIT CodeBlocks don't need to be jettisoned when + // their weak references go stale. So if a baseline JIT CodeBlock gets + // scanned, we can assume that it's live. + if (!JITCode::isOptimizingJIT(jitType())) + return true; + + // For simplicity, we don't attempt to jettison code blocks during GC if + // they are executing. Instead we strongly mark their weak references to + // allow them to continue to execute soundly. + if (m_mayBeExecuting) + return true; + + if (Options::forceDFGCodeBlockLiveness()) + return true; + + return false; +#else + return true; +#endif +} + +bool CodeBlock::isKnownToBeLiveDuringGC() +{ +#if ENABLE(DFG_JIT) + // This should return true for: + // - Code blocks that behave like normal objects - i.e. if they are referenced then they + // are live. + // - Code blocks that were running on the stack. + // - Code blocks that survived the last GC if the current GC is an Eden GC. This is + // because either livenessHasBeenProved would have survived as true or m_mayBeExecuting + // would survive as true. + // - Code blocks that don't have any dead weak references.
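+ // The first disjunct below covers the trivially-live cases (non-optimizing JIT, possibly executing, or forced via Options); livenessHasBeenProved covers the rest.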
+ + return shouldImmediatelyAssumeLivenessDuringScan() + || m_jitCode->dfgCommon()->livenessHasBeenProved; +#else + return true; +#endif +} + +#if ENABLE(DFG_JIT) +static bool shouldMarkTransition(DFG::WeakReferenceTransition& transition) +{ + if (transition.m_codeOrigin && !Heap::isMarked(transition.m_codeOrigin.get())) + return false; + + if (!Heap::isMarked(transition.m_from.get())) + return false; + + return true; +} +#endif // ENABLE(DFG_JIT) + +void CodeBlock::propagateTransitions(SlotVisitor& visitor) +{ + UNUSED_PARAM(visitor); + + if (m_allTransitionsHaveBeenMarked) + return; + + bool allAreMarkedSoFar = true; + + Interpreter* interpreter = m_vm->interpreter; + if (jitType() == JITCode::InterpreterThunk) { + const Vector<unsigned>& propertyAccessInstructions = m_unlinkedCode->propertyAccessInstructions(); + for (size_t i = 0; i < propertyAccessInstructions.size(); ++i) { + Instruction* instruction = &instructions()[propertyAccessInstructions[i]]; + switch (interpreter->getOpcodeID(instruction[0].u.opcode)) { + case op_put_by_id_transition_direct: + case op_put_by_id_transition_normal: + case op_put_by_id_transition_direct_out_of_line: + case op_put_by_id_transition_normal_out_of_line: { + if (Heap::isMarked(instruction[4].u.structure.get())) + visitor.append(&instruction[6].u.structure); + else + allAreMarkedSoFar = false; + break; + } + default: + break; + } + } + } + +#if ENABLE(JIT) + if (JITCode::isJIT(jitType())) { + for (Bag<StructureStubInfo>::iterator iter = m_stubInfos.begin(); !!iter; ++iter) { + StructureStubInfo& stubInfo = **iter; + switch (stubInfo.accessType) { + case access_put_by_id_transition_normal: + case access_put_by_id_transition_direct: { + JSCell* origin = stubInfo.codeOrigin.codeOriginOwner(); + if ((!origin || Heap::isMarked(origin)) + && Heap::isMarked(stubInfo.u.putByIdTransition.previousStructure.get())) + visitor.append(&stubInfo.u.putByIdTransition.structure); + else + allAreMarkedSoFar = false; + break; + } + + case access_put_by_id_list: { + PolymorphicPutByIdList* list = stubInfo.u.putByIdList.list; + JSCell* origin = stubInfo.codeOrigin.codeOriginOwner(); + if (origin && !Heap::isMarked(origin)) { + allAreMarkedSoFar = false; + break; + } + for (unsigned j = list->size(); j--;) { + PutByIdAccess& access = list->m_list[j]; + if (!access.isTransition()) + continue; + if (Heap::isMarked(access.oldStructure())) + visitor.append(&access.m_newStructure); + else + allAreMarkedSoFar = false; + } + break; + } + + default: + break; + } + } + } +#endif // ENABLE(JIT) + +#if ENABLE(DFG_JIT) + if (JITCode::isOptimizingJIT(jitType())) { + DFG::CommonData* dfgCommon = m_jitCode->dfgCommon(); + + for (unsigned i = 0; i < dfgCommon->transitions.size(); ++i) { + if (shouldMarkTransition(dfgCommon->transitions[i])) { + // If the following three things are live, then the target of the + // transition is also live: + // + // - This code block. We know it's live already because otherwise + // we wouldn't be scanning ourselves. + // + // - The code origin of the transition. Transitions may arise from + // code that was inlined. They are not relevant if the user's + // object that is required for the inlinee to run is no longer + // live. + // + // - The source of the transition. The transition checks if some + // heap location holds the source, and if so, stores the target. + // Hence the source must be live for the transition to be live. + // + // We also short-circuit the liveness if the structure is harmless + // to mark (i.e. 
its global object and prototype are both already + // live). + + visitor.append(&dfgCommon->transitions[i].m_to); + } else + allAreMarkedSoFar = false; + } + } +#endif // ENABLE(DFG_JIT) + + if (allAreMarkedSoFar) + m_allTransitionsHaveBeenMarked = true; +} + +void CodeBlock::determineLiveness(SlotVisitor& visitor) +{ + UNUSED_PARAM(visitor); + + if (shouldImmediatelyAssumeLivenessDuringScan()) + return; + +#if ENABLE(DFG_JIT) + // Check if we have any remaining work to do. + DFG::CommonData* dfgCommon = m_jitCode->dfgCommon(); + if (dfgCommon->livenessHasBeenProved) + return; + + // Now check all of our weak references. If all of them are live, then we + // have proved liveness and so we scan our strong references. If at end of + // GC we still have not proved liveness, then this code block is toast. + bool allAreLiveSoFar = true; + for (unsigned i = 0; i < dfgCommon->weakReferences.size(); ++i) { + if (!Heap::isMarked(dfgCommon->weakReferences[i].get())) { + allAreLiveSoFar = false; + break; + } + } + if (allAreLiveSoFar) { + for (unsigned i = 0; i < dfgCommon->weakStructureReferences.size(); ++i) { + if (!Heap::isMarked(dfgCommon->weakStructureReferences[i].get())) { + allAreLiveSoFar = false; + break; + } + } + } + + // If some weak references are dead, then this fixpoint iteration was + // unsuccessful. + if (!allAreLiveSoFar) + return; + + // All weak references are live. Record this information so we don't + // come back here again, and scan the strong references. + dfgCommon->livenessHasBeenProved = true; + stronglyVisitStrongReferences(visitor); +#endif // ENABLE(DFG_JIT) +} + +void CodeBlock::visitWeakReferences(SlotVisitor& visitor) +{ + propagateTransitions(visitor); + determineLiveness(visitor); +} + +void CodeBlock::finalizeUnconditionally() +{ + Interpreter* interpreter = m_vm->interpreter; + if (JITCode::couldBeInterpreted(jitType())) { + const Vector<unsigned>& propertyAccessInstructions = m_unlinkedCode->propertyAccessInstructions(); + for (size_t size = propertyAccessInstructions.size(), i = 0; i < size; ++i) { + Instruction* curInstruction = &instructions()[propertyAccessInstructions[i]]; + switch (interpreter->getOpcodeID(curInstruction[0].u.opcode)) { + case op_get_by_id: + case op_get_by_id_out_of_line: + case op_put_by_id: + case op_put_by_id_out_of_line: + if (!curInstruction[4].u.structure || Heap::isMarked(curInstruction[4].u.structure.get())) + break; + if (Options::verboseOSR()) + dataLogF("Clearing LLInt property access with structure %p.\n", curInstruction[4].u.structure.get()); + curInstruction[4].u.structure.clear(); + curInstruction[5].u.operand = 0; + break; + case op_put_by_id_transition_direct: + case op_put_by_id_transition_normal: + case op_put_by_id_transition_direct_out_of_line: + case op_put_by_id_transition_normal_out_of_line: + if (Heap::isMarked(curInstruction[4].u.structure.get()) + && Heap::isMarked(curInstruction[6].u.structure.get()) + && Heap::isMarked(curInstruction[7].u.structureChain.get())) + break; + if (Options::verboseOSR()) { + dataLogF("Clearing LLInt put transition with structures %p -> %p, chain %p.\n", + curInstruction[4].u.structure.get(), + curInstruction[6].u.structure.get(), + curInstruction[7].u.structureChain.get()); + } + curInstruction[4].u.structure.clear(); + curInstruction[6].u.structure.clear(); + curInstruction[7].u.structureChain.clear(); + curInstruction[0].u.opcode = interpreter->getOpcode(op_put_by_id); + break; + case op_get_array_length: + break; + case op_to_this: + if (!curInstruction[2].u.structure || 
Heap::isMarked(curInstruction[2].u.structure.get())) + break; + if (Options::verboseOSR()) + dataLogF("Clearing LLInt to_this with structure %p.\n", curInstruction[2].u.structure.get()); + curInstruction[2].u.structure.clear(); + curInstruction[3].u.toThisStatus = merge( + curInstruction[3].u.toThisStatus, ToThisClearedByGC); + break; + case op_create_this: { + auto& cacheWriteBarrier = curInstruction[4].u.jsCell; + if (!cacheWriteBarrier || cacheWriteBarrier.unvalidatedGet() == JSCell::seenMultipleCalleeObjects()) + break; + JSCell* cachedFunction = cacheWriteBarrier.get(); + if (Heap::isMarked(cachedFunction)) + break; + if (Options::verboseOSR()) + dataLogF("Clearing LLInt create_this with cached callee %p.\n", cachedFunction); + cacheWriteBarrier.clear(); + break; + } + case op_resolve_scope: { + // Right now this isn't strictly necessary. Any symbol tables that this will refer to + // are for outer functions, and we refer to those functions strongly, and they refer + // to the symbol table strongly. But it's nice to be on the safe side. + WriteBarrierBase<SymbolTable>& symbolTable = curInstruction[6].u.symbolTable; + if (!symbolTable || Heap::isMarked(symbolTable.get())) + break; + if (Options::verboseOSR()) + dataLogF("Clearing dead symbolTable %p.\n", symbolTable.get()); + symbolTable.clear(); + break; + } + case op_get_from_scope: + case op_put_to_scope: { + ResolveModeAndType modeAndType = + ResolveModeAndType(curInstruction[4].u.operand); + if (modeAndType.type() == GlobalVar || modeAndType.type() == GlobalVarWithVarInjectionChecks || modeAndType.type() == LocalClosureVar) + continue; + WriteBarrierBase<Structure>& structure = curInstruction[5].u.structure; + if (!structure || Heap::isMarked(structure.get())) + break; + if (Options::verboseOSR()) + dataLogF("Clearing scope access with structure %p.\n", structure.get()); + structure.clear(); + break; + } + default: + OpcodeID opcodeID = interpreter->getOpcodeID(curInstruction[0].u.opcode); + ASSERT_WITH_MESSAGE_UNUSED(opcodeID, false, "Unhandled opcode in CodeBlock::finalizeUnconditionally, %s(%d) at bc %u", opcodeNames[opcodeID], opcodeID, propertyAccessInstructions[i]); + } + } + + for (unsigned i = 0; i < m_llintCallLinkInfos.size(); ++i) { + if (m_llintCallLinkInfos[i].isLinked() && !Heap::isMarked(m_llintCallLinkInfos[i].callee.get())) { + if (Options::verboseOSR()) + dataLog("Clearing LLInt call from ", *this, "\n"); + m_llintCallLinkInfos[i].unlink(); + } + if (!!m_llintCallLinkInfos[i].lastSeenCallee && !Heap::isMarked(m_llintCallLinkInfos[i].lastSeenCallee.get())) + m_llintCallLinkInfos[i].lastSeenCallee.clear(); + } + } + +#if ENABLE(DFG_JIT) + // Check whether we are known to be live. If we are not, then jettison.
+ if (!isKnownToBeLiveDuringGC()) { + if (Options::verboseOSR()) + dataLog(*this, " has dead weak references, jettisoning during GC.\n"); + + if (DFG::shouldShowDisassembly()) { + dataLog(*this, " will be jettisoned because of the following dead references:\n"); + DFG::CommonData* dfgCommon = m_jitCode->dfgCommon(); + for (unsigned i = 0; i < dfgCommon->transitions.size(); ++i) { + DFG::WeakReferenceTransition& transition = dfgCommon->transitions[i]; + JSCell* origin = transition.m_codeOrigin.get(); + JSCell* from = transition.m_from.get(); + JSCell* to = transition.m_to.get(); + if ((!origin || Heap::isMarked(origin)) && Heap::isMarked(from)) + continue; + dataLog(" Transition under ", RawPointer(origin), ", ", RawPointer(from), " -> ", RawPointer(to), ".\n"); + } + for (unsigned i = 0; i < dfgCommon->weakReferences.size(); ++i) { + JSCell* weak = dfgCommon->weakReferences[i].get(); + if (Heap::isMarked(weak)) + continue; + dataLog(" Weak reference ", RawPointer(weak), ".\n"); + } + } + + jettison(Profiler::JettisonDueToWeakReference); + return; + } +#endif // ENABLE(DFG_JIT) + +#if ENABLE(JIT) + // Handle inline caches. + if (!!jitCode()) { + RepatchBuffer repatchBuffer(this); + + for (auto iter = callLinkInfosBegin(); !!iter; ++iter) + (*iter)->visitWeak(repatchBuffer); + + for (Bag<StructureStubInfo>::iterator iter = m_stubInfos.begin(); !!iter; ++iter) { + StructureStubInfo& stubInfo = **iter; + + if (stubInfo.visitWeakReferences(repatchBuffer)) + continue; + + resetStubDuringGCInternal(repatchBuffer, stubInfo); + } + } +#endif +} + +void CodeBlock::getStubInfoMap(const ConcurrentJITLocker&, StubInfoMap& result) +{ +#if ENABLE(JIT) + toHashMap(m_stubInfos, getStructureStubInfoCodeOrigin, result); +#else + UNUSED_PARAM(result); +#endif +} + +void CodeBlock::getStubInfoMap(StubInfoMap& result) +{ + ConcurrentJITLocker locker(m_lock); + getStubInfoMap(locker, result); +} + +void CodeBlock::getCallLinkInfoMap(const ConcurrentJITLocker&, CallLinkInfoMap& result) +{ +#if ENABLE(JIT) + toHashMap(m_callLinkInfos, getCallLinkInfoCodeOrigin, result); +#else + UNUSED_PARAM(result); +#endif +} + +void CodeBlock::getCallLinkInfoMap(CallLinkInfoMap& result) +{ + ConcurrentJITLocker locker(m_lock); + getCallLinkInfoMap(locker, result); +} + +void CodeBlock::getByValInfoMap(const ConcurrentJITLocker&, ByValInfoMap& result) +{ +#if ENABLE(JIT) + for (auto* byValInfo : m_byValInfos) + result.add(CodeOrigin(byValInfo->bytecodeIndex), byValInfo); +#else + UNUSED_PARAM(result); +#endif +} + +void CodeBlock::getByValInfoMap(ByValInfoMap& result) +{ + ConcurrentJITLocker locker(m_lock); + getByValInfoMap(locker, result); +} + +#if ENABLE(JIT) +StructureStubInfo* CodeBlock::addStubInfo() +{ + ConcurrentJITLocker locker(m_lock); + return m_stubInfos.add(); +} + +StructureStubInfo* CodeBlock::findStubInfo(CodeOrigin codeOrigin) +{ + for (StructureStubInfo* stubInfo : m_stubInfos) { + if (stubInfo->codeOrigin == codeOrigin) + return stubInfo; + } + return nullptr; +} + +ByValInfo* CodeBlock::addByValInfo() +{ + ConcurrentJITLocker locker(m_lock); + return m_byValInfos.add(); +} + +CallLinkInfo* CodeBlock::addCallLinkInfo() +{ + ConcurrentJITLocker locker(m_lock); + return m_callLinkInfos.add(); +} + +void CodeBlock::resetStub(StructureStubInfo& stubInfo) +{ + if (stubInfo.accessType == access_unset) + return; + + ConcurrentJITLocker locker(m_lock); + + RepatchBuffer repatchBuffer(this); + resetStubInternal(repatchBuffer, stubInfo); +} + +void CodeBlock::resetStubInternal(RepatchBuffer& repatchBuffer, 
StructureStubInfo& stubInfo) +{ + AccessType accessType = static_cast<AccessType>(stubInfo.accessType); + + if (Options::verboseOSR()) { + // This can be called from GC destructor calls, so we don't try to do a full dump + // of the CodeBlock. + dataLog("Clearing structure cache (kind ", static_cast<int>(stubInfo.accessType), ") in ", RawPointer(this), ".\n"); + } + + RELEASE_ASSERT(JITCode::isJIT(jitType())); + + if (isGetByIdAccess(accessType)) + resetGetByID(repatchBuffer, stubInfo); + else if (isPutByIdAccess(accessType)) + resetPutByID(repatchBuffer, stubInfo); + else { + RELEASE_ASSERT(isInAccess(accessType)); + resetIn(repatchBuffer, stubInfo); + } + + stubInfo.reset(); +} + +void CodeBlock::resetStubDuringGCInternal(RepatchBuffer& repatchBuffer, StructureStubInfo& stubInfo) +{ + resetStubInternal(repatchBuffer, stubInfo); + stubInfo.resetByGC = true; +} + +CallLinkInfo* CodeBlock::getCallLinkInfoForBytecodeIndex(unsigned index) +{ + for (auto iter = m_callLinkInfos.begin(); !!iter; ++iter) { + if ((*iter)->codeOrigin() == CodeOrigin(index)) + return *iter; + } + return nullptr; +} +#endif + +void CodeBlock::stronglyVisitStrongReferences(SlotVisitor& visitor) +{ + visitor.append(&m_globalObject); + visitor.append(&m_ownerExecutable); + visitor.append(&m_unlinkedCode); + if (m_rareData) + m_rareData->m_evalCodeCache.visitAggregate(visitor); + visitor.appendValues(m_constantRegisters.data(), m_constantRegisters.size()); + for (size_t i = 0; i < m_functionExprs.size(); ++i) + visitor.append(&m_functionExprs[i]); + for (size_t i = 0; i < m_functionDecls.size(); ++i) + visitor.append(&m_functionDecls[i]); + for (unsigned i = 0; i < m_objectAllocationProfiles.size(); ++i) + m_objectAllocationProfiles[i].visitAggregate(visitor); + +#if ENABLE(DFG_JIT) + if (JITCode::isOptimizingJIT(jitType())) { + // FIXME: This is an antipattern for two reasons. References introduced by the DFG + // that aren't in the original CodeBlock being compiled should be weakly referenced. + // Inline call frames aren't in the original CodeBlock, so they qualify as weak. Also, + // those weak references should already be tracked in the DFG as weak FrozenValues. So, + // there is probably no need for this. We already have assertions that this should be + // unnecessary. + // https://bugs.webkit.org/show_bug.cgi?id=146613 + DFG::CommonData* dfgCommon = m_jitCode->dfgCommon(); + if (dfgCommon->inlineCallFrames.get()) + dfgCommon->inlineCallFrames->visitAggregate(visitor); + } +#endif + + updateAllPredictions(); +} + +void CodeBlock::stronglyVisitWeakReferences(SlotVisitor& visitor) +{ + UNUSED_PARAM(visitor); + +#if ENABLE(DFG_JIT) + if (!JITCode::isOptimizingJIT(jitType())) + return; + + DFG::CommonData* dfgCommon = m_jitCode->dfgCommon(); + + for (unsigned i = 0; i < dfgCommon->transitions.size(); ++i) { + if (!!dfgCommon->transitions[i].m_codeOrigin) + visitor.append(&dfgCommon->transitions[i].m_codeOrigin); // Almost certainly not necessary, since the code origin should also be a weak reference. Better to be safe, though. 
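+ // We only reach here when the code block is being kept alive wholesale (see visitAggregate), so the sources and targets of transitions are visited strongly as well.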
+ visitor.append(&dfgCommon->transitions[i].m_from); + visitor.append(&dfgCommon->transitions[i].m_to); + } + + for (unsigned i = 0; i < dfgCommon->weakReferences.size(); ++i) + visitor.append(&dfgCommon->weakReferences[i]); + + for (unsigned i = 0; i < dfgCommon->weakStructureReferences.size(); ++i) + visitor.append(&dfgCommon->weakStructureReferences[i]); +#endif +} + +CodeBlock* CodeBlock::baselineAlternative() +{ +#if ENABLE(JIT) + CodeBlock* result = this; + while (result->alternative()) + result = result->alternative(); + RELEASE_ASSERT(result); + RELEASE_ASSERT(JITCode::isBaselineCode(result->jitType()) || result->jitType() == JITCode::None); + return result; +#else + return this; +#endif +} + +CodeBlock* CodeBlock::baselineVersion() +{ +#if ENABLE(JIT) + if (JITCode::isBaselineCode(jitType())) + return this; + CodeBlock* result = replacement(); + if (!result) { + // This can happen if we're creating the original CodeBlock for an executable. + // Assume that we're the baseline CodeBlock. + RELEASE_ASSERT(jitType() == JITCode::None); + return this; + } + result = result->baselineAlternative(); + return result; +#else + return this; +#endif +} + +#if ENABLE(JIT) +bool CodeBlock::hasOptimizedReplacement(JITCode::JITType typeToReplace) +{ + return JITCode::isHigherTier(replacement()->jitType(), typeToReplace); +} + +bool CodeBlock::hasOptimizedReplacement() +{ + return hasOptimizedReplacement(jitType()); +} +#endif + +HandlerInfo* CodeBlock::handlerForBytecodeOffset(unsigned bytecodeOffset, RequiredHandler requiredHandler) +{ + RELEASE_ASSERT(bytecodeOffset < instructions().size()); + + if (!m_rareData) + return 0; + + Vector<HandlerInfo>& exceptionHandlers = m_rareData->m_exceptionHandlers; + for (size_t i = 0; i < exceptionHandlers.size(); ++i) { + HandlerInfo& handler = exceptionHandlers[i]; + if ((requiredHandler == RequiredHandler::CatchHandler) && !handler.isCatchHandler()) + continue; + + // Handlers are ordered innermost first, so the first handler we encounter + // that contains the source address is the correct handler to use. + if (handler.start <= bytecodeOffset && handler.end > bytecodeOffset) + return &handler; + } + + return 0; +} + +unsigned CodeBlock::lineNumberForBytecodeOffset(unsigned bytecodeOffset) +{ + RELEASE_ASSERT(bytecodeOffset < instructions().size()); + return m_ownerExecutable->firstLine() + m_unlinkedCode->lineNumberForBytecodeOffset(bytecodeOffset); +} + +unsigned CodeBlock::columnNumberForBytecodeOffset(unsigned bytecodeOffset) +{ + int divot; + int startOffset; + int endOffset; + unsigned line; + unsigned column; + expressionRangeForBytecodeOffset(bytecodeOffset, divot, startOffset, endOffset, line, column); + return column; +} + +void CodeBlock::expressionRangeForBytecodeOffset(unsigned bytecodeOffset, int& divot, int& startOffset, int& endOffset, unsigned& line, unsigned& column) +{ + m_unlinkedCode->expressionRangeForBytecodeOffset(bytecodeOffset, divot, startOffset, endOffset, line, column); + divot += m_sourceOffset; + column += line ? 
1 : firstLineColumnOffset(); + line += m_ownerExecutable->firstLine(); +} + +bool CodeBlock::hasOpDebugForLineAndColumn(unsigned line, unsigned column) +{ + Interpreter* interpreter = vm()->interpreter; + const Instruction* begin = instructions().begin(); + const Instruction* end = instructions().end(); + for (const Instruction* it = begin; it != end;) { + OpcodeID opcodeID = interpreter->getOpcodeID(it->u.opcode); + if (opcodeID == op_debug) { + unsigned bytecodeOffset = it - begin; + int unused; + unsigned opDebugLine; + unsigned opDebugColumn; + expressionRangeForBytecodeOffset(bytecodeOffset, unused, unused, unused, opDebugLine, opDebugColumn); + if (line == opDebugLine && (column == Breakpoint::unspecifiedColumn || column == opDebugColumn)) + return true; + } + it += opcodeLengths[opcodeID]; + } + return false; +} + +void CodeBlock::shrinkToFit(ShrinkMode shrinkMode) +{ + m_rareCaseProfiles.shrinkToFit(); + m_specialFastCaseProfiles.shrinkToFit(); + + if (shrinkMode == EarlyShrink) { + m_constantRegisters.shrinkToFit(); + m_constantsSourceCodeRepresentation.shrinkToFit(); + + if (m_rareData) { + m_rareData->m_switchJumpTables.shrinkToFit(); + m_rareData->m_stringSwitchJumpTables.shrinkToFit(); + } + } // else don't shrink these, because we would have already pointed pointers into these tables. +} + +#if ENABLE(JIT) +void CodeBlock::unlinkCalls() +{ + if (!!m_alternative) + m_alternative->unlinkCalls(); + for (size_t i = 0; i < m_llintCallLinkInfos.size(); ++i) { + if (m_llintCallLinkInfos[i].isLinked()) + m_llintCallLinkInfos[i].unlink(); + } + if (m_callLinkInfos.isEmpty()) + return; + if (!m_vm->canUseJIT()) + return; + RepatchBuffer repatchBuffer(this); + for (auto iter = m_callLinkInfos.begin(); !!iter; ++iter) { + CallLinkInfo& info = **iter; + if (!info.isLinked()) + continue; + info.unlink(repatchBuffer); + } +} + +void CodeBlock::linkIncomingCall(ExecState* callerFrame, CallLinkInfo* incoming) +{ + noticeIncomingCall(callerFrame); + m_incomingCalls.push(incoming); +} + +void CodeBlock::linkIncomingPolymorphicCall(ExecState* callerFrame, PolymorphicCallNode* incoming) +{ + noticeIncomingCall(callerFrame); + m_incomingPolymorphicCalls.push(incoming); +} +#endif // ENABLE(JIT) + +void CodeBlock::unlinkIncomingCalls() +{ + while (m_incomingLLIntCalls.begin() != m_incomingLLIntCalls.end()) + m_incomingLLIntCalls.begin()->unlink(); +#if ENABLE(JIT) + if (m_incomingCalls.isEmpty() && m_incomingPolymorphicCalls.isEmpty()) + return; + RepatchBuffer repatchBuffer(this); + while (m_incomingCalls.begin() != m_incomingCalls.end()) + m_incomingCalls.begin()->unlink(repatchBuffer); + while (m_incomingPolymorphicCalls.begin() != m_incomingPolymorphicCalls.end()) + m_incomingPolymorphicCalls.begin()->unlink(repatchBuffer); +#endif // ENABLE(JIT) +} + +void CodeBlock::linkIncomingCall(ExecState* callerFrame, LLIntCallLinkInfo* incoming) +{ + noticeIncomingCall(callerFrame); + m_incomingLLIntCalls.push(incoming); +} + +void CodeBlock::install() +{ + ownerExecutable()->installCode(this); +} + +PassRefPtr<CodeBlock> CodeBlock::newReplacement() +{ + return ownerExecutable()->newReplacementCodeBlockFor(specializationKind()); +} + +#if ENABLE(JIT) +CodeBlock* ProgramCodeBlock::replacement() +{ + return jsCast<ProgramExecutable*>(ownerExecutable())->codeBlock(); +} + +CodeBlock* EvalCodeBlock::replacement() +{ + return jsCast<EvalExecutable*>(ownerExecutable())->codeBlock(); +} + +CodeBlock* FunctionCodeBlock::replacement() +{ + return 
jsCast<FunctionExecutable*>(ownerExecutable())->codeBlockFor(m_isConstructor ? CodeForConstruct : CodeForCall); +} + +DFG::CapabilityLevel ProgramCodeBlock::capabilityLevelInternal() +{ + return DFG::programCapabilityLevel(this); +} + +DFG::CapabilityLevel EvalCodeBlock::capabilityLevelInternal() +{ + return DFG::evalCapabilityLevel(this); +} + +DFG::CapabilityLevel FunctionCodeBlock::capabilityLevelInternal() +{ + if (m_isConstructor) + return DFG::functionForConstructCapabilityLevel(this); + return DFG::functionForCallCapabilityLevel(this); +} +#endif + +void CodeBlock::jettison(Profiler::JettisonReason reason, ReoptimizationMode mode, const FireDetail* detail) +{ + RELEASE_ASSERT(reason != Profiler::NotJettisoned); + +#if ENABLE(DFG_JIT) + if (DFG::shouldShowDisassembly()) { + dataLog("Jettisoning ", *this); + if (mode == CountReoptimization) + dataLog(" and counting reoptimization"); + dataLog(" due to ", reason); + if (detail) + dataLog(", ", *detail); + dataLog(".\n"); + } + + DeferGCForAWhile deferGC(*m_heap); + RELEASE_ASSERT(JITCode::isOptimizingJIT(jitType())); + + if (Profiler::Compilation* compilation = jitCode()->dfgCommon()->compilation.get()) + compilation->setJettisonReason(reason, detail); + + // We want to accomplish two things here: + // 1) Make sure that if this CodeBlock is on the stack right now, then if we return to it + // we should OSR exit at the top of the next bytecode instruction after the return. + // 2) Make sure that if we call the owner executable, then we shouldn't call this CodeBlock. + + // This accomplishes the OSR-exit-on-return part, and does its own book-keeping about + // whether the invalidation has already happened. + if (!jitCode()->dfgCommon()->invalidate()) { + // Nothing to do since we've already been invalidated. That means that we cannot be + // the optimized replacement. + RELEASE_ASSERT(this != replacement()); + return; + } + + if (DFG::shouldShowDisassembly()) + dataLog(" Did invalidate ", *this, "\n"); + + // Count the reoptimization if that's what the user wanted. + if (mode == CountReoptimization) { + // FIXME: Maybe this should call alternative(). + // https://bugs.webkit.org/show_bug.cgi?id=123677 + baselineAlternative()->countReoptimization(); + if (DFG::shouldShowDisassembly()) + dataLog(" Did count reoptimization for ", *this, "\n"); + } + + // Now take care of the entrypoint. + if (this != replacement()) { + // This means that we were never the entrypoint. This can happen for OSR entry code + // blocks. 
+ return; + } + alternative()->optimizeAfterWarmUp(); + tallyFrequentExitSites(); + alternative()->install(); + if (DFG::shouldShowDisassembly()) + dataLog(" Did install baseline version of ", *this, "\n"); +#else // ENABLE(DFG_JIT) + UNUSED_PARAM(mode); + UNUSED_PARAM(detail); + UNREACHABLE_FOR_PLATFORM(); +#endif // ENABLE(DFG_JIT) +} + +JSGlobalObject* CodeBlock::globalObjectFor(CodeOrigin codeOrigin) +{ + if (!codeOrigin.inlineCallFrame) + return globalObject(); + return jsCast<FunctionExecutable*>(codeOrigin.inlineCallFrame->executable.get())->eitherCodeBlock()->globalObject(); +} + +class RecursionCheckFunctor { +public: + RecursionCheckFunctor(CallFrame* startCallFrame, CodeBlock* codeBlock, unsigned depthToCheck) + : m_startCallFrame(startCallFrame) + , m_codeBlock(codeBlock) + , m_depthToCheck(depthToCheck) + , m_foundStartCallFrame(false) + , m_didRecurse(false) + { } + + StackVisitor::Status operator()(StackVisitor& visitor) + { + CallFrame* currentCallFrame = visitor->callFrame(); + + if (currentCallFrame == m_startCallFrame) + m_foundStartCallFrame = true; + + if (m_foundStartCallFrame) { + if (visitor->callFrame()->codeBlock() == m_codeBlock) { + m_didRecurse = true; + return StackVisitor::Done; + } + + if (!m_depthToCheck--) + return StackVisitor::Done; + } + + return StackVisitor::Continue; + } + + bool didRecurse() const { return m_didRecurse; } + +private: + CallFrame* m_startCallFrame; + CodeBlock* m_codeBlock; + unsigned m_depthToCheck; + bool m_foundStartCallFrame; + bool m_didRecurse; +}; + +void CodeBlock::noticeIncomingCall(ExecState* callerFrame) +{ + CodeBlock* callerCodeBlock = callerFrame->codeBlock(); + + if (Options::verboseCallLink()) + dataLog("Noticing call link from ", pointerDump(callerCodeBlock), " to ", *this, "\n"); + +#if ENABLE(DFG_JIT) + if (!m_shouldAlwaysBeInlined) + return; + + if (!callerCodeBlock) { + m_shouldAlwaysBeInlined = false; + if (Options::verboseCallLink()) + dataLog(" Clearing SABI because caller is native.\n"); + return; + } + + if (!hasBaselineJITProfiling()) + return; + + if (!DFG::mightInlineFunction(this)) + return; + + if (!canInline(m_capabilityLevelState)) + return; + + if (!DFG::isSmallEnoughToInlineCodeInto(callerCodeBlock)) { + m_shouldAlwaysBeInlined = false; + if (Options::verboseCallLink()) + dataLog(" Clearing SABI because caller is too large.\n"); + return; + } + + if (callerCodeBlock->jitType() == JITCode::InterpreterThunk) { + // If the caller is still in the interpreter, then we can't expect inlining to + // happen anytime soon. Assume it's profitable to optimize it separately. This + // ensures that a function is SABI only if it is called no more frequently than + // any of its callers. + m_shouldAlwaysBeInlined = false; + if (Options::verboseCallLink()) + dataLog(" Clearing SABI because caller is in LLInt.\n"); + return; + } + + if (JITCode::isOptimizingJIT(callerCodeBlock->jitType())) { + m_shouldAlwaysBeInlined = false; + if (Options::verboseCallLink()) + dataLog(" Clearing SABI because caller was already optimized.\n"); + return; + } + + if (callerCodeBlock->codeType() != FunctionCode) { + // If the caller is either eval or global code, assume that it won't be + // optimized anytime soon. For eval code this is particularly true since we + // delay eval optimization by a *lot*. + m_shouldAlwaysBeInlined = false; + if (Options::verboseCallLink()) + dataLog(" Clearing SABI because caller is not a function.\n"); + return; + } + + // Recursive calls won't be inlined.
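[Annotation] The check below walks every machine stack frame starting at vm()->topCallFrame, and once it passes the caller's frame it looks for this CodeBlock again within the allowed inlining depth. A rough standalone model of that walk, where Frame and detectRecursion are illustrative stand-ins for the real CallFrame/StackVisitor machinery and the find-the-start-frame step is omitted:

// Illustrative sketch only; not part of the JSC API.
struct Frame {
    const void* codeBlock; // stand-in for CodeBlock*
    Frame* caller;         // next frame up the stack
};

// Returns true if `codeBlock` occurs again within `depthToCheck` frames
// starting at `start`, mirroring RecursionCheckFunctor's early-outs.
static bool detectRecursion(const Frame* start, const void* codeBlock, unsigned depthToCheck)
{
    for (const Frame* frame = start; frame; frame = frame->caller) {
        if (frame->codeBlock == codeBlock)
            return true;
        if (!depthToCheck--) // give up after maximumInliningDepth frames
            return false;
    }
    return false;
}

The depth cutoff keeps the walk bounded by the maximum inlining depth even on very deep stacks.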
+ RecursionCheckFunctor functor(callerFrame, this, Options::maximumInliningDepth()); + vm()->topCallFrame->iterate(functor); + + if (functor.didRecurse()) { + if (Options::verboseCallLink()) + dataLog(" Clearing SABI because recursion was detected.\n"); + m_shouldAlwaysBeInlined = false; + return; + } + + if (callerCodeBlock->m_capabilityLevelState == DFG::CapabilityLevelNotSet) { + dataLog("In call from ", *callerCodeBlock, " ", callerFrame->codeOrigin(), " to ", *this, ": caller's DFG capability level is not set.\n"); + CRASH(); + } + + if (canCompile(callerCodeBlock->m_capabilityLevelState)) + return; + + if (Options::verboseCallLink()) + dataLog(" Clearing SABI because the caller is not a DFG candidate.\n"); + + m_shouldAlwaysBeInlined = false; +#endif +} + +unsigned CodeBlock::reoptimizationRetryCounter() const +{ +#if ENABLE(JIT) + ASSERT(m_reoptimizationRetryCounter <= Options::reoptimizationRetryCounterMax()); + return m_reoptimizationRetryCounter; +#else + return 0; +#endif // ENABLE(JIT) +} + +#if ENABLE(JIT) +void CodeBlock::countReoptimization() +{ + m_reoptimizationRetryCounter++; + if (m_reoptimizationRetryCounter > Options::reoptimizationRetryCounterMax()) + m_reoptimizationRetryCounter = Options::reoptimizationRetryCounterMax(); +} + +unsigned CodeBlock::numberOfDFGCompiles() +{ + ASSERT(JITCode::isBaselineCode(jitType())); + if (Options::testTheFTL()) { + if (m_didFailFTLCompilation) + return 1000000; + return (m_hasBeenCompiledWithFTL ? 1 : 0) + m_reoptimizationRetryCounter; + } + return (JITCode::isOptimizingJIT(replacement()->jitType()) ? 1 : 0) + m_reoptimizationRetryCounter; +} + +int32_t CodeBlock::codeTypeThresholdMultiplier() const +{ + if (codeType() == EvalCode) + return Options::evalThresholdMultiplier(); + + return 1; +} + +double CodeBlock::optimizationThresholdScalingFactor() +{ + // This expression arises from doing a least-squares fit of + // + // F[x_] =: a * Sqrt[x + b] + Abs[c * x] + d + // + // against the data points: + // + // x F[x_] + // 10 0.9 (smallest reasonable code block) + // 200 1.0 (typical small-ish code block) + // 320 1.2 (something I saw in 3d-cube that I wanted to optimize) + // 1268 5.0 (something I saw in 3d-cube that I didn't want to optimize) + // 4000 5.5 (random large size, used to cause the function to converge to a shallow curve of some sort) + // 10000 6.0 (similar to above) + // + // I achieve the minimization using the following Mathematica code: + // + // MyFunctionTemplate[x_, a_, b_, c_, d_] := a*Sqrt[x + b] + Abs[c*x] + d + // + // samples = {{10, 0.9}, {200, 1}, {320, 1.2}, {1268, 5}, {4000, 5.5}, {10000, 6}} + // + // solution = + // Minimize[Plus @@ ((MyFunctionTemplate[#[[1]], a, b, c, d] - #[[2]])^2 & /@ samples), + // {a, b, c, d}][[2]] + // + // And the code below (to initialize a, b, c, d) is generated by: + // + // Print["const double " <> ToString[#[[1]]] <> " = " <> + // If[#[[2]] < 0.00001, "0.0", ToString[#[[2]]]] <> ";"] & /@ solution + // + // We've long known the following to be true: + // - Small code blocks are cheap to optimize and so we should do it sooner rather + // than later. + // - Large code blocks are expensive to optimize and so we should postpone doing so, + // and sometimes have a large enough threshold that we never optimize them. 
+ // - The difference in cost is not totally linear because (a) just invoking the + // DFG incurs some base cost and (b) for large code blocks there is enough slop + // in the correlation between instruction count and the actual compilation cost + // that for those large blocks, the instruction count should not have a strong + // influence on our threshold. + // + // I knew the goals but I didn't know how to achieve them, so I picked an interesting + // example where the heuristics were right (code block in 3d-cube with instruction + // count 320, which got compiled early as it should have been) and one where they were + // totally wrong (code block in 3d-cube with instruction count 1268, which was expensive + // to compile and didn't run often enough to warrant compilation in my opinion), and + // then threw in additional data points that represented my own guess of what our + // heuristics should do for some round-numbered examples. + // + // The expression to which I decided to fit the data arose because I started with an + // affine function, and then did two things: put the linear part in an Abs to ensure + // that the fit didn't end up choosing a negative value of c (which would result in + // the function turning over and going negative for large x) and I threw in a Sqrt + // term because Sqrt represents my intuition that the function should be more sensitive + // to small changes in small values of x, but less sensitive when x gets large. + + // Note that the current fit essentially eliminates the linear portion of the + // expression (c == 0.0). + const double a = 0.061504; + const double b = 1.02406; + const double c = 0.0; + const double d = 0.825914; + + double instructionCount = this->instructionCount(); + + ASSERT(instructionCount); // Make sure this is called only after we have an instruction stream; otherwise it'll just return the value of d, which makes no sense.
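[Annotation, not part of the source] For intuition, plugging a few of the calibration sizes into the fitted curve gives F(10) ≈ 0.8259 + 0.0615·√11.02 ≈ 1.03, F(320) ≈ 0.8259 + 0.0615·√321.02 ≈ 1.93, and F(10000) ≈ 0.8259 + 0.0615·√10001.02 ≈ 6.98; since c == 0.0 the |c·x| term vanishes. Being a least-squares fit, these outputs only approximate the sample targets, but they show the intended shape: the scaling factor grows by well under 10x while the instruction count grows by 1000x.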
+ + double result = d + a * sqrt(instructionCount + b) + c * instructionCount; + + result *= codeTypeThresholdMultiplier(); + + if (Options::verboseOSR()) { + dataLog( + *this, ": instruction count is ", instructionCount, + ", scaling execution counter by ", result, " * ", codeTypeThresholdMultiplier(), + "\n"); + } + return result; +} + +static int32_t clipThreshold(double threshold) +{ + if (threshold < 1.0) + return 1; + + if (threshold > static_cast<double>(std::numeric_limits<int32_t>::max())) + return std::numeric_limits<int32_t>::max(); + + return static_cast<int32_t>(threshold); +} + +int32_t CodeBlock::adjustedCounterValue(int32_t desiredThreshold) +{ + return clipThreshold( + static_cast<double>(desiredThreshold) * + optimizationThresholdScalingFactor() * + (1 << reoptimizationRetryCounter())); +} + +bool CodeBlock::checkIfOptimizationThresholdReached() +{ +#if ENABLE(DFG_JIT) + if (DFG::Worklist* worklist = DFG::existingGlobalDFGWorklistOrNull()) { + if (worklist->compilationState(DFG::CompilationKey(this, DFG::DFGMode)) + == DFG::Worklist::Compiled) { + optimizeNextInvocation(); + return true; + } + } +#endif + + return m_jitExecuteCounter.checkIfThresholdCrossedAndSet(this); +} + +void CodeBlock::optimizeNextInvocation() +{ + if (Options::verboseOSR()) + dataLog(*this, ": Optimizing next invocation.\n"); + m_jitExecuteCounter.setNewThreshold(0, this); +} + +void CodeBlock::dontOptimizeAnytimeSoon() +{ + if (Options::verboseOSR()) + dataLog(*this, ": Not optimizing anytime soon.\n"); + m_jitExecuteCounter.deferIndefinitely(); +} + +void CodeBlock::optimizeAfterWarmUp() +{ + if (Options::verboseOSR()) + dataLog(*this, ": Optimizing after warm-up.\n"); +#if ENABLE(DFG_JIT) + m_jitExecuteCounter.setNewThreshold( + adjustedCounterValue(Options::thresholdForOptimizeAfterWarmUp()), this); +#endif +} + +void CodeBlock::optimizeAfterLongWarmUp() +{ + if (Options::verboseOSR()) + dataLog(*this, ": Optimizing after long warm-up.\n"); +#if ENABLE(DFG_JIT) + m_jitExecuteCounter.setNewThreshold( + adjustedCounterValue(Options::thresholdForOptimizeAfterLongWarmUp()), this); +#endif +} + +void CodeBlock::optimizeSoon() +{ + if (Options::verboseOSR()) + dataLog(*this, ": Optimizing soon.\n"); +#if ENABLE(DFG_JIT) + m_jitExecuteCounter.setNewThreshold( + adjustedCounterValue(Options::thresholdForOptimizeSoon()), this); +#endif +} + +void CodeBlock::forceOptimizationSlowPathConcurrently() +{ + if (Options::verboseOSR()) + dataLog(*this, ": Forcing slow path concurrently.\n"); + m_jitExecuteCounter.forceSlowPathConcurrently(); +} + +#if ENABLE(DFG_JIT) +void CodeBlock::setOptimizationThresholdBasedOnCompilationResult(CompilationResult result) +{ + JITCode::JITType type = jitType(); + if (type != JITCode::BaselineJIT) { + dataLog(*this, ": expected to have baseline code but have ", type, "\n"); + RELEASE_ASSERT_NOT_REACHED(); + } + + CodeBlock* theReplacement = replacement(); + if ((result == CompilationSuccessful) != (theReplacement != this)) { + dataLog(*this, ": we have result = ", result, " but "); + if (theReplacement == this) + dataLog("we are our own replacement.\n"); + else + dataLog("our replacement is ", pointerDump(theReplacement), "\n"); + RELEASE_ASSERT_NOT_REACHED(); + } + + switch (result) { + case CompilationSuccessful: + RELEASE_ASSERT(JITCode::isOptimizingJIT(replacement()->jitType())); + optimizeNextInvocation(); + return; + case CompilationFailed: + dontOptimizeAnytimeSoon(); + return; + case CompilationDeferred: + // We'd like to do dontOptimizeAnytimeSoon() but we cannot 
because + // forceOptimizationSlowPathConcurrently() is inherently racy. It won't + // necessarily guarantee anything. So, we make sure that even if that + // function ends up being a no-op, we still eventually retry and realize + // that we have optimized code ready. + optimizeAfterWarmUp(); + return; + case CompilationInvalidated: + // Retry with exponential backoff. + countReoptimization(); + optimizeAfterWarmUp(); + return; + } + + dataLog("Unrecognized result: ", static_cast<int>(result), "\n"); + RELEASE_ASSERT_NOT_REACHED(); +} + +#endif + +uint32_t CodeBlock::adjustedExitCountThreshold(uint32_t desiredThreshold) +{ + ASSERT(JITCode::isOptimizingJIT(jitType())); + // Compute this the lame way so we don't saturate. This is called infrequently + // enough that this loop won't hurt us. + unsigned result = desiredThreshold; + for (unsigned n = baselineVersion()->reoptimizationRetryCounter(); n--;) { + unsigned newResult = result << 1; + if (newResult < result) + return std::numeric_limits<uint32_t>::max(); + result = newResult; + } + return result; +} + +uint32_t CodeBlock::exitCountThresholdForReoptimization() +{ + return adjustedExitCountThreshold(Options::osrExitCountForReoptimization() * codeTypeThresholdMultiplier()); +} + +uint32_t CodeBlock::exitCountThresholdForReoptimizationFromLoop() +{ + return adjustedExitCountThreshold(Options::osrExitCountForReoptimizationFromLoop() * codeTypeThresholdMultiplier()); +} + +bool CodeBlock::shouldReoptimizeNow() +{ + return osrExitCounter() >= exitCountThresholdForReoptimization(); +} + +bool CodeBlock::shouldReoptimizeFromLoopNow() +{ + return osrExitCounter() >= exitCountThresholdForReoptimizationFromLoop(); +} +#endif + +ArrayProfile* CodeBlock::getArrayProfile(unsigned bytecodeOffset) +{ + for (unsigned i = 0; i < m_arrayProfiles.size(); ++i) { + if (m_arrayProfiles[i].bytecodeOffset() == bytecodeOffset) + return &m_arrayProfiles[i]; + } + return 0; +} + +ArrayProfile* CodeBlock::getOrAddArrayProfile(unsigned bytecodeOffset) +{ + ArrayProfile* result = getArrayProfile(bytecodeOffset); + if (result) + return result; + return addArrayProfile(bytecodeOffset); +} + +void CodeBlock::updateAllPredictionsAndCountLiveness(unsigned& numberOfLiveNonArgumentValueProfiles, unsigned& numberOfSamplesInProfiles) +{ + ConcurrentJITLocker locker(m_lock); + + numberOfLiveNonArgumentValueProfiles = 0; + numberOfSamplesInProfiles = 0; // If this divided by ValueProfile::numberOfBuckets equals numberOfValueProfiles() then value profiles are full. + for (unsigned i = 0; i < totalNumberOfValueProfiles(); ++i) { + ValueProfile* profile = getFromAllValueProfiles(i); + unsigned numSamples = profile->totalNumberOfSamples(); + if (numSamples > ValueProfile::numberOfBuckets) + numSamples = ValueProfile::numberOfBuckets; // We don't want profiles that are extremely hot to be given more weight. 
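[Annotation] Stepping back to adjustedExitCountThreshold() earlier in this hunk: the "lame way" it mentions is doubling the threshold once per baseline reoptimization retry while clamping at the top of the uint32_t range rather than wrapping. A standalone sketch of that saturating scale-up (scaleThreshold is an illustrative name, not a JSC function):

#include <cstdint>
#include <limits>

static uint32_t scaleThreshold(uint32_t threshold, unsigned retryCount)
{
    uint32_t result = threshold;
    while (retryCount--) {
        uint32_t doubled = result << 1;
        if (doubled < result) // the shift wrapped: saturate instead
            return std::numeric_limits<uint32_t>::max();
        result = doubled;
    }
    return result;
}

Each retry therefore makes the code block exponentially more tolerant of OSR exits before it is reoptimized again.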
+ numberOfSamplesInProfiles += numSamples; + if (profile->m_bytecodeOffset < 0) { + profile->computeUpdatedPrediction(locker); + continue; + } + if (profile->numberOfSamples() || profile->m_prediction != SpecNone) + numberOfLiveNonArgumentValueProfiles++; + profile->computeUpdatedPrediction(locker); + } + +#if ENABLE(DFG_JIT) + m_lazyOperandValueProfiles.computeUpdatedPredictions(locker); +#endif +} + +void CodeBlock::updateAllValueProfilePredictions() +{ + unsigned ignoredValue1, ignoredValue2; + updateAllPredictionsAndCountLiveness(ignoredValue1, ignoredValue2); +} + +void CodeBlock::updateAllArrayPredictions() +{ + ConcurrentJITLocker locker(m_lock); + + for (unsigned i = m_arrayProfiles.size(); i--;) + m_arrayProfiles[i].computeUpdatedPrediction(locker, this); + + // Don't count these either, for similar reasons. + for (unsigned i = m_arrayAllocationProfiles.size(); i--;) + m_arrayAllocationProfiles[i].updateIndexingType(); +} + +void CodeBlock::updateAllPredictions() +{ + updateAllValueProfilePredictions(); + updateAllArrayPredictions(); +} + +bool CodeBlock::shouldOptimizeNow() +{ + if (Options::verboseOSR()) + dataLog("Considering optimizing ", *this, "...\n"); + + if (m_optimizationDelayCounter >= Options::maximumOptimizationDelay()) + return true; + + updateAllArrayPredictions(); + + unsigned numberOfLiveNonArgumentValueProfiles; + unsigned numberOfSamplesInProfiles; + updateAllPredictionsAndCountLiveness(numberOfLiveNonArgumentValueProfiles, numberOfSamplesInProfiles); + + if (Options::verboseOSR()) { + dataLogF( + "Profile hotness: %lf (%u / %u), %lf (%u / %u)\n", + (double)numberOfLiveNonArgumentValueProfiles / numberOfValueProfiles(), + numberOfLiveNonArgumentValueProfiles, numberOfValueProfiles(), + (double)numberOfSamplesInProfiles / ValueProfile::numberOfBuckets / numberOfValueProfiles(), + numberOfSamplesInProfiles, ValueProfile::numberOfBuckets * numberOfValueProfiles()); + } + + if ((!numberOfValueProfiles() || (double)numberOfLiveNonArgumentValueProfiles / numberOfValueProfiles() >= Options::desiredProfileLivenessRate()) + && (!totalNumberOfValueProfiles() || (double)numberOfSamplesInProfiles / ValueProfile::numberOfBuckets / totalNumberOfValueProfiles() >= Options::desiredProfileFullnessRate()) + && static_cast<unsigned>(m_optimizationDelayCounter) + 1 >= Options::minimumOptimizationDelay()) + return true; + + ASSERT(m_optimizationDelayCounter < std::numeric_limits<uint8_t>::max()); + m_optimizationDelayCounter++; + optimizeAfterWarmUp(); + return false; +} + +#if ENABLE(DFG_JIT) +void CodeBlock::tallyFrequentExitSites() +{ + ASSERT(JITCode::isOptimizingJIT(jitType())); + ASSERT(alternative()->jitType() == JITCode::BaselineJIT); + + CodeBlock* profiledBlock = alternative(); + + switch (jitType()) { + case JITCode::DFGJIT: { + DFG::JITCode* jitCode = m_jitCode->dfg(); + for (unsigned i = 0; i < jitCode->osrExit.size(); ++i) { + DFG::OSRExit& exit = jitCode->osrExit[i]; + exit.considerAddingAsFrequentExitSite(profiledBlock); + } + break; + } + +#if ENABLE(FTL_JIT) + case JITCode::FTLJIT: { + // There is no easy way to avoid duplicating this code since the FTL::JITCode::osrExit + // vector contains a totally different type, that just so happens to behave like + // DFG::JITCode::osrExit. 
+ FTL::JITCode* jitCode = m_jitCode->ftl(); + for (unsigned i = 0; i < jitCode->osrExit.size(); ++i) { + FTL::OSRExit& exit = jitCode->osrExit[i]; + exit.considerAddingAsFrequentExitSite(profiledBlock); + } + break; + } +#endif + + default: + RELEASE_ASSERT_NOT_REACHED(); + break; + } +} +#endif // ENABLE(DFG_JIT) + +#if ENABLE(VERBOSE_VALUE_PROFILE) +void CodeBlock::dumpValueProfiles() +{ + dataLog("ValueProfile for ", *this, ":\n"); + for (unsigned i = 0; i < totalNumberOfValueProfiles(); ++i) { + ValueProfile* profile = getFromAllValueProfiles(i); + if (profile->m_bytecodeOffset < 0) { + ASSERT(profile->m_bytecodeOffset == -1); + dataLogF(" arg = %u: ", i); + } else + dataLogF(" bc = %d: ", profile->m_bytecodeOffset); + if (!profile->numberOfSamples() && profile->m_prediction == SpecNone) { + dataLogF("<empty>\n"); + continue; + } + profile->dump(WTF::dataFile()); + dataLogF("\n"); + } + dataLog("RareCaseProfile for ", *this, ":\n"); + for (unsigned i = 0; i < numberOfRareCaseProfiles(); ++i) { + RareCaseProfile* profile = rareCaseProfile(i); + dataLogF(" bc = %d: %u\n", profile->m_bytecodeOffset, profile->m_counter); + } + dataLog("SpecialFastCaseProfile for ", *this, ":\n"); + for (unsigned i = 0; i < numberOfSpecialFastCaseProfiles(); ++i) { + RareCaseProfile* profile = specialFastCaseProfile(i); + dataLogF(" bc = %d: %u\n", profile->m_bytecodeOffset, profile->m_counter); + } +} +#endif // ENABLE(VERBOSE_VALUE_PROFILE) + +unsigned CodeBlock::frameRegisterCount() +{ + switch (jitType()) { + case JITCode::InterpreterThunk: + return LLInt::frameRegisterCountFor(this); + +#if ENABLE(JIT) + case JITCode::BaselineJIT: + return JIT::frameRegisterCountFor(this); +#endif // ENABLE(JIT) + +#if ENABLE(DFG_JIT) + case JITCode::DFGJIT: + case JITCode::FTLJIT: + return jitCode()->dfgCommon()->frameRegisterCount; +#endif // ENABLE(DFG_JIT) + + default: + RELEASE_ASSERT_NOT_REACHED(); + return 0; + } +} + +int CodeBlock::stackPointerOffset() +{ + return virtualRegisterForLocal(frameRegisterCount() - 1).offset(); +} + +size_t CodeBlock::predictedMachineCodeSize() +{ + // This will be called from CodeBlock::CodeBlock before either m_vm or the + // instructions have been initialized. It's OK to return 0 because what will really + // matter is the recomputation of this value when the slow path is triggered. + if (!m_vm) + return 0; + + if (!m_vm->machineCodeBytesPerBytecodeWordForBaselineJIT) + return 0; // It's as good a prediction as we'll get. + + // Be conservative: return a size that will be an overestimation 84% of the time. + double multiplier = m_vm->machineCodeBytesPerBytecodeWordForBaselineJIT.mean() + + m_vm->machineCodeBytesPerBytecodeWordForBaselineJIT.standardDeviation(); + + // Be paranoid: silently reject bogus multipliers. Silently doing the "wrong" thing + // here is OK, since this whole method is just a heuristic. + if (multiplier < 0 || multiplier > 1000) + return 0; + + double doubleResult = multiplier * m_instructions.size(); + + // Be even more paranoid: silently reject values that won't fit into a size_t. If + // the function is so huge that we can't even fit it into virtual memory then we + // should probably have some other guards in place to prevent us from even getting + // to this point.
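[Annotation] A note on the "84%" above: if the bytes-per-bytecode-word samples are roughly normally distributed, then mean plus one standard deviation lands at about the 84th percentile (the standard normal CDF at 1 is ≈ 0.8413), so a multiplier chosen that way should overestimate the true size about 84% of the time.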
+ if (doubleResult > std::numeric_limits<size_t>::max()) + return 0; + + return static_cast<size_t>(doubleResult); +} + +bool CodeBlock::usesOpcode(OpcodeID opcodeID) +{ + Interpreter* interpreter = vm()->interpreter; + Instruction* instructionsBegin = instructions().begin(); + unsigned instructionCount = instructions().size(); + + for (unsigned bytecodeOffset = 0; bytecodeOffset < instructionCount; ) { + switch (interpreter->getOpcodeID(instructionsBegin[bytecodeOffset].u.opcode)) { +#define DEFINE_OP(curOpcode, length) \ + case curOpcode: \ + if (curOpcode == opcodeID) \ + return true; \ + bytecodeOffset += length; \ + break; + FOR_EACH_OPCODE_ID(DEFINE_OP) +#undef DEFINE_OP + default: + RELEASE_ASSERT_NOT_REACHED(); + break; + } + } + + return false; +} + +String CodeBlock::nameForRegister(VirtualRegister virtualRegister) +{ + for (unsigned i = 0; i < m_constantRegisters.size(); i++) { + if (m_constantRegisters[i].get().isEmpty()) + continue; + if (SymbolTable* symbolTable = jsDynamicCast<SymbolTable*>(m_constantRegisters[i].get())) { + ConcurrentJITLocker locker(symbolTable->m_lock); + auto end = symbolTable->end(locker); + for (auto ptr = symbolTable->begin(locker); ptr != end; ++ptr) { + if (ptr->value.varOffset() == VarOffset(virtualRegister)) { + // FIXME: This won't work from the compilation thread. + // https://bugs.webkit.org/show_bug.cgi?id=115300 + return ptr->key.get(); + } + } + } + } + if (virtualRegister == thisRegister()) + return ASCIILiteral("this"); + if (virtualRegister.isArgument()) + return String::format("arguments[%3d]", virtualRegister.toArgument()); + + return ""; +} + +ValueProfile* CodeBlock::valueProfileForBytecodeOffset(int bytecodeOffset) +{ + ValueProfile* result = binarySearch<ValueProfile, int>( + m_valueProfiles, m_valueProfiles.size(), bytecodeOffset, + getValueProfileBytecodeOffset<ValueProfile>); + ASSERT(result->m_bytecodeOffset != -1); + ASSERT(instructions()[bytecodeOffset + opcodeLength( + m_vm->interpreter->getOpcodeID( + instructions()[bytecodeOffset].u.opcode)) - 1].u.profile == result); + return result; +} + +void CodeBlock::validate() +{ + BytecodeLivenessAnalysis liveness(this); // Compute directly from scratch so it doesn't affect CodeBlock footprint.
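[Annotation] An aside on the usesOpcode() walk above: JSC bytecode is variable-length, so a scan advances by each opcode's own length (supplied there by the FOR_EACH_OPCODE_ID expansion) rather than by a fixed stride. A minimal standalone sketch of the same pattern, with an illustrative three-opcode table instead of the real one:

#include <cstddef>
#include <vector>

// Illustrative opcodes and lengths; each length counts the opcode slot
// plus its operand slots, as in the real instruction stream.
enum Opcode { OpEnter, OpAdd, OpRet };
static const size_t opcodeLengths[] = { 1, 4, 2 };

static bool streamUsesOpcode(const std::vector<int>& stream, Opcode target)
{
    for (size_t offset = 0; offset < stream.size();) {
        Opcode op = static_cast<Opcode>(stream[offset]); // instruction start
        if (op == target)
            return true;
        offset += opcodeLengths[op]; // skip this instruction and its operands
    }
    return false;
}

Operand slots are never read as opcodes because the cursor always lands on instruction boundaries.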
+ + FastBitVector liveAtHead = liveness.getLivenessInfoAtBytecodeOffset(0); + + if (liveAtHead.numBits() != static_cast<size_t>(m_numCalleeRegisters)) { + beginValidationDidFail(); + dataLog(" Wrong number of bits in result!\n"); + dataLog(" Result: ", liveAtHead, "\n"); + dataLog(" Bit count: ", liveAtHead.numBits(), "\n"); + endValidationDidFail(); + } + + for (unsigned i = m_numCalleeRegisters; i--;) { + VirtualRegister reg = virtualRegisterForLocal(i); + + if (liveAtHead.get(i)) { + beginValidationDidFail(); + dataLog(" Variable ", reg, " is expected to be dead.\n"); + dataLog(" Result: ", liveAtHead, "\n"); + endValidationDidFail(); + } + } +} + +void CodeBlock::beginValidationDidFail() +{ + dataLog("Validation failure in ", *this, ":\n"); + dataLog("\n"); +} + +void CodeBlock::endValidationDidFail() +{ + dataLog("\n"); + dumpBytecode(); + dataLog("\n"); + dataLog("Validation failure.\n"); + RELEASE_ASSERT_NOT_REACHED(); +} + +void CodeBlock::addBreakpoint(unsigned numBreakpoints) +{ + m_numBreakpoints += numBreakpoints; + ASSERT(m_numBreakpoints); + if (JITCode::isOptimizingJIT(jitType())) + jettison(Profiler::JettisonDueToDebuggerBreakpoint); +} + +void CodeBlock::setSteppingMode(CodeBlock::SteppingMode mode) +{ + m_steppingMode = mode; + if (mode == SteppingModeEnabled && JITCode::isOptimizingJIT(jitType())) + jettison(Profiler::JettisonDueToDebuggerStepping); +} + +RareCaseProfile* CodeBlock::rareCaseProfileForBytecodeOffset(int bytecodeOffset) +{ + return tryBinarySearch<RareCaseProfile, int>( + m_rareCaseProfiles, m_rareCaseProfiles.size(), bytecodeOffset, + getRareCaseProfileBytecodeOffset); +} + +#if ENABLE(JIT) +DFG::CapabilityLevel CodeBlock::capabilityLevel() +{ + DFG::CapabilityLevel result = capabilityLevelInternal(); + m_capabilityLevelState = result; + return result; +} +#endif + +void CodeBlock::insertBasicBlockBoundariesForControlFlowProfiler(RefCountedArray<Instruction>& instructions) +{ + const Vector<size_t>& bytecodeOffsets = unlinkedCodeBlock()->opProfileControlFlowBytecodeOffsets(); + for (size_t i = 0, offsetsLength = bytecodeOffsets.size(); i < offsetsLength; i++) { + // Because op_profile_control_flow is emitted at the beginning of every basic block, finding + // the next op_profile_control_flow will give us the text range of a single basic block. + size_t startIdx = bytecodeOffsets[i]; + RELEASE_ASSERT(vm()->interpreter->getOpcodeID(instructions[startIdx].u.opcode) == op_profile_control_flow); + int basicBlockStartOffset = instructions[startIdx + 1].u.operand; + int basicBlockEndOffset; + if (i + 1 < offsetsLength) { + size_t endIdx = bytecodeOffsets[i + 1]; + RELEASE_ASSERT(vm()->interpreter->getOpcodeID(instructions[endIdx].u.opcode) == op_profile_control_flow); + basicBlockEndOffset = instructions[endIdx + 1].u.operand - 1; + } else { + basicBlockEndOffset = m_sourceOffset + m_ownerExecutable->source().length() - 1; // Offset before the closing brace. + basicBlockStartOffset = std::min(basicBlockStartOffset, basicBlockEndOffset); // Some start offsets may be at the closing brace, ensure it is the offset before. + } + + // The following check allows for the same textual JavaScript basic block to have its bytecode emitted more + // than once and still play nice with the control flow profiler. When basicBlockStartOffset is larger than + // basicBlockEndOffset, it indicates that the bytecode generator has emitted code for the same AST node + // more than once (for example: ForInNode, Finally blocks in TryNode, etc). 
Though these are different + // basic blocks at the bytecode level, they are generated from the same textual basic block in the JavaScript + // program. The condition: + // (basicBlockEndOffset < basicBlockStartOffset) + // is encountered when op_profile_control_flow lies across the boundary of these duplicated bytecode basic + // blocks and the textual offset goes from the end of the duplicated block back to the beginning. These + // ranges are dummy ranges and are ignored. The duplicated bytecode basic blocks point to the same + // internal data structure, so if any of them execute, it will record the same textual basic block in the + // JavaScript program as executing. + // At the bytecode level, this situation looks like: + // j: op_profile_control_flow (from j->k, we have basicBlockEndOffset < basicBlockStartOffset) + // ... + // k: op_profile_control_flow (we want to skip over the j->k block and start fresh at offset k as the start of a new basic block k->m). + // ... + // m: op_profile_control_flow + if (basicBlockEndOffset < basicBlockStartOffset) { + RELEASE_ASSERT(i + 1 < offsetsLength); // We should never encounter dummy blocks at the end of a CodeBlock. + instructions[startIdx + 1].u.basicBlockLocation = vm()->controlFlowProfiler()->dummyBasicBlock(); + continue; + } + + BasicBlockLocation* basicBlockLocation = vm()->controlFlowProfiler()->getBasicBlockLocation(m_ownerExecutable->sourceID(), basicBlockStartOffset, basicBlockEndOffset); + + // Find all functions that are enclosed within the range: [basicBlockStartOffset, basicBlockEndOffset] + // and insert these functions' start/end offsets as gaps in the current BasicBlockLocation. + // This is necessary because in the original source text of a JavaScript program, + // function literals form new basic blocks boundaries, but they aren't represented + // inside the CodeBlock's instruction stream. + auto insertFunctionGaps = [basicBlockLocation, basicBlockStartOffset, basicBlockEndOffset] (const WriteBarrier<FunctionExecutable>& functionExecutable) { + const UnlinkedFunctionExecutable* executable = functionExecutable->unlinkedExecutable(); + int functionStart = executable->typeProfilingStartOffset(); + int functionEnd = executable->typeProfilingEndOffset(); + if (functionStart >= basicBlockStartOffset && functionEnd <= basicBlockEndOffset) + basicBlockLocation->insertGap(functionStart, functionEnd); + }; + + for (const WriteBarrier<FunctionExecutable>& executable : m_functionDecls) + insertFunctionGaps(executable); + for (const WriteBarrier<FunctionExecutable>& executable : m_functionExprs) + insertFunctionGaps(executable); + + instructions[startIdx + 1].u.basicBlockLocation = basicBlockLocation; + } +} + +} // namespace JSC diff --git a/Source/JavaScriptCore/bytecode/CodeBlock.h b/Source/JavaScriptCore/bytecode/CodeBlock.h new file mode 100644 index 000000000..9c78eed13 --- /dev/null +++ b/Source/JavaScriptCore/bytecode/CodeBlock.h @@ -0,0 +1,1243 @@ +/* + * Copyright (C) 2008-2015 Apple Inc. All rights reserved. + * Copyright (C) 2008 Cameron Zwarich <cwzwarich@uwaterloo.ca> + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Apple Inc. ("Apple") nor the names of + * its contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef CodeBlock_h +#define CodeBlock_h + +#include "ArrayProfile.h" +#include "ByValInfo.h" +#include "BytecodeConventions.h" +#include "BytecodeLivenessAnalysis.h" +#include "CallLinkInfo.h" +#include "CallReturnOffsetToBytecodeOffset.h" +#include "CodeBlockHash.h" +#include "CodeBlockSet.h" +#include "ConcurrentJITLock.h" +#include "CodeOrigin.h" +#include "CodeType.h" +#include "CompactJITCodeMap.h" +#include "DFGCommon.h" +#include "DFGCommonData.h" +#include "DFGExitProfile.h" +#include "DeferredCompilationCallback.h" +#include "EvalCodeCache.h" +#include "ExecutionCounter.h" +#include "ExpressionRangeInfo.h" +#include "HandlerInfo.h" +#include "ObjectAllocationProfile.h" +#include "Options.h" +#include "PutPropertySlot.h" +#include "Instruction.h" +#include "JITCode.h" +#include "JITWriteBarrier.h" +#include "JSGlobalObject.h" +#include "JumpTable.h" +#include "LLIntCallLinkInfo.h" +#include "LazyOperandValueProfile.h" +#include "ProfilerCompilation.h" +#include "ProfilerJettisonReason.h" +#include "RegExpObject.h" +#include "StructureStubInfo.h" +#include "UnconditionalFinalizer.h" +#include "ValueProfile.h" +#include "VirtualRegister.h" +#include "Watchpoint.h" +#include <wtf/Bag.h> +#include <wtf/FastMalloc.h> +#include <wtf/RefCountedArray.h> +#include <wtf/RefPtr.h> +#include <wtf/SegmentedVector.h> +#include <wtf/Vector.h> +#include <wtf/text/WTFString.h> + +namespace JSC { + +class ExecState; +class LLIntOffsetsExtractor; +class RepatchBuffer; +class TypeLocation; + +enum ReoptimizationMode { DontCountReoptimization, CountReoptimization }; + +class CodeBlock : public ThreadSafeRefCounted<CodeBlock>, public UnconditionalFinalizer, public WeakReferenceHarvester { + WTF_MAKE_FAST_ALLOCATED; + friend class BytecodeLivenessAnalysis; + friend class JIT; + friend class LLIntOffsetsExtractor; +public: + enum CopyParsedBlockTag { CopyParsedBlock }; +protected: + CodeBlock(CopyParsedBlockTag, CodeBlock& other); + + CodeBlock(ScriptExecutable* ownerExecutable, UnlinkedCodeBlock*, JSScope*, PassRefPtr<SourceProvider>, unsigned sourceOffset, unsigned firstLineColumnOffset); + + WriteBarrier<JSGlobalObject> m_globalObject; + Heap* m_heap; + +public: + JS_EXPORT_PRIVATE virtual ~CodeBlock(); + + UnlinkedCodeBlock* unlinkedCodeBlock() const { return m_unlinkedCode.get(); } + + CString inferredName() const; + 
CodeBlockHash hash() const; + bool hasHash() const; + bool isSafeToComputeHash() const; + CString hashAsStringIfPossible() const; + CString sourceCodeForTools() const; // Not quite the actual source we parsed; this will do things like prefix the source for a function with a reified signature. + CString sourceCodeOnOneLine() const; // As sourceCodeForTools(), but replaces all whitespace runs with a single space. + void dumpAssumingJITType(PrintStream&, JITCode::JITType) const; + void dump(PrintStream&) const; + + int numParameters() const { return m_numParameters; } + void setNumParameters(int newValue); + + int* addressOfNumParameters() { return &m_numParameters; } + static ptrdiff_t offsetOfNumParameters() { return OBJECT_OFFSETOF(CodeBlock, m_numParameters); } + + CodeBlock* alternative() { return m_alternative.get(); } + PassRefPtr<CodeBlock> releaseAlternative() { return m_alternative.release(); } + void setAlternative(PassRefPtr<CodeBlock> alternative) { m_alternative = alternative; } + + template <typename Functor> void forEachRelatedCodeBlock(Functor&& functor) + { + Functor f(std::forward<Functor>(functor)); + Vector<CodeBlock*, 4> codeBlocks; + codeBlocks.append(this); + + while (!codeBlocks.isEmpty()) { + CodeBlock* currentCodeBlock = codeBlocks.takeLast(); + f(currentCodeBlock); + + if (CodeBlock* alternative = currentCodeBlock->alternative()) + codeBlocks.append(alternative); + if (CodeBlock* osrEntryBlock = currentCodeBlock->specialOSREntryBlockOrNull()) + codeBlocks.append(osrEntryBlock); + } + } + + CodeSpecializationKind specializationKind() const + { + return specializationFromIsConstruct(m_isConstructor); + } + + CodeBlock* baselineAlternative(); + + // FIXME: Get rid of this. + // https://bugs.webkit.org/show_bug.cgi?id=123677 + CodeBlock* baselineVersion(); + + void visitAggregate(SlotVisitor&); + + void dumpSource(); + void dumpSource(PrintStream&); + + void dumpBytecode(); + void dumpBytecode(PrintStream&); + void dumpBytecode( + PrintStream&, unsigned bytecodeOffset, + const StubInfoMap& = StubInfoMap(), const CallLinkInfoMap& = CallLinkInfoMap()); + void printStructures(PrintStream&, const Instruction*); + void printStructure(PrintStream&, const char* name, const Instruction*, int operand); + + bool isStrictMode() const { return m_isStrictMode; } + ECMAMode ecmaMode() const { return isStrictMode() ? 
StrictMode : NotStrictMode; } + + inline bool isKnownNotImmediate(int index) + { + if (index == m_thisRegister.offset() && !m_isStrictMode) + return true; + + if (isConstantRegisterIndex(index)) + return getConstant(index).isCell(); + + return false; + } + + ALWAYS_INLINE bool isTemporaryRegisterIndex(int index) + { + return index >= m_numVars; + } + + enum class RequiredHandler { + CatchHandler, + AnyHandler + }; + HandlerInfo* handlerForBytecodeOffset(unsigned bytecodeOffset, RequiredHandler = RequiredHandler::AnyHandler); + unsigned lineNumberForBytecodeOffset(unsigned bytecodeOffset); + unsigned columnNumberForBytecodeOffset(unsigned bytecodeOffset); + void expressionRangeForBytecodeOffset(unsigned bytecodeOffset, int& divot, + int& startOffset, int& endOffset, unsigned& line, unsigned& column); + + void getStubInfoMap(const ConcurrentJITLocker&, StubInfoMap& result); + void getStubInfoMap(StubInfoMap& result); + + void getCallLinkInfoMap(const ConcurrentJITLocker&, CallLinkInfoMap& result); + void getCallLinkInfoMap(CallLinkInfoMap& result); + + void getByValInfoMap(const ConcurrentJITLocker&, ByValInfoMap& result); + void getByValInfoMap(ByValInfoMap& result); + +#if ENABLE(JIT) + StructureStubInfo* addStubInfo(); + Bag<StructureStubInfo>::iterator stubInfoBegin() { return m_stubInfos.begin(); } + Bag<StructureStubInfo>::iterator stubInfoEnd() { return m_stubInfos.end(); } + + // O(n) operation. Use getStubInfoMap() unless you really only intend to get one + // stub info. + StructureStubInfo* findStubInfo(CodeOrigin); + + void resetStub(StructureStubInfo&); + + ByValInfo* addByValInfo(); + + CallLinkInfo* addCallLinkInfo(); + Bag<CallLinkInfo>::iterator callLinkInfosBegin() { return m_callLinkInfos.begin(); } + Bag<CallLinkInfo>::iterator callLinkInfosEnd() { return m_callLinkInfos.end(); } + + // This is a slow function call used primarily for compiling OSR exits in the case + // that there had been inlining. Chances are if you want to use this, you're really + // looking for a CallLinkInfoMap to amortize the cost of calling this. 
+ CallLinkInfo* getCallLinkInfoForBytecodeIndex(unsigned bytecodeIndex); +#endif // ENABLE(JIT) + + void unlinkIncomingCalls(); + +#if ENABLE(JIT) + void unlinkCalls(); + + void linkIncomingCall(ExecState* callerFrame, CallLinkInfo*); + void linkIncomingPolymorphicCall(ExecState* callerFrame, PolymorphicCallNode*); +#endif // ENABLE(JIT) + + void linkIncomingCall(ExecState* callerFrame, LLIntCallLinkInfo*); + + void setJITCodeMap(std::unique_ptr<CompactJITCodeMap> jitCodeMap) + { + m_jitCodeMap = WTF::move(jitCodeMap); + } + CompactJITCodeMap* jitCodeMap() + { + return m_jitCodeMap.get(); + } + + unsigned bytecodeOffset(Instruction* returnAddress) + { + RELEASE_ASSERT(returnAddress >= instructions().begin() && returnAddress < instructions().end()); + return static_cast<Instruction*>(returnAddress) - instructions().begin(); + } + + unsigned numberOfInstructions() const { return m_instructions.size(); } + RefCountedArray<Instruction>& instructions() { return m_instructions; } + const RefCountedArray<Instruction>& instructions() const { return m_instructions; } + + size_t predictedMachineCodeSize(); + + bool usesOpcode(OpcodeID); + + unsigned instructionCount() const { return m_instructions.size(); } + + // Exactly equivalent to codeBlock->ownerExecutable()->installCode(codeBlock); + void install(); + + // Exactly equivalent to codeBlock->ownerExecutable()->newReplacementCodeBlockFor(codeBlock->specializationKind()) + PassRefPtr<CodeBlock> newReplacement(); + + void setJITCode(PassRefPtr<JITCode> code) + { + ASSERT(m_heap->isDeferred()); + m_heap->reportExtraMemoryAllocated(code->size()); + ConcurrentJITLocker locker(m_lock); + WTF::storeStoreFence(); // This is probably not needed because the lock will also do something similar, but it's good to be paranoid. + m_jitCode = code; + } + PassRefPtr<JITCode> jitCode() { return m_jitCode; } + JITCode::JITType jitType() const + { + JITCode* jitCode = m_jitCode.get(); + WTF::loadLoadFence(); + JITCode::JITType result = JITCode::jitTypeFor(jitCode); + WTF::loadLoadFence(); // This probably isn't needed. Oh well, paranoia is good. 
+ return result; + } + + bool hasBaselineJITProfiling() const + { + return jitType() == JITCode::BaselineJIT; + } + +#if ENABLE(JIT) + virtual CodeBlock* replacement() = 0; + + virtual DFG::CapabilityLevel capabilityLevelInternal() = 0; + DFG::CapabilityLevel capabilityLevel(); + DFG::CapabilityLevel capabilityLevelState() { return m_capabilityLevelState; } + + bool hasOptimizedReplacement(JITCode::JITType typeToReplace); + bool hasOptimizedReplacement(); // the typeToReplace is my JITType +#endif + + void jettison(Profiler::JettisonReason, ReoptimizationMode = DontCountReoptimization, const FireDetail* = nullptr); + + ScriptExecutable* ownerExecutable() const { return m_ownerExecutable.get(); } + + void setVM(VM* vm) { m_vm = vm; } + VM* vm() { return m_vm; } + + void setThisRegister(VirtualRegister thisRegister) { m_thisRegister = thisRegister; } + VirtualRegister thisRegister() const { return m_thisRegister; } + + bool usesEval() const { return m_unlinkedCode->usesEval(); } + + void setScopeRegister(VirtualRegister scopeRegister) + { + ASSERT(scopeRegister.isLocal() || !scopeRegister.isValid()); + m_scopeRegister = scopeRegister; + } + + VirtualRegister scopeRegister() const + { + return m_scopeRegister; + } + + void setActivationRegister(VirtualRegister activationRegister) + { + m_lexicalEnvironmentRegister = activationRegister; + } + + VirtualRegister activationRegister() const + { + ASSERT(m_lexicalEnvironmentRegister.isValid()); + return m_lexicalEnvironmentRegister; + } + + VirtualRegister uncheckedActivationRegister() + { + return m_lexicalEnvironmentRegister; + } + + bool needsActivation() const + { + ASSERT(m_lexicalEnvironmentRegister.isValid() == m_needsActivation); + return m_needsActivation; + } + + CodeType codeType() const { return m_unlinkedCode->codeType(); } + PutPropertySlot::Context putByIdContext() const + { + if (codeType() == EvalCode) + return PutPropertySlot::PutByIdEval; + return PutPropertySlot::PutById; + } + + SourceProvider* source() const { return m_source.get(); } + unsigned sourceOffset() const { return m_sourceOffset; } + unsigned firstLineColumnOffset() const { return m_firstLineColumnOffset; } + + size_t numberOfJumpTargets() const { return m_unlinkedCode->numberOfJumpTargets(); } + unsigned jumpTarget(int index) const { return m_unlinkedCode->jumpTarget(index); } + + String nameForRegister(VirtualRegister); + + unsigned numberOfArgumentValueProfiles() + { + ASSERT(m_numParameters >= 0); + ASSERT(m_argumentValueProfiles.size() == static_cast<unsigned>(m_numParameters)); + return m_argumentValueProfiles.size(); + } + ValueProfile* valueProfileForArgument(unsigned argumentIndex) + { + ValueProfile* result = &m_argumentValueProfiles[argumentIndex]; + ASSERT(result->m_bytecodeOffset == -1); + return result; + } + + unsigned numberOfValueProfiles() { return m_valueProfiles.size(); } + ValueProfile* valueProfile(int index) { return &m_valueProfiles[index]; } + ValueProfile* valueProfileForBytecodeOffset(int bytecodeOffset); + SpeculatedType valueProfilePredictionForBytecodeOffset(const ConcurrentJITLocker& locker, int bytecodeOffset) + { + return valueProfileForBytecodeOffset(bytecodeOffset)->computeUpdatedPrediction(locker); + } + + unsigned totalNumberOfValueProfiles() + { + return numberOfArgumentValueProfiles() + numberOfValueProfiles(); + } + ValueProfile* getFromAllValueProfiles(unsigned index) + { + if (index < numberOfArgumentValueProfiles()) + return valueProfileForArgument(index); + return valueProfile(index - numberOfArgumentValueProfiles()); + } 
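[Annotation] The accessors above treat argument value profiles and bytecode value profiles as one logical array: indices below numberOfArgumentValueProfiles() select argument profiles and the remainder are offset into the bytecode profiles. A minimal standalone sketch of that indexing, where Profile and Profiles are illustrative stand-ins rather than JSC types:

#include <cstddef>
#include <vector>

struct Profile { int bytecodeOffset; }; // -1 marks an argument profile

struct Profiles {
    std::vector<Profile> arguments; // one per parameter, bytecodeOffset == -1
    std::vector<Profile> bytecode;  // one per profiled bytecode site

    size_t total() const { return arguments.size() + bytecode.size(); }

    // Mirrors getFromAllValueProfiles(): low indices pick argument
    // profiles, the rest index into the bytecode profiles.
    Profile& fromAll(size_t index)
    {
        if (index < arguments.size())
            return arguments[index];
        return bytecode[index - arguments.size()];
    }
};

Keeping the two vectors separate but exposing one index space lets callers iterate every profile in a single loop, as updateAllPredictionsAndCountLiveness() does.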
+ + RareCaseProfile* addRareCaseProfile(int bytecodeOffset) + { + m_rareCaseProfiles.append(RareCaseProfile(bytecodeOffset)); + return &m_rareCaseProfiles.last(); + } + unsigned numberOfRareCaseProfiles() { return m_rareCaseProfiles.size(); } + RareCaseProfile* rareCaseProfile(int index) { return &m_rareCaseProfiles[index]; } + RareCaseProfile* rareCaseProfileForBytecodeOffset(int bytecodeOffset); + + bool likelyToTakeSlowCase(int bytecodeOffset) + { + if (!hasBaselineJITProfiling()) + return false; + unsigned value = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter; + return value >= Options::likelyToTakeSlowCaseMinimumCount(); + } + + bool couldTakeSlowCase(int bytecodeOffset) + { + if (!hasBaselineJITProfiling()) + return false; + unsigned value = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter; + return value >= Options::couldTakeSlowCaseMinimumCount(); + } + + RareCaseProfile* addSpecialFastCaseProfile(int bytecodeOffset) + { + m_specialFastCaseProfiles.append(RareCaseProfile(bytecodeOffset)); + return &m_specialFastCaseProfiles.last(); + } + unsigned numberOfSpecialFastCaseProfiles() { return m_specialFastCaseProfiles.size(); } + RareCaseProfile* specialFastCaseProfile(int index) { return &m_specialFastCaseProfiles[index]; } + RareCaseProfile* specialFastCaseProfileForBytecodeOffset(int bytecodeOffset) + { + return tryBinarySearch<RareCaseProfile, int>( + m_specialFastCaseProfiles, m_specialFastCaseProfiles.size(), bytecodeOffset, + getRareCaseProfileBytecodeOffset); + } + + bool likelyToTakeSpecialFastCase(int bytecodeOffset) + { + if (!hasBaselineJITProfiling()) + return false; + unsigned specialFastCaseCount = specialFastCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter; + return specialFastCaseCount >= Options::likelyToTakeSlowCaseMinimumCount(); + } + + bool couldTakeSpecialFastCase(int bytecodeOffset) + { + if (!hasBaselineJITProfiling()) + return false; + unsigned specialFastCaseCount = specialFastCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter; + return specialFastCaseCount >= Options::couldTakeSlowCaseMinimumCount(); + } + + bool likelyToTakeDeepestSlowCase(int bytecodeOffset) + { + if (!hasBaselineJITProfiling()) + return false; + unsigned slowCaseCount = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter; + unsigned specialFastCaseCount = specialFastCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter; + unsigned value = slowCaseCount - specialFastCaseCount; + return value >= Options::likelyToTakeSlowCaseMinimumCount(); + } + + bool likelyToTakeAnySlowCase(int bytecodeOffset) + { + if (!hasBaselineJITProfiling()) + return false; + unsigned slowCaseCount = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter; + unsigned specialFastCaseCount = specialFastCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter; + unsigned value = slowCaseCount + specialFastCaseCount; + return value >= Options::likelyToTakeSlowCaseMinimumCount(); + } + + unsigned numberOfArrayProfiles() const { return m_arrayProfiles.size(); } + const ArrayProfileVector& arrayProfiles() { return m_arrayProfiles; } + ArrayProfile* addArrayProfile(unsigned bytecodeOffset) + { + m_arrayProfiles.append(ArrayProfile(bytecodeOffset)); + return &m_arrayProfiles.last(); + } + ArrayProfile* getArrayProfile(unsigned bytecodeOffset); + ArrayProfile* getOrAddArrayProfile(unsigned bytecodeOffset); + + // Exception handling support + + size_t numberOfExceptionHandlers() const { return m_rareData ? 
m_rareData->m_exceptionHandlers.size() : 0; } + HandlerInfo& exceptionHandler(int index) { RELEASE_ASSERT(m_rareData); return m_rareData->m_exceptionHandlers[index]; } + + bool hasExpressionInfo() { return m_unlinkedCode->hasExpressionInfo(); } + +#if ENABLE(DFG_JIT) + Vector<CodeOrigin, 0, UnsafeVectorOverflow>& codeOrigins() + { + return m_jitCode->dfgCommon()->codeOrigins; + } + + // Having code origins implies that there has been some inlining. + bool hasCodeOrigins() + { + return JITCode::isOptimizingJIT(jitType()); + } + + bool canGetCodeOrigin(unsigned index) + { + if (!hasCodeOrigins()) + return false; + return index < codeOrigins().size(); + } + + CodeOrigin codeOrigin(unsigned index) + { + return codeOrigins()[index]; + } + + bool addFrequentExitSite(const DFG::FrequentExitSite& site) + { + ASSERT(JITCode::isBaselineCode(jitType())); + ConcurrentJITLocker locker(m_lock); + return m_exitProfile.add(locker, site); + } + + bool hasExitSite(const ConcurrentJITLocker& locker, const DFG::FrequentExitSite& site) const + { + return m_exitProfile.hasExitSite(locker, site); + } + bool hasExitSite(const DFG::FrequentExitSite& site) const + { + ConcurrentJITLocker locker(m_lock); + return hasExitSite(locker, site); + } + + DFG::ExitProfile& exitProfile() { return m_exitProfile; } + + CompressedLazyOperandValueProfileHolder& lazyOperandValueProfiles() + { + return m_lazyOperandValueProfiles; + } +#endif // ENABLE(DFG_JIT) + + // Constant Pool +#if ENABLE(DFG_JIT) + size_t numberOfIdentifiers() const { return m_unlinkedCode->numberOfIdentifiers() + numberOfDFGIdentifiers(); } + size_t numberOfDFGIdentifiers() const + { + if (!JITCode::isOptimizingJIT(jitType())) + return 0; + + return m_jitCode->dfgCommon()->dfgIdentifiers.size(); + } + + const Identifier& identifier(int index) const + { + size_t unlinkedIdentifiers = m_unlinkedCode->numberOfIdentifiers(); + if (static_cast<unsigned>(index) < unlinkedIdentifiers) + return m_unlinkedCode->identifier(index); + ASSERT(JITCode::isOptimizingJIT(jitType())); + return m_jitCode->dfgCommon()->dfgIdentifiers[index - unlinkedIdentifiers]; + } +#else + size_t numberOfIdentifiers() const { return m_unlinkedCode->numberOfIdentifiers(); } + const Identifier& identifier(int index) const { return m_unlinkedCode->identifier(index); } +#endif + + Vector<WriteBarrier<Unknown>>& constants() { return m_constantRegisters; } + Vector<SourceCodeRepresentation>& constantsSourceCodeRepresentation() { return m_constantsSourceCodeRepresentation; } + unsigned addConstant(JSValue v) + { + unsigned result = m_constantRegisters.size(); + m_constantRegisters.append(WriteBarrier<Unknown>()); + m_constantRegisters.last().set(m_globalObject->vm(), m_ownerExecutable.get(), v); + m_constantsSourceCodeRepresentation.append(SourceCodeRepresentation::Other); + return result; + } + + unsigned addConstantLazily() + { + unsigned result = m_constantRegisters.size(); + m_constantRegisters.append(WriteBarrier<Unknown>()); + m_constantsSourceCodeRepresentation.append(SourceCodeRepresentation::Other); + return result; + } + + WriteBarrier<Unknown>& constantRegister(int index) { return m_constantRegisters[index - FirstConstantRegisterIndex]; } + ALWAYS_INLINE bool isConstantRegisterIndex(int index) const { return index >= FirstConstantRegisterIndex; } + ALWAYS_INLINE JSValue getConstant(int index) const { return m_constantRegisters[index - FirstConstantRegisterIndex].get(); } + ALWAYS_INLINE SourceCodeRepresentation constantSourceCodeRepresentation(int index) const { return 
m_constantsSourceCodeRepresentation[index - FirstConstantRegisterIndex]; } + + FunctionExecutable* functionDecl(int index) { return m_functionDecls[index].get(); } + int numberOfFunctionDecls() { return m_functionDecls.size(); } + FunctionExecutable* functionExpr(int index) { return m_functionExprs[index].get(); } + + RegExp* regexp(int index) const { return m_unlinkedCode->regexp(index); } + + unsigned numberOfConstantBuffers() const + { + if (!m_rareData) + return 0; + return m_rareData->m_constantBuffers.size(); + } + unsigned addConstantBuffer(const Vector<JSValue>& buffer) + { + createRareDataIfNecessary(); + unsigned size = m_rareData->m_constantBuffers.size(); + m_rareData->m_constantBuffers.append(buffer); + return size; + } + + Vector<JSValue>& constantBufferAsVector(unsigned index) + { + ASSERT(m_rareData); + return m_rareData->m_constantBuffers[index]; + } + JSValue* constantBuffer(unsigned index) + { + return constantBufferAsVector(index).data(); + } + + Heap* heap() const { return m_heap; } + JSGlobalObject* globalObject() { return m_globalObject.get(); } + + JSGlobalObject* globalObjectFor(CodeOrigin); + + BytecodeLivenessAnalysis& livenessAnalysis() + { + { + ConcurrentJITLocker locker(m_lock); + if (!!m_livenessAnalysis) + return *m_livenessAnalysis; + } + std::unique_ptr<BytecodeLivenessAnalysis> analysis = + std::make_unique<BytecodeLivenessAnalysis>(this); + { + ConcurrentJITLocker locker(m_lock); + if (!m_livenessAnalysis) + m_livenessAnalysis = WTF::move(analysis); + return *m_livenessAnalysis; + } + } + + void validate(); + + // Jump Tables + + size_t numberOfSwitchJumpTables() const { return m_rareData ? m_rareData->m_switchJumpTables.size() : 0; } + SimpleJumpTable& addSwitchJumpTable() { createRareDataIfNecessary(); m_rareData->m_switchJumpTables.append(SimpleJumpTable()); return m_rareData->m_switchJumpTables.last(); } + SimpleJumpTable& switchJumpTable(int tableIndex) { RELEASE_ASSERT(m_rareData); return m_rareData->m_switchJumpTables[tableIndex]; } + void clearSwitchJumpTables() + { + if (!m_rareData) + return; + m_rareData->m_switchJumpTables.clear(); + } + + size_t numberOfStringSwitchJumpTables() const { return m_rareData ? m_rareData->m_stringSwitchJumpTables.size() : 0; } + StringJumpTable& addStringSwitchJumpTable() { createRareDataIfNecessary(); m_rareData->m_stringSwitchJumpTables.append(StringJumpTable()); return m_rareData->m_stringSwitchJumpTables.last(); } + StringJumpTable& stringSwitchJumpTable(int tableIndex) { RELEASE_ASSERT(m_rareData); return m_rareData->m_stringSwitchJumpTables[tableIndex]; } + + EvalCodeCache& evalCodeCache() { createRareDataIfNecessary(); return m_rareData->m_evalCodeCache; } + + enum ShrinkMode { + // Shrink prior to generating machine code that may point directly into vectors. + EarlyShrink, + + // Shrink after generating machine code, and after possibly creating new vectors + // and appending to others. At this time it is not safe to shrink certain vectors + // because we would have generated machine code that references them directly. + LateShrink + }; + void shrinkToFit(ShrinkMode); + + // Functions for controlling when JITting kicks in, in a mixed mode + // execution world. 
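+    // Editor's aside: a simplified model of the LLInt execution counter used by
+    // the functions below; the starting value is made up for illustration, the
+    // real thresholds come from Options::thresholdForJITAfterWarmUp() and friends:
+    //
+    //     struct ExecutionCounterSketch {
+    //         int32_t m_counter = -100; // assumed warm-up budget of 100 executions
+    //         bool countAndCheck() { return ++m_counter >= 0; } // fires once warmed up
+    //     };
+    //
+    // checkIfJITThresholdReached() plays the countAndCheck() role here.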
+ + bool checkIfJITThresholdReached() + { + return m_llintExecuteCounter.checkIfThresholdCrossedAndSet(this); + } + + void dontJITAnytimeSoon() + { + m_llintExecuteCounter.deferIndefinitely(); + } + + void jitAfterWarmUp() + { + m_llintExecuteCounter.setNewThreshold(Options::thresholdForJITAfterWarmUp(), this); + } + + void jitSoon() + { + m_llintExecuteCounter.setNewThreshold(Options::thresholdForJITSoon(), this); + } + + const BaselineExecutionCounter& llintExecuteCounter() const + { + return m_llintExecuteCounter; + } + + // Functions for controlling when tiered compilation kicks in. This + // controls both when the optimizing compiler is invoked and when OSR + // entry happens. Two triggers exist: the loop trigger and the return + // trigger. In either case, when an addition to m_jitExecuteCounter + // causes it to become non-negative, the optimizing compiler is + // invoked. This includes a fast check to see if this CodeBlock has + // already been optimized (i.e. replacement() returns a CodeBlock + // that was optimized with a higher tier JIT than this one). In the + // case of the loop trigger, if the optimized compilation succeeds + // (or has already succeeded in the past) then OSR is attempted to + // redirect program flow into the optimized code. + + // These functions are called from within the optimization triggers, + // and are used as a single point at which we define the heuristics + // for how much warm-up is mandated before the next optimization + // trigger fires. All CodeBlocks start out with optimizeAfterWarmUp(), + // as this is called from the CodeBlock constructor. + + // When we observe a lot of speculation failures, we trigger a + // reoptimization. But each time, we increase the optimization trigger + // to avoid thrashing. + JS_EXPORT_PRIVATE unsigned reoptimizationRetryCounter() const; + void countReoptimization(); +#if ENABLE(JIT) + unsigned numberOfDFGCompiles(); + + int32_t codeTypeThresholdMultiplier() const; + + int32_t adjustedCounterValue(int32_t desiredThreshold); + + int32_t* addressOfJITExecuteCounter() + { + return &m_jitExecuteCounter.m_counter; + } + + static ptrdiff_t offsetOfJITExecuteCounter() { return OBJECT_OFFSETOF(CodeBlock, m_jitExecuteCounter) + OBJECT_OFFSETOF(BaselineExecutionCounter, m_counter); } + static ptrdiff_t offsetOfJITExecutionActiveThreshold() { return OBJECT_OFFSETOF(CodeBlock, m_jitExecuteCounter) + OBJECT_OFFSETOF(BaselineExecutionCounter, m_activeThreshold); } + static ptrdiff_t offsetOfJITExecutionTotalCount() { return OBJECT_OFFSETOF(CodeBlock, m_jitExecuteCounter) + OBJECT_OFFSETOF(BaselineExecutionCounter, m_totalCount); } + + const BaselineExecutionCounter& jitExecuteCounter() const { return m_jitExecuteCounter; } + + unsigned optimizationDelayCounter() const { return m_optimizationDelayCounter; } + + // Check if the optimization threshold has been reached, and if not, + // adjust the heuristics accordingly. Returns true if the threshold has + // been reached. + bool checkIfOptimizationThresholdReached(); + + // Call this to force the next optimization trigger to fire. This is + // rarely wise, since optimization triggers are typically more + // expensive than executing baseline code. + void optimizeNextInvocation(); + + // Call this to prevent optimization from happening again. Note that + // optimization will still happen after roughly 2^29 invocations, + // so this is really meant to delay that as much as possible. This + // is called if optimization failed, and we expect it to fail in + // the future as well.
+ void dontOptimizeAnytimeSoon(); + + // Call this to reinitialize the counter to its starting state, + // forcing a warm-up to happen before the next optimization trigger + // fires. This is called in the CodeBlock constructor. It also + // makes sense to call this if an OSR exit occurred. Note that + // OSR exit code is generated code, so the value of the execute + // counter that this corresponds to is also available directly. + void optimizeAfterWarmUp(); + + // Call this to force an optimization trigger to fire only after + // a lot of warm-up. + void optimizeAfterLongWarmUp(); + + // Call this to cause an optimization trigger to fire soon, but + // not necessarily the next one. This makes sense if optimization + // succeeds. Successful optimization means that all calls are + // relinked to the optimized code, so this only affects call + // frames that are still executing this CodeBlock. The value here + // is tuned to strike a balance between the cost of OSR entry + // (which is too high to warrant making every loop back edge + // trigger OSR immediately) and the cost of executing baseline + // code (which is high enough that we don't necessarily want to + // have a full warm-up). The intuition for calling this instead of + // optimizeNextInvocation() is for the case of recursive functions + // with loops. Consider that there may be N call frames of some + // recursive function, for a reasonably large value of N. The top + // one triggers optimization, and then returns, and then all of + // the others return. We don't want optimization to be triggered on + // each return, as that would be superfluous. It only makes sense + // to trigger optimization if one of those functions becomes hot + // in the baseline code. + void optimizeSoon(); + + void forceOptimizationSlowPathConcurrently(); + + void setOptimizationThresholdBasedOnCompilationResult(CompilationResult); + + uint32_t osrExitCounter() const { return m_osrExitCounter; } + + void countOSRExit() { m_osrExitCounter++; } + + uint32_t* addressOfOSRExitCounter() { return &m_osrExitCounter; } + + static ptrdiff_t offsetOfOSRExitCounter() { return OBJECT_OFFSETOF(CodeBlock, m_osrExitCounter); } + + uint32_t adjustedExitCountThreshold(uint32_t desiredThreshold); + uint32_t exitCountThresholdForReoptimization(); + uint32_t exitCountThresholdForReoptimizationFromLoop(); + bool shouldReoptimizeNow(); + bool shouldReoptimizeFromLoopNow(); +#else // No JIT + void optimizeAfterWarmUp() { } + unsigned numberOfDFGCompiles() { return 0; } +#endif + + bool shouldOptimizeNow(); + void updateAllValueProfilePredictions(); + void updateAllArrayPredictions(); + void updateAllPredictions(); + + unsigned frameRegisterCount(); + int stackPointerOffset(); + + bool hasOpDebugForLineAndColumn(unsigned line, unsigned column); + + bool hasDebuggerRequests() const { return m_debuggerRequests; } + void* debuggerRequestsAddress() { return &m_debuggerRequests; } + + void addBreakpoint(unsigned numBreakpoints); + void removeBreakpoint(unsigned numBreakpoints) + { + ASSERT(m_numBreakpoints >= numBreakpoints); + m_numBreakpoints -= numBreakpoints; + } + + enum SteppingMode { + SteppingModeDisabled, + SteppingModeEnabled + }; + void setSteppingMode(SteppingMode); + + void clearDebuggerRequests() + { + m_steppingMode = SteppingModeDisabled; + m_numBreakpoints = 0; + } + + // FIXME: Make these remaining members private.
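+    // Editor's sketch: the debugger-request fields are packed into the single
+    // word behind debuggerRequestsAddress() (see the union over m_debuggerRequests
+    // among the members below), so generated code can poll one load. Roughly
+    // (the helper name is hypothetical):
+    //
+    //     bool debuggerWantsAttentionSketch(CodeBlock* codeBlock)
+    //     {
+    //         // Nonzero means a debugger statement, stepping mode, or >= 1 breakpoint.
+    //         return !!*static_cast<unsigned*>(codeBlock->debuggerRequestsAddress());
+    //     }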
+ + int m_numCalleeRegisters; + int m_numVars; + bool m_isConstructor : 1; + + // This is intentionally public; it's the responsibility of anyone doing any + // of the following to hold the lock: + // + // - Modifying any inline cache in this code block. + // + // - Querying any inline cache in this code block, from a thread other than + // the main thread. + // + // Additionally, it's only legal to modify the inline cache on the main + // thread. This means that the main thread can query the inline cache without + // locking. This is crucial since executing the inline cache is effectively + // "querying" it. + // + // Another exception to the rules is that the GC can do whatever it wants + // without holding any locks, because the GC is guaranteed to wait until any + // concurrent compilation threads finish what they're doing. + mutable ConcurrentJITLock m_lock; + + bool m_shouldAlwaysBeInlined; // Not a bitfield because the JIT wants to store to it. + bool m_allTransitionsHaveBeenMarked : 1; // Initialized and used on every GC. + + bool m_didFailFTLCompilation : 1; + bool m_hasBeenCompiledWithFTL : 1; + + // Internal methods for use by validation code. They would be private if it + // weren't for the fact that we use them from anonymous namespaces. + void beginValidationDidFail(); + NO_RETURN_DUE_TO_CRASH void endValidationDidFail(); + + bool isKnownToBeLiveDuringGC(); // Will only return valid results when called during GC. Assumes that you've already established that the owner executable is live. + + struct RareData { + WTF_MAKE_FAST_ALLOCATED; + public: + Vector<HandlerInfo> m_exceptionHandlers; + + // Buffers used for large array literals + Vector<Vector<JSValue>> m_constantBuffers; + + // Jump Tables + Vector<SimpleJumpTable> m_switchJumpTables; + Vector<StringJumpTable> m_stringSwitchJumpTables; + + EvalCodeCache m_evalCodeCache; + }; + +protected: + virtual void visitWeakReferences(SlotVisitor&) override; + virtual void finalizeUnconditionally() override; + +#if ENABLE(DFG_JIT) + void tallyFrequentExitSites(); +#else + void tallyFrequentExitSites() { } +#endif + +private: + friend class CodeBlockSet; + + CodeBlock* specialOSREntryBlockOrNull(); + + void noticeIncomingCall(ExecState* callerFrame); + + double optimizationThresholdScalingFactor(); + + void updateAllPredictionsAndCountLiveness(unsigned& numberOfLiveNonArgumentValueProfiles, unsigned& numberOfSamplesInProfiles); + + void setConstantRegisters(const Vector<WriteBarrier<Unknown>>& constants, const Vector<SourceCodeRepresentation>& constantsSourceCodeRepresentation) + { + ASSERT(constants.size() == constantsSourceCodeRepresentation.size()); + size_t count = constants.size(); + m_constantRegisters.resizeToFit(count); + for (size_t i = 0; i < count; i++) + m_constantRegisters[i].set(*m_vm, ownerExecutable(), constants[i].get()); + m_constantsSourceCodeRepresentation = constantsSourceCodeRepresentation; + } + + void replaceConstant(int index, JSValue value) + { + ASSERT(isConstantRegisterIndex(index) && static_cast<size_t>(index - FirstConstantRegisterIndex) < m_constantRegisters.size()); + m_constantRegisters[index - FirstConstantRegisterIndex].set(m_globalObject->vm(), m_ownerExecutable.get(), value); + } + + void dumpBytecode( + PrintStream&, ExecState*, const Instruction* begin, const Instruction*&, + const StubInfoMap& = StubInfoMap(), const CallLinkInfoMap& = CallLinkInfoMap()); + + CString registerName(int r) const; + CString constantName(int index) const; + void printUnaryOp(PrintStream&, ExecState*, int location, const
Instruction*&, const char* op); + void printBinaryOp(PrintStream&, ExecState*, int location, const Instruction*&, const char* op); + void printConditionalJump(PrintStream&, ExecState*, const Instruction*, const Instruction*&, int location, const char* op); + void printGetByIdOp(PrintStream&, ExecState*, int location, const Instruction*&); + void printGetByIdCacheStatus(PrintStream&, ExecState*, int location, const StubInfoMap&); + enum CacheDumpMode { DumpCaches, DontDumpCaches }; + void printCallOp(PrintStream&, ExecState*, int location, const Instruction*&, const char* op, CacheDumpMode, bool& hasPrintedProfiling, const CallLinkInfoMap&); + void printPutByIdOp(PrintStream&, ExecState*, int location, const Instruction*&, const char* op); + void printPutByIdCacheStatus(PrintStream&, ExecState*, int location, const StubInfoMap&); + void printLocationAndOp(PrintStream&, ExecState*, int location, const Instruction*&, const char* op); + void printLocationOpAndRegisterOperand(PrintStream&, ExecState*, int location, const Instruction*& it, const char* op, int operand); + + void beginDumpProfiling(PrintStream&, bool& hasPrintedProfiling); + void dumpValueProfiling(PrintStream&, const Instruction*&, bool& hasPrintedProfiling); + void dumpArrayProfiling(PrintStream&, const Instruction*&, bool& hasPrintedProfiling); + void dumpRareCaseProfile(PrintStream&, const char* name, RareCaseProfile*, bool& hasPrintedProfiling); + + bool shouldImmediatelyAssumeLivenessDuringScan(); + + void propagateTransitions(SlotVisitor&); + void determineLiveness(SlotVisitor&); + + void stronglyVisitStrongReferences(SlotVisitor&); + void stronglyVisitWeakReferences(SlotVisitor&); + + void createRareDataIfNecessary() + { + if (!m_rareData) + m_rareData = std::make_unique<RareData>(); + } + + void insertBasicBlockBoundariesForControlFlowProfiler(RefCountedArray<Instruction>&); + +#if ENABLE(JIT) + void resetStubInternal(RepatchBuffer&, StructureStubInfo&); + void resetStubDuringGCInternal(RepatchBuffer&, StructureStubInfo&); +#endif + WriteBarrier<UnlinkedCodeBlock> m_unlinkedCode; + int m_numParameters; + union { + unsigned m_debuggerRequests; + struct { + unsigned m_hasDebuggerStatement : 1; + unsigned m_steppingMode : 1; + unsigned m_numBreakpoints : 30; + }; + }; + WriteBarrier<ScriptExecutable> m_ownerExecutable; + VM* m_vm; + + RefCountedArray<Instruction> m_instructions; + VirtualRegister m_thisRegister; + VirtualRegister m_scopeRegister; + VirtualRegister m_lexicalEnvironmentRegister; + + bool m_isStrictMode; + bool m_needsActivation; + bool m_mayBeExecuting; + Atomic<bool> m_visitAggregateHasBeenCalled; + + RefPtr<SourceProvider> m_source; + unsigned m_sourceOffset; + unsigned m_firstLineColumnOffset; + unsigned m_codeType; + + Vector<LLIntCallLinkInfo> m_llintCallLinkInfos; + SentinelLinkedList<LLIntCallLinkInfo, BasicRawSentinelNode<LLIntCallLinkInfo>> m_incomingLLIntCalls; + RefPtr<JITCode> m_jitCode; +#if ENABLE(JIT) + Bag<StructureStubInfo> m_stubInfos; + Bag<ByValInfo> m_byValInfos; + Bag<CallLinkInfo> m_callLinkInfos; + SentinelLinkedList<CallLinkInfo, BasicRawSentinelNode<CallLinkInfo>> m_incomingCalls; + SentinelLinkedList<PolymorphicCallNode, BasicRawSentinelNode<PolymorphicCallNode>> m_incomingPolymorphicCalls; +#endif + std::unique_ptr<CompactJITCodeMap> m_jitCodeMap; +#if ENABLE(DFG_JIT) + // This is relevant to non-DFG code blocks that serve as the profiled code block + // for DFG code blocks. 
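+    // Editor's note, a sketch of the intended flow rather than original text: on
+    // a frequent OSR exit the baseline block records the site, and the next DFG
+    // compile consults it before speculating the same way again:
+    //
+    //     // baseline side: profiledBlock->addFrequentExitSite(site);
+    //     // DFG planning:  if (profiledBlock->hasExitSite(site)) { /* speculate less */ }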
+ DFG::ExitProfile m_exitProfile; + CompressedLazyOperandValueProfileHolder m_lazyOperandValueProfiles; +#endif + Vector<ValueProfile> m_argumentValueProfiles; + Vector<ValueProfile> m_valueProfiles; + SegmentedVector<RareCaseProfile, 8> m_rareCaseProfiles; + SegmentedVector<RareCaseProfile, 8> m_specialFastCaseProfiles; + Vector<ArrayAllocationProfile> m_arrayAllocationProfiles; + ArrayProfileVector m_arrayProfiles; + Vector<ObjectAllocationProfile> m_objectAllocationProfiles; + + // Constant Pool + COMPILE_ASSERT(sizeof(Register) == sizeof(WriteBarrier<Unknown>), Register_must_be_same_size_as_WriteBarrier_Unknown); + // TODO: This could just be a pointer to m_unlinkedCodeBlock's data, but the DFG mutates + // it, so we're stuck with it for now. + Vector<WriteBarrier<Unknown>> m_constantRegisters; + Vector<SourceCodeRepresentation> m_constantsSourceCodeRepresentation; + Vector<WriteBarrier<FunctionExecutable>> m_functionDecls; + Vector<WriteBarrier<FunctionExecutable>> m_functionExprs; + + RefPtr<CodeBlock> m_alternative; + + BaselineExecutionCounter m_llintExecuteCounter; + + BaselineExecutionCounter m_jitExecuteCounter; + int32_t m_totalJITExecutions; + uint32_t m_osrExitCounter; + uint16_t m_optimizationDelayCounter; + uint16_t m_reoptimizationRetryCounter; + + mutable CodeBlockHash m_hash; + + std::unique_ptr<BytecodeLivenessAnalysis> m_livenessAnalysis; + + std::unique_ptr<RareData> m_rareData; +#if ENABLE(JIT) + DFG::CapabilityLevel m_capabilityLevelState; +#endif +}; + +// Program code is not marked by any function, so we make the global object +// responsible for marking it. + +class GlobalCodeBlock : public CodeBlock { +protected: + GlobalCodeBlock(CopyParsedBlockTag, GlobalCodeBlock& other) + : CodeBlock(CopyParsedBlock, other) + { + } + + GlobalCodeBlock(ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlinkedCodeBlock, JSScope* scope, PassRefPtr<SourceProvider> sourceProvider, unsigned sourceOffset, unsigned firstLineColumnOffset) + : CodeBlock(ownerExecutable, unlinkedCodeBlock, scope, sourceProvider, sourceOffset, firstLineColumnOffset) + { + } +}; + +class ProgramCodeBlock : public GlobalCodeBlock { +public: + ProgramCodeBlock(CopyParsedBlockTag, ProgramCodeBlock& other) + : GlobalCodeBlock(CopyParsedBlock, other) + { + } + + ProgramCodeBlock(ProgramExecutable* ownerExecutable, UnlinkedProgramCodeBlock* unlinkedCodeBlock, JSScope* scope, PassRefPtr<SourceProvider> sourceProvider, unsigned firstLineColumnOffset) + : GlobalCodeBlock(ownerExecutable, unlinkedCodeBlock, scope, sourceProvider, 0, firstLineColumnOffset) + { + } + +#if ENABLE(JIT) +protected: + virtual CodeBlock* replacement() override; + virtual DFG::CapabilityLevel capabilityLevelInternal() override; +#endif +}; + +class EvalCodeBlock : public GlobalCodeBlock { +public: + EvalCodeBlock(CopyParsedBlockTag, EvalCodeBlock& other) + : GlobalCodeBlock(CopyParsedBlock, other) + { + } + + EvalCodeBlock(EvalExecutable* ownerExecutable, UnlinkedEvalCodeBlock* unlinkedCodeBlock, JSScope* scope, PassRefPtr<SourceProvider> sourceProvider) + : GlobalCodeBlock(ownerExecutable, unlinkedCodeBlock, scope, sourceProvider, 0, 1) + { + } + + const Identifier& variable(unsigned index) { return unlinkedEvalCodeBlock()->variable(index); } + unsigned numVariables() { return unlinkedEvalCodeBlock()->numVariables(); } + +#if ENABLE(JIT) +protected: + virtual CodeBlock* replacement() override; + virtual DFG::CapabilityLevel capabilityLevelInternal() override; +#endif + +private: + UnlinkedEvalCodeBlock* unlinkedEvalCodeBlock() const 
{ return jsCast<UnlinkedEvalCodeBlock*>(unlinkedCodeBlock()); } +}; + +class FunctionCodeBlock : public CodeBlock { +public: + FunctionCodeBlock(CopyParsedBlockTag, FunctionCodeBlock& other) + : CodeBlock(CopyParsedBlock, other) + { + } + + FunctionCodeBlock(FunctionExecutable* ownerExecutable, UnlinkedFunctionCodeBlock* unlinkedCodeBlock, JSScope* scope, PassRefPtr<SourceProvider> sourceProvider, unsigned sourceOffset, unsigned firstLineColumnOffset) + : CodeBlock(ownerExecutable, unlinkedCodeBlock, scope, sourceProvider, sourceOffset, firstLineColumnOffset) + { + } + +#if ENABLE(JIT) +protected: + virtual CodeBlock* replacement() override; + virtual DFG::CapabilityLevel capabilityLevelInternal() override; +#endif +}; + +inline CodeBlock* baselineCodeBlockForInlineCallFrame(InlineCallFrame* inlineCallFrame) +{ + RELEASE_ASSERT(inlineCallFrame); + ExecutableBase* executable = inlineCallFrame->executable.get(); + RELEASE_ASSERT(executable->structure()->classInfo() == FunctionExecutable::info()); + return static_cast<FunctionExecutable*>(executable)->baselineCodeBlockFor(inlineCallFrame->specializationKind()); +} + +inline CodeBlock* baselineCodeBlockForOriginAndBaselineCodeBlock(const CodeOrigin& codeOrigin, CodeBlock* baselineCodeBlock) +{ + if (codeOrigin.inlineCallFrame) + return baselineCodeBlockForInlineCallFrame(codeOrigin.inlineCallFrame); + return baselineCodeBlock; +} + +inline Register& ExecState::r(int index) +{ + CodeBlock* codeBlock = this->codeBlock(); + if (codeBlock->isConstantRegisterIndex(index)) + return *reinterpret_cast<Register*>(&codeBlock->constantRegister(index)); + return this[index]; +} + +inline Register& ExecState::r(VirtualRegister reg) +{ + return r(reg.offset()); +} + +inline Register& ExecState::uncheckedR(int index) +{ + RELEASE_ASSERT(index < FirstConstantRegisterIndex); + return this[index]; +} + +inline Register& ExecState::uncheckedR(VirtualRegister reg) +{ + return uncheckedR(reg.offset()); +} + +inline void CodeBlockSet::mark(void* candidateCodeBlock) +{ + // We have to check for 0 and -1 because those are used by the HashMap as markers. + uintptr_t value = reinterpret_cast<uintptr_t>(candidateCodeBlock); + + // This checks for both of those nasty cases in one go. + // 0 + 1 = 1 + // -1 + 1 = 0 + if (value + 1 <= 1) + return; + + CodeBlock* codeBlock = static_cast<CodeBlock*>(candidateCodeBlock); + if (!m_oldCodeBlocks.contains(codeBlock) && !m_newCodeBlocks.contains(codeBlock)) + return; + + mark(codeBlock); +} + +inline void CodeBlockSet::mark(CodeBlock* codeBlock) +{ + if (!codeBlock) + return; + + if (codeBlock->m_mayBeExecuting) + return; + + codeBlock->m_mayBeExecuting = true; + // We might not have cleared the marks for this CodeBlock, but we need to visit it. 
+ codeBlock->m_visitAggregateHasBeenCalled.store(false, std::memory_order_relaxed); +#if ENABLE(GGC) + m_currentlyExecuting.append(codeBlock); +#endif +} + +template <typename Functor> inline void ScriptExecutable::forEachCodeBlock(Functor&& functor) +{ + switch (type()) { + case ProgramExecutableType: { + if (CodeBlock* codeBlock = jsCast<ProgramExecutable*>(this)->m_programCodeBlock.get()) + codeBlock->forEachRelatedCodeBlock(std::forward<Functor>(functor)); + break; + } + + case EvalExecutableType: { + if (CodeBlock* codeBlock = jsCast<EvalExecutable*>(this)->m_evalCodeBlock.get()) + codeBlock->forEachRelatedCodeBlock(std::forward<Functor>(functor)); + break; + } + + case FunctionExecutableType: { + Functor f(std::forward<Functor>(functor)); + FunctionExecutable* executable = jsCast<FunctionExecutable*>(this); + if (CodeBlock* codeBlock = executable->m_codeBlockForCall.get()) + codeBlock->forEachRelatedCodeBlock(f); + if (CodeBlock* codeBlock = executable->m_codeBlockForConstruct.get()) + codeBlock->forEachRelatedCodeBlock(f); + break; + } + default: + RELEASE_ASSERT_NOT_REACHED(); + } +} + +} // namespace JSC + +#endif // CodeBlock_h diff --git a/Source/JavaScriptCore/bytecode/CodeBlockHash.cpp b/Source/JavaScriptCore/bytecode/CodeBlockHash.cpp new file mode 100644 index 000000000..87c092f64 --- /dev/null +++ b/Source/JavaScriptCore/bytecode/CodeBlockHash.cpp @@ -0,0 +1,68 @@ +/* + * Copyright (C) 2012, 2013 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "CodeBlockHash.h" + +#include "SourceCode.h" +#include <wtf/SHA1.h> +#include <wtf/SixCharacterHash.h> + +namespace JSC { + +CodeBlockHash::CodeBlockHash(const char* string) + : m_hash(sixCharacterHashStringToInteger(string)) +{ +} + +CodeBlockHash::CodeBlockHash(const SourceCode& sourceCode, CodeSpecializationKind kind) + : m_hash(0) +{ + SHA1 sha1; + sha1.addBytes(sourceCode.toUTF8()); + SHA1::Digest digest; + sha1.computeHash(digest); + m_hash += digest[0] | (digest[1] << 8) | (digest[2] << 16) | (digest[3] << 24); + m_hash ^= static_cast<unsigned>(kind); + + // Ensure that 0 corresponds to the hash not having been computed. 
+ if (!m_hash) + m_hash = 1; +} + +void CodeBlockHash::dump(PrintStream& out) const +{ + std::array<char, 7> buffer = integerToSixCharacterHashString(m_hash); + +#if !ASSERT_DISABLED + CodeBlockHash recompute(buffer.data()); + ASSERT(recompute == *this); +#endif // !ASSERT_DISABLED + + out.print(buffer.data()); +} + +} // namespace JSC + diff --git a/Source/JavaScriptCore/bytecode/CodeBlockHash.h b/Source/JavaScriptCore/bytecode/CodeBlockHash.h new file mode 100644 index 000000000..4e3398867 --- /dev/null +++ b/Source/JavaScriptCore/bytecode/CodeBlockHash.h @@ -0,0 +1,81 @@ +/* + * Copyright (C) 2012 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef CodeBlockHash_h +#define CodeBlockHash_h + +#include "CodeSpecializationKind.h" +#include <wtf/PrintStream.h> + +// CodeBlock hashes are useful for informally identifying code blocks. They correspond +// to the low 32 bits of a SHA1 hash of the source code with two low bits flipped +// according to the role that the code block serves (call, construct). Additionally, a +// hash is typically rendered as a six-character alphanumeric string, which can be +// retrieved by using toString(const CodeBlockHash&). Finally, we support CodeBlockHashes +// for native functions, in which case the hash is replaced by the function address. + +namespace JSC { + +class SourceCode; + +class CodeBlockHash { +public: + CodeBlockHash() + : m_hash(0) + { + } + + explicit CodeBlockHash(unsigned hash) + : m_hash(hash) + { + } + + CodeBlockHash(const SourceCode&, CodeSpecializationKind); + + explicit CodeBlockHash(const char*); + + bool isSet() const { return !!m_hash; } + bool operator!() const { return !isSet(); } + + unsigned hash() const { return m_hash; } + + void dump(PrintStream&) const; + + // Comparison methods useful for bisection.
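+    // Editor's sketch: "bisection" here means narrowing a misbehaving code block
+    // down by hash range; a harness might gate behavior on a half-open interval
+    // (inSuspectRange is a hypothetical helper):
+    //
+    //     bool inSuspectRange(CodeBlockHash h, CodeBlockHash lo, CodeBlockHash hi)
+    //     {
+    //         return h >= lo && h < hi; // relies on the comparison operators below
+    //     }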
+ bool operator==(const CodeBlockHash& other) const { return hash() == other.hash(); } + bool operator!=(const CodeBlockHash& other) const { return hash() != other.hash(); } + bool operator<(const CodeBlockHash& other) const { return hash() < other.hash(); } + bool operator>(const CodeBlockHash& other) const { return hash() > other.hash(); } + bool operator<=(const CodeBlockHash& other) const { return hash() <= other.hash(); } + bool operator>=(const CodeBlockHash& other) const { return hash() >= other.hash(); } + +private: + unsigned m_hash; +}; + +} // namespace JSC + +#endif // CodeBlockHash_h diff --git a/Source/JavaScriptCore/bytecode/CodeBlockJettisoningWatchpoint.cpp b/Source/JavaScriptCore/bytecode/CodeBlockJettisoningWatchpoint.cpp new file mode 100644 index 000000000..f9c6b1e55 --- /dev/null +++ b/Source/JavaScriptCore/bytecode/CodeBlockJettisoningWatchpoint.cpp @@ -0,0 +1,44 @@ +/* + * Copyright (C) 2013-2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "CodeBlockJettisoningWatchpoint.h" + +#include "CodeBlock.h" +#include "DFGCommon.h" +#include "JSCInlines.h" + +namespace JSC { + +void CodeBlockJettisoningWatchpoint::fireInternal(const FireDetail& detail) +{ + if (DFG::shouldShowDisassembly()) + dataLog("Firing watchpoint ", RawPointer(this), " on ", *m_codeBlock, "\n"); + + m_codeBlock->jettison(Profiler::JettisonDueToUnprofiledWatchpoint, CountReoptimization, &detail); +} + +} // namespace JSC + diff --git a/Source/JavaScriptCore/bytecode/CodeBlockJettisoningWatchpoint.h b/Source/JavaScriptCore/bytecode/CodeBlockJettisoningWatchpoint.h new file mode 100644 index 000000000..b5e6dd330 --- /dev/null +++ b/Source/JavaScriptCore/bytecode/CodeBlockJettisoningWatchpoint.h @@ -0,0 +1,52 @@ +/* + * Copyright (C) 2013 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef CodeBlockJettisoningWatchpoint_h +#define CodeBlockJettisoningWatchpoint_h + +#include "Watchpoint.h" + +namespace JSC { + +class CodeBlock; + +class CodeBlockJettisoningWatchpoint : public Watchpoint { +public: + CodeBlockJettisoningWatchpoint(CodeBlock* codeBlock) + : m_codeBlock(codeBlock) + { + } + +protected: + virtual void fireInternal(const FireDetail&) override; + +private: + CodeBlock* m_codeBlock; +}; + +} // namespace JSC + +#endif // CodeBlockJettisoningWatchpoint_h + diff --git a/Source/JavaScriptCore/bytecode/CodeBlockWithJITType.h b/Source/JavaScriptCore/bytecode/CodeBlockWithJITType.h new file mode 100644 index 000000000..d87085841 --- /dev/null +++ b/Source/JavaScriptCore/bytecode/CodeBlockWithJITType.h @@ -0,0 +1,56 @@ +/* + * Copyright (C) 2012 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef CodeBlockWithJITType_h +#define CodeBlockWithJITType_h + +#include "CodeBlock.h" + +namespace JSC { + +// We sometimes want to print the CodeBlock's ID before setting its JITCode. At that +// point the CodeBlock will claim a bogus JITType. This helper class lets us do that.
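+// For example (editor's illustration), a compiler log line can be printed before
+// the JITCode is installed:
+//
+//     dataLog("Compiling ", CodeBlockWithJITType(codeBlock, JITCode::DFGJIT), "\n");
+//
+// which routes through dump() below instead of consulting codeBlock->jitType().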
+ +class CodeBlockWithJITType { +public: + CodeBlockWithJITType(CodeBlock* codeBlock, JITCode::JITType jitType) + : m_codeBlock(codeBlock) + , m_jitType(jitType) + { + } + + void dump(PrintStream& out) const + { + m_codeBlock->dumpAssumingJITType(out, m_jitType); + } +private: + CodeBlock* m_codeBlock; + JITCode::JITType m_jitType; +}; + +} // namespace JSC + +#endif // CodeBlockWithJITType_h + diff --git a/Source/JavaScriptCore/bytecode/CodeOrigin.cpp b/Source/JavaScriptCore/bytecode/CodeOrigin.cpp new file mode 100644 index 000000000..15f759165 --- /dev/null +++ b/Source/JavaScriptCore/bytecode/CodeOrigin.cpp @@ -0,0 +1,247 @@ +/* + * Copyright (C) 2012-2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "config.h" +#include "CodeOrigin.h" + +#include "CallFrame.h" +#include "CodeBlock.h" +#include "Executable.h" +#include "JSCInlines.h" + +namespace JSC { + +unsigned CodeOrigin::inlineDepthForCallFrame(InlineCallFrame* inlineCallFrame) +{ + unsigned result = 1; + for (InlineCallFrame* current = inlineCallFrame; current; current = current->caller.inlineCallFrame) + result++; + return result; +} + +unsigned CodeOrigin::inlineDepth() const +{ + return inlineDepthForCallFrame(inlineCallFrame); +} + +bool CodeOrigin::isApproximatelyEqualTo(const CodeOrigin& other) const +{ + CodeOrigin a = *this; + CodeOrigin b = other; + + if (!a.isSet()) + return !b.isSet(); + if (!b.isSet()) + return false; + + if (a.isHashTableDeletedValue()) + return b.isHashTableDeletedValue(); + if (b.isHashTableDeletedValue()) + return false; + + for (;;) { + ASSERT(a.isSet()); + ASSERT(b.isSet()); + + if (a.bytecodeIndex != b.bytecodeIndex) + return false; + + if ((!!a.inlineCallFrame) != (!!b.inlineCallFrame)) + return false; + + if (!a.inlineCallFrame) + return true; + + if (a.inlineCallFrame->executable.get() != b.inlineCallFrame->executable.get()) + return false; + + a = a.inlineCallFrame->caller; + b = b.inlineCallFrame->caller; + } +} + +unsigned CodeOrigin::approximateHash() const +{ + if (!isSet()) + return 0; + if (isHashTableDeletedValue()) + return 1; + + unsigned result = 2; + CodeOrigin codeOrigin = *this; + for (;;) { + result += codeOrigin.bytecodeIndex; + + if (!codeOrigin.inlineCallFrame) + return result; + + result += WTF::PtrHash<JSCell*>::hash(codeOrigin.inlineCallFrame->executable.get()); + + codeOrigin = codeOrigin.inlineCallFrame->caller; + } +} + +Vector<CodeOrigin> CodeOrigin::inlineStack() const +{ + Vector<CodeOrigin> result(inlineDepth()); + result.last() = *this; + unsigned index = result.size() - 2; + for (InlineCallFrame* current = inlineCallFrame; current; current = current->caller.inlineCallFrame) + result[index--] = current->caller; + RELEASE_ASSERT(!result[0].inlineCallFrame); + return result; +} + +void CodeOrigin::dump(PrintStream& out) const +{ + if (!isSet()) { + out.print("<none>"); + return; + } + + Vector<CodeOrigin> stack = inlineStack(); + for (unsigned i = 0; i < stack.size(); ++i) { + if (i) + out.print(" --> "); + + if (InlineCallFrame* frame = stack[i].inlineCallFrame) { + out.print(frame->briefFunctionInformation(), ":<", RawPointer(frame->executable.get()), "> "); + if (frame->isClosureCall) + out.print("(closure) "); + } + + out.print("bc#", stack[i].bytecodeIndex); + } +} + +void CodeOrigin::dumpInContext(PrintStream& out, DumpContext*) const +{ + dump(out); +} + +JSFunction* InlineCallFrame::calleeConstant() const +{ + if (calleeRecovery.isConstant()) + return jsCast<JSFunction*>(calleeRecovery.constant()); + return nullptr; +} + +void InlineCallFrame::visitAggregate(SlotVisitor& visitor) +{ + // FIXME: This is an antipattern for two reasons. References introduced by the DFG + // that aren't in the original CodeBlock being compiled should be weakly referenced. + // Inline call frames aren't in the original CodeBlock, so they qualify as weak. Also, + // those weak references should already be tracked in the DFG as weak FrozenValues. So, + // there is probably no need for this. We already have assertions that this should be + // unnecessary. Finally, just marking the executable and not anything else in the inline + // call frame is almost certainly insufficient for what this method thought it was going + // to accomplish. 
+ // https://bugs.webkit.org/show_bug.cgi?id=146613 + visitor.append(&executable); +} + +JSFunction* InlineCallFrame::calleeForCallFrame(ExecState* exec) const +{ + return jsCast<JSFunction*>(calleeRecovery.recover(exec)); +} + +CodeBlockHash InlineCallFrame::hash() const +{ + return jsCast<FunctionExecutable*>(executable.get())->codeBlockFor( + specializationKind())->hash(); +} + +CString InlineCallFrame::hashAsStringIfPossible() const +{ + return jsCast<FunctionExecutable*>(executable.get())->codeBlockFor( + specializationKind())->hashAsStringIfPossible(); +} + +CString InlineCallFrame::inferredName() const +{ + return jsCast<FunctionExecutable*>(executable.get())->inferredName().utf8(); +} + +CodeBlock* InlineCallFrame::baselineCodeBlock() const +{ + return jsCast<FunctionExecutable*>(executable.get())->baselineCodeBlockFor(specializationKind()); +} + +void InlineCallFrame::dumpBriefFunctionInformation(PrintStream& out) const +{ + out.print(inferredName(), "#", hashAsStringIfPossible()); +} + +void InlineCallFrame::dumpInContext(PrintStream& out, DumpContext* context) const +{ + out.print(briefFunctionInformation(), ":<", RawPointer(executable.get())); + if (executable->isStrictMode()) + out.print(" (StrictMode)"); + out.print(", bc#", caller.bytecodeIndex, ", ", kind); + if (isClosureCall) + out.print(", closure call"); + else + out.print(", known callee: ", inContext(calleeRecovery.constant(), context)); + out.print(", numArgs+this = ", arguments.size()); + out.print(", stackOffset = ", stackOffset); + out.print(" (", virtualRegisterForLocal(0), " maps to ", virtualRegisterForLocal(0) + stackOffset, ")>"); +} + +void InlineCallFrame::dump(PrintStream& out) const +{ + dumpInContext(out, 0); +} + +} // namespace JSC + +namespace WTF { + +void printInternal(PrintStream& out, JSC::InlineCallFrame::Kind kind) +{ + switch (kind) { + case JSC::InlineCallFrame::Call: + out.print("Call"); + return; + case JSC::InlineCallFrame::Construct: + out.print("Construct"); + return; + case JSC::InlineCallFrame::CallVarargs: + out.print("CallVarargs"); + return; + case JSC::InlineCallFrame::ConstructVarargs: + out.print("ConstructVarargs"); + return; + case JSC::InlineCallFrame::GetterCall: + out.print("GetterCall"); + return; + case JSC::InlineCallFrame::SetterCall: + out.print("SetterCall"); + return; + } + RELEASE_ASSERT_NOT_REACHED(); +} + +} // namespace WTF + diff --git a/Source/JavaScriptCore/bytecode/CodeOrigin.h b/Source/JavaScriptCore/bytecode/CodeOrigin.h new file mode 100644 index 000000000..d1879a327 --- /dev/null +++ b/Source/JavaScriptCore/bytecode/CodeOrigin.h @@ -0,0 +1,295 @@ +/* + * Copyright (C) 2011-2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. 
OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef CodeOrigin_h +#define CodeOrigin_h + +#include "CodeBlockHash.h" +#include "CodeSpecializationKind.h" +#include "ValueRecovery.h" +#include "WriteBarrier.h" +#include <wtf/BitVector.h> +#include <wtf/HashMap.h> +#include <wtf/PrintStream.h> +#include <wtf/StdLibExtras.h> +#include <wtf/Vector.h> + +namespace JSC { + +struct InlineCallFrame; +class ExecState; +class ScriptExecutable; +class JSFunction; + +struct CodeOrigin { + static const unsigned invalidBytecodeIndex = UINT_MAX; + + // Bytecode offset that you'd use to re-execute this instruction, and the + // bytecode index of the bytecode instruction that produces some result that + // you're interested in (used for mapping Nodes whose values you're using + // to bytecode instructions that have the appropriate value profile). + unsigned bytecodeIndex; + + InlineCallFrame* inlineCallFrame; + + CodeOrigin() + : bytecodeIndex(invalidBytecodeIndex) + , inlineCallFrame(0) + { + } + + CodeOrigin(WTF::HashTableDeletedValueType) + : bytecodeIndex(invalidBytecodeIndex) + , inlineCallFrame(deletedMarker()) + { + } + + explicit CodeOrigin(unsigned bytecodeIndex, InlineCallFrame* inlineCallFrame = 0) + : bytecodeIndex(bytecodeIndex) + , inlineCallFrame(inlineCallFrame) + { + ASSERT(bytecodeIndex < invalidBytecodeIndex); + } + + bool isSet() const { return bytecodeIndex != invalidBytecodeIndex; } + bool operator!() const { return !isSet(); } + + bool isHashTableDeletedValue() const + { + return bytecodeIndex == invalidBytecodeIndex && !!inlineCallFrame; + } + + // The inline depth is the depth of the inline stack, so 1 = not inlined, + // 2 = inlined one deep, etc. + unsigned inlineDepth() const; + + // If the code origin corresponds to inlined code, gives you the heap object that + // would have owned the code if it had not been inlined. Otherwise returns 0. + ScriptExecutable* codeOriginOwner() const; + + int stackOffset() const; + + static unsigned inlineDepthForCallFrame(InlineCallFrame*); + + unsigned hash() const; + bool operator==(const CodeOrigin& other) const; + bool operator!=(const CodeOrigin& other) const { return !(*this == other); } + + // This checks if the two code origins correspond to the same stack trace snippets, + // but ignore whether the InlineCallFrame's are identical. + bool isApproximatelyEqualTo(const CodeOrigin& other) const; + + unsigned approximateHash() const; + + // Get the inline stack. This is slow, and is intended for debugging only. + Vector<CodeOrigin> inlineStack() const; + + void dump(PrintStream&) const; + void dumpInContext(PrintStream&, DumpContext*) const; + +private: + static InlineCallFrame* deletedMarker() + { + return bitwise_cast<InlineCallFrame*>(static_cast<uintptr_t>(1)); + } +}; + +struct InlineCallFrame { + enum Kind { + Call, + Construct, + CallVarargs, + ConstructVarargs, + + // For these, the stackOffset incorporates the argument count plus the true return PC + // slot. 
+ GetterCall, + SetterCall + }; + + static Kind kindFor(CodeSpecializationKind kind) + { + switch (kind) { + case CodeForCall: + return Call; + case CodeForConstruct: + return Construct; + } + RELEASE_ASSERT_NOT_REACHED(); + return Call; + } + + static Kind varargsKindFor(CodeSpecializationKind kind) + { + switch (kind) { + case CodeForCall: + return CallVarargs; + case CodeForConstruct: + return ConstructVarargs; + } + RELEASE_ASSERT_NOT_REACHED(); + return Call; + } + + static CodeSpecializationKind specializationKindFor(Kind kind) + { + switch (kind) { + case Call: + case CallVarargs: + case GetterCall: + case SetterCall: + return CodeForCall; + case Construct: + case ConstructVarargs: + return CodeForConstruct; + } + RELEASE_ASSERT_NOT_REACHED(); + return CodeForCall; + } + + static bool isVarargs(Kind kind) + { + switch (kind) { + case CallVarargs: + case ConstructVarargs: + return true; + default: + return false; + } + } + bool isVarargs() const + { + return isVarargs(static_cast<Kind>(kind)); + } + + Vector<ValueRecovery> arguments; // Includes 'this'. + WriteBarrier<ScriptExecutable> executable; + ValueRecovery calleeRecovery; + CodeOrigin caller; + + signed stackOffset : 28; + unsigned kind : 3; // real type is Kind + bool isClosureCall : 1; // If false then we know that callee/scope are constants and the DFG won't treat them as variables, i.e. they have to be recovered manually. + VirtualRegister argumentCountRegister; // Only set when we inline a varargs call. + + // There is really no good notion of a "default" set of values for + // InlineCallFrame's fields. This constructor is here just to reduce confusion if + // we forgot to initialize explicitly. + InlineCallFrame() + : stackOffset(0) + , kind(Call) + , isClosureCall(false) + { + } + + CodeSpecializationKind specializationKind() const { return specializationKindFor(static_cast<Kind>(kind)); } + + JSFunction* calleeConstant() const; + void visitAggregate(SlotVisitor&); + + // Get the callee given a machine call frame to which this InlineCallFrame belongs. 
+ JSFunction* calleeForCallFrame(ExecState*) const; + + CString inferredName() const; + CodeBlockHash hash() const; + CString hashAsStringIfPossible() const; + + CodeBlock* baselineCodeBlock() const; + + void setStackOffset(signed offset) + { + stackOffset = offset; + RELEASE_ASSERT(static_cast<signed>(stackOffset) == offset); + } + + ptrdiff_t callerFrameOffset() const { return stackOffset * sizeof(Register) + CallFrame::callerFrameOffset(); } + ptrdiff_t returnPCOffset() const { return stackOffset * sizeof(Register) + CallFrame::returnPCOffset(); } + + void dumpBriefFunctionInformation(PrintStream&) const; + void dump(PrintStream&) const; + void dumpInContext(PrintStream&, DumpContext*) const; + + MAKE_PRINT_METHOD(InlineCallFrame, dumpBriefFunctionInformation, briefFunctionInformation); +}; + +inline int CodeOrigin::stackOffset() const +{ + if (!inlineCallFrame) + return 0; + + return inlineCallFrame->stackOffset; +} + +inline unsigned CodeOrigin::hash() const +{ + return WTF::IntHash<unsigned>::hash(bytecodeIndex) + + WTF::PtrHash<InlineCallFrame*>::hash(inlineCallFrame); +} + +inline bool CodeOrigin::operator==(const CodeOrigin& other) const +{ + return bytecodeIndex == other.bytecodeIndex + && inlineCallFrame == other.inlineCallFrame; +} + +inline ScriptExecutable* CodeOrigin::codeOriginOwner() const +{ + if (!inlineCallFrame) + return 0; + return inlineCallFrame->executable.get(); +} + +struct CodeOriginHash { + static unsigned hash(const CodeOrigin& key) { return key.hash(); } + static bool equal(const CodeOrigin& a, const CodeOrigin& b) { return a == b; } + static const bool safeToCompareToEmptyOrDeleted = true; +}; + +struct CodeOriginApproximateHash { + static unsigned hash(const CodeOrigin& key) { return key.approximateHash(); } + static bool equal(const CodeOrigin& a, const CodeOrigin& b) { return a.isApproximatelyEqualTo(b); } + static const bool safeToCompareToEmptyOrDeleted = true; +}; + +} // namespace JSC + +namespace WTF { + +void printInternal(PrintStream&, JSC::InlineCallFrame::Kind); + +template<typename T> struct DefaultHash; +template<> struct DefaultHash<JSC::CodeOrigin> { + typedef JSC::CodeOriginHash Hash; +}; + +template<typename T> struct HashTraits; +template<> struct HashTraits<JSC::CodeOrigin> : SimpleClassHashTraits<JSC::CodeOrigin> { + static const bool emptyValueIsZero = false; +}; + +} // namespace WTF + +#endif // CodeOrigin_h + diff --git a/Source/JavaScriptCore/bytecode/CodeType.cpp b/Source/JavaScriptCore/bytecode/CodeType.cpp new file mode 100644 index 000000000..8b2cad56a --- /dev/null +++ b/Source/JavaScriptCore/bytecode/CodeType.cpp @@ -0,0 +1,52 @@ +/* + * Copyright (C) 2012 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. 
OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "CodeType.h" + +#include <wtf/PrintStream.h> + +namespace WTF { + +void printInternal(PrintStream& out, JSC::CodeType codeType) +{ + switch (codeType) { + case JSC::GlobalCode: + out.print("Global"); + return; + case JSC::EvalCode: + out.print("Eval"); + return; + case JSC::FunctionCode: + out.print("Function"); + return; + default: + CRASH(); + return; + } +} + +} // namespace WTF + diff --git a/Source/JavaScriptCore/bytecode/CodeType.h b/Source/JavaScriptCore/bytecode/CodeType.h new file mode 100644 index 000000000..b8e107dcf --- /dev/null +++ b/Source/JavaScriptCore/bytecode/CodeType.h @@ -0,0 +1,43 @@ +/* + * Copyright (C) 2012 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef CodeType_h +#define CodeType_h + +namespace JSC { + +enum CodeType { GlobalCode, EvalCode, FunctionCode }; + +} // namespace JSC + +namespace WTF { + +class PrintStream; +void printInternal(PrintStream&, JSC::CodeType); + +} // namespace WTF + +#endif // CodeType_h + diff --git a/Source/JavaScriptCore/bytecode/ComplexGetStatus.cpp b/Source/JavaScriptCore/bytecode/ComplexGetStatus.cpp new file mode 100644 index 000000000..d4ea3baa8 --- /dev/null +++ b/Source/JavaScriptCore/bytecode/ComplexGetStatus.cpp @@ -0,0 +1,79 @@ +/* + * Copyright (C) 2014, 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "ComplexGetStatus.h" + +#include "JSCInlines.h" + +namespace JSC { + +ComplexGetStatus ComplexGetStatus::computeFor( + Structure* headStructure, const ObjectPropertyConditionSet& conditionSet, UniquedStringImpl* uid) +{ + // FIXME: We should assert that we never see a structure that + // hasImpureGetOwnPropertySlot() but for which we don't + // newImpurePropertyFiresWatchpoints(). We're not at a point where we can do + // that, yet. + // https://bugs.webkit.org/show_bug.cgi?id=131810 + + ASSERT(conditionSet.isValid()); + + if (headStructure->takesSlowPathInDFGForImpureProperty()) + return takesSlowPath(); + + ComplexGetStatus result; + result.m_kind = Inlineable; + + if (!conditionSet.isEmpty()) { + result.m_conditionSet = conditionSet; + + if (!result.m_conditionSet.structuresEnsureValidity()) + return skip(); + + unsigned numberOfSlotBases = + result.m_conditionSet.numberOfConditionsWithKind(PropertyCondition::Presence); + RELEASE_ASSERT(numberOfSlotBases <= 1); + if (!numberOfSlotBases) { + // Currently we don't support misses. That's a bummer. + // FIXME: https://bugs.webkit.org/show_bug.cgi?id=133052 + return takesSlowPath(); + } + ObjectPropertyCondition base = result.m_conditionSet.slotBaseCondition(); + ASSERT(base.kind() == PropertyCondition::Presence); + + result.m_offset = base.offset(); + } else + result.m_offset = headStructure->getConcurrently(uid); + + if (!isValidOffset(result.m_offset)) + return takesSlowPath(); + + return result; +} + +} // namespace JSC + + diff --git a/Source/JavaScriptCore/bytecode/ComplexGetStatus.h b/Source/JavaScriptCore/bytecode/ComplexGetStatus.h new file mode 100644 index 000000000..a06e995d5 --- /dev/null +++ b/Source/JavaScriptCore/bytecode/ComplexGetStatus.h @@ -0,0 +1,114 @@ +/* + * Copyright (C) 2014, 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. 
``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef ComplexGetStatus_h
+#define ComplexGetStatus_h
+
+#include "JSCJSValue.h"
+#include "ObjectPropertyConditionSet.h"
+#include "PropertyOffset.h"
+
+namespace JSC {
+
+class CodeBlock;
+class StructureChain;
+
+// This class is useful for figuring out how to inline a cached get-like access. We
+// say "get-like" because this is appropriate for loading the GetterSetter object in
+// a put_by_id that hits a setter. Notably, this doesn't figure out how to call
+// accessors, or even whether they should be called. What it gives us is a way of
+// determining how to load the value from the requested property (identified by a
+// StringImpl* uid) from an object of the given structure in the given CodeBlock,
+// assuming that such an access had already been cached by Repatch (and so Repatch had
+// already done a bunch of safety checks). This doesn't reexecute any checks that
+// Repatch would have executed, and for prototype chain accesses, it doesn't ask the
+// objects in the prototype chain whether their getOwnPropertySlot would attempt to
+// intercept the access - so this really is only appropriate if you already know that
+// one of the JITOperations had OK'd this for caching and that Repatch concurred.
+//
+// The typical use pattern is something like:
+//
+// ComplexGetStatus status = ComplexGetStatus::computeFor(...);
+// switch (status.kind()) {
+// case ComplexGetStatus::ShouldSkip:
+//     // Handle the case where this kind of access is possibly safe but wouldn't
+//     // pass the required safety checks. For example, if an IC gives us a list of
+//     // accesses and one of them is ShouldSkip, then we should pretend as if it
+//     // wasn't even there.
+//     break;
+// case ComplexGetStatus::TakesSlowPath:
+//     // This kind of access is not safe to inline. Bail out of any attempts to
+//     // inline.
+//     break;
+// case ComplexGetStatus::Inlineable:
+//     // The good stuff goes here. If it's Inlineable then the other properties of
+//     // the 'status' object will tell you everything you need to know about how
+//     // to execute the get-like operation.
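+//     // For instance (sketch): emit checks for status.conditionSet(), then a
+//     // direct load at status.offset() from the object's property storage.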
+// break; +// } + +class ComplexGetStatus { +public: + enum Kind { + ShouldSkip, + TakesSlowPath, + Inlineable + }; + + ComplexGetStatus() + : m_kind(ShouldSkip) + , m_offset(invalidOffset) + { + } + + static ComplexGetStatus skip() + { + return ComplexGetStatus(); + } + + static ComplexGetStatus takesSlowPath() + { + ComplexGetStatus result; + result.m_kind = TakesSlowPath; + return result; + } + + static ComplexGetStatus computeFor( + Structure* headStructure, const ObjectPropertyConditionSet&, UniquedStringImpl* uid); + + Kind kind() const { return m_kind; } + PropertyOffset offset() const { return m_offset; } + const ObjectPropertyConditionSet& conditionSet() const { return m_conditionSet; } + +private: + Kind m_kind; + PropertyOffset m_offset; + ObjectPropertyConditionSet m_conditionSet; +}; + +} // namespace JSC + +#endif // ComplexGetStatus_h + diff --git a/Source/JavaScriptCore/bytecode/DFGExitProfile.cpp b/Source/JavaScriptCore/bytecode/DFGExitProfile.cpp new file mode 100644 index 000000000..40a25ced6 --- /dev/null +++ b/Source/JavaScriptCore/bytecode/DFGExitProfile.cpp @@ -0,0 +1,101 @@ +/* + * Copyright (C) 2011 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "DFGExitProfile.h" + +#if ENABLE(DFG_JIT) + +namespace JSC { namespace DFG { + +ExitProfile::ExitProfile() { } +ExitProfile::~ExitProfile() { } + +bool ExitProfile::add(const ConcurrentJITLocker&, const FrequentExitSite& site) +{ + ASSERT(site.jitType() != ExitFromAnything); + + // If we've never seen any frequent exits then create the list and put this site + // into it. + if (!m_frequentExitSites) { + m_frequentExitSites = std::make_unique<Vector<FrequentExitSite>>(); + m_frequentExitSites->append(site); + return true; + } + + // Don't add it if it's already there. This is O(n), but that's OK, because we + // know that the total number of places where code exits tends to not be large, + // and this code is only used when recompilation is triggered. 
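+    // (Note: this scan also determines add()'s return value -- false when the
+    // site was already recorded, true when it is genuinely new information.)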
+ for (unsigned i = 0; i < m_frequentExitSites->size(); ++i) { + if (m_frequentExitSites->at(i) == site) + return false; + } + + m_frequentExitSites->append(site); + return true; +} + +Vector<FrequentExitSite> ExitProfile::exitSitesFor(unsigned bytecodeIndex) +{ + Vector<FrequentExitSite> result; + + if (!m_frequentExitSites) + return result; + + for (unsigned i = 0; i < m_frequentExitSites->size(); ++i) { + if (m_frequentExitSites->at(i).bytecodeOffset() == bytecodeIndex) + result.append(m_frequentExitSites->at(i)); + } + + return result; +} + +bool ExitProfile::hasExitSite(const ConcurrentJITLocker&, const FrequentExitSite& site) const +{ + if (!m_frequentExitSites) + return false; + + for (unsigned i = m_frequentExitSites->size(); i--;) { + if (site.subsumes(m_frequentExitSites->at(i))) + return true; + } + return false; +} + +QueryableExitProfile::QueryableExitProfile() { } +QueryableExitProfile::~QueryableExitProfile() { } + +void QueryableExitProfile::initialize(const ConcurrentJITLocker&, const ExitProfile& profile) +{ + if (!profile.m_frequentExitSites) + return; + + for (unsigned i = 0; i < profile.m_frequentExitSites->size(); ++i) + m_frequentExitSites.add(profile.m_frequentExitSites->at(i)); +} + +} } // namespace JSC::DFG + +#endif diff --git a/Source/JavaScriptCore/bytecode/DFGExitProfile.h b/Source/JavaScriptCore/bytecode/DFGExitProfile.h new file mode 100644 index 000000000..cdecbaf97 --- /dev/null +++ b/Source/JavaScriptCore/bytecode/DFGExitProfile.h @@ -0,0 +1,221 @@ +/* + * Copyright (C) 2011, 2012, 2013, 2014 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#ifndef DFGExitProfile_h
+#define DFGExitProfile_h
+
+#if ENABLE(DFG_JIT)
+
+#include "ConcurrentJITLock.h"
+#include "ExitKind.h"
+#include "ExitingJITType.h"
+#include <wtf/HashSet.h>
+#include <wtf/Vector.h>
+
+namespace JSC { namespace DFG {
+
+class FrequentExitSite {
+public:
+    FrequentExitSite()
+        : m_bytecodeOffset(0) // 0 = empty value
+        , m_kind(ExitKindUnset)
+        , m_jitType(ExitFromAnything)
+    {
+    }
+
+    FrequentExitSite(WTF::HashTableDeletedValueType)
+        : m_bytecodeOffset(1) // 1 = deleted value
+        , m_kind(ExitKindUnset)
+        , m_jitType(ExitFromAnything)
+    {
+    }
+
+    explicit FrequentExitSite(unsigned bytecodeOffset, ExitKind kind, ExitingJITType jitType = ExitFromAnything)
+        : m_bytecodeOffset(bytecodeOffset)
+        , m_kind(kind)
+        , m_jitType(jitType)
+    {
+        if (m_kind == ArgumentsEscaped) {
+            // Count this one globally. It doesn't matter where in the code block the arguments escaped;
+            // the fact that they did is not associated with any particular instruction.
+            m_bytecodeOffset = 0;
+        }
+    }
+
+    // Use this constructor if you wish for the exit site to be counted globally within its
+    // code block.
+    explicit FrequentExitSite(ExitKind kind, ExitingJITType jitType = ExitFromAnything)
+        : m_bytecodeOffset(0)
+        , m_kind(kind)
+        , m_jitType(jitType)
+    {
+    }
+
+    bool operator!() const
+    {
+        return m_kind == ExitKindUnset;
+    }
+
+    bool operator==(const FrequentExitSite& other) const
+    {
+        return m_bytecodeOffset == other.m_bytecodeOffset
+            && m_kind == other.m_kind
+            && m_jitType == other.m_jitType;
+    }
+
+    bool subsumes(const FrequentExitSite& other) const
+    {
+        if (m_bytecodeOffset != other.m_bytecodeOffset)
+            return false;
+        if (m_kind != other.m_kind)
+            return false;
+        if (m_jitType == ExitFromAnything)
+            return true;
+        return m_jitType == other.m_jitType;
+    }
+
+    unsigned hash() const
+    {
+        return WTF::intHash(m_bytecodeOffset) + m_kind + m_jitType * 7;
+    }
+
+    unsigned bytecodeOffset() const { return m_bytecodeOffset; }
+    ExitKind kind() const { return m_kind; }
+    ExitingJITType jitType() const { return m_jitType; }
+
+    FrequentExitSite withJITType(ExitingJITType jitType) const
+    {
+        FrequentExitSite result = *this;
+        result.m_jitType = jitType;
+        return result;
+    }
+
+    bool isHashTableDeletedValue() const
+    {
+        return m_kind == ExitKindUnset && m_bytecodeOffset;
+    }
+
+private:
+    unsigned m_bytecodeOffset;
+    ExitKind m_kind;
+    ExitingJITType m_jitType;
+};
+
+struct FrequentExitSiteHash {
+    static unsigned hash(const FrequentExitSite& key) { return key.hash(); }
+    static bool equal(const FrequentExitSite& a, const FrequentExitSite& b) { return a == b; }
+    static const bool safeToCompareToEmptyOrDeleted = true;
+};
+
+} } // namespace JSC::DFG
+
+
+namespace WTF {
+
+template<typename T> struct DefaultHash;
+template<> struct DefaultHash<JSC::DFG::FrequentExitSite> {
+    typedef JSC::DFG::FrequentExitSiteHash Hash;
+};
+
+template<typename T> struct HashTraits;
+template<> struct HashTraits<JSC::DFG::FrequentExitSite> : SimpleClassHashTraits<JSC::DFG::FrequentExitSite> { };
+
+} // namespace WTF
+
+namespace JSC { namespace DFG {
+
+class QueryableExitProfile;
+
+class ExitProfile {
+public:
+    ExitProfile();
+    ~ExitProfile();
+
+    // Add a new frequent exit site. Return true if this is a new one, or false
+    // if we already knew about it. This is an O(n) operation, because it errs
+    // on the side of keeping the data structure compact. Also, this will only
+    // be called a fixed number of times per recompilation.
Recompilation is + // rare to begin with, and implies doing O(n) operations on the CodeBlock + // anyway. + bool add(const ConcurrentJITLocker&, const FrequentExitSite&); + + // Get the frequent exit sites for a bytecode index. This is O(n), and is + // meant to only be used from debugging/profiling code. + Vector<FrequentExitSite> exitSitesFor(unsigned bytecodeIndex); + + // This is O(n) and should be called on less-frequently executed code paths + // in the compiler. It should be strictly cheaper than building a + // QueryableExitProfile, if you really expect this to be called infrequently + // and you believe that there are few exit sites. + bool hasExitSite(const ConcurrentJITLocker&, const FrequentExitSite&) const; + bool hasExitSite(const ConcurrentJITLocker& locker, ExitKind kind) const + { + return hasExitSite(locker, FrequentExitSite(kind)); + } + bool hasExitSite(const ConcurrentJITLocker& locker, unsigned bytecodeIndex, ExitKind kind) const + { + return hasExitSite(locker, FrequentExitSite(bytecodeIndex, kind)); + } + +private: + friend class QueryableExitProfile; + + std::unique_ptr<Vector<FrequentExitSite>> m_frequentExitSites; +}; + +class QueryableExitProfile { +public: + QueryableExitProfile(); + ~QueryableExitProfile(); + + void initialize(const ConcurrentJITLocker&, const ExitProfile&); + + bool hasExitSite(const FrequentExitSite& site) const + { + if (site.jitType() == ExitFromAnything) { + return hasExitSite(site.withJITType(ExitFromDFG)) + || hasExitSite(site.withJITType(ExitFromFTL)); + } + return m_frequentExitSites.find(site) != m_frequentExitSites.end(); + } + + bool hasExitSite(ExitKind kind) const + { + return hasExitSite(FrequentExitSite(kind)); + } + + bool hasExitSite(unsigned bytecodeIndex, ExitKind kind) const + { + return hasExitSite(FrequentExitSite(bytecodeIndex, kind)); + } +private: + HashSet<FrequentExitSite> m_frequentExitSites; +}; + +} } // namespace JSC::DFG + +#endif // ENABLE(DFG_JIT) + +#endif // DFGExitProfile_h diff --git a/Source/JavaScriptCore/bytecode/DataFormat.h b/Source/JavaScriptCore/bytecode/DataFormat.h new file mode 100644 index 000000000..6d7542e2d --- /dev/null +++ b/Source/JavaScriptCore/bytecode/DataFormat.h @@ -0,0 +1,126 @@ +/* + * Copyright (C) 2011, 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. 
OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef DataFormat_h
+#define DataFormat_h
+
+#include <wtf/Assertions.h>
+
+namespace JSC {
+
+// === DataFormat ===
+//
+// This enum tracks the current representation in which a value is being held.
+// Values may be unboxed primitives (int32, double, or cell), or boxed as a JSValue.
+// For boxed values, we may know the type of boxing that has taken place.
+// (May also need bool, array, object, string types!)
+enum DataFormat {
+    DataFormatNone = 0,
+    DataFormatInt32 = 1,
+    DataFormatInt52 = 2, // Int52's are left-shifted by 16 by default.
+    DataFormatStrictInt52 = 3, // "Strict" Int52 means it's not shifted.
+    DataFormatDouble = 4,
+    DataFormatBoolean = 5,
+    DataFormatCell = 6,
+    DataFormatStorage = 7,
+    DataFormatJS = 8,
+    DataFormatJSInt32 = DataFormatJS | DataFormatInt32,
+    DataFormatJSDouble = DataFormatJS | DataFormatDouble,
+    DataFormatJSCell = DataFormatJS | DataFormatCell,
+    DataFormatJSBoolean = DataFormatJS | DataFormatBoolean,
+
+    // Marker delimiting ordinary data formats and OSR-only data formats.
+    DataFormatOSRMarker = 32,
+
+    // Special data formats used only for OSR.
+    DataFormatDead = 33, // Implies jsUndefined().
+};
+
+inline const char* dataFormatToString(DataFormat dataFormat)
+{
+    switch (dataFormat) {
+    case DataFormatNone:
+        return "None";
+    case DataFormatInt32:
+        return "Int32";
+    case DataFormatInt52:
+        return "Int52";
+    case DataFormatStrictInt52:
+        return "StrictInt52";
+    case DataFormatDouble:
+        return "Double";
+    case DataFormatCell:
+        return "Cell";
+    case DataFormatBoolean:
+        return "Boolean";
+    case DataFormatStorage:
+        return "Storage";
+    case DataFormatJS:
+        return "JS";
+    case DataFormatJSInt32:
+        return "JSInt32";
+    case DataFormatJSDouble:
+        return "JSDouble";
+    case DataFormatJSCell:
+        return "JSCell";
+    case DataFormatJSBoolean:
+        return "JSBoolean";
+    case DataFormatDead:
+        return "Dead";
+    default:
+        RELEASE_ASSERT_NOT_REACHED();
+        return "Unknown";
+    }
+}
+
+inline bool isJSFormat(DataFormat format, DataFormat expectedFormat)
+{
+    ASSERT(expectedFormat & DataFormatJS);
+    return (format | DataFormatJS) == expectedFormat;
+}
+
+inline bool isJSInt32(DataFormat format)
+{
+    return isJSFormat(format, DataFormatJSInt32);
+}
+
+inline bool isJSDouble(DataFormat format)
+{
+    return isJSFormat(format, DataFormatJSDouble);
+}
+
+inline bool isJSCell(DataFormat format)
+{
+    return isJSFormat(format, DataFormatJSCell);
+}
+
+inline bool isJSBoolean(DataFormat format)
+{
+    return isJSFormat(format, DataFormatJSBoolean);
+}
+
+}
+
+#endif // DataFormat_h
diff --git a/Source/JavaScriptCore/bytecode/DeferredCompilationCallback.cpp b/Source/JavaScriptCore/bytecode/DeferredCompilationCallback.cpp
new file mode 100644
index 000000000..761e95b3f
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/DeferredCompilationCallback.cpp
@@ -0,0 +1,73 @@
+/*
+ * Copyright (C) 2013, 2014 Apple Inc. All rights reserved.
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "DeferredCompilationCallback.h" + +#include "CodeBlock.h" + +namespace JSC { + +DeferredCompilationCallback::DeferredCompilationCallback() { } +DeferredCompilationCallback::~DeferredCompilationCallback() { } + +void DeferredCompilationCallback::compilationDidComplete(CodeBlock* codeBlock, CompilationResult result) +{ + dumpCompiledSourcesIfNeeded(); + + switch (result) { + case CompilationFailed: + case CompilationInvalidated: + codeBlock->heap()->removeCodeBlock(codeBlock); + break; + case CompilationSuccessful: + break; + case CompilationDeferred: + RELEASE_ASSERT_NOT_REACHED(); + } +} + +Vector<DeferredSourceDump>& DeferredCompilationCallback::ensureDeferredSourceDump() +{ + if (!m_deferredSourceDump) + m_deferredSourceDump = std::make_unique<Vector<DeferredSourceDump>>(); + return *m_deferredSourceDump; +} + +void DeferredCompilationCallback::dumpCompiledSourcesIfNeeded() +{ + if (!m_deferredSourceDump) + return; + + ASSERT(Options::dumpSourceAtDFGTime()); + unsigned index = 0; + for (auto& info : *m_deferredSourceDump) { + dataLog("[", ++index, "] "); + info.dump(); + } +} + +} // JSC + diff --git a/Source/JavaScriptCore/bytecode/DeferredCompilationCallback.h b/Source/JavaScriptCore/bytecode/DeferredCompilationCallback.h new file mode 100644 index 000000000..37568d222 --- /dev/null +++ b/Source/JavaScriptCore/bytecode/DeferredCompilationCallback.h @@ -0,0 +1,59 @@ +/* + * Copyright (C) 2013 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. 
OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef DeferredCompilationCallback_h +#define DeferredCompilationCallback_h + +#include "CompilationResult.h" +#include "DeferredSourceDump.h" +#include <wtf/RefCounted.h> +#include <wtf/Vector.h> + +namespace JSC { + +class CodeBlock; + +class DeferredCompilationCallback : public RefCounted<DeferredCompilationCallback> { +protected: + DeferredCompilationCallback(); + +public: + virtual ~DeferredCompilationCallback(); + + virtual void compilationDidBecomeReadyAsynchronously(CodeBlock*) = 0; + virtual void compilationDidComplete(CodeBlock*, CompilationResult); + + Vector<DeferredSourceDump>& ensureDeferredSourceDump(); + +private: + void dumpCompiledSourcesIfNeeded(); + + std::unique_ptr<Vector<DeferredSourceDump>> m_deferredSourceDump; +}; + +} // namespace JSC + +#endif // DeferredCompilationCallback_h + diff --git a/Source/JavaScriptCore/bytecode/DeferredSourceDump.cpp b/Source/JavaScriptCore/bytecode/DeferredSourceDump.cpp new file mode 100644 index 000000000..48079db66 --- /dev/null +++ b/Source/JavaScriptCore/bytecode/DeferredSourceDump.cpp @@ -0,0 +1,66 @@ +/* + * Copyright (C) 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "config.h" +#include "DeferredSourceDump.h" + +#include "CodeBlock.h" +#include "CodeBlockWithJITType.h" + +namespace JSC { + +DeferredSourceDump::DeferredSourceDump(CodeBlock* codeBlock) + : m_codeBlock(codeBlock) + , m_rootCodeBlock(nullptr) + , m_rootJITType(JITCode::None) +{ +} + +DeferredSourceDump::DeferredSourceDump(CodeBlock* codeBlock, CodeBlock* rootCodeBlock, JITCode::JITType rootJITType, CodeOrigin callerCodeOrigin) + : m_codeBlock(codeBlock) + , m_rootCodeBlock(rootCodeBlock) + , m_rootJITType(rootJITType) + , m_callerCodeOrigin(callerCodeOrigin) +{ +} + +void DeferredSourceDump::dump() +{ + bool isInlinedFrame = !!m_rootCodeBlock; + if (isInlinedFrame) + dataLog("Inlined "); + else + dataLog("Compiled "); + dataLog(*m_codeBlock); + + if (isInlinedFrame) + dataLog(" at ", CodeBlockWithJITType(m_rootCodeBlock, m_rootJITType), " ", m_callerCodeOrigin); + + dataLog("\n'''"); + m_codeBlock->dumpSource(); + dataLog("'''\n"); +} + +} // namespace JSC diff --git a/Source/JavaScriptCore/bytecode/DeferredSourceDump.h b/Source/JavaScriptCore/bytecode/DeferredSourceDump.h new file mode 100644 index 000000000..72cb6b3b8 --- /dev/null +++ b/Source/JavaScriptCore/bytecode/DeferredSourceDump.h @@ -0,0 +1,52 @@ +/* + * Copyright (C) 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef DeferredSourceDump_h +#define DeferredSourceDump_h + +#include "CodeOrigin.h" +#include "JITCode.h" + +namespace JSC { + +class CodeBlock; + +class DeferredSourceDump { +public: + DeferredSourceDump(CodeBlock*); + DeferredSourceDump(CodeBlock*, CodeBlock* rootCodeBlock, JITCode::JITType rootJITType, CodeOrigin callerCodeOrigin); + + void dump(); + +private: + CodeBlock* m_codeBlock; + CodeBlock* m_rootCodeBlock; + JITCode::JITType m_rootJITType; + CodeOrigin m_callerCodeOrigin; +}; + +} // namespace JSC + +#endif // DeferredSourceDump_h diff --git a/Source/JavaScriptCore/bytecode/EvalCodeCache.h b/Source/JavaScriptCore/bytecode/EvalCodeCache.h new file mode 100644 index 000000000..4d5909338 --- /dev/null +++ b/Source/JavaScriptCore/bytecode/EvalCodeCache.h @@ -0,0 +1,95 @@ +/* + * Copyright (C) 2008, 2009 Apple Inc. All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Apple Inc. ("Apple") nor the names of + * its contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef EvalCodeCache_h +#define EvalCodeCache_h + +#include "Executable.h" +#include "JSGlobalObject.h" +#include "Options.h" +#include "SourceCode.h" +#include <wtf/HashMap.h> +#include <wtf/RefPtr.h> +#include <wtf/text/StringHash.h> + +namespace JSC { + + class SlotVisitor; + + class EvalCodeCache { + public: + EvalExecutable* tryGet(bool inStrictContext, const String& evalSource, JSScope* scope) + { + if (isCacheable(inStrictContext, evalSource, scope)) + return m_cacheMap.get(evalSource.impl()).get(); + return 0; + } + + EvalExecutable* getSlow(ExecState* exec, ScriptExecutable* owner, bool inStrictContext, ThisTDZMode thisTDZMode, const String& evalSource, JSScope* scope) + { + VariableEnvironment variablesUnderTDZ; + JSScope::collectVariablesUnderTDZ(scope, variablesUnderTDZ); + EvalExecutable* evalExecutable = EvalExecutable::create(exec, makeSource(evalSource), inStrictContext, thisTDZMode, &variablesUnderTDZ); + if (!evalExecutable) + return 0; + + if (isCacheable(inStrictContext, evalSource, scope) && m_cacheMap.size() < maxCacheEntries) + m_cacheMap.set(evalSource.impl(), WriteBarrier<EvalExecutable>(exec->vm(), owner, evalExecutable)); + + return evalExecutable; + } + + bool isEmpty() const { return m_cacheMap.isEmpty(); } + + void visitAggregate(SlotVisitor&); + + void clear() + { + m_cacheMap.clear(); + } + + private: + ALWAYS_INLINE bool isCacheable(bool inStrictContext, const String& evalSource, JSScope* scope) const + { + // If eval() is called and it has access to a lexical scope, we can't soundly cache it. + // If the eval() only has access to the "var" scope, then we can cache it. 
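+            // Summarizing the checks below: only sloppy-mode evals of bounded
+            // source length, whose enclosing scope is a plain variable scope
+            // rather than a lexical or catch scope, are cacheable.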
+ return !inStrictContext + && evalSource.length() < Options::maximumEvalCacheableSourceLength() + && scope->begin()->isVariableObject() + && !scope->isLexicalScope() + && !scope->isCatchScope(); + } + static const int maxCacheEntries = 64; + + typedef HashMap<RefPtr<StringImpl>, WriteBarrier<EvalExecutable>> EvalCacheMap; + EvalCacheMap m_cacheMap; + }; + +} // namespace JSC + +#endif // EvalCodeCache_h diff --git a/Source/JavaScriptCore/bytecode/ExecutableInfo.h b/Source/JavaScriptCore/bytecode/ExecutableInfo.h new file mode 100644 index 000000000..c56a77008 --- /dev/null +++ b/Source/JavaScriptCore/bytecode/ExecutableInfo.h @@ -0,0 +1,63 @@ +/* + * Copyright (C) 2012-2015 Apple Inc. All Rights Reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef ExecutableInfo_h +#define ExecutableInfo_h + +#include "ParserModes.h" + +namespace JSC { + +struct ExecutableInfo { + ExecutableInfo(bool needsActivation, bool usesEval, bool isStrictMode, bool isConstructor, bool isBuiltinFunction, ConstructorKind constructorKind) + : m_needsActivation(needsActivation) + , m_usesEval(usesEval) + , m_isStrictMode(isStrictMode) + , m_isConstructor(isConstructor) + , m_isBuiltinFunction(isBuiltinFunction) + , m_constructorKind(static_cast<unsigned>(constructorKind)) + { + ASSERT(m_constructorKind == static_cast<unsigned>(constructorKind)); + } + + bool needsActivation() const { return m_needsActivation; } + bool usesEval() const { return m_usesEval; } + bool isStrictMode() const { return m_isStrictMode; } + bool isConstructor() const { return m_isConstructor; } + bool isBuiltinFunction() const { return m_isBuiltinFunction; } + ConstructorKind constructorKind() const { return static_cast<ConstructorKind>(m_constructorKind); } + +private: + unsigned m_needsActivation : 1; + unsigned m_usesEval : 1; + unsigned m_isStrictMode : 1; + unsigned m_isConstructor : 1; + unsigned m_isBuiltinFunction : 1; + unsigned m_constructorKind : 2; +}; + +} // namespace JSC + +#endif // ExecutableInfo_h diff --git a/Source/JavaScriptCore/bytecode/ExecutionCounter.cpp b/Source/JavaScriptCore/bytecode/ExecutionCounter.cpp new file mode 100644 index 000000000..fe4e430f1 --- /dev/null +++ b/Source/JavaScriptCore/bytecode/ExecutionCounter.cpp @@ -0,0 +1,185 @@ +/* + * Copyright (C) 2012, 2014 Apple Inc. 
All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "ExecutionCounter.h" + +#include "CodeBlock.h" +#include "ExecutableAllocator.h" +#include "JSCInlines.h" +#include <wtf/StringExtras.h> + +namespace JSC { + +template<CountingVariant countingVariant> +ExecutionCounter<countingVariant>::ExecutionCounter() +{ + reset(); +} + +template<CountingVariant countingVariant> +void ExecutionCounter<countingVariant>::forceSlowPathConcurrently() +{ + m_counter = 0; +} + +template<CountingVariant countingVariant> +bool ExecutionCounter<countingVariant>::checkIfThresholdCrossedAndSet(CodeBlock* codeBlock) +{ + if (hasCrossedThreshold(codeBlock)) + return true; + + if (setThreshold(codeBlock)) + return true; + + return false; +} + +template<CountingVariant countingVariant> +void ExecutionCounter<countingVariant>::setNewThreshold(int32_t threshold, CodeBlock* codeBlock) +{ + reset(); + m_activeThreshold = threshold; + setThreshold(codeBlock); +} + +template<CountingVariant countingVariant> +void ExecutionCounter<countingVariant>::deferIndefinitely() +{ + m_totalCount = 0; + m_activeThreshold = std::numeric_limits<int32_t>::max(); + m_counter = std::numeric_limits<int32_t>::min(); +} + +double applyMemoryUsageHeuristics(int32_t value, CodeBlock* codeBlock) +{ +#if ENABLE(JIT) + double multiplier = + ExecutableAllocator::memoryPressureMultiplier( + codeBlock->predictedMachineCodeSize()); +#else + // This code path will probably not be taken, but if it is, we fake it. + double multiplier = 1.0; + UNUSED_PARAM(codeBlock); +#endif + ASSERT(multiplier >= 1.0); + return multiplier * value; +} + +int32_t applyMemoryUsageHeuristicsAndConvertToInt(int32_t value, CodeBlock* codeBlock) +{ + double doubleResult = applyMemoryUsageHeuristics(value, codeBlock); + + ASSERT(doubleResult >= 0); + + if (doubleResult > std::numeric_limits<int32_t>::max()) + return std::numeric_limits<int32_t>::max(); + + return static_cast<int32_t>(doubleResult); +} + +template<CountingVariant countingVariant> +bool ExecutionCounter<countingVariant>::hasCrossedThreshold(CodeBlock* codeBlock) const +{ + // This checks if the current count rounded up to the threshold we were targeting. 
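+    // (Concretely, the expression at the end of this function tests whether
+    // m_totalCount + m_counter has reached the memory-usage-adjusted threshold
+    // minus a slack term.)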
+ // For example, if we are using half of available executable memory and have + // m_activeThreshold = 1000, applyMemoryUsageHeuristics(m_activeThreshold) will be + // 2000, but we will pretend as if the threshold was crossed if we reach 2000 - + // 1000 / 2, or 1500. The reasoning here is that we want to avoid thrashing. If + // this method returns false, then the JIT's threshold for when it will again call + // into the slow path (which will call this method a second time) will be set + // according to the difference between the current count and the target count + // according to *current* memory usage. But by the time we call into this again, we + // may have JIT'ed more code, and so the target count will increase slightly. This + // may lead to a repeating pattern where the target count is slightly incremented, + // the JIT immediately matches that increase, calls into the slow path again, and + // again the target count is slightly incremented. Instead of having this vicious + // cycle, we declare victory a bit early if the difference between the current + // total and our target according to memory heuristics is small. Our definition of + // small is arbitrarily picked to be half of the original threshold (i.e. + // m_activeThreshold). + + double modifiedThreshold = applyMemoryUsageHeuristics(m_activeThreshold, codeBlock); + + return static_cast<double>(m_totalCount) + m_counter >= + modifiedThreshold - static_cast<double>( + std::min(m_activeThreshold, maximumExecutionCountsBetweenCheckpoints())) / 2; +} + +template<CountingVariant countingVariant> +bool ExecutionCounter<countingVariant>::setThreshold(CodeBlock* codeBlock) +{ + if (m_activeThreshold == std::numeric_limits<int32_t>::max()) { + deferIndefinitely(); + return false; + } + + // Compute the true total count. + double trueTotalCount = count(); + + // Correct the threshold for current memory usage. + double threshold = applyMemoryUsageHeuristics(m_activeThreshold, codeBlock); + + // Threshold must be non-negative and not NaN. + ASSERT(threshold >= 0); + + // Adjust the threshold according to the number of executions we have already + // seen. This shouldn't go negative, but it might, because of round-off errors. + threshold -= trueTotalCount; + + if (threshold <= 0) { + m_counter = 0; + m_totalCount = trueTotalCount; + return true; + } + + threshold = clippedThreshold(codeBlock->globalObject(), threshold); + + m_counter = static_cast<int32_t>(-threshold); + + m_totalCount = trueTotalCount + threshold; + + return false; +} + +template<CountingVariant countingVariant> +void ExecutionCounter<countingVariant>::reset() +{ + m_counter = 0; + m_totalCount = 0; + m_activeThreshold = 0; +} + +template<CountingVariant countingVariant> +void ExecutionCounter<countingVariant>::dump(PrintStream& out) const +{ + out.printf("%lf/%lf, %d", count(), static_cast<double>(m_activeThreshold), m_counter); +} + +template class ExecutionCounter<CountingForBaseline>; +template class ExecutionCounter<CountingForUpperTiers>; + +} // namespace JSC + diff --git a/Source/JavaScriptCore/bytecode/ExecutionCounter.h b/Source/JavaScriptCore/bytecode/ExecutionCounter.h new file mode 100644 index 000000000..5002c6c67 --- /dev/null +++ b/Source/JavaScriptCore/bytecode/ExecutionCounter.h @@ -0,0 +1,124 @@ +/* + * Copyright (C) 2012, 2014 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef ExecutionCounter_h
+#define ExecutionCounter_h
+
+#include "JSGlobalObject.h"
+#include "Options.h"
+#include <wtf/PrintStream.h>
+#include <wtf/SimpleStats.h>
+
+namespace JSC {
+
+class CodeBlock;
+
+enum CountingVariant {
+    CountingForBaseline,
+    CountingForUpperTiers
+};
+
+double applyMemoryUsageHeuristics(int32_t value, CodeBlock*);
+int32_t applyMemoryUsageHeuristicsAndConvertToInt(int32_t value, CodeBlock*);
+
+inline int32_t formattedTotalExecutionCount(float value)
+{
+    union {
+        int32_t i;
+        float f;
+    } u;
+    u.f = value;
+    return u.i;
+}
+
+template<CountingVariant countingVariant>
+class ExecutionCounter {
+public:
+    ExecutionCounter();
+    void forceSlowPathConcurrently(); // If you use this, checkIfThresholdCrossedAndSet() may still return false.
+    bool checkIfThresholdCrossedAndSet(CodeBlock*);
+    void setNewThreshold(int32_t threshold, CodeBlock*);
+    void deferIndefinitely();
+    double count() const { return static_cast<double>(m_totalCount) + m_counter; }
+    void dump(PrintStream&) const;
+
+    static int32_t maximumExecutionCountsBetweenCheckpoints()
+    {
+        switch (countingVariant) {
+        case CountingForBaseline:
+            return Options::maximumExecutionCountsBetweenCheckpointsForBaseline();
+        case CountingForUpperTiers:
+            return Options::maximumExecutionCountsBetweenCheckpointsForUpperTiers();
+        default:
+            RELEASE_ASSERT_NOT_REACHED();
+            return 0;
+        }
+    }
+
+    template<typename T>
+    static T clippedThreshold(JSGlobalObject* globalObject, T threshold)
+    {
+        int32_t maxThreshold;
+        if (Options::randomizeExecutionCountsBetweenCheckpoints())
+            maxThreshold = globalObject->weakRandomInteger() % maximumExecutionCountsBetweenCheckpoints();
+        else
+            maxThreshold = maximumExecutionCountsBetweenCheckpoints();
+        if (threshold > maxThreshold)
+            threshold = maxThreshold;
+        return threshold;
+    }
+
+private:
+    bool hasCrossedThreshold(CodeBlock*) const;
+    bool setThreshold(CodeBlock*);
+    void reset();
+
+public:
+    // NB. These are intentionally public because they will be modified from machine code.
+
+    // This counter is incremented by the JIT or LLInt. It starts out negative and is
+    // counted up until it becomes non-negative. At the start of a counting period,
+    // the threshold we wish to reach is m_totalCount + m_counter, in the sense that
+    // we will add X to m_totalCount and subtract X from m_counter.
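+    // (Sketch: with an effective threshold of 1000, setThreshold() stores
+    // m_counter = -1000 and adds 1000 to m_totalCount; the JIT then counts
+    // m_counter up toward zero, and reaching zero signals the threshold.)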
+ int32_t m_counter; + + // Counts the total number of executions we have seen plus the ones we've set a + // threshold for in m_counter. Because m_counter's threshold is negative, the + // total number of actual executions can always be computed as m_totalCount + + // m_counter. + float m_totalCount; + + // This is the threshold we were originally targeting, without any correction for + // the memory usage heuristics. + int32_t m_activeThreshold; +}; + +typedef ExecutionCounter<CountingForBaseline> BaselineExecutionCounter; +typedef ExecutionCounter<CountingForUpperTiers> UpperTierExecutionCounter; + +} // namespace JSC + +#endif // ExecutionCounter_h + diff --git a/Source/JavaScriptCore/bytecode/ExitKind.cpp b/Source/JavaScriptCore/bytecode/ExitKind.cpp new file mode 100644 index 000000000..2524300f7 --- /dev/null +++ b/Source/JavaScriptCore/bytecode/ExitKind.cpp @@ -0,0 +1,116 @@ +/* + * Copyright (C) 2012-2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "config.h" +#include "ExitKind.h" + +#include <wtf/Assertions.h> +#include <wtf/PrintStream.h> + +namespace JSC { + +const char* exitKindToString(ExitKind kind) +{ + switch (kind) { + case ExitKindUnset: + return "Unset"; + case BadType: + return "BadType"; + case BadCell: + return "BadCell"; + case BadIdent: + return "BadIdent"; + case BadExecutable: + return "BadExecutable"; + case BadCache: + return "BadCache"; + case BadConstantCache: + return "BadConstantCache"; + case BadIndexingType: + return "BadIndexingType"; + case Overflow: + return "Overflow"; + case NegativeZero: + return "NegativeZero"; + case Int52Overflow: + return "Int52Overflow"; + case StoreToHole: + return "StoreToHole"; + case LoadFromHole: + return "LoadFromHole"; + case OutOfBounds: + return "OutOfBounds"; + case InadequateCoverage: + return "InadequateCoverage"; + case ArgumentsEscaped: + return "ArgumentsEscaped"; + case ExoticObjectMode: + return "ExoticObjectMode"; + case NotStringObject: + return "NotStringObject"; + case VarargsOverflow: + return "VarargsOverflow"; + case TDZFailure: + return "TDZFailure"; + case Uncountable: + return "Uncountable"; + case UncountableInvalidation: + return "UncountableInvalidation"; + case WatchdogTimerFired: + return "WatchdogTimerFired"; + case DebuggerEvent: + return "DebuggerEvent"; + } + RELEASE_ASSERT_NOT_REACHED(); + return "Unknown"; +} + +bool exitKindIsCountable(ExitKind kind) +{ + switch (kind) { + case ExitKindUnset: + RELEASE_ASSERT_NOT_REACHED(); + case BadType: + case Uncountable: + case LoadFromHole: // Already counted directly by the baseline JIT. + case StoreToHole: // Already counted directly by the baseline JIT. + case OutOfBounds: // Already counted directly by the baseline JIT. + return false; + default: + return true; + } +} + +} // namespace JSC + +namespace WTF { + +void printInternal(PrintStream& out, JSC::ExitKind kind) +{ + out.print(exitKindToString(kind)); +} + +} // namespace WTF + diff --git a/Source/JavaScriptCore/bytecode/ExitKind.h b/Source/JavaScriptCore/bytecode/ExitKind.h new file mode 100644 index 000000000..6f8c51200 --- /dev/null +++ b/Source/JavaScriptCore/bytecode/ExitKind.h @@ -0,0 +1,71 @@ +/* + * Copyright (C) 2012-2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef ExitKind_h +#define ExitKind_h + +namespace JSC { + +enum ExitKind : uint8_t { + ExitKindUnset, + BadType, // We exited because a type prediction was wrong. + BadCell, // We exited because we made an incorrect assumption about what cell we would see. Usually used for function checks. + BadIdent, // We exited because we made an incorrect assumption about what identifier we would see. Usually used for cached Id check in get_by_val. + BadExecutable, // We exited because we made an incorrect assumption about what executable we would see. + BadCache, // We exited because an inline cache was wrong. + BadConstantCache, // We exited because a cache on a weak constant (usually a prototype) was wrong. + BadIndexingType, // We exited because an indexing type was wrong. + Overflow, // We exited because of overflow. + NegativeZero, // We exited because we encountered negative zero. + Int52Overflow, // We exited because of an Int52 overflow. + StoreToHole, // We had a store to a hole. + LoadFromHole, // We had a load from a hole. + OutOfBounds, // We had an out-of-bounds access to an array. + InadequateCoverage, // We exited because we ended up in code that didn't have profiling coverage. + ArgumentsEscaped, // We exited because arguments escaped but we didn't expect them to. + ExoticObjectMode, // We exited because some exotic object that we were accessing was in an exotic mode (like Arguments with slow arguments). + NotStringObject, // We exited because we shouldn't have attempted to optimize string object access. + VarargsOverflow, // We exited because a varargs call passed more arguments than we expected. + TDZFailure, // We exited because we were in the TDZ and accessed the variable. + Uncountable, // We exited for none of the above reasons, and we should not count it. Most uses of this should be viewed as a FIXME. + UncountableInvalidation, // We exited because the code block was invalidated; this means that we've already counted the reasons why the code block was invalidated. + WatchdogTimerFired, // We exited because we need to service the watchdog timer. + DebuggerEvent // We exited because we need to service the debugger. +}; + +const char* exitKindToString(ExitKind); +bool exitKindIsCountable(ExitKind); + +} // namespace JSC + +namespace WTF { + +class PrintStream; +void printInternal(PrintStream&, JSC::ExitKind); + +} // namespace WTF + +#endif // ExitKind_h + diff --git a/Source/JavaScriptCore/bytecode/ExitingJITType.cpp b/Source/JavaScriptCore/bytecode/ExitingJITType.cpp new file mode 100644 index 000000000..aa8f120b6 --- /dev/null +++ b/Source/JavaScriptCore/bytecode/ExitingJITType.cpp @@ -0,0 +1,52 @@ +/* + * Copyright (C) 2014 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. 
OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "ExitingJITType.h" + +#include <wtf/PrintStream.h> + +namespace WTF { + +using namespace JSC; + +void printInternal(PrintStream& out, ExitingJITType type) +{ + switch (type) { + case ExitFromAnything: + out.print("FromAnything"); + return; + case ExitFromDFG: + out.print("FromDFG"); + return; + case ExitFromFTL: + out.print("FromFTL"); + return; + } + RELEASE_ASSERT_NOT_REACHED(); +} + +} // namespace WTF + diff --git a/Source/JavaScriptCore/bytecode/ExitingJITType.h b/Source/JavaScriptCore/bytecode/ExitingJITType.h new file mode 100644 index 000000000..e8ed03e41 --- /dev/null +++ b/Source/JavaScriptCore/bytecode/ExitingJITType.h @@ -0,0 +1,62 @@ +/* + * Copyright (C) 2014 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef ExitingJITType_h +#define ExitingJITType_h + +#include "JITCode.h" + +namespace JSC { + +enum ExitingJITType : uint8_t { + ExitFromAnything, + ExitFromDFG, + ExitFromFTL +}; + +inline ExitingJITType exitingJITTypeFor(JITCode::JITType type) +{ + switch (type) { + case JITCode::DFGJIT: + return ExitFromDFG; + case JITCode::FTLJIT: + return ExitFromFTL; + default: + RELEASE_ASSERT_NOT_REACHED(); + return ExitFromAnything; + } +} + +} // namespace JSC + +namespace WTF { + +class PrintStream; +void printInternal(PrintStream&, JSC::ExitingJITType); + +} // namespace WTF + +#endif // ExitingJITType_h + diff --git a/Source/JavaScriptCore/bytecode/ExpressionRangeInfo.h b/Source/JavaScriptCore/bytecode/ExpressionRangeInfo.h new file mode 100644 index 000000000..855738aec --- /dev/null +++ b/Source/JavaScriptCore/bytecode/ExpressionRangeInfo.h @@ -0,0 +1,112 @@ +/* + * Copyright (C) 2012, 2013 Apple Inc. All rights reserved. 
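ExitFromAnything exists so that a recorded exit site can match exits from any tier; a sketch of that matching rule (the helper is illustrative, not existing API):

    static bool exitSiteCovers(JSC::ExitingJITType recorded, JSC::ExitingJITType actual)
    {
        // A site recorded without tier information covers every exiting JIT;
        // otherwise the tiers must agree exactly.
        return recorded == JSC::ExitFromAnything || recorded == actual;
    }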
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef ExpressionRangeInfo_h +#define ExpressionRangeInfo_h + +#include <wtf/StdLibExtras.h> + +namespace JSC { + +struct ExpressionRangeInfo { + // Line and column values are encoded in 1 of 3 modes depending on the size + // of their values. These modes are: + // + // 1. FatLine: 22-bit line, 8-bit column. + // 2. FatColumn: 8-bit line, 22-bit column. + // 3. FatLineAndColumn: 32-bit line, 32-bit column. + // + // For the first 2 modes, the line and column will be encoded in the 30-bit + // position field in the ExpressionRangeInfo. For the FatLineAndColumn mode, + // the position field will hold an index into a FatPosition vector which + // holds the FatPosition records with the full 32-bit line and column values. 
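+    // For example, line 100 / column 5 fits FatLine mode and packs as + // position = (100 << 8) | 5 = 25605; decoding reverses this with a shift + // and a mask. A line or column wider than its fat-mode field forces + // FatLineAndColumn mode and an out-of-line FatPosition record.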
+ + enum { + FatLineMode, + FatColumnMode, + FatLineAndColumnMode + }; + + struct FatPosition { + uint32_t line; + uint32_t column; + }; + + enum { + FatLineModeLineShift = 8, + FatLineModeLineMask = (1 << 22) - 1, + FatLineModeColumnMask = (1 << 8) - 1, + FatColumnModeLineShift = 22, + FatColumnModeLineMask = (1 << 8) - 1, + FatColumnModeColumnMask = (1 << 22) - 1 + }; + + enum { + MaxOffset = (1 << 7) - 1, + MaxDivot = (1 << 25) - 1, + MaxFatLineModeLine = (1 << 22) - 1, + MaxFatLineModeColumn = (1 << 8) - 1, + MaxFatColumnModeLine = (1 << 8) - 1, + MaxFatColumnModeColumn = (1 << 22) - 1 + }; + + void encodeFatLineMode(unsigned line, unsigned column) + { + ASSERT(line <= MaxFatLineModeLine); + ASSERT(column <= MaxFatLineModeColumn); + position = ((line & FatLineModeLineMask) << FatLineModeLineShift | (column & FatLineModeColumnMask)); + } + + void encodeFatColumnMode(unsigned line, unsigned column) + { + ASSERT(line <= MaxFatColumnModeLine); + ASSERT(column <= MaxFatColumnModeColumn); + position = ((line & FatColumnModeLineMask) << FatColumnModeLineShift | (column & FatColumnModeColumnMask)); + } + + void decodeFatLineMode(unsigned& line, unsigned& column) + { + line = (position >> FatLineModeLineShift) & FatLineModeLineMask; + column = position & FatLineModeColumnMask; + } + + void decodeFatColumnMode(unsigned& line, unsigned& column) + { + line = (position >> FatColumnModeLineShift) & FatColumnModeLineMask; + column = position & FatColumnModeColumnMask; + } + + uint32_t instructionOffset : 25; + uint32_t startOffset : 7; + uint32_t divotPoint : 25; + uint32_t endOffset : 7; + uint32_t mode : 2; + uint32_t position : 30; +}; + +} // namespace JSC + +#endif // ExpressionRangeInfo_h + diff --git a/Source/JavaScriptCore/bytecode/FullBytecodeLiveness.h b/Source/JavaScriptCore/bytecode/FullBytecodeLiveness.h new file mode 100644 index 000000000..b22198a00 --- /dev/null +++ b/Source/JavaScriptCore/bytecode/FullBytecodeLiveness.h @@ -0,0 +1,59 @@ +/* + * Copyright (C) 2013, 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. 
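A sketch of how a caller would choose among the three modes using the limits above (the helper name is illustrative; the real selection logic lives wherever these records are built):

    static unsigned modeForLocation(unsigned line, unsigned column)
    {
        using Info = JSC::ExpressionRangeInfo;
        if (line <= Info::MaxFatLineModeLine && column <= Info::MaxFatLineModeColumn)
            return Info::FatLineMode; // 22-bit line, 8-bit column
        if (line <= Info::MaxFatColumnModeLine && column <= Info::MaxFatColumnModeColumn)
            return Info::FatColumnMode; // 8-bit line, 22-bit column
        return Info::FatLineAndColumnMode; // full 32/32 via the FatPosition vector
    }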
+ */ + +#ifndef FullBytecodeLiveness_h +#define FullBytecodeLiveness_h + +#include <wtf/FastBitVector.h> + +namespace JSC { + +class BytecodeLivenessAnalysis; + +typedef HashMap<unsigned, FastBitVector, WTF::IntHash<unsigned>, WTF::UnsignedWithZeroKeyHashTraits<unsigned>> BytecodeToBitmapMap; + +class FullBytecodeLiveness { + WTF_MAKE_FAST_ALLOCATED; +public: + const FastBitVector& getLiveness(unsigned bytecodeIndex) const + { + return m_map[bytecodeIndex]; + } + + bool operandIsLive(int operand, unsigned bytecodeIndex) const + { + return operandIsAlwaysLive(operand) || operandThatIsNotAlwaysLiveIsLive(getLiveness(bytecodeIndex), operand); + } + +private: + friend class BytecodeLivenessAnalysis; + + Vector<FastBitVector, 0, UnsafeVectorOverflow> m_map; +}; + +} // namespace JSC + +#endif // FullBytecodeLiveness_h + diff --git a/Source/JavaScriptCore/bytecode/GetByIdStatus.cpp b/Source/JavaScriptCore/bytecode/GetByIdStatus.cpp new file mode 100644 index 000000000..89e5035f3 --- /dev/null +++ b/Source/JavaScriptCore/bytecode/GetByIdStatus.cpp @@ -0,0 +1,359 @@ +/* + * Copyright (C) 2012-2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "GetByIdStatus.h" + +#include "AccessorCallJITStubRoutine.h" +#include "CodeBlock.h" +#include "ComplexGetStatus.h" +#include "JSCInlines.h" +#include "JSScope.h" +#include "LLIntData.h" +#include "LowLevelInterpreter.h" +#include "PolymorphicGetByIdList.h" +#include <wtf/ListDump.h> + +namespace JSC { + +bool GetByIdStatus::appendVariant(const GetByIdVariant& variant) +{ + // Attempt to merge this variant with an already existing variant. + for (unsigned i = 0; i < m_variants.size(); ++i) { + if (m_variants[i].attemptToMerge(variant)) + return true; + } + + // Make sure there is no overlap. We should have pruned out opportunities for + // overlap but it's possible that an inline cache got into a weird state. We are + // defensive and bail if we detect crazy. 
+ for (unsigned i = 0; i < m_variants.size(); ++i) { + if (m_variants[i].structureSet().overlaps(variant.structureSet())) + return false; + } + + m_variants.append(variant); + return true; +} + +#if ENABLE(DFG_JIT) +bool GetByIdStatus::hasExitSite(const ConcurrentJITLocker& locker, CodeBlock* profiledBlock, unsigned bytecodeIndex) +{ + return profiledBlock->hasExitSite(locker, DFG::FrequentExitSite(bytecodeIndex, BadCache)) + || profiledBlock->hasExitSite(locker, DFG::FrequentExitSite(bytecodeIndex, BadConstantCache)); +} +#endif + +GetByIdStatus GetByIdStatus::computeFromLLInt(CodeBlock* profiledBlock, unsigned bytecodeIndex, UniquedStringImpl* uid) +{ + UNUSED_PARAM(profiledBlock); + UNUSED_PARAM(bytecodeIndex); + UNUSED_PARAM(uid); + Instruction* instruction = profiledBlock->instructions().begin() + bytecodeIndex; + + if (instruction[0].u.opcode == LLInt::getOpcode(op_get_array_length)) + return GetByIdStatus(NoInformation, false); + + Structure* structure = instruction[4].u.structure.get(); + if (!structure) + return GetByIdStatus(NoInformation, false); + + if (structure->takesSlowPathInDFGForImpureProperty()) + return GetByIdStatus(NoInformation, false); + + unsigned attributesIgnored; + PropertyOffset offset = structure->getConcurrently(uid, attributesIgnored); + if (!isValidOffset(offset)) + return GetByIdStatus(NoInformation, false); + + return GetByIdStatus(Simple, false, GetByIdVariant(StructureSet(structure), offset)); +} + +GetByIdStatus GetByIdStatus::computeFor(CodeBlock* profiledBlock, StubInfoMap& map, unsigned bytecodeIndex, UniquedStringImpl* uid) +{ + ConcurrentJITLocker locker(profiledBlock->m_lock); + + GetByIdStatus result; + +#if ENABLE(DFG_JIT) + result = computeForStubInfoWithoutExitSiteFeedback( + locker, profiledBlock, map.get(CodeOrigin(bytecodeIndex)), uid, + CallLinkStatus::computeExitSiteData(locker, profiledBlock, bytecodeIndex)); + + if (!result.takesSlowPath() + && hasExitSite(locker, profiledBlock, bytecodeIndex)) + return GetByIdStatus(result.makesCalls() ? MakesCalls : TakesSlowPath, true); +#else + UNUSED_PARAM(map); +#endif + + if (!result) + return computeFromLLInt(profiledBlock, bytecodeIndex, uid); + + return result; +} + +#if ENABLE(JIT) +GetByIdStatus GetByIdStatus::computeForStubInfo(const ConcurrentJITLocker& locker, CodeBlock* profiledBlock, StructureStubInfo* stubInfo, CodeOrigin codeOrigin, UniquedStringImpl* uid) +{ + GetByIdStatus result = GetByIdStatus::computeForStubInfoWithoutExitSiteFeedback( + locker, profiledBlock, stubInfo, uid, + CallLinkStatus::computeExitSiteData(locker, profiledBlock, codeOrigin.bytecodeIndex)); + + if (!result.takesSlowPath() && GetByIdStatus::hasExitSite(locker, profiledBlock, codeOrigin.bytecodeIndex)) + return GetByIdStatus(result.makesCalls() ? 
GetByIdStatus::MakesCalls : GetByIdStatus::TakesSlowPath, true); + return result; +} +#endif // ENABLE(JIT) + +#if ENABLE(JIT) +GetByIdStatus GetByIdStatus::computeForStubInfoWithoutExitSiteFeedback( + const ConcurrentJITLocker& locker, CodeBlock* profiledBlock, StructureStubInfo* stubInfo, UniquedStringImpl* uid, + CallLinkStatus::ExitSiteData callExitSiteData) +{ + if (!stubInfo) + return GetByIdStatus(NoInformation); + + if (!stubInfo->seen) + return GetByIdStatus(NoInformation); + + PolymorphicGetByIdList* list = 0; + State slowPathState = TakesSlowPath; + if (stubInfo->accessType == access_get_by_id_list) { + list = stubInfo->u.getByIdList.list; + for (unsigned i = 0; i < list->size(); ++i) { + const GetByIdAccess& access = list->at(i); + if (access.doesCalls()) + slowPathState = MakesCalls; + } + } + + if (stubInfo->tookSlowPath) + return GetByIdStatus(slowPathState); + + // Finally figure out if we can derive an access strategy. + GetByIdStatus result; + result.m_state = Simple; + result.m_wasSeenInJIT = true; // This is interesting for bytecode dumping only. + switch (stubInfo->accessType) { + case access_unset: + return GetByIdStatus(NoInformation); + + case access_get_by_id_self: { + Structure* structure = stubInfo->u.getByIdSelf.baseObjectStructure.get(); + if (structure->takesSlowPathInDFGForImpureProperty()) + return GetByIdStatus(slowPathState, true); + unsigned attributesIgnored; + GetByIdVariant variant; + variant.m_offset = structure->getConcurrently(uid, attributesIgnored); + if (!isValidOffset(variant.m_offset)) + return GetByIdStatus(slowPathState, true); + + variant.m_structureSet.add(structure); + bool didAppend = result.appendVariant(variant); + ASSERT_UNUSED(didAppend, didAppend); + return result; + } + + case access_get_by_id_list: { + for (unsigned listIndex = 0; listIndex < list->size(); ++listIndex) { + Structure* structure = list->at(listIndex).structure(); + + ComplexGetStatus complexGetStatus = ComplexGetStatus::computeFor( + structure, list->at(listIndex).conditionSet(), uid); + + switch (complexGetStatus.kind()) { + case ComplexGetStatus::ShouldSkip: + continue; + + case ComplexGetStatus::TakesSlowPath: + return GetByIdStatus(slowPathState, true); + + case ComplexGetStatus::Inlineable: { + std::unique_ptr<CallLinkStatus> callLinkStatus; + switch (list->at(listIndex).type()) { + case GetByIdAccess::SimpleInline: + case GetByIdAccess::SimpleStub: { + break; + } + case GetByIdAccess::Getter: { + AccessorCallJITStubRoutine* stub = static_cast<AccessorCallJITStubRoutine*>( + list->at(listIndex).stubRoutine()); + callLinkStatus = std::make_unique<CallLinkStatus>( + CallLinkStatus::computeFor( + locker, profiledBlock, *stub->m_callLinkInfo, callExitSiteData)); + break; + } + case GetByIdAccess::SimpleMiss: + case GetByIdAccess::CustomGetter: + case GetByIdAccess::WatchedStub:{ + // FIXME: It would be totally sweet to support this at some point in the future. 
+ // https://bugs.webkit.org/show_bug.cgi?id=133052 + return GetByIdStatus(slowPathState, true); + } + default: + RELEASE_ASSERT_NOT_REACHED(); + } + + GetByIdVariant variant( + StructureSet(structure), complexGetStatus.offset(), + complexGetStatus.conditionSet(), WTF::move(callLinkStatus)); + + if (!result.appendVariant(variant)) + return GetByIdStatus(slowPathState, true); + break; + } } + } + + return result; + } + + default: + return GetByIdStatus(slowPathState, true); + } + + RELEASE_ASSERT_NOT_REACHED(); + return GetByIdStatus(); +} +#endif // ENABLE(JIT) + +GetByIdStatus GetByIdStatus::computeFor( + CodeBlock* profiledBlock, CodeBlock* dfgBlock, StubInfoMap& baselineMap, + StubInfoMap& dfgMap, CodeOrigin codeOrigin, UniquedStringImpl* uid) +{ +#if ENABLE(DFG_JIT) + if (dfgBlock) { + CallLinkStatus::ExitSiteData exitSiteData; + { + ConcurrentJITLocker locker(profiledBlock->m_lock); + exitSiteData = CallLinkStatus::computeExitSiteData( + locker, profiledBlock, codeOrigin.bytecodeIndex); + } + + GetByIdStatus result; + { + ConcurrentJITLocker locker(dfgBlock->m_lock); + result = computeForStubInfoWithoutExitSiteFeedback( + locker, dfgBlock, dfgMap.get(codeOrigin), uid, exitSiteData); + } + + if (result.takesSlowPath()) + return result; + + { + ConcurrentJITLocker locker(profiledBlock->m_lock); + if (hasExitSite(locker, profiledBlock, codeOrigin.bytecodeIndex)) + return GetByIdStatus(TakesSlowPath, true); + } + + if (result.isSet()) + return result; + } +#else + UNUSED_PARAM(dfgBlock); + UNUSED_PARAM(dfgMap); +#endif + + return computeFor(profiledBlock, baselineMap, codeOrigin.bytecodeIndex, uid); +} + +GetByIdStatus GetByIdStatus::computeFor(const StructureSet& set, UniquedStringImpl* uid) +{ + // For now we only handle the super simple self access case. We could handle the + // prototype case in the future. + + if (set.isEmpty()) + return GetByIdStatus(); + + if (parseIndex(*uid)) + return GetByIdStatus(TakesSlowPath); + + GetByIdStatus result; + result.m_state = Simple; + result.m_wasSeenInJIT = false; + for (unsigned i = 0; i < set.size(); ++i) { + Structure* structure = set[i]; + if (structure->typeInfo().overridesGetOwnPropertySlot() && structure->typeInfo().type() != GlobalObjectType) + return GetByIdStatus(TakesSlowPath); + + if (!structure->propertyAccessesAreCacheable()) + return GetByIdStatus(TakesSlowPath); + + unsigned attributes; + PropertyOffset offset = structure->getConcurrently(uid, attributes); + if (!isValidOffset(offset)) + return GetByIdStatus(TakesSlowPath); // It's probably a prototype lookup. Give up on life for now, even though we could totally be way smarter about it. + if (attributes & Accessor) + return GetByIdStatus(MakesCalls); // We could be smarter here, like strength-reducing this to a Call.
+ + if (!result.appendVariant(GetByIdVariant(structure, offset))) + return GetByIdStatus(TakesSlowPath); + } + + return result; +} + +bool GetByIdStatus::makesCalls() const +{ + switch (m_state) { + case NoInformation: + case TakesSlowPath: + return false; + case Simple: + for (unsigned i = m_variants.size(); i--;) { + if (m_variants[i].callLinkStatus()) + return true; + } + return false; + case MakesCalls: + return true; + } + RELEASE_ASSERT_NOT_REACHED(); + + return false; +} + +void GetByIdStatus::dump(PrintStream& out) const +{ + out.print("("); + switch (m_state) { + case NoInformation: + out.print("NoInformation"); + break; + case Simple: + out.print("Simple"); + break; + case TakesSlowPath: + out.print("TakesSlowPath"); + break; + case MakesCalls: + out.print("MakesCalls"); + break; + } + out.print(", ", listDump(m_variants), ", seenInJIT = ", m_wasSeenInJIT, ")"); +} + +} // namespace JSC + diff --git a/Source/JavaScriptCore/bytecode/GetByIdStatus.h b/Source/JavaScriptCore/bytecode/GetByIdStatus.h new file mode 100644 index 000000000..d7f0ae496 --- /dev/null +++ b/Source/JavaScriptCore/bytecode/GetByIdStatus.h @@ -0,0 +1,118 @@ +/* + * Copyright (C) 2012, 2013, 2014 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef GetByIdStatus_h +#define GetByIdStatus_h + +#include "CallLinkStatus.h" +#include "CodeOrigin.h" +#include "ConcurrentJITLock.h" +#include "ExitingJITType.h" +#include "GetByIdVariant.h" +#include "StructureStubInfo.h" + +namespace JSC { + +class CodeBlock; + +class GetByIdStatus { +public: + enum State { + NoInformation, // It's uncached so we have no information. + Simple, // It's cached for a simple access to a known object property with + // a possible structure chain and a possible specific value. + TakesSlowPath, // It's known to often take slow path. + MakesCalls // It's known to take paths that make calls. 
+ }; + + GetByIdStatus() + : m_state(NoInformation) + { + } + + explicit GetByIdStatus(State state) + : m_state(state) + { + ASSERT(state == NoInformation || state == TakesSlowPath || state == MakesCalls); + } + + GetByIdStatus( + State state, bool wasSeenInJIT, const GetByIdVariant& variant = GetByIdVariant()) + : m_state(state) + , m_wasSeenInJIT(wasSeenInJIT) + { + ASSERT((state == Simple) == variant.isSet()); + m_variants.append(variant); + } + + static GetByIdStatus computeFor(CodeBlock*, StubInfoMap&, unsigned bytecodeIndex, UniquedStringImpl* uid); + static GetByIdStatus computeFor(const StructureSet&, UniquedStringImpl* uid); + + static GetByIdStatus computeFor(CodeBlock* baselineBlock, CodeBlock* dfgBlock, StubInfoMap& baselineMap, StubInfoMap& dfgMap, CodeOrigin, UniquedStringImpl* uid); + +#if ENABLE(JIT) + static GetByIdStatus computeForStubInfo(const ConcurrentJITLocker&, CodeBlock* baselineBlock, StructureStubInfo*, CodeOrigin, UniquedStringImpl* uid); +#endif + + State state() const { return m_state; } + + bool isSet() const { return m_state != NoInformation; } + bool operator!() const { return !isSet(); } + bool isSimple() const { return m_state == Simple; } + + size_t numVariants() const { return m_variants.size(); } + const Vector<GetByIdVariant, 1>& variants() const { return m_variants; } + const GetByIdVariant& at(size_t index) const { return m_variants[index]; } + const GetByIdVariant& operator[](size_t index) const { return at(index); } + + bool takesSlowPath() const { return m_state == TakesSlowPath || m_state == MakesCalls; } + bool makesCalls() const; + + bool wasSeenInJIT() const { return m_wasSeenInJIT; } + + void dump(PrintStream&) const; + +private: +#if ENABLE(DFG_JIT) + static bool hasExitSite(const ConcurrentJITLocker&, CodeBlock*, unsigned bytecodeIndex); +#endif +#if ENABLE(JIT) + static GetByIdStatus computeForStubInfoWithoutExitSiteFeedback( + const ConcurrentJITLocker&, CodeBlock* profiledBlock, StructureStubInfo*, + UniquedStringImpl* uid, CallLinkStatus::ExitSiteData); +#endif + static GetByIdStatus computeFromLLInt(CodeBlock*, unsigned bytecodeIndex, UniquedStringImpl* uid); + + bool appendVariant(const GetByIdVariant&); + + State m_state; + Vector<GetByIdVariant, 1> m_variants; + bool m_wasSeenInJIT; +}; + +} // namespace JSC + +#endif // GetByIdStatus_h + diff --git a/Source/JavaScriptCore/bytecode/GetByIdVariant.cpp b/Source/JavaScriptCore/bytecode/GetByIdVariant.cpp new file mode 100644 index 000000000..ea6fa12fb --- --- /dev/null +++ b/Source/JavaScriptCore/bytecode/GetByIdVariant.cpp @@ -0,0 +1,114 @@ +/* + * Copyright (C) 2014, 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC.
OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "GetByIdVariant.h" + +#include "CallLinkStatus.h" +#include "JSCInlines.h" +#include <wtf/ListDump.h> + +namespace JSC { + +GetByIdVariant::GetByIdVariant( + const StructureSet& structureSet, PropertyOffset offset, + const ObjectPropertyConditionSet& conditionSet, + std::unique_ptr<CallLinkStatus> callLinkStatus) + : m_structureSet(structureSet) + , m_conditionSet(conditionSet) + , m_offset(offset) + , m_callLinkStatus(WTF::move(callLinkStatus)) +{ + if (!structureSet.size()) { + ASSERT(offset == invalidOffset); + ASSERT(conditionSet.isEmpty()); + } +} + +GetByIdVariant::~GetByIdVariant() { } + +GetByIdVariant::GetByIdVariant(const GetByIdVariant& other) + : GetByIdVariant() +{ + *this = other; +} + +GetByIdVariant& GetByIdVariant::operator=(const GetByIdVariant& other) +{ + m_structureSet = other.m_structureSet; + m_conditionSet = other.m_conditionSet; + m_offset = other.m_offset; + if (other.m_callLinkStatus) + m_callLinkStatus = std::make_unique<CallLinkStatus>(*other.m_callLinkStatus); + else + m_callLinkStatus = nullptr; + return *this; +} + +bool GetByIdVariant::attemptToMerge(const GetByIdVariant& other) +{ + if (m_offset != other.m_offset) + return false; + if (m_callLinkStatus || other.m_callLinkStatus) + return false; + + if (m_conditionSet.isEmpty() != other.m_conditionSet.isEmpty()) + return false; + + ObjectPropertyConditionSet mergedConditionSet; + if (!m_conditionSet.isEmpty()) { + mergedConditionSet = m_conditionSet.mergedWith(other.m_conditionSet); + if (!mergedConditionSet.isValid() || !mergedConditionSet.hasOneSlotBaseCondition()) + return false; + } + m_conditionSet = mergedConditionSet; + + m_structureSet.merge(other.m_structureSet); + + return true; +} + +void GetByIdVariant::dump(PrintStream& out) const +{ + dumpInContext(out, 0); +} + +void GetByIdVariant::dumpInContext(PrintStream& out, DumpContext* context) const +{ + if (!isSet()) { + out.print("<empty>"); + return; + } + + out.print( + "<", inContext(structureSet(), context), ", ", inContext(m_conditionSet, context)); + out.print(", offset = ", offset()); + if (m_callLinkStatus) + out.print(", call = ", *m_callLinkStatus); + out.print(">"); +} + +} // namespace JSC + diff --git a/Source/JavaScriptCore/bytecode/GetByIdVariant.h b/Source/JavaScriptCore/bytecode/GetByIdVariant.h new file mode 100644 index 000000000..714fb9843 --- /dev/null +++ b/Source/JavaScriptCore/bytecode/GetByIdVariant.h @@ -0,0 +1,81 @@ +/* + * Copyright (C) 2014, 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. 
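attemptToMerge() above collapses two variants only when they agree on offset, carry no getter call, and have compatibly shaped condition sets; a sketch of the observable behavior (s1 and s2 stand for two distinct Structure pointers):

    JSC::GetByIdVariant a(JSC::StructureSet(s1), 16); // offset 16, no conditions, no call
    JSC::GetByIdVariant b(JSC::StructureSet(s2), 16);
    bool merged = a.attemptToMerge(b);
    // merged == true: a.structureSet() now holds both s1 and s2, so a single
    // variant covers both shapes. A differing offset, or a callLinkStatus on
    // either side, would make attemptToMerge() return false instead.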
+ * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef GetByIdVariant_h +#define GetByIdVariant_h + +#include "CallLinkStatus.h" +#include "JSCJSValue.h" +#include "ObjectPropertyConditionSet.h" +#include "PropertyOffset.h" +#include "StructureSet.h" + +namespace JSC { + +class CallLinkStatus; +class GetByIdStatus; +struct DumpContext; + +class GetByIdVariant { +public: + GetByIdVariant( + const StructureSet& structureSet = StructureSet(), PropertyOffset offset = invalidOffset, + const ObjectPropertyConditionSet& = ObjectPropertyConditionSet(), + std::unique_ptr<CallLinkStatus> callLinkStatus = nullptr); + + ~GetByIdVariant(); + + GetByIdVariant(const GetByIdVariant&); + GetByIdVariant& operator=(const GetByIdVariant&); + + bool isSet() const { return !!m_structureSet.size(); } + bool operator!() const { return !isSet(); } + const StructureSet& structureSet() const { return m_structureSet; } + StructureSet& structureSet() { return m_structureSet; } + + // A non-empty condition set means that this is a prototype load. + const ObjectPropertyConditionSet& conditionSet() const { return m_conditionSet; } + + PropertyOffset offset() const { return m_offset; } + CallLinkStatus* callLinkStatus() const { return m_callLinkStatus.get(); } + + bool attemptToMerge(const GetByIdVariant& other); + + void dump(PrintStream&) const; + void dumpInContext(PrintStream&, DumpContext*) const; + +private: + friend class GetByIdStatus; + + StructureSet m_structureSet; + ObjectPropertyConditionSet m_conditionSet; + PropertyOffset m_offset; + std::unique_ptr<CallLinkStatus> m_callLinkStatus; +}; + +} // namespace JSC + +#endif // GetByIdVariant_h + diff --git a/Source/JavaScriptCore/bytecode/HandlerInfo.h b/Source/JavaScriptCore/bytecode/HandlerInfo.h new file mode 100644 index 000000000..acdda08ed --- /dev/null +++ b/Source/JavaScriptCore/bytecode/HandlerInfo.h @@ -0,0 +1,101 @@ +/* + * Copyright (C) 2012 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. 
OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef HandlerInfo_h +#define HandlerInfo_h + +#include "CodeLocation.h" + +namespace JSC { + +enum class HandlerType { + Illegal = 0, + Catch = 1, + Finally = 2, + SynthesizedFinally = 3 +}; + +struct HandlerInfoBase { + HandlerType type() const { return static_cast<HandlerType>(typeBits); } + void setType(HandlerType type) { typeBits = static_cast<uint32_t>(type); } + + const char* typeName() + { + switch (type()) { + case HandlerType::Catch: + return "catch"; + case HandlerType::Finally: + return "finally"; + case HandlerType::SynthesizedFinally: + return "synthesized finally"; + default: + ASSERT_NOT_REACHED(); + } + return nullptr; + } + + bool isCatchHandler() const { return type() == HandlerType::Catch; } + + uint32_t start; + uint32_t end; + uint32_t target; + uint32_t typeBits : 2; // HandlerType +}; + +struct UnlinkedHandlerInfo : public HandlerInfoBase { + UnlinkedHandlerInfo(uint32_t start, uint32_t end, uint32_t target, HandlerType handlerType) + { + this->start = start; + this->end = end; + this->target = target; + setType(handlerType); + ASSERT(type() == handlerType); + } +}; + +struct HandlerInfo : public HandlerInfoBase { + void initialize(const UnlinkedHandlerInfo& unlinkedInfo) + { + start = unlinkedInfo.start; + end = unlinkedInfo.end; + target = unlinkedInfo.target; + typeBits = unlinkedInfo.typeBits; + } + +#if ENABLE(JIT) + void initialize(const UnlinkedHandlerInfo& unlinkedInfo, CodeLocationLabel label) + { + initialize(unlinkedInfo); + nativeCode = label; + } + + CodeLocationLabel nativeCode; +#endif +}; + +} // namespace JSC + +#endif // HandlerInfo_h + diff --git a/Source/JavaScriptCore/bytecode/InlineCallFrameSet.cpp b/Source/JavaScriptCore/bytecode/InlineCallFrameSet.cpp new file mode 100644 index 000000000..82e0f7fd1 --- /dev/null +++ b/Source/JavaScriptCore/bytecode/InlineCallFrameSet.cpp @@ -0,0 +1,47 @@ +/* + * Copyright (C) 2013 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. 
OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "InlineCallFrameSet.h" +#include "JSCInlines.h" + +namespace JSC { + +InlineCallFrameSet::InlineCallFrameSet() { } +InlineCallFrameSet::~InlineCallFrameSet() { } + +InlineCallFrame* InlineCallFrameSet::add() +{ + return m_frames.add(); +} + +void InlineCallFrameSet::visitAggregate(SlotVisitor& visitor) +{ + for (InlineCallFrame* callFrame : m_frames) + callFrame->visitAggregate(visitor); +} + +} // namespace JSC + diff --git a/Source/JavaScriptCore/bytecode/InlineCallFrameSet.h b/Source/JavaScriptCore/bytecode/InlineCallFrameSet.h new file mode 100644 index 000000000..f9378e0a5 --- /dev/null +++ b/Source/JavaScriptCore/bytecode/InlineCallFrameSet.h @@ -0,0 +1,57 @@ +/* + * Copyright (C) 2013 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef InlineCallFrameSet_h +#define InlineCallFrameSet_h + +#include "CodeOrigin.h" +#include <wtf/Bag.h> +#include <wtf/RefCounted.h> + +namespace JSC { + +class InlineCallFrameSet : public RefCounted<InlineCallFrameSet> { +public: + InlineCallFrameSet(); + ~InlineCallFrameSet(); + + bool isEmpty() const { return m_frames.isEmpty(); } + + InlineCallFrame* add(); + + typedef Bag<InlineCallFrame>::iterator iterator; + iterator begin() { return m_frames.begin(); } + iterator end() { return m_frames.end(); } + + void visitAggregate(SlotVisitor&); + +private: + Bag<InlineCallFrame> m_frames; +}; + +} // namespace JSC + +#endif // InlineCallFrameSet_h + diff --git a/Source/JavaScriptCore/bytecode/Instruction.h b/Source/JavaScriptCore/bytecode/Instruction.h new file mode 100644 index 000000000..c20a4f728 --- /dev/null +++ b/Source/JavaScriptCore/bytecode/Instruction.h @@ -0,0 +1,143 @@ +/* + * Copyright (C) 2008, 2012-2015 Apple Inc. All rights reserved. 
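InlineCallFrameSet hands out frames bump-allocated in a Bag, so pointers stay stable as the set grows; a brief usage sketch (the counting loop is illustrative):

    RefPtr<JSC::InlineCallFrameSet> frames = adoptRef(new JSC::InlineCallFrameSet());
    JSC::InlineCallFrame* frame = frames->add(); // a fresh frame; the caller fills it in
    unsigned count = 0;
    for (JSC::InlineCallFrame* f : *frames) {
        UNUSED_PARAM(f);
        ++count; // iteration visits every frame ever add()ed; Bag never moves them
    }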
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Apple Inc. ("Apple") nor the names of + * its contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef Instruction_h +#define Instruction_h + +#include "BasicBlockLocation.h" +#include "MacroAssembler.h" +#include "Opcode.h" +#include "SymbolTable.h" +#include "TypeLocation.h" +#include "PropertySlot.h" +#include "SpecialPointer.h" +#include "Structure.h" +#include "StructureChain.h" +#include "ToThisStatus.h" +#include "VirtualRegister.h" +#include <wtf/VectorTraits.h> + +namespace JSC { + +class ArrayAllocationProfile; +class ArrayProfile; +class ObjectAllocationProfile; +class WatchpointSet; +struct LLIntCallLinkInfo; +struct ValueProfile; + +struct Instruction { + Instruction() + { + u.jsCell.clear(); + } + + Instruction(Opcode opcode) + { +#if !ENABLE(COMPUTED_GOTO_OPCODES) + // We have to initialize one of the pointer members to ensure that + // the entire struct is initialized, when opcode is not a pointer. + u.jsCell.clear(); +#endif + u.opcode = opcode; + } + + Instruction(int operand) + { + // We have to initialize one of the pointer members to ensure that + // the entire struct is initialized in 64-bit. 
+ u.jsCell.clear(); + u.operand = operand; + } + + Instruction(VM& vm, JSCell* owner, Structure* structure) + { + u.structure.clear(); + u.structure.set(vm, owner, structure); + } + Instruction(VM& vm, JSCell* owner, StructureChain* structureChain) + { + u.structureChain.clear(); + u.structureChain.set(vm, owner, structureChain); + } + Instruction(VM& vm, JSCell* owner, JSCell* jsCell) + { + u.jsCell.clear(); + u.jsCell.set(vm, owner, jsCell); + } + + Instruction(PropertySlot::GetValueFunc getterFunc) { u.getterFunc = getterFunc; } + + Instruction(LLIntCallLinkInfo* callLinkInfo) { u.callLinkInfo = callLinkInfo; } + Instruction(ValueProfile* profile) { u.profile = profile; } + Instruction(ArrayProfile* profile) { u.arrayProfile = profile; } + Instruction(ArrayAllocationProfile* profile) { u.arrayAllocationProfile = profile; } + Instruction(ObjectAllocationProfile* profile) { u.objectAllocationProfile = profile; } + Instruction(WriteBarrier<Unknown>* variablePointer) { u.variablePointer = variablePointer; } + Instruction(Special::Pointer pointer) { u.specialPointer = pointer; } + Instruction(UniquedStringImpl* uid) { u.uid = uid; } + Instruction(bool* predicatePointer) { u.predicatePointer = predicatePointer; } + + union { + Opcode opcode; + int operand; + WriteBarrierBase<Structure> structure; + WriteBarrierBase<SymbolTable> symbolTable; + WriteBarrierBase<StructureChain> structureChain; + WriteBarrierBase<JSCell> jsCell; + WriteBarrier<Unknown>* variablePointer; + Special::Pointer specialPointer; + PropertySlot::GetValueFunc getterFunc; + LLIntCallLinkInfo* callLinkInfo; + UniquedStringImpl* uid; + ValueProfile* profile; + ArrayProfile* arrayProfile; + ArrayAllocationProfile* arrayAllocationProfile; + ObjectAllocationProfile* objectAllocationProfile; + WatchpointSet* watchpointSet; + void* pointer; + bool* predicatePointer; + ToThisStatus toThisStatus; + TypeLocation* location; + BasicBlockLocation* basicBlockLocation; + } u; + +private: + Instruction(StructureChain*); + Instruction(Structure*); +}; + +} // namespace JSC + +namespace WTF { + +template<> struct VectorTraits<JSC::Instruction> : VectorTraitsBase<true, JSC::Instruction> { }; + +} // namespace WTF + +#endif // Instruction_h diff --git a/Source/JavaScriptCore/bytecode/JumpTable.cpp b/Source/JavaScriptCore/bytecode/JumpTable.cpp new file mode 100644 index 000000000..e22ad03c9 --- /dev/null +++ b/Source/JavaScriptCore/bytecode/JumpTable.cpp @@ -0,0 +1,47 @@ +/* + * Copyright (C) 2008 Apple Inc. All rights reserved. + * Copyright (C) 2008 Cameron Zwarich <cwzwarich@uwaterloo.ca> + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Apple Inc. ("Apple") nor the names of + * its contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
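Each bytecode instruction occupies one Opcode word followed by its operand words, all stored in the union above; a hedged sketch of building such a stream (assumes code inside namespace JSC; op_add is a real opcode, the operand values are illustrative):

    Vector<Instruction> stream;
    stream.append(Instruction(LLInt::getOpcode(op_add))); // opcode word
    stream.append(Instruction(1)); // dst operand
    stream.append(Instruction(2)); // lhs operand
    stream.append(Instruction(3)); // rhs operand
    // Consumers then read stream[0].u.opcode and stream[1..3].u.operand.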
IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "JumpTable.h" + +#include <wtf/text/StringHash.h> + +namespace JSC { + +int32_t SimpleJumpTable::offsetForValue(int32_t value, int32_t defaultOffset) +{ + if (value >= min && static_cast<uint32_t>(value - min) < branchOffsets.size()) { + int32_t offset = branchOffsets[value - min]; + if (offset) + return offset; + } + return defaultOffset; +} + +} // namespace JSC diff --git a/Source/JavaScriptCore/bytecode/JumpTable.h b/Source/JavaScriptCore/bytecode/JumpTable.h new file mode 100644 index 000000000..b83e842cb --- /dev/null +++ b/Source/JavaScriptCore/bytecode/JumpTable.h @@ -0,0 +1,122 @@ +/* + * Copyright (C) 2008, 2013 Apple Inc. All rights reserved. + * Copyright (C) 2008 Cameron Zwarich <cwzwarich@uwaterloo.ca> + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Apple Inc. ("Apple") nor the names of + * its contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef JumpTable_h +#define JumpTable_h + +#include "MacroAssembler.h" +#include <wtf/HashMap.h> +#include <wtf/Vector.h> +#include <wtf/text/StringImpl.h> + +namespace JSC { + + struct OffsetLocation { + int32_t branchOffset; +#if ENABLE(JIT) + CodeLocationLabel ctiOffset; +#endif + }; + + struct StringJumpTable { + typedef HashMap<RefPtr<StringImpl>, OffsetLocation> StringOffsetTable; + StringOffsetTable offsetTable; +#if ENABLE(JIT) + CodeLocationLabel ctiDefault; // FIXME: it should not be necessary to store this. 
+#endif + + inline int32_t offsetForValue(StringImpl* value, int32_t defaultOffset) + { + StringOffsetTable::const_iterator end = offsetTable.end(); + StringOffsetTable::const_iterator loc = offsetTable.find(value); + if (loc == end) + return defaultOffset; + return loc->value.branchOffset; + } + +#if ENABLE(JIT) + inline CodeLocationLabel ctiForValue(StringImpl* value) + { + StringOffsetTable::const_iterator end = offsetTable.end(); + StringOffsetTable::const_iterator loc = offsetTable.find(value); + if (loc == end) + return ctiDefault; + return loc->value.ctiOffset; + } +#endif + + void clear() + { + offsetTable.clear(); + } + }; + + struct SimpleJumpTable { + // FIXME: The two Vectors can be combined into one Vector<OffsetLocation> + Vector<int32_t> branchOffsets; + int32_t min; +#if ENABLE(JIT) + Vector<CodeLocationLabel> ctiOffsets; + CodeLocationLabel ctiDefault; +#endif + + int32_t offsetForValue(int32_t value, int32_t defaultOffset); + void add(int32_t key, int32_t offset) + { + if (!branchOffsets[key]) + branchOffsets[key] = offset; + } + +#if ENABLE(JIT) + void ensureCTITable() + { + ASSERT(ctiOffsets.isEmpty() || ctiOffsets.size() == branchOffsets.size()); + ctiOffsets.grow(branchOffsets.size()); + } + + inline CodeLocationLabel ctiForValue(int32_t value) + { + if (value >= min && static_cast<uint32_t>(value - min) < ctiOffsets.size()) + return ctiOffsets[value - min]; + return ctiDefault; + } +#endif + + void clear() + { + branchOffsets.clear(); +#if ENABLE(JIT) + ctiOffsets.clear(); +#endif + } + }; + +} // namespace JSC + +#endif // JumpTable_h diff --git a/Source/JavaScriptCore/bytecode/LLIntCallLinkInfo.h b/Source/JavaScriptCore/bytecode/LLIntCallLinkInfo.h new file mode 100644 index 000000000..2645dd5be --- /dev/null +++ b/Source/JavaScriptCore/bytecode/LLIntCallLinkInfo.h @@ -0,0 +1,66 @@ +/* + * Copyright (C) 2012 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
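A worked example of the dense-table lookup above: keys are stored at branchOffsets[key - min], and a zero slot means "no case here, take the default" (all values illustrative):

    JSC::SimpleJumpTable table;
    table.min = 3;
    table.branchOffsets.append(10); // key 3 -> offset 10
    table.branchOffsets.append(0);  // key 4 -> unset
    table.branchOffsets.append(24); // key 5 -> offset 24
    table.branchOffsets.append(31); // key 6 -> offset 31
    // table.offsetForValue(5, 99) == 24
    // table.offsetForValue(4, 99) == 99 (zero slot falls back to the default)
    // table.offsetForValue(42, 99) == 99 (out of the table's range)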
+ */ + +#ifndef LLIntCallLinkInfo_h +#define LLIntCallLinkInfo_h + +#include "JSFunction.h" +#include "MacroAssemblerCodeRef.h" +#include <wtf/SentinelLinkedList.h> + +namespace JSC { + +struct Instruction; + +struct LLIntCallLinkInfo : public BasicRawSentinelNode<LLIntCallLinkInfo> { + LLIntCallLinkInfo() + { + } + + ~LLIntCallLinkInfo() + { + if (isOnList()) + remove(); + } + + bool isLinked() { return !!callee; } + + void unlink() + { + callee.clear(); + machineCodeTarget = MacroAssemblerCodePtr(); + if (isOnList()) + remove(); + } + + WriteBarrier<JSFunction> callee; + WriteBarrier<JSFunction> lastSeenCallee; + MacroAssemblerCodePtr machineCodeTarget; +}; + +} // namespace JSC + +#endif // LLIntCallLinkInfo_h + diff --git a/Source/JavaScriptCore/bytecode/LazyOperandValueProfile.cpp b/Source/JavaScriptCore/bytecode/LazyOperandValueProfile.cpp new file mode 100644 index 000000000..de654db68 --- /dev/null +++ b/Source/JavaScriptCore/bytecode/LazyOperandValueProfile.cpp @@ -0,0 +1,100 @@ +/* + * Copyright (C) 2012, 2013 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "config.h" +#include "LazyOperandValueProfile.h" + +#include "JSCInlines.h" + +namespace JSC { + +CompressedLazyOperandValueProfileHolder::CompressedLazyOperandValueProfileHolder() { } +CompressedLazyOperandValueProfileHolder::~CompressedLazyOperandValueProfileHolder() { } + +void CompressedLazyOperandValueProfileHolder::computeUpdatedPredictions(const ConcurrentJITLocker& locker) +{ + if (!m_data) + return; + + for (unsigned i = 0; i < m_data->size(); ++i) + m_data->at(i).computeUpdatedPrediction(locker); +} + +LazyOperandValueProfile* CompressedLazyOperandValueProfileHolder::add( + const ConcurrentJITLocker&, const LazyOperandValueProfileKey& key) +{ + if (!m_data) + m_data = std::make_unique<LazyOperandValueProfile::List>(); + else { + for (unsigned i = 0; i < m_data->size(); ++i) { + if (m_data->at(i).key() == key) + return &m_data->at(i); + } + } + + m_data->append(LazyOperandValueProfile(key)); + return &m_data->last(); +} + +LazyOperandValueProfileParser::LazyOperandValueProfileParser() { } +LazyOperandValueProfileParser::~LazyOperandValueProfileParser() { } + +void LazyOperandValueProfileParser::initialize( + const ConcurrentJITLocker&, CompressedLazyOperandValueProfileHolder& holder) +{ + ASSERT(m_map.isEmpty()); + + if (!holder.m_data) + return; + + LazyOperandValueProfile::List& data = *holder.m_data; + for (unsigned i = 0; i < data.size(); ++i) + m_map.add(data[i].key(), &data[i]); +} + +LazyOperandValueProfile* LazyOperandValueProfileParser::getIfPresent( + const LazyOperandValueProfileKey& key) const +{ + HashMap<LazyOperandValueProfileKey, LazyOperandValueProfile*>::const_iterator iter = + m_map.find(key); + + if (iter == m_map.end()) + return 0; + + return iter->value; +} + +SpeculatedType LazyOperandValueProfileParser::prediction( + const ConcurrentJITLocker& locker, const LazyOperandValueProfileKey& key) const +{ + LazyOperandValueProfile* profile = getIfPresent(key); + if (!profile) + return SpecNone; + + return profile->computeUpdatedPrediction(locker); +} + +} // namespace JSC + diff --git a/Source/JavaScriptCore/bytecode/LazyOperandValueProfile.h b/Source/JavaScriptCore/bytecode/LazyOperandValueProfile.h new file mode 100644 index 000000000..74e4f3318 --- /dev/null +++ b/Source/JavaScriptCore/bytecode/LazyOperandValueProfile.h @@ -0,0 +1,188 @@ +/* + * Copyright (C) 2012, 2013 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. 
OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef LazyOperandValueProfile_h +#define LazyOperandValueProfile_h + +#include "ConcurrentJITLock.h" +#include "ValueProfile.h" +#include "VirtualRegister.h" +#include <wtf/HashMap.h> +#include <wtf/Noncopyable.h> +#include <wtf/SegmentedVector.h> + +namespace JSC { + +class ScriptExecutable; + +class LazyOperandValueProfileKey { +public: + LazyOperandValueProfileKey() + : m_bytecodeOffset(0) // 0 = empty value + , m_operand(VirtualRegister()) // not a valid operand index in our current scheme + { + } + + LazyOperandValueProfileKey(WTF::HashTableDeletedValueType) + : m_bytecodeOffset(1) // 1 = deleted value + , m_operand(VirtualRegister()) // not a valid operand index in our current scheme + { + } + + LazyOperandValueProfileKey(unsigned bytecodeOffset, VirtualRegister operand) + : m_bytecodeOffset(bytecodeOffset) + , m_operand(operand) + { + ASSERT(m_operand.isValid()); + } + + bool operator!() const + { + return !m_operand.isValid(); + } + + bool operator==(const LazyOperandValueProfileKey& other) const + { + return m_bytecodeOffset == other.m_bytecodeOffset + && m_operand == other.m_operand; + } + + unsigned hash() const + { + return WTF::intHash(m_bytecodeOffset) + m_operand.offset(); + } + + unsigned bytecodeOffset() const + { + ASSERT(!!*this); + return m_bytecodeOffset; + } + + VirtualRegister operand() const + { + ASSERT(!!*this); + return m_operand; + } + + bool isHashTableDeletedValue() const + { + return !m_operand.isValid() && m_bytecodeOffset; + } +private: + unsigned m_bytecodeOffset; + VirtualRegister m_operand; +}; + +struct LazyOperandValueProfileKeyHash { + static unsigned hash(const LazyOperandValueProfileKey& key) { return key.hash(); } + static bool equal( + const LazyOperandValueProfileKey& a, + const LazyOperandValueProfileKey& b) { return a == b; } + static const bool safeToCompareToEmptyOrDeleted = true; +}; + +} // namespace JSC + +namespace WTF { + +template<typename T> struct DefaultHash; +template<> struct DefaultHash<JSC::LazyOperandValueProfileKey> { + typedef JSC::LazyOperandValueProfileKeyHash Hash; +}; + +template<typename T> struct HashTraits; +template<> struct HashTraits<JSC::LazyOperandValueProfileKey> : public GenericHashTraits<JSC::LazyOperandValueProfileKey> { + static void constructDeletedValue(JSC::LazyOperandValueProfileKey& slot) { new (NotNull, &slot) JSC::LazyOperandValueProfileKey(HashTableDeletedValue); } + static bool isDeletedValue(const JSC::LazyOperandValueProfileKey& value) { return value.isHashTableDeletedValue(); } +}; + +} // namespace WTF + +namespace JSC { + +struct LazyOperandValueProfile : public MinimalValueProfile { + LazyOperandValueProfile() + : MinimalValueProfile() + , m_operand(VirtualRegister()) + { + } + + explicit LazyOperandValueProfile(const LazyOperandValueProfileKey& key) + : MinimalValueProfile(key.bytecodeOffset()) + , m_operand(key.operand()) + { + } + + LazyOperandValueProfileKey key() const + { + return LazyOperandValueProfileKey(m_bytecodeOffset, m_operand); + } + + VirtualRegister m_operand; + + typedef 
SegmentedVector<LazyOperandValueProfile, 8> List; +}; + +class LazyOperandValueProfileParser; + +class CompressedLazyOperandValueProfileHolder { + WTF_MAKE_NONCOPYABLE(CompressedLazyOperandValueProfileHolder); +public: + CompressedLazyOperandValueProfileHolder(); + ~CompressedLazyOperandValueProfileHolder(); + + void computeUpdatedPredictions(const ConcurrentJITLocker&); + + LazyOperandValueProfile* add( + const ConcurrentJITLocker&, const LazyOperandValueProfileKey& key); + +private: + friend class LazyOperandValueProfileParser; + std::unique_ptr<LazyOperandValueProfile::List> m_data; +}; + +class LazyOperandValueProfileParser { + WTF_MAKE_NONCOPYABLE(LazyOperandValueProfileParser); +public: + explicit LazyOperandValueProfileParser(); + ~LazyOperandValueProfileParser(); + + void initialize( + const ConcurrentJITLocker&, CompressedLazyOperandValueProfileHolder& holder); + + LazyOperandValueProfile* getIfPresent( + const LazyOperandValueProfileKey& key) const; + + SpeculatedType prediction( + const ConcurrentJITLocker&, const LazyOperandValueProfileKey& key) const; +private: + HashMap<LazyOperandValueProfileKey, LazyOperandValueProfile*> m_map; +}; + +} // namespace JSC + +#endif // LazyOperandValueProfile_h + + diff --git a/Source/JavaScriptCore/bytecode/MethodOfGettingAValueProfile.cpp b/Source/JavaScriptCore/bytecode/MethodOfGettingAValueProfile.cpp new file mode 100644 index 000000000..bec692ef7 --- /dev/null +++ b/Source/JavaScriptCore/bytecode/MethodOfGettingAValueProfile.cpp @@ -0,0 +1,74 @@ +/* + * Copyright (C) 2012, 2013 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "config.h" +#include "MethodOfGettingAValueProfile.h" + +#if ENABLE(DFG_JIT) + +#include "CodeBlock.h" +#include "JSCInlines.h" + +namespace JSC { + +MethodOfGettingAValueProfile MethodOfGettingAValueProfile::fromLazyOperand( + CodeBlock* codeBlock, const LazyOperandValueProfileKey& key) +{ + MethodOfGettingAValueProfile result; + result.m_kind = LazyOperand; + result.u.lazyOperand.codeBlock = codeBlock; + result.u.lazyOperand.bytecodeOffset = key.bytecodeOffset(); + result.u.lazyOperand.operand = key.operand().offset(); + return result; +} + +EncodedJSValue* MethodOfGettingAValueProfile::getSpecFailBucket(unsigned index) const +{ + switch (m_kind) { + case None: + return 0; + + case Ready: + return u.profile->specFailBucket(index); + + case LazyOperand: { + LazyOperandValueProfileKey key(u.lazyOperand.bytecodeOffset, VirtualRegister(u.lazyOperand.operand)); + + ConcurrentJITLocker locker(u.lazyOperand.codeBlock->m_lock); + LazyOperandValueProfile* profile = + u.lazyOperand.codeBlock->lazyOperandValueProfiles().add(locker, key); + return profile->specFailBucket(index); + } + + default: + RELEASE_ASSERT_NOT_REACHED(); + return 0; + } +} + +} // namespace JSC + +#endif // ENABLE(DFG_JIT) + diff --git a/Source/JavaScriptCore/bytecode/MethodOfGettingAValueProfile.h b/Source/JavaScriptCore/bytecode/MethodOfGettingAValueProfile.h new file mode 100644 index 000000000..846f8cf7a --- /dev/null +++ b/Source/JavaScriptCore/bytecode/MethodOfGettingAValueProfile.h @@ -0,0 +1,97 @@ +/* + * Copyright (C) 2012 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef MethodOfGettingAValueProfile_h +#define MethodOfGettingAValueProfile_h + +// This is guarded by ENABLE_DFG_JIT only because it uses some value profiles +// that are currently only used if the DFG is enabled (i.e. they are not +// available in the profile-only configuration). Hopefully someday all of +// these #if's will disappear... 
+#if ENABLE(DFG_JIT) + +#include "JSCJSValue.h" + +namespace JSC { + +class CodeBlock; +class LazyOperandValueProfileKey; +struct ValueProfile; + +class MethodOfGettingAValueProfile { +public: + MethodOfGettingAValueProfile() + : m_kind(None) + { + } + + explicit MethodOfGettingAValueProfile(ValueProfile* profile) + { + if (profile) { + m_kind = Ready; + u.profile = profile; + } else + m_kind = None; + } + + static MethodOfGettingAValueProfile fromLazyOperand( + CodeBlock*, const LazyOperandValueProfileKey&); + + bool operator!() const { return m_kind == None; } + + // This logically has a pointer to a "There exists X such that + // ValueProfileBase<X>". But since C++ does not have existential + // templates, I cannot return it. So instead, for any methods that + // users of this class would like to call, we'll just have to provide + // a method here that does it through an indirection. Or we could + // possibly just make ValueProfile less template-based. But last I + // tried that, it felt more yucky than this class. + + EncodedJSValue* getSpecFailBucket(unsigned index) const; + +private: + enum Kind { + None, + Ready, + LazyOperand + }; + + Kind m_kind; + union { + ValueProfile* profile; + struct { + CodeBlock* codeBlock; + unsigned bytecodeOffset; + int operand; + } lazyOperand; + } u; +}; + +} // namespace JSC + +#endif // ENABLE(DFG_JIT) + +#endif // MethodOfGettingAValueProfile_h + diff --git a/Source/JavaScriptCore/bytecode/ObjectAllocationProfile.h b/Source/JavaScriptCore/bytecode/ObjectAllocationProfile.h new file mode 100644 index 000000000..5fa706d25 --- /dev/null +++ b/Source/JavaScriptCore/bytecode/ObjectAllocationProfile.h @@ -0,0 +1,152 @@ +/* + * Copyright (C) 2013 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef ObjectAllocationProfile_h +#define ObjectAllocationProfile_h + +#include "VM.h" +#include "JSGlobalObject.h" +#include "ObjectPrototype.h" +#include "SlotVisitor.h" +#include "WriteBarrier.h" + +namespace JSC { + +class ObjectAllocationProfile { + friend class LLIntOffsetsExtractor; +public: + static ptrdiff_t offsetOfAllocator() { return OBJECT_OFFSETOF(ObjectAllocationProfile, m_allocator); } + static ptrdiff_t offsetOfStructure() { return OBJECT_OFFSETOF(ObjectAllocationProfile, m_structure); } + + ObjectAllocationProfile() + : m_allocator(0) + { + } + + bool isNull() { return !m_allocator; } + + void initialize(VM& vm, JSCell* owner, JSObject* prototype, unsigned inferredInlineCapacity) + { + ASSERT(!m_allocator); + ASSERT(!m_structure); + + unsigned inlineCapacity = 0; + if (inferredInlineCapacity < JSFinalObject::defaultInlineCapacity()) { + // Try to shrink the object based on static analysis. + inferredInlineCapacity += possibleDefaultPropertyCount(vm, prototype); + + if (!inferredInlineCapacity) { + // Empty objects are rare, so most likely the static analyzer just didn't + // see the real initializer function. This can happen with helper functions. + inferredInlineCapacity = JSFinalObject::defaultInlineCapacity(); + } else if (inferredInlineCapacity > JSFinalObject::defaultInlineCapacity()) { + // Default properties are weak guesses, so don't allow them to turn a small + // object into a large object. + inferredInlineCapacity = JSFinalObject::defaultInlineCapacity(); + } + + inlineCapacity = inferredInlineCapacity; + ASSERT(inlineCapacity < JSFinalObject::maxInlineCapacity()); + } else { + // Normal or large object. + inlineCapacity = inferredInlineCapacity; + if (inlineCapacity > JSFinalObject::maxInlineCapacity()) + inlineCapacity = JSFinalObject::maxInlineCapacity(); + } + + ASSERT(inlineCapacity > 0); + ASSERT(inlineCapacity <= JSFinalObject::maxInlineCapacity()); + + size_t allocationSize = JSFinalObject::allocationSize(inlineCapacity); + MarkedAllocator* allocator = &vm.heap.allocatorForObjectWithoutDestructor(allocationSize); + ASSERT(allocator->cellSize()); + + // Take advantage of extra inline capacity available in the size class. 
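+ // (Editorial worked example with hypothetical sizes: on a 64-bit build, + // sizeof(WriteBarrier<Unknown>) is 8. If allocationSize were 104 bytes and the size + // class rounded it up to a 112-byte cellSize, the line below would compute + // slop = (112 - 104) / 8 = 1, i.e. one extra inline property slot at no cost.)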
+ size_t slop = (allocator->cellSize() - allocationSize) / sizeof(WriteBarrier<Unknown>); + inlineCapacity += slop; + if (inlineCapacity > JSFinalObject::maxInlineCapacity()) + inlineCapacity = JSFinalObject::maxInlineCapacity(); + + Structure* structure = vm.prototypeMap.emptyObjectStructureForPrototype(prototype, inlineCapacity); + + // Ensure that if another thread sees the structure, it will see it properly created + WTF::storeStoreFence(); + + m_allocator = allocator; + m_structure.set(vm, owner, structure); + } + + Structure* structure() + { + Structure* structure = m_structure.get(); + // Ensure that if we see the structure, it has been properly created + WTF::loadLoadFence(); + return structure; + } + unsigned inlineCapacity() { return structure()->inlineCapacity(); } + + void clear() + { + m_allocator = 0; + m_structure.clear(); + ASSERT(isNull()); + } + + void visitAggregate(SlotVisitor& visitor) + { + visitor.append(&m_structure); + } + +private: + + unsigned possibleDefaultPropertyCount(VM& vm, JSObject* prototype) + { + if (prototype == prototype->globalObject()->objectPrototype()) + return 0; + + size_t count = 0; + PropertyNameArray propertyNameArray(&vm, PropertyNameMode::StringsAndSymbols); + prototype->structure()->getPropertyNamesFromStructure(vm, propertyNameArray, EnumerationMode()); + PropertyNameArrayData::PropertyNameVector& propertyNameVector = propertyNameArray.data()->propertyNameVector(); + for (size_t i = 0; i < propertyNameVector.size(); ++i) { + JSValue value = prototype->getDirect(vm, propertyNameVector[i]); + + // Functions are common, and are usually class-level objects that are not overridden. + if (jsDynamicCast<JSFunction*>(value)) + continue; + + ++count; + + } + return count; + } + + MarkedAllocator* m_allocator; // Precomputed to make things easier for generated code. + WriteBarrier<Structure> m_structure; +}; + +} // namespace JSC + +#endif // ObjectAllocationProfile_h diff --git a/Source/JavaScriptCore/bytecode/ObjectPropertyCondition.cpp b/Source/JavaScriptCore/bytecode/ObjectPropertyCondition.cpp new file mode 100644 index 000000000..1f153b956 --- /dev/null +++ b/Source/JavaScriptCore/bytecode/ObjectPropertyCondition.cpp @@ -0,0 +1,160 @@ +/* + * Copyright (C) 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. 
OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "ObjectPropertyCondition.h" + +#include "JSCInlines.h" +#include "TrackedReferences.h" + +namespace JSC { + +void ObjectPropertyCondition::dumpInContext(PrintStream& out, DumpContext* context) const +{ + if (!*this) { + out.print("<invalid>"); + return; + } + + out.print("<", inContext(JSValue(m_object), context), ": ", inContext(m_condition, context), ">"); +} + +void ObjectPropertyCondition::dump(PrintStream& out) const +{ + dumpInContext(out, nullptr); +} + +bool ObjectPropertyCondition::structureEnsuresValidityAssumingImpurePropertyWatchpoint( + Structure* structure) const +{ + return m_condition.isStillValidAssumingImpurePropertyWatchpoint(structure); +} + +bool ObjectPropertyCondition::structureEnsuresValidityAssumingImpurePropertyWatchpoint() const +{ + if (!*this) + return false; + + return structureEnsuresValidityAssumingImpurePropertyWatchpoint(m_object->structure()); +} + +bool ObjectPropertyCondition::validityRequiresImpurePropertyWatchpoint(Structure* structure) const +{ + return m_condition.validityRequiresImpurePropertyWatchpoint(structure); +} + +bool ObjectPropertyCondition::validityRequiresImpurePropertyWatchpoint() const +{ + if (!*this) + return false; + + return validityRequiresImpurePropertyWatchpoint(m_object->structure()); +} + +bool ObjectPropertyCondition::isStillValid(Structure* structure) const +{ + return m_condition.isStillValid(structure, m_object); +} + +bool ObjectPropertyCondition::isStillValid() const +{ + if (!*this) + return false; + + return isStillValid(m_object->structure()); +} + +bool ObjectPropertyCondition::structureEnsuresValidity(Structure* structure) const +{ + return m_condition.isStillValid(structure); +} + +bool ObjectPropertyCondition::structureEnsuresValidity() const +{ + if (!*this) + return false; + + return structureEnsuresValidity(m_object->structure()); +} + +bool ObjectPropertyCondition::isWatchableAssumingImpurePropertyWatchpoint( + Structure* structure, PropertyCondition::WatchabilityEffort effort) const +{ + return m_condition.isWatchableAssumingImpurePropertyWatchpoint(structure, m_object, effort); +} + +bool ObjectPropertyCondition::isWatchableAssumingImpurePropertyWatchpoint( + PropertyCondition::WatchabilityEffort effort) const +{ + if (!*this) + return false; + + return isWatchableAssumingImpurePropertyWatchpoint(m_object->structure(), effort); +} + +bool ObjectPropertyCondition::isWatchable( + Structure* structure, PropertyCondition::WatchabilityEffort effort) const +{ + return m_condition.isWatchable(structure, m_object, effort); +} + +bool ObjectPropertyCondition::isWatchable(PropertyCondition::WatchabilityEffort effort) const +{ + if (!*this) + return false; + + return isWatchable(m_object->structure(), effort); +} + +bool ObjectPropertyCondition::isStillLive() const +{ + if (!*this) + return false; + + if (!Heap::isMarked(m_object)) + return false; + + return m_condition.isStillLive(); +} + +void ObjectPropertyCondition::validateReferences(const TrackedReferences& tracked) const +{ + if 
(!*this) + return; + + tracked.check(m_object); + m_condition.validateReferences(tracked); +} + +ObjectPropertyCondition ObjectPropertyCondition::attemptToMakeEquivalenceWithoutBarrier() const +{ + PropertyCondition result = condition().attemptToMakeEquivalenceWithoutBarrier(object()); + if (!result) + return ObjectPropertyCondition(); + return ObjectPropertyCondition(object(), result); +} + +} // namespace JSC + diff --git a/Source/JavaScriptCore/bytecode/ObjectPropertyCondition.h b/Source/JavaScriptCore/bytecode/ObjectPropertyCondition.h new file mode 100644 index 000000000..372e68aea --- /dev/null +++ b/Source/JavaScriptCore/bytecode/ObjectPropertyCondition.h @@ -0,0 +1,268 @@ +/* + * Copyright (C) 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef ObjectPropertyCondition_h +#define ObjectPropertyCondition_h + +#include "JSObject.h" +#include "PropertyCondition.h" +#include <wtf/HashMap.h> + +namespace JSC { + +class TrackedReferences; + +class ObjectPropertyCondition { +public: + ObjectPropertyCondition() + : m_object(nullptr) + { + } + + ObjectPropertyCondition(WTF::HashTableDeletedValueType token) + : m_object(nullptr) + , m_condition(token) + { + } + + ObjectPropertyCondition(JSObject* object, const PropertyCondition& condition) + : m_object(object) + , m_condition(condition) + { + } + + static ObjectPropertyCondition presenceWithoutBarrier( + JSObject* object, UniquedStringImpl* uid, PropertyOffset offset, unsigned attributes) + { + ObjectPropertyCondition result; + result.m_object = object; + result.m_condition = PropertyCondition::presenceWithoutBarrier(uid, offset, attributes); + return result; + } + + static ObjectPropertyCondition presence( + VM& vm, JSCell* owner, JSObject* object, UniquedStringImpl* uid, PropertyOffset offset, + unsigned attributes) + { + if (owner) + vm.heap.writeBarrier(owner); + return presenceWithoutBarrier(object, uid, offset, attributes); + } + + // NOTE: The prototype is the storedPrototype, not the prototypeForLookup. 
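+ // (Editorial gloss, an assumption based on the surrounding code: storedPrototype is the + // value recorded in the Structure itself, whereas prototypeForLookup may be computed + // differently for special objects, so absence conditions are pinned to the stored value.)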
+ static ObjectPropertyCondition absenceWithoutBarrier( + JSObject* object, UniquedStringImpl* uid, JSObject* prototype) + { + ObjectPropertyCondition result; + result.m_object = object; + result.m_condition = PropertyCondition::absenceWithoutBarrier(uid, prototype); + return result; + } + + static ObjectPropertyCondition absence( + VM& vm, JSCell* owner, JSObject* object, UniquedStringImpl* uid, JSObject* prototype) + { + if (owner) + vm.heap.writeBarrier(owner); + return absenceWithoutBarrier(object, uid, prototype); + } + + static ObjectPropertyCondition absenceOfSetterWithoutBarrier( + JSObject* object, UniquedStringImpl* uid, JSObject* prototype) + { + ObjectPropertyCondition result; + result.m_object = object; + result.m_condition = PropertyCondition::absenceOfSetterWithoutBarrier(uid, prototype); + return result; + } + + static ObjectPropertyCondition absenceOfSetter( + VM& vm, JSCell* owner, JSObject* object, UniquedStringImpl* uid, JSObject* prototype) + { + if (owner) + vm.heap.writeBarrier(owner); + return absenceOfSetterWithoutBarrier(object, uid, prototype); + } + + static ObjectPropertyCondition equivalenceWithoutBarrier( + JSObject* object, UniquedStringImpl* uid, JSValue value) + { + ObjectPropertyCondition result; + result.m_object = object; + result.m_condition = PropertyCondition::equivalenceWithoutBarrier(uid, value); + return result; + } + + static ObjectPropertyCondition equivalence( + VM& vm, JSCell* owner, JSObject* object, UniquedStringImpl* uid, JSValue value) + { + if (owner) + vm.heap.writeBarrier(owner); + return equivalenceWithoutBarrier(object, uid, value); + } + + bool operator!() const { return !m_condition; }; + + JSObject* object() const { return m_object; } + PropertyCondition condition() const { return m_condition; } + + PropertyCondition::Kind kind() const { return condition().kind(); } + UniquedStringImpl* uid() const { return condition().uid(); } + bool hasOffset() const { return condition().hasOffset(); } + PropertyOffset offset() const { return condition().offset(); } + unsigned hasAttributes() const { return condition().hasAttributes(); } + unsigned attributes() const { return condition().attributes(); } + bool hasPrototype() const { return condition().hasPrototype(); } + JSObject* prototype() const { return condition().prototype(); } + bool hasRequiredValue() const { return condition().hasRequiredValue(); } + JSValue requiredValue() const { return condition().requiredValue(); } + + void dumpInContext(PrintStream&, DumpContext*) const; + void dump(PrintStream&) const; + + unsigned hash() const + { + return WTF::PtrHash<JSObject*>::hash(m_object) ^ m_condition.hash(); + } + + bool operator==(const ObjectPropertyCondition& other) const + { + return m_object == other.m_object + && m_condition == other.m_condition; + } + + bool isHashTableDeletedValue() const + { + return !m_object && m_condition.isHashTableDeletedValue(); + } + + // Two conditions are compatible if they are identical or if they speak of different uids or + // different objects. If false is returned, you have to decide how to resolve the conflict - + // for example if there is a Presence and an Equivalence then in some cases you'll want the + // more general of the two while in other cases you'll want the more specific of the two. This + // will also return false for contradictions, like Presence and Absence on the same + // object/uid. By convention, invalid conditions aren't compatible with anything. 
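+ // (Editorial illustration with hypothetical objects o != p and uids "x" != "y": + // Presence(o, "x") is compatible with Presence(p, "x") and with Absence(o, "y"), since + // each pair constrains a different object/uid combination; Presence(o, "x") and + // Absence(o, "x") constrain the same combination without being identical, so + // isCompatibleWith() below returns false for them.)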
+ bool isCompatibleWith(const ObjectPropertyCondition& other) const + { + if (!*this || !other) + return false; + return *this == other || uid() != other.uid() || object() != other.object(); + } + + // These validity-checking methods can optionally take a Structure* instead of loading the + // Structure* from the object. If you're in the concurrent JIT, then you must use the forms + // that take an explicit Structure* because you want the compiler to optimize for the same + // structure that you validated (i.e. avoid a TOCTOU race). + + // Checks if the object's structure claims that the property won't be intercepted. Validity + // does not require watchpoints on the object. + bool structureEnsuresValidityAssumingImpurePropertyWatchpoint(Structure*) const; + bool structureEnsuresValidityAssumingImpurePropertyWatchpoint() const; + + // Returns true if we need an impure property watchpoint to ensure validity even if + // isStillValidAccordingToStructure() returned true. + bool validityRequiresImpurePropertyWatchpoint(Structure*) const; + bool validityRequiresImpurePropertyWatchpoint() const; + + // Checks if the condition still holds. May conservatively return false, if the object and + // structure alone don't guarantee the condition. Note that this may return true if the + // condition still requires some watchpoints on the object in addition to checking the + // structure. If you want to check if the condition holds by using the structure alone, + // use structureEnsuresValidity(). + bool isStillValid(Structure*) const; + bool isStillValid() const; + + // Shorthand for condition().isStillValid(structure). + bool structureEnsuresValidity(Structure*) const; + bool structureEnsuresValidity() const; + + // This means that it's still valid and we could enforce validity by setting a transition + // watchpoint on the structure and possibly an impure property watchpoint. + bool isWatchableAssumingImpurePropertyWatchpoint( + Structure*, + PropertyCondition::WatchabilityEffort = PropertyCondition::MakeNoChanges) const; + bool isWatchableAssumingImpurePropertyWatchpoint( + PropertyCondition::WatchabilityEffort = PropertyCondition::MakeNoChanges) const; + + // This means that it's still valid and we could enforce validity by setting a transition + // watchpoint on the structure. + bool isWatchable( + Structure*, + PropertyCondition::WatchabilityEffort = PropertyCondition::MakeNoChanges) const; + bool isWatchable( + PropertyCondition::WatchabilityEffort = PropertyCondition::MakeNoChanges) const; + + bool watchingRequiresStructureTransitionWatchpoint() const + { + return condition().watchingRequiresStructureTransitionWatchpoint(); + } + bool watchingRequiresReplacementWatchpoint() const + { + return condition().watchingRequiresReplacementWatchpoint(); + } + + // This means that the objects involved in this are still live.
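+ // (Editorial gloss of the definition in ObjectPropertyCondition.cpp: the GC has marked + // m_object and the condition's own references are still live; presumably checked before a + // concurrent compile bakes these objects into generated code.)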
+ bool isStillLive() const; + + void validateReferences(const TrackedReferences&) const; + + bool isValidValueForPresence(JSValue value) const + { + return condition().isValidValueForPresence(value); + } + + ObjectPropertyCondition attemptToMakeEquivalenceWithoutBarrier() const; + +private: + JSObject* m_object; + PropertyCondition m_condition; +}; + +struct ObjectPropertyConditionHash { + static unsigned hash(const ObjectPropertyCondition& key) { return key.hash(); } + static bool equal( + const ObjectPropertyCondition& a, const ObjectPropertyCondition& b) + { + return a == b; + } + static const bool safeToCompareToEmptyOrDeleted = true; +}; + +} // namespace JSC + +namespace WTF { + +template<typename T> struct DefaultHash; +template<> struct DefaultHash<JSC::ObjectPropertyCondition> { + typedef JSC::ObjectPropertyConditionHash Hash; +}; + +template<typename T> struct HashTraits; +template<> struct HashTraits<JSC::ObjectPropertyCondition> : SimpleClassHashTraits<JSC::ObjectPropertyCondition> { }; + +} // namespace WTF + +#endif // ObjectPropertyCondition_h + diff --git a/Source/JavaScriptCore/bytecode/ObjectPropertyConditionSet.cpp b/Source/JavaScriptCore/bytecode/ObjectPropertyConditionSet.cpp new file mode 100644 index 000000000..81860651f --- /dev/null +++ b/Source/JavaScriptCore/bytecode/ObjectPropertyConditionSet.cpp @@ -0,0 +1,364 @@ +/* + * Copyright (C) 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "config.h" +#include "ObjectPropertyConditionSet.h" + +#include "JSCInlines.h" +#include <wtf/ListDump.h> + +namespace JSC { + +ObjectPropertyCondition ObjectPropertyConditionSet::forObject(JSObject* object) const +{ + for (const ObjectPropertyCondition& condition : *this) { + if (condition.object() == object) + return condition; + } + return ObjectPropertyCondition(); +} + +ObjectPropertyCondition ObjectPropertyConditionSet::forConditionKind( + PropertyCondition::Kind kind) const +{ + for (const ObjectPropertyCondition& condition : *this) { + if (condition.kind() == kind) + return condition; + } + return ObjectPropertyCondition(); +} + +unsigned ObjectPropertyConditionSet::numberOfConditionsWithKind(PropertyCondition::Kind kind) const +{ + unsigned result = 0; + for (const ObjectPropertyCondition& condition : *this) { + if (condition.kind() == kind) + result++; + } + return result; +} + +bool ObjectPropertyConditionSet::hasOneSlotBaseCondition() const +{ + return numberOfConditionsWithKind(PropertyCondition::Presence) == 1; +} + +ObjectPropertyCondition ObjectPropertyConditionSet::slotBaseCondition() const +{ + ObjectPropertyCondition result; + unsigned numFound = 0; + for (const ObjectPropertyCondition& condition : *this) { + if (condition.kind() == PropertyCondition::Presence) { + result = condition; + numFound++; + } + } + RELEASE_ASSERT(numFound == 1); + return result; +} + +ObjectPropertyConditionSet ObjectPropertyConditionSet::mergedWith( + const ObjectPropertyConditionSet& other) const +{ + if (!isValid() || !other.isValid()) + return invalid(); + + Vector<ObjectPropertyCondition> result; + + if (!isEmpty()) + result.appendVector(m_data->vector); + + for (const ObjectPropertyCondition& newCondition : other) { + // Append each new condition at most once, and only if it is not already present; + // appending inside the inner loop would duplicate it once per existing condition. + bool foundMatch = false; + for (const ObjectPropertyCondition& existingCondition : *this) { + if (newCondition == existingCondition) { + foundMatch = true; + continue; + } + if (!newCondition.isCompatibleWith(existingCondition)) + return invalid(); + } + if (!foundMatch) + result.append(newCondition); + } + + return create(result); +} + +bool ObjectPropertyConditionSet::structuresEnsureValidity() const +{ + if (!isValid()) + return false; + + for (const ObjectPropertyCondition& condition : *this) { + if (!condition.structureEnsuresValidity()) + return false; + } + return true; +} + +bool ObjectPropertyConditionSet::structuresEnsureValidityAssumingImpurePropertyWatchpoint() const +{ + if (!isValid()) + return false; + + for (const ObjectPropertyCondition& condition : *this) { + if (!condition.structureEnsuresValidityAssumingImpurePropertyWatchpoint()) + return false; + } + return true; +} + +bool ObjectPropertyConditionSet::needImpurePropertyWatchpoint() const +{ + for (const ObjectPropertyCondition& condition : *this) { + if (condition.validityRequiresImpurePropertyWatchpoint()) + return true; + } + return false; +} + +bool ObjectPropertyConditionSet::areStillLive() const +{ + for (const ObjectPropertyCondition& condition : *this) { + if (!condition.isStillLive()) + return false; + } + return true; +} + +void ObjectPropertyConditionSet::dumpInContext(PrintStream& out, DumpContext* context) const +{ + if (!isValid()) { + out.print("<invalid>"); + return; + } + + out.print("["); + if (m_data) + out.print(listDumpInContext(m_data->vector, context)); + out.print("]"); +} + +void ObjectPropertyConditionSet::dump(PrintStream& out) const +{ + dumpInContext(out, nullptr); +} + +namespace { + +bool verbose = false; + +ObjectPropertyCondition generateCondition( + VM& vm, JSCell* owner, JSObject* object, UniquedStringImpl* uid,
PropertyCondition::Kind conditionKind) +{ + Structure* structure = object->structure(); + if (verbose) + dataLog("Creating condition ", conditionKind, " for ", pointerDump(structure), "\n"); + + ObjectPropertyCondition result; + switch (conditionKind) { + case PropertyCondition::Presence: { + unsigned attributes; + PropertyOffset offset = structure->getConcurrently(uid, attributes); + if (offset == invalidOffset) + return ObjectPropertyCondition(); + result = ObjectPropertyCondition::presence(vm, owner, object, uid, offset, attributes); + break; + } + case PropertyCondition::Absence: { + result = ObjectPropertyCondition::absence( + vm, owner, object, uid, object->structure()->storedPrototypeObject()); + break; + } + case PropertyCondition::AbsenceOfSetter: { + result = ObjectPropertyCondition::absenceOfSetter( + vm, owner, object, uid, object->structure()->storedPrototypeObject()); + break; + } + default: + RELEASE_ASSERT_NOT_REACHED(); + return ObjectPropertyCondition(); + } + + if (!result.structureEnsuresValidityAssumingImpurePropertyWatchpoint()) { + if (verbose) + dataLog("Failed to create condition: ", result, "\n"); + return ObjectPropertyCondition(); + } + + if (verbose) + dataLog("New condition: ", result, "\n"); + return result; +} + +enum Concurrency { + MainThread, + Concurrent +}; +template<typename Functor> +ObjectPropertyConditionSet generateConditions( + VM& vm, JSGlobalObject* globalObject, Structure* structure, JSObject* prototype, const Functor& functor, + Concurrency concurrency = MainThread) +{ + Vector<ObjectPropertyCondition> conditions; + + for (;;) { + if (verbose) + dataLog("Considering structure: ", pointerDump(structure), "\n"); + + if (structure->isProxy()) { + if (verbose) + dataLog("It's a proxy, so invalid.\n"); + return ObjectPropertyConditionSet::invalid(); + } + + JSValue value = structure->prototypeForLookup(globalObject); + + if (value.isNull()) { + if (!prototype) { + if (verbose) + dataLog("Reached end of prototype chain as expected, done.\n"); + break; + } + if (verbose) + dataLog("Unexpectedly reached end of prototype chain, so invalid.\n"); + return ObjectPropertyConditionSet::invalid(); + } + + JSObject* object = jsCast<JSObject*>(value); + structure = object->structure(vm); + + // Since we're accessing a prototype repeatedly, it's a good bet that it should not be + // treated as a dictionary.
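+ // (Editorial gloss: flattening converts the dictionary back into a normal structure, so + // the conditions generated below can be validated against, and watchpointed on, a stable + // Structure; only the main thread may flatten, hence the concurrent bail-out below.)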
+ if (structure->isDictionary()) { + if (concurrency == MainThread) + structure->flattenDictionaryStructure(vm, object); + else { + if (verbose) + dataLog("Cannot flatten dictionary when not on main thread, so invalid.\n"); + return ObjectPropertyConditionSet::invalid(); + } + } + + if (!functor(conditions, object)) { + if (verbose) + dataLog("Functor failed, invalid.\n"); + return ObjectPropertyConditionSet::invalid(); + } + + if (object == prototype) { + if (verbose) + dataLog("Reached desired prototype, done.\n"); + break; + } + } + + if (verbose) + dataLog("Returning conditions: ", listDump(conditions), "\n"); + return ObjectPropertyConditionSet::create(conditions); +} + +} // anonymous namespace + +ObjectPropertyConditionSet generateConditionsForPropertyMiss( + VM& vm, JSCell* owner, ExecState* exec, Structure* headStructure, UniquedStringImpl* uid) +{ + return generateConditions( + vm, exec->lexicalGlobalObject(), headStructure, nullptr, + [&] (Vector<ObjectPropertyCondition>& conditions, JSObject* object) -> bool { + ObjectPropertyCondition result = + generateCondition(vm, owner, object, uid, PropertyCondition::Absence); + if (!result) + return false; + conditions.append(result); + return true; + }); +} + +ObjectPropertyConditionSet generateConditionsForPropertySetterMiss( + VM& vm, JSCell* owner, ExecState* exec, Structure* headStructure, UniquedStringImpl* uid) +{ + return generateConditions( + vm, exec->lexicalGlobalObject(), headStructure, nullptr, + [&] (Vector<ObjectPropertyCondition>& conditions, JSObject* object) -> bool { + ObjectPropertyCondition result = + generateCondition(vm, owner, object, uid, PropertyCondition::AbsenceOfSetter); + if (!result) + return false; + conditions.append(result); + return true; + }); +} + +ObjectPropertyConditionSet generateConditionsForPrototypePropertyHit( + VM& vm, JSCell* owner, ExecState* exec, Structure* headStructure, JSObject* prototype, + UniquedStringImpl* uid) +{ + return generateConditions( + vm, exec->lexicalGlobalObject(), headStructure, prototype, + [&] (Vector<ObjectPropertyCondition>& conditions, JSObject* object) -> bool { + PropertyCondition::Kind kind = + object == prototype ? 
PropertyCondition::Presence : PropertyCondition::Absence; + ObjectPropertyCondition result = + generateCondition(vm, owner, object, uid, kind); + if (!result) + return false; + conditions.append(result); + return true; + }); +} + +ObjectPropertyConditionSet generateConditionsForPrototypePropertyHitCustom( + VM& vm, JSCell* owner, ExecState* exec, Structure* headStructure, JSObject* prototype, + UniquedStringImpl* uid) +{ + return generateConditions( + vm, exec->lexicalGlobalObject(), headStructure, prototype, + [&] (Vector<ObjectPropertyCondition>& conditions, JSObject* object) -> bool { + if (object == prototype) + return true; + ObjectPropertyCondition result = + generateCondition(vm, owner, object, uid, PropertyCondition::Absence); + if (!result) + return false; + conditions.append(result); + return true; + }); +} + +ObjectPropertyConditionSet generateConditionsForPropertySetterMissConcurrently( + VM& vm, JSGlobalObject* globalObject, Structure* headStructure, UniquedStringImpl* uid) +{ + return generateConditions( + vm, globalObject, headStructure, nullptr, + [&] (Vector<ObjectPropertyCondition>& conditions, JSObject* object) -> bool { + ObjectPropertyCondition result = + generateCondition(vm, nullptr, object, uid, PropertyCondition::AbsenceOfSetter); + if (!result) + return false; + conditions.append(result); + return true; + }, Concurrent); +} + +} // namespace JSC + diff --git a/Source/JavaScriptCore/bytecode/ObjectPropertyConditionSet.h b/Source/JavaScriptCore/bytecode/ObjectPropertyConditionSet.h new file mode 100644 index 000000000..957eaac25 --- /dev/null +++ b/Source/JavaScriptCore/bytecode/ObjectPropertyConditionSet.h @@ -0,0 +1,175 @@ +/* + * Copyright (C) 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef ObjectPropertyConditionSet_h +#define ObjectPropertyConditionSet_h + +#include "ObjectPropertyCondition.h" +#include <wtf/FastMalloc.h> +#include <wtf/RefCounted.h> +#include <wtf/Vector.h> + +namespace JSC { + +// An object property condition set is used to represent the set of additional conditions +// that need to be met for some heap access to be valid. The set can have the following +// interesting states: +// +// Empty: There are no special conditions that need to be met. 
+// Invalid: The heap access is never valid. +// Non-empty: The heap access is valid if all the ObjectPropertyConditions in the set are valid. + +class ObjectPropertyConditionSet { +public: + ObjectPropertyConditionSet() { } + + static ObjectPropertyConditionSet invalid() + { + ObjectPropertyConditionSet result; + result.m_data = adoptRef(new Data()); + return result; + } + + static ObjectPropertyConditionSet create(const Vector<ObjectPropertyCondition>& vector) + { + if (vector.isEmpty()) + return ObjectPropertyConditionSet(); + + ObjectPropertyConditionSet result; + result.m_data = adoptRef(new Data()); + result.m_data->vector = vector; + return result; + } + + bool isValid() const + { + return !m_data || !m_data->vector.isEmpty(); + } + + bool isEmpty() const + { + return !m_data; + } + + typedef const ObjectPropertyCondition* iterator; + + iterator begin() const + { + if (!m_data) + return nullptr; + return m_data->vector.begin(); + } + iterator end() const + { + if (!m_data) + return nullptr; + return m_data->vector.end(); + } + + ObjectPropertyCondition forObject(JSObject*) const; + ObjectPropertyCondition forConditionKind(PropertyCondition::Kind) const; + + unsigned numberOfConditionsWithKind(PropertyCondition::Kind) const; + + bool hasOneSlotBaseCondition() const; + + // If this is a condition set for a prototype hit, then this is guaranteed to return the + // condition on the prototype itself. This allows you to get the object, offset, and + // attributes for the prototype. This will RELEASE_ASSERT that there is exactly one Presence + // in the set, and it will return that presence. + ObjectPropertyCondition slotBaseCondition() const; + + // Attempt to create a new condition set by merging this one with the other one. This will + // fail if any of the conditions are incompatible with each other. When it fails, it returns + // invalid(). + ObjectPropertyConditionSet mergedWith(const ObjectPropertyConditionSet& other) const; + + bool structuresEnsureValidity() const; + bool structuresEnsureValidityAssumingImpurePropertyWatchpoint() const; + + bool needImpurePropertyWatchpoint() const; + bool areStillLive() const; + + void dumpInContext(PrintStream&, DumpContext*) const; + void dump(PrintStream&) const; + + // Helpers for using this in a union. + void* releaseRawPointer() + { + return static_cast<void*>(m_data.leakRef()); + } + static ObjectPropertyConditionSet adoptRawPointer(void* rawPointer) + { + ObjectPropertyConditionSet result; + result.m_data = adoptRef(static_cast<Data*>(rawPointer)); + return result; + } + static ObjectPropertyConditionSet fromRawPointer(void* rawPointer) + { + ObjectPropertyConditionSet result; + result.m_data = static_cast<Data*>(rawPointer); + return result; + } + + // FIXME: Everything below here should be private, but cannot be because of a bug in VS. + + // Internally, this represents Invalid using a pointer to a Data that has an empty vector. + + // FIXME: This could be made more compact by having it internally use a vector that just has + // the non-uid portion of ObjectPropertyCondition, and then requiring that the callers of all + // of the APIs supply the uid.
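+ // (Editorial gloss: Data is ThreadSafeRefCounted (below) so a single immutable condition + // vector can be shared by reference count between the main thread and concurrent JIT + // threads; the releaseRawPointer()/adoptRawPointer() helpers above exist to stash that + // reference in a union.)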
+ + class Data : public ThreadSafeRefCounted<Data> { + WTF_MAKE_NONCOPYABLE(Data); + WTF_MAKE_FAST_ALLOCATED; + + public: + Data() { } + + Vector<ObjectPropertyCondition> vector; + }; + +private: + RefPtr<Data> m_data; +}; + +ObjectPropertyConditionSet generateConditionsForPropertyMiss( + VM&, JSCell* owner, ExecState*, Structure* headStructure, UniquedStringImpl* uid); +ObjectPropertyConditionSet generateConditionsForPropertySetterMiss( + VM&, JSCell* owner, ExecState*, Structure* headStructure, UniquedStringImpl* uid); +ObjectPropertyConditionSet generateConditionsForPrototypePropertyHit( + VM&, JSCell* owner, ExecState*, Structure* headStructure, JSObject* prototype, + UniquedStringImpl* uid); +ObjectPropertyConditionSet generateConditionsForPrototypePropertyHitCustom( + VM&, JSCell* owner, ExecState*, Structure* headStructure, JSObject* prototype, + UniquedStringImpl* uid); + +ObjectPropertyConditionSet generateConditionsForPropertySetterMissConcurrently( + VM&, JSGlobalObject*, Structure* headStructure, UniquedStringImpl* uid); + +} // namespace JSC + +#endif // ObjectPropertyConditionSet_h + diff --git a/Source/JavaScriptCore/bytecode/Opcode.cpp b/Source/JavaScriptCore/bytecode/Opcode.cpp new file mode 100644 index 000000000..3efa34934 --- /dev/null +++ b/Source/JavaScriptCore/bytecode/Opcode.cpp @@ -0,0 +1,187 @@ +/* + * Copyright (C) 2008 Apple Inc. All rights reserved. + * Copyright (C) 2008 Cameron Zwarich <cwzwarich@uwaterloo.ca> + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Apple Inc. ("Apple") nor the names of + * its contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "config.h" +#include "Opcode.h" + +#if ENABLE(OPCODE_STATS) +#include <array> +#include <wtf/DataLog.h> +#endif + +using namespace std; + +namespace JSC { + +const char* const opcodeNames[] = { +#define OPCODE_NAME_ENTRY(opcode, size) #opcode, + FOR_EACH_OPCODE_ID(OPCODE_NAME_ENTRY) +#undef OPCODE_NAME_ENTRY +}; + +#if ENABLE(OPCODE_STATS) + +long long OpcodeStats::opcodeCounts[numOpcodeIDs]; +long long OpcodeStats::opcodePairCounts[numOpcodeIDs][numOpcodeIDs]; +int OpcodeStats::lastOpcode = -1; + +static OpcodeStats logger; + +OpcodeStats::OpcodeStats() +{ + for (int i = 0; i < numOpcodeIDs; ++i) + opcodeCounts[i] = 0; + + for (int i = 0; i < numOpcodeIDs; ++i) + for (int j = 0; j < numOpcodeIDs; ++j) + opcodePairCounts[i][j] = 0; +} + +static int compareOpcodeIndices(const void* left, const void* right) +{ + long long leftValue = OpcodeStats::opcodeCounts[*(int*) left]; + long long rightValue = OpcodeStats::opcodeCounts[*(int*) right]; + + if (leftValue < rightValue) + return 1; + else if (leftValue > rightValue) + return -1; + else + return 0; +} + +static int compareOpcodePairIndices(const void* left, const void* right) +{ + std::pair<int, int> leftPair = *(pair<int, int>*) left; + long long leftValue = OpcodeStats::opcodePairCounts[leftPair.first][leftPair.second]; + std::pair<int, int> rightPair = *(pair<int, int>*) right; + long long rightValue = OpcodeStats::opcodePairCounts[rightPair.first][rightPair.second]; + + if (leftValue < rightValue) + return 1; + else if (leftValue > rightValue) + return -1; + else + return 0; +} + +OpcodeStats::~OpcodeStats() +{ + long long totalInstructions = 0; + for (int i = 0; i < numOpcodeIDs; ++i) + totalInstructions += opcodeCounts[i]; + + long long totalInstructionPairs = 0; + for (int i = 0; i < numOpcodeIDs; ++i) + for (int j = 0; j < numOpcodeIDs; ++j) + totalInstructionPairs += opcodePairCounts[i][j]; + + std::array<int, numOpcodeIDs> sortedIndices; + for (int i = 0; i < numOpcodeIDs; ++i) + sortedIndices[i] = i; + qsort(sortedIndices.data(), numOpcodeIDs, sizeof(int), compareOpcodeIndices); + + std::pair<int, int> sortedPairIndices[numOpcodeIDs * numOpcodeIDs]; + std::pair<int, int>* currentPairIndex = sortedPairIndices; + for (int i = 0; i < numOpcodeIDs; ++i) + for (int j = 0; j < numOpcodeIDs; ++j) + *(currentPairIndex++) = std::make_pair(i, j); + qsort(sortedPairIndices, numOpcodeIDs * numOpcodeIDs, sizeof(std::pair<int, int>), compareOpcodePairIndices); + + dataLogF("\nExecuted opcode statistics\n"); + + dataLogF("Total instructions executed: %lld\n\n", totalInstructions); + + dataLogF("All opcodes by frequency:\n\n"); + + for (int i = 0; i < numOpcodeIDs; ++i) { + int index = sortedIndices[i]; + dataLogF("%s:%s %lld - %.2f%%\n", opcodeNames[index], padOpcodeName((OpcodeID)index, 28), opcodeCounts[index], ((double) opcodeCounts[index]) / ((double) totalInstructions) * 100.0); + } + + dataLogF("\n"); + dataLogF("2-opcode sequences by frequency: %lld\n\n", totalInstructions); + + for (int i = 0; i < numOpcodeIDs * numOpcodeIDs; ++i) { + std::pair<int, int> indexPair = sortedPairIndices[i]; + long long count = opcodePairCounts[indexPair.first][indexPair.second]; + + if (!count) + break; + + dataLogF("%s%s %s:%s %lld %.2f%%\n", opcodeNames[indexPair.first], padOpcodeName((OpcodeID)indexPair.first, 28), opcodeNames[indexPair.second], padOpcodeName((OpcodeID)indexPair.second, 28), count, ((double) count) / ((double) totalInstructionPairs) * 100.0); + } + + dataLogF("\n"); + dataLogF("Most common opcodes and sequences:\n"); + + 
for (int i = 0; i < numOpcodeIDs; ++i) { + int index = sortedIndices[i]; + long long opcodeCount = opcodeCounts[index]; + double opcodeProportion = ((double) opcodeCount) / ((double) totalInstructions); + if (opcodeProportion < 0.0001) + break; + dataLogF("\n%s:%s %lld - %.2f%%\n", opcodeNames[index], padOpcodeName((OpcodeID)index, 28), opcodeCount, opcodeProportion * 100.0); + + for (int j = 0; j < numOpcodeIDs * numOpcodeIDs; ++j) { + std::pair<int, int> indexPair = sortedPairIndices[j]; + long long pairCount = opcodePairCounts[indexPair.first][indexPair.second]; + double pairProportion = ((double) pairCount) / ((double) totalInstructionPairs); + + if (!pairCount || pairProportion < 0.0001 || pairProportion < opcodeProportion / 100) + break; + + if (indexPair.first != index && indexPair.second != index) + continue; + + dataLogF(" %s%s %s:%s %lld - %.2f%%\n", opcodeNames[indexPair.first], padOpcodeName((OpcodeID)indexPair.first, 28), opcodeNames[indexPair.second], padOpcodeName((OpcodeID)indexPair.second, 28), pairCount, pairProportion * 100.0); + } + + } + dataLogF("\n"); +} + +void OpcodeStats::recordInstruction(int opcode) +{ + opcodeCounts[opcode]++; + + if (lastOpcode != -1) + opcodePairCounts[lastOpcode][opcode]++; + + lastOpcode = opcode; +} + +void OpcodeStats::resetLastInstruction() +{ + lastOpcode = -1; +} + +#endif + +} // namespace JSC diff --git a/Source/JavaScriptCore/bytecode/Opcode.h b/Source/JavaScriptCore/bytecode/Opcode.h new file mode 100644 index 000000000..1ed48c2ce --- /dev/null +++ b/Source/JavaScriptCore/bytecode/Opcode.h @@ -0,0 +1,131 @@ +/* + * Copyright (C) 2008, 2009, 2013, 2014 Apple Inc. All rights reserved. + * Copyright (C) 2008 Cameron Zwarich <cwzwarich@uwaterloo.ca> + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Apple Inc. ("Apple") nor the names of + * its contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef Opcode_h +#define Opcode_h + +#include "Bytecodes.h" +#include "LLIntOpcode.h" + +#include <algorithm> +#include <string.h> + +#include <wtf/Assertions.h> + +namespace JSC { + +#define FOR_EACH_CORE_OPCODE_ID_WITH_EXTENSION(macro, extension__) \ + FOR_EACH_BYTECODE_ID(macro) \ + extension__ + +#define FOR_EACH_CORE_OPCODE_ID(macro) \ + FOR_EACH_CORE_OPCODE_ID_WITH_EXTENSION(macro, /* No extension */ ) + +#define FOR_EACH_OPCODE_ID(macro) \ + FOR_EACH_CORE_OPCODE_ID_WITH_EXTENSION( \ + macro, \ + FOR_EACH_LLINT_OPCODE_EXTENSION(macro) \ + ) + + +#define OPCODE_ID_ENUM(opcode, length) opcode, + typedef enum { FOR_EACH_OPCODE_ID(OPCODE_ID_ENUM) } OpcodeID; +#undef OPCODE_ID_ENUM + +const int maxOpcodeLength = 9; +#if !ENABLE(JIT) +const int numOpcodeIDs = NUMBER_OF_BYTECODE_IDS + NUMBER_OF_CLOOP_BYTECODE_HELPER_IDS + NUMBER_OF_BYTECODE_HELPER_IDS; +#else +const int numOpcodeIDs = NUMBER_OF_BYTECODE_IDS + NUMBER_OF_BYTECODE_HELPER_IDS; +#endif + +#define OPCODE_ID_LENGTHS(id, length) const int id##_length = length; + FOR_EACH_OPCODE_ID(OPCODE_ID_LENGTHS); +#undef OPCODE_ID_LENGTHS + +#define OPCODE_LENGTH(opcode) opcode##_length + +#define OPCODE_ID_LENGTH_MAP(opcode, length) length, + const int opcodeLengths[numOpcodeIDs] = { FOR_EACH_OPCODE_ID(OPCODE_ID_LENGTH_MAP) }; +#undef OPCODE_ID_LENGTH_MAP + +#define VERIFY_OPCODE_ID(id, size) COMPILE_ASSERT(id <= numOpcodeIDs, ASSERT_THAT_JS_OPCODE_IDS_ARE_VALID); + FOR_EACH_OPCODE_ID(VERIFY_OPCODE_ID); +#undef VERIFY_OPCODE_ID + +#if ENABLE(COMPUTED_GOTO_OPCODES) +typedef void* Opcode; +#else +typedef OpcodeID Opcode; +#endif + +#define PADDING_STRING " " +#define PADDING_STRING_LENGTH static_cast<unsigned>(strlen(PADDING_STRING)) + +extern const char* const opcodeNames[]; + +inline const char* padOpcodeName(OpcodeID op, unsigned width) +{ + unsigned pad = width - strlen(opcodeNames[op]); + pad = std::min(pad, PADDING_STRING_LENGTH); + return PADDING_STRING + PADDING_STRING_LENGTH - pad; +} + +#undef PADDING_STRING_LENGTH +#undef PADDING_STRING + +#if ENABLE(OPCODE_STATS) + +struct OpcodeStats { + OpcodeStats(); + ~OpcodeStats(); + static long long opcodeCounts[numOpcodeIDs]; + static long long opcodePairCounts[numOpcodeIDs][numOpcodeIDs]; + static int lastOpcode; + + static void recordInstruction(int opcode); + static void resetLastInstruction(); +}; + +#endif + +inline size_t opcodeLength(OpcodeID opcode) +{ + switch (opcode) { +#define OPCODE_ID_LENGTHS(id, length) case id: return OPCODE_LENGTH(id); + FOR_EACH_OPCODE_ID(OPCODE_ID_LENGTHS) +#undef OPCODE_ID_LENGTHS + } + RELEASE_ASSERT_NOT_REACHED(); + return 0; +} + +} // namespace JSC + +#endif // Opcode_h diff --git a/Source/JavaScriptCore/bytecode/Operands.h b/Source/JavaScriptCore/bytecode/Operands.h new file mode 100644 index 000000000..78ddaa525 --- /dev/null +++ b/Source/JavaScriptCore/bytecode/Operands.h @@ -0,0 +1,270 @@ +/* + * Copyright (C) 2011, 2012, 2013, 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. 
``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef Operands_h +#define Operands_h + +#include "CallFrame.h" +#include "JSObject.h" +#include "VirtualRegister.h" + +#include <wtf/PrintStream.h> +#include <wtf/Vector.h> + +namespace JSC { + +template<typename T> struct OperandValueTraits; + +template<typename T> +struct OperandValueTraits { + static T defaultValue() { return T(); } + static bool isEmptyForDump(const T& value) { return !value; } +}; + +enum OperandKind { ArgumentOperand, LocalOperand }; + +enum OperandsLikeTag { OperandsLike }; + +template<typename T, typename Traits = OperandValueTraits<T>> +class Operands { +public: + Operands() { } + + explicit Operands(size_t numArguments, size_t numLocals, const T& initialValue = Traits::defaultValue()) + { + m_arguments.fill(initialValue, numArguments); + m_locals.fill(initialValue, numLocals); + } + + template<typename U, typename OtherTraits> + explicit Operands(OperandsLikeTag, const Operands<U, OtherTraits>& other) + { + m_arguments.fill(Traits::defaultValue(), other.numberOfArguments()); + m_locals.fill(Traits::defaultValue(), other.numberOfLocals()); + } + + size_t numberOfArguments() const { return m_arguments.size(); } + size_t numberOfLocals() const { return m_locals.size(); } + + T& argument(size_t idx) { return m_arguments[idx]; } + const T& argument(size_t idx) const { return m_arguments[idx]; } + + T& local(size_t idx) { return m_locals[idx]; } + const T& local(size_t idx) const { return m_locals[idx]; } + + template<OperandKind operandKind> + size_t sizeFor() const + { + if (operandKind == ArgumentOperand) + return numberOfArguments(); + return numberOfLocals(); + } + template<OperandKind operandKind> + T& atFor(size_t idx) + { + if (operandKind == ArgumentOperand) + return argument(idx); + return local(idx); + } + template<OperandKind operandKind> + const T& atFor(size_t idx) const + { + if (operandKind == ArgumentOperand) + return argument(idx); + return local(idx); + } + + void ensureLocals(size_t size, const T& ensuredValue = Traits::defaultValue()) + { + if (size <= m_locals.size()) + return; + + size_t oldSize = m_locals.size(); + m_locals.resize(size); + for (size_t i = oldSize; i < m_locals.size(); ++i) + m_locals[i] = ensuredValue; + } + + void setLocal(size_t idx, const T& value) + { + ensureLocals(idx + 1); + + m_locals[idx] = value; + } + + T getLocal(size_t idx) + { + if (idx >= m_locals.size()) + return Traits::defaultValue(); + return m_locals[idx]; + } + + void setArgumentFirstTime(size_t idx, const T& value) + { + ASSERT(m_arguments[idx] == Traits::defaultValue()); + argument(idx) = value; + } + + void setLocalFirstTime(size_t idx, const T& value) + { + ASSERT(idx >= m_locals.size() || m_locals[idx] == Traits::defaultValue()); + setLocal(idx, value); + } + + T& operand(int operand) + { + if (operandIsArgument(operand)) { + int 
argument = VirtualRegister(operand).toArgument(); + return m_arguments[argument]; + } + + return m_locals[VirtualRegister(operand).toLocal()]; + } + + T& operand(VirtualRegister virtualRegister) + { + return operand(virtualRegister.offset()); + } + + const T& operand(int operand) const { return const_cast<const T&>(const_cast<Operands*>(this)->operand(operand)); } + const T& operand(VirtualRegister operand) const { return const_cast<const T&>(const_cast<Operands*>(this)->operand(operand)); } + + bool hasOperand(int operand) const + { + if (operandIsArgument(operand)) + return true; + return static_cast<size_t>(VirtualRegister(operand).toLocal()) < numberOfLocals(); + } + bool hasOperand(VirtualRegister reg) const + { + return hasOperand(reg.offset()); + } + + void setOperand(int operand, const T& value) + { + if (operandIsArgument(operand)) { + int argument = VirtualRegister(operand).toArgument(); + m_arguments[argument] = value; + return; + } + + setLocal(VirtualRegister(operand).toLocal(), value); + } + + void setOperand(VirtualRegister virtualRegister, const T& value) + { + setOperand(virtualRegister.offset(), value); + } + + size_t size() const { return numberOfArguments() + numberOfLocals(); } + const T& at(size_t index) const + { + if (index < numberOfArguments()) + return m_arguments[index]; + return m_locals[index - numberOfArguments()]; + } + T& at(size_t index) + { + if (index < numberOfArguments()) + return m_arguments[index]; + return m_locals[index - numberOfArguments()]; + } + const T& operator[](size_t index) const { return at(index); } + T& operator[](size_t index) { return at(index); } + + bool isArgument(size_t index) const { return index < numberOfArguments(); } + bool isVariable(size_t index) const { return !isArgument(index); } + int argumentForIndex(size_t index) const + { + return index; + } + int variableForIndex(size_t index) const + { + return index - m_arguments.size(); + } + int operandForIndex(size_t index) const + { + if (index < numberOfArguments()) + return virtualRegisterForArgument(index).offset(); + return virtualRegisterForLocal(index - numberOfArguments()).offset(); + } + VirtualRegister virtualRegisterForIndex(size_t index) const + { + return VirtualRegister(operandForIndex(index)); + } + size_t indexForOperand(int operand) const + { + if (operandIsArgument(operand)) + return static_cast<size_t>(VirtualRegister(operand).toArgument()); + return static_cast<size_t>(VirtualRegister(operand).toLocal()) + numberOfArguments(); + } + size_t indexForOperand(VirtualRegister reg) const + { + return indexForOperand(reg.offset()); + } + + void setOperandFirstTime(int operand, const T& value) + { + if (operandIsArgument(operand)) { + setArgumentFirstTime(VirtualRegister(operand).toArgument(), value); + return; + } + + setLocalFirstTime(VirtualRegister(operand).toLocal(), value); + } + + void fill(T value) + { + for (size_t i = 0; i < m_arguments.size(); ++i) + m_arguments[i] = value; + for (size_t i = 0; i < m_locals.size(); ++i) + m_locals[i] = value; + } + + void clear() + { + fill(Traits::defaultValue()); + } + + bool operator==(const Operands& other) const + { + ASSERT(numberOfArguments() == other.numberOfArguments()); + ASSERT(numberOfLocals() == other.numberOfLocals()); + + return m_arguments == other.m_arguments && m_locals == other.m_locals; + } + + void dumpInContext(PrintStream& out, DumpContext* context) const; + void dump(PrintStream& out) const; + +private: + Vector<T, 8> m_arguments; + Vector<T, 16> m_locals; +}; + +} // namespace JSC + +#endif // 
Operands_h + diff --git a/Source/JavaScriptCore/bytecode/OperandsInlines.h b/Source/JavaScriptCore/bytecode/OperandsInlines.h new file mode 100644 index 000000000..c9dee88c7 --- /dev/null +++ b/Source/JavaScriptCore/bytecode/OperandsInlines.h @@ -0,0 +1,69 @@ +/* + * Copyright (C) 2013 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef OperandsInlines_h +#define OperandsInlines_h + +#include "Operands.h" +#include <wtf/CommaPrinter.h> + +namespace JSC { + +template<typename T, typename Traits> +void Operands<T, Traits>::dumpInContext(PrintStream& out, DumpContext* context) const +{ + CommaPrinter comma(" "); + for (size_t argumentIndex = numberOfArguments(); argumentIndex--;) { + if (Traits::isEmptyForDump(argument(argumentIndex))) + continue; + out.print(comma, "arg", argumentIndex, ":", inContext(argument(argumentIndex), context)); + } + for (size_t localIndex = 0; localIndex < numberOfLocals(); ++localIndex) { + if (Traits::isEmptyForDump(local(localIndex))) + continue; + out.print(comma, "loc", localIndex, ":", inContext(local(localIndex), context)); + } +} + +template<typename T, typename Traits> +void Operands<T, Traits>::dump(PrintStream& out) const +{ + CommaPrinter comma(" "); + for (size_t argumentIndex = numberOfArguments(); argumentIndex--;) { + if (Traits::isEmptyForDump(argument(argumentIndex))) + continue; + out.print(comma, "arg", argumentIndex, ":", argument(argumentIndex)); + } + for (size_t localIndex = 0; localIndex < numberOfLocals(); ++localIndex) { + if (Traits::isEmptyForDump(local(localIndex))) + continue; + out.print(comma, "loc", localIndex, ":", local(localIndex)); + } +} + +} // namespace JSC + +#endif // OperandsInlines_h + diff --git a/Source/JavaScriptCore/bytecode/PolymorphicAccessStructureList.h b/Source/JavaScriptCore/bytecode/PolymorphicAccessStructureList.h new file mode 100644 index 000000000..f8b64750c --- /dev/null +++ b/Source/JavaScriptCore/bytecode/PolymorphicAccessStructureList.h @@ -0,0 +1,112 @@ +/* + * Copyright (C) 2013 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
 Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef PolymorphicAccessStructureList_h
+#define PolymorphicAccessStructureList_h
+
+#include "JITStubRoutine.h"
+#include "Structure.h"
+#include "StructureChain.h"
+
+#define POLYMORPHIC_LIST_CACHE_SIZE 8
+
+namespace JSC {
+
+// *Sigh*. If the JIT is enabled we need to track the stubRoutine (of type CodeLocationLabel).
+// If the JIT is not in use we don't actually need the variable (that said, if the JIT is not in use we don't
+// currently actually use PolymorphicAccessStructureLists, which we should). Anyway, this seems like the best
+// solution for now - we will need to do something smarter if/when we actually want mixed-mode operation.
+
+#if ENABLE(JIT)
+// Structure used by the op_get_by_id_self_list and op_get_by_id_proto_list instructions to hold data off the main opcode stream.
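+// Editor's sketch (assumptions, not upstream code): the list is a fixed array of
+// POLYMORPHIC_LIST_CACHE_SIZE entries that a caching site fills front to back,
+// one entry per Structure it has seen; the constructors below seed slot 0 and
+// later slots are filled with set(). `vm`, `owner`, the routines, and the
+// structures are hypothetical stand-ins:
+//
+//     PolymorphicAccessStructureList* cache = new PolymorphicAccessStructureList(
+//         vm, owner, routine, firstStructure, /* isDirect */ true);
+//     cache->list[1].set(vm, owner, nextRoutine, nextStructure, /* isDirect */ false);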
+struct PolymorphicAccessStructureList { + WTF_MAKE_FAST_ALLOCATED; +public: + struct PolymorphicStubInfo { + bool isDirect : 1; + unsigned count : 31; + RefPtr<JITStubRoutine> stubRoutine; + WriteBarrier<Structure> base; + WriteBarrier<StructureChain> chain; + + PolymorphicStubInfo() + { + } + + void set(VM& vm, JSCell* owner, PassRefPtr<JITStubRoutine> _stubRoutine, Structure* _base, bool _isDirect) + { + stubRoutine = _stubRoutine; + base.set(vm, owner, _base); + isDirect = _isDirect; + count = 0; + } + + void set(VM& vm, JSCell* owner, PassRefPtr<JITStubRoutine> _stubRoutine, Structure* _base, StructureChain* _chain, bool _isDirect, unsigned _count) + { + stubRoutine = _stubRoutine; + base.set(vm, owner, _base); + chain.set(vm, owner, _chain); + isDirect = _isDirect; + count = _count; + } + } list[POLYMORPHIC_LIST_CACHE_SIZE]; + + PolymorphicAccessStructureList() + { + } + + PolymorphicAccessStructureList(VM& vm, JSCell* owner, PassRefPtr<JITStubRoutine> stubRoutine, Structure* firstBase, bool isDirect) + { + list[0].set(vm, owner, stubRoutine, firstBase, isDirect); + } + + PolymorphicAccessStructureList(VM& vm, JSCell* owner, PassRefPtr<JITStubRoutine> stubRoutine, Structure* firstBase, StructureChain* firstChain, bool isDirect, unsigned count) + { + list[0].set(vm, owner, stubRoutine, firstBase, firstChain, isDirect, count); + } + + bool visitWeak(int count) + { + for (int i = 0; i < count; ++i) { + PolymorphicStubInfo& info = list[i]; + if (!info.base) + continue; + + if (!Heap::isMarked(info.base.get())) + return false; + if (info.chain && !Heap::isMarked(info.chain.get())) + return false; + } + + return true; + } +}; + +#endif // ENABLE(JIT) + +} // namespace JSC + +#endif // PolymorphicAccessStructureList_h + diff --git a/Source/JavaScriptCore/bytecode/PolymorphicGetByIdList.cpp b/Source/JavaScriptCore/bytecode/PolymorphicGetByIdList.cpp new file mode 100644 index 000000000..27e806d2c --- /dev/null +++ b/Source/JavaScriptCore/bytecode/PolymorphicGetByIdList.cpp @@ -0,0 +1,155 @@ +/* + * Copyright (C) 2014, 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "config.h" +#include "PolymorphicGetByIdList.h" + +#if ENABLE(JIT) + +#include "CodeBlock.h" +#include "Heap.h" +#include "JSCInlines.h" +#include "StructureStubInfo.h" + +namespace JSC { + +GetByIdAccess::GetByIdAccess( + VM& vm, JSCell* owner, AccessType type, PassRefPtr<JITStubRoutine> stubRoutine, + Structure* structure, const ObjectPropertyConditionSet& conditionSet) + : m_type(type) + , m_structure(vm, owner, structure) + , m_conditionSet(conditionSet) + , m_stubRoutine(stubRoutine) +{ +} + +GetByIdAccess::~GetByIdAccess() +{ +} + +GetByIdAccess GetByIdAccess::fromStructureStubInfo(StructureStubInfo& stubInfo) +{ + MacroAssemblerCodePtr initialSlowPath = + stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToSlowCase); + + GetByIdAccess result; + + RELEASE_ASSERT(stubInfo.accessType == access_get_by_id_self); + + result.m_type = SimpleInline; + result.m_structure.copyFrom(stubInfo.u.getByIdSelf.baseObjectStructure); + result.m_stubRoutine = JITStubRoutine::createSelfManagedRoutine(initialSlowPath); + + return result; +} + +bool GetByIdAccess::visitWeak(RepatchBuffer& repatchBuffer) const +{ + if (m_structure && !Heap::isMarked(m_structure.get())) + return false; + if (!m_conditionSet.areStillLive()) + return false; + if (!m_stubRoutine->visitWeak(repatchBuffer)) + return false; + return true; +} + +PolymorphicGetByIdList::PolymorphicGetByIdList(StructureStubInfo& stubInfo) +{ + if (stubInfo.accessType == access_unset) + return; + + m_list.append(GetByIdAccess::fromStructureStubInfo(stubInfo)); +} + +PolymorphicGetByIdList* PolymorphicGetByIdList::from(StructureStubInfo& stubInfo) +{ + if (stubInfo.accessType == access_get_by_id_list) + return stubInfo.u.getByIdList.list; + + ASSERT( + stubInfo.accessType == access_get_by_id_self + || stubInfo.accessType == access_unset); + + PolymorphicGetByIdList* result = new PolymorphicGetByIdList(stubInfo); + + stubInfo.initGetByIdList(result); + + return result; +} + +PolymorphicGetByIdList::~PolymorphicGetByIdList() { } + +MacroAssemblerCodePtr PolymorphicGetByIdList::currentSlowPathTarget( + StructureStubInfo& stubInfo) const +{ + if (isEmpty()) + return stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToSlowCase); + return m_list.last().stubRoutine()->code().code(); +} + +void PolymorphicGetByIdList::addAccess(const GetByIdAccess& access) +{ + ASSERT(!isFull()); + // Make sure that the resizing optimizes for space, not time. 
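+ // (Editor's note: resizeToFit grows the Vector to exactly the requested size
+ // rather than reserving geometric slack the way append() would; since a list
+ // never exceeds POLYMORPHIC_LIST_CACHE_SIZE entries, exact sizing keeps the
+ // per-stub memory footprint minimal.)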
+ m_list.resizeToFit(m_list.size() + 1); + m_list.last() = access; +} + +bool PolymorphicGetByIdList::isFull() const +{ + ASSERT(size() <= POLYMORPHIC_LIST_CACHE_SIZE); + return size() == POLYMORPHIC_LIST_CACHE_SIZE; +} + +bool PolymorphicGetByIdList::isAlmostFull() const +{ + ASSERT(size() <= POLYMORPHIC_LIST_CACHE_SIZE); + return size() >= POLYMORPHIC_LIST_CACHE_SIZE - 1; +} + +bool PolymorphicGetByIdList::didSelfPatching() const +{ + for (unsigned i = size(); i--;) { + if (at(i).type() == GetByIdAccess::SimpleInline) + return true; + } + return false; +} + +bool PolymorphicGetByIdList::visitWeak(RepatchBuffer& repatchBuffer) const +{ + for (unsigned i = size(); i--;) { + if (!at(i).visitWeak(repatchBuffer)) + return false; + } + return true; +} + +} // namespace JSC + +#endif // ENABLE(JIT) + + diff --git a/Source/JavaScriptCore/bytecode/PolymorphicGetByIdList.h b/Source/JavaScriptCore/bytecode/PolymorphicGetByIdList.h new file mode 100644 index 000000000..60476cc38 --- /dev/null +++ b/Source/JavaScriptCore/bytecode/PolymorphicGetByIdList.h @@ -0,0 +1,135 @@ +/* + * Copyright (C) 2014, 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef PolymorphicGetByIdList_h +#define PolymorphicGetByIdList_h + +#if ENABLE(JIT) + +#include "CodeOrigin.h" +#include "MacroAssembler.h" +#include "ObjectPropertyConditionSet.h" +#include "Opcode.h" +#include "Structure.h" +#include <wtf/Vector.h> + +namespace JSC { + +class CodeBlock; +struct StructureStubInfo; + +class GetByIdAccess { +public: + enum AccessType { + Invalid, + SimpleInline, // This is the patched inline access. + SimpleStub, // This is a stub. 
+ WatchedStub, + Getter, + CustomGetter, + SimpleMiss, + }; + + GetByIdAccess() + : m_type(Invalid) + { + } + + GetByIdAccess( + VM&, JSCell* owner, AccessType, PassRefPtr<JITStubRoutine>, Structure*, + const ObjectPropertyConditionSet& = ObjectPropertyConditionSet()); + + ~GetByIdAccess(); + + static GetByIdAccess fromStructureStubInfo(StructureStubInfo&); + + bool isSet() const { return m_type != Invalid; } + bool operator!() const { return !isSet(); } + + AccessType type() const { return m_type; } + + Structure* structure() const { return m_structure.get(); } + + const ObjectPropertyConditionSet& conditionSet() const { return m_conditionSet; } + + JITStubRoutine* stubRoutine() const + { + ASSERT(isSet()); + return m_stubRoutine.get(); + } + + bool doesCalls() const { return type() == Getter || type() == CustomGetter; } + bool isWatched() const { return type() == WatchedStub; } + bool isSimple() const { return !doesCalls() && !isWatched(); } + + bool visitWeak(RepatchBuffer&) const; + +private: + friend class CodeBlock; + + AccessType m_type; + WriteBarrier<Structure> m_structure; + ObjectPropertyConditionSet m_conditionSet; + RefPtr<JITStubRoutine> m_stubRoutine; +}; + +class PolymorphicGetByIdList { + WTF_MAKE_FAST_ALLOCATED; +public: + // Either creates a new polymorphic get list, or returns the one that is already in + // place. + static PolymorphicGetByIdList* from(StructureStubInfo&); + + ~PolymorphicGetByIdList(); + + MacroAssemblerCodePtr currentSlowPathTarget(StructureStubInfo& stubInfo) const; + + void addAccess(const GetByIdAccess&); + + bool isEmpty() const { return m_list.isEmpty(); } + unsigned size() const { return m_list.size(); } + bool isFull() const; + bool isAlmostFull() const; // True if adding an element would make isFull() true. + const GetByIdAccess& at(unsigned i) const { return m_list[i]; } + const GetByIdAccess& operator[](unsigned i) const { return m_list[i]; } + + bool didSelfPatching() const; // Are any of the accesses SimpleInline? + + bool visitWeak(RepatchBuffer&) const; + +private: + friend class CodeBlock; + + PolymorphicGetByIdList(StructureStubInfo&); + + Vector<GetByIdAccess, 2> m_list; +}; + +} // namespace JSC + +#endif // ENABLE(JIT) + +#endif // PolymorphicGetByIdList_h + diff --git a/Source/JavaScriptCore/bytecode/PolymorphicPutByIdList.cpp b/Source/JavaScriptCore/bytecode/PolymorphicPutByIdList.cpp new file mode 100644 index 000000000..7eddb8621 --- /dev/null +++ b/Source/JavaScriptCore/bytecode/PolymorphicPutByIdList.cpp @@ -0,0 +1,156 @@ +/* + * Copyright (C) 2012, 2014, 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. 
OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "PolymorphicPutByIdList.h" + +#if ENABLE(JIT) + +#include "StructureStubInfo.h" + +namespace JSC { + +PutByIdAccess PutByIdAccess::fromStructureStubInfo(StructureStubInfo& stubInfo) +{ + MacroAssemblerCodePtr initialSlowPath = + stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToSlowCase); + + PutByIdAccess result; + + switch (stubInfo.accessType) { + case access_put_by_id_replace: + result.m_type = Replace; + result.m_oldStructure.copyFrom(stubInfo.u.putByIdReplace.baseObjectStructure); + result.m_stubRoutine = JITStubRoutine::createSelfManagedRoutine(initialSlowPath); + break; + + case access_put_by_id_transition_direct: + case access_put_by_id_transition_normal: + result.m_type = Transition; + result.m_oldStructure.copyFrom(stubInfo.u.putByIdTransition.previousStructure); + result.m_newStructure.copyFrom(stubInfo.u.putByIdTransition.structure); + result.m_conditionSet = ObjectPropertyConditionSet::adoptRawPointer( + stubInfo.u.putByIdTransition.rawConditionSet); + result.m_stubRoutine = stubInfo.stubRoutine; + break; + + default: + RELEASE_ASSERT_NOT_REACHED(); + } + + return result; +} + +bool PutByIdAccess::visitWeak(RepatchBuffer& repatchBuffer) const +{ + if (!m_conditionSet.areStillLive()) + return false; + + switch (m_type) { + case Replace: + if (!Heap::isMarked(m_oldStructure.get())) + return false; + break; + case Transition: + if (!Heap::isMarked(m_oldStructure.get())) + return false; + if (!Heap::isMarked(m_newStructure.get())) + return false; + break; + case Setter: + case CustomSetter: + if (!Heap::isMarked(m_oldStructure.get())) + return false; + break; + default: + RELEASE_ASSERT_NOT_REACHED(); + return false; + } + if (!m_stubRoutine->visitWeak(repatchBuffer)) + return false; + return true; +} + +PolymorphicPutByIdList::PolymorphicPutByIdList( + PutKind putKind, StructureStubInfo& stubInfo) + : m_kind(putKind) +{ + if (stubInfo.accessType != access_unset) + m_list.append(PutByIdAccess::fromStructureStubInfo(stubInfo)); +} + +PolymorphicPutByIdList* PolymorphicPutByIdList::from( + PutKind putKind, StructureStubInfo& stubInfo) +{ + if (stubInfo.accessType == access_put_by_id_list) + return stubInfo.u.putByIdList.list; + + ASSERT(stubInfo.accessType == access_put_by_id_replace + || stubInfo.accessType == access_put_by_id_transition_normal + || stubInfo.accessType == access_put_by_id_transition_direct + || stubInfo.accessType == access_unset); + + PolymorphicPutByIdList* result = + new PolymorphicPutByIdList(putKind, stubInfo); + + stubInfo.initPutByIdList(result); + + return result; +} + +PolymorphicPutByIdList::~PolymorphicPutByIdList() { } + +bool PolymorphicPutByIdList::isFull() const +{ + ASSERT(size() <= POLYMORPHIC_LIST_CACHE_SIZE); + return size() == POLYMORPHIC_LIST_CACHE_SIZE; +} + +bool PolymorphicPutByIdList::isAlmostFull() const +{ + ASSERT(size() <= POLYMORPHIC_LIST_CACHE_SIZE); + return size() >= POLYMORPHIC_LIST_CACHE_SIZE - 1; +} + +void PolymorphicPutByIdList::addAccess(const PutByIdAccess& putByIdAccess) +{ 
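+ // Editor's sketch (not upstream code): the usual repatch-time flow, with
+ // `stubInfo`, `vm`, `owner`, `structure`, and `routine` as hypothetical
+ // stand-ins (PutKind being Direct or NotDirect):
+ //
+ //     PolymorphicPutByIdList* list = PolymorphicPutByIdList::from(NotDirect, stubInfo);
+ //     if (!list->isFull())
+ //         list->addAccess(PutByIdAccess::replace(vm, owner, structure, routine));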
+ ASSERT(!isFull()); + // Make sure that the resizing optimizes for space, not time. + m_list.resizeToFit(m_list.size() + 1); + m_list.last() = putByIdAccess; +} + +bool PolymorphicPutByIdList::visitWeak(RepatchBuffer& repatchBuffer) const +{ + for (unsigned i = 0; i < size(); ++i) { + if (!at(i).visitWeak(repatchBuffer)) + return false; + } + return true; +} + +} // namespace JSC + +#endif // ENABLE(JIT) diff --git a/Source/JavaScriptCore/bytecode/PolymorphicPutByIdList.h b/Source/JavaScriptCore/bytecode/PolymorphicPutByIdList.h new file mode 100644 index 000000000..02939f311 --- /dev/null +++ b/Source/JavaScriptCore/bytecode/PolymorphicPutByIdList.h @@ -0,0 +1,216 @@ +/* + * Copyright (C) 2012, 2014, 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef PolymorphicPutByIdList_h +#define PolymorphicPutByIdList_h + +#if ENABLE(JIT) + +#include "CodeOrigin.h" +#include "MacroAssembler.h" +#include "ObjectPropertyConditionSet.h" +#include "Opcode.h" +#include "PutKind.h" +#include "PutPropertySlot.h" +#include "Structure.h" +#include <wtf/Vector.h> + +namespace JSC { + +class CodeBlock; +struct StructureStubInfo; + +class PutByIdAccess { +public: + enum AccessType { + Invalid, + Transition, + Replace, + Setter, + CustomSetter + }; + + PutByIdAccess() + : m_type(Invalid) + { + } + + static PutByIdAccess transition( + VM& vm, + JSCell* owner, + Structure* oldStructure, + Structure* newStructure, + const ObjectPropertyConditionSet& conditionSet, + PassRefPtr<JITStubRoutine> stubRoutine) + { + PutByIdAccess result; + result.m_type = Transition; + result.m_oldStructure.set(vm, owner, oldStructure); + result.m_newStructure.set(vm, owner, newStructure); + result.m_conditionSet = conditionSet; + result.m_customSetter = 0; + result.m_stubRoutine = stubRoutine; + return result; + } + + static PutByIdAccess replace( + VM& vm, + JSCell* owner, + Structure* structure, + PassRefPtr<JITStubRoutine> stubRoutine) + { + PutByIdAccess result; + result.m_type = Replace; + result.m_oldStructure.set(vm, owner, structure); + result.m_customSetter = 0; + result.m_stubRoutine = stubRoutine; + return result; + } + + + static PutByIdAccess setter( + VM& vm, + JSCell* owner, + AccessType accessType, + Structure* structure, + const ObjectPropertyConditionSet& conditionSet, + PutPropertySlot::PutValueFunc customSetter, + PassRefPtr<JITStubRoutine> stubRoutine) + { + RELEASE_ASSERT(accessType == Setter || accessType == CustomSetter); + PutByIdAccess result; + result.m_oldStructure.set(vm, owner, structure); + result.m_type = accessType; + result.m_conditionSet = conditionSet; + result.m_customSetter = customSetter; + result.m_stubRoutine = stubRoutine; + return result; + } + + static PutByIdAccess fromStructureStubInfo(StructureStubInfo&); + + bool isSet() const { return m_type != Invalid; } + bool operator!() const { return !isSet(); } + + AccessType type() const { return m_type; } + + bool isTransition() const { return m_type == Transition; } + bool isReplace() const { return m_type == Replace; } + bool isSetter() const { return m_type == Setter; } + bool isCustom() const { return m_type == CustomSetter; } + + Structure* oldStructure() const + { + // Using this instead of isSet() to make this assertion robust against the possibility + // of additional access types being added. 
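+ // (Editor's note: every access type other than Transition stores its single
+ // structure in m_oldStructure, which is why structure() below returns the
+ // same field.)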
+ ASSERT(isTransition() || isReplace() || isSetter() || isCustom()); + + return m_oldStructure.get(); + } + + Structure* structure() const + { + ASSERT(isReplace() || isSetter() || isCustom()); + return m_oldStructure.get(); + } + + Structure* newStructure() const + { + ASSERT(isTransition()); + return m_newStructure.get(); + } + + const ObjectPropertyConditionSet& conditionSet() const { return m_conditionSet; } + + JITStubRoutine* stubRoutine() const + { + ASSERT(isTransition() || isReplace() || isSetter() || isCustom()); + return m_stubRoutine.get(); + } + + PutPropertySlot::PutValueFunc customSetter() const + { + ASSERT(isCustom()); + return m_customSetter; + } + + bool visitWeak(RepatchBuffer&) const; + +private: + friend class CodeBlock; + + AccessType m_type; + WriteBarrier<Structure> m_oldStructure; + WriteBarrier<Structure> m_newStructure; + ObjectPropertyConditionSet m_conditionSet; + PutPropertySlot::PutValueFunc m_customSetter; + RefPtr<JITStubRoutine> m_stubRoutine; +}; + +class PolymorphicPutByIdList { + WTF_MAKE_FAST_ALLOCATED; +public: + // Either creates a new polymorphic put list, or returns the one that is already + // in place. + static PolymorphicPutByIdList* from(PutKind, StructureStubInfo&); + + ~PolymorphicPutByIdList(); + + MacroAssemblerCodePtr currentSlowPathTarget() const + { + return m_list.last().stubRoutine()->code().code(); + } + + void addAccess(const PutByIdAccess&); + + bool isEmpty() const { return m_list.isEmpty(); } + unsigned size() const { return m_list.size(); } + bool isFull() const; + bool isAlmostFull() const; // True if adding an element would make isFull() true. + const PutByIdAccess& at(unsigned i) const { return m_list[i]; } + const PutByIdAccess& operator[](unsigned i) const { return m_list[i]; } + + PutKind kind() const { return m_kind; } + + bool visitWeak(RepatchBuffer&) const; + +private: + friend class CodeBlock; + + // Initialize from a stub info; this will place one element in the list and it will + // be created by converting the stub info's put by id access information into our + // PutByIdAccess. + PolymorphicPutByIdList(PutKind, StructureStubInfo&); + + Vector<PutByIdAccess, 2> m_list; + PutKind m_kind; +}; + +} // namespace JSC + +#endif // ENABLE(JIT) + +#endif // PolymorphicPutByIdList_h + diff --git a/Source/JavaScriptCore/bytecode/PreciseJumpTargets.cpp b/Source/JavaScriptCore/bytecode/PreciseJumpTargets.cpp new file mode 100644 index 000000000..414dfd97d --- /dev/null +++ b/Source/JavaScriptCore/bytecode/PreciseJumpTargets.cpp @@ -0,0 +1,133 @@ +/* + * Copyright (C) 2013 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. 
OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "PreciseJumpTargets.h" + +#include "JSCInlines.h" + +namespace JSC { + +template <size_t vectorSize> +static void getJumpTargetsForBytecodeOffset(CodeBlock* codeBlock, Interpreter* interpreter, Instruction* instructionsBegin, unsigned bytecodeOffset, Vector<unsigned, vectorSize>& out) +{ + OpcodeID opcodeID = interpreter->getOpcodeID(instructionsBegin[bytecodeOffset].u.opcode); + Instruction* current = instructionsBegin + bytecodeOffset; + switch (opcodeID) { + case op_jmp: + out.append(bytecodeOffset + current[1].u.operand); + break; + case op_jtrue: + case op_jfalse: + case op_jeq_null: + case op_jneq_null: + out.append(bytecodeOffset + current[2].u.operand); + break; + case op_jneq_ptr: + case op_jless: + case op_jlesseq: + case op_jgreater: + case op_jgreatereq: + case op_jnless: + case op_jnlesseq: + case op_jngreater: + case op_jngreatereq: + out.append(bytecodeOffset + current[3].u.operand); + break; + case op_switch_imm: + case op_switch_char: { + SimpleJumpTable& table = codeBlock->switchJumpTable(current[1].u.operand); + for (unsigned i = table.branchOffsets.size(); i--;) + out.append(bytecodeOffset + table.branchOffsets[i]); + out.append(bytecodeOffset + current[2].u.operand); + break; + } + case op_switch_string: { + StringJumpTable& table = codeBlock->stringSwitchJumpTable(current[1].u.operand); + StringJumpTable::StringOffsetTable::iterator iter = table.offsetTable.begin(); + StringJumpTable::StringOffsetTable::iterator end = table.offsetTable.end(); + for (; iter != end; ++iter) + out.append(bytecodeOffset + iter->value.branchOffset); + out.append(bytecodeOffset + current[2].u.operand); + break; + } + case op_check_has_instance: + out.append(bytecodeOffset + current[4].u.operand); + break; + case op_loop_hint: + out.append(bytecodeOffset); + break; + default: + break; + } +} + +void computePreciseJumpTargets(CodeBlock* codeBlock, Vector<unsigned, 32>& out) +{ + ASSERT(out.isEmpty()); + + // We will derive a superset of the jump targets that the code block thinks it has. + // So, if the code block claims there are none, then we are done. + if (!codeBlock->numberOfJumpTargets()) + return; + + for (unsigned i = codeBlock->numberOfExceptionHandlers(); i--;) + out.append(codeBlock->exceptionHandler(i).target); + + Interpreter* interpreter = codeBlock->vm()->interpreter; + Instruction* instructionsBegin = codeBlock->instructions().begin(); + unsigned instructionCount = codeBlock->instructions().size(); + for (unsigned bytecodeOffset = 0; bytecodeOffset < instructionCount;) { + OpcodeID opcodeID = interpreter->getOpcodeID(instructionsBegin[bytecodeOffset].u.opcode); + getJumpTargetsForBytecodeOffset(codeBlock, interpreter, instructionsBegin, bytecodeOffset, out); + bytecodeOffset += opcodeLengths[opcodeID]; + } + + std::sort(out.begin(), out.end()); + + // We will have duplicates, and we must remove them. 
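+ // Editor's note: the loop below is an in-place unique pass over the freshly
+ // sorted vector; an equivalent sketch using the standard library (assuming
+ // <algorithm> is available) would be:
+ //
+ //     out.resize(std::unique(out.begin(), out.end()) - out.begin());
+ //     out.shrinkToFit();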
+ unsigned toIndex = 0;
+ unsigned fromIndex = 0;
+ unsigned lastValue = UINT_MAX;
+ while (fromIndex < out.size()) {
+ unsigned value = out[fromIndex++];
+ if (value == lastValue)
+ continue;
+ out[toIndex++] = value;
+ lastValue = value;
+ }
+ out.resize(toIndex);
+ out.shrinkToFit();
+}
+
+void findJumpTargetsForBytecodeOffset(CodeBlock* codeBlock, unsigned bytecodeOffset, Vector<unsigned, 1>& out)
+{
+ Interpreter* interpreter = codeBlock->vm()->interpreter;
+ Instruction* instructionsBegin = codeBlock->instructions().begin();
+ getJumpTargetsForBytecodeOffset(codeBlock, interpreter, instructionsBegin, bytecodeOffset, out);
+}
+
+} // namespace JSC
+
diff --git a/Source/JavaScriptCore/bytecode/PreciseJumpTargets.h b/Source/JavaScriptCore/bytecode/PreciseJumpTargets.h
new file mode 100644
index 000000000..852413d77
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/PreciseJumpTargets.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2013 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef PreciseJumpTargets_h
+#define PreciseJumpTargets_h
+
+#include "CodeBlock.h"
+
+namespace JSC {
+
+// Returns a sorted list of the bytecode indices that are the destinations of jumps.
+void computePreciseJumpTargets(CodeBlock*, Vector<unsigned, 32>& out);
+
+void findJumpTargetsForBytecodeOffset(CodeBlock*, unsigned bytecodeOffset, Vector<unsigned, 1>& out);
+
+} // namespace JSC
+
+#endif // PreciseJumpTargets_h
+
diff --git a/Source/JavaScriptCore/bytecode/PropertyCondition.cpp b/Source/JavaScriptCore/bytecode/PropertyCondition.cpp
new file mode 100644
index 000000000..8aab4eaec
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/PropertyCondition.cpp
@@ -0,0 +1,352 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "PropertyCondition.h" + +#include "GetterSetter.h" +#include "JSCInlines.h" +#include "TrackedReferences.h" + +namespace JSC { + +static bool verbose = false; + +void PropertyCondition::dumpInContext(PrintStream& out, DumpContext* context) const +{ + if (!*this) { + out.print("<invalid>"); + return; + } + + out.print(m_kind, " of ", m_uid); + switch (m_kind) { + case Presence: + out.print(" at ", offset(), " with attributes ", attributes()); + return; + case Absence: + case AbsenceOfSetter: + out.print(" with prototype ", inContext(JSValue(prototype()), context)); + return; + case Equivalence: + out.print(" with ", inContext(requiredValue(), context)); + return; + } + RELEASE_ASSERT_NOT_REACHED(); +} + +void PropertyCondition::dump(PrintStream& out) const +{ + dumpInContext(out, nullptr); +} + +bool PropertyCondition::isStillValidAssumingImpurePropertyWatchpoint( + Structure* structure, JSObject* base) const +{ + if (verbose) { + dataLog( + "Determining validity of ", *this, " with structure ", pointerDump(structure), " and base ", + JSValue(base), " assuming impure property watchpoints are set.\n"); + } + + if (!*this) { + if (verbose) + dataLog("Invalid because unset.\n"); + return false; + } + + if (!structure->propertyAccessesAreCacheable()) { + if (verbose) + dataLog("Invalid because accesses are not cacheable.\n"); + return false; + } + + switch (m_kind) { + case Presence: { + unsigned currentAttributes; + PropertyOffset currentOffset = structure->getConcurrently(uid(), currentAttributes); + if (currentOffset != offset() || currentAttributes != attributes()) { + if (verbose) { + dataLog( + "Invalid because we need offset, attributes to be ", offset(), ", ", attributes(), + " but they are ", currentOffset, ", ", currentAttributes, "\n"); + } + return false; + } + return true; + } + + case Absence: { + if (structure->isDictionary()) { + if (verbose) + dataLog("Invalid because it's a dictionary.\n"); + return false; + } + + PropertyOffset currentOffset = structure->getConcurrently(uid()); + if (currentOffset != invalidOffset) { + if (verbose) + dataLog("Invalid because the property exists at offset: ", currentOffset, "\n"); + return false; + } + + if (structure->storedPrototypeObject() != prototype()) { + if (verbose) { + dataLog( + "Invalid because the prototype is ", structure->storedPrototype(), " even though " + "it should have been ", JSValue(prototype()), "\n"); + } + return false; + } + + return true; + } + + case AbsenceOfSetter: { + if (structure->isDictionary()) { + if (verbose) + dataLog("Invalid because it's a dictionary.\n"); + return false; + } + + unsigned currentAttributes; + PropertyOffset currentOffset = structure->getConcurrently(uid(), currentAttributes); + if (currentOffset 
!= invalidOffset) { + if (currentAttributes & (Accessor | CustomAccessor)) { + if (verbose) { + dataLog( + "Invalid because we expected not to have a setter, but we have one at offset ", + currentOffset, " with attributes ", currentAttributes, "\n"); + } + return false; + } + } + + if (structure->storedPrototypeObject() != prototype()) { + if (verbose) { + dataLog( + "Invalid because the prototype is ", structure->storedPrototype(), " even though " + "it should have been ", JSValue(prototype()), "\n"); + } + return false; + } + + return true; + } + + case Equivalence: { + if (!base || base->structure() != structure) { + // Conservatively return false, since we cannot verify this one without having the + // object. + if (verbose) { + dataLog( + "Invalid because we don't have a base or the base has the wrong structure: ", + RawPointer(base), "\n"); + } + return false; + } + + // FIXME: This is somewhat racy, and maybe more risky than we want. + // https://bugs.webkit.org/show_bug.cgi?id=134641 + + PropertyOffset currentOffset = structure->getConcurrently(uid()); + JSValue currentValue = base->getDirect(currentOffset); + if (currentValue != requiredValue()) { + if (verbose) { + dataLog( + "Invalid because the value is ", currentValue, " but we require ", requiredValue(), + "\n"); + } + return false; + } + + return true; + } } + + RELEASE_ASSERT_NOT_REACHED(); + return false; +} + +bool PropertyCondition::validityRequiresImpurePropertyWatchpoint(Structure* structure) const +{ + if (!*this) + return false; + + switch (m_kind) { + case Presence: + case Absence: + case Equivalence: + return structure->needImpurePropertyWatchpoint(); + default: + return false; + } +} + +bool PropertyCondition::isStillValid(Structure* structure, JSObject* base) const +{ + if (!isStillValidAssumingImpurePropertyWatchpoint(structure, base)) + return false; + + // Currently we assume that an impure property can cause a property to appear, and can also + // "shadow" an existing JS property on the same object. Hence it affects both presence and + // absence. It doesn't affect AbsenceOfSetter because impure properties aren't ever setters. + switch (m_kind) { + case Presence: + case Absence: + case Equivalence: + if (structure->typeInfo().hasImpureGetOwnPropertySlot()) + return false; + break; + default: + break; + } + + return true; +} + +bool PropertyCondition::isWatchableWhenValid( + Structure* structure, WatchabilityEffort effort) const +{ + if (structure->transitionWatchpointSetHasBeenInvalidated()) + return false; + + switch (m_kind) { + case Equivalence: { + PropertyOffset offset = structure->getConcurrently(uid()); + + // This method should only be called when some variant of isValid returned true, which + // implies that we already confirmed that the structure knows of the property. We should + // also have verified that the Structure is a cacheable dictionary, which means we + // shouldn't have a TOCTOU race either. 
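Each condition kind above reduces to a small structural check: Presence wants a matching offset and attributes, Absence wants no offset at all plus an unchanged prototype, and Equivalence additionally needs the base object in hand. A simplified, self-contained model of the two simplest checks (mock types, not the real JSC Structure or PropertyCondition API):

```cpp
#include <cstdio>
#include <optional>

struct MockStructure {
    // Offset of the property if present, plus its attribute bits.
    std::optional<int> offset;
    unsigned attributes = 0;
    const void* prototype = nullptr;
    bool isDictionary = false;
};

enum class Kind { Presence, Absence };

// Mirrors the shape of isStillValidAssumingImpurePropertyWatchpoint for
// Presence and Absence: same offset/attributes, or no property and an
// unchanged prototype on a non-dictionary structure.
bool isStillValid(Kind kind, const MockStructure& s,
                  int wantOffset, unsigned wantAttributes, const void* wantPrototype)
{
    switch (kind) {
    case Kind::Presence:
        return s.offset && *s.offset == wantOffset && s.attributes == wantAttributes;
    case Kind::Absence:
        return !s.isDictionary && !s.offset && s.prototype == wantPrototype;
    }
    return false;
}

int main()
{
    MockStructure s;
    s.offset = 16;
    std::printf("presence ok: %d\n", isStillValid(Kind::Presence, s, 16, 0, nullptr));
    std::printf("absence ok: %d\n", isStillValid(Kind::Absence, s, 0, 0, nullptr));
}
```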
+ RELEASE_ASSERT(offset != invalidOffset); + + WatchpointSet* set; + switch (effort) { + case MakeNoChanges: + set = structure->propertyReplacementWatchpointSet(offset); + break; + case EnsureWatchability: + set = structure->ensurePropertyReplacementWatchpointSet( + *Heap::heap(structure)->vm(), offset); + break; + } + + if (!set || !set->isStillValid()) + return false; + + break; + } + + default: + break; + } + + return true; +} + +bool PropertyCondition::isWatchableAssumingImpurePropertyWatchpoint( + Structure* structure, JSObject* base, WatchabilityEffort effort) const +{ + return isStillValidAssumingImpurePropertyWatchpoint(structure, base) + && isWatchableWhenValid(structure, effort); +} + +bool PropertyCondition::isWatchable( + Structure* structure, JSObject* base, WatchabilityEffort effort) const +{ + return isStillValid(structure, base) + && isWatchableWhenValid(structure, effort); +} + +bool PropertyCondition::isStillLive() const +{ + if (hasPrototype() && prototype() && !Heap::isMarked(prototype())) + return false; + + if (hasRequiredValue() + && requiredValue() + && requiredValue().isCell() + && !Heap::isMarked(requiredValue().asCell())) + return false; + + return true; +} + +void PropertyCondition::validateReferences(const TrackedReferences& tracked) const +{ + if (hasPrototype()) + tracked.check(prototype()); + + if (hasRequiredValue()) + tracked.check(requiredValue()); +} + +bool PropertyCondition::isValidValueForAttributes(JSValue value, unsigned attributes) +{ + bool attributesClaimAccessor = !!(attributes & Accessor); + bool valueClaimsAccessor = !!jsDynamicCast<GetterSetter*>(value); + return attributesClaimAccessor == valueClaimsAccessor; +} + +bool PropertyCondition::isValidValueForPresence(JSValue value) const +{ + return isValidValueForAttributes(value, attributes()); +} + +PropertyCondition PropertyCondition::attemptToMakeEquivalenceWithoutBarrier(JSObject* base) const +{ + Structure* structure = base->structure(); + if (!structure->isValidOffset(offset())) + return PropertyCondition(); + JSValue value = base->getDirect(offset()); + if (!isValidValueForPresence(value)) + return PropertyCondition(); + return equivalenceWithoutBarrier(uid(), value); +} + +} // namespace JSC + +namespace WTF { + +void printInternal(PrintStream& out, JSC::PropertyCondition::Kind condition) +{ + switch (condition) { + case JSC::PropertyCondition::Presence: + out.print("Presence"); + return; + case JSC::PropertyCondition::Absence: + out.print("Absence"); + return; + case JSC::PropertyCondition::AbsenceOfSetter: + out.print("AbsenceOfSetter"); + return; + case JSC::PropertyCondition::Equivalence: + out.print("Equivalence"); + return; + } + RELEASE_ASSERT_NOT_REACHED(); +} + +} // namespace WTF diff --git a/Source/JavaScriptCore/bytecode/PropertyCondition.h b/Source/JavaScriptCore/bytecode/PropertyCondition.h new file mode 100644 index 000000000..bd08c3b9d --- /dev/null +++ b/Source/JavaScriptCore/bytecode/PropertyCondition.h @@ -0,0 +1,338 @@ +/* + * Copyright (C) 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2.
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef PropertyCondition_h +#define PropertyCondition_h + +#include "JSObject.h" +#include <wtf/HashMap.h> + +namespace JSC { + +class TrackedReferences; + +class PropertyCondition { +public: + enum Kind { + Presence, + Absence, + AbsenceOfSetter, + Equivalence // An adaptive watchpoint on this will be a pair of watchpoints, and when the structure transitions, we will set the replacement watchpoint on the new structure. + }; + + PropertyCondition() + : m_uid(nullptr) + , m_kind(Presence) + { + memset(&u, 0, sizeof(u)); + } + + PropertyCondition(WTF::HashTableDeletedValueType) + : m_uid(nullptr) + , m_kind(Absence) + { + memset(&u, 0, sizeof(u)); + } + + static PropertyCondition presenceWithoutBarrier(UniquedStringImpl* uid, PropertyOffset offset, unsigned attributes) + { + PropertyCondition result; + result.m_uid = uid; + result.m_kind = Presence; + result.u.presence.offset = offset; + result.u.presence.attributes = attributes; + return result; + } + + static PropertyCondition presence( + VM&, JSCell*, UniquedStringImpl* uid, PropertyOffset offset, unsigned attributes) + { + return presenceWithoutBarrier(uid, offset, attributes); + } + + // NOTE: The prototype is the storedPrototype not the prototypeForLookup. 
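The constructors and static factories in this class populate a kind tag plus a union payload; zeroing the whole union up front keeps the unused payload bytes deterministic, which matters once hash-table empty and deleted values compare raw state. A toy sketch of that tagged-union-with-factories pattern (not the real PropertyCondition):

```cpp
#include <cstdio>
#include <cstring>

class Condition {
public:
    enum Kind { Presence, Absence };

    static Condition presence(int offset, unsigned attributes)
    {
        Condition c;
        c.m_kind = Presence;
        c.u.presence.offset = offset;
        c.u.presence.attributes = attributes;
        return c;
    }

    static Condition absence(const void* prototype)
    {
        Condition c;
        c.m_kind = Absence;
        c.u.absence.prototype = prototype;
        return c;
    }

    Kind kind() const { return m_kind; }

private:
    // Zero the payload so unused union bytes never hold garbage.
    Condition() { std::memset(&u, 0, sizeof(u)); }

    Kind m_kind { Presence };
    union {
        struct { int offset; unsigned attributes; } presence;
        struct { const void* prototype; } absence;
    } u;
};

int main()
{
    Condition c = Condition::presence(16, 0);
    std::printf("kind: %d\n", static_cast<int>(c.kind()));
}
```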
+ static PropertyCondition absenceWithoutBarrier(UniquedStringImpl* uid, JSObject* prototype) + { + PropertyCondition result; + result.m_uid = uid; + result.m_kind = Absence; + result.u.absence.prototype = prototype; + return result; + } + + static PropertyCondition absence( + VM& vm, JSCell* owner, UniquedStringImpl* uid, JSObject* prototype) + { + if (owner) + vm.heap.writeBarrier(owner); + return absenceWithoutBarrier(uid, prototype); + } + + static PropertyCondition absenceOfSetterWithoutBarrier( + UniquedStringImpl* uid, JSObject* prototype) + { + PropertyCondition result; + result.m_uid = uid; + result.m_kind = AbsenceOfSetter; + result.u.absence.prototype = prototype; + return result; + } + + static PropertyCondition absenceOfSetter( + VM& vm, JSCell* owner, UniquedStringImpl* uid, JSObject* prototype) + { + if (owner) + vm.heap.writeBarrier(owner); + return absenceOfSetterWithoutBarrier(uid, prototype); + } + + static PropertyCondition equivalenceWithoutBarrier( + UniquedStringImpl* uid, JSValue value) + { + PropertyCondition result; + result.m_uid = uid; + result.m_kind = Equivalence; + result.u.equivalence.value = JSValue::encode(value); + return result; + } + + static PropertyCondition equivalence( + VM& vm, JSCell* owner, UniquedStringImpl* uid, JSValue value) + { + if (value.isCell() && owner) + vm.heap.writeBarrier(owner); + return equivalenceWithoutBarrier(uid, value); + } + + bool operator!() const { return !m_uid && m_kind == Presence; }; + + Kind kind() const { return m_kind; } + UniquedStringImpl* uid() const { return m_uid; } + + bool hasOffset() const { return !!*this && m_kind == Presence; }; + PropertyOffset offset() const + { + ASSERT(hasOffset()); + return u.presence.offset; + } + bool hasAttributes() const { return !!*this && m_kind == Presence; }; + unsigned attributes() const + { + ASSERT(hasAttributes()); + return u.presence.attributes; + } + + bool hasPrototype() const { return !!*this && (m_kind == Absence || m_kind == AbsenceOfSetter); } + JSObject* prototype() const + { + ASSERT(hasPrototype()); + return u.absence.prototype; + } + + bool hasRequiredValue() const { return !!*this && m_kind == Equivalence; } + JSValue requiredValue() const + { + ASSERT(hasRequiredValue()); + return JSValue::decode(u.equivalence.value); + } + + void dumpInContext(PrintStream&, DumpContext*) const; + void dump(PrintStream&) const; + + unsigned hash() const + { + unsigned result = WTF::PtrHash<UniquedStringImpl*>::hash(m_uid) + static_cast<unsigned>(m_kind); + switch (m_kind) { + case Presence: + result ^= u.presence.offset; + result ^= u.presence.attributes; + break; + case Absence: + case AbsenceOfSetter: + result ^= WTF::PtrHash<JSObject*>::hash(u.absence.prototype); + break; + case Equivalence: + result ^= EncodedJSValueHash::hash(u.equivalence.value); + break; + } + return result; + } + + bool operator==(const PropertyCondition& other) const + { + if (m_uid != other.m_uid) + return false; + if (m_kind != other.m_kind) + return false; + switch (m_kind) { + case Presence: + return u.presence.offset == other.u.presence.offset + && u.presence.attributes == other.u.presence.attributes; + case Absence: + case AbsenceOfSetter: + return u.absence.prototype == other.u.absence.prototype; + case Equivalence: + return u.equivalence.value == other.u.equivalence.value; + } + RELEASE_ASSERT_NOT_REACHED(); + return false; + } + + bool isHashTableDeletedValue() const + { + return !m_uid && m_kind == Absence; + } + + // Two conditions are compatible if they are identical or if they speak of 
different uids. If + // false is returned, you have to decide how to resolve the conflict - for example if there is + // a Presence and an Equivalence then in some cases you'll want the more general of the two + // while in other cases you'll want the more specific of the two. This will also return false + // for contradictions, like Presence and Absence on the same uid. By convention, invalid + // conditions aren't compatible with anything. + bool isCompatibleWith(const PropertyCondition& other) const + { + if (!*this || !other) + return false; + return *this == other || uid() != other.uid(); + } + + // Checks if the object's structure claims that the property won't be intercepted. + bool isStillValidAssumingImpurePropertyWatchpoint(Structure*, JSObject* base = nullptr) const; + + // Returns true if we need an impure property watchpoint to ensure validity even if + // isStillValidAccordingToStructure() returned true. + bool validityRequiresImpurePropertyWatchpoint(Structure*) const; + + // Checks if the condition is still valid right now for the given object and structure. + // May conservatively return false, if the object and structure alone don't guarantee the + // condition. This happens for an Absence condition on an object that may have impure + // properties. If the object is not supplied, then a "true" return indicates that checking if + // an object has the given structure guarantees the condition still holds. If an object is + // supplied, then you may need to use some other watchpoints on the object to guarantee the + // condition in addition to the structure check. + bool isStillValid(Structure*, JSObject* base = nullptr) const; + + // In some cases, the condition is not watchable, but could be made watchable by enabling the + // appropriate watchpoint. For example, replacement watchpoints are enabled only when some + // access is cached on the property in some structure. This is mainly to save space for + // dictionary properties or properties that never get very hot. But, it's always safe to + // enable watching, provided that this is called from the main thread. + enum WatchabilityEffort { + // This is the default. It means that we don't change the state of any Structure or + // object, and implies that if the property happens not to be watchable then we don't make + // it watchable. This is mandatory if calling from a JIT thread. This is also somewhat + // preferable when first deciding whether to watch a condition for the first time (i.e. + // not from a watchpoint fire that causes us to see if we should adapt), since a + // watchpoint not being initialized for watching implies that maybe we don't know enough + // yet to make it profitable to watch -- as in, the thing being watched may not have + // stabilized yet. We prefer to only assume that a condition will hold if it has been + // known to hold for a while already. + MakeNoChanges, + + // Do what it takes to ensure that the property can be watched, if doing so has no + // user-observable effect. For now this just means that we will ensure that a property + // replacement watchpoint is enabled if it hadn't been enabled already. Do not use this + // from JIT threads, since the act of enabling watchpoints is not thread-safe. + EnsureWatchability + }; + + // This means that it's still valid and we could enforce validity by setting a transition + // watchpoint on the structure and possibly an impure property watchpoint. 
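The two effort levels described above boil down to whether the lookup is allowed to materialize state on the structure: MakeNoChanges may come back empty-handed, EnsureWatchability may create the watchpoint set on demand. A hypothetical sketch of that split (mock types; the real sets live on JSC::Structure):

```cpp
#include <cstdio>
#include <map>
#include <memory>

struct WatchpointSet { bool stillValid = true; };

struct Registry {
    std::map<int, std::unique_ptr<WatchpointSet>> sets;

    enum Effort { MakeNoChanges, EnsureWatchability };

    // With MakeNoChanges (safe from a JIT thread) the caller may get nullptr;
    // with EnsureWatchability (main thread only) the set is created on demand.
    WatchpointSet* watchpointSet(int offset, Effort effort)
    {
        auto it = sets.find(offset);
        if (it != sets.end())
            return it->second.get();
        if (effort == MakeNoChanges)
            return nullptr;
        return (sets[offset] = std::make_unique<WatchpointSet>()).get();
    }
};

int main()
{
    Registry r;
    std::printf("no changes: %p\n", (void*)r.watchpointSet(8, Registry::MakeNoChanges));
    std::printf("ensured:    %p\n", (void*)r.watchpointSet(8, Registry::EnsureWatchability));
}
```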
+ bool isWatchableAssumingImpurePropertyWatchpoint( + Structure*, JSObject* base = nullptr, WatchabilityEffort = MakeNoChanges) const; + + // This means that it's still valid and we could enforce validity by setting a transition + // watchpoint on the structure. + bool isWatchable( + Structure*, JSObject* base = nullptr, WatchabilityEffort = MakeNoChanges) const; + + bool watchingRequiresStructureTransitionWatchpoint() const + { + // Currently, this is required for all of our conditions. + return !!*this; + } + bool watchingRequiresReplacementWatchpoint() const + { + return !!*this && m_kind == Equivalence; + } + + // This means that the objects involved in this are still live. + bool isStillLive() const; + + void validateReferences(const TrackedReferences&) const; + + static bool isValidValueForAttributes(JSValue value, unsigned attributes); + + bool isValidValueForPresence(JSValue) const; + + PropertyCondition attemptToMakeEquivalenceWithoutBarrier(JSObject* base) const; + +private: + bool isWatchableWhenValid(Structure*, WatchabilityEffort) const; + + UniquedStringImpl* m_uid; + Kind m_kind; + union { + struct { + PropertyOffset offset; + unsigned attributes; + } presence; + struct { + JSObject* prototype; + } absence; + struct { + EncodedJSValue value; + } equivalence; + } u; +}; + +struct PropertyConditionHash { + static unsigned hash(const PropertyCondition& key) { return key.hash(); } + static bool equal( + const PropertyCondition& a, const PropertyCondition& b) + { + return a == b; + } + static const bool safeToCompareToEmptyOrDeleted = true; +}; + +} // namespace JSC + +namespace WTF { + +void printInternal(PrintStream&, JSC::PropertyCondition::Kind); + +template<typename T> struct DefaultHash; +template<> struct DefaultHash<JSC::PropertyCondition> { + typedef JSC::PropertyConditionHash Hash; +}; + +template<typename T> struct HashTraits; +template<> struct HashTraits<JSC::PropertyCondition> : SimpleClassHashTraits<JSC::PropertyCondition> { }; + +} // namespace WTF + +#endif // PropertyCondition_h + diff --git a/Source/JavaScriptCore/bytecode/PutByIdStatus.cpp b/Source/JavaScriptCore/bytecode/PutByIdStatus.cpp new file mode 100644 index 000000000..cc5da3b19 --- /dev/null +++ b/Source/JavaScriptCore/bytecode/PutByIdStatus.cpp @@ -0,0 +1,430 @@ +/* + * Copyright (C) 2012-2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. 
OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "PutByIdStatus.h" + +#include "AccessorCallJITStubRoutine.h" +#include "CodeBlock.h" +#include "ComplexGetStatus.h" +#include "LLIntData.h" +#include "LowLevelInterpreter.h" +#include "JSCInlines.h" +#include "PolymorphicPutByIdList.h" +#include "Structure.h" +#include "StructureChain.h" +#include <wtf/ListDump.h> + +namespace JSC { + +bool PutByIdStatus::appendVariant(const PutByIdVariant& variant) +{ + for (unsigned i = 0; i < m_variants.size(); ++i) { + if (m_variants[i].attemptToMerge(variant)) + return true; + } + for (unsigned i = 0; i < m_variants.size(); ++i) { + if (m_variants[i].oldStructure().overlaps(variant.oldStructure())) + return false; + } + m_variants.append(variant); + return true; +} + +#if ENABLE(DFG_JIT) +bool PutByIdStatus::hasExitSite(const ConcurrentJITLocker& locker, CodeBlock* profiledBlock, unsigned bytecodeIndex) +{ + return profiledBlock->hasExitSite(locker, DFG::FrequentExitSite(bytecodeIndex, BadCache)) + || profiledBlock->hasExitSite(locker, DFG::FrequentExitSite(bytecodeIndex, BadConstantCache)); + +} +#endif + +PutByIdStatus PutByIdStatus::computeFromLLInt(CodeBlock* profiledBlock, unsigned bytecodeIndex, UniquedStringImpl* uid) +{ + UNUSED_PARAM(profiledBlock); + UNUSED_PARAM(bytecodeIndex); + UNUSED_PARAM(uid); + Instruction* instruction = profiledBlock->instructions().begin() + bytecodeIndex; + + Structure* structure = instruction[4].u.structure.get(); + if (!structure) + return PutByIdStatus(NoInformation); + + if (instruction[0].u.opcode == LLInt::getOpcode(op_put_by_id) + || instruction[0].u.opcode == LLInt::getOpcode(op_put_by_id_out_of_line)) { + PropertyOffset offset = structure->getConcurrently(uid); + if (!isValidOffset(offset)) + return PutByIdStatus(NoInformation); + + return PutByIdVariant::replace(structure, offset); + } + + ASSERT(structure->transitionWatchpointSetHasBeenInvalidated()); + + ASSERT(instruction[0].u.opcode == LLInt::getOpcode(op_put_by_id_transition_direct) + || instruction[0].u.opcode == LLInt::getOpcode(op_put_by_id_transition_normal) + || instruction[0].u.opcode == LLInt::getOpcode(op_put_by_id_transition_direct_out_of_line) + || instruction[0].u.opcode == LLInt::getOpcode(op_put_by_id_transition_normal_out_of_line)); + + Structure* newStructure = instruction[6].u.structure.get(); + + PropertyOffset offset = newStructure->getConcurrently(uid); + if (!isValidOffset(offset)) + return PutByIdStatus(NoInformation); + + ObjectPropertyConditionSet conditionSet; + if (instruction[0].u.opcode == LLInt::getOpcode(op_put_by_id_transition_normal) + || instruction[0].u.opcode == LLInt::getOpcode(op_put_by_id_transition_normal_out_of_line)) { + conditionSet = + generateConditionsForPropertySetterMissConcurrently( + *profiledBlock->vm(), profiledBlock->globalObject(), structure, uid); + if (!conditionSet.isValid()) + return PutByIdStatus(NoInformation); + } + + return PutByIdVariant::transition(structure, newStructure, conditionSet, offset); +} + +PutByIdStatus PutByIdStatus::computeFor(CodeBlock* 
profiledBlock, StubInfoMap& map, unsigned bytecodeIndex, UniquedStringImpl* uid) +{ + ConcurrentJITLocker locker(profiledBlock->m_lock); + + UNUSED_PARAM(profiledBlock); + UNUSED_PARAM(bytecodeIndex); + UNUSED_PARAM(uid); +#if ENABLE(DFG_JIT) + if (hasExitSite(locker, profiledBlock, bytecodeIndex)) + return PutByIdStatus(TakesSlowPath); + + StructureStubInfo* stubInfo = map.get(CodeOrigin(bytecodeIndex)); + PutByIdStatus result = computeForStubInfo( + locker, profiledBlock, stubInfo, uid, + CallLinkStatus::computeExitSiteData(locker, profiledBlock, bytecodeIndex)); + if (!result) + return computeFromLLInt(profiledBlock, bytecodeIndex, uid); + + return result; +#else // ENABLE(JIT) + UNUSED_PARAM(map); + return PutByIdStatus(NoInformation); +#endif // ENABLE(JIT) +} + +#if ENABLE(JIT) +PutByIdStatus PutByIdStatus::computeForStubInfo( + const ConcurrentJITLocker& locker, CodeBlock* profiledBlock, StructureStubInfo* stubInfo, + UniquedStringImpl* uid, CallLinkStatus::ExitSiteData callExitSiteData) +{ + if (!stubInfo) + return PutByIdStatus(); + + if (stubInfo->tookSlowPath) + return PutByIdStatus(TakesSlowPath); + + if (!stubInfo->seen) + return PutByIdStatus(); + + switch (stubInfo->accessType) { + case access_unset: + // If the JIT saw it but didn't optimize it, then assume that this takes slow path. + return PutByIdStatus(TakesSlowPath); + + case access_put_by_id_replace: { + PropertyOffset offset = + stubInfo->u.putByIdReplace.baseObjectStructure->getConcurrently(uid); + if (isValidOffset(offset)) { + return PutByIdVariant::replace( + stubInfo->u.putByIdReplace.baseObjectStructure.get(), offset); + } + return PutByIdStatus(TakesSlowPath); + } + + case access_put_by_id_transition_normal: + case access_put_by_id_transition_direct: { + ASSERT(stubInfo->u.putByIdTransition.previousStructure->transitionWatchpointSetHasBeenInvalidated()); + PropertyOffset offset = + stubInfo->u.putByIdTransition.structure->getConcurrently(uid); + if (isValidOffset(offset)) { + ObjectPropertyConditionSet conditionSet = ObjectPropertyConditionSet::fromRawPointer( + stubInfo->u.putByIdTransition.rawConditionSet); + if (!conditionSet.structuresEnsureValidity()) + return PutByIdStatus(TakesSlowPath); + return PutByIdVariant::transition( + stubInfo->u.putByIdTransition.previousStructure.get(), + stubInfo->u.putByIdTransition.structure.get(), + conditionSet, offset); + } + return PutByIdStatus(TakesSlowPath); + } + + case access_put_by_id_list: { + PolymorphicPutByIdList* list = stubInfo->u.putByIdList.list; + + PutByIdStatus result; + result.m_state = Simple; + + State slowPathState = TakesSlowPath; + for (unsigned i = 0; i < list->size(); ++i) { + const PutByIdAccess& access = list->at(i); + + switch (access.type()) { + case PutByIdAccess::Setter: + case PutByIdAccess::CustomSetter: + slowPathState = MakesCalls; + break; + default: + break; + } + } + + for (unsigned i = 0; i < list->size(); ++i) { + const PutByIdAccess& access = list->at(i); + + PutByIdVariant variant; + + switch (access.type()) { + case PutByIdAccess::Replace: { + Structure* structure = access.structure(); + PropertyOffset offset = structure->getConcurrently(uid); + if (!isValidOffset(offset)) + return PutByIdStatus(slowPathState); + variant = PutByIdVariant::replace(structure, offset); + break; + } + + case PutByIdAccess::Transition: { + PropertyOffset offset = + access.newStructure()->getConcurrently(uid); + if (!isValidOffset(offset)) + return PutByIdStatus(slowPathState); + ObjectPropertyConditionSet conditionSet = access.conditionSet(); + if 
(!conditionSet.structuresEnsureValidity()) + return PutByIdStatus(slowPathState); + variant = PutByIdVariant::transition( + access.oldStructure(), access.newStructure(), conditionSet, offset); + break; + } + + case PutByIdAccess::Setter: { + Structure* structure = access.structure(); + + ComplexGetStatus complexGetStatus = ComplexGetStatus::computeFor( + structure, access.conditionSet(), uid); + + switch (complexGetStatus.kind()) { + case ComplexGetStatus::ShouldSkip: + continue; + + case ComplexGetStatus::TakesSlowPath: + return PutByIdStatus(slowPathState); + + case ComplexGetStatus::Inlineable: { + AccessorCallJITStubRoutine* stub = static_cast<AccessorCallJITStubRoutine*>( + access.stubRoutine()); + std::unique_ptr<CallLinkStatus> callLinkStatus = + std::make_unique<CallLinkStatus>( + CallLinkStatus::computeFor( + locker, profiledBlock, *stub->m_callLinkInfo, callExitSiteData)); + + variant = PutByIdVariant::setter( + structure, complexGetStatus.offset(), complexGetStatus.conditionSet(), + WTF::move(callLinkStatus)); + } } + break; + } + + case PutByIdAccess::CustomSetter: + return PutByIdStatus(MakesCalls); + + default: + return PutByIdStatus(slowPathState); + } + + if (!result.appendVariant(variant)) + return PutByIdStatus(slowPathState); + } + + return result; + } + + default: + return PutByIdStatus(TakesSlowPath); + } +} +#endif + +PutByIdStatus PutByIdStatus::computeFor(CodeBlock* baselineBlock, CodeBlock* dfgBlock, StubInfoMap& baselineMap, StubInfoMap& dfgMap, CodeOrigin codeOrigin, UniquedStringImpl* uid) +{ +#if ENABLE(DFG_JIT) + if (dfgBlock) { + CallLinkStatus::ExitSiteData exitSiteData; + { + ConcurrentJITLocker locker(baselineBlock->m_lock); + if (hasExitSite(locker, baselineBlock, codeOrigin.bytecodeIndex)) + return PutByIdStatus(TakesSlowPath); + exitSiteData = CallLinkStatus::computeExitSiteData( + locker, baselineBlock, codeOrigin.bytecodeIndex); + } + + PutByIdStatus result; + { + ConcurrentJITLocker locker(dfgBlock->m_lock); + result = computeForStubInfo( + locker, dfgBlock, dfgMap.get(codeOrigin), uid, exitSiteData); + } + + // We use TakesSlowPath in some cases where the stub was unset. That's weird and + // it would be better not to do that. But it means that we have to defend + // ourselves here. 
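Earlier in this file, appendVariant tries to fold a new variant into an existing one and refuses when distinct variants would claim overlapping structure sets; any refusal bubbles up as a slow-path status. A toy version of that merge-or-give-up rule (illustrative types only, not the JSC classes):

```cpp
#include <algorithm>
#include <cstdio>
#include <vector>

struct Variant {
    std::vector<int> structures; // stand-in for StructureSet
    int offset;

    // Merge only when the variants agree (here: same offset).
    bool tryMerge(const Variant& other)
    {
        if (offset != other.offset)
            return false;
        structures.insert(structures.end(), other.structures.begin(), other.structures.end());
        std::sort(structures.begin(), structures.end());
        structures.erase(std::unique(structures.begin(), structures.end()), structures.end());
        return true;
    }

    bool overlaps(const Variant& other) const
    {
        for (int s : other.structures) {
            if (std::find(structures.begin(), structures.end(), s) != structures.end())
                return true;
        }
        return false;
    }
};

// Returns false when the new variant can neither merge nor coexist, which is
// where the real code gives up and reports a slow-path status.
bool appendVariant(std::vector<Variant>& variants, const Variant& v)
{
    for (Variant& existing : variants) {
        if (existing.tryMerge(v))
            return true;
    }
    for (Variant& existing : variants) {
        if (existing.overlaps(v))
            return false;
    }
    variants.push_back(v);
    return true;
}

int main()
{
    std::vector<Variant> vs;
    appendVariant(vs, { { 1 }, 16 });
    bool ok = appendVariant(vs, { { 1 }, 24 }); // same structure, different offset: conflict
    std::printf("appended: %d, count: %zu\n", ok, vs.size());
}
```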
+ if (result.isSimple()) + return result; + } +#else + UNUSED_PARAM(dfgBlock); + UNUSED_PARAM(dfgMap); +#endif + + return computeFor(baselineBlock, baselineMap, codeOrigin.bytecodeIndex, uid); +} + +PutByIdStatus PutByIdStatus::computeFor(JSGlobalObject* globalObject, const StructureSet& set, UniquedStringImpl* uid, bool isDirect) +{ + if (parseIndex(*uid)) + return PutByIdStatus(TakesSlowPath); + + if (set.isEmpty()) + return PutByIdStatus(); + + PutByIdStatus result; + result.m_state = Simple; + for (unsigned i = 0; i < set.size(); ++i) { + Structure* structure = set[i]; + + if (structure->typeInfo().overridesGetOwnPropertySlot() && structure->typeInfo().type() != GlobalObjectType) + return PutByIdStatus(TakesSlowPath); + + if (!structure->propertyAccessesAreCacheable()) + return PutByIdStatus(TakesSlowPath); + + unsigned attributes; + PropertyOffset offset = structure->getConcurrently(uid, attributes); + if (isValidOffset(offset)) { + if (attributes & CustomAccessor) + return PutByIdStatus(MakesCalls); + + if (attributes & (Accessor | ReadOnly)) + return PutByIdStatus(TakesSlowPath); + + WatchpointSet* replaceSet = structure->propertyReplacementWatchpointSet(offset); + if (!replaceSet || replaceSet->isStillValid()) { + // When this executes, it'll create, and fire, this replacement watchpoint set. + // That means that this has probably never executed or that something fishy is + // going on. Also, we cannot create or fire the watchpoint set from the concurrent + // JIT thread, so even if we wanted to do this, we'd need to have a lazy thingy. + // So, better leave this alone and take slow path. + return PutByIdStatus(TakesSlowPath); + } + + if (!result.appendVariant(PutByIdVariant::replace(structure, offset))) + return PutByIdStatus(TakesSlowPath); + continue; + } + + // Our hypothesis is that we're doing a transition. Before we prove that this is really + // true, we want to do some sanity checks. + + // Don't cache put transitions on dictionaries. + if (structure->isDictionary()) + return PutByIdStatus(TakesSlowPath); + + // If the structure corresponds to something that isn't an object, then give up, since + // we don't want to be adding properties to strings. + if (!structure->typeInfo().isObject()) + return PutByIdStatus(TakesSlowPath); + + ObjectPropertyConditionSet conditionSet; + if (!isDirect) { + conditionSet = generateConditionsForPropertySetterMissConcurrently( + globalObject->vm(), globalObject, structure, uid); + if (!conditionSet.isValid()) + return PutByIdStatus(TakesSlowPath); + } + + // We only optimize if there is already a structure that the transition is cached to. 
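Both caching gates in computeFor exist because this analysis may run on a concurrent compiler thread: a replacement is only cacheable once its replacement watchpoint set exists and has already fired, and a transition is only cacheable if the destination structure was already created by a prior execution. A toy model of the two gates (mock types and hypothetical helpers, not the JSC API):

```cpp
#include <cstdio>

struct WatchpointSet { bool stillValid; };

// Cacheable only if the set exists and has already been fired: creating or
// firing it here would be a side effect a JIT thread must not have.
bool canCacheReplace(const WatchpointSet* replaceSet)
{
    return replaceSet && !replaceSet->stillValid;
}

// Stand-in for addPropertyTransitionToExistingStructureConcurrently
// returning null: only transitions that already happened once get optimized.
bool canCacheTransition(bool transitionAlreadyExists)
{
    return transitionAlreadyExists;
}

int main()
{
    WatchpointSet fired { false };
    std::printf("replace cacheable: %d\n", canCacheReplace(&fired));
    std::printf("transition cacheable: %d\n", canCacheTransition(false));
}
```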
+ Structure* transition = Structure::addPropertyTransitionToExistingStructureConcurrently(structure, uid, 0, offset); + if (!transition) + return PutByIdStatus(TakesSlowPath); + ASSERT(isValidOffset(offset)); + + bool didAppend = result.appendVariant( + PutByIdVariant::transition(structure, transition, conditionSet, offset)); + if (!didAppend) + return PutByIdStatus(TakesSlowPath); + } + + return result; +} + +bool PutByIdStatus::makesCalls() const +{ + if (m_state == MakesCalls) + return true; + + if (m_state != Simple) + return false; + + for (unsigned i = m_variants.size(); i--;) { + if (m_variants[i].makesCalls()) + return true; + } + + return false; +} + +void PutByIdStatus::dump(PrintStream& out) const +{ + switch (m_state) { + case NoInformation: + out.print("(NoInformation)"); + return; + + case Simple: + out.print("(", listDump(m_variants), ")"); + return; + + case TakesSlowPath: + out.print("(TakesSlowPath)"); + return; + case MakesCalls: + out.print("(MakesCalls)"); + return; + } + + RELEASE_ASSERT_NOT_REACHED(); +} + +} // namespace JSC + diff --git a/Source/JavaScriptCore/bytecode/PutByIdStatus.h b/Source/JavaScriptCore/bytecode/PutByIdStatus.h new file mode 100644 index 000000000..652ccc18a --- /dev/null +++ b/Source/JavaScriptCore/bytecode/PutByIdStatus.h @@ -0,0 +1,113 @@ +/* + * Copyright (C) 2012, 2013, 2014 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef PutByIdStatus_h +#define PutByIdStatus_h + +#include "CallLinkStatus.h" +#include "ExitingJITType.h" +#include "PutByIdVariant.h" +#include "StructureStubInfo.h" +#include <wtf/text/StringImpl.h> + +namespace JSC { + +class CodeBlock; +class VM; +class JSGlobalObject; +class Structure; +class StructureChain; + +class PutByIdStatus { +public: + enum State { + // It's uncached so we have no information. + NoInformation, + // It's cached as a simple store of some kind. + Simple, + // It's known to often take slow path. + TakesSlowPath, + // It's known to take paths that make calls. 
+ MakesCalls + }; + + PutByIdStatus() + : m_state(NoInformation) + { + } + + explicit PutByIdStatus(State state) + : m_state(state) + { + ASSERT(m_state == NoInformation || m_state == TakesSlowPath || m_state == MakesCalls); + } + + PutByIdStatus(const PutByIdVariant& variant) + : m_state(Simple) + { + m_variants.append(variant); + } + + static PutByIdStatus computeFor(CodeBlock*, StubInfoMap&, unsigned bytecodeIndex, UniquedStringImpl* uid); + static PutByIdStatus computeFor(JSGlobalObject*, const StructureSet&, UniquedStringImpl* uid, bool isDirect); + + static PutByIdStatus computeFor(CodeBlock* baselineBlock, CodeBlock* dfgBlock, StubInfoMap& baselineMap, StubInfoMap& dfgMap, CodeOrigin, UniquedStringImpl* uid); + + State state() const { return m_state; } + + bool isSet() const { return m_state != NoInformation; } + bool operator!() const { return m_state == NoInformation; } + bool isSimple() const { return m_state == Simple; } + bool takesSlowPath() const { return m_state == TakesSlowPath || m_state == MakesCalls; } + bool makesCalls() const; + + size_t numVariants() const { return m_variants.size(); } + const Vector<PutByIdVariant, 1>& variants() const { return m_variants; } + const PutByIdVariant& at(size_t index) const { return m_variants[index]; } + const PutByIdVariant& operator[](size_t index) const { return at(index); } + + void dump(PrintStream&) const; + +private: +#if ENABLE(DFG_JIT) + static bool hasExitSite(const ConcurrentJITLocker&, CodeBlock*, unsigned bytecodeIndex); +#endif +#if ENABLE(JIT) + static PutByIdStatus computeForStubInfo( + const ConcurrentJITLocker&, CodeBlock*, StructureStubInfo*, UniquedStringImpl* uid, + CallLinkStatus::ExitSiteData); +#endif + static PutByIdStatus computeFromLLInt(CodeBlock*, unsigned bytecodeIndex, UniquedStringImpl* uid); + + bool appendVariant(const PutByIdVariant&); + + State m_state; + Vector<PutByIdVariant, 1> m_variants; +}; + +} // namespace JSC + +#endif // PutByIdStatus_h + diff --git a/Source/JavaScriptCore/bytecode/PutByIdVariant.cpp b/Source/JavaScriptCore/bytecode/PutByIdVariant.cpp new file mode 100644 index 000000000..e1b94ef13 --- /dev/null +++ b/Source/JavaScriptCore/bytecode/PutByIdVariant.cpp @@ -0,0 +1,237 @@ +/* + * Copyright (C) 2014, 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. 
OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "PutByIdVariant.h" + +#include "CallLinkStatus.h" +#include "JSCInlines.h" +#include <wtf/ListDump.h> + +namespace JSC { + +PutByIdVariant::PutByIdVariant(const PutByIdVariant& other) + : PutByIdVariant() +{ + *this = other; +} + +PutByIdVariant& PutByIdVariant::operator=(const PutByIdVariant& other) +{ + m_kind = other.m_kind; + m_oldStructure = other.m_oldStructure; + m_newStructure = other.m_newStructure; + m_conditionSet = other.m_conditionSet; + m_offset = other.m_offset; + if (other.m_callLinkStatus) + m_callLinkStatus = std::make_unique<CallLinkStatus>(*other.m_callLinkStatus); + else + m_callLinkStatus = nullptr; + return *this; +} + +PutByIdVariant PutByIdVariant::replace(const StructureSet& structure, PropertyOffset offset) +{ + PutByIdVariant result; + result.m_kind = Replace; + result.m_oldStructure = structure; + result.m_offset = offset; + return result; +} + +PutByIdVariant PutByIdVariant::transition( + const StructureSet& oldStructure, Structure* newStructure, + const ObjectPropertyConditionSet& conditionSet, PropertyOffset offset) +{ + PutByIdVariant result; + result.m_kind = Transition; + result.m_oldStructure = oldStructure; + result.m_newStructure = newStructure; + result.m_conditionSet = conditionSet; + result.m_offset = offset; + return result; +} + +PutByIdVariant PutByIdVariant::setter( + const StructureSet& structure, PropertyOffset offset, + const ObjectPropertyConditionSet& conditionSet, + std::unique_ptr<CallLinkStatus> callLinkStatus) +{ + PutByIdVariant result; + result.m_kind = Setter; + result.m_oldStructure = structure; + result.m_conditionSet = conditionSet; + result.m_offset = offset; + result.m_callLinkStatus = WTF::move(callLinkStatus); + return result; +} + +Structure* PutByIdVariant::oldStructureForTransition() const +{ + ASSERT(kind() == Transition); + ASSERT(m_oldStructure.size() <= 2); + for (unsigned i = m_oldStructure.size(); i--;) { + Structure* structure = m_oldStructure[i]; + if (structure != m_newStructure) + return structure; + } + RELEASE_ASSERT_NOT_REACHED(); + + return nullptr; +} + +bool PutByIdVariant::writesStructures() const +{ + switch (kind()) { + case Transition: + case Setter: + return true; + default: + return false; + } +} + +bool PutByIdVariant::reallocatesStorage() const +{ + switch (kind()) { + case Transition: + return oldStructureForTransition()->outOfLineCapacity() != newStructure()->outOfLineCapacity(); + case Setter: + return true; + default: + return false; + } +} + +bool PutByIdVariant::makesCalls() const +{ + return kind() == Setter; +} + +bool PutByIdVariant::attemptToMerge(const PutByIdVariant& other) +{ + if (m_offset != other.m_offset) + return false; + + switch (m_kind) { + case Replace: + switch (other.m_kind) { + case Replace: { + ASSERT(m_conditionSet.isEmpty()); + ASSERT(other.m_conditionSet.isEmpty()); + + m_oldStructure.merge(other.m_oldStructure); + return true; + } + + case Transition: { + PutByIdVariant newVariant = other; + if 
(newVariant.attemptToMergeTransitionWithReplace(*this)) { + *this = newVariant; + return true; + } + return false; + } + + default: + return false; + } + + case Transition: + switch (other.m_kind) { + case Replace: + return attemptToMergeTransitionWithReplace(other); + + default: + return false; + } + + default: + return false; + } +} + +bool PutByIdVariant::attemptToMergeTransitionWithReplace(const PutByIdVariant& replace) +{ + ASSERT(m_kind == Transition); + ASSERT(replace.m_kind == Replace); + ASSERT(m_offset == replace.m_offset); + ASSERT(!replace.writesStructures()); + ASSERT(!replace.reallocatesStorage()); + ASSERT(replace.conditionSet().isEmpty()); + + // This sort of merging only works when we have one path along which we add a new field which + // transitions to structure S while the other path was already on structure S. This doesn't + // work if we need to reallocate anything or if the replace path is polymorphic. + + if (reallocatesStorage()) + return false; + + if (replace.m_oldStructure.onlyStructure() != m_newStructure) + return false; + + m_oldStructure.merge(m_newStructure); + return true; +} + +void PutByIdVariant::dump(PrintStream& out) const +{ + dumpInContext(out, 0); +} + +void PutByIdVariant::dumpInContext(PrintStream& out, DumpContext* context) const +{ + switch (kind()) { + case NotSet: + out.print("<empty>"); + return; + + case Replace: + out.print( + "<Replace: ", inContext(structure(), context), ", offset = ", offset(), ">"); + return; + + case Transition: + out.print( + "<Transition: ", inContext(oldStructure(), context), " -> ", + pointerDumpInContext(newStructure(), context), ", [", + inContext(m_conditionSet, context), "], offset = ", offset(), ">"); + return; + + case Setter: + out.print( + "<Setter: ", inContext(structure(), context), ", [", + inContext(m_conditionSet, context), "]"); + out.print(", offset = ", m_offset); + out.print(", call = ", *m_callLinkStatus); + out.print(">"); + return; + } + + RELEASE_ASSERT_NOT_REACHED(); +} + +} // namespace JSC + diff --git a/Source/JavaScriptCore/bytecode/PutByIdVariant.h b/Source/JavaScriptCore/bytecode/PutByIdVariant.h new file mode 100644 index 000000000..657cdac62 --- /dev/null +++ b/Source/JavaScriptCore/bytecode/PutByIdVariant.h @@ -0,0 +1,139 @@ +/* + * Copyright (C) 2014, 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. 
OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef PutByIdVariant_h +#define PutByIdVariant_h + +#include "ObjectPropertyConditionSet.h" +#include "PropertyOffset.h" +#include "StructureSet.h" + +namespace JSC { + +class CallLinkStatus; + +class PutByIdVariant { +public: + enum Kind { + NotSet, + Replace, + Transition, + Setter + }; + + PutByIdVariant() + : m_kind(NotSet) + , m_newStructure(nullptr) + , m_offset(invalidOffset) + { + } + + PutByIdVariant(const PutByIdVariant&); + PutByIdVariant& operator=(const PutByIdVariant&); + + static PutByIdVariant replace(const StructureSet&, PropertyOffset); + + static PutByIdVariant transition( + const StructureSet& oldStructure, Structure* newStructure, + const ObjectPropertyConditionSet&, PropertyOffset); + + static PutByIdVariant setter( + const StructureSet&, PropertyOffset, const ObjectPropertyConditionSet&, + std::unique_ptr<CallLinkStatus>); + + Kind kind() const { return m_kind; } + + bool isSet() const { return kind() != NotSet; } + bool operator!() const { return !isSet(); } + + const StructureSet& structure() const + { + ASSERT(kind() == Replace || kind() == Setter); + return m_oldStructure; + } + + const StructureSet& structureSet() const + { + return structure(); + } + + const StructureSet& oldStructure() const + { + ASSERT(kind() == Transition || kind() == Replace || kind() == Setter); + return m_oldStructure; + } + + StructureSet& oldStructure() + { + ASSERT(kind() == Transition || kind() == Replace || kind() == Setter); + return m_oldStructure; + } + + Structure* oldStructureForTransition() const; + + Structure* newStructure() const + { + ASSERT(kind() == Transition); + return m_newStructure; + } + + bool writesStructures() const; + bool reallocatesStorage() const; + bool makesCalls() const; + + const ObjectPropertyConditionSet& conditionSet() const { return m_conditionSet; } + + PropertyOffset offset() const + { + ASSERT(isSet()); + return m_offset; + } + + CallLinkStatus* callLinkStatus() const + { + ASSERT(kind() == Setter); + return m_callLinkStatus.get(); + } + + bool attemptToMerge(const PutByIdVariant& other); + + void dump(PrintStream&) const; + void dumpInContext(PrintStream&, DumpContext*) const; + +private: + bool attemptToMergeTransitionWithReplace(const PutByIdVariant& replace); + + Kind m_kind; + StructureSet m_oldStructure; + Structure* m_newStructure; + ObjectPropertyConditionSet m_conditionSet; + PropertyOffset m_offset; + std::unique_ptr<CallLinkStatus> m_callLinkStatus; +}; + +} // namespace JSC + +#endif // PutByIdVariant_h + diff --git a/Source/JavaScriptCore/bytecode/PutKind.h b/Source/JavaScriptCore/bytecode/PutKind.h new file mode 100644 index 000000000..7a1dd642e --- /dev/null +++ b/Source/JavaScriptCore/bytecode/PutKind.h @@ -0,0 +1,36 @@ +/* + * Copyright (C) 2012 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef PutKind_h +#define PutKind_h + +namespace JSC { + +enum PutKind { Direct, NotDirect }; + +} // namespace JSC + +#endif // PutKind_h + diff --git a/Source/JavaScriptCore/bytecode/ReduceWhitespace.cpp b/Source/JavaScriptCore/bytecode/ReduceWhitespace.cpp new file mode 100644 index 000000000..d1f25b01f --- /dev/null +++ b/Source/JavaScriptCore/bytecode/ReduceWhitespace.cpp @@ -0,0 +1,54 @@ +/* + * Copyright (C) 2012, 2013 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "config.h" +#include "ReduceWhitespace.h" + +#include <wtf/ASCIICType.h> +#include <wtf/StringPrintStream.h> + +namespace JSC { + +CString reduceWhitespace(const CString& input) +{ + StringPrintStream out; + + const char* data = input.data(); + + for (unsigned i = 0; i < input.length();) { + if (isASCIISpace(data[i])) { + while (i < input.length() && isASCIISpace(data[i])) + ++i; + out.print(CharacterDump(' ')); + continue; + } + out.print(CharacterDump(data[i])); + ++i; + } + + return out.toCString(); +} + +} // namespace JSC diff --git a/Source/JavaScriptCore/bytecode/ReduceWhitespace.h b/Source/JavaScriptCore/bytecode/ReduceWhitespace.h new file mode 100644 index 000000000..121caf2c2 --- /dev/null +++ b/Source/JavaScriptCore/bytecode/ReduceWhitespace.h @@ -0,0 +1,38 @@ +/* + * Copyright (C) 2012, 2013 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef ReduceWhitespace_h +#define ReduceWhitespace_h + +#include <wtf/text/CString.h> + +namespace JSC { + +// Replace all whitespace runs with a single space. +CString reduceWhitespace(const CString&); + +} // namespace JSC + +#endif // ReduceWhitespace_h diff --git a/Source/JavaScriptCore/bytecode/SamplingTool.cpp b/Source/JavaScriptCore/bytecode/SamplingTool.cpp new file mode 100644 index 000000000..f5bf2b72a --- /dev/null +++ b/Source/JavaScriptCore/bytecode/SamplingTool.cpp @@ -0,0 +1,479 @@ +/* + * Copyright (C) 2008, 2009, 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Apple Inc. ("Apple") nor the names of + * its contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "SamplingTool.h" + +#include "CodeBlock.h" +#include "Interpreter.h" +#include "Opcode.h" +#include "JSCInlines.h" + +#if !OS(WINDOWS) +#include <unistd.h> +#endif + +namespace JSC { + +#if ENABLE(SAMPLING_FLAGS) + +void SamplingFlags::sample() +{ + uint32_t mask = static_cast<uint32_t>(1 << 31); + unsigned index; + + for (index = 0; index < 32; ++index) { + if (mask & s_flags) + break; + mask >>= 1; + } + + s_flagCounts[32 - index]++; +} + +void SamplingFlags::start() +{ + for (unsigned i = 0; i <= 32; ++i) + s_flagCounts[i] = 0; +} +void SamplingFlags::stop() +{ + uint64_t total = 0; + for (unsigned i = 0; i <= 32; ++i) + total += s_flagCounts[i]; + + if (total) { + dataLogF("\nSamplingFlags: sample counts with flags set: (%lld total)\n", total); + for (unsigned i = 0; i <= 32; ++i) { + if (s_flagCounts[i]) + dataLogF(" [ %02d ] : %lld\t\t(%03.2f%%)\n", i, s_flagCounts[i], (100.0 * s_flagCounts[i]) / total); + } + dataLogF("\n"); + } else + dataLogF("\nSamplingFlags: no samples.\n\n"); +} +uint64_t SamplingFlags::s_flagCounts[33]; + +#else +void SamplingFlags::start() {} +void SamplingFlags::stop() {} +#endif + +#if ENABLE(SAMPLING_REGIONS) +volatile uintptr_t SamplingRegion::s_currentOrReserved; +Spectrum<const char*>* SamplingRegion::s_spectrum; +unsigned long SamplingRegion::s_noneOfTheAbove; +unsigned SamplingRegion::s_numberOfSamplesSinceDump; + +SamplingRegion::Locker::Locker() +{ + uintptr_t previous; + while (true) { + previous = s_currentOrReserved; + if (previous & 1) { +#if OS(UNIX) + sched_yield(); +#endif + continue; + } + if (WTF::weakCompareAndSwapUIntPtr(&s_currentOrReserved, previous, previous | 1)) + break; + } +} + +SamplingRegion::Locker::~Locker() +{ + // We don't need the CAS, but we do it out of an + // abundance of caution (and because it gives us a memory fence, which is + // never bad). + uintptr_t previous; + do { + previous = s_currentOrReserved; + } while (!WTF::weakCompareAndSwapUIntPtr(&s_currentOrReserved, previous, previous & ~1)); +} + +void SamplingRegion::sample() +{ + // Make sure we lock s_current. + Locker locker; + + // Create a spectrum if we don't have one already. + if (!s_spectrum) + s_spectrum = new Spectrum<const char*>(); + + ASSERT(s_currentOrReserved & 1); + + // Walk the region stack, and record each region we see. 
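SamplingRegion::Locker above steals the low bit of a pointer-sized word as a spinlock, taken and released with compare-and-swap while the rest of the word keeps holding the region-stack pointer. The same shape with std::atomic in place of WTF::weakCompareAndSwapUIntPtr (a sketch, not the JSC code):

```cpp
#include <atomic>
#include <cstdint>
#include <thread>

static std::atomic<std::uintptr_t> currentOrReserved { 0 };

void lockRegions()
{
    for (;;) {
        std::uintptr_t previous = currentOrReserved.load();
        if (previous & 1) { // someone else holds the bit; yield and retry
            std::this_thread::yield();
            continue;
        }
        if (currentOrReserved.compare_exchange_weak(previous, previous | 1))
            return;
    }
}

void unlockRegions()
{
    // CAS rather than a plain store, mirroring the "abundance of caution"
    // release in the original.
    std::uintptr_t previous;
    do {
        previous = currentOrReserved.load();
    } while (!currentOrReserved.compare_exchange_weak(previous, previous & ~std::uintptr_t(1)));
}

int main()
{
    lockRegions();
    // ... walk or mutate the region list while the bit is held ...
    unlockRegions();
}
```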
+ SamplingRegion* region = bitwise_cast<SamplingRegion*>(s_currentOrReserved & ~1); + if (region) { + for (; region; region = region->m_previous) + s_spectrum->add(region->m_name); + } else + s_noneOfTheAbove++; + + if (s_numberOfSamplesSinceDump++ == SamplingThread::s_hertz) { + s_numberOfSamplesSinceDump = 0; + dumpInternal(); + } +} + +void SamplingRegion::dump() +{ + Locker locker; + + dumpInternal(); +} + +void SamplingRegion::dumpInternal() +{ + if (!s_spectrum) { + dataLogF("\nSamplingRegion: was never sampled.\n\n"); + return; + } + + Vector<Spectrum<const char*>::KeyAndCount> list = s_spectrum->buildList(); + + unsigned long total = s_noneOfTheAbove; + for (unsigned i = list.size(); i--;) + total += list[i].count; + + dataLogF("\nSamplingRegion: sample counts for regions: (%lu samples)\n", total); + + for (unsigned i = list.size(); i--;) + dataLogF(" %3.2lf%% %s\n", (100.0 * list[i].count) / total, list[i].key); +} +#else // ENABLE(SAMPLING_REGIONS) +void SamplingRegion::dump() { } +#endif // ENABLE(SAMPLING_REGIONS) + +/* + Start with flag 16 set. + By doing this the monitoring of lower valued flags will be masked out + until flag 16 is explicitly cleared. +*/ +uint32_t SamplingFlags::s_flags = 1 << 15; + + +#if OS(WINDOWS) + +static void sleepForMicroseconds(unsigned us) +{ + unsigned ms = us / 1000; + if (us && !ms) + ms = 1; + Sleep(ms); +} + +#else + +static void sleepForMicroseconds(unsigned us) +{ + usleep(us); +} + +#endif + +static inline unsigned hertz2us(unsigned hertz) +{ + return 1000000 / hertz; +} + + +SamplingTool* SamplingTool::s_samplingTool = 0; + + +bool SamplingThread::s_running = false; +unsigned SamplingThread::s_hertz = 10000; +ThreadIdentifier SamplingThread::s_samplingThread; + +void SamplingThread::threadStartFunc(void*) +{ + while (s_running) { + sleepForMicroseconds(hertz2us(s_hertz)); + +#if ENABLE(SAMPLING_FLAGS) + SamplingFlags::sample(); +#endif +#if ENABLE(SAMPLING_REGIONS) + SamplingRegion::sample(); +#endif +#if ENABLE(OPCODE_SAMPLING) + SamplingTool::sample(); +#endif + } +} + + +void SamplingThread::start(unsigned hertz) +{ + ASSERT(!s_running); + s_running = true; + s_hertz = hertz; + + s_samplingThread = createThread(threadStartFunc, 0, "JavaScriptCore::Sampler"); +} + +void SamplingThread::stop() +{ + ASSERT(s_running); + s_running = false; + waitForThreadCompletion(s_samplingThread); +} + + +void ScriptSampleRecord::sample(CodeBlock* codeBlock, Instruction* vPC) +{ + if (!m_samples) { + m_size = codeBlock->instructions().size(); + m_samples = static_cast<int*>(calloc(m_size, sizeof(int))); + m_codeBlock = codeBlock; + } + + ++m_sampleCount; + + unsigned offset = vPC - codeBlock->instructions().begin(); + // Since we don't read and write codeBlock and vPC atomically, this check + // can fail if we sample mid op_call / op_ret.
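threadStartFunc above is the whole sampling engine: sleep for 1/hertz seconds, take one sample of whatever subsystems are enabled, repeat until stopped. A minimal modern-C++ sketch of the same loop, with std::thread and std::chrono standing in for WTF's createThread and usleep:

```cpp
#include <atomic>
#include <chrono>
#include <cstdio>
#include <thread>

static std::atomic<bool> running { false };

void samplerLoop(unsigned hertz)
{
    const auto period = std::chrono::microseconds(1000000 / hertz); // hertz2us
    while (running.load()) {
        std::this_thread::sleep_for(period);
        // take one sample here (flags, regions, opcodes, ...)
    }
}

int main()
{
    running = true;
    std::thread sampler(samplerLoop, 10000); // 10 kHz, matching s_hertz above
    std::this_thread::sleep_for(std::chrono::milliseconds(5));
    running = false;
    sampler.join();
    std::printf("sampler stopped\n");
}
```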
+ if (offset < m_size) {
+ m_samples[offset]++;
+ m_opcodeSampleCount++;
+ }
+}
+
+void SamplingTool::doRun()
+{
+ Sample sample(m_sample, m_codeBlock);
+ ++m_sampleCount;
+
+ if (sample.isNull())
+ return;
+
+ if (!sample.inHostFunction()) {
+ unsigned opcodeID = m_interpreter->getOpcodeID(sample.vPC()[0].u.opcode);
+
+ ++m_opcodeSampleCount;
+ ++m_opcodeSamples[opcodeID];
+
+ if (sample.inCTIFunction())
+ m_opcodeSamplesInCTIFunctions[opcodeID]++;
+ }
+
+#if ENABLE(CODEBLOCK_SAMPLING)
+ if (CodeBlock* codeBlock = sample.codeBlock()) {
+ LockHolder locker(m_scriptSampleMapMutex);
+ ScriptSampleRecord* record = m_scopeSampleMap->get(codeBlock->ownerExecutable());
+ ASSERT(record);
+ record->sample(codeBlock, sample.vPC());
+ }
+#endif
+}
+
+void SamplingTool::sample()
+{
+ s_samplingTool->doRun();
+}
+
+void SamplingTool::notifyOfScope(VM& vm, ScriptExecutable* script)
+{
+#if ENABLE(CODEBLOCK_SAMPLING)
+ LockHolder locker(m_scriptSampleMapMutex);
+ m_scopeSampleMap->set(script, std::make_unique<ScriptSampleRecord>(vm, script));
+#else
+ UNUSED_PARAM(vm);
+ UNUSED_PARAM(script);
+#endif
+}
+
+void SamplingTool::setup()
+{
+ s_samplingTool = this;
+}
+
+#if ENABLE(OPCODE_SAMPLING)
+
+struct OpcodeSampleInfo {
+ OpcodeID opcode;
+ long long count;
+ long long countInCTIFunctions;
+};
+
+struct LineCountInfo {
+ unsigned line;
+ unsigned count;
+};
+
+static int compareOpcodeIndicesSampling(const void* left, const void* right)
+{
+ const OpcodeSampleInfo* leftSampleInfo = reinterpret_cast<const OpcodeSampleInfo*>(left);
+ const OpcodeSampleInfo* rightSampleInfo = reinterpret_cast<const OpcodeSampleInfo*>(right);
+
+ return (leftSampleInfo->count < rightSampleInfo->count) ? 1 : (leftSampleInfo->count > rightSampleInfo->count) ? -1 : 0;
+}
+
+#if ENABLE(CODEBLOCK_SAMPLING)
+static int compareLineCountInfoSampling(const void* left, const void* right)
+{
+ const LineCountInfo* leftLineCount = reinterpret_cast<const LineCountInfo*>(left);
+ const LineCountInfo* rightLineCount = reinterpret_cast<const LineCountInfo*>(right);
+
+ return (leftLineCount->line > rightLineCount->line) ? 1 : (leftLineCount->line < rightLineCount->line) ? -1 : 0;
+}
+
+static int compareScriptSampleRecords(const void* left, const void* right)
+{
+ const ScriptSampleRecord* const leftValue = *static_cast<const ScriptSampleRecord* const *>(left);
+ const ScriptSampleRecord* const rightValue = *static_cast<const ScriptSampleRecord* const *>(right);
+
+ return (leftValue->m_sampleCount < rightValue->m_sampleCount) ? 1 : (leftValue->m_sampleCount > rightValue->m_sampleCount) ? -1 : 0;
+}
+#endif
+
+void SamplingTool::dump(ExecState* exec)
+{
+ // Tidies up SunSpider output by removing short scripts; such a small number of samples would likely not be useful anyhow.
+ if (m_sampleCount < 10)
+ return;
+
+ // (1) Build and sort 'opcodeSampleInfo' array.
+
+ OpcodeSampleInfo opcodeSampleInfo[numOpcodeIDs];
+ for (int i = 0; i < numOpcodeIDs; ++i) {
+ opcodeSampleInfo[i].opcode = static_cast<OpcodeID>(i);
+ opcodeSampleInfo[i].count = m_opcodeSamples[i];
+ opcodeSampleInfo[i].countInCTIFunctions = m_opcodeSamplesInCTIFunctions[i];
+ }
+
+ qsort(opcodeSampleInfo, numOpcodeIDs, sizeof(OpcodeSampleInfo), compareOpcodeIndicesSampling);
+
+ // (2) Print Opcode sampling results.
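+ //
+ // The loop below prints one row per opcode that was sampled at least once.
+ // A row looks roughly like this (the numbers here are illustrative only,
+ // not real measurements):
+ //
+ //   op_get_by_id:            1200   12.000%  6.000%  | 300    25.000%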
+
+ dataLogF("\nBytecode samples [*]\n");
+ dataLogF(" sample %% of %% of | cti cti %%\n");
+ dataLogF("opcode count VM total | count of self\n");
+ dataLogF("------------------------------------------------------- | ----------------\n");
+
+ for (int i = 0; i < numOpcodeIDs; ++i) {
+ long long count = opcodeSampleInfo[i].count;
+ if (!count)
+ continue;
+
+ OpcodeID opcodeID = opcodeSampleInfo[i].opcode;
+
+ const char* opcodeName = opcodeNames[opcodeID];
+ const char* opcodePadding = padOpcodeName(opcodeID, 28);
+ double percentOfVM = (static_cast<double>(count) * 100) / m_opcodeSampleCount;
+ double percentOfTotal = (static_cast<double>(count) * 100) / m_sampleCount;
+ long long countInCTIFunctions = opcodeSampleInfo[i].countInCTIFunctions;
+ double percentInCTIFunctions = (static_cast<double>(countInCTIFunctions) * 100) / count;
+ dataLogF("%s:%s%-6lld %.3f%%\t%.3f%%\t | %-6lld %.3f%%\n", opcodeName, opcodePadding, count, percentOfVM, percentOfTotal, countInCTIFunctions, percentInCTIFunctions);
+ }
+
+ dataLogF("\n[*] Samples inside host code are not charged to any Bytecode.\n\n");
+ dataLogF("\tSamples inside VM:\t\t%lld / %lld (%.3f%%)\n", m_opcodeSampleCount, m_sampleCount, (static_cast<double>(m_opcodeSampleCount) * 100) / m_sampleCount);
+ dataLogF("\tSamples inside host code:\t%lld / %lld (%.3f%%)\n\n", m_sampleCount - m_opcodeSampleCount, m_sampleCount, (static_cast<double>(m_sampleCount - m_opcodeSampleCount) * 100) / m_sampleCount);
+ dataLogF("\tsample count:\tsamples inside this opcode\n");
+ dataLogF("\t%% of VM:\tsample count / all opcode samples\n");
+ dataLogF("\t%% of total:\tsample count / all samples\n");
+ dataLogF("\t--------------\n");
+ dataLogF("\tcti count:\tsamples inside a CTI function called by this opcode\n");
+ dataLogF("\tcti %% of self:\tcti count / sample count\n");
+
+#if ENABLE(CODEBLOCK_SAMPLING)
+
+ // (3) Build and sort 'codeBlockSamples' array.
+
+ int scopeCount = m_scopeSampleMap->size();
+ Vector<ScriptSampleRecord*> codeBlockSamples(scopeCount);
+ ScriptSampleRecordMap::iterator iter = m_scopeSampleMap->begin();
+ for (int i = 0; i < scopeCount; ++i, ++iter)
+ codeBlockSamples[i] = iter->value.get();
+
+ qsort(codeBlockSamples.begin(), scopeCount, sizeof(ScriptSampleRecord*), compareScriptSampleRecords);
+
+ // (4) Print data from 'codeBlockSamples' array.
+
+ dataLogF("\nCodeBlock samples\n\n");
+
+ for (int i = 0; i < scopeCount; ++i) {
+ ScriptSampleRecord* record = codeBlockSamples[i];
+ CodeBlock* codeBlock = record->m_codeBlock;
+
+ double blockPercent = (record->m_sampleCount * 100.0) / m_sampleCount;
+
+ if (blockPercent >= 1) {
+ // Instruction* code = codeBlock->instructions().begin();
+ dataLogF("#%d: %s:%d: %d / %lld (%.3f%%)\n", i + 1, record->m_executable->sourceURL().utf8().data(), codeBlock->lineNumberForBytecodeOffset(0), record->m_sampleCount, m_sampleCount, blockPercent);
+ if (i < 10) {
+ HashMap<unsigned,unsigned> lineCounts;
+ codeBlock->dump(exec);
+
+ dataLogF(" Opcode and line number samples [*]\n\n");
+ for (unsigned op = 0; op < record->m_size; ++op) {
+ int count = record->m_samples[op];
+ if (count) {
+ dataLogF(" [% 4d] has sample count: % 4d\n", op, count);
+ unsigned line = codeBlock->lineNumberForBytecodeOffset(op);
+ lineCounts.set(line, (lineCounts.contains(line) ?
lineCounts.get(line) : 0) + count); + } + } + dataLogF("\n"); + + int linesCount = lineCounts.size(); + Vector<LineCountInfo> lineCountInfo(linesCount); + int lineno = 0; + for (HashMap<unsigned,unsigned>::iterator iter = lineCounts.begin(); iter != lineCounts.end(); ++iter, ++lineno) { + lineCountInfo[lineno].line = iter->key; + lineCountInfo[lineno].count = iter->value; + } + + qsort(lineCountInfo.begin(), linesCount, sizeof(LineCountInfo), compareLineCountInfoSampling); + + for (lineno = 0; lineno < linesCount; ++lineno) { + dataLogF(" Line #%d has sample count %d.\n", lineCountInfo[lineno].line, lineCountInfo[lineno].count); + } + dataLogF("\n"); + dataLogF(" [*] Samples inside host code are charged to the calling Bytecode.\n"); + dataLogF(" Samples on a call / return boundary are not charged to a specific opcode or line.\n\n"); + dataLogF(" Samples on a call / return boundary: %d / %d (%.3f%%)\n\n", record->m_sampleCount - record->m_opcodeSampleCount, record->m_sampleCount, (static_cast<double>(record->m_sampleCount - record->m_opcodeSampleCount) * 100) / record->m_sampleCount); + } + } + } +#else + UNUSED_PARAM(exec); +#endif +} + +#else + +void SamplingTool::dump(ExecState*) +{ +} + +#endif + +} // namespace JSC diff --git a/Source/JavaScriptCore/bytecode/SamplingTool.h b/Source/JavaScriptCore/bytecode/SamplingTool.h new file mode 100644 index 000000000..18e348377 --- /dev/null +++ b/Source/JavaScriptCore/bytecode/SamplingTool.h @@ -0,0 +1,349 @@ +/* + * Copyright (C) 2008, 2013, 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Apple Inc. ("Apple") nor the names of + * its contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef SamplingTool_h +#define SamplingTool_h + +#include "Strong.h" +#include "Opcode.h" +#include "SamplingCounter.h" +#include <wtf/Assertions.h> +#include <wtf/Atomics.h> +#include <wtf/HashMap.h> +#include <wtf/Lock.h> +#include <wtf/MainThread.h> +#include <wtf/Spectrum.h> +#include <wtf/Threading.h> + +namespace JSC { + + class ScriptExecutable; + + class SamplingFlags { + public: + JS_EXPORT_PRIVATE static void start(); + JS_EXPORT_PRIVATE static void stop(); + +#if ENABLE(SAMPLING_FLAGS) + static void setFlag(unsigned flag) + { + ASSERT(flag >= 1); + ASSERT(flag <= 32); + s_flags |= 1u << (flag - 1); + } + + static void clearFlag(unsigned flag) + { + ASSERT(flag >= 1); + ASSERT(flag <= 32); + s_flags &= ~(1u << (flag - 1)); + } + + static void sample(); + + class ScopedFlag { + public: + ScopedFlag(int flag) + : m_flag(flag) + { + setFlag(flag); + } + + ~ScopedFlag() + { + clearFlag(m_flag); + } + + private: + int m_flag; + }; + + static const void* addressOfFlags() + { + return &s_flags; + } + +#endif + private: + JS_EXPORTDATA static uint32_t s_flags; +#if ENABLE(SAMPLING_FLAGS) + static uint64_t s_flagCounts[33]; +#endif + }; + +#if ENABLE(SAMPLING_REGIONS) + class SamplingRegion { + public: + // Create a scoped sampling region using a C string constant name that describes + // what you are doing. This must be a string constant that persists for the + // lifetime of the process and is immutable. + SamplingRegion(const char* name) + { + if (!isMainThread()) { + m_name = 0; + return; + } + + m_name = name; + exchangeCurrent(this, &m_previous); + ASSERT(!m_previous || m_previous > this); + } + + ~SamplingRegion() + { + if (!m_name) + return; + + ASSERT(bitwise_cast<SamplingRegion*>(s_currentOrReserved & ~1) == this); + exchangeCurrent(m_previous); + } + + static void sample(); + + JS_EXPORT_PRIVATE static void dump(); + + private: + const char* m_name; + SamplingRegion* m_previous; + + static void exchangeCurrent(SamplingRegion* current, SamplingRegion** previousPtr = 0) + { + uintptr_t previous; + while (true) { + previous = s_currentOrReserved; + + // If it's reserved (i.e. sampling thread is reading it), loop around. + if (previous & 1) { +#if OS(UNIX) + sched_yield(); +#endif + continue; + } + + // If we're going to CAS, then make sure previous is set. 
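+ // (Capturing *previousPtr before the CAS matters: the moment the CAS
+ // publishes the new region, the sampling thread may walk the stack through
+ // m_previous, so the link must already be in place.)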
+ if (previousPtr)
+ *previousPtr = bitwise_cast<SamplingRegion*>(previous);
+
+ if (WTF::weakCompareAndSwapUIntPtr(&s_currentOrReserved, previous, bitwise_cast<uintptr_t>(current)))
+ break;
+ }
+ }
+
+ static void dumpInternal();
+
+ class Locker {
+ public:
+ Locker();
+ ~Locker();
+ };
+
+ static volatile uintptr_t s_currentOrReserved;
+
+ // rely on identity hashing of string constants
+ static Spectrum<const char*>* s_spectrum;
+
+ static unsigned long s_noneOfTheAbove;
+
+ static unsigned s_numberOfSamplesSinceDump;
+ };
+#else // ENABLE(SAMPLING_REGIONS)
+ class SamplingRegion {
+ public:
+ SamplingRegion(const char*) { }
+ JS_EXPORT_PRIVATE void dump();
+ };
+#endif // ENABLE(SAMPLING_REGIONS)
+
+ class CodeBlock;
+ class ExecState;
+ class Interpreter;
+ class ScopeNode;
+ struct Instruction;
+
+ struct ScriptSampleRecord {
+ ScriptSampleRecord(VM& vm, ScriptExecutable* executable)
+ : m_executable(vm, executable)
+ , m_codeBlock(0)
+ , m_sampleCount(0)
+ , m_opcodeSampleCount(0)
+ , m_samples(0)
+ , m_size(0)
+ {
+ }
+
+ ~ScriptSampleRecord()
+ {
+ if (m_samples)
+ free(m_samples);
+ }
+
+ void sample(CodeBlock*, Instruction*);
+
+ Strong<ScriptExecutable> m_executable;
+ CodeBlock* m_codeBlock;
+ int m_sampleCount;
+ int m_opcodeSampleCount;
+ int* m_samples;
+ unsigned m_size;
+ };
+
+ typedef HashMap<ScriptExecutable*, std::unique_ptr<ScriptSampleRecord>> ScriptSampleRecordMap;
+
+ class SamplingThread {
+ public:
+ // Sampling thread state.
+ static bool s_running;
+ static unsigned s_hertz;
+ static ThreadIdentifier s_samplingThread;
+
+ JS_EXPORT_PRIVATE static void start(unsigned hertz = 10000);
+ JS_EXPORT_PRIVATE static void stop();
+
+ static void threadStartFunc(void*);
+ };
+
+ class SamplingTool {
+ WTF_MAKE_FAST_ALLOCATED;
+ public:
+ friend struct CallRecord;
+
+#if ENABLE(OPCODE_SAMPLING)
+ class CallRecord {
+ WTF_MAKE_NONCOPYABLE(CallRecord);
+ public:
+ CallRecord(SamplingTool* samplingTool, bool isHostCall = false)
+ : m_samplingTool(samplingTool)
+ , m_savedSample(samplingTool->m_sample)
+ , m_savedCodeBlock(samplingTool->m_codeBlock)
+ {
+ if (isHostCall)
+ samplingTool->m_sample |= 0x1;
+ }
+
+ ~CallRecord()
+ {
+ m_samplingTool->m_sample = m_savedSample;
+ m_samplingTool->m_codeBlock = m_savedCodeBlock;
+ }
+
+ private:
+ SamplingTool* m_samplingTool;
+ intptr_t m_savedSample;
+ CodeBlock* m_savedCodeBlock;
+ };
+#else
+ class CallRecord {
+ WTF_MAKE_NONCOPYABLE(CallRecord);
+ public:
+ CallRecord(SamplingTool*, bool = false)
+ {
+ }
+ };
+#endif
+
+ SamplingTool(Interpreter* interpreter)
+ : m_interpreter(interpreter)
+ , m_codeBlock(0)
+ , m_sample(0)
+ , m_sampleCount(0)
+ , m_opcodeSampleCount(0)
+#if ENABLE(CODEBLOCK_SAMPLING)
+ , m_scopeSampleMap(std::make_unique<ScriptSampleRecordMap>())
+#endif
+ {
+ memset(m_opcodeSamples, 0, sizeof(m_opcodeSamples));
+ memset(m_opcodeSamplesInCTIFunctions, 0, sizeof(m_opcodeSamplesInCTIFunctions));
+ }
+
+ JS_EXPORT_PRIVATE void setup();
+ void dump(ExecState*);
+
+ void notifyOfScope(VM&, ScriptExecutable* scope);
+
+ void sample(CodeBlock* codeBlock, Instruction* vPC)
+ {
+ ASSERT(!(reinterpret_cast<intptr_t>(vPC) & 0x3));
+ m_codeBlock = codeBlock;
+ m_sample = reinterpret_cast<intptr_t>(vPC);
+ }
+
+ CodeBlock** codeBlockSlot() { return &m_codeBlock; }
+ intptr_t* sampleSlot() { return &m_sample; }
+
+ void* encodeSample(Instruction* vPC, bool inCTIFunction = false, bool inHostFunction = false)
+ {
+ ASSERT(!(reinterpret_cast<intptr_t>(vPC) & 0x3));
+ return
reinterpret_cast<void*>(reinterpret_cast<intptr_t>(vPC) | (static_cast<intptr_t>(inCTIFunction) << 1) | static_cast<intptr_t>(inHostFunction)); + } + + static void sample(); + + private: + class Sample { + public: + Sample(volatile intptr_t sample, CodeBlock* volatile codeBlock) + : m_sample(sample) + , m_codeBlock(codeBlock) + { + } + + bool isNull() { return !m_sample; } + CodeBlock* codeBlock() { return m_codeBlock; } + Instruction* vPC() { return reinterpret_cast<Instruction*>(m_sample & ~0x3); } + bool inHostFunction() { return m_sample & 0x1; } + bool inCTIFunction() { return m_sample & 0x2; } + + private: + intptr_t m_sample; + CodeBlock* m_codeBlock; + }; + + void doRun(); + static SamplingTool* s_samplingTool; + + Interpreter* m_interpreter; + + // State tracked by the main thread, used by the sampling thread. + CodeBlock* m_codeBlock; + intptr_t m_sample; + + // Gathered sample data. + long long m_sampleCount; + long long m_opcodeSampleCount; + unsigned m_opcodeSamples[numOpcodeIDs]; + unsigned m_opcodeSamplesInCTIFunctions[numOpcodeIDs]; + +#if ENABLE(CODEBLOCK_SAMPLING) + Lock m_scriptSampleMapMutex; + std::unique_ptr<ScriptSampleRecordMap> m_scopeSampleMap; +#endif + }; + +} // namespace JSC + +#endif // SamplingTool_h diff --git a/Source/JavaScriptCore/bytecode/SpecialPointer.cpp b/Source/JavaScriptCore/bytecode/SpecialPointer.cpp new file mode 100644 index 000000000..dc5a363b6 --- /dev/null +++ b/Source/JavaScriptCore/bytecode/SpecialPointer.cpp @@ -0,0 +1,46 @@ +/* + * Copyright (C) 2012 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "config.h" +#include "SpecialPointer.h" + +#include "CodeBlock.h" +#include "JSGlobalObject.h" +#include "JSCInlines.h" + +namespace JSC { + +void* actualPointerFor(JSGlobalObject* globalObject, Special::Pointer pointer) +{ + return globalObject->actualPointerFor(pointer); +} + +void* actualPointerFor(CodeBlock* codeBlock, Special::Pointer pointer) +{ + return actualPointerFor(codeBlock->globalObject(), pointer); +} + +} // namespace JSC + diff --git a/Source/JavaScriptCore/bytecode/SpecialPointer.h b/Source/JavaScriptCore/bytecode/SpecialPointer.h new file mode 100644 index 000000000..64fb23fcf --- /dev/null +++ b/Source/JavaScriptCore/bytecode/SpecialPointer.h @@ -0,0 +1,67 @@ +/* + * Copyright (C) 2012 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef SpecialPointer_h +#define SpecialPointer_h + +namespace JSC { + +class CodeBlock; +class JSGlobalObject; + +namespace Special { +enum Pointer { + CallFunction, + ApplyFunction, + ObjectConstructor, + ArrayConstructor, + TableSize // Not a real special pointer. Use this to determine the number of pointers. +}; +} // namespace Special + +enum class LinkTimeConstant { + DefinePropertyFunction, +}; +const unsigned LinkTimeConstantCount = 1; + +inline bool pointerIsFunction(Special::Pointer pointer) +{ + ASSERT_UNUSED(pointer, pointer < Special::TableSize); + return true; +} + +inline bool pointerIsCell(Special::Pointer pointer) +{ + ASSERT_UNUSED(pointer, pointer < Special::TableSize); + return true; +} + +void* actualPointerFor(JSGlobalObject*, Special::Pointer); +void* actualPointerFor(CodeBlock*, Special::Pointer); + +} // namespace JSC + +#endif // SpecialPointer_h + diff --git a/Source/JavaScriptCore/bytecode/SpeculatedType.cpp b/Source/JavaScriptCore/bytecode/SpeculatedType.cpp new file mode 100644 index 000000000..ca9514c01 --- /dev/null +++ b/Source/JavaScriptCore/bytecode/SpeculatedType.cpp @@ -0,0 +1,562 @@ +/* + * Copyright (C) 2011-2013, 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Apple Inc. ("Apple") nor the names of + * its contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "SpeculatedType.h" + +#include "DirectArguments.h" +#include "JSArray.h" +#include "JSFunction.h" +#include "JSCInlines.h" +#include "ScopedArguments.h" +#include "StringObject.h" +#include "ValueProfile.h" +#include <wtf/StringPrintStream.h> + +namespace JSC { + +void dumpSpeculation(PrintStream& out, SpeculatedType value) +{ + if (value == SpecNone) { + out.print("None"); + return; + } + + StringPrintStream myOut; + + bool isTop = true; + + if ((value & SpecCell) == SpecCell) + myOut.print("Cell"); + else { + if ((value & SpecObject) == SpecObject) + myOut.print("Object"); + else { + if (value & SpecCellOther) + myOut.print("Othercell"); + else + isTop = false; + + if (value & SpecObjectOther) + myOut.print("Otherobj"); + else + isTop = false; + + if (value & SpecFinalObject) + myOut.print("Final"); + else + isTop = false; + + if (value & SpecArray) + myOut.print("Array"); + else + isTop = false; + + if (value & SpecInt8Array) + myOut.print("Int8array"); + else + isTop = false; + + if (value & SpecInt16Array) + myOut.print("Int16array"); + else + isTop = false; + + if (value & SpecInt32Array) + myOut.print("Int32array"); + else + isTop = false; + + if (value & SpecUint8Array) + myOut.print("Uint8array"); + else + isTop = false; + + if (value & SpecUint8ClampedArray) + myOut.print("Uint8clampedarray"); + else + isTop = false; + + if (value & SpecUint16Array) + myOut.print("Uint16array"); + else + isTop = false; + + if (value & SpecUint32Array) + myOut.print("Uint32array"); + else + isTop = false; + + if (value & SpecFloat32Array) + myOut.print("Float32array"); + else + isTop = false; + + if (value & SpecFloat64Array) + myOut.print("Float64array"); + else + isTop = false; + + if (value & SpecFunction) + myOut.print("Function"); + else + isTop = false; + + if (value & SpecDirectArguments) + myOut.print("Directarguments"); + else + isTop = false; + + if (value & SpecScopedArguments) + myOut.print("Scopedarguments"); + else + isTop = false; + + if (value & SpecStringObject) + myOut.print("Stringobject"); + else + isTop = false; + } + + if ((value & SpecString) == SpecString) + myOut.print("String"); + else { + if (value & SpecStringIdent) + 
myOut.print("Stringident"); + else + isTop = false; + + if (value & SpecStringVar) + myOut.print("Stringvar"); + else + isTop = false; + } + + if (value & SpecSymbol) + myOut.print("Symbol"); + else + isTop = false; + } + + if (value == SpecInt32) + myOut.print("Int32"); + else { + if (value & SpecBoolInt32) + myOut.print("Boolint32"); + else + isTop = false; + + if (value & SpecNonBoolInt32) + myOut.print("Nonboolint32"); + else + isTop = false; + } + + if (value & SpecInt52) + myOut.print("Int52"); + + if ((value & SpecBytecodeDouble) == SpecBytecodeDouble) + myOut.print("Bytecodedouble"); + else { + if (value & SpecInt52AsDouble) + myOut.print("Int52asdouble"); + else + isTop = false; + + if (value & SpecNonIntAsDouble) + myOut.print("Nonintasdouble"); + else + isTop = false; + + if (value & SpecDoublePureNaN) + myOut.print("Doublepurenan"); + else + isTop = false; + } + + if (value & SpecDoubleImpureNaN) + out.print("Doubleimpurenan"); + + if (value & SpecBoolean) + myOut.print("Bool"); + else + isTop = false; + + if (value & SpecOther) + myOut.print("Other"); + else + isTop = false; + + if (isTop) + out.print("Top"); + else + out.print(myOut.toCString()); + + if (value & SpecEmpty) + out.print("Empty"); +} + +// We don't expose this because we don't want anyone relying on the fact that this method currently +// just returns string constants. +static const char* speculationToAbbreviatedString(SpeculatedType prediction) +{ + if (isFinalObjectSpeculation(prediction)) + return "<Final>"; + if (isArraySpeculation(prediction)) + return "<Array>"; + if (isStringIdentSpeculation(prediction)) + return "<StringIdent>"; + if (isStringSpeculation(prediction)) + return "<String>"; + if (isFunctionSpeculation(prediction)) + return "<Function>"; + if (isInt8ArraySpeculation(prediction)) + return "<Int8array>"; + if (isInt16ArraySpeculation(prediction)) + return "<Int16array>"; + if (isInt32ArraySpeculation(prediction)) + return "<Int32array>"; + if (isUint8ArraySpeculation(prediction)) + return "<Uint8array>"; + if (isUint16ArraySpeculation(prediction)) + return "<Uint16array>"; + if (isUint32ArraySpeculation(prediction)) + return "<Uint32array>"; + if (isFloat32ArraySpeculation(prediction)) + return "<Float32array>"; + if (isFloat64ArraySpeculation(prediction)) + return "<Float64array>"; + if (isDirectArgumentsSpeculation(prediction)) + return "<DirectArguments>"; + if (isScopedArgumentsSpeculation(prediction)) + return "<ScopedArguments>"; + if (isStringObjectSpeculation(prediction)) + return "<StringObject>"; + if (isStringOrStringObjectSpeculation(prediction)) + return "<StringOrStringObject>"; + if (isObjectSpeculation(prediction)) + return "<Object>"; + if (isCellSpeculation(prediction)) + return "<Cell>"; + if (isBoolInt32Speculation(prediction)) + return "<BoolInt32>"; + if (isInt32Speculation(prediction)) + return "<Int32>"; + if (isInt52AsDoubleSpeculation(prediction)) + return "<Int52AsDouble>"; + if (isInt52Speculation(prediction)) + return "<Int52>"; + if (isMachineIntSpeculation(prediction)) + return "<MachineInt>"; + if (isDoubleSpeculation(prediction)) + return "<Double>"; + if (isFullNumberSpeculation(prediction)) + return "<Number>"; + if (isBooleanSpeculation(prediction)) + return "<Boolean>"; + if (isOtherSpeculation(prediction)) + return "<Other>"; + if (isMiscSpeculation(prediction)) + return "<Misc>"; + return ""; +} + +void dumpSpeculationAbbreviated(PrintStream& out, SpeculatedType value) +{ + out.print(speculationToAbbreviatedString(value)); +} + +SpeculatedType 
speculationFromTypedArrayType(TypedArrayType type) +{ + switch (type) { + case TypeInt8: + return SpecInt8Array; + case TypeInt16: + return SpecInt16Array; + case TypeInt32: + return SpecInt32Array; + case TypeUint8: + return SpecUint8Array; + case TypeUint8Clamped: + return SpecUint8ClampedArray; + case TypeUint16: + return SpecUint16Array; + case TypeUint32: + return SpecUint32Array; + case TypeFloat32: + return SpecFloat32Array; + case TypeFloat64: + return SpecFloat64Array; + case NotTypedArray: + case TypeDataView: + break; + } + RELEASE_ASSERT_NOT_REACHED(); + return SpecNone; +} + +SpeculatedType speculationFromClassInfo(const ClassInfo* classInfo) +{ + if (classInfo == JSFinalObject::info()) + return SpecFinalObject; + + if (classInfo == JSArray::info()) + return SpecArray; + + if (classInfo == DirectArguments::info()) + return SpecDirectArguments; + + if (classInfo == ScopedArguments::info()) + return SpecScopedArguments; + + if (classInfo == StringObject::info()) + return SpecStringObject; + + if (classInfo->isSubClassOf(JSFunction::info())) + return SpecFunction; + + if (isTypedView(classInfo->typedArrayStorageType)) + return speculationFromTypedArrayType(classInfo->typedArrayStorageType); + + if (classInfo->isSubClassOf(JSObject::info())) + return SpecObjectOther; + + return SpecCellOther; +} + +SpeculatedType speculationFromStructure(Structure* structure) +{ + if (structure->typeInfo().type() == StringType) + return SpecString; + if (structure->typeInfo().type() == SymbolType) + return SpecSymbol; + return speculationFromClassInfo(structure->classInfo()); +} + +SpeculatedType speculationFromCell(JSCell* cell) +{ + if (JSString* string = jsDynamicCast<JSString*>(cell)) { + if (const StringImpl* impl = string->tryGetValueImpl()) { + if (impl->isAtomic()) + return SpecStringIdent; + } + return SpecStringVar; + } + return speculationFromStructure(cell->structure()); +} + +SpeculatedType speculationFromValue(JSValue value) +{ + if (value.isEmpty()) + return SpecEmpty; + if (value.isInt32()) { + if (value.asInt32() & ~1) + return SpecNonBoolInt32; + return SpecBoolInt32; + } + if (value.isDouble()) { + double number = value.asNumber(); + if (number != number) + return SpecDoublePureNaN; + if (value.isMachineInt()) + return SpecInt52AsDouble; + return SpecNonIntAsDouble; + } + if (value.isCell()) + return speculationFromCell(value.asCell()); + if (value.isBoolean()) + return SpecBoolean; + ASSERT(value.isUndefinedOrNull()); + return SpecOther; +} + +TypedArrayType typedArrayTypeFromSpeculation(SpeculatedType type) +{ + if (isInt8ArraySpeculation(type)) + return TypeInt8; + + if (isInt16ArraySpeculation(type)) + return TypeInt16; + + if (isInt32ArraySpeculation(type)) + return TypeInt32; + + if (isUint8ArraySpeculation(type)) + return TypeUint8; + + if (isUint8ClampedArraySpeculation(type)) + return TypeUint8Clamped; + + if (isUint16ArraySpeculation(type)) + return TypeUint16; + + if (isUint32ArraySpeculation(type)) + return TypeUint32; + + if (isFloat32ArraySpeculation(type)) + return TypeFloat32; + + if (isFloat64ArraySpeculation(type)) + return TypeFloat64; + + return NotTypedArray; +} + +SpeculatedType leastUpperBoundOfStrictlyEquivalentSpeculations(SpeculatedType type) +{ + if (type & SpecInteger) + type |= SpecInteger; + if (type & SpecString) + type |= SpecString; + return type; +} + +bool valuesCouldBeEqual(SpeculatedType a, SpeculatedType b) +{ + a = leastUpperBoundOfStrictlyEquivalentSpeculations(a); + b = leastUpperBoundOfStrictlyEquivalentSpeculations(b); + + // Anything 
could be equal to a string.
+ if (a & SpecString)
+ return true;
+ if (b & SpecString)
+ return true;
+
+ // If both sides are definitely only objects, then equality is fairly sane.
+ if (isObjectSpeculation(a) && isObjectSpeculation(b))
+ return !!(a & b);
+
+ // If either side could be an object or not, then we could call toString or
+ // valueOf, which could return anything.
+ if (a & SpecObject)
+ return true;
+ if (b & SpecObject)
+ return true;
+
+ // Neither side is an object or string, so the world is relatively sane.
+ return !!(a & b);
+}
+
+SpeculatedType typeOfDoubleSum(SpeculatedType a, SpeculatedType b)
+{
+ SpeculatedType result = a | b;
+ // Impure NaN could become pure NaN during addition because addition may clear bits.
+ if (result & SpecDoubleImpureNaN)
+ result |= SpecDoublePureNaN;
+ // Values could overflow, or fractions could become integers.
+ if (result & SpecDoubleReal)
+ result |= SpecDoubleReal;
+ return result;
+}
+
+SpeculatedType typeOfDoubleDifference(SpeculatedType a, SpeculatedType b)
+{
+ return typeOfDoubleSum(a, b);
+}
+
+SpeculatedType typeOfDoubleProduct(SpeculatedType a, SpeculatedType b)
+{
+ return typeOfDoubleSum(a, b);
+}
+
+static SpeculatedType polluteDouble(SpeculatedType value)
+{
+ // Impure NaN could become pure NaN because the operation could clear some bits.
+ if (value & SpecDoubleImpureNaN)
+ value |= SpecDoubleNaN;
+ // Values could overflow, fractions could become integers, or an error could produce
+ // PureNaN.
+ if (value & SpecDoubleReal)
+ value |= SpecDoubleReal | SpecDoublePureNaN;
+ return value;
+}
+
+SpeculatedType typeOfDoubleQuotient(SpeculatedType a, SpeculatedType b)
+{
+ return polluteDouble(a | b);
+}
+
+SpeculatedType typeOfDoubleMinMax(SpeculatedType a, SpeculatedType b)
+{
+ SpeculatedType result = a | b;
+ // Impure NaN could become pure NaN, since min/max may clear bits.
+ if (result & SpecDoubleImpureNaN)
+ result |= SpecDoublePureNaN;
+ return result;
+}
+
+SpeculatedType typeOfDoubleNegation(SpeculatedType value)
+{
+ // Impure NaN could become pure NaN because bits might get cleared.
+ if (value & SpecDoubleImpureNaN)
+ value |= SpecDoublePureNaN;
+ // We could get negative zero, which mixes SpecInt52AsDouble and SpecNonIntAsDouble.
+ // We could also overflow a large negative int into something that is no longer
+ // representable as an int.
+ if (value & SpecDoubleReal)
+ value |= SpecDoubleReal;
+ return value;
+}
+
+SpeculatedType typeOfDoubleAbs(SpeculatedType value)
+{
+ return typeOfDoubleNegation(value);
+}
+
+SpeculatedType typeOfDoubleRounding(SpeculatedType value)
+{
+ // We might lose bits, which leads to a NaN being purified.
+ if (value & SpecDoubleImpureNaN)
+ value |= SpecDoublePureNaN;
+ // We might lose bits, which leads to a value becoming integer-representable.
+ if (value & SpecNonIntAsDouble)
+ value |= SpecInt52AsDouble;
+ return value;
+}
+
+SpeculatedType typeOfDoublePow(SpeculatedType xValue, SpeculatedType yValue)
+{
+ // Math.pow() always returns NaN if the exponent is NaN, unlike std::pow().
+ // We always set a pure NaN in that case.
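+ // (For example, in JavaScript Math.pow(2, NaN) is NaN, whereas C++'s
+ // std::pow(1.0, NaN) is 1.0.)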
+ if (yValue & SpecDoubleNaN)
+ xValue |= SpecDoublePureNaN;
+ return polluteDouble(xValue);
+}
+
+SpeculatedType typeOfDoubleBinaryOp(SpeculatedType a, SpeculatedType b)
+{
+ return polluteDouble(a | b);
+}
+
+SpeculatedType typeOfDoubleUnaryOp(SpeculatedType value)
+{
+ return polluteDouble(value);
+}
+
+} // namespace JSC
+
diff --git a/Source/JavaScriptCore/bytecode/SpeculatedType.h b/Source/JavaScriptCore/bytecode/SpeculatedType.h
new file mode 100644
index 000000000..bd045c3ac
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/SpeculatedType.h
@@ -0,0 +1,447 @@
+/*
+ * Copyright (C) 2011-2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef SpeculatedType_h
+#define SpeculatedType_h
+
+#include "JSCJSValue.h"
+#include "TypedArrayType.h"
+#include <wtf/PrintStream.h>
+
+namespace JSC {
+
+class Structure;
+
+typedef uint32_t SpeculatedType;
+static const SpeculatedType SpecNone = 0x00000000; // We don't know anything yet.
+static const SpeculatedType SpecFinalObject = 0x00000001; // It's definitely a JSFinalObject.
+static const SpeculatedType SpecArray = 0x00000002; // It's definitely a JSArray.
+static const SpeculatedType SpecFunction = 0x00000004; // It's definitely a JSFunction.
+static const SpeculatedType SpecInt8Array = 0x00000008; // It's definitely an Int8Array or one of its subclasses.
+static const SpeculatedType SpecInt16Array = 0x00000010; // It's definitely an Int16Array or one of its subclasses.
+static const SpeculatedType SpecInt32Array = 0x00000020; // It's definitely an Int32Array or one of its subclasses.
+static const SpeculatedType SpecUint8Array = 0x00000040; // It's definitely a Uint8Array or one of its subclasses.
+static const SpeculatedType SpecUint8ClampedArray = 0x00000080; // It's definitely a Uint8ClampedArray or one of its subclasses.
+static const SpeculatedType SpecUint16Array = 0x00000100; // It's definitely a Uint16Array or one of its subclasses.
+static const SpeculatedType SpecUint32Array = 0x00000200; // It's definitely a Uint32Array or one of its subclasses.
+static const SpeculatedType SpecFloat32Array = 0x00000400; // It's definitely a Float32Array or one of its subclasses.
+static const SpeculatedType SpecFloat64Array = 0x00000800; // It's definitely a Float64Array or one of its subclasses.
+static const SpeculatedType SpecTypedArrayView = SpecInt8Array | SpecInt16Array | SpecInt32Array | SpecUint8Array | SpecUint8ClampedArray | SpecUint16Array | SpecUint32Array | SpecFloat32Array | SpecFloat64Array;
+static const SpeculatedType SpecDirectArguments = 0x00001000; // It's definitely a DirectArguments object.
+static const SpeculatedType SpecScopedArguments = 0x00002000; // It's definitely a ScopedArguments object.
+static const SpeculatedType SpecStringObject = 0x00004000; // It's definitely a StringObject.
+static const SpeculatedType SpecObjectOther = 0x00008000; // It's definitely an object but not JSFinalObject, JSArray, or JSFunction.
+static const SpeculatedType SpecObject = 0x0000ffff; // Bitmask used for testing for any kind of object prediction.
+static const SpeculatedType SpecStringIdent = 0x00010000; // It's definitely a JSString, and it's an identifier.
+static const SpeculatedType SpecStringVar = 0x00020000; // It's definitely a JSString, and it's not an identifier.
+static const SpeculatedType SpecString = 0x00030000; // It's definitely a JSString.
+static const SpeculatedType SpecSymbol = 0x00040000; // It's definitely a Symbol.
+static const SpeculatedType SpecCellOther = 0x00080000; // It's definitely a JSCell but not a subclass of JSObject and definitely not a JSString or a Symbol. FIXME: This shouldn't be part of heap-top or bytecode-top. https://bugs.webkit.org/show_bug.cgi?id=133078
+static const SpeculatedType SpecCell = 0x000fffff; // It's definitely a JSCell.
+static const SpeculatedType SpecBoolInt32 = 0x00100000; // It's definitely an Int32 with value 0 or 1.
+static const SpeculatedType SpecNonBoolInt32 = 0x00200000; // It's definitely an Int32 with value other than 0 or 1.
+static const SpeculatedType SpecInt32 = 0x00300000; // It's definitely an Int32.
+static const SpeculatedType SpecInt52 = 0x00400000; // It's definitely an Int52 and we intend to unbox it.
+static const SpeculatedType SpecMachineInt = 0x00700000; // It's something that we can do machine int arithmetic on.
+static const SpeculatedType SpecInt52AsDouble = 0x00800000; // It's definitely an Int52 and it's inside a double.
+static const SpeculatedType SpecInteger = 0x00f00000; // It's definitely some kind of integer.
+static const SpeculatedType SpecNonIntAsDouble = 0x01000000; // It's definitely not an Int52 but it's a real number and it's a double.
+static const SpeculatedType SpecDoubleReal = 0x01800000; // It's definitely a non-NaN double.
+static const SpeculatedType SpecDoublePureNaN = 0x02000000; // It's definitely a NaN that is safe to tag (i.e. pure).
+static const SpeculatedType SpecDoubleImpureNaN = 0x04000000; // It's definitely a NaN that is unsafe to tag (i.e. impure).
+static const SpeculatedType SpecDoubleNaN = 0x06000000; // It's definitely some kind of NaN.
+static const SpeculatedType SpecBytecodeDouble = 0x03800000; // It's either a non-NaN or a NaN double, but it's definitely not impure NaN.
+static const SpeculatedType SpecFullDouble = 0x07800000; // It's either a non-NaN or a NaN double.
+static const SpeculatedType SpecBytecodeRealNumber = 0x01b00000; // It's either an Int32 or a DoubleReal.
+static const SpeculatedType SpecFullRealNumber = 0x01f00000; // It's either an Int32, a DoubleReal, or an Int52.
+static const SpeculatedType SpecBytecodeNumber = 0x03b00000; // It's either an Int32 or a Double, and the Double cannot be an impure NaN. +static const SpeculatedType SpecFullNumber = 0x07f00000; // It's either an Int32, Int52, or a Double, and the Double can be impure NaN. +static const SpeculatedType SpecBoolean = 0x10000000; // It's definitely a Boolean. +static const SpeculatedType SpecOther = 0x20000000; // It's definitely either Null or Undefined. +static const SpeculatedType SpecMisc = 0x30000000; // It's definitely either a boolean, Null, or Undefined. +static const SpeculatedType SpecHeapTop = 0x3bbfffff; // It can be any of the above, except for SpecInt52. +static const SpeculatedType SpecEmpty = 0x40000000; // It's definitely an empty value marker. +static const SpeculatedType SpecBytecodeTop = 0x7bbfffff; // It can be any of the above, except for SpecInt52. +static const SpeculatedType SpecFullTop = 0x7fffffff; // It can be any of the above plus anything the DFG chooses. + +typedef bool (*SpeculatedTypeChecker)(SpeculatedType); + +// Dummy prediction checker, only useful if someone insists on requiring a prediction checker. +inline bool isAnySpeculation(SpeculatedType) +{ + return true; +} + +inline bool isCellSpeculation(SpeculatedType value) +{ + return !!(value & SpecCell) && !(value & ~SpecCell); +} + +inline bool isNotCellSpeculation(SpeculatedType value) +{ + return !(value & SpecCell) && value; +} + +inline bool isObjectSpeculation(SpeculatedType value) +{ + return !!(value & SpecObject) && !(value & ~SpecObject); +} + +inline bool isObjectOrOtherSpeculation(SpeculatedType value) +{ + return !!(value & (SpecObject | SpecOther)) && !(value & ~(SpecObject | SpecOther)); +} + +inline bool isFinalObjectSpeculation(SpeculatedType value) +{ + return value == SpecFinalObject; +} + +inline bool isFinalObjectOrOtherSpeculation(SpeculatedType value) +{ + return !!(value & (SpecFinalObject | SpecOther)) && !(value & ~(SpecFinalObject | SpecOther)); +} + +inline bool isStringIdentSpeculation(SpeculatedType value) +{ + return value == SpecStringIdent; +} + +inline bool isNotStringVarSpeculation(SpeculatedType value) +{ + return !(value & SpecStringVar); +} + +inline bool isStringSpeculation(SpeculatedType value) +{ + return !!value && (value & SpecString) == value; +} + +inline bool isSymbolSpeculation(SpeculatedType value) +{ + return value == SpecSymbol; +} + +inline bool isArraySpeculation(SpeculatedType value) +{ + return value == SpecArray; +} + +inline bool isFunctionSpeculation(SpeculatedType value) +{ + return value == SpecFunction; +} + +inline bool isInt8ArraySpeculation(SpeculatedType value) +{ + return value == SpecInt8Array; +} + +inline bool isInt16ArraySpeculation(SpeculatedType value) +{ + return value == SpecInt16Array; +} + +inline bool isInt32ArraySpeculation(SpeculatedType value) +{ + return value == SpecInt32Array; +} + +inline bool isUint8ArraySpeculation(SpeculatedType value) +{ + return value == SpecUint8Array; +} + +inline bool isUint8ClampedArraySpeculation(SpeculatedType value) +{ + return value == SpecUint8ClampedArray; +} + +inline bool isUint16ArraySpeculation(SpeculatedType value) +{ + return value == SpecUint16Array; +} + +inline bool isUint32ArraySpeculation(SpeculatedType value) +{ + return value == SpecUint32Array; +} + +inline bool isFloat32ArraySpeculation(SpeculatedType value) +{ + return value == SpecFloat32Array; +} + +inline bool isFloat64ArraySpeculation(SpeculatedType value) +{ + return value == SpecFloat64Array; +} + +inline bool 
isDirectArgumentsSpeculation(SpeculatedType value) +{ + return value == SpecDirectArguments; +} + +inline bool isScopedArgumentsSpeculation(SpeculatedType value) +{ + return value == SpecScopedArguments; +} + +inline bool isActionableIntMutableArraySpeculation(SpeculatedType value) +{ + return isInt8ArraySpeculation(value) + || isInt16ArraySpeculation(value) + || isInt32ArraySpeculation(value) + || isUint8ArraySpeculation(value) + || isUint8ClampedArraySpeculation(value) + || isUint16ArraySpeculation(value) + || isUint32ArraySpeculation(value); +} + +inline bool isActionableFloatMutableArraySpeculation(SpeculatedType value) +{ + return isFloat32ArraySpeculation(value) + || isFloat64ArraySpeculation(value); +} + +inline bool isActionableTypedMutableArraySpeculation(SpeculatedType value) +{ + return isActionableIntMutableArraySpeculation(value) + || isActionableFloatMutableArraySpeculation(value); +} + +inline bool isActionableMutableArraySpeculation(SpeculatedType value) +{ + return isArraySpeculation(value) + || isActionableTypedMutableArraySpeculation(value); +} + +inline bool isActionableArraySpeculation(SpeculatedType value) +{ + return isStringSpeculation(value) + || isDirectArgumentsSpeculation(value) + || isScopedArgumentsSpeculation(value) + || isActionableMutableArraySpeculation(value); +} + +inline bool isArrayOrOtherSpeculation(SpeculatedType value) +{ + return !!(value & (SpecArray | SpecOther)) && !(value & ~(SpecArray | SpecOther)); +} + +inline bool isStringObjectSpeculation(SpeculatedType value) +{ + return value == SpecStringObject; +} + +inline bool isStringOrStringObjectSpeculation(SpeculatedType value) +{ + return !!value && !(value & ~(SpecString | SpecStringObject)); +} + +inline bool isBoolInt32Speculation(SpeculatedType value) +{ + return value == SpecBoolInt32; +} + +inline bool isInt32Speculation(SpeculatedType value) +{ + return value && !(value & ~SpecInt32); +} + +inline bool isInt32OrBooleanSpeculation(SpeculatedType value) +{ + return value && !(value & ~(SpecBoolean | SpecInt32)); +} + +inline bool isInt32SpeculationForArithmetic(SpeculatedType value) +{ + return !(value & (SpecFullDouble | SpecInt52)); +} + +inline bool isInt32OrBooleanSpeculationForArithmetic(SpeculatedType value) +{ + return !(value & (SpecFullDouble | SpecInt52)); +} + +inline bool isInt32OrBooleanSpeculationExpectingDefined(SpeculatedType value) +{ + return isInt32OrBooleanSpeculation(value & ~SpecOther); +} + +inline bool isInt52Speculation(SpeculatedType value) +{ + return value == SpecInt52; +} + +inline bool isMachineIntSpeculation(SpeculatedType value) +{ + return !!value && (value & SpecMachineInt) == value; +} + +inline bool isInt52AsDoubleSpeculation(SpeculatedType value) +{ + return value == SpecInt52AsDouble; +} + +inline bool isIntegerSpeculation(SpeculatedType value) +{ + return !!value && (value & SpecInteger) == value; +} + +inline bool isDoubleRealSpeculation(SpeculatedType value) +{ + return !!value && (value & SpecDoubleReal) == value; +} + +inline bool isDoubleSpeculation(SpeculatedType value) +{ + return !!value && (value & SpecFullDouble) == value; +} + +inline bool isDoubleSpeculationForArithmetic(SpeculatedType value) +{ + return !!(value & SpecFullDouble); +} + +inline bool isBytecodeRealNumberSpeculation(SpeculatedType value) +{ + return !!(value & SpecBytecodeRealNumber) && !(value & ~SpecBytecodeRealNumber); +} + +inline bool isFullRealNumberSpeculation(SpeculatedType value) +{ + return !!(value & SpecFullRealNumber) && !(value & ~SpecFullRealNumber); +} + 
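+// The predicates above and below all follow the same shape: a speculation
+// "is X" when its bit set is non-empty and contained in X's mask, i.e.
+// !!(value & SpecX) && !(value & ~SpecX). Illustrative examples (not part of
+// the original header):
+//
+//   isFullRealNumberSpeculation(SpecInt32 | SpecDoubleReal)  // true
+//   isFullRealNumberSpeculation(SpecInt32 | SpecBoolean)     // false
+//   isFullRealNumberSpeculation(SpecNone)                    // false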
+inline bool isBytecodeNumberSpeculation(SpeculatedType value) +{ + return !!(value & SpecBytecodeNumber) && !(value & ~SpecBytecodeNumber); +} + +inline bool isFullNumberSpeculation(SpeculatedType value) +{ + return !!(value & SpecFullNumber) && !(value & ~SpecFullNumber); +} + +inline bool isFullNumberOrBooleanSpeculation(SpeculatedType value) +{ + return value && !(value & ~(SpecFullNumber | SpecBoolean)); +} + +inline bool isFullNumberOrBooleanSpeculationExpectingDefined(SpeculatedType value) +{ + return isFullNumberOrBooleanSpeculation(value & ~SpecOther); +} + +inline bool isBooleanSpeculation(SpeculatedType value) +{ + return value == SpecBoolean; +} + +inline bool isOtherSpeculation(SpeculatedType value) +{ + return value == SpecOther; +} + +inline bool isMiscSpeculation(SpeculatedType value) +{ + return !!value && !(value & ~SpecMisc); +} + +inline bool isOtherOrEmptySpeculation(SpeculatedType value) +{ + return !value || value == SpecOther; +} + +inline bool isEmptySpeculation(SpeculatedType value) +{ + return value == SpecEmpty; +} + +void dumpSpeculation(PrintStream&, SpeculatedType); +void dumpSpeculationAbbreviated(PrintStream&, SpeculatedType); + +MAKE_PRINT_ADAPTOR(SpeculationDump, SpeculatedType, dumpSpeculation); +MAKE_PRINT_ADAPTOR(AbbreviatedSpeculationDump, SpeculatedType, dumpSpeculationAbbreviated); + +// Merge two predictions. Note that currently this just does left | right. It may +// seem tempting to do so directly, but you would be doing so at your own peril, +// since the merging protocol SpeculatedType may change at any time (and has already +// changed several times in its history). +inline SpeculatedType mergeSpeculations(SpeculatedType left, SpeculatedType right) +{ + return left | right; +} + +template<typename T> +inline bool mergeSpeculation(T& left, SpeculatedType right) +{ + SpeculatedType newSpeculation = static_cast<T>(mergeSpeculations(static_cast<SpeculatedType>(left), right)); + bool result = newSpeculation != static_cast<SpeculatedType>(left); + left = newSpeculation; + return result; +} + +inline bool speculationChecked(SpeculatedType actual, SpeculatedType desired) +{ + return (actual | desired) == desired; +} + +SpeculatedType speculationFromClassInfo(const ClassInfo*); +SpeculatedType speculationFromStructure(Structure*); +SpeculatedType speculationFromCell(JSCell*); +SpeculatedType speculationFromValue(JSValue); + +SpeculatedType speculationFromTypedArrayType(TypedArrayType); // only valid for typed views. +TypedArrayType typedArrayTypeFromSpeculation(SpeculatedType); + +SpeculatedType leastUpperBoundOfStrictlyEquivalentSpeculations(SpeculatedType); + +bool valuesCouldBeEqual(SpeculatedType, SpeculatedType); + +// Precise computation of the type of the result of a double computation after we +// already know that the inputs are doubles and that the result must be a double. Use +// the closest one of these that applies. 
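+//
+// A hypothetical caller in a compiler pass might model a double addition as
+// typeOfDoubleSum(leftPrediction, rightPrediction) and fall back to
+// typeOfDoubleBinaryOp() when no more precise model below applies.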
+SpeculatedType typeOfDoubleSum(SpeculatedType, SpeculatedType); +SpeculatedType typeOfDoubleDifference(SpeculatedType, SpeculatedType); +SpeculatedType typeOfDoubleProduct(SpeculatedType, SpeculatedType); +SpeculatedType typeOfDoubleQuotient(SpeculatedType, SpeculatedType); +SpeculatedType typeOfDoubleMinMax(SpeculatedType, SpeculatedType); +SpeculatedType typeOfDoubleNegation(SpeculatedType); +SpeculatedType typeOfDoubleAbs(SpeculatedType); +SpeculatedType typeOfDoubleRounding(SpeculatedType); +SpeculatedType typeOfDoublePow(SpeculatedType, SpeculatedType); + +// This conservatively models the behavior of arbitrary double operations. +SpeculatedType typeOfDoubleBinaryOp(SpeculatedType, SpeculatedType); +SpeculatedType typeOfDoubleUnaryOp(SpeculatedType); + +} // namespace JSC + +#endif // SpeculatedType_h diff --git a/Source/JavaScriptCore/bytecode/StructureSet.cpp b/Source/JavaScriptCore/bytecode/StructureSet.cpp new file mode 100644 index 000000000..40fea8da3 --- /dev/null +++ b/Source/JavaScriptCore/bytecode/StructureSet.cpp @@ -0,0 +1,109 @@ +/* + * Copyright (C) 2014, 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "config.h" +#include "StructureSet.h" + +#include "DFGAbstractValue.h" +#include "TrackedReferences.h" +#include <wtf/CommaPrinter.h> + +namespace JSC { + +#if ENABLE(DFG_JIT) + +void StructureSet::filter(const DFG::StructureAbstractValue& other) +{ + genericFilter([&] (Structure* structure) -> bool { return other.contains(structure); }); +} + +void StructureSet::filter(SpeculatedType type) +{ + genericFilter( + [&] (Structure* structure) -> bool { + return type & speculationFromStructure(structure); + }); +} + +void StructureSet::filterArrayModes(ArrayModes arrayModes) +{ + genericFilter( + [&] (Structure* structure) -> bool { + return arrayModes & arrayModeFromStructure(structure); + }); +} + +void StructureSet::filter(const DFG::AbstractValue& other) +{ + filter(other.m_structure); + filter(other.m_type); + filterArrayModes(other.m_arrayModes); +} + +#endif // ENABLE(DFG_JIT) + +SpeculatedType StructureSet::speculationFromStructures() const +{ + SpeculatedType result = SpecNone; + forEach( + [&] (Structure* structure) { + mergeSpeculation(result, speculationFromStructure(structure)); + }); + return result; +} + +ArrayModes StructureSet::arrayModesFromStructures() const +{ + ArrayModes result = 0; + forEach( + [&] (Structure* structure) { + mergeArrayModes(result, asArrayModes(structure->indexingType())); + }); + return result; +} + +void StructureSet::dumpInContext(PrintStream& out, DumpContext* context) const +{ + CommaPrinter comma; + out.print("["); + forEach([&] (Structure* structure) { out.print(comma, inContext(*structure, context)); }); + out.print("]"); +} + +void StructureSet::dump(PrintStream& out) const +{ + dumpInContext(out, nullptr); +} + +void StructureSet::validateReferences(const TrackedReferences& trackedReferences) const +{ + forEach( + [&] (Structure* structure) { + trackedReferences.check(structure); + }); +} + +} // namespace JSC + diff --git a/Source/JavaScriptCore/bytecode/StructureSet.h b/Source/JavaScriptCore/bytecode/StructureSet.h new file mode 100644 index 000000000..df19ec538 --- /dev/null +++ b/Source/JavaScriptCore/bytecode/StructureSet.h @@ -0,0 +1,88 @@ +/* + * Copyright (C) 2011, 2013-2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef StructureSet_h +#define StructureSet_h + +#include "ArrayProfile.h" +#include "DumpContext.h" +#include "SpeculatedType.h" +#include "Structure.h" +#include <wtf/TinyPtrSet.h> + +namespace JSC { + +class TrackedReferences; + +namespace DFG { +class StructureAbstractValue; +struct AbstractValue; +} + +class StructureSet : public TinyPtrSet<Structure*> { +public: + // I really want to do this: + // using TinyPtrSet::TinyPtrSet; + // + // But I can't because Windows. + + StructureSet() + { + } + + StructureSet(Structure* structure) + : TinyPtrSet(structure) + { + } + + ALWAYS_INLINE StructureSet(const StructureSet& other) + : TinyPtrSet(other) + { + } + + Structure* onlyStructure() const + { + return onlyEntry(); + } + +#if ENABLE(DFG_JIT) + void filter(const DFG::StructureAbstractValue&); + void filter(SpeculatedType); + void filterArrayModes(ArrayModes); + void filter(const DFG::AbstractValue&); +#endif // ENABLE(DFG_JIT) + + SpeculatedType speculationFromStructures() const; + ArrayModes arrayModesFromStructures() const; + + void dumpInContext(PrintStream&, DumpContext*) const; + void dump(PrintStream&) const; + + void validateReferences(const TrackedReferences&) const; +}; + +} // namespace JSC + +#endif // StructureSet_h diff --git a/Source/JavaScriptCore/bytecode/StructureStubClearingWatchpoint.cpp b/Source/JavaScriptCore/bytecode/StructureStubClearingWatchpoint.cpp new file mode 100644 index 000000000..59c088b54 --- /dev/null +++ b/Source/JavaScriptCore/bytecode/StructureStubClearingWatchpoint.cpp @@ -0,0 +1,94 @@ +/* + * Copyright (C) 2012, 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "config.h" +#include "StructureStubClearingWatchpoint.h" + +#if ENABLE(JIT) + +#include "CodeBlock.h" +#include "JSCInlines.h" +#include "StructureStubInfo.h" + +namespace JSC { + +StructureStubClearingWatchpoint::~StructureStubClearingWatchpoint() { } + +StructureStubClearingWatchpoint* StructureStubClearingWatchpoint::push( + const ObjectPropertyCondition& key, + WatchpointsOnStructureStubInfo& holder, + std::unique_ptr<StructureStubClearingWatchpoint>& head) +{ + head = std::make_unique<StructureStubClearingWatchpoint>(key, holder, WTF::move(head)); + return head.get(); +} + +void StructureStubClearingWatchpoint::fireInternal(const FireDetail&) +{ + if (!m_key || !m_key.isWatchable(PropertyCondition::EnsureWatchability)) { + // This will implicitly cause my own demise: stub reset removes all watchpoints. + // That works, because deleting a watchpoint removes it from the set's list, and + // the set's list traversal for firing is robust against the set changing. + m_holder.codeBlock()->resetStub(*m_holder.stubInfo()); + return; + } + + if (m_key.kind() == PropertyCondition::Presence) { + // If this was a presence condition, let's watch the property for replacements. This is profitable + // for the DFG, which will want the replacement set to be valid in order to do constant folding. + VM& vm = *Heap::heap(m_key.object())->vm(); + m_key.object()->structure()->startWatchingPropertyForReplacements(vm, m_key.offset()); + } + + m_key.object()->structure()->addTransitionWatchpoint(this); +} + +WatchpointsOnStructureStubInfo::~WatchpointsOnStructureStubInfo() +{ +} + +StructureStubClearingWatchpoint* WatchpointsOnStructureStubInfo::addWatchpoint(const ObjectPropertyCondition& key) +{ + return StructureStubClearingWatchpoint::push(key, *this, m_head); +} + +StructureStubClearingWatchpoint* WatchpointsOnStructureStubInfo::ensureReferenceAndAddWatchpoint( + RefPtr<WatchpointsOnStructureStubInfo>& holderRef, CodeBlock* codeBlock, + StructureStubInfo* stubInfo, const ObjectPropertyCondition& key) +{ + if (!holderRef) + holderRef = adoptRef(new WatchpointsOnStructureStubInfo(codeBlock, stubInfo)); + else { + ASSERT(holderRef->m_codeBlock == codeBlock); + ASSERT(holderRef->m_stubInfo == stubInfo); + } + + return holderRef->addWatchpoint(key); +} + +} // namespace JSC + +#endif // ENABLE(JIT) + diff --git a/Source/JavaScriptCore/bytecode/StructureStubClearingWatchpoint.h b/Source/JavaScriptCore/bytecode/StructureStubClearingWatchpoint.h new file mode 100644 index 000000000..abacf3159 --- /dev/null +++ b/Source/JavaScriptCore/bytecode/StructureStubClearingWatchpoint.h @@ -0,0 +1,105 @@ +/* + * Copyright (C) 2012, 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. 
OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef StructureStubClearingWatchpoint_h +#define StructureStubClearingWatchpoint_h + +#include "ObjectPropertyCondition.h" +#include "Watchpoint.h" + +#if ENABLE(JIT) + +#include <wtf/FastMalloc.h> +#include <wtf/Noncopyable.h> +#include <wtf/RefCounted.h> +#include <wtf/RefPtr.h> + +namespace JSC { + +class CodeBlock; +class WatchpointsOnStructureStubInfo; +struct StructureStubInfo; + +class StructureStubClearingWatchpoint : public Watchpoint { + WTF_MAKE_NONCOPYABLE(StructureStubClearingWatchpoint); + WTF_MAKE_FAST_ALLOCATED; +public: + StructureStubClearingWatchpoint( + const ObjectPropertyCondition& key, + WatchpointsOnStructureStubInfo& holder, + std::unique_ptr<StructureStubClearingWatchpoint> next) + : m_key(key) + , m_holder(holder) + , m_next(WTF::move(next)) + { + } + + virtual ~StructureStubClearingWatchpoint(); + + static StructureStubClearingWatchpoint* push( + const ObjectPropertyCondition& key, + WatchpointsOnStructureStubInfo& holder, + std::unique_ptr<StructureStubClearingWatchpoint>& head); + +protected: + virtual void fireInternal(const FireDetail&) override; + +private: + ObjectPropertyCondition m_key; + WatchpointsOnStructureStubInfo& m_holder; + std::unique_ptr<StructureStubClearingWatchpoint> m_next; +}; + +class WatchpointsOnStructureStubInfo : public RefCounted<WatchpointsOnStructureStubInfo> { +public: + WatchpointsOnStructureStubInfo(CodeBlock* codeBlock, StructureStubInfo* stubInfo) + : m_codeBlock(codeBlock) + , m_stubInfo(stubInfo) + { + } + + ~WatchpointsOnStructureStubInfo(); + + StructureStubClearingWatchpoint* addWatchpoint(const ObjectPropertyCondition& key); + + static StructureStubClearingWatchpoint* ensureReferenceAndAddWatchpoint( + RefPtr<WatchpointsOnStructureStubInfo>& holderRef, + CodeBlock*, StructureStubInfo*, const ObjectPropertyCondition& key); + + CodeBlock* codeBlock() const { return m_codeBlock; } + StructureStubInfo* stubInfo() const { return m_stubInfo; } + +private: + CodeBlock* m_codeBlock; + StructureStubInfo* m_stubInfo; + std::unique_ptr<StructureStubClearingWatchpoint> m_head; +}; + +} // namespace JSC + +#endif // ENABLE(JIT) + +#endif // StructureStubClearingWatchpoint_h + diff --git a/Source/JavaScriptCore/bytecode/StructureStubInfo.cpp b/Source/JavaScriptCore/bytecode/StructureStubInfo.cpp new file mode 100644 index 000000000..6db79a09e --- /dev/null +++ b/Source/JavaScriptCore/bytecode/StructureStubInfo.cpp @@ -0,0 +1,109 @@ +/* + * Copyright (C) 2008, 2014, 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. 
+ * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "StructureStubInfo.h" + +#include "JSObject.h" +#include "PolymorphicGetByIdList.h" +#include "PolymorphicPutByIdList.h" + +namespace JSC { + +#if ENABLE(JIT) +void StructureStubInfo::deref() +{ + switch (accessType) { + case access_get_by_id_list: { + delete u.getByIdList.list; + return; + } + case access_put_by_id_list: + delete u.putByIdList.list; + return; + case access_in_list: { + PolymorphicAccessStructureList* polymorphicStructures = u.inList.structureList; + delete polymorphicStructures; + return; + } + case access_put_by_id_transition_normal: + case access_put_by_id_transition_direct: + ObjectPropertyConditionSet::adoptRawPointer(u.putByIdTransition.rawConditionSet); + u.putByIdTransition.rawConditionSet = nullptr; + return; + case access_get_by_id_self: + case access_put_by_id_replace: + case access_unset: + // These instructions don't have to release any allocated memory + return; + default: + RELEASE_ASSERT_NOT_REACHED(); + } +} + +bool StructureStubInfo::visitWeakReferences(RepatchBuffer& repatchBuffer) +{ + switch (accessType) { + case access_get_by_id_self: + if (!Heap::isMarked(u.getByIdSelf.baseObjectStructure.get())) + return false; + break; + case access_get_by_id_list: { + if (!u.getByIdList.list->visitWeak(repatchBuffer)) + return false; + break; + } + case access_put_by_id_transition_normal: + case access_put_by_id_transition_direct: + if (!Heap::isMarked(u.putByIdTransition.previousStructure.get()) + || !Heap::isMarked(u.putByIdTransition.structure.get())) + return false; + if (!ObjectPropertyConditionSet::fromRawPointer(u.putByIdTransition.rawConditionSet).areStillLive()) + return false; + break; + case access_put_by_id_replace: + if (!Heap::isMarked(u.putByIdReplace.baseObjectStructure.get())) + return false; + break; + case access_put_by_id_list: + if (!u.putByIdList.list->visitWeak(repatchBuffer)) + return false; + break; + case access_in_list: { + PolymorphicAccessStructureList* polymorphicStructures = u.inList.structureList; + if (!polymorphicStructures->visitWeak(u.inList.listSize)) + return false; + break; + } + default: + // The rest of the instructions don't require references, so there is no need to + // do anything. + break; + } + return true; +} +#endif + +} // namespace JSC diff --git a/Source/JavaScriptCore/bytecode/StructureStubInfo.h b/Source/JavaScriptCore/bytecode/StructureStubInfo.h new file mode 100644 index 000000000..e6c1ceed9 --- /dev/null +++ b/Source/JavaScriptCore/bytecode/StructureStubInfo.h @@ -0,0 +1,261 @@ +/* + * Copyright (C) 2008, 2012-2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef StructureStubInfo_h +#define StructureStubInfo_h + +#include "CodeOrigin.h" +#include "Instruction.h" +#include "JITStubRoutine.h" +#include "MacroAssembler.h" +#include "ObjectPropertyConditionSet.h" +#include "Opcode.h" +#include "PolymorphicAccessStructureList.h" +#include "RegisterSet.h" +#include "SpillRegistersMode.h" +#include "Structure.h" +#include "StructureStubClearingWatchpoint.h" + +namespace JSC { + +#if ENABLE(JIT) + +class PolymorphicGetByIdList; +class PolymorphicPutByIdList; + +enum AccessType { + access_get_by_id_self, + access_get_by_id_list, + access_put_by_id_transition_normal, + access_put_by_id_transition_direct, + access_put_by_id_replace, + access_put_by_id_list, + access_unset, + access_in_list +}; + +inline bool isGetByIdAccess(AccessType accessType) +{ + switch (accessType) { + case access_get_by_id_self: + case access_get_by_id_list: + return true; + default: + return false; + } +} + +inline bool isPutByIdAccess(AccessType accessType) +{ + switch (accessType) { + case access_put_by_id_transition_normal: + case access_put_by_id_transition_direct: + case access_put_by_id_replace: + case access_put_by_id_list: + return true; + default: + return false; + } +} + +inline bool isInAccess(AccessType accessType) +{ + switch (accessType) { + case access_in_list: + return true; + default: + return false; + } +} + +struct StructureStubInfo { + StructureStubInfo() + : accessType(access_unset) + , seen(false) + , resetByGC(false) + , tookSlowPath(false) + { + } + + void initGetByIdSelf(VM& vm, JSCell* owner, Structure* baseObjectStructure) + { + accessType = access_get_by_id_self; + + u.getByIdSelf.baseObjectStructure.set(vm, owner, baseObjectStructure); + } + + void initGetByIdList(PolymorphicGetByIdList* list) + { + accessType = access_get_by_id_list; + u.getByIdList.list = list; + } + + // PutById* + + void initPutByIdTransition(VM& vm, JSCell* owner, Structure* previousStructure, Structure* structure, ObjectPropertyConditionSet conditionSet, bool isDirect) + { + if (isDirect) + accessType = access_put_by_id_transition_direct; + else + accessType = access_put_by_id_transition_normal; + + u.putByIdTransition.previousStructure.set(vm, owner, previousStructure); + u.putByIdTransition.structure.set(vm, owner, structure); + u.putByIdTransition.rawConditionSet = conditionSet.releaseRawPointer(); + } + + void initPutByIdReplace(VM& vm, JSCell* owner, Structure* 
baseObjectStructure) + { + accessType = access_put_by_id_replace; + + u.putByIdReplace.baseObjectStructure.set(vm, owner, baseObjectStructure); + } + + void initPutByIdList(PolymorphicPutByIdList* list) + { + accessType = access_put_by_id_list; + u.putByIdList.list = list; + } + + void initInList(PolymorphicAccessStructureList* list, int listSize) + { + accessType = access_in_list; + u.inList.structureList = list; + u.inList.listSize = listSize; + } + + void reset() + { + deref(); + accessType = access_unset; + stubRoutine = nullptr; + watchpoints = nullptr; + } + + void deref(); + + // Check if the stub has weak references that are dead. If there are dead ones that imply + // that the stub should be entirely reset, this should return false. If there are dead ones + // that can be handled internally by the stub and don't require a full reset, then this + // should reset them and return true. If there are no dead weak references, return true. + // If this method returns true it means that it has left the stub in a state where all + // outgoing GC pointers are known to point to currently marked objects; this method is + // allowed to accomplish this by either clearing those pointers somehow or by proving that + // they have already been marked. It is not allowed to mark new objects. + bool visitWeakReferences(RepatchBuffer&); + + bool seenOnce() + { + return seen; + } + + void setSeen() + { + seen = true; + } + + StructureStubClearingWatchpoint* addWatchpoint( + CodeBlock* codeBlock, const ObjectPropertyCondition& condition = ObjectPropertyCondition()) + { + return WatchpointsOnStructureStubInfo::ensureReferenceAndAddWatchpoint( + watchpoints, codeBlock, this, condition); + } + + int8_t accessType; + bool seen : 1; + bool resetByGC : 1; + bool tookSlowPath : 1; + + CodeOrigin codeOrigin; + + struct { + unsigned spillMode : 8; + int8_t baseGPR; +#if USE(JSVALUE32_64) + int8_t valueTagGPR; +#endif + int8_t valueGPR; + RegisterSet usedRegisters; + int32_t deltaCallToDone; + int32_t deltaCallToStorageLoad; + int32_t deltaCallToJump; + int32_t deltaCallToSlowCase; + int32_t deltaCheckImmToCall; +#if USE(JSVALUE64) + int32_t deltaCallToLoadOrStore; +#else + int32_t deltaCallToTagLoadOrStore; + int32_t deltaCallToPayloadLoadOrStore; +#endif + } patch; + + union { + struct { + // It would be unwise to put anything here, as it will surely be overwritten. 
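+ // Which member of the enclosing union is live is determined by accessType:
+ // for example, access_get_by_id_self means u.getByIdSelf holds the payload,
+ // and the access_put_by_id_transition_* types mean u.putByIdTransition does.
+ // That is why deref() and visitWeakReferences(), declared above, switch on
+ // accessType before touching u. As an illustrative example, after a call like
+ //     stubInfo.initGetByIdSelf(vm, owner, structure);
+ // only u.getByIdSelf may be read until the stub is reset or re-initialized.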
+ } unset; + struct { + WriteBarrierBase<Structure> baseObjectStructure; + } getByIdSelf; + struct { + PolymorphicGetByIdList* list; + } getByIdList; + struct { + WriteBarrierBase<Structure> previousStructure; + WriteBarrierBase<Structure> structure; + void* rawConditionSet; + } putByIdTransition; + struct { + WriteBarrierBase<Structure> baseObjectStructure; + } putByIdReplace; + struct { + PolymorphicPutByIdList* list; + } putByIdList; + struct { + PolymorphicAccessStructureList* structureList; + int listSize; + } inList; + } u; + + RefPtr<JITStubRoutine> stubRoutine; + CodeLocationCall callReturnLocation; + RefPtr<WatchpointsOnStructureStubInfo> watchpoints; +}; + +inline CodeOrigin getStructureStubInfoCodeOrigin(StructureStubInfo& structureStubInfo) +{ + return structureStubInfo.codeOrigin; +} + +typedef HashMap<CodeOrigin, StructureStubInfo*, CodeOriginApproximateHash> StubInfoMap; + +#else + +typedef HashMap<int, void*> StubInfoMap; + +#endif // ENABLE(JIT) + +} // namespace JSC + +#endif // StructureStubInfo_h diff --git a/Source/JavaScriptCore/bytecode/ToThisStatus.cpp b/Source/JavaScriptCore/bytecode/ToThisStatus.cpp new file mode 100644 index 000000000..23d1e0800 --- /dev/null +++ b/Source/JavaScriptCore/bytecode/ToThisStatus.cpp @@ -0,0 +1,72 @@ +/* + * Copyright (C) 2014 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "config.h" +#include "ToThisStatus.h" + +namespace JSC { + +ToThisStatus merge(ToThisStatus a, ToThisStatus b) +{ + switch (a) { + case ToThisOK: + return b; + case ToThisConflicted: + return ToThisConflicted; + case ToThisClearedByGC: + if (b == ToThisConflicted) + return ToThisConflicted; + return ToThisClearedByGC; + } + + RELEASE_ASSERT_NOT_REACHED(); + return ToThisConflicted; +} + +} // namespace JSC + +namespace WTF { + +using namespace JSC; + +void printInternal(PrintStream& out, ToThisStatus status) +{ + switch (status) { + case ToThisOK: + out.print("OK"); + return; + case ToThisConflicted: + out.print("Conflicted"); + return; + case ToThisClearedByGC: + out.print("ClearedByGC"); + return; + } + + RELEASE_ASSERT_NOT_REACHED(); +} + +} // namespace WTF + diff --git a/Source/JavaScriptCore/bytecode/ToThisStatus.h b/Source/JavaScriptCore/bytecode/ToThisStatus.h new file mode 100644 index 000000000..55d707c0f --- /dev/null +++ b/Source/JavaScriptCore/bytecode/ToThisStatus.h @@ -0,0 +1,50 @@ +/* + * Copyright (C) 2014 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef ToThisStatus_h +#define ToThisStatus_h + +#include <wtf/PrintStream.h> + +namespace JSC { + +enum ToThisStatus { + ToThisOK, + ToThisConflicted, + ToThisClearedByGC +}; + +ToThisStatus merge(ToThisStatus, ToThisStatus); + +} // namespace JSC + +namespace WTF { + +void printInternal(PrintStream&, JSC::ToThisStatus); + +} // namespace WTF + +#endif // ToThisStatus_h + diff --git a/Source/JavaScriptCore/bytecode/TrackedReferences.cpp b/Source/JavaScriptCore/bytecode/TrackedReferences.cpp new file mode 100644 index 000000000..d98fa9759 --- /dev/null +++ b/Source/JavaScriptCore/bytecode/TrackedReferences.cpp @@ -0,0 +1,81 @@ +/* + * Copyright (C) 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "TrackedReferences.h" + +#include "JSCInlines.h" +#include <wtf/CommaPrinter.h> + +namespace JSC { + +TrackedReferences::TrackedReferences() +{ +} + +TrackedReferences::~TrackedReferences() +{ +} + +void TrackedReferences::add(JSCell* cell) +{ + if (cell) + m_references.add(cell); +} + +void TrackedReferences::add(JSValue value) +{ + if (value.isCell()) + add(value.asCell()); +} + +void TrackedReferences::check(JSCell* cell) const +{ + if (!cell) + return; + + if (m_references.contains(cell)) + return; + + dataLog("Found untracked reference: ", RawPointer(cell), "\n"); + dataLog("All tracked references: ", *this, "\n"); + RELEASE_ASSERT_NOT_REACHED(); +} + +void TrackedReferences::check(JSValue value) const +{ + if (value.isCell()) + check(value.asCell()); +} + +void TrackedReferences::dump(PrintStream& out) const +{ + CommaPrinter comma; + for (JSCell* cell : m_references) + out.print(comma, RawPointer(cell)); +} + +} // namespace JSC + diff --git a/Source/JavaScriptCore/bytecode/TrackedReferences.h b/Source/JavaScriptCore/bytecode/TrackedReferences.h new file mode 100644 index 000000000..cc15e1ee7 --- /dev/null +++ b/Source/JavaScriptCore/bytecode/TrackedReferences.h @@ -0,0 +1,56 @@ +/* + * Copyright (C) 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. 
OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef TrackedReferences_h +#define TrackedReferences_h + +#include "JSCJSValue.h" +#include "JSCell.h" +#include <wtf/HashSet.h> +#include <wtf/PrintStream.h> + +namespace JSC { + +class TrackedReferences { +public: + TrackedReferences(); + ~TrackedReferences(); + + void add(JSCell*); + void add(JSValue); + + void check(JSCell*) const; + void check(JSValue) const; + + void dump(PrintStream&) const; + +private: + HashSet<JSCell*> m_references; +}; + +} // namespace JSC + +#endif // TrackedReferences_h + diff --git a/Source/JavaScriptCore/bytecode/TypeLocation.h b/Source/JavaScriptCore/bytecode/TypeLocation.h new file mode 100644 index 000000000..ec07656ee --- /dev/null +++ b/Source/JavaScriptCore/bytecode/TypeLocation.h @@ -0,0 +1,63 @@ +/* + * Copyright (C) 2014 Apple Inc. All Rights Reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef TypeLocation_h +#define TypeLocation_h + +#include "TypeSet.h" + +namespace JSC { + +enum TypeProfilerGlobalIDFlags { + TypeProfilerNeedsUniqueIDGeneration = -1, + TypeProfilerNoGlobalIDExists = -2, + TypeProfilerReturnStatement = -3 +}; + +typedef intptr_t GlobalVariableID; + +class TypeLocation { +public: + TypeLocation() + : m_lastSeenType(TypeNothing) + , m_divotForFunctionOffsetIfReturnStatement(UINT_MAX) + , m_instructionTypeSet(TypeSet::create()) + , m_globalTypeSet(nullptr) + { + } + + GlobalVariableID m_globalVariableID; + RuntimeType m_lastSeenType; + intptr_t m_sourceID; + unsigned m_divotStart; + unsigned m_divotEnd; + unsigned m_divotForFunctionOffsetIfReturnStatement; + RefPtr<TypeSet> m_instructionTypeSet; + RefPtr<TypeSet> m_globalTypeSet; +}; + +} //namespace JSC + +#endif //TypeLocation_h diff --git a/Source/JavaScriptCore/bytecode/UnlinkedCodeBlock.cpp b/Source/JavaScriptCore/bytecode/UnlinkedCodeBlock.cpp new file mode 100644 index 000000000..73959cfd9 --- /dev/null +++ b/Source/JavaScriptCore/bytecode/UnlinkedCodeBlock.cpp @@ -0,0 +1,330 @@ +/* + * Copyright (C) 2012, 2013, 2015 Apple Inc. All Rights Reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "config.h" + +#include "UnlinkedCodeBlock.h" + +#include "BytecodeGenerator.h" +#include "ClassInfo.h" +#include "CodeCache.h" +#include "Executable.h" +#include "ExecutableInfo.h" +#include "FunctionOverrides.h" +#include "JSString.h" +#include "JSCInlines.h" +#include "Parser.h" +#include "SourceProvider.h" +#include "Structure.h" +#include "SymbolTable.h" +#include "UnlinkedInstructionStream.h" +#include <wtf/DataLog.h> + +namespace JSC { + +const ClassInfo UnlinkedCodeBlock::s_info = { "UnlinkedCodeBlock", 0, 0, CREATE_METHOD_TABLE(UnlinkedCodeBlock) }; +const ClassInfo UnlinkedGlobalCodeBlock::s_info = { "UnlinkedGlobalCodeBlock", &Base::s_info, 0, CREATE_METHOD_TABLE(UnlinkedGlobalCodeBlock) }; +const ClassInfo UnlinkedProgramCodeBlock::s_info = { "UnlinkedProgramCodeBlock", &Base::s_info, 0, CREATE_METHOD_TABLE(UnlinkedProgramCodeBlock) }; +const ClassInfo UnlinkedEvalCodeBlock::s_info = { "UnlinkedEvalCodeBlock", &Base::s_info, 0, CREATE_METHOD_TABLE(UnlinkedEvalCodeBlock) }; +const ClassInfo UnlinkedFunctionCodeBlock::s_info = { "UnlinkedFunctionCodeBlock", &Base::s_info, 0, CREATE_METHOD_TABLE(UnlinkedFunctionCodeBlock) }; + +UnlinkedCodeBlock::UnlinkedCodeBlock(VM* vm, Structure* structure, CodeType codeType, const ExecutableInfo& info) + : Base(*vm, structure) + , m_numVars(0) + , m_numCalleeRegisters(0) + , m_numParameters(0) + , m_vm(vm) + , m_globalObjectRegister(VirtualRegister()) + , m_needsFullScopeChain(info.needsActivation()) + , m_usesEval(info.usesEval()) + , m_isStrictMode(info.isStrictMode()) + , m_isConstructor(info.isConstructor()) + , m_hasCapturedVariables(false) + , m_isBuiltinFunction(info.isBuiltinFunction()) + , m_constructorKind(static_cast<unsigned>(info.constructorKind())) + , m_firstLine(0) + , m_lineCount(0) + , m_endColumn(UINT_MAX) + , m_features(0) + , m_codeType(codeType) + , m_arrayProfileCount(0) + , m_arrayAllocationProfileCount(0) + , m_objectAllocationProfileCount(0) + , m_valueProfileCount(0) + , m_llintCallLinkInfoCount(0) +#if ENABLE(BYTECODE_COMMENTS) + , m_bytecodeCommentIterator(0) +#endif +{ + for (auto& constantRegisterIndex : m_linkTimeConstants) + constantRegisterIndex = 0; + ASSERT(m_constructorKind == static_cast<unsigned>(info.constructorKind())); +} + +void UnlinkedCodeBlock::visitChildren(JSCell* cell, SlotVisitor& visitor) +{ + UnlinkedCodeBlock* thisObject = jsCast<UnlinkedCodeBlock*>(cell); + ASSERT_GC_OBJECT_INHERITS(thisObject, info()); + Base::visitChildren(thisObject, visitor); + visitor.append(&thisObject->m_symbolTable); + for (FunctionExpressionVector::iterator ptr = thisObject->m_functionDecls.begin(), end = thisObject->m_functionDecls.end(); ptr != end; ++ptr) + visitor.append(ptr); + for (FunctionExpressionVector::iterator ptr = thisObject->m_functionExprs.begin(), end = thisObject->m_functionExprs.end(); ptr != end; ++ptr) + visitor.append(ptr); + visitor.appendValues(thisObject->m_constantRegisters.data(), thisObject->m_constantRegisters.size()); + if (thisObject->m_rareData) { + for (size_t i = 0, end = thisObject->m_rareData->m_regexps.size(); i != end; i++) + visitor.append(&thisObject->m_rareData->m_regexps[i]); + } +} + +int UnlinkedCodeBlock::lineNumberForBytecodeOffset(unsigned bytecodeOffset) +{ + ASSERT(bytecodeOffset < instructions().count()); + int divot; + int startOffset; + int endOffset; + unsigned line; + unsigned column; + expressionRangeForBytecodeOffset(bytecodeOffset, divot, startOffset, endOffset, line, column); + return line; +} + +inline void 
UnlinkedCodeBlock::getLineAndColumn(ExpressionRangeInfo& info, + unsigned& line, unsigned& column) +{ + switch (info.mode) { + case ExpressionRangeInfo::FatLineMode: + info.decodeFatLineMode(line, column); + break; + case ExpressionRangeInfo::FatColumnMode: + info.decodeFatColumnMode(line, column); + break; + case ExpressionRangeInfo::FatLineAndColumnMode: { + unsigned fatIndex = info.position; + ExpressionRangeInfo::FatPosition& fatPos = m_rareData->m_expressionInfoFatPositions[fatIndex]; + line = fatPos.line; + column = fatPos.column; + break; + } + } // switch +} + +#ifndef NDEBUG +static void dumpLineColumnEntry(size_t index, const UnlinkedInstructionStream& instructionStream, unsigned instructionOffset, unsigned line, unsigned column) +{ + const auto& instructions = instructionStream.unpackForDebugging(); + OpcodeID opcode = instructions[instructionOffset].u.opcode; + const char* event = ""; + if (opcode == op_debug) { + switch (instructions[instructionOffset + 1].u.operand) { + case WillExecuteProgram: event = " WillExecuteProgram"; break; + case DidExecuteProgram: event = " DidExecuteProgram"; break; + case DidEnterCallFrame: event = " DidEnterCallFrame"; break; + case DidReachBreakpoint: event = " DidReachBreakpoint"; break; + case WillLeaveCallFrame: event = " WillLeaveCallFrame"; break; + case WillExecuteStatement: event = " WillExecuteStatement"; break; + } + } + dataLogF(" [%zu] pc %u @ line %u col %u : %s%s\n", index, instructionOffset, line, column, opcodeNames[opcode], event); +} + +void UnlinkedCodeBlock::dumpExpressionRangeInfo() +{ + Vector<ExpressionRangeInfo>& expressionInfo = m_expressionInfo; + + size_t size = m_expressionInfo.size(); + dataLogF("UnlinkedCodeBlock %p expressionRangeInfo[%zu] {\n", this, size); + for (size_t i = 0; i < size; i++) { + ExpressionRangeInfo& info = expressionInfo[i]; + unsigned line; + unsigned column; + getLineAndColumn(info, line, column); + dumpLineColumnEntry(i, instructions(), info.instructionOffset, line, column); + } + dataLog("}\n"); +} +#endif + +void UnlinkedCodeBlock::expressionRangeForBytecodeOffset(unsigned bytecodeOffset, + int& divot, int& startOffset, int& endOffset, unsigned& line, unsigned& column) +{ + ASSERT(bytecodeOffset < instructions().count()); + + if (!m_expressionInfo.size()) { + startOffset = 0; + endOffset = 0; + divot = 0; + line = 0; + column = 0; + return; + } + + Vector<ExpressionRangeInfo>& expressionInfo = m_expressionInfo; + + int low = 0; + int high = expressionInfo.size(); + while (low < high) { + int mid = low + (high - low) / 2; + if (expressionInfo[mid].instructionOffset <= bytecodeOffset) + low = mid + 1; + else + high = mid; + } + + if (!low) + low = 1; + + ExpressionRangeInfo& info = expressionInfo[low - 1]; + startOffset = info.startOffset; + endOffset = info.endOffset; + divot = info.divotPoint; + getLineAndColumn(info, line, column); +} + +void UnlinkedCodeBlock::addExpressionInfo(unsigned instructionOffset, + int divot, int startOffset, int endOffset, unsigned line, unsigned column) +{ + if (divot > ExpressionRangeInfo::MaxDivot) { + // Overflow has occurred, we can only give line number info for errors for this region + divot = 0; + startOffset = 0; + endOffset = 0; + } else if (startOffset > ExpressionRangeInfo::MaxOffset) { + // If the start offset is out of bounds we clear both offsets + // so we only get the divot marker. Error message will have to be reduced + // to line and charPosition number. 
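+ // Note that the divot itself still fits at this point (the
+ // divot > ExpressionRangeInfo::MaxDivot case was handled by the branch
+ // above), so the divot is preserved; only the offsets around it are dropped.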
+ startOffset = 0; + endOffset = 0; + } else if (endOffset > ExpressionRangeInfo::MaxOffset) { + // The end offset is only used for additional context, and is much more likely + // to overflow (eg. function call arguments) so we are willing to drop it without + // dropping the rest of the range. + endOffset = 0; + } + + unsigned positionMode = + (line <= ExpressionRangeInfo::MaxFatLineModeLine && column <= ExpressionRangeInfo::MaxFatLineModeColumn) + ? ExpressionRangeInfo::FatLineMode + : (line <= ExpressionRangeInfo::MaxFatColumnModeLine && column <= ExpressionRangeInfo::MaxFatColumnModeColumn) + ? ExpressionRangeInfo::FatColumnMode + : ExpressionRangeInfo::FatLineAndColumnMode; + + ExpressionRangeInfo info; + info.instructionOffset = instructionOffset; + info.divotPoint = divot; + info.startOffset = startOffset; + info.endOffset = endOffset; + + info.mode = positionMode; + switch (positionMode) { + case ExpressionRangeInfo::FatLineMode: + info.encodeFatLineMode(line, column); + break; + case ExpressionRangeInfo::FatColumnMode: + info.encodeFatColumnMode(line, column); + break; + case ExpressionRangeInfo::FatLineAndColumnMode: { + createRareDataIfNecessary(); + unsigned fatIndex = m_rareData->m_expressionInfoFatPositions.size(); + ExpressionRangeInfo::FatPosition fatPos = { line, column }; + m_rareData->m_expressionInfoFatPositions.append(fatPos); + info.position = fatIndex; + } + } // switch + + m_expressionInfo.append(info); +} + +bool UnlinkedCodeBlock::typeProfilerExpressionInfoForBytecodeOffset(unsigned bytecodeOffset, unsigned& startDivot, unsigned& endDivot) +{ + static const bool verbose = false; + auto iter = m_typeProfilerInfoMap.find(bytecodeOffset); + if (iter == m_typeProfilerInfoMap.end()) { + if (verbose) + dataLogF("Don't have assignment info for offset:%u\n", bytecodeOffset); + startDivot = UINT_MAX; + endDivot = UINT_MAX; + return false; + } + + TypeProfilerExpressionRange& range = iter->value; + startDivot = range.m_startDivot; + endDivot = range.m_endDivot; + return true; +} + +void UnlinkedCodeBlock::addTypeProfilerExpressionInfo(unsigned instructionOffset, unsigned startDivot, unsigned endDivot) +{ + TypeProfilerExpressionRange range; + range.m_startDivot = startDivot; + range.m_endDivot = endDivot; + m_typeProfilerInfoMap.set(instructionOffset, range); +} + +void UnlinkedProgramCodeBlock::visitChildren(JSCell* cell, SlotVisitor& visitor) +{ + UnlinkedProgramCodeBlock* thisObject = jsCast<UnlinkedProgramCodeBlock*>(cell); + ASSERT_GC_OBJECT_INHERITS(thisObject, info()); + Base::visitChildren(thisObject, visitor); +} + +UnlinkedCodeBlock::~UnlinkedCodeBlock() +{ +} + +void UnlinkedProgramCodeBlock::destroy(JSCell* cell) +{ + jsCast<UnlinkedProgramCodeBlock*>(cell)->~UnlinkedProgramCodeBlock(); +} + +void UnlinkedEvalCodeBlock::destroy(JSCell* cell) +{ + jsCast<UnlinkedEvalCodeBlock*>(cell)->~UnlinkedEvalCodeBlock(); +} + +void UnlinkedFunctionCodeBlock::destroy(JSCell* cell) +{ + jsCast<UnlinkedFunctionCodeBlock*>(cell)->~UnlinkedFunctionCodeBlock(); +} + +void UnlinkedFunctionExecutable::destroy(JSCell* cell) +{ + jsCast<UnlinkedFunctionExecutable*>(cell)->~UnlinkedFunctionExecutable(); +} + +void UnlinkedCodeBlock::setInstructions(std::unique_ptr<UnlinkedInstructionStream> instructions) +{ + m_unlinkedInstructions = WTF::move(instructions); +} + +const UnlinkedInstructionStream& UnlinkedCodeBlock::instructions() const +{ + ASSERT(m_unlinkedInstructions.get()); + return *m_unlinkedInstructions; +} + +} + diff --git a/Source/JavaScriptCore/bytecode/UnlinkedCodeBlock.h 
b/Source/JavaScriptCore/bytecode/UnlinkedCodeBlock.h new file mode 100644 index 000000000..552ed84a2 --- /dev/null +++ b/Source/JavaScriptCore/bytecode/UnlinkedCodeBlock.h @@ -0,0 +1,588 @@ +/* + * Copyright (C) 2012-2015 Apple Inc. All Rights Reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef UnlinkedCodeBlock_h +#define UnlinkedCodeBlock_h + +#include "BytecodeConventions.h" +#include "CodeSpecializationKind.h" +#include "CodeType.h" +#include "ConstructAbility.h" +#include "ExpressionRangeInfo.h" +#include "HandlerInfo.h" +#include "Identifier.h" +#include "JSCell.h" +#include "JSString.h" +#include "ParserModes.h" +#include "RegExp.h" +#include "SpecialPointer.h" +#include "SymbolTable.h" +#include "UnlinkedFunctionExecutable.h" +#include "VariableEnvironment.h" +#include "VirtualRegister.h" +#include <wtf/RefCountedArray.h> +#include <wtf/Vector.h> + +namespace JSC { + +class Debugger; +class FunctionMetadataNode; +class FunctionExecutable; +class JSScope; +class ParserError; +class ScriptExecutable; +class SourceCode; +class SourceProvider; +class SymbolTable; +class UnlinkedCodeBlock; +class UnlinkedFunctionCodeBlock; +class UnlinkedFunctionExecutable; +class UnlinkedInstructionStream; +struct ExecutableInfo; + +typedef unsigned UnlinkedValueProfile; +typedef unsigned UnlinkedArrayProfile; +typedef unsigned UnlinkedArrayAllocationProfile; +typedef unsigned UnlinkedObjectAllocationProfile; +typedef unsigned UnlinkedLLIntCallLinkInfo; + +struct UnlinkedStringJumpTable { + typedef HashMap<RefPtr<StringImpl>, int32_t> StringOffsetTable; + StringOffsetTable offsetTable; + + inline int32_t offsetForValue(StringImpl* value, int32_t defaultOffset) + { + StringOffsetTable::const_iterator end = offsetTable.end(); + StringOffsetTable::const_iterator loc = offsetTable.find(value); + if (loc == end) + return defaultOffset; + return loc->value; + } + +}; + +struct UnlinkedSimpleJumpTable { + Vector<int32_t> branchOffsets; + int32_t min; + + int32_t offsetForValue(int32_t value, int32_t defaultOffset); + void add(int32_t key, int32_t offset) + { + if (!branchOffsets[key]) + branchOffsets[key] = offset; + } +}; + +struct UnlinkedInstruction { + UnlinkedInstruction() { u.operand = 0; } + UnlinkedInstruction(OpcodeID opcode) { u.opcode = opcode; } + 
UnlinkedInstruction(int operand) { u.operand = operand; } + union { + OpcodeID opcode; + int32_t operand; + unsigned index; + } u; +}; + +class UnlinkedCodeBlock : public JSCell { +public: + typedef JSCell Base; + static const unsigned StructureFlags = Base::StructureFlags; + + static const bool needsDestruction = true; + + enum { CallFunction, ApplyFunction }; + + bool isConstructor() const { return m_isConstructor; } + bool isStrictMode() const { return m_isStrictMode; } + bool usesEval() const { return m_usesEval; } + + bool needsFullScopeChain() const { return m_needsFullScopeChain; } + + void addExpressionInfo(unsigned instructionOffset, int divot, + int startOffset, int endOffset, unsigned line, unsigned column); + + void addTypeProfilerExpressionInfo(unsigned instructionOffset, unsigned startDivot, unsigned endDivot); + + bool hasExpressionInfo() { return m_expressionInfo.size(); } + + // Special registers + void setThisRegister(VirtualRegister thisRegister) { m_thisRegister = thisRegister; } + void setScopeRegister(VirtualRegister scopeRegister) { m_scopeRegister = scopeRegister; } + void setActivationRegister(VirtualRegister activationRegister) { m_lexicalEnvironmentRegister = activationRegister; } + + bool usesGlobalObject() const { return m_globalObjectRegister.isValid(); } + void setGlobalObjectRegister(VirtualRegister globalObjectRegister) { m_globalObjectRegister = globalObjectRegister; } + VirtualRegister globalObjectRegister() const { return m_globalObjectRegister; } + + // Parameter information + void setNumParameters(int newValue) { m_numParameters = newValue; } + void addParameter() { m_numParameters++; } + unsigned numParameters() const { return m_numParameters; } + + unsigned addRegExp(RegExp* r) + { + createRareDataIfNecessary(); + unsigned size = m_rareData->m_regexps.size(); + m_rareData->m_regexps.append(WriteBarrier<RegExp>(*m_vm, this, r)); + return size; + } + unsigned numberOfRegExps() const + { + if (!m_rareData) + return 0; + return m_rareData->m_regexps.size(); + } + RegExp* regexp(int index) const { ASSERT(m_rareData); return m_rareData->m_regexps[index].get(); } + + // Constant Pools + + size_t numberOfIdentifiers() const { return m_identifiers.size(); } + void addIdentifier(const Identifier& i) { return m_identifiers.append(i); } + const Identifier& identifier(int index) const { return m_identifiers[index]; } + const Vector<Identifier>& identifiers() const { return m_identifiers; } + + unsigned addConstant(JSValue v, SourceCodeRepresentation sourceCodeRepresentation = SourceCodeRepresentation::Other) + { + unsigned result = m_constantRegisters.size(); + m_constantRegisters.append(WriteBarrier<Unknown>()); + m_constantRegisters.last().set(*m_vm, this, v); + m_constantsSourceCodeRepresentation.append(sourceCodeRepresentation); + return result; + } + unsigned addConstant(LinkTimeConstant type) + { + unsigned result = m_constantRegisters.size(); + ASSERT(result); + unsigned index = static_cast<unsigned>(type); + ASSERT(index < LinkTimeConstantCount); + m_linkTimeConstants[index] = result; + m_constantRegisters.append(WriteBarrier<Unknown>()); + m_constantsSourceCodeRepresentation.append(SourceCodeRepresentation::Other); + return result; + } + unsigned registerIndexForLinkTimeConstant(LinkTimeConstant type) + { + unsigned index = static_cast<unsigned>(type); + ASSERT(index < LinkTimeConstantCount); + return m_linkTimeConstants[index]; + } + const Vector<WriteBarrier<Unknown>>& constantRegisters() { return m_constantRegisters; } + const WriteBarrier<Unknown>& 
constantRegister(int index) const { return m_constantRegisters[index - FirstConstantRegisterIndex]; } + ALWAYS_INLINE bool isConstantRegisterIndex(int index) const { return index >= FirstConstantRegisterIndex; } + const Vector<SourceCodeRepresentation>& constantsSourceCodeRepresentation() { return m_constantsSourceCodeRepresentation; } + + // Jumps + size_t numberOfJumpTargets() const { return m_jumpTargets.size(); } + void addJumpTarget(unsigned jumpTarget) { m_jumpTargets.append(jumpTarget); } + unsigned jumpTarget(int index) const { return m_jumpTargets[index]; } + unsigned lastJumpTarget() const { return m_jumpTargets.last(); } + + bool isBuiltinFunction() const { return m_isBuiltinFunction; } + + ConstructorKind constructorKind() const { return static_cast<ConstructorKind>(m_constructorKind); } + + void shrinkToFit() + { + m_jumpTargets.shrinkToFit(); + m_identifiers.shrinkToFit(); + m_constantRegisters.shrinkToFit(); + m_constantsSourceCodeRepresentation.shrinkToFit(); + m_functionDecls.shrinkToFit(); + m_functionExprs.shrinkToFit(); + m_propertyAccessInstructions.shrinkToFit(); + m_expressionInfo.shrinkToFit(); + +#if ENABLE(BYTECODE_COMMENTS) + m_bytecodeComments.shrinkToFit(); +#endif + if (m_rareData) { + m_rareData->m_exceptionHandlers.shrinkToFit(); + m_rareData->m_regexps.shrinkToFit(); + m_rareData->m_constantBuffers.shrinkToFit(); + m_rareData->m_switchJumpTables.shrinkToFit(); + m_rareData->m_stringSwitchJumpTables.shrinkToFit(); + m_rareData->m_expressionInfoFatPositions.shrinkToFit(); + } + } + + void setInstructions(std::unique_ptr<UnlinkedInstructionStream>); + const UnlinkedInstructionStream& instructions() const; + + int m_numVars; + int m_numCapturedVars; + int m_numCalleeRegisters; + + // Jump Tables + + size_t numberOfSwitchJumpTables() const { return m_rareData ? m_rareData->m_switchJumpTables.size() : 0; } + UnlinkedSimpleJumpTable& addSwitchJumpTable() { createRareDataIfNecessary(); m_rareData->m_switchJumpTables.append(UnlinkedSimpleJumpTable()); return m_rareData->m_switchJumpTables.last(); } + UnlinkedSimpleJumpTable& switchJumpTable(int tableIndex) { ASSERT(m_rareData); return m_rareData->m_switchJumpTables[tableIndex]; } + + size_t numberOfStringSwitchJumpTables() const { return m_rareData ? 
m_rareData->m_stringSwitchJumpTables.size() : 0; } + UnlinkedStringJumpTable& addStringSwitchJumpTable() { createRareDataIfNecessary(); m_rareData->m_stringSwitchJumpTables.append(UnlinkedStringJumpTable()); return m_rareData->m_stringSwitchJumpTables.last(); } + UnlinkedStringJumpTable& stringSwitchJumpTable(int tableIndex) { ASSERT(m_rareData); return m_rareData->m_stringSwitchJumpTables[tableIndex]; } + + unsigned addFunctionDecl(UnlinkedFunctionExecutable* n) + { + unsigned size = m_functionDecls.size(); + m_functionDecls.append(WriteBarrier<UnlinkedFunctionExecutable>()); + m_functionDecls.last().set(*m_vm, this, n); + return size; + } + UnlinkedFunctionExecutable* functionDecl(int index) { return m_functionDecls[index].get(); } + size_t numberOfFunctionDecls() { return m_functionDecls.size(); } + unsigned addFunctionExpr(UnlinkedFunctionExecutable* n) + { + unsigned size = m_functionExprs.size(); + m_functionExprs.append(WriteBarrier<UnlinkedFunctionExecutable>()); + m_functionExprs.last().set(*m_vm, this, n); + return size; + } + UnlinkedFunctionExecutable* functionExpr(int index) { return m_functionExprs[index].get(); } + size_t numberOfFunctionExprs() { return m_functionExprs.size(); } + + // Exception handling support + size_t numberOfExceptionHandlers() const { return m_rareData ? m_rareData->m_exceptionHandlers.size() : 0; } + void addExceptionHandler(const UnlinkedHandlerInfo& handler) { createRareDataIfNecessary(); return m_rareData->m_exceptionHandlers.append(handler); } + UnlinkedHandlerInfo& exceptionHandler(int index) { ASSERT(m_rareData); return m_rareData->m_exceptionHandlers[index]; } + + VM* vm() const { return m_vm; } + + UnlinkedArrayProfile addArrayProfile() { return m_arrayProfileCount++; } + unsigned numberOfArrayProfiles() { return m_arrayProfileCount; } + UnlinkedArrayAllocationProfile addArrayAllocationProfile() { return m_arrayAllocationProfileCount++; } + unsigned numberOfArrayAllocationProfiles() { return m_arrayAllocationProfileCount; } + UnlinkedObjectAllocationProfile addObjectAllocationProfile() { return m_objectAllocationProfileCount++; } + unsigned numberOfObjectAllocationProfiles() { return m_objectAllocationProfileCount; } + UnlinkedValueProfile addValueProfile() { return m_valueProfileCount++; } + unsigned numberOfValueProfiles() { return m_valueProfileCount; } + + UnlinkedLLIntCallLinkInfo addLLIntCallLinkInfo() { return m_llintCallLinkInfoCount++; } + unsigned numberOfLLintCallLinkInfos() { return m_llintCallLinkInfoCount; } + + CodeType codeType() const { return m_codeType; } + + VirtualRegister thisRegister() const { return m_thisRegister; } + VirtualRegister scopeRegister() const { return m_scopeRegister; } + VirtualRegister activationRegister() const { return m_lexicalEnvironmentRegister; } + bool hasActivationRegister() const { return m_lexicalEnvironmentRegister.isValid(); } + + void addPropertyAccessInstruction(unsigned propertyAccessInstruction) + { + m_propertyAccessInstructions.append(propertyAccessInstruction); + } + + size_t numberOfPropertyAccessInstructions() const { return m_propertyAccessInstructions.size(); } + const Vector<unsigned>& propertyAccessInstructions() const { return m_propertyAccessInstructions; } + + typedef Vector<JSValue> ConstantBuffer; + + size_t constantBufferCount() { ASSERT(m_rareData); return m_rareData->m_constantBuffers.size(); } + unsigned addConstantBuffer(unsigned length) + { + createRareDataIfNecessary(); + unsigned size = m_rareData->m_constantBuffers.size(); + 
m_rareData->m_constantBuffers.append(Vector<JSValue>(length));
+        return size;
+    }
+
+    const ConstantBuffer& constantBuffer(unsigned index) const
+    {
+        ASSERT(m_rareData);
+        return m_rareData->m_constantBuffers[index];
+    }
+
+    ConstantBuffer& constantBuffer(unsigned index)
+    {
+        ASSERT(m_rareData);
+        return m_rareData->m_constantBuffers[index];
+    }
+
+    bool hasRareData() const { return m_rareData.get(); }
+
+    int lineNumberForBytecodeOffset(unsigned bytecodeOffset);
+
+    void expressionRangeForBytecodeOffset(unsigned bytecodeOffset, int& divot,
+        int& startOffset, int& endOffset, unsigned& line, unsigned& column);
+
+    bool typeProfilerExpressionInfoForBytecodeOffset(unsigned bytecodeOffset, unsigned& startDivot, unsigned& endDivot);
+
+    void recordParse(CodeFeatures features, bool hasCapturedVariables, unsigned firstLine, unsigned lineCount, unsigned endColumn)
+    {
+        m_features = features;
+        m_hasCapturedVariables = hasCapturedVariables;
+        m_firstLine = firstLine;
+        m_lineCount = lineCount;
+        // For the UnlinkedCodeBlock, startColumn is always 0.
+        m_endColumn = endColumn;
+    }
+
+    CodeFeatures codeFeatures() const { return m_features; }
+    bool hasCapturedVariables() const { return m_hasCapturedVariables; }
+    unsigned firstLine() const { return m_firstLine; }
+    unsigned lineCount() const { return m_lineCount; }
+    ALWAYS_INLINE unsigned startColumn() const { return 0; }
+    unsigned endColumn() const { return m_endColumn; }
+
+    void addOpProfileControlFlowBytecodeOffset(size_t offset) { m_opProfileControlFlowBytecodeOffsets.append(offset); }
+    const Vector<size_t>& opProfileControlFlowBytecodeOffsets() const { return m_opProfileControlFlowBytecodeOffsets; }
+
+    void dumpExpressionRangeInfo(); // For debugging purposes only.
+
+protected:
+    UnlinkedCodeBlock(VM*, Structure*, CodeType, const ExecutableInfo&);
+    ~UnlinkedCodeBlock();
+
+    void finishCreation(VM& vm)
+    {
+        Base::finishCreation(vm);
+        if (codeType() == GlobalCode)
+            return;
+        m_symbolTable.set(vm, this, SymbolTable::create(vm));
+    }
+
+private:
+
+    void createRareDataIfNecessary()
+    {
+        if (!m_rareData)
+            m_rareData = std::make_unique<RareData>();
+    }
+
+    void getLineAndColumn(ExpressionRangeInfo&, unsigned& line, unsigned& column);
+
+    std::unique_ptr<UnlinkedInstructionStream> m_unlinkedInstructions;
+
+    int m_numParameters;
+    VM* m_vm;
+
+    VirtualRegister m_thisRegister;
+    VirtualRegister m_scopeRegister;
+    VirtualRegister m_lexicalEnvironmentRegister;
+    VirtualRegister m_globalObjectRegister;
+
+    unsigned m_needsFullScopeChain : 1;
+    unsigned m_usesEval : 1;
+    unsigned m_isStrictMode : 1;
+    unsigned m_isConstructor : 1;
+    unsigned m_hasCapturedVariables : 1;
+    unsigned m_isBuiltinFunction : 1;
+    unsigned m_constructorKind : 2;
+
+    unsigned m_firstLine;
+    unsigned m_lineCount;
+    unsigned m_endColumn;
+
+    CodeFeatures m_features;
+    CodeType m_codeType;
+
+    Vector<unsigned> m_jumpTargets;
+
+    // Constant Pools
+    Vector<Identifier> m_identifiers;
+    Vector<WriteBarrier<Unknown>> m_constantRegisters;
+    Vector<SourceCodeRepresentation> m_constantsSourceCodeRepresentation;
+    std::array<unsigned, LinkTimeConstantCount> m_linkTimeConstants;
+    typedef Vector<WriteBarrier<UnlinkedFunctionExecutable>> FunctionExpressionVector;
+    FunctionExpressionVector m_functionDecls;
+    FunctionExpressionVector m_functionExprs;
+
+    WriteBarrier<SymbolTable> m_symbolTable;
+
+    Vector<unsigned> m_propertyAccessInstructions;
+
+#if ENABLE(BYTECODE_COMMENTS)
+    Vector<Comment> m_bytecodeComments;
+    size_t m_bytecodeCommentIterator;
+#endif
+
+    unsigned
m_arrayProfileCount; + unsigned m_arrayAllocationProfileCount; + unsigned m_objectAllocationProfileCount; + unsigned m_valueProfileCount; + unsigned m_llintCallLinkInfoCount; + +public: + struct RareData { + WTF_MAKE_FAST_ALLOCATED; + public: + Vector<UnlinkedHandlerInfo> m_exceptionHandlers; + + // Rare Constants + Vector<WriteBarrier<RegExp>> m_regexps; + + // Buffers used for large array literals + Vector<ConstantBuffer> m_constantBuffers; + + // Jump Tables + Vector<UnlinkedSimpleJumpTable> m_switchJumpTables; + Vector<UnlinkedStringJumpTable> m_stringSwitchJumpTables; + + Vector<ExpressionRangeInfo::FatPosition> m_expressionInfoFatPositions; + }; + +private: + std::unique_ptr<RareData> m_rareData; + Vector<ExpressionRangeInfo> m_expressionInfo; + struct TypeProfilerExpressionRange { + unsigned m_startDivot; + unsigned m_endDivot; + }; + HashMap<unsigned, TypeProfilerExpressionRange> m_typeProfilerInfoMap; + Vector<size_t> m_opProfileControlFlowBytecodeOffsets; + +protected: + static void visitChildren(JSCell*, SlotVisitor&); + +public: + DECLARE_INFO; +}; + +class UnlinkedGlobalCodeBlock : public UnlinkedCodeBlock { +public: + typedef UnlinkedCodeBlock Base; + +protected: + UnlinkedGlobalCodeBlock(VM* vm, Structure* structure, CodeType codeType, const ExecutableInfo& info) + : Base(vm, structure, codeType, info) + { + } + + DECLARE_INFO; +}; + +class UnlinkedProgramCodeBlock final : public UnlinkedGlobalCodeBlock { +private: + friend class CodeCache; + static UnlinkedProgramCodeBlock* create(VM* vm, const ExecutableInfo& info) + { + UnlinkedProgramCodeBlock* instance = new (NotNull, allocateCell<UnlinkedProgramCodeBlock>(vm->heap)) UnlinkedProgramCodeBlock(vm, vm->unlinkedProgramCodeBlockStructure.get(), info); + instance->finishCreation(*vm); + return instance; + } + +public: + typedef UnlinkedGlobalCodeBlock Base; + static const unsigned StructureFlags = Base::StructureFlags | StructureIsImmortal; + + static void destroy(JSCell*); + + void setVariableDeclarations(const VariableEnvironment& environment) { m_varDeclarations = environment; } + const VariableEnvironment& variableDeclarations() const { return m_varDeclarations; } + + static void visitChildren(JSCell*, SlotVisitor&); + +private: + UnlinkedProgramCodeBlock(VM* vm, Structure* structure, const ExecutableInfo& info) + : Base(vm, structure, GlobalCode, info) + { + } + + VariableEnvironment m_varDeclarations; + +public: + static Structure* createStructure(VM& vm, JSGlobalObject* globalObject, JSValue proto) + { + return Structure::create(vm, globalObject, proto, TypeInfo(UnlinkedProgramCodeBlockType, StructureFlags), info()); + } + + DECLARE_INFO; +}; + +class UnlinkedEvalCodeBlock final : public UnlinkedGlobalCodeBlock { +private: + friend class CodeCache; + + static UnlinkedEvalCodeBlock* create(VM* vm, const ExecutableInfo& info) + { + UnlinkedEvalCodeBlock* instance = new (NotNull, allocateCell<UnlinkedEvalCodeBlock>(vm->heap)) UnlinkedEvalCodeBlock(vm, vm->unlinkedEvalCodeBlockStructure.get(), info); + instance->finishCreation(*vm); + return instance; + } + +public: + typedef UnlinkedGlobalCodeBlock Base; + static const unsigned StructureFlags = Base::StructureFlags | StructureIsImmortal; + + static void destroy(JSCell*); + + const Identifier& variable(unsigned index) { return m_variables[index]; } + unsigned numVariables() { return m_variables.size(); } + void adoptVariables(Vector<Identifier, 0, UnsafeVectorOverflow>& variables) + { + ASSERT(m_variables.isEmpty()); + m_variables.swap(variables); + } + +private: + 
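+    // Construction goes through create() above; CodeCache is the intended
+    // client (note the friend declaration).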
UnlinkedEvalCodeBlock(VM* vm, Structure* structure, const ExecutableInfo& info) + : Base(vm, structure, EvalCode, info) + { + } + + Vector<Identifier, 0, UnsafeVectorOverflow> m_variables; + +public: + static Structure* createStructure(VM& vm, JSGlobalObject* globalObject, JSValue proto) + { + return Structure::create(vm, globalObject, proto, TypeInfo(UnlinkedEvalCodeBlockType, StructureFlags), info()); + } + + DECLARE_INFO; +}; + +class UnlinkedFunctionCodeBlock final : public UnlinkedCodeBlock { +public: + typedef UnlinkedCodeBlock Base; + static const unsigned StructureFlags = Base::StructureFlags | StructureIsImmortal; + + static UnlinkedFunctionCodeBlock* create(VM* vm, CodeType codeType, const ExecutableInfo& info) + { + UnlinkedFunctionCodeBlock* instance = new (NotNull, allocateCell<UnlinkedFunctionCodeBlock>(vm->heap)) UnlinkedFunctionCodeBlock(vm, vm->unlinkedFunctionCodeBlockStructure.get(), codeType, info); + instance->finishCreation(*vm); + return instance; + } + + static void destroy(JSCell*); + +private: + UnlinkedFunctionCodeBlock(VM* vm, Structure* structure, CodeType codeType, const ExecutableInfo& info) + : Base(vm, structure, codeType, info) + { + } + +public: + static Structure* createStructure(VM& vm, JSGlobalObject* globalObject, JSValue proto) + { + return Structure::create(vm, globalObject, proto, TypeInfo(UnlinkedFunctionCodeBlockType, StructureFlags), info()); + } + + DECLARE_INFO; +}; + +} + +#endif // UnlinkedCodeBlock_h diff --git a/Source/JavaScriptCore/bytecode/UnlinkedFunctionExecutable.cpp b/Source/JavaScriptCore/bytecode/UnlinkedFunctionExecutable.cpp new file mode 100644 index 000000000..1891cb8c5 --- /dev/null +++ b/Source/JavaScriptCore/bytecode/UnlinkedFunctionExecutable.cpp @@ -0,0 +1,214 @@ +/* + * Copyright (C) 2012, 2013, 2015 Apple Inc. All Rights Reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "config.h" +#include "UnlinkedFunctionExecutable.h" + +#include "BytecodeGenerator.h" +#include "ClassInfo.h" +#include "CodeCache.h" +#include "Executable.h" +#include "ExecutableInfo.h" +#include "FunctionOverrides.h" +#include "JSCInlines.h" +#include "JSString.h" +#include "Parser.h" +#include "SourceProvider.h" +#include "Structure.h" +#include "SymbolTable.h" +#include "UnlinkedInstructionStream.h" +#include <wtf/DataLog.h> + +namespace JSC { + +static_assert(sizeof(UnlinkedFunctionExecutable) <= 256, "UnlinkedFunctionExecutable should fit in a 256-byte cell."); + +const ClassInfo UnlinkedFunctionExecutable::s_info = { "UnlinkedFunctionExecutable", 0, 0, CREATE_METHOD_TABLE(UnlinkedFunctionExecutable) }; + +static UnlinkedFunctionCodeBlock* generateFunctionCodeBlock( + VM& vm, UnlinkedFunctionExecutable* executable, const SourceCode& source, + CodeSpecializationKind kind, DebuggerMode debuggerMode, ProfilerMode profilerMode, + UnlinkedFunctionKind functionKind, ParserError& error) +{ + JSParserBuiltinMode builtinMode = executable->isBuiltinFunction() ? JSParserBuiltinMode::Builtin : JSParserBuiltinMode::NotBuiltin; + JSParserStrictMode strictMode = executable->isInStrictContext() ? JSParserStrictMode::Strict : JSParserStrictMode::NotStrict; + ASSERT(isFunctionParseMode(executable->parseMode())); + std::unique_ptr<FunctionNode> function = parse<FunctionNode>( + &vm, source, executable->name(), builtinMode, strictMode, executable->parseMode(), error, nullptr); + + if (!function) { + ASSERT(error.isValid()); + return nullptr; + } + + function->finishParsing(executable->name(), executable->functionMode()); + executable->recordParse(function->features(), function->hasCapturedVariables()); + + UnlinkedFunctionCodeBlock* result = UnlinkedFunctionCodeBlock::create(&vm, FunctionCode, + ExecutableInfo(function->needsActivation(), function->usesEval(), function->isStrictMode(), kind == CodeForConstruct, functionKind == UnlinkedBuiltinFunction, executable->constructorKind())); + auto generator(std::make_unique<BytecodeGenerator>(vm, function.get(), result, debuggerMode, profilerMode, executable->parentScopeTDZVariables())); + error = generator->generate(); + if (error.isValid()) + return nullptr; + return result; +} + +UnlinkedFunctionExecutable::UnlinkedFunctionExecutable(VM* vm, Structure* structure, const SourceCode& source, RefPtr<SourceProvider>&& sourceOverride, FunctionMetadataNode* node, UnlinkedFunctionKind kind, ConstructAbility constructAbility, VariableEnvironment& parentScopeTDZVariables) + : Base(*vm, structure) + , m_name(node->ident()) + , m_inferredName(node->inferredName()) + , m_sourceOverride(WTF::move(sourceOverride)) + , m_firstLineOffset(node->firstLine() - source.firstLine()) + , m_lineCount(node->lastLine() - node->firstLine()) + , m_unlinkedFunctionNameStart(node->functionNameStart() - source.startOffset()) + , m_unlinkedBodyStartColumn(node->startColumn()) + , m_unlinkedBodyEndColumn(m_lineCount ? 
node->endColumn() : node->endColumn() - node->startColumn()) + , m_startOffset(node->source().startOffset() - source.startOffset()) + , m_sourceLength(node->source().length()) + , m_parametersStartOffset(node->parametersStart()) + , m_typeProfilingStartOffset(node->functionKeywordStart()) + , m_typeProfilingEndOffset(node->startStartOffset() + node->source().length() - 1) + , m_parameterCount(node->parameterCount()) + , m_parseMode(node->parseMode()) + , m_features(0) + , m_isInStrictContext(node->isInStrictContext()) + , m_hasCapturedVariables(false) + , m_isBuiltinFunction(kind == UnlinkedBuiltinFunction) + , m_constructAbility(static_cast<unsigned>(constructAbility)) + , m_constructorKind(static_cast<unsigned>(node->constructorKind())) + , m_functionMode(node->functionMode()) +{ + ASSERT(m_constructorKind == static_cast<unsigned>(node->constructorKind())); + m_parentScopeTDZVariables.swap(parentScopeTDZVariables); +} + +void UnlinkedFunctionExecutable::visitChildren(JSCell* cell, SlotVisitor& visitor) +{ + UnlinkedFunctionExecutable* thisObject = jsCast<UnlinkedFunctionExecutable*>(cell); + ASSERT_GC_OBJECT_INHERITS(thisObject, info()); + Base::visitChildren(thisObject, visitor); + visitor.append(&thisObject->m_codeBlockForCall); + visitor.append(&thisObject->m_codeBlockForConstruct); + visitor.append(&thisObject->m_nameValue); +} + +FunctionExecutable* UnlinkedFunctionExecutable::link(VM& vm, const SourceCode& ownerSource, int overrideLineNumber) +{ + SourceCode source = m_sourceOverride ? SourceCode(m_sourceOverride) : ownerSource; + unsigned firstLine = source.firstLine() + m_firstLineOffset; + unsigned startOffset = source.startOffset() + m_startOffset; + unsigned lineCount = m_lineCount; + + // Adjust to one-based indexing. + bool startColumnIsOnFirstSourceLine = !m_firstLineOffset; + unsigned startColumn = m_unlinkedBodyStartColumn + (startColumnIsOnFirstSourceLine ? source.startColumn() : 1); + bool endColumnIsOnStartLine = !lineCount; + unsigned endColumn = m_unlinkedBodyEndColumn + (endColumnIsOnStartLine ? 
startColumn : 1); + + SourceCode code(source.provider(), startOffset, startOffset + m_sourceLength, firstLine, startColumn); + FunctionOverrides::OverrideInfo overrideInfo; + bool hasFunctionOverride = false; + + if (UNLIKELY(Options::functionOverrides())) { + hasFunctionOverride = FunctionOverrides::initializeOverrideFor(code, overrideInfo); + if (hasFunctionOverride) { + firstLine = overrideInfo.firstLine; + lineCount = overrideInfo.lineCount; + startColumn = overrideInfo.startColumn; + endColumn = overrideInfo.endColumn; + code = overrideInfo.sourceCode; + } + } + + FunctionExecutable* result = FunctionExecutable::create(vm, code, this, firstLine, firstLine + lineCount, startColumn, endColumn); + if (overrideLineNumber != -1) + result->setOverrideLineNumber(overrideLineNumber); + + if (UNLIKELY(hasFunctionOverride)) { + result->overrideParameterAndTypeProfilingStartEndOffsets( + overrideInfo.parametersStartOffset, + overrideInfo.typeProfilingStartOffset, + overrideInfo.typeProfilingEndOffset); + } + + return result; +} + +UnlinkedFunctionExecutable* UnlinkedFunctionExecutable::fromGlobalCode( + const Identifier& name, ExecState& exec, const SourceCode& source, + JSObject*& exception, int overrideLineNumber) +{ + ParserError error; + VM& vm = exec.vm(); + CodeCache* codeCache = vm.codeCache(); + UnlinkedFunctionExecutable* executable = codeCache->getFunctionExecutableFromGlobalCode(vm, name, source, error); + + auto& globalObject = *exec.lexicalGlobalObject(); + if (globalObject.hasDebugger()) + globalObject.debugger()->sourceParsed(&exec, source.provider(), error.line(), error.message()); + + if (error.isValid()) { + exception = error.toErrorObject(&globalObject, source, overrideLineNumber); + return nullptr; + } + + return executable; +} + +UnlinkedFunctionCodeBlock* UnlinkedFunctionExecutable::codeBlockFor( + VM& vm, const SourceCode& source, CodeSpecializationKind specializationKind, + DebuggerMode debuggerMode, ProfilerMode profilerMode, ParserError& error) +{ + switch (specializationKind) { + case CodeForCall: + if (UnlinkedFunctionCodeBlock* codeBlock = m_codeBlockForCall.get()) + return codeBlock; + break; + case CodeForConstruct: + if (UnlinkedFunctionCodeBlock* codeBlock = m_codeBlockForConstruct.get()) + return codeBlock; + break; + } + + UnlinkedFunctionCodeBlock* result = generateFunctionCodeBlock( + vm, this, source, specializationKind, debuggerMode, profilerMode, + isBuiltinFunction() ? UnlinkedBuiltinFunction : UnlinkedNormalFunction, + error); + + if (error.isValid()) + return nullptr; + + switch (specializationKind) { + case CodeForCall: + m_codeBlockForCall.set(vm, this, result); + break; + case CodeForConstruct: + m_codeBlockForConstruct.set(vm, this, result); + break; + } + return result; +} + +} // namespace JSC diff --git a/Source/JavaScriptCore/bytecode/UnlinkedFunctionExecutable.h b/Source/JavaScriptCore/bytecode/UnlinkedFunctionExecutable.h new file mode 100644 index 000000000..6eae9f0a2 --- /dev/null +++ b/Source/JavaScriptCore/bytecode/UnlinkedFunctionExecutable.h @@ -0,0 +1,180 @@ +/* + * Copyright (C) 2012-2015 Apple Inc. All Rights Reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef UnlinkedFunctionExecutable_h +#define UnlinkedFunctionExecutable_h + +#include "BytecodeConventions.h" +#include "CodeSpecializationKind.h" +#include "CodeType.h" +#include "ConstructAbility.h" +#include "ExpressionRangeInfo.h" +#include "HandlerInfo.h" +#include "Identifier.h" +#include "JSCell.h" +#include "JSString.h" +#include "ParserModes.h" +#include "RegExp.h" +#include "SpecialPointer.h" +#include "VariableEnvironment.h" +#include "VirtualRegister.h" +#include <wtf/RefCountedArray.h> +#include <wtf/Vector.h> + +namespace JSC { + +class FunctionMetadataNode; +class FunctionExecutable; +class ParserError; +class SourceCode; +class SourceProvider; +class UnlinkedFunctionCodeBlock; + +enum UnlinkedFunctionKind { + UnlinkedNormalFunction, + UnlinkedBuiltinFunction, +}; + +class UnlinkedFunctionExecutable final : public JSCell { +public: + friend class BuiltinExecutables; + friend class CodeCache; + friend class VM; + + typedef JSCell Base; + static const unsigned StructureFlags = Base::StructureFlags | StructureIsImmortal; + + static UnlinkedFunctionExecutable* create(VM* vm, const SourceCode& source, FunctionMetadataNode* node, UnlinkedFunctionKind unlinkedFunctionKind, ConstructAbility constructAbility, VariableEnvironment& parentScopeTDZVariables, RefPtr<SourceProvider>&& sourceOverride = nullptr) + { + UnlinkedFunctionExecutable* instance = new (NotNull, allocateCell<UnlinkedFunctionExecutable>(vm->heap)) + UnlinkedFunctionExecutable(vm, vm->unlinkedFunctionExecutableStructure.get(), source, WTF::move(sourceOverride), node, unlinkedFunctionKind, constructAbility, parentScopeTDZVariables); + instance->finishCreation(*vm); + return instance; + } + + const Identifier& name() const { return m_name; } + const Identifier& inferredName() const { return m_inferredName; } + JSString* nameValue() const { return m_nameValue.get(); } + unsigned parameterCount() const { return m_parameterCount; }; + SourceParseMode parseMode() const { return m_parseMode; }; + bool isInStrictContext() const { return m_isInStrictContext; } + FunctionMode functionMode() const { return static_cast<FunctionMode>(m_functionMode); } + ConstructorKind constructorKind() const { return static_cast<ConstructorKind>(m_constructorKind); } + + unsigned unlinkedFunctionNameStart() const { return m_unlinkedFunctionNameStart; } + unsigned unlinkedBodyStartColumn() const { return m_unlinkedBodyStartColumn; } + unsigned unlinkedBodyEndColumn() const { return m_unlinkedBodyEndColumn; } + unsigned startOffset() const { return m_startOffset; } + unsigned 
sourceLength() { return m_sourceLength; } + unsigned parametersStartOffset() const { return m_parametersStartOffset; } + unsigned typeProfilingStartOffset() const { return m_typeProfilingStartOffset; } + unsigned typeProfilingEndOffset() const { return m_typeProfilingEndOffset; } + + UnlinkedFunctionCodeBlock* codeBlockFor( + VM&, const SourceCode&, CodeSpecializationKind, DebuggerMode, ProfilerMode, + ParserError&); + + static UnlinkedFunctionExecutable* fromGlobalCode( + const Identifier&, ExecState&, const SourceCode&, JSObject*& exception, + int overrideLineNumber); + + FunctionExecutable* link(VM&, const SourceCode&, int overrideLineNumber = -1); + + void clearCodeForRecompilation() + { + m_codeBlockForCall.clear(); + m_codeBlockForConstruct.clear(); + } + + void recordParse(CodeFeatures features, bool hasCapturedVariables) + { + m_features = features; + m_hasCapturedVariables = hasCapturedVariables; + } + + CodeFeatures features() const { return m_features; } + bool hasCapturedVariables() const { return m_hasCapturedVariables; } + + static const bool needsDestruction = true; + static void destroy(JSCell*); + + bool isBuiltinFunction() const { return m_isBuiltinFunction; } + ConstructAbility constructAbility() const { return static_cast<ConstructAbility>(m_constructAbility); } + bool isClassConstructorFunction() const { return constructorKind() != ConstructorKind::None; } + const VariableEnvironment* parentScopeTDZVariables() const { return &m_parentScopeTDZVariables; } + +private: + UnlinkedFunctionExecutable(VM*, Structure*, const SourceCode&, RefPtr<SourceProvider>&& sourceOverride, FunctionMetadataNode*, UnlinkedFunctionKind, ConstructAbility, VariableEnvironment&); + WriteBarrier<UnlinkedFunctionCodeBlock> m_codeBlockForCall; + WriteBarrier<UnlinkedFunctionCodeBlock> m_codeBlockForConstruct; + + Identifier m_name; + Identifier m_inferredName; + WriteBarrier<JSString> m_nameValue; + RefPtr<SourceProvider> m_sourceOverride; + VariableEnvironment m_parentScopeTDZVariables; + unsigned m_firstLineOffset; + unsigned m_lineCount; + unsigned m_unlinkedFunctionNameStart; + unsigned m_unlinkedBodyStartColumn; + unsigned m_unlinkedBodyEndColumn; + unsigned m_startOffset; + unsigned m_sourceLength; + unsigned m_parametersStartOffset; + unsigned m_typeProfilingStartOffset; + unsigned m_typeProfilingEndOffset; + unsigned m_parameterCount; + SourceParseMode m_parseMode; + + CodeFeatures m_features; + + unsigned m_isInStrictContext : 1; + unsigned m_hasCapturedVariables : 1; + unsigned m_isBuiltinFunction : 1; + unsigned m_constructAbility: 1; + unsigned m_constructorKind : 2; + unsigned m_functionMode : 1; // FunctionMode + +protected: + void finishCreation(VM& vm) + { + Base::finishCreation(vm); + m_nameValue.set(vm, this, jsString(&vm, name().string())); + } + + static void visitChildren(JSCell*, SlotVisitor&); + +public: + static Structure* createStructure(VM& vm, JSGlobalObject* globalObject, JSValue proto) + { + return Structure::create(vm, globalObject, proto, TypeInfo(UnlinkedFunctionExecutableType, StructureFlags), info()); + } + + DECLARE_EXPORT_INFO; +}; + +} // namespace JSC + +#endif // UnlinkedFunctionExecutable_h diff --git a/Source/JavaScriptCore/bytecode/UnlinkedInstructionStream.cpp b/Source/JavaScriptCore/bytecode/UnlinkedInstructionStream.cpp new file mode 100644 index 000000000..568dbb682 --- /dev/null +++ b/Source/JavaScriptCore/bytecode/UnlinkedInstructionStream.cpp @@ -0,0 +1,125 @@ +/* + * Copyright (C) 2014 Apple Inc. All Rights Reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "UnlinkedInstructionStream.h" + +namespace JSC { + +static void append8(unsigned char*& ptr, unsigned char value) +{ + *(ptr++) = value; +} + +static void append32(unsigned char*& ptr, unsigned value) +{ + if (!(value & 0xffffffe0)) { + *(ptr++) = value; + return; + } + + if ((value & 0xffffffe0) == 0xffffffe0) { + *(ptr++) = (Negative5Bit << 5) | (value & 0x1f); + return; + } + + if ((value & 0xffffffe0) == 0x40000000) { + *(ptr++) = (ConstantRegister5Bit << 5) | (value & 0x1f); + return; + } + + if (!(value & 0xffffe000)) { + *(ptr++) = (Positive13Bit << 5) | ((value >> 8) & 0x1f); + *(ptr++) = value & 0xff; + return; + } + + if ((value & 0xffffe000) == 0xffffe000) { + *(ptr++) = (Negative13Bit << 5) | ((value >> 8) & 0x1f); + *(ptr++) = value & 0xff; + return; + } + + if ((value & 0xffffe000) == 0x40000000) { + *(ptr++) = (ConstantRegister13Bit << 5) | ((value >> 8) & 0x1f); + *(ptr++) = value & 0xff; + return; + } + + *(ptr++) = Full32Bit << 5; + *(ptr++) = value & 0xff; + *(ptr++) = (value >> 8) & 0xff; + *(ptr++) = (value >> 16) & 0xff; + *(ptr++) = (value >> 24) & 0xff; +} + +UnlinkedInstructionStream::UnlinkedInstructionStream(const Vector<UnlinkedInstruction, 0, UnsafeVectorOverflow>& instructions) + : m_instructionCount(instructions.size()) +{ + Vector<unsigned char> buffer; + + // Reserve enough space up front so we never have to reallocate when appending. 
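+        // Worst case, each UnlinkedInstruction slot (the opcode or one operand)
+        // packs to at most 5 bytes (the Full32Bit form in append32()), so
+        // m_instructionCount * 5 is a safe upper bound; the buffer is shrunk to
+        // the bytes actually written once packing is done.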
+ buffer.resizeToFit(m_instructionCount * 5); + unsigned char* ptr = buffer.data(); + + const UnlinkedInstruction* instructionsData = instructions.data(); + for (unsigned i = 0; i < m_instructionCount;) { + const UnlinkedInstruction* pc = &instructionsData[i]; + OpcodeID opcode = pc[0].u.opcode; + append8(ptr, opcode); + + unsigned opLength = opcodeLength(opcode); + + for (unsigned j = 1; j < opLength; ++j) + append32(ptr, pc[j].u.index); + + i += opLength; + } + + buffer.shrink(ptr - buffer.data()); + m_data = RefCountedArray<unsigned char>(buffer); +} + +#ifndef NDEBUG +const RefCountedArray<UnlinkedInstruction>& UnlinkedInstructionStream::unpackForDebugging() const +{ + if (!m_unpackedInstructionsForDebugging.size()) { + m_unpackedInstructionsForDebugging = RefCountedArray<UnlinkedInstruction>(m_instructionCount); + + Reader instructionReader(*this); + for (unsigned i = 0; !instructionReader.atEnd(); ) { + const UnlinkedInstruction* pc = instructionReader.next(); + unsigned opLength = opcodeLength(pc[0].u.opcode); + for (unsigned j = 0; j < opLength; ++j) + m_unpackedInstructionsForDebugging[i++] = pc[j]; + } + } + + return m_unpackedInstructionsForDebugging; +} +#endif + +} + diff --git a/Source/JavaScriptCore/bytecode/UnlinkedInstructionStream.h b/Source/JavaScriptCore/bytecode/UnlinkedInstructionStream.h new file mode 100644 index 000000000..6323c444b --- /dev/null +++ b/Source/JavaScriptCore/bytecode/UnlinkedInstructionStream.h @@ -0,0 +1,150 @@ +/* + * Copyright (C) 2014 Apple Inc. All Rights Reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+
+#ifndef UnlinkedInstructionStream_h
+#define UnlinkedInstructionStream_h
+
+#include "UnlinkedCodeBlock.h"
+#include <wtf/RefCountedArray.h>
+
+namespace JSC {
+
+class UnlinkedInstructionStream {
+    WTF_MAKE_FAST_ALLOCATED;
+public:
+    explicit UnlinkedInstructionStream(const Vector<UnlinkedInstruction, 0, UnsafeVectorOverflow>&);
+
+    unsigned count() const { return m_instructionCount; }
+
+    class Reader {
+    public:
+        explicit Reader(const UnlinkedInstructionStream&);
+
+        const UnlinkedInstruction* next();
+        bool atEnd() const { return m_index == m_stream.m_data.size(); }
+
+    private:
+        unsigned char read8();
+        unsigned read32();
+
+        const UnlinkedInstructionStream& m_stream;
+        UnlinkedInstruction m_unpackedBuffer[16];
+        unsigned m_index;
+    };
+
+#ifndef NDEBUG
+    const RefCountedArray<UnlinkedInstruction>& unpackForDebugging() const;
+#endif
+
+private:
+    friend class Reader;
+
+#ifndef NDEBUG
+    mutable RefCountedArray<UnlinkedInstruction> m_unpackedInstructionsForDebugging;
+#endif
+
+    RefCountedArray<unsigned char> m_data;
+    unsigned m_instructionCount;
+};
+
+// Unlinked instructions are packed in a simple stream format.
+//
+// The first byte is always the opcode.
+// It's followed by an opcode-dependent number of argument values.
+// The first 3 bits of each value determine the format:
+//
+// 5-bit positive integer (1 byte total)
+// 5-bit negative integer (1 byte total)
+// 13-bit positive integer (2 bytes total)
+// 13-bit negative integer (2 bytes total)
+// 5-bit constant register index, based at 0x40000000 (1 byte total)
+// 13-bit constant register index, based at 0x40000000 (2 bytes total)
+// 32-bit raw value (5 bytes total)
+
+enum PackedValueType {
+    Positive5Bit = 0,
+    Negative5Bit,
+    Positive13Bit,
+    Negative13Bit,
+    ConstantRegister5Bit,
+    ConstantRegister13Bit,
+    Full32Bit
+};
+
+ALWAYS_INLINE UnlinkedInstructionStream::Reader::Reader(const UnlinkedInstructionStream& stream)
+    : m_stream(stream)
+    , m_index(0)
+{
+}
+
+ALWAYS_INLINE unsigned char UnlinkedInstructionStream::Reader::read8()
+{
+    return m_stream.m_data.data()[m_index++];
+}
+
+ALWAYS_INLINE unsigned UnlinkedInstructionStream::Reader::read32()
+{
+    const unsigned char* data = &m_stream.m_data.data()[m_index];
+    unsigned char type = data[0] >> 5;
+
+    switch (type) {
+    case Positive5Bit:
+        m_index++;
+        return data[0];
+    case Negative5Bit:
+        m_index++;
+        return 0xffffffe0 | data[0];
+    case Positive13Bit:
+        m_index += 2;
+        return ((data[0] & 0x1F) << 8) | data[1];
+    case Negative13Bit:
+        m_index += 2;
+        return 0xffffe000 | ((data[0] & 0x1F) << 8) | data[1];
+    case ConstantRegister5Bit:
+        m_index++;
+        return 0x40000000 | (data[0] & 0x1F);
+    case ConstantRegister13Bit:
+        m_index += 2;
+        return 0x40000000 | ((data[0] & 0x1F) << 8) | data[1];
+    default:
+        ASSERT(type == Full32Bit);
+        m_index += 5;
+        return data[1] | data[2] << 8 | data[3] << 16 | data[4] << 24;
+    }
+}
+
+ALWAYS_INLINE const UnlinkedInstruction* UnlinkedInstructionStream::Reader::next()
+{
+    m_unpackedBuffer[0].u.opcode = static_cast<OpcodeID>(read8());
+    unsigned opLength = opcodeLength(m_unpackedBuffer[0].u.opcode);
+    for (unsigned i = 1; i < opLength; ++i)
+        m_unpackedBuffer[i].u.index = read32();
+    return m_unpackedBuffer;
+}
+
+} // namespace JSC
+
+#endif // UnlinkedInstructionStream_h
diff --git a/Source/JavaScriptCore/bytecode/ValueProfile.h b/Source/JavaScriptCore/bytecode/ValueProfile.h
new file mode 100644
index 000000000..99a9516c9
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/ValueProfile.h
@@ -0,0 +1,212 @@
+/*
+ *
Copyright (C) 2011, 2012, 2013 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Apple Inc. ("Apple") nor the names of + * its contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef ValueProfile_h +#define ValueProfile_h + +#include "ConcurrentJITLock.h" +#include "Heap.h" +#include "JSArray.h" +#include "SpeculatedType.h" +#include "Structure.h" +#include "WriteBarrier.h" +#include <wtf/PrintStream.h> +#include <wtf/StringPrintStream.h> + +namespace JSC { + +template<unsigned numberOfBucketsArgument> +struct ValueProfileBase { + static const unsigned numberOfBuckets = numberOfBucketsArgument; + static const unsigned numberOfSpecFailBuckets = 1; + static const unsigned bucketIndexMask = numberOfBuckets - 1; + static const unsigned totalNumberOfBuckets = numberOfBuckets + numberOfSpecFailBuckets; + + ValueProfileBase() + : m_bytecodeOffset(-1) + , m_prediction(SpecNone) + , m_numberOfSamplesInPrediction(0) + { + for (unsigned i = 0; i < totalNumberOfBuckets; ++i) + m_buckets[i] = JSValue::encode(JSValue()); + } + + ValueProfileBase(int bytecodeOffset) + : m_bytecodeOffset(bytecodeOffset) + , m_prediction(SpecNone) + , m_numberOfSamplesInPrediction(0) + { + for (unsigned i = 0; i < totalNumberOfBuckets; ++i) + m_buckets[i] = JSValue::encode(JSValue()); + } + + EncodedJSValue* specFailBucket(unsigned i) + { + ASSERT(numberOfBuckets + i < totalNumberOfBuckets); + return m_buckets + numberOfBuckets + i; + } + + const ClassInfo* classInfo(unsigned bucket) const + { + JSValue value = JSValue::decode(m_buckets[bucket]); + if (!!value) { + if (!value.isCell()) + return 0; + return value.asCell()->structure()->classInfo(); + } + return 0; + } + + unsigned numberOfSamples() const + { + unsigned result = 0; + for (unsigned i = 0; i < totalNumberOfBuckets; ++i) { + if (!!JSValue::decode(m_buckets[i])) + result++; + } + return result; + } + + unsigned totalNumberOfSamples() const + { + return numberOfSamples() + m_numberOfSamplesInPrediction; + } + + bool isLive() const + { + for (unsigned i = 0; i < totalNumberOfBuckets; ++i) { + if (!!JSValue::decode(m_buckets[i])) + return true; + } + return false; + } + + CString 
briefDescription(const ConcurrentJITLocker& locker) + { + computeUpdatedPrediction(locker); + + StringPrintStream out; + out.print("predicting ", SpeculationDump(m_prediction)); + return out.toCString(); + } + + void dump(PrintStream& out) + { + out.print("samples = ", totalNumberOfSamples(), " prediction = ", SpeculationDump(m_prediction)); + bool first = true; + for (unsigned i = 0; i < totalNumberOfBuckets; ++i) { + JSValue value = JSValue::decode(m_buckets[i]); + if (!!value) { + if (first) { + out.printf(": "); + first = false; + } else + out.printf(", "); + out.print(value); + } + } + } + + // Updates the prediction and returns the new one. Never call this from any thread + // that isn't executing the code. + SpeculatedType computeUpdatedPrediction(const ConcurrentJITLocker&) + { + for (unsigned i = 0; i < totalNumberOfBuckets; ++i) { + JSValue value = JSValue::decode(m_buckets[i]); + if (!value) + continue; + + m_numberOfSamplesInPrediction++; + mergeSpeculation(m_prediction, speculationFromValue(value)); + + m_buckets[i] = JSValue::encode(JSValue()); + } + + return m_prediction; + } + + int m_bytecodeOffset; // -1 for prologue + + SpeculatedType m_prediction; + unsigned m_numberOfSamplesInPrediction; + + EncodedJSValue m_buckets[totalNumberOfBuckets]; +}; + +struct MinimalValueProfile : public ValueProfileBase<0> { + MinimalValueProfile(): ValueProfileBase<0>() { } + MinimalValueProfile(int bytecodeOffset): ValueProfileBase<0>(bytecodeOffset) { } +}; + +template<unsigned logNumberOfBucketsArgument> +struct ValueProfileWithLogNumberOfBuckets : public ValueProfileBase<1 << logNumberOfBucketsArgument> { + static const unsigned logNumberOfBuckets = logNumberOfBucketsArgument; + + ValueProfileWithLogNumberOfBuckets() + : ValueProfileBase<1 << logNumberOfBucketsArgument>() + { + } + ValueProfileWithLogNumberOfBuckets(int bytecodeOffset) + : ValueProfileBase<1 << logNumberOfBucketsArgument>(bytecodeOffset) + { + } +}; + +struct ValueProfile : public ValueProfileWithLogNumberOfBuckets<0> { + ValueProfile(): ValueProfileWithLogNumberOfBuckets<0>() { } + ValueProfile(int bytecodeOffset): ValueProfileWithLogNumberOfBuckets<0>(bytecodeOffset) { } +}; + +template<typename T> +inline int getValueProfileBytecodeOffset(T* valueProfile) +{ + return valueProfile->m_bytecodeOffset; +} + +// This is a mini value profile to catch pathologies. It is a counter that gets +// incremented when we take the slow path on any instruction. +struct RareCaseProfile { + RareCaseProfile(int bytecodeOffset) + : m_bytecodeOffset(bytecodeOffset) + , m_counter(0) + { + } + + int m_bytecodeOffset; + uint32_t m_counter; +}; + +inline int getRareCaseProfileBytecodeOffset(RareCaseProfile* rareCaseProfile) +{ + return rareCaseProfile->m_bytecodeOffset; +} + +} // namespace JSC + +#endif // ValueProfile_h + diff --git a/Source/JavaScriptCore/bytecode/ValueRecovery.cpp b/Source/JavaScriptCore/bytecode/ValueRecovery.cpp new file mode 100644 index 000000000..996fd3bfe --- /dev/null +++ b/Source/JavaScriptCore/bytecode/ValueRecovery.cpp @@ -0,0 +1,138 @@ +/* + * Copyright (C) 2011, 2013, 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "ValueRecovery.h" + +#include "CodeBlock.h" +#include "JSCInlines.h" + +namespace JSC { + +JSValue ValueRecovery::recover(ExecState* exec) const +{ + switch (technique()) { + case DisplacedInJSStack: + return exec->r(virtualRegister().offset()).jsValue(); + case Int32DisplacedInJSStack: + return jsNumber(exec->r(virtualRegister().offset()).unboxedInt32()); + case Int52DisplacedInJSStack: + return jsNumber(exec->r(virtualRegister().offset()).unboxedInt52()); + case StrictInt52DisplacedInJSStack: + return jsNumber(exec->r(virtualRegister().offset()).unboxedStrictInt52()); + case DoubleDisplacedInJSStack: + return jsNumber(exec->r(virtualRegister().offset()).unboxedDouble()); + case CellDisplacedInJSStack: + return exec->r(virtualRegister().offset()).unboxedCell(); + case BooleanDisplacedInJSStack: +#if USE(JSVALUE64) + return exec->r(virtualRegister().offset()).jsValue(); +#else + return jsBoolean(exec->r(virtualRegister().offset()).unboxedBoolean()); +#endif + case Constant: + return constant(); + default: + RELEASE_ASSERT_NOT_REACHED(); + return JSValue(); + } +} + +#if ENABLE(JIT) + +void ValueRecovery::dumpInContext(PrintStream& out, DumpContext* context) const +{ + switch (technique()) { + case InGPR: + out.print(gpr()); + return; + case UnboxedInt32InGPR: + out.print("int32(", gpr(), ")"); + return; + case UnboxedInt52InGPR: + out.print("int52(", gpr(), ")"); + return; + case UnboxedStrictInt52InGPR: + out.print("strictInt52(", gpr(), ")"); + return; + case UnboxedBooleanInGPR: + out.print("bool(", gpr(), ")"); + return; + case UnboxedCellInGPR: + out.print("cell(", gpr(), ")"); + return; + case InFPR: + out.print(fpr()); + return; +#if USE(JSVALUE32_64) + case InPair: + out.print("pair(", tagGPR(), ", ", payloadGPR(), ")"); + return; +#endif + case DisplacedInJSStack: + out.print("*", virtualRegister()); + return; + case Int32DisplacedInJSStack: + out.print("*int32(", virtualRegister(), ")"); + return; + case Int52DisplacedInJSStack: + out.print("*int52(", virtualRegister(), ")"); + return; + case StrictInt52DisplacedInJSStack: + out.print("*strictInt52(", virtualRegister(), ")"); + return; + case DoubleDisplacedInJSStack: + out.print("*double(", virtualRegister(), ")"); + return; + case CellDisplacedInJSStack: + out.print("*cell(", virtualRegister(), ")"); + return; + case BooleanDisplacedInJSStack: + out.print("*bool(", virtualRegister(), ")"); + return; + case DirectArgumentsThatWereNotCreated: + out.print("DirectArguments(", nodeID(), ")"); + return; + case 
ClonedArgumentsThatWereNotCreated:
+        out.print("ClonedArguments(", nodeID(), ")");
+        return;
+    case Constant:
+        out.print("[", inContext(constant(), context), "]");
+        return;
+    case DontKnow:
+        out.printf("!");
+        return;
+    }
+    RELEASE_ASSERT_NOT_REACHED();
+}
+
+void ValueRecovery::dump(PrintStream& out) const
+{
+    dumpInContext(out, 0);
+}
+#endif // ENABLE(JIT)
+
+} // namespace JSC
+
diff --git a/Source/JavaScriptCore/bytecode/ValueRecovery.h b/Source/JavaScriptCore/bytecode/ValueRecovery.h
new file mode 100644
index 000000000..42651e2c7
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/ValueRecovery.h
@@ -0,0 +1,303 @@
+/*
+ * Copyright (C) 2011, 2013, 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef ValueRecovery_h
+#define ValueRecovery_h
+
+#include "DFGMinifiedID.h"
+#include "DataFormat.h"
+#if ENABLE(JIT)
+#include "GPRInfo.h"
+#include "FPRInfo.h"
+#endif
+#include "JSCJSValue.h"
+#include "MacroAssembler.h"
+#include "VirtualRegister.h"
+
+namespace JSC {
+
+struct DumpContext;
+struct InlineCallFrame;
+
+// Describes how to recover a given bytecode virtual register at a given
+// code point.
+enum ValueRecoveryTechnique {
+    // It's in a register.
+    InGPR,
+    UnboxedInt32InGPR,
+    UnboxedInt52InGPR,
+    UnboxedStrictInt52InGPR,
+    UnboxedBooleanInGPR,
+    UnboxedCellInGPR,
+#if USE(JSVALUE32_64)
+    InPair,
+#endif
+    InFPR,
+    // It's in the stack, but at a different location.
+    DisplacedInJSStack,
+    // It's in the stack, at a different location, and it's unboxed.
+    Int32DisplacedInJSStack,
+    Int52DisplacedInJSStack,
+    StrictInt52DisplacedInJSStack,
+    DoubleDisplacedInJSStack,
+    CellDisplacedInJSStack,
+    BooleanDisplacedInJSStack,
+    // It's an arguments object that was never actually created. This arises
+    // because of the arguments simplification done by the DFG.
+    DirectArgumentsThatWereNotCreated,
+    ClonedArgumentsThatWereNotCreated,
+    // It's a constant.
+    Constant,
+    // Don't know how to recover it.
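+    // DontKnow also serves as the "unset" state: the default constructor uses it
+    // and isSet() tests against it; recover() cannot materialize such a value.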
+ DontKnow +}; + +class ValueRecovery { +public: + ValueRecovery() + : m_technique(DontKnow) + { + } + + bool isSet() const { return m_technique != DontKnow; } + bool operator!() const { return !isSet(); } + + static ValueRecovery inGPR(MacroAssembler::RegisterID gpr, DataFormat dataFormat) + { + ASSERT(dataFormat != DataFormatNone); +#if USE(JSVALUE32_64) + ASSERT(dataFormat == DataFormatInt32 || dataFormat == DataFormatCell || dataFormat == DataFormatBoolean); +#endif + ValueRecovery result; + if (dataFormat == DataFormatInt32) + result.m_technique = UnboxedInt32InGPR; + else if (dataFormat == DataFormatInt52) + result.m_technique = UnboxedInt52InGPR; + else if (dataFormat == DataFormatStrictInt52) + result.m_technique = UnboxedStrictInt52InGPR; + else if (dataFormat == DataFormatBoolean) + result.m_technique = UnboxedBooleanInGPR; + else if (dataFormat == DataFormatCell) + result.m_technique = UnboxedCellInGPR; + else + result.m_technique = InGPR; + result.m_source.gpr = gpr; + return result; + } + +#if USE(JSVALUE32_64) + static ValueRecovery inPair(MacroAssembler::RegisterID tagGPR, MacroAssembler::RegisterID payloadGPR) + { + ValueRecovery result; + result.m_technique = InPair; + result.m_source.pair.tagGPR = tagGPR; + result.m_source.pair.payloadGPR = payloadGPR; + return result; + } +#endif + + static ValueRecovery inFPR(MacroAssembler::FPRegisterID fpr) + { + ValueRecovery result; + result.m_technique = InFPR; + result.m_source.fpr = fpr; + return result; + } + + static ValueRecovery displacedInJSStack(VirtualRegister virtualReg, DataFormat dataFormat) + { + ValueRecovery result; + switch (dataFormat) { + case DataFormatInt32: + result.m_technique = Int32DisplacedInJSStack; + break; + + case DataFormatInt52: + result.m_technique = Int52DisplacedInJSStack; + break; + + case DataFormatStrictInt52: + result.m_technique = StrictInt52DisplacedInJSStack; + break; + + case DataFormatDouble: + result.m_technique = DoubleDisplacedInJSStack; + break; + + case DataFormatCell: + result.m_technique = CellDisplacedInJSStack; + break; + + case DataFormatBoolean: + result.m_technique = BooleanDisplacedInJSStack; + break; + + default: + ASSERT(dataFormat != DataFormatNone && dataFormat != DataFormatStorage); + result.m_technique = DisplacedInJSStack; + break; + } + result.m_source.virtualReg = virtualReg.offset(); + return result; + } + + static ValueRecovery constant(JSValue value) + { + ValueRecovery result; + result.m_technique = Constant; + result.m_source.constant = JSValue::encode(value); + return result; + } + + static ValueRecovery directArgumentsThatWereNotCreated(DFG::MinifiedID id) + { + ValueRecovery result; + result.m_technique = DirectArgumentsThatWereNotCreated; + result.m_source.nodeID = id.bits(); + return result; + } + + static ValueRecovery outOfBandArgumentsThatWereNotCreated(DFG::MinifiedID id) + { + ValueRecovery result; + result.m_technique = ClonedArgumentsThatWereNotCreated; + result.m_source.nodeID = id.bits(); + return result; + } + + ValueRecoveryTechnique technique() const { return m_technique; } + + bool isConstant() const { return m_technique == Constant; } + + bool isInRegisters() const + { + switch (m_technique) { + case InGPR: + case UnboxedInt32InGPR: + case UnboxedBooleanInGPR: + case UnboxedCellInGPR: + case UnboxedInt52InGPR: + case UnboxedStrictInt52InGPR: +#if USE(JSVALUE32_64) + case InPair: +#endif + case InFPR: + return true; + default: + return false; + } + } + + MacroAssembler::RegisterID gpr() const + { + ASSERT(m_technique == InGPR || m_technique == 
UnboxedInt32InGPR || m_technique == UnboxedBooleanInGPR || m_technique == UnboxedInt52InGPR || m_technique == UnboxedStrictInt52InGPR || m_technique == UnboxedCellInGPR); + return m_source.gpr; + } + +#if USE(JSVALUE32_64) + MacroAssembler::RegisterID tagGPR() const + { + ASSERT(m_technique == InPair); + return m_source.pair.tagGPR; + } + + MacroAssembler::RegisterID payloadGPR() const + { + ASSERT(m_technique == InPair); + return m_source.pair.payloadGPR; + } +#endif + + MacroAssembler::FPRegisterID fpr() const + { + ASSERT(m_technique == InFPR); + return m_source.fpr; + } + + VirtualRegister virtualRegister() const + { + ASSERT(m_technique == DisplacedInJSStack || m_technique == Int32DisplacedInJSStack || m_technique == DoubleDisplacedInJSStack || m_technique == CellDisplacedInJSStack || m_technique == BooleanDisplacedInJSStack || m_technique == Int52DisplacedInJSStack || m_technique == StrictInt52DisplacedInJSStack); + return VirtualRegister(m_source.virtualReg); + } + + ValueRecovery withLocalsOffset(int offset) const + { + switch (m_technique) { + case DisplacedInJSStack: + case Int32DisplacedInJSStack: + case DoubleDisplacedInJSStack: + case CellDisplacedInJSStack: + case BooleanDisplacedInJSStack: + case Int52DisplacedInJSStack: + case StrictInt52DisplacedInJSStack: { + ValueRecovery result; + result.m_technique = m_technique; + result.m_source.virtualReg = m_source.virtualReg + offset; + return result; + } + + default: + return *this; + } + } + + JSValue constant() const + { + ASSERT(m_technique == Constant); + return JSValue::decode(m_source.constant); + } + + DFG::MinifiedID nodeID() const + { + ASSERT(m_technique == DirectArgumentsThatWereNotCreated || m_technique == ClonedArgumentsThatWereNotCreated); + return DFG::MinifiedID::fromBits(m_source.nodeID); + } + + JSValue recover(ExecState*) const; + +#if ENABLE(JIT) + void dumpInContext(PrintStream& out, DumpContext* context) const; + void dump(PrintStream& out) const; +#endif + +private: + ValueRecoveryTechnique m_technique; + union { + MacroAssembler::RegisterID gpr; + MacroAssembler::FPRegisterID fpr; +#if USE(JSVALUE32_64) + struct { + MacroAssembler::RegisterID tagGPR; + MacroAssembler::RegisterID payloadGPR; + } pair; +#endif + int virtualReg; + EncodedJSValue constant; + uintptr_t nodeID; + } m_source; +}; + +} // namespace JSC + +#endif // ValueRecovery_h diff --git a/Source/JavaScriptCore/bytecode/VariableWriteFireDetail.cpp b/Source/JavaScriptCore/bytecode/VariableWriteFireDetail.cpp new file mode 100644 index 000000000..b483ab21c --- /dev/null +++ b/Source/JavaScriptCore/bytecode/VariableWriteFireDetail.cpp @@ -0,0 +1,44 @@ +/* + * Copyright (C) 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. 
OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "VariableWriteFireDetail.h" + +#include "JSCInlines.h" + +namespace JSC { + +void VariableWriteFireDetail::dump(PrintStream& out) const +{ + out.print("Write to ", m_name, " in ", JSValue(m_object)); +} + +void VariableWriteFireDetail::touch(WatchpointSet* set, JSObject* object, const PropertyName& name) +{ + set->touch(VariableWriteFireDetail(object, name)); +} + +} // namespace JSC + diff --git a/Source/JavaScriptCore/bytecode/VariableWriteFireDetail.h b/Source/JavaScriptCore/bytecode/VariableWriteFireDetail.h new file mode 100644 index 000000000..664f69cbb --- /dev/null +++ b/Source/JavaScriptCore/bytecode/VariableWriteFireDetail.h @@ -0,0 +1,55 @@ +/* + * Copyright (C) 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef VariableWriteFireDetail_h +#define VariableWriteFireDetail_h + +#include "Watchpoint.h" + +namespace JSC { + +class JSObject; +class PropertyName; + +class VariableWriteFireDetail : public FireDetail { +public: + VariableWriteFireDetail(JSObject* object, const PropertyName& name) + : m_object(object) + , m_name(name) + { + } + + virtual void dump(PrintStream&) const override; + + JS_EXPORT_PRIVATE static void touch(WatchpointSet*, JSObject*, const PropertyName&); + +private: + JSObject* m_object; + const PropertyName& m_name; +}; + +} // namespace JSC + +#endif // VariableWriteFireDetail_h diff --git a/Source/JavaScriptCore/bytecode/VirtualRegister.cpp b/Source/JavaScriptCore/bytecode/VirtualRegister.cpp new file mode 100644 index 000000000..57cdb62c9 --- /dev/null +++ b/Source/JavaScriptCore/bytecode/VirtualRegister.cpp @@ -0,0 +1,65 @@ +/* + * Copyright (C) 2011, 2015 Apple Inc. All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "VirtualRegister.h" + +namespace JSC { + +void VirtualRegister::dump(PrintStream& out) const +{ + if (!isValid()) { + out.print("<invalid>"); + return; + } + + if (isHeader()) { + out.print("head", m_virtualRegister); + return; + } + + if (isConstant()) { + out.print("const", toConstantIndex()); + return; + } + + if (isArgument()) { + if (!toArgument()) + out.print("this"); + else + out.print("arg", toArgument()); + return; + } + + if (isLocal()) { + out.print("loc", toLocal()); + return; + } + + RELEASE_ASSERT_NOT_REACHED(); +} + +} // namespace JSC + diff --git a/Source/JavaScriptCore/bytecode/VirtualRegister.h b/Source/JavaScriptCore/bytecode/VirtualRegister.h new file mode 100644 index 000000000..613088ef6 --- /dev/null +++ b/Source/JavaScriptCore/bytecode/VirtualRegister.h @@ -0,0 +1,130 @@ +/* + * Copyright (C) 2011, 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef VirtualRegister_h +#define VirtualRegister_h + +#include "CallFrame.h" + +#include <wtf/PrintStream.h> + +namespace JSC { + +inline bool operandIsLocal(int operand) +{ + return operand < 0; +} + +inline bool operandIsArgument(int operand) +{ + return operand >= 0; +} + + +class VirtualRegister { +public: + friend VirtualRegister virtualRegisterForLocal(int); + friend VirtualRegister virtualRegisterForArgument(int, int); + + VirtualRegister() + : m_virtualRegister(s_invalidVirtualRegister) + { } + + explicit VirtualRegister(int virtualRegister) + : m_virtualRegister(virtualRegister) + { } + + bool isValid() const { return (m_virtualRegister != s_invalidVirtualRegister); } + bool isLocal() const { return operandIsLocal(m_virtualRegister); } + bool isArgument() const { return operandIsArgument(m_virtualRegister); } + bool isHeader() const { return m_virtualRegister >= 0 && m_virtualRegister < JSStack::ThisArgument; } + bool isConstant() const { return m_virtualRegister >= s_firstConstantRegisterIndex; } + int toLocal() const { ASSERT(isLocal()); return operandToLocal(m_virtualRegister); } + int toArgument() const { ASSERT(isArgument()); return operandToArgument(m_virtualRegister); } + int toConstantIndex() const { ASSERT(isConstant()); return m_virtualRegister - s_firstConstantRegisterIndex; } + int offset() const { return m_virtualRegister; } + int offsetInBytes() const { return m_virtualRegister * sizeof(Register); } + + bool operator==(VirtualRegister other) const { return m_virtualRegister == other.m_virtualRegister; } + bool operator!=(VirtualRegister other) const { return m_virtualRegister != other.m_virtualRegister; } + bool operator<(VirtualRegister other) const { return m_virtualRegister < other.m_virtualRegister; } + bool operator>(VirtualRegister other) const { return m_virtualRegister > other.m_virtualRegister; } + bool operator<=(VirtualRegister other) const { return m_virtualRegister <= other.m_virtualRegister; } + bool operator>=(VirtualRegister other) const { return m_virtualRegister >= other.m_virtualRegister; } + + VirtualRegister operator+(int value) const + { + return VirtualRegister(offset() + value); + } + VirtualRegister operator-(int value) const + { + return VirtualRegister(offset() - value); + } + VirtualRegister operator+(VirtualRegister value) const + { + return VirtualRegister(offset() + value.offset()); + } + VirtualRegister operator-(VirtualRegister value) const + { + return VirtualRegister(offset() - value.offset()); + } + VirtualRegister& operator+=(int value) + { + return *this = *this + value; + } + VirtualRegister& operator-=(int value) + { + return *this = *this - value; + } + + void dump(PrintStream& out) const; + +private: + static const int s_invalidVirtualRegister = 0x3fffffff; + static const int s_firstConstantRegisterIndex = 0x40000000; + + static int localToOperand(int local) { return -1 - local; } + static int operandToLocal(int operand) { return -1 - operand; } + static int operandToArgument(int operand) { return operand - CallFrame::thisArgumentOffset(); } + static int argumentToOperand(int argument) { return argument + CallFrame::thisArgumentOffset(); } + + int m_virtualRegister; +}; + +COMPILE_ASSERT(sizeof(VirtualRegister) == sizeof(int), VirtualRegister_is_32bit); + +inline VirtualRegister virtualRegisterForLocal(int local) +{ + return VirtualRegister(VirtualRegister::localToOperand(local)); +} + +inline VirtualRegister virtualRegisterForArgument(int argument, int offset = 0) +{ + return 
VirtualRegister(VirtualRegister::argumentToOperand(argument) + offset); +} + +} // namespace JSC + +#endif // VirtualRegister_h diff --git a/Source/JavaScriptCore/bytecode/Watchpoint.cpp b/Source/JavaScriptCore/bytecode/Watchpoint.cpp new file mode 100644 index 000000000..761c06744 --- /dev/null +++ b/Source/JavaScriptCore/bytecode/Watchpoint.cpp @@ -0,0 +1,155 @@ +/* + * Copyright (C) 2012-2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "Watchpoint.h" + +#include <wtf/CompilationThread.h> +#include <wtf/PassRefPtr.h> + +namespace JSC { + +void StringFireDetail::dump(PrintStream& out) const +{ + out.print(m_string); +} + +Watchpoint::~Watchpoint() +{ + if (isOnList()) { + // This will happen if we get destroyed before the set fires. That's totally a valid + // possibility. For example: + // + // CodeBlock has a Watchpoint on transition from structure S1. The transition never + // happens, but the CodeBlock gets destroyed because of GC. + remove(); + } +} + +void Watchpoint::fire(const FireDetail& detail) +{ + RELEASE_ASSERT(!isOnList()); + fireInternal(detail); +} + +WatchpointSet::WatchpointSet(WatchpointState state) + : m_state(state) + , m_setIsNotEmpty(false) +{ +} + +WatchpointSet::~WatchpointSet() +{ + // Remove all watchpoints, so that they don't try to remove themselves. Note that we + // don't fire watchpoints on deletion. We assume that any code that is interested in + // watchpoints already also separately has a mechanism to make sure that the code is + // either keeping the watchpoint set's owner alive, or does some weak reference thing. + while (!m_set.isEmpty()) + m_set.begin()->remove(); +} + +void WatchpointSet::add(Watchpoint* watchpoint) +{ + ASSERT(!isCompilationThread()); + ASSERT(state() != IsInvalidated); + if (!watchpoint) + return; + m_set.push(watchpoint); + m_setIsNotEmpty = true; + m_state = IsWatched; +} + +void WatchpointSet::fireAllSlow(const FireDetail& detail) +{ + ASSERT(state() == IsWatched); + + WTF::storeStoreFence(); + m_state = IsInvalidated; // Do this first. Needed for adaptive watchpoints. 
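+ // Publishing IsInvalidated before walking the list means that any adaptive
+ // watchpoint that re-registers itself while firing observes this set as
+ // already invalidated; fireAllWatchpoints() release-asserts exactly that.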
+ fireAllWatchpoints(detail);
+ WTF::storeStoreFence();
+}
+
+void WatchpointSet::fireAllSlow(const char* reason)
+{
+ fireAllSlow(StringFireDetail(reason));
+}
+
+void WatchpointSet::fireAllWatchpoints(const FireDetail& detail)
+{
+ // In case there are any adaptive watchpoints, we need to make sure that they see that this
+ // watchpoint has already been invalidated.
+ RELEASE_ASSERT(hasBeenInvalidated());
+
+ while (!m_set.isEmpty()) {
+ Watchpoint* watchpoint = m_set.begin();
+ ASSERT(watchpoint->isOnList());
+
+ // Removing the Watchpoint before firing it makes it possible to implement watchpoints
+ // that add themselves to a different set when they fire. This kind of "adaptive"
+ // watchpoint can be used to track some semantic property that is more fine-grained than
+ // what the set can convey. For example, we might care if a singleton object ever has a
+ // property called "foo". We can watch for this by checking if its Structure has "foo" and
+ // then watching its transitions. But then the watchpoint fires if any property is added.
+ // So, before the watchpoint decides to invalidate any code, it can check if it is
+ // possible to add itself to the transition watchpoint set of the singleton object's new
+ // Structure.
+ watchpoint->remove();
+ ASSERT(m_set.begin() != watchpoint);
+ ASSERT(!watchpoint->isOnList());
+
+ watchpoint->fire(detail);
+ // After we fire the watchpoint, the watchpoint pointer may be a dangling pointer. That's
+ // fine, because we have no use for the pointer anymore.
+ }
+}
+
+void InlineWatchpointSet::add(Watchpoint* watchpoint)
+{
+ inflate()->add(watchpoint);
+}
+
+void InlineWatchpointSet::fireAll(const char* reason)
+{
+ fireAll(StringFireDetail(reason));
+}
+
+WatchpointSet* InlineWatchpointSet::inflateSlow()
+{
+ ASSERT(isThin());
+ ASSERT(!isCompilationThread());
+ WatchpointSet* fat = adoptRef(new WatchpointSet(decodeState(m_data))).leakRef();
+ WTF::storeStoreFence();
+ m_data = bitwise_cast<uintptr_t>(fat);
+ return fat;
+}
+
+void InlineWatchpointSet::freeFat()
+{
+ ASSERT(isFat());
+ fat()->deref();
+}
+
+} // namespace JSC
+
diff --git a/Source/JavaScriptCore/bytecode/Watchpoint.h b/Source/JavaScriptCore/bytecode/Watchpoint.h new file mode 100644 index 000000000..c8f628d33 --- /dev/null +++ b/Source/JavaScriptCore/bytecode/Watchpoint.h @@ -0,0 +1,401 @@ +/*
+ * Copyright (C) 2012-2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC.
OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef Watchpoint_h
+#define Watchpoint_h
+
+#include <wtf/Atomics.h>
+#include <wtf/FastMalloc.h>
+#include <wtf/Noncopyable.h>
+#include <wtf/PrintStream.h>
+#include <wtf/SentinelLinkedList.h>
+#include <wtf/ThreadSafeRefCounted.h>
+
+namespace JSC {
+
+class FireDetail {
+ void* operator new(size_t) = delete;
+
+public:
+ FireDetail()
+ {
+ }
+
+ virtual ~FireDetail()
+ {
+ }
+
+ virtual void dump(PrintStream&) const = 0;
+};
+
+class StringFireDetail : public FireDetail {
+public:
+ StringFireDetail(const char* string)
+ : m_string(string)
+ {
+ }
+
+ virtual void dump(PrintStream& out) const override;
+
+private:
+ const char* m_string;
+};
+
+class WatchpointSet;
+
+class Watchpoint : public BasicRawSentinelNode<Watchpoint> {
+ WTF_MAKE_NONCOPYABLE(Watchpoint);
+ WTF_MAKE_FAST_ALLOCATED;
+public:
+ Watchpoint()
+ {
+ }
+
+ virtual ~Watchpoint();
+
+protected:
+ virtual void fireInternal(const FireDetail&) = 0;
+
+private:
+ friend class WatchpointSet;
+ void fire(const FireDetail&);
+};
+
+enum WatchpointState {
+ ClearWatchpoint,
+ IsWatched,
+ IsInvalidated
+};
+
+class InlineWatchpointSet;
+
+class WatchpointSet : public ThreadSafeRefCounted<WatchpointSet> {
+ friend class LLIntOffsetsExtractor;
+public:
+ JS_EXPORT_PRIVATE WatchpointSet(WatchpointState);
+ JS_EXPORT_PRIVATE ~WatchpointSet(); // Note that this will not fire any of the watchpoints; if you need to know when a WatchpointSet dies then you need a separate mechanism for this.
+
+ // Fast way of getting the state, which only works from the main thread.
+ WatchpointState stateOnJSThread() const
+ {
+ return static_cast<WatchpointState>(m_state);
+ }
+
+ // It is safe to call this from another thread. It may return an old
+ // state. Guarantees that if you *first* read the state() of the thing being
+ // watched and it returned IsWatched and *second* you actually read its
+ // value then it's safe to assume that if the state being watched changes
+ // then also the watchpoint state() will change to IsInvalidated.
+ WatchpointState state() const
+ {
+ WTF::loadLoadFence();
+ WatchpointState result = static_cast<WatchpointState>(m_state);
+ WTF::loadLoadFence();
+ return result;
+ }
+
+ // It is safe to call this from another thread. It may return true
+ // even if the set actually had been invalidated, but that ought to happen
+ // only in the case of races, and should be rare. Guarantees that if you
+ // call this after observing something that must imply that the set is
+ // invalidated, then you will see this return false. This is ensured by
+ // issuing a load-load fence prior to querying the state.
+ bool isStillValid() const
+ {
+ return state() != IsInvalidated;
+ }
+ // Like isStillValid(), may be called from another thread.
+ bool hasBeenInvalidated() const { return !isStillValid(); }
+
+ // As a convenience, this will ignore 0. That's because code paths in the DFG
+ // that create speculation watchpoints may choose to bail out if speculation
+ // had already been terminated.
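+ // Note that add() also moves the set from ClearWatchpoint to IsWatched;
+ // the definition in Watchpoint.cpp above asserts that it is not called
+ // from a compilation thread and not called on an already-invalidated set.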
+ void add(Watchpoint*); + + // Force the watchpoint set to behave as if it was being watched even if no + // watchpoints have been installed. This will result in invalidation if the + // watchpoint would have fired. That's a pretty good indication that you + // probably don't want to set watchpoints, since we typically don't want to + // set watchpoints that we believe will actually be fired. + void startWatching() + { + ASSERT(m_state != IsInvalidated); + if (m_state == IsWatched) + return; + WTF::storeStoreFence(); + m_state = IsWatched; + WTF::storeStoreFence(); + } + + void fireAll(const FireDetail& detail) + { + if (LIKELY(m_state != IsWatched)) + return; + fireAllSlow(detail); + } + + void fireAll(const char* reason) + { + if (LIKELY(m_state != IsWatched)) + return; + fireAllSlow(reason); + } + + void touch(const FireDetail& detail) + { + if (state() == ClearWatchpoint) + startWatching(); + else + fireAll(detail); + } + + void touch(const char* reason) + { + touch(StringFireDetail(reason)); + } + + void invalidate(const FireDetail& detail) + { + if (state() == IsWatched) + fireAll(detail); + m_state = IsInvalidated; + } + + void invalidate(const char* reason) + { + invalidate(StringFireDetail(reason)); + } + + bool isBeingWatched() const + { + return m_setIsNotEmpty; + } + + int8_t* addressOfState() { return &m_state; } + int8_t* addressOfSetIsNotEmpty() { return &m_setIsNotEmpty; } + + JS_EXPORT_PRIVATE void fireAllSlow(const FireDetail&); // Call only if you've checked isWatched. + JS_EXPORT_PRIVATE void fireAllSlow(const char* reason); // Ditto. + +private: + void fireAllWatchpoints(const FireDetail&); + + friend class InlineWatchpointSet; + + int8_t m_state; + int8_t m_setIsNotEmpty; + + SentinelLinkedList<Watchpoint, BasicRawSentinelNode<Watchpoint>> m_set; +}; + +// InlineWatchpointSet is a low-overhead, non-copyable watchpoint set in which +// it is not possible to quickly query whether it is being watched in a single +// branch. There is a fairly simple tradeoff between WatchpointSet and +// InlineWatchpointSet: +// +// Do you have to emit JIT code that rapidly tests whether the watchpoint set +// is being watched? If so, use WatchpointSet. +// +// Do you need multiple parties to have pointers to the same WatchpointSet? +// If so, use WatchpointSet. +// +// Do you have to allocate a lot of watchpoint sets? If so, use +// InlineWatchpointSet unless you answered "yes" to the previous questions. +// +// InlineWatchpointSet will use just one pointer-width word of memory unless +// you actually add watchpoints to it, in which case it internally inflates +// to a pointer to a WatchpointSet, and transfers its state to the +// WatchpointSet. + +class InlineWatchpointSet { + WTF_MAKE_NONCOPYABLE(InlineWatchpointSet); +public: + InlineWatchpointSet(WatchpointState state) + : m_data(encodeState(state)) + { + } + + ~InlineWatchpointSet() + { + if (isThin()) + return; + freeFat(); + } + + // Fast way of getting the state, which only works from the main thread. + WatchpointState stateOnJSThread() const + { + uintptr_t data = m_data; + if (isFat(data)) + return fat(data)->stateOnJSThread(); + return decodeState(data); + } + + // It is safe to call this from another thread. It may return a prior state, + // but that should be fine since you should only perform actions based on the + // state if you also add a watchpoint. 
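+ // The paired load-load fences below match WatchpointSet::state(), so the
+ // read of m_data is ordered the same way whether or not the set has been
+ // inflated to a full WatchpointSet.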
+ WatchpointState state() const
+ {
+ WTF::loadLoadFence();
+ uintptr_t data = m_data;
+ WTF::loadLoadFence();
+ if (isFat(data))
+ return fat(data)->state();
+ return decodeState(data);
+ }
+
+ // It is safe to call this from another thread. It may return false
+ // even if the set actually had been invalidated, but that ought to happen
+ // only in the case of races, and should be rare.
+ bool hasBeenInvalidated() const
+ {
+ return state() == IsInvalidated;
+ }
+
+ // Like hasBeenInvalidated(), may be called from another thread.
+ bool isStillValid() const
+ {
+ return !hasBeenInvalidated();
+ }
+
+ void add(Watchpoint*);
+
+ void startWatching()
+ {
+ if (isFat()) {
+ fat()->startWatching();
+ return;
+ }
+ ASSERT(decodeState(m_data) != IsInvalidated);
+ m_data = encodeState(IsWatched);
+ }
+
+ void fireAll(const FireDetail& detail)
+ {
+ if (isFat()) {
+ fat()->fireAll(detail);
+ return;
+ }
+ if (decodeState(m_data) == ClearWatchpoint)
+ return;
+ m_data = encodeState(IsInvalidated);
+ WTF::storeStoreFence();
+ }
+
+ void invalidate(const FireDetail& detail)
+ {
+ if (isFat())
+ fat()->invalidate(detail);
+ else
+ m_data = encodeState(IsInvalidated);
+ }
+
+ JS_EXPORT_PRIVATE void fireAll(const char* reason);
+
+ void touch(const FireDetail& detail)
+ {
+ if (isFat()) {
+ fat()->touch(detail);
+ return;
+ }
+ uintptr_t data = m_data;
+ if (decodeState(data) == IsInvalidated)
+ return;
+ WTF::storeStoreFence();
+ if (decodeState(data) == ClearWatchpoint)
+ m_data = encodeState(IsWatched);
+ else
+ m_data = encodeState(IsInvalidated);
+ WTF::storeStoreFence();
+ }
+
+ void touch(const char* reason)
+ {
+ touch(StringFireDetail(reason));
+ }
+
+ bool isBeingWatched() const
+ {
+ if (isFat())
+ return fat()->isBeingWatched();
+ return false;
+ }
+
+private:
+ static const uintptr_t IsThinFlag = 1;
+ static const uintptr_t StateMask = 6;
+ static const uintptr_t StateShift = 1;
+
+ static bool isThin(uintptr_t data) { return data & IsThinFlag; }
+ static bool isFat(uintptr_t data) { return !isThin(data); }
+
+ static WatchpointState decodeState(uintptr_t data)
+ {
+ ASSERT(isThin(data));
+ return static_cast<WatchpointState>((data & StateMask) >> StateShift);
+ }
+
+ static uintptr_t encodeState(WatchpointState state)
+ {
+ return (static_cast<uintptr_t>(state) << StateShift) | IsThinFlag;
+ }
+
+ bool isThin() const { return isThin(m_data); }
+ bool isFat() const { return isFat(m_data); }
+
+ static WatchpointSet* fat(uintptr_t data)
+ {
+ return bitwise_cast<WatchpointSet*>(data);
+ }
+
+ WatchpointSet* fat()
+ {
+ ASSERT(isFat());
+ return fat(m_data);
+ }
+
+ const WatchpointSet* fat() const
+ {
+ ASSERT(isFat());
+ return fat(m_data);
+ }
+
+ WatchpointSet* inflate()
+ {
+ if (LIKELY(isFat()))
+ return fat();
+ return inflateSlow();
+ }
+
+ JS_EXPORT_PRIVATE WatchpointSet* inflateSlow();
+ JS_EXPORT_PRIVATE void freeFat();
+
+ uintptr_t m_data;
+};
+
+} // namespace JSC
+
+#endif // Watchpoint_h
+
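A brief usage sketch of the watchpoint API above (editor's illustration, not part of the patch). Watchpoint, WatchpointSet, FireDetail, and fireAll(const char*) are defined in the diff; the LoggingWatchpoint class, the exampleUse() function, and the log message are hypothetical:

    #include "Watchpoint.h"

    #include <wtf/DataLog.h>
    #include <wtf/RefPtr.h>

    // Hypothetical client watchpoint: logs the FireDetail when its set fires.
    class LoggingWatchpoint : public JSC::Watchpoint {
    protected:
        virtual void fireInternal(const JSC::FireDetail& detail) override
        {
            // The set unlinks this node before calling fireInternal(), so a
            // watchpoint fires at most once per add().
            WTF::dataLog("Watchpoint fired: ", detail, "\n");
        }
    };

    void exampleUse() // hypothetical harness
    {
        RefPtr<JSC::WatchpointSet> set = adoptRef(new JSC::WatchpointSet(JSC::ClearWatchpoint));
        LoggingWatchpoint watchpoint; // must stay alive until it fires or is removed
        set->add(&watchpoint);        // links the watchpoint; the set is now IsWatched
        set->fireAll("the guarded invariant was broken"); // fires and invalidates the set
        ASSERT(set->hasBeenInvalidated());
    }

Note the lifetime rule encoded in Watchpoint's destructor above: a watchpoint that is destroyed while still on a set's list removes itself, so stack allocation is safe as long as the set does not outlive the frame.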

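The thin encoding that InlineWatchpointSet relies on can also be checked in isolation. Below is a minimal standalone sketch restating the IsThinFlag/StateMask/StateShift constants from Watchpoint.h; the main() harness is the editor's, not WebKit's. The idea: a thin word has its low bit set to 1 and carries the state in bits 1-2, while a fat word is a raw WatchpointSet* whose low bit is 0 because heap allocations are at least word-aligned.

    #include <cassert>
    #include <cstdint>

    enum WatchpointState { ClearWatchpoint, IsWatched, IsInvalidated };

    // These constants restate the ones in Watchpoint.h above.
    static const uintptr_t IsThinFlag = 1;
    static const uintptr_t StateMask = 6; // bits 1-2 hold the state
    static const uintptr_t StateShift = 1;

    static uintptr_t encodeState(WatchpointState state)
    {
        return (static_cast<uintptr_t>(state) << StateShift) | IsThinFlag;
    }

    static WatchpointState decodeState(uintptr_t data)
    {
        return static_cast<WatchpointState>((data & StateMask) >> StateShift);
    }

    int main()
    {
        // Round-trip every state and confirm the thin flag stays set, so a
        // thin word can never be mistaken for an aligned pointer.
        for (int s = ClearWatchpoint; s <= IsInvalidated; ++s) {
            uintptr_t data = encodeState(static_cast<WatchpointState>(s));
            assert(data & IsThinFlag);
            assert(decodeState(data) == s);
        }
        return 0;
    }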