Diffstat (limited to 'Source/JavaScriptCore/ftl')
79 files changed, 19926 insertions, 0 deletions
diff --git a/Source/JavaScriptCore/ftl/FTLAbbreviatedTypes.h b/Source/JavaScriptCore/ftl/FTLAbbreviatedTypes.h new file mode 100644 index 000000000..ee3a15075 --- /dev/null +++ b/Source/JavaScriptCore/ftl/FTLAbbreviatedTypes.h @@ -0,0 +1,50 @@ +/* + * Copyright (C) 2013, 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef FTLAbbreviatedTypes_h +#define FTLAbbreviatedTypes_h + +#if ENABLE(FTL_JIT) + +#include "DFGCommon.h" + +namespace JSC { namespace B3 { +class BasicBlock; +class Value; +enum Type : int8_t; +} } + +namespace JSC { namespace FTL { + +typedef B3::BasicBlock* LBasicBlock; +typedef B3::Type LType; +typedef B3::Value* LValue; + +} } // namespace JSC::FTL + +#endif // ENABLE(FTL_JIT) + +#endif // FTLAbbreviatedTypes_h + diff --git a/Source/JavaScriptCore/ftl/FTLAbstractHeap.cpp b/Source/JavaScriptCore/ftl/FTLAbstractHeap.cpp new file mode 100644 index 000000000..90211fd94 --- /dev/null +++ b/Source/JavaScriptCore/ftl/FTLAbstractHeap.cpp @@ -0,0 +1,273 @@ +/* + * Copyright (C) 2013, 2015-2016 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. 
OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "FTLAbstractHeap.h" + +#if ENABLE(FTL_JIT) + +#include "DFGCommon.h" +#include "FTLAbbreviatedTypes.h" +#include "FTLAbstractHeapRepository.h" +#include "FTLOutput.h" +#include "FTLTypedPointer.h" +#include "JSCInlines.h" +#include "Options.h" + +namespace JSC { namespace FTL { + +using namespace B3; + +AbstractHeap::AbstractHeap(AbstractHeap* parent, const char* heapName, ptrdiff_t offset) + : m_offset(offset) + , m_heapName(heapName) +{ + changeParent(parent); +} + +void AbstractHeap::changeParent(AbstractHeap* parent) +{ + if (m_parent) { + bool result = m_parent->m_children.removeFirst(this); + RELEASE_ASSERT(result); + } + + m_parent = parent; + + if (parent) { + ASSERT(!m_parent->m_children.contains(this)); + m_parent->m_children.append(this); + } +} + +void AbstractHeap::compute(unsigned begin) +{ + // This recursively computes the ranges of the tree. This solves the following constraints + // in linear time: + // + // - A node's end is greater than its begin. + // - A node's begin is greater than or equal to its parent's begin. + // - A node's end is less than or equal to its parent's end. + // - The ranges are as small as possible. + // + // It's OK to recurse because we keep the depth of our abstract heap hierarchy fairly sane. + // I think that it gets 4 deep at most. + + if (m_children.isEmpty()) { + // Must special-case leaves so that they use just one slot on the number line. 
+ m_range = HeapRange(begin); + return; + } + + unsigned current = begin; + for (AbstractHeap* child : m_children) { + child->compute(current); + current = child->range().end(); + } + + m_range = HeapRange(begin, current); +} + +void AbstractHeap::shallowDump(PrintStream& out) const +{ + out.print(heapName(), "(", m_offset, ")"); + if (m_range) + out.print("<", m_range, ">"); +} + +void AbstractHeap::dump(PrintStream& out) const +{ + shallowDump(out); + if (m_parent) + out.print("->", *m_parent); +} + +void AbstractHeap::deepDump(PrintStream& out, unsigned indent) const +{ + auto printIndent = [&] () { + for (unsigned i = indent; i--;) + out.print(" "); + }; + + printIndent(); + shallowDump(out); + + if (m_children.isEmpty()) { + out.print("\n"); + return; + } + + out.print(":\n"); + for (AbstractHeap* child : m_children) + child->deepDump(out, indent + 1); +} + +void AbstractHeap::badRangeError() const +{ + dataLog("Heap does not have range: ", *this, "\n"); + RELEASE_ASSERT_NOT_REACHED(); +} + +IndexedAbstractHeap::IndexedAbstractHeap(AbstractHeap* parent, const char* heapName, ptrdiff_t offset, size_t elementSize) + : m_heapForAnyIndex(parent, heapName) + , m_heapNameLength(strlen(heapName)) + , m_offset(offset) + , m_elementSize(elementSize) +{ +} + +IndexedAbstractHeap::~IndexedAbstractHeap() +{ +} + +TypedPointer IndexedAbstractHeap::baseIndex(Output& out, LValue base, LValue index, JSValue indexAsConstant, ptrdiff_t offset) +{ + if (indexAsConstant.isInt32()) + return out.address(base, at(indexAsConstant.asInt32()), offset); + + LValue result = out.add(base, out.mul(index, out.constIntPtr(m_elementSize))); + + return TypedPointer(atAnyIndex(), out.addPtr(result, m_offset + offset)); +} + +const AbstractHeap& IndexedAbstractHeap::atSlow(ptrdiff_t index) +{ + ASSERT(static_cast<size_t>(index) >= m_smallIndices.size()); + + if (UNLIKELY(!m_largeIndices)) + m_largeIndices = std::make_unique<MapType>(); + + std::unique_ptr<AbstractHeap>& field = m_largeIndices->add(index, nullptr).iterator->value; + if (!field) { + field = std::make_unique<AbstractHeap>(); + initialize(*field, index); + } + + return *field; +} + +void IndexedAbstractHeap::initialize(AbstractHeap& field, ptrdiff_t signedIndex) +{ + // Build up a name of the form: + // + // heapName_hexIndex + // + // or: + // + // heapName_neg_hexIndex + // + // For example if you access an indexed heap called FooBar at index 5, you'll + // get: + // + // FooBar_5 + // + // Or if you access an indexed heap called Blah at index -10, you'll get: + // + // Blah_neg_A + // + // This naming convention comes from our previous use of LLVM. It's not clear that we need + // it anymore, though it is sort of nifty. Basically, B3 doesn't need string names for + // abstract heaps, but the fact that we have a reasonably efficient way to always name the + // heaps will probably come in handy for debugging. + + static const char* negSplit = "_neg_"; + static const char* posSplit = "_"; + + bool negative; + size_t index; + if (signedIndex < 0) { + negative = true; + index = -signedIndex; + } else { + negative = false; + index = signedIndex; + } + + for (unsigned power = 4; power <= sizeof(void*) * 8; power += 4) { + if (isGreaterThanNonZeroPowerOfTwo(index, power)) + continue; + + unsigned numHexlets = power >> 2; + + size_t stringLength = m_heapNameLength + (negative ? 
strlen(negSplit) : strlen(posSplit)) + numHexlets; + char* characters; + m_largeIndexNames.append(CString::newUninitialized(stringLength, characters)); + + memcpy(characters, m_heapForAnyIndex.heapName(), m_heapNameLength); + if (negative) + memcpy(characters + m_heapNameLength, negSplit, strlen(negSplit)); + else + memcpy(characters + m_heapNameLength, posSplit, strlen(posSplit)); + + size_t accumulator = index; + for (unsigned i = 0; i < numHexlets; ++i) { + characters[stringLength - i - 1] = lowerNibbleToASCIIHexDigit(accumulator); + accumulator >>= 4; + } + + field.initialize(&m_heapForAnyIndex, characters, m_offset + signedIndex * m_elementSize); + return; + } + + RELEASE_ASSERT_NOT_REACHED(); +} + +void IndexedAbstractHeap::dump(PrintStream& out) const +{ + out.print("Indexed:", atAnyIndex()); +} + +NumberedAbstractHeap::NumberedAbstractHeap(AbstractHeap* heap, const char* heapName) + : m_indexedHeap(heap, heapName, 0, 1) +{ +} + +NumberedAbstractHeap::~NumberedAbstractHeap() +{ +} + +void NumberedAbstractHeap::dump(PrintStream& out) const +{ + out.print("Numbered: ", atAnyNumber()); +} + +AbsoluteAbstractHeap::AbsoluteAbstractHeap(AbstractHeap* heap, const char* heapName) + : m_indexedHeap(heap, heapName, 0, 1) +{ +} + +AbsoluteAbstractHeap::~AbsoluteAbstractHeap() +{ +} + +void AbsoluteAbstractHeap::dump(PrintStream& out) const +{ + out.print("Absolute:", atAnyAddress()); +} + +} } // namespace JSC::FTL + +#endif // ENABLE(FTL_JIT) + diff --git a/Source/JavaScriptCore/ftl/FTLAbstractHeap.h b/Source/JavaScriptCore/ftl/FTLAbstractHeap.h new file mode 100644 index 000000000..9b17868c2 --- /dev/null +++ b/Source/JavaScriptCore/ftl/FTLAbstractHeap.h @@ -0,0 +1,220 @@ +/* + * Copyright (C) 2013, 2015-2016 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef FTLAbstractHeap_h +#define FTLAbstractHeap_h + +#if ENABLE(FTL_JIT) + +#include "B3HeapRange.h" +#include "FTLAbbreviatedTypes.h" +#include "JSCJSValue.h" +#include <array> +#include <wtf/FastMalloc.h> +#include <wtf/HashMap.h> +#include <wtf/Noncopyable.h> +#include <wtf/Vector.h> +#include <wtf/text/CString.h> + +namespace JSC { namespace FTL { + +class AbstractHeapRepository; +class Output; +class TypedPointer; + +class AbstractHeap { + WTF_MAKE_NONCOPYABLE(AbstractHeap); WTF_MAKE_FAST_ALLOCATED; +public: + AbstractHeap() + { + } + + AbstractHeap(AbstractHeap* parent, const char* heapName, ptrdiff_t offset = 0); + + bool isInitialized() const { return !!m_heapName; } + + void initialize(AbstractHeap* parent, const char* heapName, ptrdiff_t offset = 0) + { + changeParent(parent); + m_heapName = heapName; + m_offset = offset; + } + + void changeParent(AbstractHeap* parent); + + AbstractHeap* parent() const + { + ASSERT(isInitialized()); + return m_parent; + } + + const Vector<AbstractHeap*>& children() const; + + const char* heapName() const + { + ASSERT(isInitialized()); + return m_heapName; + } + + B3::HeapRange range() const + { + // This will not have a valid value until after all lowering is done. Do associate an + // AbstractHeap with a B3::Value*, use AbstractHeapRepository::decorateXXX(). + if (!m_range) + badRangeError(); + + return m_range; + } + + // WARNING: Not all abstract heaps have a meaningful offset. + ptrdiff_t offset() const + { + ASSERT(isInitialized()); + return m_offset; + } + + void compute(unsigned begin = 0); + + // Print information about just this heap. + void shallowDump(PrintStream&) const; + + // Print information about this heap and its ancestors. This is the default. + void dump(PrintStream&) const; + + // Print information about this heap and its descendents. This is a multi-line dump. 
+ void deepDump(PrintStream&, unsigned indent = 0) const; + +private: + friend class AbstractHeapRepository; + + NO_RETURN_DUE_TO_CRASH void badRangeError() const; + + AbstractHeap* m_parent { nullptr }; + Vector<AbstractHeap*> m_children; + intptr_t m_offset { 0 }; + B3::HeapRange m_range; + const char* m_heapName { nullptr }; +}; + +class IndexedAbstractHeap { +public: + IndexedAbstractHeap(AbstractHeap* parent, const char* heapName, ptrdiff_t offset, size_t elementSize); + ~IndexedAbstractHeap(); + + const AbstractHeap& atAnyIndex() const { return m_heapForAnyIndex; } + + const AbstractHeap& at(ptrdiff_t index) + { + if (static_cast<size_t>(index) < m_smallIndices.size()) + return returnInitialized(m_smallIndices[index], index); + return atSlow(index); + } + + const AbstractHeap& operator[](ptrdiff_t index) { return at(index); } + + TypedPointer baseIndex(Output& out, LValue base, LValue index, JSValue indexAsConstant = JSValue(), ptrdiff_t offset = 0); + + void dump(PrintStream&) const; + +private: + const AbstractHeap& returnInitialized(AbstractHeap& field, ptrdiff_t index) + { + if (UNLIKELY(!field.isInitialized())) + initialize(field, index); + return field; + } + + const AbstractHeap& atSlow(ptrdiff_t index); + void initialize(AbstractHeap& field, ptrdiff_t index); + + AbstractHeap m_heapForAnyIndex; + size_t m_heapNameLength; + ptrdiff_t m_offset; + size_t m_elementSize; + std::array<AbstractHeap, 16> m_smallIndices; + + struct WithoutZeroOrOneHashTraits : WTF::GenericHashTraits<ptrdiff_t> { + static void constructDeletedValue(ptrdiff_t& slot) { slot = 1; } + static bool isDeletedValue(ptrdiff_t value) { return value == 1; } + }; + typedef HashMap<ptrdiff_t, std::unique_ptr<AbstractHeap>, WTF::IntHash<ptrdiff_t>, WithoutZeroOrOneHashTraits> MapType; + + std::unique_ptr<MapType> m_largeIndices; + Vector<CString, 16> m_largeIndexNames; +}; + +// A numbered abstract heap is like an indexed abstract heap, except that you +// can't rely on there being a relationship between the number you use to +// retrieve the sub-heap, and the offset that this heap has. (In particular, +// the sub-heaps don't have indices.) + +class NumberedAbstractHeap { +public: + NumberedAbstractHeap(AbstractHeap* parent, const char* heapName); + ~NumberedAbstractHeap(); + + const AbstractHeap& atAnyNumber() const { return m_indexedHeap.atAnyIndex(); } + + const AbstractHeap& at(unsigned number) { return m_indexedHeap.at(number); } + const AbstractHeap& operator[](unsigned number) { return at(number); } + + void dump(PrintStream&) const; + +private: + + // We use the fact that the indexed heap already has a superset of the + // functionality we need. + IndexedAbstractHeap m_indexedHeap; +}; + +class AbsoluteAbstractHeap { +public: + AbsoluteAbstractHeap(AbstractHeap* parent, const char* heapName); + ~AbsoluteAbstractHeap(); + + const AbstractHeap& atAnyAddress() const { return m_indexedHeap.atAnyIndex(); } + + const AbstractHeap& at(void* address) + { + return m_indexedHeap.at(bitwise_cast<ptrdiff_t>(address)); + } + + const AbstractHeap& operator[](void* address) { return at(address); } + + void dump(PrintStream&) const; + +private: + // The trick here is that the indexed heap is "indexed" by a pointer-width + // integer. Pointers are themselves pointer-width integers. So we can reuse + // all of the functionality. 
+ IndexedAbstractHeap m_indexedHeap; +}; + +} } // namespace JSC::FTL + +#endif // ENABLE(FTL_JIT) + +#endif // FTLAbstractHeap_h + diff --git a/Source/JavaScriptCore/ftl/FTLAbstractHeapRepository.cpp b/Source/JavaScriptCore/ftl/FTLAbstractHeapRepository.cpp new file mode 100644 index 000000000..04d7a689d --- /dev/null +++ b/Source/JavaScriptCore/ftl/FTLAbstractHeapRepository.cpp @@ -0,0 +1,141 @@ +/* + * Copyright (C) 2013-2016 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "FTLAbstractHeapRepository.h" + +#if ENABLE(FTL_JIT) + +#include "B3CCallValue.h" +#include "B3MemoryValue.h" +#include "B3PatchpointValue.h" +#include "B3ValueInlines.h" +#include "DirectArguments.h" +#include "FTLState.h" +#include "GetterSetter.h" +#include "JSEnvironmentRecord.h" +#include "JSPropertyNameEnumerator.h" +#include "JSScope.h" +#include "JSCInlines.h" +#include "ScopedArguments.h" +#include "ScopedArgumentsTable.h" + +namespace JSC { namespace FTL { + +using namespace B3; + +AbstractHeapRepository::AbstractHeapRepository() + : root(nullptr, "jscRoot") + +#define ABSTRACT_HEAP_INITIALIZATION(name) , name(&root, #name) + FOR_EACH_ABSTRACT_HEAP(ABSTRACT_HEAP_INITIALIZATION) +#undef ABSTRACT_HEAP_INITIALIZATION + +#define ABSTRACT_FIELD_INITIALIZATION(name, offset) , name(&root, #name, offset) + FOR_EACH_ABSTRACT_FIELD(ABSTRACT_FIELD_INITIALIZATION) +#undef ABSTRACT_FIELD_INITIALIZATION + + , JSCell_freeListNext(JSCell_header) + +#define INDEXED_ABSTRACT_HEAP_INITIALIZATION(name, offset, size) , name(&root, #name, offset, size) + FOR_EACH_INDEXED_ABSTRACT_HEAP(INDEXED_ABSTRACT_HEAP_INITIALIZATION) +#undef INDEXED_ABSTRACT_HEAP_INITIALIZATION + +#define NUMBERED_ABSTRACT_HEAP_INITIALIZATION(name) , name(&root, #name) + FOR_EACH_NUMBERED_ABSTRACT_HEAP(NUMBERED_ABSTRACT_HEAP_INITIALIZATION) +#undef NUMBERED_ABSTRACT_HEAP_INITIALIZATION + + , absolute(&root, "absolute") +{ + // Make sure that our explicit assumptions about the StructureIDBlob match reality. 
+ RELEASE_ASSERT(!(JSCell_indexingType.offset() & (sizeof(int32_t) - 1))); + RELEASE_ASSERT(JSCell_indexingType.offset() + 1 == JSCell_typeInfoType.offset()); + RELEASE_ASSERT(JSCell_indexingType.offset() + 2 == JSCell_typeInfoFlags.offset()); + RELEASE_ASSERT(JSCell_indexingType.offset() + 3 == JSCell_cellState.offset()); + + JSCell_structureID.changeParent(&JSCell_header); + JSCell_usefulBytes.changeParent(&JSCell_header); + JSCell_indexingType.changeParent(&JSCell_usefulBytes); + JSCell_typeInfoType.changeParent(&JSCell_usefulBytes); + JSCell_typeInfoFlags.changeParent(&JSCell_usefulBytes); + JSCell_cellState.changeParent(&JSCell_usefulBytes); + + RELEASE_ASSERT(!JSCell_freeListNext.offset()); +} + +AbstractHeapRepository::~AbstractHeapRepository() +{ +} + +void AbstractHeapRepository::decorateMemory(const AbstractHeap* heap, Value* value) +{ + m_heapForMemory.append(HeapForValue(heap, value)); +} + +void AbstractHeapRepository::decorateCCallRead(const AbstractHeap* heap, Value* value) +{ + m_heapForCCallRead.append(HeapForValue(heap, value)); +} + +void AbstractHeapRepository::decorateCCallWrite(const AbstractHeap* heap, Value* value) +{ + m_heapForCCallWrite.append(HeapForValue(heap, value)); +} + +void AbstractHeapRepository::decoratePatchpointRead(const AbstractHeap* heap, Value* value) +{ + m_heapForPatchpointRead.append(HeapForValue(heap, value)); +} + +void AbstractHeapRepository::decoratePatchpointWrite(const AbstractHeap* heap, Value* value) +{ + m_heapForPatchpointWrite.append(HeapForValue(heap, value)); +} + +void AbstractHeapRepository::computeRangesAndDecorateInstructions() +{ + root.compute(); + + if (verboseCompilationEnabled()) { + dataLog("Abstract Heap Repository:\n"); + root.deepDump(WTF::dataFile()); + } + + for (HeapForValue entry : m_heapForMemory) + entry.value->as<MemoryValue>()->setRange(entry.heap->range()); + for (HeapForValue entry : m_heapForCCallRead) + entry.value->as<CCallValue>()->effects.reads = entry.heap->range(); + for (HeapForValue entry : m_heapForCCallWrite) + entry.value->as<CCallValue>()->effects.writes = entry.heap->range(); + for (HeapForValue entry : m_heapForPatchpointRead) + entry.value->as<CCallValue>()->effects.reads = entry.heap->range(); + for (HeapForValue entry : m_heapForPatchpointWrite) + entry.value->as<CCallValue>()->effects.writes = entry.heap->range(); +} + +} } // namespace JSC::FTL + +#endif // ENABLE(FTL_JIT) + diff --git a/Source/JavaScriptCore/ftl/FTLAbstractHeapRepository.h b/Source/JavaScriptCore/ftl/FTLAbstractHeapRepository.h new file mode 100644 index 000000000..9088455be --- /dev/null +++ b/Source/JavaScriptCore/ftl/FTLAbstractHeapRepository.h @@ -0,0 +1,228 @@ +/* + * Copyright (C) 2013-2016 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. 
OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef FTLAbstractHeapRepository_h +#define FTLAbstractHeapRepository_h + +#if ENABLE(FTL_JIT) + +#include "B3Value.h" +#include "DFGArrayMode.h" +#include "FTLAbstractHeap.h" +#include "IndexingType.h" + +namespace JSC { namespace FTL { + +#define FOR_EACH_ABSTRACT_HEAP(macro) \ + macro(typedArrayProperties) + +#define FOR_EACH_ABSTRACT_FIELD(macro) \ + macro(ArrayBuffer_data, ArrayBuffer::offsetOfData()) \ + macro(Butterfly_arrayBuffer, Butterfly::offsetOfArrayBuffer()) \ + macro(Butterfly_publicLength, Butterfly::offsetOfPublicLength()) \ + macro(Butterfly_vectorLength, Butterfly::offsetOfVectorLength()) \ + macro(CallFrame_callerFrame, CallFrame::callerFrameOffset()) \ + macro(DirectArguments_callee, DirectArguments::offsetOfCallee()) \ + macro(DirectArguments_length, DirectArguments::offsetOfLength()) \ + macro(DirectArguments_minCapacity, DirectArguments::offsetOfMinCapacity()) \ + macro(DirectArguments_overrides, DirectArguments::offsetOfOverrides()) \ + macro(GetterSetter_getter, GetterSetter::offsetOfGetter()) \ + macro(GetterSetter_setter, GetterSetter::offsetOfSetter()) \ + macro(JSArrayBufferView_length, JSArrayBufferView::offsetOfLength()) \ + macro(JSArrayBufferView_mode, JSArrayBufferView::offsetOfMode()) \ + macro(JSArrayBufferView_vector, JSArrayBufferView::offsetOfVector()) \ + macro(JSCell_cellState, JSCell::cellStateOffset()) \ + macro(JSCell_header, 0) \ + macro(JSCell_indexingType, JSCell::indexingTypeOffset()) \ + macro(JSCell_structureID, JSCell::structureIDOffset()) \ + macro(JSCell_typeInfoFlags, JSCell::typeInfoFlagsOffset()) \ + macro(JSCell_typeInfoType, JSCell::typeInfoTypeOffset()) \ + macro(JSCell_usefulBytes, JSCell::indexingTypeOffset()) \ + macro(JSFunction_executable, JSFunction::offsetOfExecutable()) \ + macro(JSFunction_scope, JSFunction::offsetOfScopeChain()) \ + macro(JSFunction_rareData, JSFunction::offsetOfRareData()) \ + macro(JSObject_butterfly, JSObject::butterflyOffset()) \ + macro(JSPropertyNameEnumerator_cachedInlineCapacity, JSPropertyNameEnumerator::cachedInlineCapacityOffset()) \ + macro(JSPropertyNameEnumerator_cachedPropertyNamesVector, JSPropertyNameEnumerator::cachedPropertyNamesVectorOffset()) \ + macro(JSPropertyNameEnumerator_cachedStructureID, JSPropertyNameEnumerator::cachedStructureIDOffset()) \ + macro(JSPropertyNameEnumerator_endGenericPropertyIndex, JSPropertyNameEnumerator::endGenericPropertyIndexOffset()) \ + macro(JSPropertyNameEnumerator_endStructurePropertyIndex, JSPropertyNameEnumerator::endStructurePropertyIndexOffset()) \ + macro(JSPropertyNameEnumerator_indexLength, JSPropertyNameEnumerator::indexedLengthOffset()) \ + macro(JSScope_next, JSScope::offsetOfNext()) \ + macro(JSString_flags, JSString::offsetOfFlags()) \ + macro(JSString_length, JSString::offsetOfLength()) \ + macro(JSString_value, JSString::offsetOfValue()) \ + macro(JSSymbolTableObject_symbolTable, JSSymbolTableObject::offsetOfSymbolTable()) \ + macro(JSWrapperObject_internalValue, JSWrapperObject::internalValueOffset()) \ + 
macro(MarkedAllocator_freeListHead, MarkedAllocator::offsetOfFreeListHead()) \ + macro(ScopedArguments_overrodeThings, ScopedArguments::offsetOfOverrodeThings()) \ + macro(ScopedArguments_scope, ScopedArguments::offsetOfScope()) \ + macro(ScopedArguments_table, ScopedArguments::offsetOfTable()) \ + macro(ScopedArguments_totalLength, ScopedArguments::offsetOfTotalLength()) \ + macro(ScopedArgumentsTable_arguments, ScopedArgumentsTable::offsetOfArguments()) \ + macro(ScopedArgumentsTable_length, ScopedArgumentsTable::offsetOfLength()) \ + macro(StringImpl_data, StringImpl::dataOffset()) \ + macro(StringImpl_hashAndFlags, StringImpl::flagsOffset()) \ + macro(Structure_classInfo, Structure::classInfoOffset()) \ + macro(Structure_globalObject, Structure::globalObjectOffset()) \ + macro(Structure_prototype, Structure::prototypeOffset()) \ + macro(Structure_structureID, Structure::structureIDOffset()) \ + macro(Symbol_privateName, Symbol::offsetOfPrivateName()) + +#define FOR_EACH_INDEXED_ABSTRACT_HEAP(macro) \ + macro(DirectArguments_storage, DirectArguments::storageOffset(), sizeof(EncodedJSValue)) \ + macro(JSEnvironmentRecord_variables, JSEnvironmentRecord::offsetOfVariables(), sizeof(EncodedJSValue)) \ + macro(JSPropertyNameEnumerator_cachedPropertyNamesVectorContents, 0, sizeof(WriteBarrier<JSString>)) \ + macro(JSRopeString_fibers, JSRopeString::offsetOfFibers(), sizeof(WriteBarrier<JSString>)) \ + macro(MarkedSpace_Subspace_impreciseAllocators, OBJECT_OFFSETOF(MarkedSpace::Subspace, impreciseAllocators), sizeof(MarkedAllocator)) \ + macro(MarkedSpace_Subspace_preciseAllocators, OBJECT_OFFSETOF(MarkedSpace::Subspace, preciseAllocators), sizeof(MarkedAllocator)) \ + macro(ScopedArguments_overflowStorage, ScopedArguments::overflowStorageOffset(), sizeof(EncodedJSValue)) \ + macro(WriteBarrierBuffer_bufferContents, 0, sizeof(JSCell*)) \ + macro(characters8, 0, sizeof(LChar)) \ + macro(characters16, 0, sizeof(UChar)) \ + macro(indexedInt32Properties, 0, sizeof(EncodedJSValue)) \ + macro(indexedDoubleProperties, 0, sizeof(double)) \ + macro(indexedContiguousProperties, 0, sizeof(EncodedJSValue)) \ + macro(indexedArrayStorageProperties, 0, sizeof(EncodedJSValue)) \ + macro(scopedArgumentsTableArguments, 0, sizeof(int32_t)) \ + macro(singleCharacterStrings, 0, sizeof(JSString*)) \ + macro(structureTable, 0, sizeof(Structure*)) \ + macro(variables, 0, sizeof(Register)) + +#define FOR_EACH_NUMBERED_ABSTRACT_HEAP(macro) \ + macro(properties) + +// This class is meant to be cacheable between compilations, but it doesn't have to be. +// Doing so saves on creation of nodes. But clearing it will save memory. 
+ +class AbstractHeapRepository { + WTF_MAKE_NONCOPYABLE(AbstractHeapRepository); +public: + AbstractHeapRepository(); + ~AbstractHeapRepository(); + + AbstractHeap root; + +#define ABSTRACT_HEAP_DECLARATION(name) AbstractHeap name; + FOR_EACH_ABSTRACT_HEAP(ABSTRACT_HEAP_DECLARATION) +#undef ABSTRACT_HEAP_DECLARATION + +#define ABSTRACT_FIELD_DECLARATION(name, offset) AbstractHeap name; + FOR_EACH_ABSTRACT_FIELD(ABSTRACT_FIELD_DECLARATION) +#undef ABSTRACT_FIELD_DECLARATION + + AbstractHeap& JSCell_freeListNext; + +#define INDEXED_ABSTRACT_HEAP_DECLARATION(name, offset, size) IndexedAbstractHeap name; + FOR_EACH_INDEXED_ABSTRACT_HEAP(INDEXED_ABSTRACT_HEAP_DECLARATION) +#undef INDEXED_ABSTRACT_HEAP_DECLARATION + +#define NUMBERED_ABSTRACT_HEAP_DECLARATION(name) NumberedAbstractHeap name; + FOR_EACH_NUMBERED_ABSTRACT_HEAP(NUMBERED_ABSTRACT_HEAP_DECLARATION) +#undef NUMBERED_ABSTRACT_HEAP_DECLARATION + + AbsoluteAbstractHeap absolute; + + IndexedAbstractHeap* forIndexingType(IndexingType indexingType) + { + switch (indexingType) { + case ALL_BLANK_INDEXING_TYPES: + case ALL_UNDECIDED_INDEXING_TYPES: + return 0; + + case ALL_INT32_INDEXING_TYPES: + return &indexedInt32Properties; + + case ALL_DOUBLE_INDEXING_TYPES: + return &indexedDoubleProperties; + + case ALL_CONTIGUOUS_INDEXING_TYPES: + return &indexedContiguousProperties; + + case ALL_ARRAY_STORAGE_INDEXING_TYPES: + return &indexedArrayStorageProperties; + + default: + RELEASE_ASSERT_NOT_REACHED(); + return 0; + } + } + + IndexedAbstractHeap& forArrayType(DFG::Array::Type type) + { + switch (type) { + case DFG::Array::Int32: + return indexedInt32Properties; + case DFG::Array::Double: + return indexedDoubleProperties; + case DFG::Array::Contiguous: + return indexedContiguousProperties; + case DFG::Array::ArrayStorage: + case DFG::Array::SlowPutArrayStorage: + return indexedArrayStorageProperties; + default: + RELEASE_ASSERT_NOT_REACHED(); + return indexedInt32Properties; + } + } + + void decorateMemory(const AbstractHeap*, B3::Value*); + void decorateCCallRead(const AbstractHeap*, B3::Value*); + void decorateCCallWrite(const AbstractHeap*, B3::Value*); + void decoratePatchpointRead(const AbstractHeap*, B3::Value*); + void decoratePatchpointWrite(const AbstractHeap*, B3::Value*); + + void computeRangesAndDecorateInstructions(); + +private: + + struct HeapForValue { + HeapForValue() + { + } + + HeapForValue(const AbstractHeap* heap, B3::Value* value) + : heap(heap) + , value(value) + { + } + + const AbstractHeap* heap { nullptr }; + B3::Value* value { nullptr }; + }; + + Vector<HeapForValue> m_heapForMemory; + Vector<HeapForValue> m_heapForCCallRead; + Vector<HeapForValue> m_heapForCCallWrite; + Vector<HeapForValue> m_heapForPatchpointRead; + Vector<HeapForValue> m_heapForPatchpointWrite; +}; + +} } // namespace JSC::FTL + +#endif // ENABLE(FTL_JIT) + +#endif // FTLAbstractHeapRepository_h + diff --git a/Source/JavaScriptCore/ftl/FTLAvailableRecovery.cpp b/Source/JavaScriptCore/ftl/FTLAvailableRecovery.cpp new file mode 100644 index 000000000..6c00fa4d9 --- /dev/null +++ b/Source/JavaScriptCore/ftl/FTLAvailableRecovery.cpp @@ -0,0 +1,41 @@ +/* + * Copyright (C) 2014 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "FTLAvailableRecovery.h" + +#if ENABLE(FTL_JIT) + +namespace JSC { namespace FTL { + +void AvailableRecovery::dump(PrintStream& out) const +{ + out.print(node(), " => ", opcode(), ", ", RawPointer(m_left), ", ", RawPointer(m_right), ", ", m_format); +} + +} } // namespace JSC::FTL + +#endif // ENABLE(FTL_JIT) + diff --git a/Source/JavaScriptCore/ftl/FTLAvailableRecovery.h b/Source/JavaScriptCore/ftl/FTLAvailableRecovery.h new file mode 100644 index 000000000..8932576c3 --- /dev/null +++ b/Source/JavaScriptCore/ftl/FTLAvailableRecovery.h @@ -0,0 +1,79 @@ +/* + * Copyright (C) 2014 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef FTLAvailableRecovery_h +#define FTLAvailableRecovery_h + +#if ENABLE(FTL_JIT) + +#include "DFGNode.h" +#include "DataFormat.h" +#include "FTLAbbreviatedTypes.h" +#include "FTLRecoveryOpcode.h" + +namespace JSC { namespace FTL { + +class AvailableRecovery { +public: + AvailableRecovery() + : m_node(0) + , m_format(DataFormatNone) + , m_opcode(AddRecovery) + , m_left(0) + , m_right(0) + { + } + + AvailableRecovery(DFG::Node* node, RecoveryOpcode opcode, LValue left, LValue right, DataFormat format) + : m_node(node) + , m_format(format) + , m_opcode(opcode) + , m_left(left) + , m_right(right) + { + } + + DFG::Node* node() const { return m_node; } + DataFormat format() const { return m_format; } + RecoveryOpcode opcode() const { return m_opcode; } + LValue left() const { return m_left; } + LValue right() const { return m_right; } + + void dump(PrintStream&) const; + +private: + DFG::Node* m_node; + DataFormat m_format; + RecoveryOpcode m_opcode; + LValue m_left; + LValue m_right; +}; + +} } // namespace JSC::FTL + +#endif // ENABLE(FTL_JIT) + +#endif // FTLAvailableRecovery_h + diff --git a/Source/JavaScriptCore/ftl/FTLCapabilities.cpp b/Source/JavaScriptCore/ftl/FTLCapabilities.cpp new file mode 100644 index 000000000..b0ea72d1d --- /dev/null +++ b/Source/JavaScriptCore/ftl/FTLCapabilities.cpp @@ -0,0 +1,521 @@ +/* + * Copyright (C) 2013-2016 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "FTLCapabilities.h" + +#if ENABLE(FTL_JIT) + +namespace JSC { namespace FTL { + +using namespace DFG; + +static bool verboseCapabilities() +{ + return verboseCompilationEnabled() || Options::verboseFTLFailure(); +} + +inline CapabilityLevel canCompile(Node* node) +{ + // NOTE: If we ever have phantom arguments, we can compile them but we cannot + // OSR enter. 
+ + switch (node->op()) { + case JSConstant: + case GetLocal: + case SetLocal: + case PutStack: + case KillStack: + case GetStack: + case MovHint: + case ZombieHint: + case ExitOK: + case Phantom: + case Flush: + case PhantomLocal: + case SetArgument: + case Return: + case BitAnd: + case BitOr: + case BitXor: + case BitRShift: + case BitLShift: + case BitURShift: + case CheckStructure: + case DoubleAsInt32: + case ArrayifyToStructure: + case PutStructure: + case GetButterfly: + case GetButterflyReadOnly: + case NewObject: + case NewArray: + case NewArrayBuffer: + case NewTypedArray: + case GetByOffset: + case GetGetterSetterByOffset: + case GetGetter: + case GetSetter: + case PutByOffset: + case GetGlobalVar: + case GetGlobalLexicalVariable: + case PutGlobalVariable: + case ValueAdd: + case StrCat: + case ArithAdd: + case ArithClz32: + case ArithSub: + case ArithMul: + case ArithDiv: + case ArithMod: + case ArithMin: + case ArithMax: + case ArithAbs: + case ArithSin: + case ArithCos: + case ArithPow: + case ArithRandom: + case ArithRound: + case ArithFloor: + case ArithCeil: + case ArithSqrt: + case ArithLog: + case ArithFRound: + case ArithNegate: + case UInt32ToNumber: + case Jump: + case ForceOSRExit: + case Phi: + case Upsilon: + case ExtractOSREntryLocal: + case LoopHint: + case SkipScope: + case CreateActivation: + case NewArrowFunction: + case NewFunction: + case NewGeneratorFunction: + case GetClosureVar: + case PutClosureVar: + case CreateDirectArguments: + case CreateScopedArguments: + case CreateClonedArguments: + case GetFromArguments: + case PutToArguments: + case InvalidationPoint: + case StringCharAt: + case CheckCell: + case CheckBadCell: + case CheckNotEmpty: + case CheckIdent: + case CheckWatchdogTimer: + case StringCharCodeAt: + case StringFromCharCode: + case AllocatePropertyStorage: + case ReallocatePropertyStorage: + case GetTypedArrayByteOffset: + case NotifyWrite: + case StoreBarrier: + case Call: + case TailCall: + case TailCallInlinedCaller: + case Construct: + case CallVarargs: + case TailCallVarargs: + case TailCallVarargsInlinedCaller: + case ConstructVarargs: + case CallForwardVarargs: + case TailCallForwardVarargs: + case TailCallForwardVarargsInlinedCaller: + case ConstructForwardVarargs: + case LoadVarargs: + case ValueToInt32: + case Branch: + case LogicalNot: + case CheckInBounds: + case ConstantStoragePointer: + case Check: + case CountExecution: + case GetExecutable: + case GetScope: + case GetCallee: + case GetArgumentCount: + case ToString: + case CallStringConstructor: + case MakeRope: + case NewArrayWithSize: + case GetById: + case GetByIdFlush: + case ToThis: + case MultiGetByOffset: + case MultiPutByOffset: + case ToPrimitive: + case Throw: + case ThrowReferenceError: + case Unreachable: + case IsUndefined: + case IsBoolean: + case IsNumber: + case IsString: + case IsObject: + case IsObjectOrNull: + case IsFunction: + case CheckTypeInfoFlags: + case OverridesHasInstance: + case InstanceOf: + case InstanceOfCustom: + case DoubleRep: + case ValueRep: + case Int52Rep: + case DoubleConstant: + case Int52Constant: + case BooleanToNumber: + case HasGenericProperty: + case HasStructureProperty: + case GetDirectPname: + case GetEnumerableLength: + case GetPropertyEnumerator: + case GetEnumeratorStructurePname: + case GetEnumeratorGenericPname: + case ToIndexString: + case BottomValue: + case PhantomNewObject: + case PhantomNewFunction: + case PhantomNewGeneratorFunction: + case PhantomCreateActivation: + case PutHint: + case CheckStructureImmediate: + 
case MaterializeNewObject: + case MaterializeCreateActivation: + case PhantomDirectArguments: + case PhantomClonedArguments: + case GetMyArgumentByVal: + case ForwardVarargs: + case Switch: + case TypeOf: + case PutGetterById: + case PutSetterById: + case PutGetterSetterById: + case PutGetterByVal: + case PutSetterByVal: + case CopyRest: + case GetRestLength: + case RegExpExec: + case RegExpTest: + case NewRegexp: + case StringReplace: + // These are OK. + break; + + case Identity: + // No backend handles this because it will be optimized out. But we may check + // for capabilities before optimization. It would be a deep error to remove this + // case because it would prevent us from catching bugs where the FTL backend + // pipeline failed to optimize out an Identity. + break; + case In: + if (node->child2().useKind() == CellUse) + break; + return CannotCompile; + case PutByIdDirect: + case PutById: + case PutByIdFlush: + if (node->child1().useKind() == CellUse) + break; + return CannotCompile; + case GetIndexedPropertyStorage: + if (node->arrayMode().type() == Array::String) + break; + if (isTypedView(node->arrayMode().typedArrayType())) + break; + return CannotCompile; + case CheckArray: + switch (node->arrayMode().type()) { + case Array::Int32: + case Array::Double: + case Array::Contiguous: + case Array::DirectArguments: + case Array::ScopedArguments: + break; + default: + if (isTypedView(node->arrayMode().typedArrayType())) + break; + return CannotCompile; + } + break; + case GetArrayLength: + switch (node->arrayMode().type()) { + case Array::Int32: + case Array::Double: + case Array::Contiguous: + case Array::String: + case Array::DirectArguments: + case Array::ScopedArguments: + break; + default: + if (node->arrayMode().isSomeTypedArrayView()) + break; + return CannotCompile; + } + break; + case HasIndexedProperty: + switch (node->arrayMode().type()) { + case Array::ForceExit: + case Array::Int32: + case Array::Double: + case Array::Contiguous: + break; + default: + return CannotCompile; + } + break; + case GetByVal: + switch (node->arrayMode().type()) { + case Array::ForceExit: + case Array::Generic: + case Array::String: + case Array::Int32: + case Array::Double: + case Array::Contiguous: + case Array::Undecided: + case Array::DirectArguments: + case Array::ScopedArguments: + break; + default: + if (isTypedView(node->arrayMode().typedArrayType())) + return CanCompileAndOSREnter; + return CannotCompile; + } + break; + case PutByVal: + case PutByValAlias: + case PutByValDirect: + switch (node->arrayMode().type()) { + case Array::ForceExit: + case Array::Generic: + case Array::Int32: + case Array::Double: + case Array::Contiguous: + break; + default: + if (isTypedView(node->arrayMode().typedArrayType())) + return CanCompileAndOSREnter; + return CannotCompile; + } + break; + case ArrayPush: + case ArrayPop: + switch (node->arrayMode().type()) { + case Array::Int32: + case Array::Contiguous: + case Array::Double: + break; + default: + return CannotCompile; + } + break; + case CompareEq: + if (node->isBinaryUseKind(Int32Use)) + break; + if (node->isBinaryUseKind(Int52RepUse)) + break; + if (node->isBinaryUseKind(DoubleRepUse)) + break; + if (node->isBinaryUseKind(StringIdentUse)) + break; + if (node->isBinaryUseKind(StringUse)) + break; + if (node->isBinaryUseKind(SymbolUse)) + break; + if (node->isBinaryUseKind(ObjectUse)) + break; + if (node->isBinaryUseKind(UntypedUse)) + break; + if (node->isBinaryUseKind(BooleanUse)) + break; + if (node->isBinaryUseKind(ObjectUse, 
ObjectOrOtherUse)) + break; + if (node->isBinaryUseKind(ObjectOrOtherUse, ObjectUse)) + break; + if (node->child1().useKind() == OtherUse || node->child2().useKind() == OtherUse) + break; + return CannotCompile; + case CompareStrictEq: + if (node->isBinaryUseKind(Int32Use)) + break; + if (node->isBinaryUseKind(Int52RepUse)) + break; + if (node->isBinaryUseKind(DoubleRepUse)) + break; + if (node->isBinaryUseKind(StringIdentUse)) + break; + if (node->isBinaryUseKind(StringUse)) + break; + if (node->isBinaryUseKind(ObjectUse, UntypedUse)) + break; + if (node->isBinaryUseKind(UntypedUse, ObjectUse)) + break; + if (node->isBinaryUseKind(ObjectUse)) + break; + if (node->isBinaryUseKind(BooleanUse)) + break; + if (node->isBinaryUseKind(SymbolUse)) + break; + if (node->isBinaryUseKind(MiscUse, UntypedUse)) + break; + if (node->isBinaryUseKind(UntypedUse, MiscUse)) + break; + if (node->isBinaryUseKind(StringIdentUse, NotStringVarUse)) + break; + if (node->isBinaryUseKind(NotStringVarUse, StringIdentUse)) + break; + return CannotCompile; + case CompareLess: + case CompareLessEq: + case CompareGreater: + case CompareGreaterEq: + if (node->isBinaryUseKind(Int32Use)) + break; + if (node->isBinaryUseKind(Int52RepUse)) + break; + if (node->isBinaryUseKind(DoubleRepUse)) + break; + if (node->isBinaryUseKind(UntypedUse)) + break; + return CannotCompile; + default: + // Don't know how to handle anything else. + return CannotCompile; + } + return CanCompileAndOSREnter; +} + +CapabilityLevel canCompile(Graph& graph) +{ + if (graph.m_codeBlock->instructionCount() > Options::maximumFTLCandidateInstructionCount()) { + if (verboseCapabilities()) + dataLog("FTL rejecting ", *graph.m_codeBlock, " because it's too big.\n"); + return CannotCompile; + } + + if (graph.m_codeBlock->codeType() != FunctionCode) { + if (verboseCapabilities()) + dataLog("FTL rejecting ", *graph.m_codeBlock, " because it doesn't belong to a function.\n"); + return CannotCompile; + } + + CapabilityLevel result = CanCompileAndOSREnter; + + for (BlockIndex blockIndex = graph.numBlocks(); blockIndex--;) { + BasicBlock* block = graph.block(blockIndex); + if (!block) + continue; + + // We don't care if we can compile blocks that the CFA hasn't visited. + if (!block->cfaHasVisited) + continue; + + for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) { + Node* node = block->at(nodeIndex); + + for (unsigned childIndex = graph.numChildren(node); childIndex--;) { + Edge edge = graph.child(node, childIndex); + if (!edge) + continue; + switch (edge.useKind()) { + case UntypedUse: + case Int32Use: + case KnownInt32Use: + case Int52RepUse: + case NumberUse: + case RealNumberUse: + case DoubleRepUse: + case DoubleRepRealUse: + case BooleanUse: + case KnownBooleanUse: + case CellUse: + case KnownCellUse: + case CellOrOtherUse: + case ObjectUse: + case FunctionUse: + case ObjectOrOtherUse: + case StringUse: + case StringOrOtherUse: + case KnownStringUse: + case KnownPrimitiveUse: + case StringObjectUse: + case StringOrStringObjectUse: + case SymbolUse: + case FinalObjectUse: + case RegExpObjectUse: + case NotCellUse: + case OtherUse: + case MiscUse: + case StringIdentUse: + case NotStringVarUse: + case MachineIntUse: + case DoubleRepMachineIntUse: + // These are OK. + break; + default: + // Don't know how to handle anything else. 
+ if (verboseCapabilities()) { + dataLog("FTL rejecting node in ", *graph.m_codeBlock, " because of bad use kind: ", edge.useKind(), " in node:\n"); + graph.dump(WTF::dataFile(), " ", node); + } + return CannotCompile; + } + } + + switch (canCompile(node)) { + case CannotCompile: + if (verboseCapabilities()) { + dataLog("FTL rejecting node in ", *graph.m_codeBlock, ":\n"); + graph.dump(WTF::dataFile(), " ", node); + } + return CannotCompile; + + case CanCompile: + if (result == CanCompileAndOSREnter && verboseCompilationEnabled()) { + dataLog("FTL disabling OSR entry because of node:\n"); + graph.dump(WTF::dataFile(), " ", node); + } + result = CanCompile; + break; + + case CanCompileAndOSREnter: + break; + } + + if (node->op() == ForceOSRExit) + break; + } + } + + return result; +} + +} } // namespace JSC::FTL + +#endif // ENABLE(FTL_JIT) + diff --git a/Source/JavaScriptCore/ftl/FTLCapabilities.h b/Source/JavaScriptCore/ftl/FTLCapabilities.h new file mode 100644 index 000000000..4a79b2a1b --- /dev/null +++ b/Source/JavaScriptCore/ftl/FTLCapabilities.h @@ -0,0 +1,48 @@ +/* + * Copyright (C) 2013 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef FTLCapabilities_h +#define FTLCapabilities_h + +#if ENABLE(FTL_JIT) + +#include "DFGGraph.h" + +namespace JSC { namespace FTL { + +enum CapabilityLevel { + CannotCompile, + CanCompile, + CanCompileAndOSREnter +}; + +CapabilityLevel canCompile(DFG::Graph&); + +} } // namespace JSC::FTL + +#endif // ENABLE(FTL_JIT) + +#endif // FTLCapabilities_h + diff --git a/Source/JavaScriptCore/ftl/FTLCommonValues.cpp b/Source/JavaScriptCore/ftl/FTLCommonValues.cpp new file mode 100644 index 000000000..a2801e123 --- /dev/null +++ b/Source/JavaScriptCore/ftl/FTLCommonValues.cpp @@ -0,0 +1,73 @@ +/* + * Copyright (C) 2013, 2014 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "FTLCommonValues.h" + +#include "B3BasicBlockInlines.h" +#include "B3Const32Value.h" +#include "B3Const64Value.h" +#include "B3ConstDoubleValue.h" +#include "B3ConstPtrValue.h" +#include "B3ProcedureInlines.h" +#include "B3Type.h" +#include "B3ValueInlines.h" + +#if ENABLE(FTL_JIT) + +namespace JSC { namespace FTL { + +using namespace B3; + +CommonValues::CommonValues() + : voidType(B3::Void) + , boolean(B3::Int32) + , int32(B3::Int32) + , int64(B3::Int64) + , intPtr(B3::pointerType()) + , floatType(B3::Float) + , doubleType(B3::Double) +{ +} + +void CommonValues::initializeConstants(B3::Procedure& proc, B3::BasicBlock* block) +{ + int32Zero = block->appendNew<Const32Value>(proc, Origin(), 0); + int32One = block->appendNew<Const32Value>(proc, Origin(), 1); + booleanTrue = int32One; + booleanFalse = int32Zero; + int64Zero = block->appendNew<Const64Value>(proc, Origin(), 0); + intPtrZero = block->appendNew<ConstPtrValue>(proc, Origin(), 0); + intPtrOne = block->appendNew<ConstPtrValue>(proc, Origin(), 1); + intPtrTwo = block->appendNew<ConstPtrValue>(proc, Origin(), 2); + intPtrThree = block->appendNew<ConstPtrValue>(proc, Origin(), 3); + intPtrEight = block->appendNew<ConstPtrValue>(proc, Origin(), 8); + doubleZero = block->appendNew<ConstDoubleValue>(proc, Origin(), 0.); +} + +} } // namespace JSC::FTL + +#endif // ENABLE(FTL_JIT) + diff --git a/Source/JavaScriptCore/ftl/FTLCommonValues.h b/Source/JavaScriptCore/ftl/FTLCommonValues.h new file mode 100644 index 000000000..49bf04b47 --- /dev/null +++ b/Source/JavaScriptCore/ftl/FTLCommonValues.h @@ -0,0 +1,80 @@ +/* + * Copyright (C) 2013, 2014 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. 
OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef FTLCommonValues_h +#define FTLCommonValues_h + +#if ENABLE(FTL_JIT) + +#include "FTLAbbreviatedTypes.h" +#include "FTLValueRange.h" + +namespace JSC { + +namespace B3 { +class BasicBlock; +class Procedure; +} + +namespace FTL { + +class CommonValues { +public: + CommonValues(); + + void initializeConstants(B3::Procedure&, B3::BasicBlock*); + + const LType voidType; + const LType boolean; + const LType int32; + const LType int64; + const LType intPtr; + const LType floatType; + const LType doubleType; + LValue booleanTrue { nullptr }; + LValue booleanFalse { nullptr }; + LValue int32Zero { nullptr }; + LValue int32One { nullptr }; + LValue int64Zero { nullptr }; + LValue intPtrZero { nullptr }; + LValue intPtrOne { nullptr }; + LValue intPtrTwo { nullptr }; + LValue intPtrThree { nullptr }; + LValue intPtrEight { nullptr }; + LValue doubleZero { nullptr }; + + const unsigned rangeKind { 0 }; + const unsigned profKind { 0 }; + const LValue branchWeights { nullptr }; + + const ValueRange nonNegativeInt32; +}; + +} } // namespace JSC::FTL + +#endif // ENABLE(FTL_JIT) + +#endif // FTLCommonValues_h + diff --git a/Source/JavaScriptCore/ftl/FTLCompile.cpp b/Source/JavaScriptCore/ftl/FTLCompile.cpp new file mode 100644 index 000000000..2efbf8df4 --- /dev/null +++ b/Source/JavaScriptCore/ftl/FTLCompile.cpp @@ -0,0 +1,153 @@ +/* + * Copyright (C) 2015-2016 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "config.h" +#include "FTLCompile.h" + +#if ENABLE(FTL_JIT) + +#include "AirCode.h" +#include "B3Generate.h" +#include "B3ProcedureInlines.h" +#include "B3StackSlot.h" +#include "CodeBlockWithJITType.h" +#include "CCallHelpers.h" +#include "DFGCommon.h" +#include "DFGGraphSafepoint.h" +#include "DFGOperations.h" +#include "DataView.h" +#include "Disassembler.h" +#include "FTLJITCode.h" +#include "FTLThunks.h" +#include "JITSubGenerator.h" +#include "LinkBuffer.h" +#include "PCToCodeOriginMap.h" +#include "ScratchRegisterAllocator.h" + +namespace JSC { namespace FTL { + +using namespace DFG; + +void compile(State& state, Safepoint::Result& safepointResult) +{ + Graph& graph = state.graph; + CodeBlock* codeBlock = graph.m_codeBlock; + VM& vm = graph.m_vm; + + { + GraphSafepoint safepoint(state.graph, safepointResult); + + B3::prepareForGeneration(*state.proc); + } + + if (safepointResult.didGetCancelled()) + return; + RELEASE_ASSERT(!state.graph.m_vm.heap.isCollecting()); + + if (state.allocationFailed) + return; + + std::unique_ptr<RegisterAtOffsetList> registerOffsets = + std::make_unique<RegisterAtOffsetList>(state.proc->calleeSaveRegisters()); + if (shouldDumpDisassembly()) { + dataLog("Unwind info for ", CodeBlockWithJITType(state.graph.m_codeBlock, JITCode::FTLJIT), ":\n"); + dataLog(" ", *registerOffsets, "\n"); + } + state.graph.m_codeBlock->setCalleeSaveRegisters(WTFMove(registerOffsets)); + ASSERT(!(state.proc->frameSize() % sizeof(EncodedJSValue))); + state.jitCode->common.frameRegisterCount = state.proc->frameSize() / sizeof(EncodedJSValue); + + int localsOffset = + state.capturedValue->offsetFromFP() / sizeof(EncodedJSValue) + graph.m_nextMachineLocal; + if (shouldDumpDisassembly()) { + dataLog( + "localsOffset = ", localsOffset, " for stack slot: ", + pointerDump(state.capturedValue), " at ", RawPointer(state.capturedValue), "\n"); + } + + for (unsigned i = graph.m_inlineVariableData.size(); i--;) { + InlineCallFrame* inlineCallFrame = graph.m_inlineVariableData[i].inlineCallFrame; + + if (inlineCallFrame->argumentCountRegister.isValid()) + inlineCallFrame->argumentCountRegister += localsOffset; + + for (unsigned argument = inlineCallFrame->arguments.size(); argument-- > 1;) { + inlineCallFrame->arguments[argument] = + inlineCallFrame->arguments[argument].withLocalsOffset(localsOffset); + } + + if (inlineCallFrame->isClosureCall) { + inlineCallFrame->calleeRecovery = + inlineCallFrame->calleeRecovery.withLocalsOffset(localsOffset); + } + + if (graph.hasDebuggerEnabled()) + codeBlock->setScopeRegister(codeBlock->scopeRegister() + localsOffset); + } + for (OSRExitDescriptor& descriptor : state.jitCode->osrExitDescriptors) { + for (unsigned i = descriptor.m_values.size(); i--;) + descriptor.m_values[i] = descriptor.m_values[i].withLocalsOffset(localsOffset); + for (ExitTimeObjectMaterialization* materialization : descriptor.m_materializations) + materialization->accountForLocalsOffset(localsOffset); + } + + // We will add exception handlers while generating. + codeBlock->clearExceptionHandlers(); + + CCallHelpers jit(&vm, codeBlock); + B3::generate(*state.proc, jit); + + // Emit the exception handler. 
+ *state.exceptionHandler = jit.label(); + jit.copyCalleeSavesToVMCalleeSavesBuffer(); + jit.move(MacroAssembler::TrustedImmPtr(jit.vm()), GPRInfo::argumentGPR0); + jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1); + CCallHelpers::Call call = jit.call(); + jit.jumpToExceptionHandler(); + jit.addLinkTask( + [=] (LinkBuffer& linkBuffer) { + linkBuffer.link(call, FunctionPtr(lookupExceptionHandler)); + }); + + state.finalizer->b3CodeLinkBuffer = std::make_unique<LinkBuffer>( + vm, jit, codeBlock, JITCompilationCanFail); + if (state.finalizer->b3CodeLinkBuffer->didFailToAllocate()) { + state.allocationFailed = true; + return; + } + + B3::PCToOriginMap originMap = state.proc->releasePCToOriginMap(); + if (vm.shouldBuilderPCToCodeOriginMapping()) + codeBlock->setPCToCodeOriginMap(std::make_unique<PCToCodeOriginMap>(PCToCodeOriginMapBuilder(vm, WTFMove(originMap)), *state.finalizer->b3CodeLinkBuffer)); + + state.generatedFunction = bitwise_cast<GeneratedFunction>( + state.finalizer->b3CodeLinkBuffer->entrypoint().executableAddress()); + state.jitCode->initializeB3Byproducts(state.proc->releaseByproducts()); +} + +} } // namespace JSC::FTL + +#endif // ENABLE(FTL_JIT) + diff --git a/Source/JavaScriptCore/ftl/FTLCompile.h b/Source/JavaScriptCore/ftl/FTLCompile.h new file mode 100644 index 000000000..e4da4bd3d --- /dev/null +++ b/Source/JavaScriptCore/ftl/FTLCompile.h @@ -0,0 +1,43 @@ +/* + * Copyright (C) 2013, 2014 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef FTLCompile_h +#define FTLCompile_h + +#if ENABLE(FTL_JIT) + +#include "DFGSafepoint.h" +#include "FTLState.h" + +namespace JSC { namespace FTL { + +void compile(State&, DFG::Safepoint::Result&); + +} } // namespace JSC::FTL + +#endif // ENABLE(FTL_JIT) + +#endif // FTLCompile_h + diff --git a/Source/JavaScriptCore/ftl/FTLExceptionTarget.cpp b/Source/JavaScriptCore/ftl/FTLExceptionTarget.cpp new file mode 100644 index 000000000..023e119bf --- /dev/null +++ b/Source/JavaScriptCore/ftl/FTLExceptionTarget.cpp @@ -0,0 +1,76 @@ +/* + * Copyright (C) 2016 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "FTLExceptionTarget.h" + +#if ENABLE(FTL_JIT) + +#include "FTLState.h" + +namespace JSC { namespace FTL { + +ExceptionTarget::~ExceptionTarget() +{ +} + +CodeLocationLabel ExceptionTarget::label(LinkBuffer& linkBuffer) +{ + if (m_isDefaultHandler) + return linkBuffer.locationOf(*m_defaultHandler); + return linkBuffer.locationOf(m_handle->label); +} + +Box<CCallHelpers::JumpList> ExceptionTarget::jumps(CCallHelpers& jit) +{ + Box<CCallHelpers::JumpList> result = Box<CCallHelpers::JumpList>::create(); + if (m_isDefaultHandler) { + Box<CCallHelpers::Label> defaultHandler = m_defaultHandler; + jit.addLinkTask( + [=] (LinkBuffer& linkBuffer) { + linkBuffer.link(*result, linkBuffer.locationOf(*defaultHandler)); + }); + } else { + RefPtr<OSRExitHandle> handle = m_handle; + jit.addLinkTask( + [=] (LinkBuffer& linkBuffer) { + linkBuffer.link(*result, linkBuffer.locationOf(handle->label)); + }); + } + return result; +} + +ExceptionTarget::ExceptionTarget( + bool isDefaultHandler, Box<CCallHelpers::Label> defaultHandler, RefPtr<OSRExitHandle> handle) + : m_isDefaultHandler(isDefaultHandler) + , m_defaultHandler(defaultHandler) + , m_handle(handle) +{ +} + +} } // namespace JSC::FTL + +#endif // ENABLE(FTL_JIT) + diff --git a/Source/JavaScriptCore/ftl/FTLExceptionTarget.h b/Source/JavaScriptCore/ftl/FTLExceptionTarget.h new file mode 100644 index 000000000..c338bb466 --- /dev/null +++ b/Source/JavaScriptCore/ftl/FTLExceptionTarget.h @@ -0,0 +1,68 @@ +/* + * Copyright (C) 2016 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. 
OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef FTLExceptionTarget_h +#define FTLExceptionTarget_h + +#include "DFGCommon.h" + +#if ENABLE(FTL_JIT) + +#include "CCallHelpers.h" +#include "FTLOSRExitHandle.h" +#include <wtf/Box.h> +#include <wtf/ThreadSafeRefCounted.h> + +namespace JSC { namespace FTL { + +class State; + +class ExceptionTarget : public ThreadSafeRefCounted<ExceptionTarget> { +public: + ~ExceptionTarget(); + + // It's OK to call this during linking, but not any sooner. + CodeLocationLabel label(LinkBuffer&); + + // Or, you can get a JumpList at any time. Anything you add to this JumpList will be linked to + // the target's label. + Box<CCallHelpers::JumpList> jumps(CCallHelpers&); + +private: + friend class PatchpointExceptionHandle; + + ExceptionTarget(bool isDefaultHandler, Box<CCallHelpers::Label>, RefPtr<OSRExitHandle>); + + bool m_isDefaultHandler; + Box<CCallHelpers::Label> m_defaultHandler; + RefPtr<OSRExitHandle> m_handle; +}; + +} } // namespace JSC::FTL + +#endif // ENABLE(FTL_JIT) + +#endif // FTLExceptionTarget_h + diff --git a/Source/JavaScriptCore/ftl/FTLExitArgument.cpp b/Source/JavaScriptCore/ftl/FTLExitArgument.cpp new file mode 100644 index 000000000..381b3d3a4 --- /dev/null +++ b/Source/JavaScriptCore/ftl/FTLExitArgument.cpp @@ -0,0 +1,41 @@ +/* + * Copyright (C) 2013 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "config.h" +#include "FTLExitArgument.h" + +#if ENABLE(FTL_JIT) + +namespace JSC { namespace FTL { + +void ExitArgument::dump(PrintStream& out) const +{ + out.print("#", argument(), " as ", format()); +} + +} } // namespace JSC::FTL + +#endif // ENABLE(FTL_JIT) + diff --git a/Source/JavaScriptCore/ftl/FTLExitArgument.h b/Source/JavaScriptCore/ftl/FTLExitArgument.h new file mode 100644 index 000000000..74c27d9d3 --- /dev/null +++ b/Source/JavaScriptCore/ftl/FTLExitArgument.h @@ -0,0 +1,91 @@ +/* + * Copyright (C) 2013 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef FTLExitArgument_h +#define FTLExitArgument_h + +#if ENABLE(FTL_JIT) + +#include "DataFormat.h" +#include <wtf/PrintStream.h> + +namespace JSC { namespace FTL { + +struct ExitArgumentRepresentation { + DataFormat format; + unsigned argument; +}; + +class ExitArgument { +public: + ExitArgument() + { + m_representation.format = DataFormatNone; + } + + ExitArgument(DataFormat format, unsigned argument) + { + m_representation.format = format; + m_representation.argument = argument; + } + + explicit ExitArgument(ExitArgumentRepresentation representation) + { + m_representation = representation; + } + + bool operator!() const { return m_representation.format == DataFormatNone; } + + DataFormat format() const + { + ASSERT(*this); + return m_representation.format; + } + + unsigned argument() const + { + ASSERT(*this); + return m_representation.argument; + } + + ExitArgument withFormat(DataFormat format) + { + return ExitArgument(format, argument()); + } + + ExitArgumentRepresentation representation() const { return m_representation; } + + void dump(PrintStream&) const; + +private: + ExitArgumentRepresentation m_representation; +}; + +} } // namespace JSC::FTL + +#endif // ENABLE(FTL_JIT) + +#endif // FTLExitArgument_h + diff --git a/Source/JavaScriptCore/ftl/FTLExitArgumentForOperand.cpp b/Source/JavaScriptCore/ftl/FTLExitArgumentForOperand.cpp new file mode 100644 index 000000000..074e7a69e --- /dev/null +++ b/Source/JavaScriptCore/ftl/FTLExitArgumentForOperand.cpp @@ -0,0 +1,41 @@ +/* + * Copyright (C) 2013 Apple Inc. All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "FTLExitArgumentForOperand.h" + +#if ENABLE(FTL_JIT) + +namespace JSC { namespace FTL { + +void ExitArgumentForOperand::dump(PrintStream& out) const +{ + out.print(m_exitArgument, " for ", m_operand); +} + +} } // namespace JSC::FTL + +#endif // ENABLE(FTL_JIT) + diff --git a/Source/JavaScriptCore/ftl/FTLExitArgumentForOperand.h b/Source/JavaScriptCore/ftl/FTLExitArgumentForOperand.h new file mode 100644 index 000000000..c3dc6d4a5 --- /dev/null +++ b/Source/JavaScriptCore/ftl/FTLExitArgumentForOperand.h @@ -0,0 +1,72 @@ +/* + * Copyright (C) 2013 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef FTLExitArgumentForOperand_h +#define FTLExitArgumentForOperand_h + +#if ENABLE(FTL_JIT) + +#include "FTLExitArgument.h" +#include "VirtualRegister.h" + +namespace JSC { namespace FTL { + +class ExitArgumentForOperand { +public: + ExitArgumentForOperand() + : m_operand(VirtualRegister()) + { + } + + ExitArgumentForOperand(ExitArgument exitArgument, VirtualRegister operand) + : m_exitArgument(exitArgument) + , m_operand(operand) + { + } + + bool operator!() const { return !m_exitArgument; } + + const ExitArgument& exitArgument() const { return m_exitArgument; } + VirtualRegister operand() const { return m_operand; } + + void dump(PrintStream&) const; + +private: + ExitArgument m_exitArgument; + VirtualRegister m_operand; +}; + +inline bool lesserArgumentIndex( + const ExitArgumentForOperand& a, const ExitArgumentForOperand& b) +{ + return a.exitArgument().argument() < b.exitArgument().argument(); +} + +} } // namespace JSC::FTL + +#endif // ENABLE(FTL_JIT) + +#endif // FTLExitArgumentForOperand_h + diff --git a/Source/JavaScriptCore/ftl/FTLExitPropertyValue.cpp b/Source/JavaScriptCore/ftl/FTLExitPropertyValue.cpp new file mode 100644 index 000000000..d57db747a --- /dev/null +++ b/Source/JavaScriptCore/ftl/FTLExitPropertyValue.cpp @@ -0,0 +1,51 @@ +/* + * Copyright (C) 2014, 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "FTLExitPropertyValue.h" + +#if ENABLE(FTL_JIT) + +namespace JSC { namespace FTL { + +ExitPropertyValue ExitPropertyValue::withLocalsOffset(int offset) const +{ + return ExitPropertyValue(m_location, m_value.withLocalsOffset(offset)); +} + +void ExitPropertyValue::dump(PrintStream& out) const +{ + out.print(m_location, " => ", m_value); +} + +void ExitPropertyValue::validateReferences(const TrackedReferences& trackedReferences) const +{ + m_value.validateReferences(trackedReferences); +} + +} } // namespace JSC::FTL + +#endif // ENABLE(FTL_JIT) + diff --git a/Source/JavaScriptCore/ftl/FTLExitPropertyValue.h b/Source/JavaScriptCore/ftl/FTLExitPropertyValue.h new file mode 100644 index 000000000..76fd00c59 --- /dev/null +++ b/Source/JavaScriptCore/ftl/FTLExitPropertyValue.h @@ -0,0 +1,74 @@ +/* + * Copyright (C) 2014, 2015 Apple Inc. All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef FTLExitPropertyValue_h +#define FTLExitPropertyValue_h + +#if ENABLE(FTL_JIT) + +#include "DFGPromotedHeapLocation.h" +#include "FTLExitValue.h" + +namespace JSC { + +class TrackedReferences; + +namespace FTL { + +class ExitPropertyValue { +public: + ExitPropertyValue() + { + } + + ExitPropertyValue(DFG::PromotedLocationDescriptor location, const ExitValue& value) + : m_location(location) + , m_value(value) + { + ASSERT(!!location == !!value); + } + + bool operator!() const { return !m_location; } + + DFG::PromotedLocationDescriptor location() const { return m_location; } + const ExitValue& value() const { return m_value; } + + ExitPropertyValue withLocalsOffset(int offset) const; + + void dump(PrintStream& out) const; + + void validateReferences(const TrackedReferences&) const; + +private: + DFG::PromotedLocationDescriptor m_location; + ExitValue m_value; +}; + +} } // namespace JSC::FTL + +#endif // ENABLE(FTL_JIT) + +#endif // FTLExitPropertyValue_h + diff --git a/Source/JavaScriptCore/ftl/FTLExitTimeObjectMaterialization.cpp b/Source/JavaScriptCore/ftl/FTLExitTimeObjectMaterialization.cpp new file mode 100644 index 000000000..858d1babc --- /dev/null +++ b/Source/JavaScriptCore/ftl/FTLExitTimeObjectMaterialization.cpp @@ -0,0 +1,82 @@ +/* + * Copyright (C) 2014, 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. 
OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "FTLExitTimeObjectMaterialization.h" + +#if ENABLE(FTL_JIT) + +#include "DFGGraph.h" + +namespace JSC { namespace FTL { + +using namespace JSC::DFG; + +ExitTimeObjectMaterialization::ExitTimeObjectMaterialization(NodeType type, CodeOrigin codeOrigin) + : m_type(type) + , m_origin(codeOrigin) +{ +} + +ExitTimeObjectMaterialization::~ExitTimeObjectMaterialization() +{ +} + +void ExitTimeObjectMaterialization::add( + PromotedLocationDescriptor location, const ExitValue& value) +{ + m_properties.append(ExitPropertyValue(location, value)); +} + +ExitValue ExitTimeObjectMaterialization::get(PromotedLocationDescriptor location) const +{ + for (ExitPropertyValue value : m_properties) { + if (value.location() == location) + return value.value(); + } + return ExitValue(); +} + +void ExitTimeObjectMaterialization::accountForLocalsOffset(int offset) +{ + for (ExitPropertyValue& property : m_properties) + property = property.withLocalsOffset(offset); +} + +void ExitTimeObjectMaterialization::dump(PrintStream& out) const +{ + out.print(RawPointer(this), ":", Graph::opName(m_type), "(", listDump(m_properties), ")"); +} + +void ExitTimeObjectMaterialization::validateReferences(const TrackedReferences& trackedReferences) const +{ + for (ExitPropertyValue value : m_properties) + value.validateReferences(trackedReferences); +} + +} } // namespace JSC::FTL + +#endif // ENABLE(FTL_JIT) + diff --git a/Source/JavaScriptCore/ftl/FTLExitTimeObjectMaterialization.h b/Source/JavaScriptCore/ftl/FTLExitTimeObjectMaterialization.h new file mode 100644 index 000000000..e2003c600 --- /dev/null +++ b/Source/JavaScriptCore/ftl/FTLExitTimeObjectMaterialization.h @@ -0,0 +1,73 @@ +/* + * Copyright (C) 2014, 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. 
OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef FTLExitTimeObjectMaterialization_h +#define FTLExitTimeObjectMaterialization_h + +#if ENABLE(FTL_JIT) + +#include "DFGNodeType.h" +#include "FTLExitPropertyValue.h" +#include "FTLExitValue.h" +#include <wtf/Noncopyable.h> + +namespace JSC { + +class TrackedReferences; + +namespace FTL { + +class ExitTimeObjectMaterialization { + WTF_MAKE_NONCOPYABLE(ExitTimeObjectMaterialization) +public: + ExitTimeObjectMaterialization(DFG::NodeType, CodeOrigin); + ~ExitTimeObjectMaterialization(); + + void add(DFG::PromotedLocationDescriptor, const ExitValue&); + + DFG::NodeType type() const { return m_type; } + CodeOrigin origin() const { return m_origin; } + + ExitValue get(DFG::PromotedLocationDescriptor) const; + const Vector<ExitPropertyValue>& properties() const { return m_properties; } + + void accountForLocalsOffset(int offset); + + void dump(PrintStream& out) const; + + void validateReferences(const TrackedReferences&) const; + +private: + DFG::NodeType m_type; + CodeOrigin m_origin; + Vector<ExitPropertyValue> m_properties; +}; + +} } // namespace JSC::FTL + +#endif // ENABLE(FTL_JIT) + +#endif // FTLExitTimeObjectMaterialization_h + diff --git a/Source/JavaScriptCore/ftl/FTLExitValue.cpp b/Source/JavaScriptCore/ftl/FTLExitValue.cpp new file mode 100644 index 000000000..7004b9bdb --- /dev/null +++ b/Source/JavaScriptCore/ftl/FTLExitValue.cpp @@ -0,0 +1,138 @@ +/* + * Copyright (C) 2013-2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "config.h" +#include "FTLExitValue.h" + +#if ENABLE(FTL_JIT) + +#include "FTLExitTimeObjectMaterialization.h" +#include "JSCInlines.h" +#include "TrackedReferences.h" + +namespace JSC { namespace FTL { + +ExitValue ExitValue::materializeNewObject(ExitTimeObjectMaterialization* data) +{ + ExitValue result; + result.m_kind = ExitValueMaterializeNewObject; + result.u.newObjectMaterializationData = data; + return result; +} + +ExitValue ExitValue::withLocalsOffset(int offset) const +{ + if (!isInJSStackSomehow()) + return *this; + if (!virtualRegister().isLocal()) + return *this; + return withVirtualRegister(virtualRegister() + offset); +} + +DataFormat ExitValue::dataFormat() const +{ + switch (kind()) { + case InvalidExitValue: + RELEASE_ASSERT_NOT_REACHED(); + return DataFormatNone; + + case ExitValueDead: + case ExitValueConstant: + case ExitValueInJSStack: + case ExitValueMaterializeNewObject: + return DataFormatJS; + + case ExitValueArgument: + return exitArgument().format(); + + case ExitValueInJSStackAsInt32: + return DataFormatInt32; + + case ExitValueInJSStackAsInt52: + return DataFormatInt52; + + case ExitValueInJSStackAsDouble: + return DataFormatDouble; + + case ExitValueRecovery: + return recoveryFormat(); + } + + RELEASE_ASSERT_NOT_REACHED(); +} + +void ExitValue::dumpInContext(PrintStream& out, DumpContext* context) const +{ + switch (kind()) { + case InvalidExitValue: + out.print("Invalid"); + return; + case ExitValueDead: + out.print("Dead"); + return; + case ExitValueArgument: + out.print("Argument(", exitArgument(), ")"); + return; + case ExitValueConstant: + out.print("Constant(", inContext(constant(), context), ")"); + return; + case ExitValueInJSStack: + out.print("InJSStack:", virtualRegister()); + return; + case ExitValueInJSStackAsInt32: + out.print("InJSStackAsInt32:", virtualRegister()); + return; + case ExitValueInJSStackAsInt52: + out.print("InJSStackAsInt52:", virtualRegister()); + return; + case ExitValueInJSStackAsDouble: + out.print("InJSStackAsDouble:", virtualRegister()); + return; + case ExitValueRecovery: + out.print("Recovery(", recoveryOpcode(), ", arg", leftRecoveryArgument(), ", arg", rightRecoveryArgument(), ", ", recoveryFormat(), ")"); + return; + case ExitValueMaterializeNewObject: + out.print("Materialize(", WTF::RawPointer(objectMaterialization()), ")"); + return; + } + + RELEASE_ASSERT_NOT_REACHED(); +} + +void ExitValue::dump(PrintStream& out) const +{ + dumpInContext(out, 0); +} + +void ExitValue::validateReferences(const TrackedReferences& trackedReferences) const +{ + if (isConstant()) + trackedReferences.check(constant()); +} + +} } // namespace JSC::FTL + +#endif // ENABLE(FTL_JIT) + diff --git a/Source/JavaScriptCore/ftl/FTLExitValue.h b/Source/JavaScriptCore/ftl/FTLExitValue.h new file mode 100644 index 000000000..4684b5af1 --- /dev/null +++ b/Source/JavaScriptCore/ftl/FTLExitValue.h @@ -0,0 +1,264 @@ +/* + * Copyright (C) 2013-2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. 
``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef FTLExitValue_h +#define FTLExitValue_h + +#if ENABLE(FTL_JIT) + +#include "FTLExitArgument.h" +#include "FTLRecoveryOpcode.h" +#include "JSCJSValue.h" +#include "VirtualRegister.h" +#include <wtf/PrintStream.h> + +namespace JSC { + +class TrackedReferences; + +namespace FTL { + +// This is like ValueRecovery, but respects the way that the FTL does OSR +// exit: the live non-constant non-flushed values are passed as arguments +// to a noreturn tail call. ExitValue is hence mostly responsible for +// telling us the mapping between operands in bytecode and the arguments to +// the call. + +enum ExitValueKind { + InvalidExitValue, + ExitValueDead, + ExitValueArgument, + ExitValueConstant, + ExitValueInJSStack, + ExitValueInJSStackAsInt32, + ExitValueInJSStackAsInt52, + ExitValueInJSStackAsDouble, + ExitValueRecovery, + ExitValueMaterializeNewObject +}; + +class ExitTimeObjectMaterialization; + +class ExitValue { +public: + ExitValue() + : m_kind(InvalidExitValue) + { + } + + bool operator!() const { return m_kind == InvalidExitValue; } + + static ExitValue dead() + { + ExitValue result; + result.m_kind = ExitValueDead; + return result; + } + + static ExitValue inJSStack(VirtualRegister reg) + { + ExitValue result; + result.m_kind = ExitValueInJSStack; + result.u.virtualRegister = reg.offset(); + return result; + } + + static ExitValue inJSStackAsInt32(VirtualRegister reg) + { + ExitValue result; + result.m_kind = ExitValueInJSStackAsInt32; + result.u.virtualRegister = reg.offset(); + return result; + } + + static ExitValue inJSStackAsInt52(VirtualRegister reg) + { + ExitValue result; + result.m_kind = ExitValueInJSStackAsInt52; + result.u.virtualRegister = reg.offset(); + return result; + } + + static ExitValue inJSStackAsDouble(VirtualRegister reg) + { + ExitValue result; + result.m_kind = ExitValueInJSStackAsDouble; + result.u.virtualRegister = reg.offset(); + return result; + } + + static ExitValue constant(JSValue value) + { + ExitValue result; + result.m_kind = ExitValueConstant; + result.u.constant = JSValue::encode(value); + return result; + } + + static ExitValue exitArgument(const ExitArgument& argument) + { + ExitValue result; + result.m_kind = ExitValueArgument; + result.u.argument = argument.representation(); + return result; + } + + static ExitValue recovery(RecoveryOpcode opcode, unsigned leftArgument, unsigned rightArgument, DataFormat format) + { + ExitValue result; + result.m_kind = ExitValueRecovery; + result.u.recovery.opcode = opcode; + result.u.recovery.leftArgument = leftArgument; + result.u.recovery.rightArgument = rightArgument; + result.u.recovery.format = format; + return result; + } + + static ExitValue materializeNewObject(ExitTimeObjectMaterialization*); + + ExitValueKind kind() const { return m_kind; } + + bool isDead() const { return kind() 
== ExitValueDead; } + bool isInJSStackSomehow() const + { + switch (kind()) { + case ExitValueInJSStack: + case ExitValueInJSStackAsInt32: + case ExitValueInJSStackAsInt52: + case ExitValueInJSStackAsDouble: + return true; + default: + return false; + } + } + bool isConstant() const { return kind() == ExitValueConstant; } + bool isArgument() const { return kind() == ExitValueArgument; } + bool isRecovery() const { return kind() == ExitValueRecovery; } + bool isObjectMaterialization() const { return kind() == ExitValueMaterializeNewObject; } + bool hasIndexInStackmapLocations() const { return isArgument() || isRecovery(); } + + ExitArgument exitArgument() const + { + ASSERT(isArgument()); + return ExitArgument(u.argument); + } + + unsigned leftRecoveryArgument() const + { + ASSERT(isRecovery()); + return u.recovery.leftArgument; + } + + unsigned rightRecoveryArgument() const + { + ASSERT(isRecovery()); + return u.recovery.rightArgument; + } + + void adjustStackmapLocationsIndexByOffset(unsigned offset) + { + ASSERT(hasIndexInStackmapLocations()); + if (isArgument()) + u.argument.argument += offset; + else { + ASSERT(isRecovery()); + u.recovery.rightArgument += offset; + u.recovery.leftArgument += offset; + } + } + + DataFormat recoveryFormat() const + { + ASSERT(isRecovery()); + return static_cast<DataFormat>(u.recovery.format); + } + + RecoveryOpcode recoveryOpcode() const + { + ASSERT(isRecovery()); + return static_cast<RecoveryOpcode>(u.recovery.opcode); + } + + JSValue constant() const + { + ASSERT(isConstant()); + return JSValue::decode(u.constant); + } + + VirtualRegister virtualRegister() const + { + ASSERT(isInJSStackSomehow()); + return VirtualRegister(u.virtualRegister); + } + + ExitTimeObjectMaterialization* objectMaterialization() const + { + ASSERT(isObjectMaterialization()); + return u.newObjectMaterializationData; + } + + ExitValue withVirtualRegister(VirtualRegister virtualRegister) const + { + ASSERT(isInJSStackSomehow()); + ExitValue result; + result.m_kind = m_kind; + result.u.virtualRegister = virtualRegister.offset(); + return result; + } + + ExitValue withLocalsOffset(int offset) const; + + // If it's in the JSStack somehow, this will tell you what format it's in, in a manner + // that is compatible with exitArgument().format(). If it's a constant or it's dead, it + // will claim to be a JSValue. If it's an argument then it will tell you the argument's + // format. + DataFormat dataFormat() const; + + void dump(PrintStream&) const; + void dumpInContext(PrintStream&, DumpContext*) const; + + void validateReferences(const TrackedReferences&) const; + +private: + ExitValueKind m_kind; + union { + ExitArgumentRepresentation argument; + EncodedJSValue constant; + int virtualRegister; + struct { + uint16_t leftArgument; + uint16_t rightArgument; + uint16_t opcode; + uint16_t format; + } recovery; + ExitTimeObjectMaterialization* newObjectMaterializationData; + } u; +}; + +} } // namespace JSC::FTL + +#endif // ENABLE(FTL_JIT) + +#endif // FTLExitValue_h diff --git a/Source/JavaScriptCore/ftl/FTLFail.cpp b/Source/JavaScriptCore/ftl/FTLFail.cpp new file mode 100644 index 000000000..5c6426a54 --- /dev/null +++ b/Source/JavaScriptCore/ftl/FTLFail.cpp @@ -0,0 +1,46 @@ +/* + * Copyright (C) 2013 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "FTLFail.h" + +#if ENABLE(FTL_JIT) + +#include "DFGFailedFinalizer.h" +#include "FTLJITCode.h" + +namespace JSC { namespace FTL { + +using namespace DFG; + +void fail(State& state) +{ + state.graph.m_plan.finalizer = std::make_unique<FailedFinalizer>(state.graph.m_plan); +} + +} } // namespace JSC::FTL + +#endif // ENABLE(FTL_JIT) + diff --git a/Source/JavaScriptCore/ftl/FTLFail.h b/Source/JavaScriptCore/ftl/FTLFail.h new file mode 100644 index 000000000..e99e1a4ae --- /dev/null +++ b/Source/JavaScriptCore/ftl/FTLFail.h @@ -0,0 +1,42 @@ +/* + * Copyright (C) 2013 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef FTLFail_h +#define FTLFail_h + +#if ENABLE(FTL_JIT) + +#include "FTLState.h" + +namespace JSC { namespace FTL { + +void fail(State&); + +} } // namespace JSC::FTL + +#endif // ENABLE(FTL_JIT) + +#endif // FTLFail_h + diff --git a/Source/JavaScriptCore/ftl/FTLForOSREntryJITCode.cpp b/Source/JavaScriptCore/ftl/FTLForOSREntryJITCode.cpp new file mode 100644 index 000000000..3080dc345 --- /dev/null +++ b/Source/JavaScriptCore/ftl/FTLForOSREntryJITCode.cpp @@ -0,0 +1,56 @@ +/* + * Copyright (C) 2013 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "FTLForOSREntryJITCode.h" + +#if ENABLE(FTL_JIT) + +namespace JSC { namespace FTL { + +ForOSREntryJITCode::ForOSREntryJITCode() + : m_bytecodeIndex(UINT_MAX) + , m_entryFailureCount(0) +{ +} + +ForOSREntryJITCode::~ForOSREntryJITCode() +{ +} + +ForOSREntryJITCode* ForOSREntryJITCode::ftlForOSREntry() +{ + return this; +} + +void ForOSREntryJITCode::initializeEntryBuffer(VM& vm, unsigned numCalleeLocals) +{ + m_entryBuffer = vm.scratchBufferForSize(numCalleeLocals * sizeof(EncodedJSValue)); +} + +} } // namespace JSC::FTL + +#endif // ENABLE(FTL_JIT) + diff --git a/Source/JavaScriptCore/ftl/FTLForOSREntryJITCode.h b/Source/JavaScriptCore/ftl/FTLForOSREntryJITCode.h new file mode 100644 index 000000000..9ae973f40 --- /dev/null +++ b/Source/JavaScriptCore/ftl/FTLForOSREntryJITCode.h @@ -0,0 +1,71 @@ +/* + * Copyright (C) 2013 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. 
OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef FTLForOSREntryJITCode_h +#define FTLForOSREntryJITCode_h + +#if ENABLE(FTL_JIT) + +#include "FTLJITCode.h" + +namespace JSC { namespace FTL { + +// OSR entry into the FTL has a number of quirks: +// +// - OSR entry only happens through special OSR entry compilations. They have their +// own CodeBlock and their own JITCode. +// +// - We only OSR enter in loop headers that have a null inline call frame. +// +// - Each OSR entry compilation allows entry through only one bytecode index. + +class ForOSREntryJITCode : public FTL::JITCode { +public: + ForOSREntryJITCode(); + ~ForOSREntryJITCode(); + + void initializeEntryBuffer(VM&, unsigned numCalleeLocals); + ScratchBuffer* entryBuffer() const { return m_entryBuffer; } + + void setBytecodeIndex(unsigned value) { m_bytecodeIndex = value; } + unsigned bytecodeIndex() const { return m_bytecodeIndex; } + + void countEntryFailure() { m_entryFailureCount++; } + unsigned entryFailureCount() const { return m_entryFailureCount; } + + ForOSREntryJITCode* ftlForOSREntry(); + +private: + ScratchBuffer* m_entryBuffer; // Only for OSR entry code blocks. + unsigned m_bytecodeIndex; + unsigned m_entryFailureCount; +}; + +} } // namespace JSC::FTL + +#endif // ENABLE(FLT_JIT) + +#endif // FTLForOSREntryJITCode_h + diff --git a/Source/JavaScriptCore/ftl/FTLFormattedValue.h b/Source/JavaScriptCore/ftl/FTLFormattedValue.h new file mode 100644 index 000000000..a4d01ea79 --- /dev/null +++ b/Source/JavaScriptCore/ftl/FTLFormattedValue.h @@ -0,0 +1,79 @@ +/* + * Copyright (C) 2013 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef FTLFormattedValue_h +#define FTLFormattedValue_h + +#if ENABLE(FTL_JIT) + +#include "DataFormat.h" +#include "FTLAbbreviatedTypes.h" + +namespace JSC { namespace FTL { + +// This class is mostly used for OSR; it's a way of specifying how a value is formatted +// in cases where it wouldn't have been obvious from looking at other indicators (like +// the type of the B3::Value* or the type of the DFG::Node). + +class FormattedValue { +public: + FormattedValue() + : m_format(DataFormatNone) + , m_value(0) + { + } + + FormattedValue(DataFormat format, LValue value) + : m_format(format) + , m_value(value) + { + } + + bool operator!() const + { + ASSERT((m_format == DataFormatNone) == !m_value); + return m_format == DataFormatNone; + } + + DataFormat format() const { return m_format; } + LValue value() const { return m_value; } + +private: + DataFormat m_format; + LValue m_value; +}; + +static inline FormattedValue noValue() { return FormattedValue(); } +static inline FormattedValue int32Value(LValue value) { return FormattedValue(DataFormatInt32, value); } +static inline FormattedValue booleanValue(LValue value) { return FormattedValue(DataFormatBoolean, value); } +static inline FormattedValue jsValueValue(LValue value) { return FormattedValue(DataFormatJS, value); } +static inline FormattedValue doubleValue(LValue value) { return FormattedValue(DataFormatDouble, value); } + +} } // namespace JSC::FTL + +#endif // ENABLE(FTL_JIT) + +#endif // FTLFormattedValue_h + diff --git a/Source/JavaScriptCore/ftl/FTLGeneratedFunction.h b/Source/JavaScriptCore/ftl/FTLGeneratedFunction.h new file mode 100644 index 000000000..6f56a3654 --- /dev/null +++ b/Source/JavaScriptCore/ftl/FTLGeneratedFunction.h @@ -0,0 +1,41 @@ +/* + * Copyright (C) 2013 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef FTLGeneratedFunction_h +#define FTLGeneratedFunction_h + +#if ENABLE(FTL_JIT) + +#include "CallFrame.h" + +namespace JSC { namespace FTL { + +typedef EncodedJSValue (*GeneratedFunction)(ExecState*); + +} } // namespace JSC::FTL + +#endif // ENABLE(FTL_JIT) + +#endif // FTLGeneratedFunction_h diff --git a/Source/JavaScriptCore/ftl/FTLJITCode.cpp b/Source/JavaScriptCore/ftl/FTLJITCode.cpp new file mode 100644 index 000000000..65e6c0052 --- /dev/null +++ b/Source/JavaScriptCore/ftl/FTLJITCode.cpp @@ -0,0 +1,170 @@ +/* + * Copyright (C) 2013, 2015-2016 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "FTLJITCode.h" + +#if ENABLE(FTL_JIT) + +#include "FTLState.h" + +namespace JSC { namespace FTL { + +using namespace B3; + +JITCode::JITCode() + : JSC::JITCode(FTLJIT) +{ +} + +JITCode::~JITCode() +{ + if (FTL::shouldDumpDisassembly()) { + dataLog("Destroying FTL JIT code at "); + CommaPrinter comma; + dataLog(comma, m_b3Code); + dataLog(comma, m_arityCheckEntrypoint); + } +} + +void JITCode::initializeB3Code(CodeRef b3Code) +{ + m_b3Code = b3Code; +} + +void JITCode::initializeB3Byproducts(std::unique_ptr<OpaqueByproducts> byproducts) +{ + m_b3Byproducts = WTFMove(byproducts); +} + +void JITCode::initializeAddressForCall(CodePtr address) +{ + m_addressForCall = address; +} + +void JITCode::initializeArityCheckEntrypoint(CodeRef entrypoint) +{ + m_arityCheckEntrypoint = entrypoint; +} + +JITCode::CodePtr JITCode::addressForCall(ArityCheckMode arityCheck) +{ + switch (arityCheck) { + case ArityCheckNotRequired: + return m_addressForCall; + case MustCheckArity: + return m_arityCheckEntrypoint.code(); + } + RELEASE_ASSERT_NOT_REACHED(); + return CodePtr(); +} + +void* JITCode::executableAddressAtOffset(size_t offset) +{ + return reinterpret_cast<char*>(m_addressForCall.executableAddress()) + offset; +} + +void* JITCode::dataAddressAtOffset(size_t) +{ + // We can't patch FTL code, yet. Even if we did, it's not clear that we would do so + // through this API. + RELEASE_ASSERT_NOT_REACHED(); + return 0; +} + +unsigned JITCode::offsetOf(void*) +{ + // We currently don't have visibility into the FTL code. 
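+    // Until that changes, treat any offset query as a logic error rather than
+    // returning a bogus offset.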
+ RELEASE_ASSERT_NOT_REACHED(); + return 0; +} + +size_t JITCode::size() +{ + // We don't know the size of FTL code, yet. Make a wild guess. This is mostly used for + // GC load estimates. + return 1000; +} + +bool JITCode::contains(void*) +{ + // We have no idea what addresses the FTL code contains, yet. + RELEASE_ASSERT_NOT_REACHED(); + return false; +} + +JITCode* JITCode::ftl() +{ + return this; +} + +DFG::CommonData* JITCode::dfgCommon() +{ + return &common; +} + +void JITCode::validateReferences(const TrackedReferences& trackedReferences) +{ + common.validateReferences(trackedReferences); + + for (OSRExit& exit : osrExit) + exit.m_descriptor->validateReferences(trackedReferences); +} + +RegisterSet JITCode::liveRegistersToPreserveAtExceptionHandlingCallSite(CodeBlock*, CallSiteIndex callSiteIndex) +{ + for (OSRExit& exit : osrExit) { + if (exit.m_exceptionHandlerCallSiteIndex.bits() == callSiteIndex.bits()) { + RELEASE_ASSERT(exit.isExceptionHandler()); + RELEASE_ASSERT(exit.isGenericUnwindHandler()); + return ValueRep::usedRegisters(exit.m_valueReps); + } + } + return RegisterSet(); +} + +Optional<CodeOrigin> JITCode::findPC(CodeBlock* codeBlock, void* pc) +{ + for (OSRExit& exit : osrExit) { + if (ExecutableMemoryHandle* handle = exit.m_code.executableMemory()) { + if (handle->start() <= pc && pc < handle->end()) + return Optional<CodeOrigin>(exit.m_codeOriginForExitProfile); + } + } + + for (std::unique_ptr<LazySlowPath>& lazySlowPath : lazySlowPaths) { + if (ExecutableMemoryHandle* handle = lazySlowPath->stub().executableMemory()) { + if (handle->start() <= pc && pc < handle->end()) + return Optional<CodeOrigin>(codeBlock->codeOrigin(lazySlowPath->callSiteIndex())); + } + } + + return Nullopt; +} + +} } // namespace JSC::FTL + +#endif // ENABLE(FTL_JIT) + diff --git a/Source/JavaScriptCore/ftl/FTLJITCode.h b/Source/JavaScriptCore/ftl/FTLJITCode.h new file mode 100644 index 000000000..6f90545c9 --- /dev/null +++ b/Source/JavaScriptCore/ftl/FTLJITCode.h @@ -0,0 +1,90 @@ +/* + * Copyright (C) 2013, 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef FTLJITCode_h +#define FTLJITCode_h + +#if ENABLE(FTL_JIT) + +#include "B3OpaqueByproducts.h" +#include "DFGCommonData.h" +#include "FTLLazySlowPath.h" +#include "FTLOSRExit.h" +#include "JITCode.h" +#include <wtf/RefCountedArray.h> + +namespace JSC { + +class TrackedReferences; + +namespace FTL { + +class JITCode : public JSC::JITCode { +public: + JITCode(); + ~JITCode(); + + CodePtr addressForCall(ArityCheckMode) override; + void* executableAddressAtOffset(size_t offset) override; + void* dataAddressAtOffset(size_t offset) override; + unsigned offsetOf(void* pointerIntoCode) override; + size_t size() override; + bool contains(void*) override; + + void initializeB3Code(CodeRef); + void initializeB3Byproducts(std::unique_ptr<B3::OpaqueByproducts>); + void initializeAddressForCall(CodePtr); + void initializeArityCheckEntrypoint(CodeRef); + + void validateReferences(const TrackedReferences&) override; + + RegisterSet liveRegistersToPreserveAtExceptionHandlingCallSite(CodeBlock*, CallSiteIndex) override; + + Optional<CodeOrigin> findPC(CodeBlock*, void* pc) override; + + CodeRef b3Code() const { return m_b3Code; } + + JITCode* ftl() override; + DFG::CommonData* dfgCommon() override; + static ptrdiff_t commonDataOffset() { return OBJECT_OFFSETOF(JITCode, common); } + + DFG::CommonData common; + SegmentedVector<OSRExit, 8> osrExit; + SegmentedVector<OSRExitDescriptor, 8> osrExitDescriptors; + Vector<std::unique_ptr<LazySlowPath>> lazySlowPaths; + +private: + CodePtr m_addressForCall; + CodeRef m_b3Code; + std::unique_ptr<B3::OpaqueByproducts> m_b3Byproducts; + CodeRef m_arityCheckEntrypoint; +}; + +} } // namespace JSC::FTL + +#endif // ENABLE(FLT_JIT) + +#endif // FTLJITCode_h + diff --git a/Source/JavaScriptCore/ftl/FTLJITFinalizer.cpp b/Source/JavaScriptCore/ftl/FTLJITFinalizer.cpp new file mode 100644 index 000000000..56bd9ee5e --- /dev/null +++ b/Source/JavaScriptCore/ftl/FTLJITFinalizer.cpp @@ -0,0 +1,94 @@ +/* + * Copyright (C) 2013, 2014 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "config.h" +#include "FTLJITFinalizer.h" + +#if ENABLE(FTL_JIT) + +#include "CodeBlockWithJITType.h" +#include "DFGPlan.h" +#include "FTLState.h" +#include "FTLThunks.h" +#include "ProfilerDatabase.h" + +namespace JSC { namespace FTL { + +using namespace DFG; + +JITFinalizer::JITFinalizer(Plan& plan) + : Finalizer(plan) +{ +} + +JITFinalizer::~JITFinalizer() +{ +} + +size_t JITFinalizer::codeSize() +{ + size_t result = 0; + + if (b3CodeLinkBuffer) + result += b3CodeLinkBuffer->size(); + + if (entrypointLinkBuffer) + result += entrypointLinkBuffer->size(); + + return result; +} + +bool JITFinalizer::finalize() +{ + RELEASE_ASSERT_NOT_REACHED(); + return false; +} + +bool JITFinalizer::finalizeFunction() +{ + bool dumpDisassembly = shouldDumpDisassembly() || Options::asyncDisassembly(); + + jitCode->initializeB3Code( + FINALIZE_CODE_IF( + dumpDisassembly, *b3CodeLinkBuffer, + ("FTL B3 code for %s", toCString(CodeBlockWithJITType(m_plan.codeBlock, JITCode::FTLJIT)).data()))); + + jitCode->initializeArityCheckEntrypoint( + FINALIZE_CODE_IF( + dumpDisassembly, *entrypointLinkBuffer, + ("FTL entrypoint thunk for %s with B3 generated code at %p", toCString(CodeBlockWithJITType(m_plan.codeBlock, JITCode::FTLJIT)).data(), function))); + + m_plan.codeBlock->setJITCode(jitCode); + + if (m_plan.compilation) + m_plan.vm.m_perBytecodeProfiler->addCompilation(m_plan.compilation); + + return true; +} + +} } // namespace JSC::FTL + +#endif // ENABLE(FTL_JIT) + diff --git a/Source/JavaScriptCore/ftl/FTLJITFinalizer.h b/Source/JavaScriptCore/ftl/FTLJITFinalizer.h new file mode 100644 index 000000000..0574f5327 --- /dev/null +++ b/Source/JavaScriptCore/ftl/FTLJITFinalizer.h @@ -0,0 +1,76 @@ +/* + * Copyright (C) 2013 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef FTLJITFinalizer_h +#define FTLJITFinalizer_h + +#if ENABLE(FTL_JIT) + +#include "DFGFinalizer.h" +#include "FTLGeneratedFunction.h" +#include "FTLJITCode.h" +#include "FTLSlowPathCall.h" +#include "LinkBuffer.h" +#include "MacroAssembler.h" + +namespace JSC { namespace FTL { + +class OutOfLineCodeInfo { +public: + OutOfLineCodeInfo(std::unique_ptr<LinkBuffer> linkBuffer, const char* codeDescription) + : m_linkBuffer(WTFMove(linkBuffer)) + , m_codeDescription(codeDescription) + { + } + + std::unique_ptr<LinkBuffer> m_linkBuffer; + const char* m_codeDescription; +}; + +class JITFinalizer : public DFG::Finalizer { +public: + JITFinalizer(DFG::Plan&); + virtual ~JITFinalizer(); + + size_t codeSize() override; + bool finalize() override; + bool finalizeFunction() override; + + std::unique_ptr<LinkBuffer> b3CodeLinkBuffer; + + // Eventually, we can get rid of this with B3. + std::unique_ptr<LinkBuffer> entrypointLinkBuffer; + + Vector<CCallHelpers::Jump> lazySlowPathGeneratorJumps; + GeneratedFunction function; + RefPtr<JITCode> jitCode; +}; + +} } // namespace JSC::FTL + +#endif // ENABLE(FTL_JIT) + +#endif // FTLJITFinalizer_h + diff --git a/Source/JavaScriptCore/ftl/FTLLazySlowPath.cpp b/Source/JavaScriptCore/ftl/FTLLazySlowPath.cpp new file mode 100644 index 000000000..9bdc06da8 --- /dev/null +++ b/Source/JavaScriptCore/ftl/FTLLazySlowPath.cpp @@ -0,0 +1,79 @@ +/* + * Copyright (C) 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "config.h" +#include "FTLLazySlowPath.h" + +#if ENABLE(FTL_JIT) + +#include "FTLSlowPathCall.h" +#include "LinkBuffer.h" + +namespace JSC { namespace FTL { + +LazySlowPath::LazySlowPath( + CodeLocationJump patchableJump, CodeLocationLabel done, + CodeLocationLabel exceptionTarget, + const RegisterSet& usedRegisters, CallSiteIndex callSiteIndex, RefPtr<Generator> generator + ) + : m_patchableJump(patchableJump) + , m_done(done) + , m_exceptionTarget(exceptionTarget) + , m_usedRegisters(usedRegisters) + , m_callSiteIndex(callSiteIndex) + , m_generator(generator) +{ +} + +LazySlowPath::~LazySlowPath() +{ +} + +void LazySlowPath::generate(CodeBlock* codeBlock) +{ + RELEASE_ASSERT(!m_stub); + + VM& vm = *codeBlock->vm(); + + CCallHelpers jit(&vm, codeBlock); + GenerationParams params; + CCallHelpers::JumpList exceptionJumps; + params.exceptionJumps = m_exceptionTarget ? &exceptionJumps : nullptr; + params.lazySlowPath = this; + + m_generator->run(jit, params); + + LinkBuffer linkBuffer(vm, jit, codeBlock, JITCompilationMustSucceed); + linkBuffer.link(params.doneJumps, m_done); + if (m_exceptionTarget) + linkBuffer.link(exceptionJumps, m_exceptionTarget); + m_stub = FINALIZE_CODE_FOR(codeBlock, linkBuffer, ("Lazy slow path call stub")); + + MacroAssembler::repatchJump(m_patchableJump, CodeLocationLabel(m_stub.code())); +} + +} } // namespace JSC::FTL + +#endif // ENABLE(FTL_JIT) diff --git a/Source/JavaScriptCore/ftl/FTLLazySlowPath.h b/Source/JavaScriptCore/ftl/FTLLazySlowPath.h new file mode 100644 index 000000000..5105cc558 --- /dev/null +++ b/Source/JavaScriptCore/ftl/FTLLazySlowPath.h @@ -0,0 +1,100 @@ +/* + * Copyright (C) 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef FTLLazySlowPath_h +#define FTLLazySlowPath_h + +#if ENABLE(FTL_JIT) + +#include "CCallHelpers.h" +#include "CodeBlock.h" +#include "CodeLocation.h" +#include "GPRInfo.h" +#include "MacroAssemblerCodeRef.h" +#include "RegisterSet.h" +#include "ScratchRegisterAllocator.h" +#include <wtf/SharedTask.h> + +namespace JSC { namespace FTL { + +// A LazySlowPath is an object that represents a piece of code that is part of FTL generated code +// that will be generated lazily. 
It holds all of the important information needed to generate that +// code, such as where to link jumps to and which registers are in use. It also has a reference to a +// SharedTask that will do the actual code generation. That SharedTask may have additional data, like +// which registers hold the inputs or outputs. +class LazySlowPath { + WTF_MAKE_NONCOPYABLE(LazySlowPath); + WTF_MAKE_FAST_ALLOCATED; +public: + struct GenerationParams { + // Extra parameters to the GeneratorFunction are made into fields of this struct, so that if + // we add new parameters, we don't have to change all of the users. + CCallHelpers::JumpList doneJumps; + CCallHelpers::JumpList* exceptionJumps; + LazySlowPath* lazySlowPath; + }; + + typedef void GeneratorFunction(CCallHelpers&, GenerationParams&); + typedef SharedTask<GeneratorFunction> Generator; + + template<typename Functor> + static RefPtr<Generator> createGenerator(const Functor& functor) + { + return createSharedTask<GeneratorFunction>(functor); + } + + LazySlowPath( + CodeLocationJump patchableJump, CodeLocationLabel done, + CodeLocationLabel exceptionTarget, const RegisterSet& usedRegisters, + CallSiteIndex, RefPtr<Generator> + ); + + ~LazySlowPath(); + + CodeLocationJump patchableJump() const { return m_patchableJump; } + CodeLocationLabel done() const { return m_done; } + const RegisterSet& usedRegisters() const { return m_usedRegisters; } + CallSiteIndex callSiteIndex() const { return m_callSiteIndex; } + + void generate(CodeBlock*); + + MacroAssemblerCodeRef stub() const { return m_stub; } + +private: + CodeLocationJump m_patchableJump; + CodeLocationLabel m_done; + CodeLocationLabel m_exceptionTarget; + RegisterSet m_usedRegisters; + CallSiteIndex m_callSiteIndex; + MacroAssemblerCodeRef m_stub; + RefPtr<Generator> m_generator; +}; + +} } // namespace JSC::FTL + +#endif // ENABLE(FTL_JIT) + +#endif // FTLLazySlowPath_h + diff --git a/Source/JavaScriptCore/ftl/FTLLazySlowPathCall.h b/Source/JavaScriptCore/ftl/FTLLazySlowPathCall.h new file mode 100644 index 000000000..2b4d8041e --- /dev/null +++ b/Source/JavaScriptCore/ftl/FTLLazySlowPathCall.h @@ -0,0 +1,56 @@ +/* + * Copyright (C) 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef FTLLazySlowPathCall_h +#define FTLLazySlowPathCall_h + +#include "CodeBlock.h" +#include "CodeLocation.h" +#include "FTLLazySlowPath.h" +#include "FTLSlowPathCall.h" +#include "FTLThunks.h" +#include "GPRInfo.h" +#include "MacroAssemblerCodeRef.h" +#include "RegisterSet.h" + +namespace JSC { namespace FTL { + +template<typename ResultType, typename... ArgumentTypes> +RefPtr<LazySlowPath::Generator> createLazyCallGenerator( + FunctionPtr function, ResultType result, ArgumentTypes... arguments) +{ + return LazySlowPath::createGenerator( + [=] (CCallHelpers& jit, LazySlowPath::GenerationParams& params) { + callOperation( + params.lazySlowPath->usedRegisters(), jit, params.lazySlowPath->callSiteIndex(), + params.exceptionJumps, function, result, arguments...); + params.doneJumps.append(jit.jump()); + }); +} + +} } // namespace JSC::FTL + +#endif // FTLLazySlowPathCall_h + diff --git a/Source/JavaScriptCore/ftl/FTLLink.cpp b/Source/JavaScriptCore/ftl/FTLLink.cpp new file mode 100644 index 000000000..5b2bb67c7 --- /dev/null +++ b/Source/JavaScriptCore/ftl/FTLLink.cpp @@ -0,0 +1,207 @@ +/* + * Copyright (C) 2013, 2014 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "FTLLink.h" + +#if ENABLE(FTL_JIT) + +#include "CCallHelpers.h" +#include "CodeBlockWithJITType.h" +#include "DFGCommon.h" +#include "FTLJITCode.h" +#include "JITOperations.h" +#include "LinkBuffer.h" +#include "JSCInlines.h" +#include "ProfilerCompilation.h" +#include "VirtualRegister.h" + +namespace JSC { namespace FTL { + +using namespace DFG; + +void link(State& state) +{ + Graph& graph = state.graph; + CodeBlock* codeBlock = graph.m_codeBlock; + VM& vm = graph.m_vm; + + // B3 will create its own jump tables as needed. + codeBlock->clearSwitchJumpTables(); + + state.jitCode->common.requiredRegisterCountForExit = graph.requiredRegisterCountForExit(); + + if (!graph.m_plan.inlineCallFrames->isEmpty()) + state.jitCode->common.inlineCallFrames = graph.m_plan.inlineCallFrames; + + graph.registerFrozenValues(); + + // Create the entrypoint. Note that we use this entrypoint totally differently + // depending on whether we're doing OSR entry or not. 
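+ // In FTLMode the thunk built here checks the incoming argument count, calls
+ // operationCallArityCheck/operationConstructArityCheck and the arity fixup thunk
+ // if needed, and only then jumps to the B3-generated code. In FTLForOSREntryMode
+ // the DFG caller has already boxed the values and done the stack check, so the
+ // thunk just tears down the current frame and jumps straight to the B3 code.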
+ CCallHelpers jit(&vm, codeBlock); + + std::unique_ptr<LinkBuffer> linkBuffer; + + CCallHelpers::Address frame = CCallHelpers::Address( + CCallHelpers::stackPointerRegister, -static_cast<int32_t>(AssemblyHelpers::prologueStackPointerDelta())); + + if (Profiler::Compilation* compilation = graph.compilation()) { + compilation->addDescription( + Profiler::OriginStack(), + toCString("Generated FTL JIT code for ", CodeBlockWithJITType(codeBlock, JITCode::FTLJIT), ", instruction count = ", graph.m_codeBlock->instructionCount(), ":\n")); + + graph.ensureDominators(); + graph.ensureNaturalLoops(); + + const char* prefix = " "; + + DumpContext dumpContext; + StringPrintStream out; + Node* lastNode = 0; + for (size_t blockIndex = 0; blockIndex < graph.numBlocks(); ++blockIndex) { + BasicBlock* block = graph.block(blockIndex); + if (!block) + continue; + + graph.dumpBlockHeader(out, prefix, block, Graph::DumpLivePhisOnly, &dumpContext); + compilation->addDescription(Profiler::OriginStack(), out.toCString()); + out.reset(); + + for (size_t nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) { + Node* node = block->at(nodeIndex); + + Profiler::OriginStack stack; + + if (node->origin.semantic.isSet()) { + stack = Profiler::OriginStack( + *vm.m_perBytecodeProfiler, codeBlock, node->origin.semantic); + } + + if (graph.dumpCodeOrigin(out, prefix, lastNode, node, &dumpContext)) { + compilation->addDescription(stack, out.toCString()); + out.reset(); + } + + graph.dump(out, prefix, node, &dumpContext); + compilation->addDescription(stack, out.toCString()); + out.reset(); + + if (node->origin.semantic.isSet()) + lastNode = node; + } + } + + dumpContext.dump(out, prefix); + compilation->addDescription(Profiler::OriginStack(), out.toCString()); + out.reset(); + + out.print(" Disassembly:\n"); + out.print(" <not implemented yet>\n"); + compilation->addDescription(Profiler::OriginStack(), out.toCString()); + out.reset(); + + state.jitCode->common.compilation = compilation; + } + + switch (graph.m_plan.mode) { + case FTLMode: { + CCallHelpers::JumpList mainPathJumps; + + jit.load32( + frame.withOffset(sizeof(Register) * JSStack::ArgumentCount), + GPRInfo::regT1); + mainPathJumps.append(jit.branch32( + CCallHelpers::AboveOrEqual, GPRInfo::regT1, + CCallHelpers::TrustedImm32(codeBlock->numParameters()))); + jit.emitFunctionPrologue(); + jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); + jit.store32( + CCallHelpers::TrustedImm32(CallSiteIndex(0).bits()), + CCallHelpers::tagFor(JSStack::ArgumentCount)); + jit.storePtr(GPRInfo::callFrameRegister, &vm.topCallFrame); + CCallHelpers::Call callArityCheck = jit.call(); +#if !ASSERT_DISABLED + // FIXME: need to make this call register with exception handling somehow. This is + // part of a bigger problem: FTL should be able to handle exceptions. + // https://bugs.webkit.org/show_bug.cgi?id=113622 + // Until then, use a JIT ASSERT. 
+ jit.load64(vm.addressOfException(), GPRInfo::regT1); + jit.jitAssertIsNull(GPRInfo::regT1); +#endif + jit.move(GPRInfo::returnValueGPR, GPRInfo::argumentGPR0); + jit.emitFunctionEpilogue(); + mainPathJumps.append(jit.branchTest32(CCallHelpers::Zero, GPRInfo::argumentGPR0)); + jit.emitFunctionPrologue(); + CCallHelpers::Call callArityFixup = jit.call(); + jit.emitFunctionEpilogue(); + mainPathJumps.append(jit.jump()); + + linkBuffer = std::make_unique<LinkBuffer>(vm, jit, codeBlock, JITCompilationCanFail); + if (linkBuffer->didFailToAllocate()) { + state.allocationFailed = true; + return; + } + linkBuffer->link(callArityCheck, codeBlock->m_isConstructor ? operationConstructArityCheck : operationCallArityCheck); + linkBuffer->link(callArityFixup, FunctionPtr((vm.getCTIStub(arityFixupGenerator)).code().executableAddress())); + linkBuffer->link(mainPathJumps, CodeLocationLabel(bitwise_cast<void*>(state.generatedFunction))); + + state.jitCode->initializeAddressForCall(MacroAssemblerCodePtr(bitwise_cast<void*>(state.generatedFunction))); + break; + } + + case FTLForOSREntryMode: { + // We jump to here straight from DFG code, after having boxed up all of the + // values into the scratch buffer. Everything should be good to go - at this + // point we've even done the stack check. Basically we just have to make the + // call to the B3-generated code. + CCallHelpers::Label start = jit.label(); + jit.emitFunctionEpilogue(); + CCallHelpers::Jump mainPathJump = jit.jump(); + + linkBuffer = std::make_unique<LinkBuffer>(vm, jit, codeBlock, JITCompilationCanFail); + if (linkBuffer->didFailToAllocate()) { + state.allocationFailed = true; + return; + } + linkBuffer->link(mainPathJump, CodeLocationLabel(bitwise_cast<void*>(state.generatedFunction))); + + state.jitCode->initializeAddressForCall(linkBuffer->locationOf(start)); + break; + } + + default: + RELEASE_ASSERT_NOT_REACHED(); + break; + } + + state.finalizer->entrypointLinkBuffer = WTFMove(linkBuffer); + state.finalizer->function = state.generatedFunction; + state.finalizer->jitCode = state.jitCode; +} + +} } // namespace JSC::FTL + +#endif // ENABLE(FTL_JIT) + diff --git a/Source/JavaScriptCore/ftl/FTLLink.h b/Source/JavaScriptCore/ftl/FTLLink.h new file mode 100644 index 000000000..0103f2f73 --- /dev/null +++ b/Source/JavaScriptCore/ftl/FTLLink.h @@ -0,0 +1,42 @@ +/* + * Copyright (C) 2013 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. 
OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef FTLLink_h +#define FTLLink_h + +#if ENABLE(FTL_JIT) + +#include "FTLState.h" + +namespace JSC { namespace FTL { + +void link(State&); + +} } // namespace JSC::FTL + +#endif // ENABLE(FTL_JIT) + +#endif // FTLLink_h + diff --git a/Source/JavaScriptCore/ftl/FTLLocation.cpp b/Source/JavaScriptCore/ftl/FTLLocation.cpp new file mode 100644 index 000000000..ac8f23ef8 --- /dev/null +++ b/Source/JavaScriptCore/ftl/FTLLocation.cpp @@ -0,0 +1,195 @@ +/* + * Copyright (C) 2013-2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "config.h" +#include "FTLLocation.h" + +#if ENABLE(FTL_JIT) + +#include "B3ValueRep.h" +#include "FTLSaveRestore.h" +#include "RegisterSet.h" +#include <wtf/CommaPrinter.h> +#include <wtf/DataLog.h> +#include <wtf/ListDump.h> + +namespace JSC { namespace FTL { + +using namespace B3; + +Location Location::forValueRep(const ValueRep& rep) +{ + switch (rep.kind()) { + case ValueRep::Register: + return forRegister(rep.reg(), 0); + case ValueRep::Stack: + return forIndirect(GPRInfo::callFrameRegister, rep.offsetFromFP()); + case ValueRep::Constant: + return forConstant(rep.value()); + default: + RELEASE_ASSERT_NOT_REACHED(); + return Location(); + } +} + +void Location::dump(PrintStream& out) const +{ + out.print("(", kind()); + if (hasReg()) + out.print(", ", reg()); + if (hasOffset()) + out.print(", ", offset()); + if (hasAddend()) + out.print(", ", addend()); + if (hasConstant()) + out.print(", ", constant()); + out.print(")"); +} + +bool Location::involvesGPR() const +{ + return isGPR() || kind() == Indirect; +} + +bool Location::isGPR() const +{ + return kind() == Register && reg().isGPR(); +} + +GPRReg Location::gpr() const +{ + return reg().gpr(); +} + +bool Location::isFPR() const +{ + return kind() == Register && reg().isFPR(); +} + +FPRReg Location::fpr() const +{ + return reg().fpr(); +} + +void Location::restoreInto(MacroAssembler& jit, char* savedRegisters, GPRReg result, unsigned numFramesToPop) const +{ + if (involvesGPR() && RegisterSet::stackRegisters().get(gpr())) { + // Make the result GPR contain the appropriate stack register. + if (numFramesToPop) { + jit.move(MacroAssembler::framePointerRegister, result); + + for (unsigned i = numFramesToPop - 1; i--;) + jit.loadPtr(result, result); + + if (gpr() == MacroAssembler::framePointerRegister) + jit.loadPtr(result, result); + else + jit.addPtr(MacroAssembler::TrustedImmPtr(sizeof(void*) * 2), result); + } else + jit.move(gpr(), result); + } + + if (isGPR()) { + if (RegisterSet::stackRegisters().get(gpr())) { + // Already restored into result. + } else + jit.load64(savedRegisters + offsetOfGPR(gpr()), result); + + if (addend()) + jit.add64(MacroAssembler::TrustedImm32(addend()), result); + return; + } + + if (isFPR()) { + jit.load64(savedRegisters + offsetOfFPR(fpr()), result); + ASSERT(!addend()); + return; + } + + switch (kind()) { + case Register: + // B3 used some register that we don't know about! + dataLog("Unrecognized location: ", *this, "\n"); + RELEASE_ASSERT_NOT_REACHED(); + return; + + case Indirect: + if (RegisterSet::stackRegisters().get(gpr())) { + // The stack register is already recovered into result. 
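+ // Either way, the Indirect case ends with a load through the recovered base
+ // register at the recorded offset; the only difference is whether the base comes
+ // from the stack-register recovery above or from the save buffer below.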
+ jit.load64(MacroAssembler::Address(result, offset()), result); + return; + } + + jit.load64(savedRegisters + offsetOfGPR(gpr()), result); + jit.load64(MacroAssembler::Address(result, offset()), result); + return; + + case Constant: + jit.move(MacroAssembler::TrustedImm64(constant()), result); + return; + + case Unprocessed: + RELEASE_ASSERT_NOT_REACHED(); + return; + } + + RELEASE_ASSERT_NOT_REACHED(); +} + +GPRReg Location::directGPR() const +{ + RELEASE_ASSERT(!addend()); + return gpr(); +} + +} } // namespace JSC::FTL + +namespace WTF { + +using namespace JSC::FTL; + +void printInternal(PrintStream& out, JSC::FTL::Location::Kind kind) +{ + switch (kind) { + case Location::Unprocessed: + out.print("Unprocessed"); + return; + case Location::Register: + out.print("Register"); + return; + case Location::Indirect: + out.print("Indirect"); + return; + case Location::Constant: + out.print("Constant"); + return; + } + RELEASE_ASSERT_NOT_REACHED(); +} + +} // namespace WTF + +#endif // ENABLE(FTL_JIT) + diff --git a/Source/JavaScriptCore/ftl/FTLLocation.h b/Source/JavaScriptCore/ftl/FTLLocation.h new file mode 100644 index 000000000..11868dd60 --- /dev/null +++ b/Source/JavaScriptCore/ftl/FTLLocation.h @@ -0,0 +1,217 @@ +/* + * Copyright (C) 2013-2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef FTLLocation_h +#define FTLLocation_h + +#if ENABLE(FTL_JIT) + +#include "DFGCommon.h" +#include "FPRInfo.h" +#include "GPRInfo.h" +#include "Reg.h" +#include <wtf/HashMap.h> + +namespace JSC { + +namespace B3 { +class ValueRep; +} // namespace B3 + +namespace FTL { + +class Location { +public: + enum Kind { + Unprocessed, + Register, + Indirect, + Constant + }; + + Location() + : m_kind(Unprocessed) + { + u.constant = 0; + } + + Location(WTF::HashTableDeletedValueType) + : m_kind(Unprocessed) + { + u.constant = 1; + } + + static Location forRegister(Reg reg, int32_t addend) + { + Location result; + result.m_kind = Register; + result.u.variable.regIndex = reg.index(); + result.u.variable.offset = addend; + return result; + } + + static Location forIndirect(Reg reg, int32_t offset) + { + Location result; + result.m_kind = Indirect; + result.u.variable.regIndex = reg.index(); + result.u.variable.offset = offset; + return result; + } + + static Location forConstant(int64_t constant) + { + Location result; + result.m_kind = Constant; + result.u.constant = constant; + return result; + } + + static Location forValueRep(const B3::ValueRep&); + + Kind kind() const { return m_kind; } + + bool hasReg() const { return kind() == Register || kind() == Indirect; } + Reg reg() const + { + ASSERT(hasReg()); + return Reg::fromIndex(u.variable.regIndex); + } + + bool hasOffset() const { return kind() == Indirect; } + int32_t offset() const + { + ASSERT(hasOffset()); + return u.variable.offset; + } + + bool hasAddend() const { return kind() == Register; } + int32_t addend() const + { + ASSERT(hasAddend()); + return u.variable.offset; + } + + bool hasConstant() const { return kind() == Constant; } + int64_t constant() const + { + ASSERT(hasConstant()); + return u.constant; + } + + explicit operator bool() const { return kind() != Unprocessed || u.variable.offset; } + + bool operator!() const { return !static_cast<bool>(*this); } + + bool isHashTableDeletedValue() const { return kind() == Unprocessed && u.variable.offset; } + + bool operator==(const Location& other) const + { + return m_kind == other.m_kind + && u.constant == other.u.constant; + } + + unsigned hash() const + { + unsigned result = m_kind; + + switch (kind()) { + case Unprocessed: + result ^= u.variable.offset; + break; + + case Register: + result ^= u.variable.regIndex; + break; + + case Indirect: + result ^= u.variable.regIndex; + result ^= u.variable.offset; + break; + + case Constant: + result ^= WTF::IntHash<int64_t>::hash(u.constant); + break; + } + + return WTF::IntHash<unsigned>::hash(result); + } + + void dump(PrintStream&) const; + + bool isGPR() const; + bool involvesGPR() const; + GPRReg gpr() const; + GPRReg directGPR() const; // Get the GPR and assert that there is no addend. + + bool isFPR() const; + FPRReg fpr() const; + + // Assuming that all registers are saved to the savedRegisters buffer according + // to FTLSaveRestore convention, this loads the value into the given register. + // The code that this generates isn't exactly super fast. This assumes that FP + // and SP contain the same values that they would have contained in the original + // frame, or that you've done one or more canonically formed calls (i.e. can + // restore the FP by following the call frame linked list numFramesToPop times, + // and SP can be recovered by popping FP numFramesToPop-1 times and adding 16). 
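+ // For example, recovering a value that lived in the caller's caller's frame
+ // pointer (numFramesToPop == 2) emits, roughly:
+ //     move    fp, result
+ //     loadPtr [result], result    // pop one frame
+ //     loadPtr [result], result    // the register is fp, so follow the link once more
+ // whereas recovering the corresponding SP replaces the final load with
+ // addPtr 2 * sizeof(void*), result.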
+ void restoreInto(MacroAssembler&, char* savedRegisters, GPRReg result, unsigned numFramesToPop = 0) const; + +private: + Kind m_kind; + union { + int64_t constant; + struct { + unsigned regIndex; + int32_t offset; + } variable; + } u; +}; + +struct LocationHash { + static unsigned hash(const Location& key) { return key.hash(); } + static bool equal(const Location& a, const Location& b) { return a == b; } + static const bool safeToCompareToEmptyOrDeleted = true; +}; + +} } // namespace JSC::FTL + +namespace WTF { + +void printInternal(PrintStream&, JSC::FTL::Location::Kind); + +template<typename T> struct DefaultHash; +template<> struct DefaultHash<JSC::FTL::Location> { + typedef JSC::FTL::LocationHash Hash; +}; + +template<typename T> struct HashTraits; +template<> struct HashTraits<JSC::FTL::Location> : SimpleClassHashTraits<JSC::FTL::Location> { }; + +} // namespace WTF + +#endif // ENABLE(FTL_JIT) + +#endif // FTLLocation_h + diff --git a/Source/JavaScriptCore/ftl/FTLLowerDFGToB3.cpp b/Source/JavaScriptCore/ftl/FTLLowerDFGToB3.cpp new file mode 100644 index 000000000..c0d04c549 --- /dev/null +++ b/Source/JavaScriptCore/ftl/FTLLowerDFGToB3.cpp @@ -0,0 +1,10668 @@ +/* + * Copyright (C) 2013-2016 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "config.h" +#include "FTLLowerDFGToB3.h" + +#if ENABLE(FTL_JIT) + +#include "AirGenerationContext.h" +#include "AllowMacroScratchRegisterUsage.h" +#include "B3StackmapGenerationParams.h" +#include "CallFrameShuffler.h" +#include "CodeBlockWithJITType.h" +#include "DFGAbstractInterpreterInlines.h" +#include "DFGDominators.h" +#include "DFGInPlaceAbstractState.h" +#include "DFGOSRAvailabilityAnalysisPhase.h" +#include "DFGOSRExitFuzz.h" +#include "DirectArguments.h" +#include "FTLAbstractHeapRepository.h" +#include "FTLAvailableRecovery.h" +#include "FTLExceptionTarget.h" +#include "FTLForOSREntryJITCode.h" +#include "FTLFormattedValue.h" +#include "FTLLazySlowPathCall.h" +#include "FTLLoweredNodeValue.h" +#include "FTLOperations.h" +#include "FTLOutput.h" +#include "FTLPatchpointExceptionHandle.h" +#include "FTLThunks.h" +#include "FTLWeightedTarget.h" +#include "JITAddGenerator.h" +#include "JITBitAndGenerator.h" +#include "JITBitOrGenerator.h" +#include "JITBitXorGenerator.h" +#include "JITDivGenerator.h" +#include "JITInlineCacheGenerator.h" +#include "JITLeftShiftGenerator.h" +#include "JITMulGenerator.h" +#include "JITRightShiftGenerator.h" +#include "JITSubGenerator.h" +#include "JSCInlines.h" +#include "JSGeneratorFunction.h" +#include "JSLexicalEnvironment.h" +#include "OperandsInlines.h" +#include "ScopedArguments.h" +#include "ScopedArgumentsTable.h" +#include "ScratchRegisterAllocator.h" +#include "SetupVarargsFrame.h" +#include "VirtualRegister.h" +#include "Watchdog.h" +#include <atomic> +#if !OS(WINDOWS) +#include <dlfcn.h> +#endif +#include <unordered_set> +#include <wtf/Box.h> +#include <wtf/ProcessID.h> + +namespace JSC { namespace FTL { + +using namespace B3; +using namespace DFG; + +namespace { + +std::atomic<int> compileCounter; + +#if ASSERT_DISABLED +NO_RETURN_DUE_TO_CRASH static void ftlUnreachable() +{ + CRASH(); +} +#else +NO_RETURN_DUE_TO_CRASH static void ftlUnreachable( + CodeBlock* codeBlock, BlockIndex blockIndex, unsigned nodeIndex) +{ + dataLog("Crashing in thought-to-be-unreachable FTL-generated code for ", pointerDump(codeBlock), " at basic block #", blockIndex); + if (nodeIndex != UINT_MAX) + dataLog(", node @", nodeIndex); + dataLog(".\n"); + CRASH(); +} +#endif + +// Using this instead of typeCheck() helps to reduce the load on B3, by creating +// significantly less dead code. 
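+ // The macros below first ask the abstract interpreter whether the check is even
+ // needed; if it is not, they emit nothing, so neither the branch nor its OSR exit
+ // path ever reaches B3.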
+#define FTL_TYPE_CHECK_WITH_EXIT_KIND(exitKind, lowValue, highValue, typesPassedThrough, failCondition) do { \ + FormattedValue _ftc_lowValue = (lowValue); \ + Edge _ftc_highValue = (highValue); \ + SpeculatedType _ftc_typesPassedThrough = (typesPassedThrough); \ + if (!m_interpreter.needsTypeCheck(_ftc_highValue, _ftc_typesPassedThrough)) \ + break; \ + typeCheck(_ftc_lowValue, _ftc_highValue, _ftc_typesPassedThrough, (failCondition), exitKind); \ + } while (false) + +#define FTL_TYPE_CHECK(lowValue, highValue, typesPassedThrough, failCondition) \ + FTL_TYPE_CHECK_WITH_EXIT_KIND(BadType, lowValue, highValue, typesPassedThrough, failCondition) + +class LowerDFGToB3 { + WTF_MAKE_NONCOPYABLE(LowerDFGToB3); +public: + LowerDFGToB3(State& state) + : m_graph(state.graph) + , m_ftlState(state) + , m_out(state) + , m_proc(*state.proc) + , m_state(state.graph) + , m_interpreter(state.graph, m_state) + { + } + + void lower() + { + State* state = &m_ftlState; + + CString name; + if (verboseCompilationEnabled()) { + name = toCString( + "jsBody_", ++compileCounter, "_", codeBlock()->inferredName(), + "_", codeBlock()->hash()); + } else + name = "jsBody"; + + m_graph.ensureDominators(); + + if (verboseCompilationEnabled()) + dataLog("Function ready, beginning lowering.\n"); + + m_out.initialize(m_heaps); + + // We use prologue frequency for all of the initialization code. + m_out.setFrequency(1); + + m_prologue = m_out.newBlock(); + LBasicBlock stackOverflow = m_out.newBlock(); + m_handleExceptions = m_out.newBlock(); + + LBasicBlock checkArguments = m_out.newBlock(); + + for (BlockIndex blockIndex = 0; blockIndex < m_graph.numBlocks(); ++blockIndex) { + m_highBlock = m_graph.block(blockIndex); + if (!m_highBlock) + continue; + m_out.setFrequency(m_highBlock->executionCount); + m_blocks.add(m_highBlock, m_out.newBlock()); + } + + // Back to prologue frequency for any bocks that get sneakily created in the initialization code. + m_out.setFrequency(1); + + m_out.appendTo(m_prologue, stackOverflow); + m_out.initializeConstants(m_proc, m_prologue); + createPhiVariables(); + + size_t sizeOfCaptured = sizeof(JSValue) * m_graph.m_nextMachineLocal; + B3::SlotBaseValue* capturedBase = m_out.lockedStackSlot(sizeOfCaptured); + m_captured = m_out.add(capturedBase, m_out.constIntPtr(sizeOfCaptured)); + state->capturedValue = capturedBase->slot(); + + auto preOrder = m_graph.blocksInPreOrder(); + + // We should not create any alloca's after this point, since they will cease to + // be mem2reg candidates. + + m_callFrame = m_out.framePointer(); + m_tagTypeNumber = m_out.constInt64(TagTypeNumber); + m_tagMask = m_out.constInt64(TagMask); + + // Make sure that B3 knows that we really care about the mask registers. This forces the + // constants to be materialized in registers. + m_proc.addFastConstant(m_tagTypeNumber->key()); + m_proc.addFastConstant(m_tagMask->key()); + + m_out.storePtr(m_out.constIntPtr(codeBlock()), addressFor(JSStack::CodeBlock)); + + m_out.branch( + didOverflowStack(), rarely(stackOverflow), usually(checkArguments)); + + m_out.appendTo(stackOverflow, m_handleExceptions); + m_out.call(m_out.voidType, m_out.operation(operationThrowStackOverflowError), m_callFrame, m_out.constIntPtr(codeBlock())); + m_out.patchpoint(Void)->setGenerator( + [=] (CCallHelpers& jit, const StackmapGenerationParams&) { + // We are terminal, so we can clobber everything. That's why we don't claim to + // clobber scratch. 
+ AllowMacroScratchRegisterUsage allowScratch(jit); + + jit.copyCalleeSavesToVMCalleeSavesBuffer(); + jit.move(CCallHelpers::TrustedImmPtr(jit.vm()), GPRInfo::argumentGPR0); + jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1); + CCallHelpers::Call call = jit.call(); + jit.jumpToExceptionHandler(); + + jit.addLinkTask( + [=] (LinkBuffer& linkBuffer) { + linkBuffer.link(call, FunctionPtr(lookupExceptionHandlerFromCallerFrame)); + }); + }); + m_out.unreachable(); + + m_out.appendTo(m_handleExceptions, checkArguments); + Box<CCallHelpers::Label> exceptionHandler = state->exceptionHandler; + m_out.patchpoint(Void)->setGenerator( + [=] (CCallHelpers& jit, const StackmapGenerationParams&) { + CCallHelpers::Jump jump = jit.jump(); + jit.addLinkTask( + [=] (LinkBuffer& linkBuffer) { + linkBuffer.link(jump, linkBuffer.locationOf(*exceptionHandler)); + }); + }); + m_out.unreachable(); + + m_out.appendTo(checkArguments, lowBlock(m_graph.block(0))); + availabilityMap().clear(); + availabilityMap().m_locals = Operands<Availability>(codeBlock()->numParameters(), 0); + for (unsigned i = codeBlock()->numParameters(); i--;) { + availabilityMap().m_locals.argument(i) = + Availability(FlushedAt(FlushedJSValue, virtualRegisterForArgument(i))); + } + m_node = nullptr; + m_origin = NodeOrigin(CodeOrigin(0), CodeOrigin(0), true); + for (unsigned i = codeBlock()->numParameters(); i--;) { + Node* node = m_graph.m_arguments[i]; + VirtualRegister operand = virtualRegisterForArgument(i); + + LValue jsValue = m_out.load64(addressFor(operand)); + + if (node) { + DFG_ASSERT(m_graph, node, operand == node->stackAccessData()->machineLocal); + + // This is a hack, but it's an effective one. It allows us to do CSE on the + // primordial load of arguments. This assumes that the GetLocal that got put in + // place of the original SetArgument doesn't have any effects before it. This + // should hold true. + m_loadedArgumentValues.add(node, jsValue); + } + + switch (m_graph.m_argumentFormats[i]) { + case FlushedInt32: + speculate(BadType, jsValueValue(jsValue), node, isNotInt32(jsValue)); + break; + case FlushedBoolean: + speculate(BadType, jsValueValue(jsValue), node, isNotBoolean(jsValue)); + break; + case FlushedCell: + speculate(BadType, jsValueValue(jsValue), node, isNotCell(jsValue)); + break; + case FlushedJSValue: + break; + default: + DFG_CRASH(m_graph, node, "Bad flush format for argument"); + break; + } + } + m_out.jump(lowBlock(m_graph.block(0))); + + for (DFG::BasicBlock* block : preOrder) + compileBlock(block); + + // Make sure everything is decorated. This does a bunch of deferred decorating. This has + // to happen last because our abstract heaps are generated lazily. They have to be + // generated lazily because we have an infiniten number of numbered, indexed, and + // absolute heaps. We only become aware of the ones we actually mention while lowering. + m_heaps.computeRangesAndDecorateInstructions(); + + // We create all Phi's up front, but we may then decide not to compile the basic block + // that would have contained one of them. So this creates orphans, which triggers B3 + // validation failures. Calling this fixes the issue. + // + // Note that you should avoid the temptation to make this call conditional upon + // validation being enabled. B3 makes no guarantees of any kind of correctness when + // dealing with IR that would have failed validation. 
For example, it would be valid to + // write a B3 phase that so aggressively assumes the lack of orphans that it would crash + // if any orphans were around. We might even have such phases already. + m_proc.deleteOrphans(); + + // We put the blocks into the B3 procedure in a super weird order. Now we reorder them. + m_out.applyBlockOrder(); + } + +private: + + void createPhiVariables() + { + for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) { + DFG::BasicBlock* block = m_graph.block(blockIndex); + if (!block) + continue; + for (unsigned nodeIndex = block->size(); nodeIndex--;) { + Node* node = block->at(nodeIndex); + if (node->op() != DFG::Phi) + continue; + LType type; + switch (node->flags() & NodeResultMask) { + case NodeResultDouble: + type = m_out.doubleType; + break; + case NodeResultInt32: + type = m_out.int32; + break; + case NodeResultInt52: + type = m_out.int64; + break; + case NodeResultBoolean: + type = m_out.boolean; + break; + case NodeResultJS: + type = m_out.int64; + break; + default: + DFG_CRASH(m_graph, node, "Bad Phi node result type"); + break; + } + m_phis.add(node, m_proc.add<Value>(B3::Phi, type, Origin(node))); + } + } + } + + void compileBlock(DFG::BasicBlock* block) + { + if (!block) + return; + + if (verboseCompilationEnabled()) + dataLog("Compiling block ", *block, "\n"); + + m_highBlock = block; + + // Make sure that any blocks created while lowering code in the high block have the frequency of + // the high block. This is appropriate because B3 doesn't need precise frequencies. It just needs + // something roughly approximate for things like register allocation. + m_out.setFrequency(m_highBlock->executionCount); + + LBasicBlock lowBlock = m_blocks.get(m_highBlock); + + m_nextHighBlock = 0; + for (BlockIndex nextBlockIndex = m_highBlock->index + 1; nextBlockIndex < m_graph.numBlocks(); ++nextBlockIndex) { + m_nextHighBlock = m_graph.block(nextBlockIndex); + if (m_nextHighBlock) + break; + } + m_nextLowBlock = m_nextHighBlock ? m_blocks.get(m_nextHighBlock) : 0; + + // All of this effort to find the next block gives us the ability to keep the + // generated IR in roughly program order. This ought not affect the performance + // of the generated code (since we expect B3 to reorder things) but it will + // make IR dumps easier to read. + m_out.appendTo(lowBlock, m_nextLowBlock); + + if (Options::ftlCrashes()) + m_out.trap(); + + if (!m_highBlock->cfaHasVisited) { + if (verboseCompilationEnabled()) + dataLog("Bailing because CFA didn't reach.\n"); + crash(m_highBlock->index, UINT_MAX); + return; + } + + m_availabilityCalculator.beginBlock(m_highBlock); + + m_state.reset(); + m_state.beginBasicBlock(m_highBlock); + + for (m_nodeIndex = 0; m_nodeIndex < m_highBlock->size(); ++m_nodeIndex) { + if (!compileNode(m_nodeIndex)) + break; + } + } + + void safelyInvalidateAfterTermination() + { + if (verboseCompilationEnabled()) + dataLog("Bailing.\n"); + crash(); + + // Invalidate dominated blocks. Under normal circumstances we would expect + // them to be invalidated already. But you can have the CFA become more + // precise over time because the structures of objects change on the main + // thread. Failing to do this would result in weird crashes due to a value + // being used but not defined. Race conditions FTW! 
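+ // Clearing cfaHasVisited on every block dominated by the one that just proved
+ // unreachable makes compileBlock() bail on those blocks as well, instead of
+ // lowering code that uses values that were never defined.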
+ for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) { + DFG::BasicBlock* target = m_graph.block(blockIndex); + if (!target) + continue; + if (m_graph.m_dominators->dominates(m_highBlock, target)) { + if (verboseCompilationEnabled()) + dataLog("Block ", *target, " will bail also.\n"); + target->cfaHasVisited = false; + } + } + } + + bool compileNode(unsigned nodeIndex) + { + if (!m_state.isValid()) { + safelyInvalidateAfterTermination(); + return false; + } + + m_node = m_highBlock->at(nodeIndex); + m_origin = m_node->origin; + m_out.setOrigin(m_node); + + if (verboseCompilationEnabled()) + dataLog("Lowering ", m_node, "\n"); + + m_availableRecoveries.resize(0); + + m_interpreter.startExecuting(); + + switch (m_node->op()) { + case DFG::Upsilon: + compileUpsilon(); + break; + case DFG::Phi: + compilePhi(); + break; + case JSConstant: + break; + case DoubleConstant: + compileDoubleConstant(); + break; + case Int52Constant: + compileInt52Constant(); + break; + case DoubleRep: + compileDoubleRep(); + break; + case DoubleAsInt32: + compileDoubleAsInt32(); + break; + case DFG::ValueRep: + compileValueRep(); + break; + case Int52Rep: + compileInt52Rep(); + break; + case ValueToInt32: + compileValueToInt32(); + break; + case BooleanToNumber: + compileBooleanToNumber(); + break; + case ExtractOSREntryLocal: + compileExtractOSREntryLocal(); + break; + case GetStack: + compileGetStack(); + break; + case PutStack: + compilePutStack(); + break; + case DFG::Check: + compileNoOp(); + break; + case ToThis: + compileToThis(); + break; + case ValueAdd: + compileValueAdd(); + break; + case StrCat: + compileStrCat(); + break; + case ArithAdd: + case ArithSub: + compileArithAddOrSub(); + break; + case ArithClz32: + compileArithClz32(); + break; + case ArithMul: + compileArithMul(); + break; + case ArithDiv: + compileArithDiv(); + break; + case ArithMod: + compileArithMod(); + break; + case ArithMin: + case ArithMax: + compileArithMinOrMax(); + break; + case ArithAbs: + compileArithAbs(); + break; + case ArithSin: + compileArithSin(); + break; + case ArithCos: + compileArithCos(); + break; + case ArithPow: + compileArithPow(); + break; + case ArithRandom: + compileArithRandom(); + break; + case ArithRound: + compileArithRound(); + break; + case ArithFloor: + compileArithFloor(); + break; + case ArithCeil: + compileArithCeil(); + break; + case ArithSqrt: + compileArithSqrt(); + break; + case ArithLog: + compileArithLog(); + break; + case ArithFRound: + compileArithFRound(); + break; + case ArithNegate: + compileArithNegate(); + break; + case DFG::BitAnd: + compileBitAnd(); + break; + case DFG::BitOr: + compileBitOr(); + break; + case DFG::BitXor: + compileBitXor(); + break; + case BitRShift: + compileBitRShift(); + break; + case BitLShift: + compileBitLShift(); + break; + case BitURShift: + compileBitURShift(); + break; + case UInt32ToNumber: + compileUInt32ToNumber(); + break; + case CheckStructure: + compileCheckStructure(); + break; + case CheckCell: + compileCheckCell(); + break; + case CheckNotEmpty: + compileCheckNotEmpty(); + break; + case CheckBadCell: + compileCheckBadCell(); + break; + case CheckIdent: + compileCheckIdent(); + break; + case GetExecutable: + compileGetExecutable(); + break; + case ArrayifyToStructure: + compileArrayifyToStructure(); + break; + case PutStructure: + compilePutStructure(); + break; + case GetById: + case GetByIdFlush: + compileGetById(); + break; + case In: + compileIn(); + break; + case PutById: + case PutByIdDirect: + case PutByIdFlush: + compilePutById(); 
+ break; + case PutGetterById: + case PutSetterById: + compilePutAccessorById(); + break; + case PutGetterSetterById: + compilePutGetterSetterById(); + break; + case PutGetterByVal: + case PutSetterByVal: + compilePutAccessorByVal(); + break; + case GetButterfly: + compileGetButterfly(); + break; + case GetButterflyReadOnly: + compileGetButterflyReadOnly(); + break; + case ConstantStoragePointer: + compileConstantStoragePointer(); + break; + case GetIndexedPropertyStorage: + compileGetIndexedPropertyStorage(); + break; + case CheckArray: + compileCheckArray(); + break; + case GetArrayLength: + compileGetArrayLength(); + break; + case CheckInBounds: + compileCheckInBounds(); + break; + case GetByVal: + compileGetByVal(); + break; + case GetMyArgumentByVal: + compileGetMyArgumentByVal(); + break; + case PutByVal: + case PutByValAlias: + case PutByValDirect: + compilePutByVal(); + break; + case ArrayPush: + compileArrayPush(); + break; + case ArrayPop: + compileArrayPop(); + break; + case CreateActivation: + compileCreateActivation(); + break; + case NewFunction: + case NewArrowFunction: + case NewGeneratorFunction: + compileNewFunction(); + break; + case CreateDirectArguments: + compileCreateDirectArguments(); + break; + case CreateScopedArguments: + compileCreateScopedArguments(); + break; + case CreateClonedArguments: + compileCreateClonedArguments(); + break; + case NewObject: + compileNewObject(); + break; + case NewArray: + compileNewArray(); + break; + case NewArrayBuffer: + compileNewArrayBuffer(); + break; + case NewArrayWithSize: + compileNewArrayWithSize(); + break; + case NewTypedArray: + compileNewTypedArray(); + break; + case GetTypedArrayByteOffset: + compileGetTypedArrayByteOffset(); + break; + case AllocatePropertyStorage: + compileAllocatePropertyStorage(); + break; + case ReallocatePropertyStorage: + compileReallocatePropertyStorage(); + break; + case ToString: + case CallStringConstructor: + compileToStringOrCallStringConstructor(); + break; + case ToPrimitive: + compileToPrimitive(); + break; + case MakeRope: + compileMakeRope(); + break; + case StringCharAt: + compileStringCharAt(); + break; + case StringCharCodeAt: + compileStringCharCodeAt(); + break; + case StringFromCharCode: + compileStringFromCharCode(); + break; + case GetByOffset: + case GetGetterSetterByOffset: + compileGetByOffset(); + break; + case GetGetter: + compileGetGetter(); + break; + case GetSetter: + compileGetSetter(); + break; + case MultiGetByOffset: + compileMultiGetByOffset(); + break; + case PutByOffset: + compilePutByOffset(); + break; + case MultiPutByOffset: + compileMultiPutByOffset(); + break; + case GetGlobalVar: + case GetGlobalLexicalVariable: + compileGetGlobalVariable(); + break; + case PutGlobalVariable: + compilePutGlobalVariable(); + break; + case NotifyWrite: + compileNotifyWrite(); + break; + case GetCallee: + compileGetCallee(); + break; + case GetArgumentCount: + compileGetArgumentCount(); + break; + case GetScope: + compileGetScope(); + break; + case SkipScope: + compileSkipScope(); + break; + case GetClosureVar: + compileGetClosureVar(); + break; + case PutClosureVar: + compilePutClosureVar(); + break; + case GetFromArguments: + compileGetFromArguments(); + break; + case PutToArguments: + compilePutToArguments(); + break; + case CompareEq: + compileCompareEq(); + break; + case CompareStrictEq: + compileCompareStrictEq(); + break; + case CompareLess: + compileCompareLess(); + break; + case CompareLessEq: + compileCompareLessEq(); + break; + case CompareGreater: + 
compileCompareGreater(); + break; + case CompareGreaterEq: + compileCompareGreaterEq(); + break; + case LogicalNot: + compileLogicalNot(); + break; + case Call: + case TailCallInlinedCaller: + case Construct: + compileCallOrConstruct(); + break; + case TailCall: + compileTailCall(); + break; + case CallVarargs: + case CallForwardVarargs: + case TailCallVarargs: + case TailCallVarargsInlinedCaller: + case TailCallForwardVarargs: + case TailCallForwardVarargsInlinedCaller: + case ConstructVarargs: + case ConstructForwardVarargs: + compileCallOrConstructVarargs(); + break; + case LoadVarargs: + compileLoadVarargs(); + break; + case ForwardVarargs: + compileForwardVarargs(); + break; + case DFG::Jump: + compileJump(); + break; + case DFG::Branch: + compileBranch(); + break; + case DFG::Switch: + compileSwitch(); + break; + case DFG::Return: + compileReturn(); + break; + case ForceOSRExit: + compileForceOSRExit(); + break; + case Throw: + case ThrowReferenceError: + compileThrow(); + break; + case InvalidationPoint: + compileInvalidationPoint(); + break; + case IsUndefined: + compileIsUndefined(); + break; + case IsBoolean: + compileIsBoolean(); + break; + case IsNumber: + compileIsNumber(); + break; + case IsString: + compileIsString(); + break; + case IsObject: + compileIsObject(); + break; + case IsObjectOrNull: + compileIsObjectOrNull(); + break; + case IsFunction: + compileIsFunction(); + break; + case TypeOf: + compileTypeOf(); + break; + case CheckTypeInfoFlags: + compileCheckTypeInfoFlags(); + break; + case OverridesHasInstance: + compileOverridesHasInstance(); + break; + case InstanceOf: + compileInstanceOf(); + break; + case InstanceOfCustom: + compileInstanceOfCustom(); + break; + case CountExecution: + compileCountExecution(); + break; + case StoreBarrier: + compileStoreBarrier(); + break; + case HasIndexedProperty: + compileHasIndexedProperty(); + break; + case HasGenericProperty: + compileHasGenericProperty(); + break; + case HasStructureProperty: + compileHasStructureProperty(); + break; + case GetDirectPname: + compileGetDirectPname(); + break; + case GetEnumerableLength: + compileGetEnumerableLength(); + break; + case GetPropertyEnumerator: + compileGetPropertyEnumerator(); + break; + case GetEnumeratorStructurePname: + compileGetEnumeratorStructurePname(); + break; + case GetEnumeratorGenericPname: + compileGetEnumeratorGenericPname(); + break; + case ToIndexString: + compileToIndexString(); + break; + case CheckStructureImmediate: + compileCheckStructureImmediate(); + break; + case MaterializeNewObject: + compileMaterializeNewObject(); + break; + case MaterializeCreateActivation: + compileMaterializeCreateActivation(); + break; + case CheckWatchdogTimer: + compileCheckWatchdogTimer(); + break; + case CopyRest: + compileCopyRest(); + break; + case GetRestLength: + compileGetRestLength(); + break; + case RegExpExec: + compileRegExpExec(); + break; + case RegExpTest: + compileRegExpTest(); + break; + case NewRegexp: + compileNewRegexp(); + break; + case StringReplace: + compileStringReplace(); + break; + + case PhantomLocal: + case LoopHint: + case MovHint: + case ZombieHint: + case ExitOK: + case PhantomNewObject: + case PhantomNewFunction: + case PhantomNewGeneratorFunction: + case PhantomCreateActivation: + case PhantomDirectArguments: + case PhantomClonedArguments: + case PutHint: + case BottomValue: + case KillStack: + break; + default: + DFG_CRASH(m_graph, m_node, "Unrecognized node in FTL backend"); + break; + } + + if (m_node->isTerminal()) + return false; + + if 
(!m_state.isValid()) { + safelyInvalidateAfterTermination(); + return false; + } + + m_availabilityCalculator.executeNode(m_node); + m_interpreter.executeEffects(nodeIndex); + + return true; + } + + void compileUpsilon() + { + LValue upsilonValue = nullptr; + switch (m_node->child1().useKind()) { + case DoubleRepUse: + upsilonValue = lowDouble(m_node->child1()); + break; + case Int32Use: + case KnownInt32Use: + upsilonValue = lowInt32(m_node->child1()); + break; + case Int52RepUse: + upsilonValue = lowInt52(m_node->child1()); + break; + case BooleanUse: + case KnownBooleanUse: + upsilonValue = lowBoolean(m_node->child1()); + break; + case CellUse: + case KnownCellUse: + upsilonValue = lowCell(m_node->child1()); + break; + case UntypedUse: + upsilonValue = lowJSValue(m_node->child1()); + break; + default: + DFG_CRASH(m_graph, m_node, "Bad use kind"); + break; + } + ValueFromBlock upsilon = m_out.anchor(upsilonValue); + LValue phiNode = m_phis.get(m_node->phi()); + m_out.addIncomingToPhi(phiNode, upsilon); + } + + void compilePhi() + { + LValue phi = m_phis.get(m_node); + m_out.m_block->append(phi); + + switch (m_node->flags() & NodeResultMask) { + case NodeResultDouble: + setDouble(phi); + break; + case NodeResultInt32: + setInt32(phi); + break; + case NodeResultInt52: + setInt52(phi); + break; + case NodeResultBoolean: + setBoolean(phi); + break; + case NodeResultJS: + setJSValue(phi); + break; + default: + DFG_CRASH(m_graph, m_node, "Bad use kind"); + break; + } + } + + void compileDoubleConstant() + { + setDouble(m_out.constDouble(m_node->asNumber())); + } + + void compileInt52Constant() + { + int64_t value = m_node->asMachineInt(); + + setInt52(m_out.constInt64(value << JSValue::int52ShiftAmount)); + setStrictInt52(m_out.constInt64(value)); + } + + void compileDoubleRep() + { + switch (m_node->child1().useKind()) { + case RealNumberUse: { + LValue value = lowJSValue(m_node->child1(), ManualOperandSpeculation); + + LValue doubleValue = unboxDouble(value); + + LBasicBlock intCase = m_out.newBlock(); + LBasicBlock continuation = m_out.newBlock(); + + ValueFromBlock fastResult = m_out.anchor(doubleValue); + m_out.branch( + m_out.doubleEqual(doubleValue, doubleValue), + usually(continuation), rarely(intCase)); + + LBasicBlock lastNext = m_out.appendTo(intCase, continuation); + + FTL_TYPE_CHECK( + jsValueValue(value), m_node->child1(), SpecBytecodeRealNumber, + isNotInt32(value, provenType(m_node->child1()) & ~SpecFullDouble)); + ValueFromBlock slowResult = m_out.anchor(m_out.intToDouble(unboxInt32(value))); + m_out.jump(continuation); + + m_out.appendTo(continuation, lastNext); + + setDouble(m_out.phi(m_out.doubleType, fastResult, slowResult)); + return; + } + + case NotCellUse: + case NumberUse: { + bool shouldConvertNonNumber = m_node->child1().useKind() == NotCellUse; + + LValue value = lowJSValue(m_node->child1(), ManualOperandSpeculation); + + LBasicBlock intCase = m_out.newBlock(); + LBasicBlock doubleTesting = m_out.newBlock(); + LBasicBlock doubleCase = m_out.newBlock(); + LBasicBlock nonDoubleCase = m_out.newBlock(); + LBasicBlock continuation = m_out.newBlock(); + + m_out.branch( + isNotInt32(value, provenType(m_node->child1())), + unsure(doubleTesting), unsure(intCase)); + + LBasicBlock lastNext = m_out.appendTo(intCase, doubleTesting); + + ValueFromBlock intToDouble = m_out.anchor( + m_out.intToDouble(unboxInt32(value))); + m_out.jump(continuation); + + m_out.appendTo(doubleTesting, doubleCase); + LValue valueIsNumber = isNumber(value, provenType(m_node->child1())); + 
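+ // doubleCase unboxes the double; non-numbers fall through to nonDoubleCase below, where NotCellUse converts them and NumberUse fails speculation.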
m_out.branch(valueIsNumber, usually(doubleCase), rarely(nonDoubleCase)); + + m_out.appendTo(doubleCase, nonDoubleCase); + ValueFromBlock unboxedDouble = m_out.anchor(unboxDouble(value)); + m_out.jump(continuation); + + if (shouldConvertNonNumber) { + LBasicBlock undefinedCase = m_out.newBlock(); + LBasicBlock testNullCase = m_out.newBlock(); + LBasicBlock nullCase = m_out.newBlock(); + LBasicBlock testBooleanTrueCase = m_out.newBlock(); + LBasicBlock convertBooleanTrueCase = m_out.newBlock(); + LBasicBlock convertBooleanFalseCase = m_out.newBlock(); + + m_out.appendTo(nonDoubleCase, undefinedCase); + LValue valueIsUndefined = m_out.equal(value, m_out.constInt64(ValueUndefined)); + m_out.branch(valueIsUndefined, unsure(undefinedCase), unsure(testNullCase)); + + m_out.appendTo(undefinedCase, testNullCase); + ValueFromBlock convertedUndefined = m_out.anchor(m_out.constDouble(PNaN)); + m_out.jump(continuation); + + m_out.appendTo(testNullCase, nullCase); + LValue valueIsNull = m_out.equal(value, m_out.constInt64(ValueNull)); + m_out.branch(valueIsNull, unsure(nullCase), unsure(testBooleanTrueCase)); + + m_out.appendTo(nullCase, testBooleanTrueCase); + ValueFromBlock convertedNull = m_out.anchor(m_out.constDouble(0)); + m_out.jump(continuation); + + m_out.appendTo(testBooleanTrueCase, convertBooleanTrueCase); + LValue valueIsBooleanTrue = m_out.equal(value, m_out.constInt64(ValueTrue)); + m_out.branch(valueIsBooleanTrue, unsure(convertBooleanTrueCase), unsure(convertBooleanFalseCase)); + + m_out.appendTo(convertBooleanTrueCase, convertBooleanFalseCase); + ValueFromBlock convertedTrue = m_out.anchor(m_out.constDouble(1)); + m_out.jump(continuation); + + m_out.appendTo(convertBooleanFalseCase, continuation); + + LValue valueIsNotBooleanFalse = m_out.notEqual(value, m_out.constInt64(ValueFalse)); + FTL_TYPE_CHECK(jsValueValue(value), m_node->child1(), ~SpecCell, valueIsNotBooleanFalse); + ValueFromBlock convertedFalse = m_out.anchor(m_out.constDouble(0)); + m_out.jump(continuation); + + m_out.appendTo(continuation, lastNext); + setDouble(m_out.phi(m_out.doubleType, intToDouble, unboxedDouble, convertedUndefined, convertedNull, convertedTrue, convertedFalse)); + return; + } + m_out.appendTo(nonDoubleCase, continuation); + FTL_TYPE_CHECK(jsValueValue(value), m_node->child1(), SpecBytecodeNumber, m_out.booleanTrue); + m_out.unreachable(); + + m_out.appendTo(continuation, lastNext); + + setDouble(m_out.phi(m_out.doubleType, intToDouble, unboxedDouble)); + return; + } + + case Int52RepUse: { + setDouble(strictInt52ToDouble(lowStrictInt52(m_node->child1()))); + return; + } + + default: + DFG_CRASH(m_graph, m_node, "Bad use kind"); + } + } + + void compileDoubleAsInt32() + { + LValue integerValue = convertDoubleToInt32(lowDouble(m_node->child1()), shouldCheckNegativeZero(m_node->arithMode())); + setInt32(integerValue); + } + + void compileValueRep() + { + switch (m_node->child1().useKind()) { + case DoubleRepUse: { + LValue value = lowDouble(m_node->child1()); + + if (m_interpreter.needsTypeCheck(m_node->child1(), ~SpecDoubleImpureNaN)) { + value = m_out.select( + m_out.doubleEqual(value, value), value, m_out.constDouble(PNaN)); + } + + setJSValue(boxDouble(value)); + return; + } + + case Int52RepUse: { + setJSValue(strictInt52ToJSValue(lowStrictInt52(m_node->child1()))); + return; + } + + default: + DFG_CRASH(m_graph, m_node, "Bad use kind"); + } + } + + void compileInt52Rep() + { + switch (m_node->child1().useKind()) { + case Int32Use: + setStrictInt52(m_out.signExt32To64(lowInt32(m_node->child1()))); 
+ return; + + case MachineIntUse: + setStrictInt52( + jsValueToStrictInt52( + m_node->child1(), lowJSValue(m_node->child1(), ManualOperandSpeculation))); + return; + + case DoubleRepMachineIntUse: + setStrictInt52( + doubleToStrictInt52( + m_node->child1(), lowDouble(m_node->child1()))); + return; + + default: + RELEASE_ASSERT_NOT_REACHED(); + } + } + + void compileValueToInt32() + { + switch (m_node->child1().useKind()) { + case Int52RepUse: + setInt32(m_out.castToInt32(lowStrictInt52(m_node->child1()))); + break; + + case DoubleRepUse: + setInt32(doubleToInt32(lowDouble(m_node->child1()))); + break; + + case NumberUse: + case NotCellUse: { + LoweredNodeValue value = m_int32Values.get(m_node->child1().node()); + if (isValid(value)) { + setInt32(value.value()); + break; + } + + value = m_jsValueValues.get(m_node->child1().node()); + if (isValid(value)) { + setInt32(numberOrNotCellToInt32(m_node->child1(), value.value())); + break; + } + + // We'll basically just get here for constants. But it's good to have this + // catch-all since we often add new representations into the mix. + setInt32( + numberOrNotCellToInt32( + m_node->child1(), + lowJSValue(m_node->child1(), ManualOperandSpeculation))); + break; + } + + default: + DFG_CRASH(m_graph, m_node, "Bad use kind"); + break; + } + } + + void compileBooleanToNumber() + { + switch (m_node->child1().useKind()) { + case BooleanUse: { + setInt32(m_out.zeroExt(lowBoolean(m_node->child1()), m_out.int32)); + return; + } + + case UntypedUse: { + LValue value = lowJSValue(m_node->child1()); + + if (!m_interpreter.needsTypeCheck(m_node->child1(), SpecBoolInt32 | SpecBoolean)) { + setInt32(m_out.bitAnd(m_out.castToInt32(value), m_out.int32One)); + return; + } + + LBasicBlock booleanCase = m_out.newBlock(); + LBasicBlock continuation = m_out.newBlock(); + + ValueFromBlock notBooleanResult = m_out.anchor(value); + m_out.branch( + isBoolean(value, provenType(m_node->child1())), + unsure(booleanCase), unsure(continuation)); + + LBasicBlock lastNext = m_out.appendTo(booleanCase, continuation); + ValueFromBlock booleanResult = m_out.anchor(m_out.bitOr( + m_out.zeroExt(unboxBoolean(value), m_out.int64), m_tagTypeNumber)); + m_out.jump(continuation); + + m_out.appendTo(continuation, lastNext); + setJSValue(m_out.phi(m_out.int64, booleanResult, notBooleanResult)); + return; + } + + default: + RELEASE_ASSERT_NOT_REACHED(); + return; + } + } + + void compileExtractOSREntryLocal() + { + EncodedJSValue* buffer = static_cast<EncodedJSValue*>( + m_ftlState.jitCode->ftlForOSREntry()->entryBuffer()->dataBuffer()); + setJSValue(m_out.load64(m_out.absolute(buffer + m_node->unlinkedLocal().toLocal()))); + } + + void compileGetStack() + { + // GetLocals arise only for captured variables and arguments. For arguments, we might have + // already loaded it. + if (LValue value = m_loadedArgumentValues.get(m_node)) { + setJSValue(value); + return; + } + + StackAccessData* data = m_node->stackAccessData(); + AbstractValue& value = m_state.variables().operand(data->local); + + DFG_ASSERT(m_graph, m_node, isConcrete(data->format)); + DFG_ASSERT(m_graph, m_node, data->format != FlushedDouble); // This just happens to not arise for GetStacks, right now. It would be trivial to support. 
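+ // If the value is known to be an int32, only the 32-bit payload needs to be loaded; otherwise load the full boxed 64-bit JSValue.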
+ + if (isInt32Speculation(value.m_type)) + setInt32(m_out.load32(payloadFor(data->machineLocal))); + else + setJSValue(m_out.load64(addressFor(data->machineLocal))); + } + + void compilePutStack() + { + StackAccessData* data = m_node->stackAccessData(); + switch (data->format) { + case FlushedJSValue: { + LValue value = lowJSValue(m_node->child1()); + m_out.store64(value, addressFor(data->machineLocal)); + break; + } + + case FlushedDouble: { + LValue value = lowDouble(m_node->child1()); + m_out.storeDouble(value, addressFor(data->machineLocal)); + break; + } + + case FlushedInt32: { + LValue value = lowInt32(m_node->child1()); + m_out.store32(value, payloadFor(data->machineLocal)); + break; + } + + case FlushedInt52: { + LValue value = lowInt52(m_node->child1()); + m_out.store64(value, addressFor(data->machineLocal)); + break; + } + + case FlushedCell: { + LValue value = lowCell(m_node->child1()); + m_out.store64(value, addressFor(data->machineLocal)); + break; + } + + case FlushedBoolean: { + speculateBoolean(m_node->child1()); + m_out.store64( + lowJSValue(m_node->child1(), ManualOperandSpeculation), + addressFor(data->machineLocal)); + break; + } + + default: + DFG_CRASH(m_graph, m_node, "Bad flush format"); + break; + } + } + + void compileNoOp() + { + DFG_NODE_DO_TO_CHILDREN(m_graph, m_node, speculate); + } + + void compileToThis() + { + LValue value = lowJSValue(m_node->child1()); + + LBasicBlock isCellCase = m_out.newBlock(); + LBasicBlock slowCase = m_out.newBlock(); + LBasicBlock continuation = m_out.newBlock(); + + m_out.branch( + isCell(value, provenType(m_node->child1())), usually(isCellCase), rarely(slowCase)); + + LBasicBlock lastNext = m_out.appendTo(isCellCase, slowCase); + ValueFromBlock fastResult = m_out.anchor(value); + m_out.branch(isType(value, FinalObjectType), usually(continuation), rarely(slowCase)); + + m_out.appendTo(slowCase, continuation); + J_JITOperation_EJ function; + if (m_graph.isStrictModeFor(m_node->origin.semantic)) + function = operationToThisStrict; + else + function = operationToThis; + ValueFromBlock slowResult = m_out.anchor( + vmCall(m_out.int64, m_out.operation(function), m_callFrame, value)); + m_out.jump(continuation); + + m_out.appendTo(continuation, lastNext); + setJSValue(m_out.phi(m_out.int64, fastResult, slowResult)); + } + + void compileValueAdd() + { + emitBinarySnippet<JITAddGenerator>(operationValueAdd); + } + + void compileStrCat() + { + LValue result; + if (m_node->child3()) { + result = vmCall( + m_out.int64, m_out.operation(operationStrCat3), m_callFrame, + lowJSValue(m_node->child1(), ManualOperandSpeculation), + lowJSValue(m_node->child2(), ManualOperandSpeculation), + lowJSValue(m_node->child3(), ManualOperandSpeculation)); + } else { + result = vmCall( + m_out.int64, m_out.operation(operationStrCat2), m_callFrame, + lowJSValue(m_node->child1(), ManualOperandSpeculation), + lowJSValue(m_node->child2(), ManualOperandSpeculation)); + } + setJSValue(result); + } + + void compileArithAddOrSub() + { + bool isSub = m_node->op() == ArithSub; + switch (m_node->binaryUseKind()) { + case Int32Use: { + LValue left = lowInt32(m_node->child1()); + LValue right = lowInt32(m_node->child2()); + + if (!shouldCheckOverflow(m_node->arithMode())) { + setInt32(isSub ? m_out.sub(left, right) : m_out.add(left, right)); + break; + } + + CheckValue* result = + isSub ? 
m_out.speculateSub(left, right) : m_out.speculateAdd(left, right); + blessSpeculation(result, Overflow, noValue(), nullptr, m_origin); + setInt32(result); + break; + } + + case Int52RepUse: { + if (!abstractValue(m_node->child1()).couldBeType(SpecInt52) + && !abstractValue(m_node->child2()).couldBeType(SpecInt52)) { + Int52Kind kind; + LValue left = lowWhicheverInt52(m_node->child1(), kind); + LValue right = lowInt52(m_node->child2(), kind); + setInt52(isSub ? m_out.sub(left, right) : m_out.add(left, right), kind); + break; + } + + LValue left = lowInt52(m_node->child1()); + LValue right = lowInt52(m_node->child2()); + CheckValue* result = + isSub ? m_out.speculateSub(left, right) : m_out.speculateAdd(left, right); + blessSpeculation(result, Overflow, noValue(), nullptr, m_origin); + setInt52(result); + break; + } + + case DoubleRepUse: { + LValue C1 = lowDouble(m_node->child1()); + LValue C2 = lowDouble(m_node->child2()); + + setDouble(isSub ? m_out.doubleSub(C1, C2) : m_out.doubleAdd(C1, C2)); + break; + } + + case UntypedUse: { + if (!isSub) { + DFG_CRASH(m_graph, m_node, "Bad use kind"); + break; + } + + emitBinarySnippet<JITSubGenerator>(operationValueSub); + break; + } + + default: + DFG_CRASH(m_graph, m_node, "Bad use kind"); + break; + } + } + + void compileArithClz32() + { + LValue operand = lowInt32(m_node->child1()); + setInt32(m_out.ctlz32(operand)); + } + + void compileArithMul() + { + switch (m_node->binaryUseKind()) { + case Int32Use: { + LValue left = lowInt32(m_node->child1()); + LValue right = lowInt32(m_node->child2()); + + LValue result; + + if (!shouldCheckOverflow(m_node->arithMode())) + result = m_out.mul(left, right); + else { + CheckValue* speculation = m_out.speculateMul(left, right); + blessSpeculation(speculation, Overflow, noValue(), nullptr, m_origin); + result = speculation; + } + + if (shouldCheckNegativeZero(m_node->arithMode())) { + LBasicBlock slowCase = m_out.newBlock(); + LBasicBlock continuation = m_out.newBlock(); + + m_out.branch( + m_out.notZero32(result), usually(continuation), rarely(slowCase)); + + LBasicBlock lastNext = m_out.appendTo(slowCase, continuation); + speculate(NegativeZero, noValue(), nullptr, m_out.lessThan(left, m_out.int32Zero)); + speculate(NegativeZero, noValue(), nullptr, m_out.lessThan(right, m_out.int32Zero)); + m_out.jump(continuation); + m_out.appendTo(continuation, lastNext); + } + + setInt32(result); + break; + } + + case Int52RepUse: { + Int52Kind kind; + LValue left = lowWhicheverInt52(m_node->child1(), kind); + LValue right = lowInt52(m_node->child2(), opposite(kind)); + + CheckValue* result = m_out.speculateMul(left, right); + blessSpeculation(result, Overflow, noValue(), nullptr, m_origin); + + if (shouldCheckNegativeZero(m_node->arithMode())) { + LBasicBlock slowCase = m_out.newBlock(); + LBasicBlock continuation = m_out.newBlock(); + + m_out.branch( + m_out.notZero64(result), usually(continuation), rarely(slowCase)); + + LBasicBlock lastNext = m_out.appendTo(slowCase, continuation); + speculate(NegativeZero, noValue(), nullptr, m_out.lessThan(left, m_out.int64Zero)); + speculate(NegativeZero, noValue(), nullptr, m_out.lessThan(right, m_out.int64Zero)); + m_out.jump(continuation); + m_out.appendTo(continuation, lastNext); + } + + setInt52(result); + break; + } + + case DoubleRepUse: { + setDouble( + m_out.doubleMul(lowDouble(m_node->child1()), lowDouble(m_node->child2()))); + break; + } + + case UntypedUse: { + emitBinarySnippet<JITMulGenerator>(operationValueMul); + break; + } + + default: + DFG_CRASH(m_graph, 
m_node, "Bad use kind"); + break; + } + } + + void compileArithDiv() + { + switch (m_node->binaryUseKind()) { + case Int32Use: { + LValue numerator = lowInt32(m_node->child1()); + LValue denominator = lowInt32(m_node->child2()); + + if (shouldCheckNegativeZero(m_node->arithMode())) { + LBasicBlock zeroNumerator = m_out.newBlock(); + LBasicBlock numeratorContinuation = m_out.newBlock(); + + m_out.branch( + m_out.isZero32(numerator), + rarely(zeroNumerator), usually(numeratorContinuation)); + + LBasicBlock innerLastNext = m_out.appendTo(zeroNumerator, numeratorContinuation); + + speculate( + NegativeZero, noValue(), 0, m_out.lessThan(denominator, m_out.int32Zero)); + + m_out.jump(numeratorContinuation); + + m_out.appendTo(numeratorContinuation, innerLastNext); + } + + if (shouldCheckOverflow(m_node->arithMode())) { + LBasicBlock unsafeDenominator = m_out.newBlock(); + LBasicBlock continuation = m_out.newBlock(); + + LValue adjustedDenominator = m_out.add(denominator, m_out.int32One); + m_out.branch( + m_out.above(adjustedDenominator, m_out.int32One), + usually(continuation), rarely(unsafeDenominator)); + + LBasicBlock lastNext = m_out.appendTo(unsafeDenominator, continuation); + LValue neg2ToThe31 = m_out.constInt32(-2147483647-1); + speculate(Overflow, noValue(), nullptr, m_out.isZero32(denominator)); + speculate(Overflow, noValue(), nullptr, m_out.equal(numerator, neg2ToThe31)); + m_out.jump(continuation); + + m_out.appendTo(continuation, lastNext); + LValue result = m_out.div(numerator, denominator); + speculate( + Overflow, noValue(), 0, + m_out.notEqual(m_out.mul(result, denominator), numerator)); + setInt32(result); + } else + setInt32(m_out.chillDiv(numerator, denominator)); + + break; + } + + case DoubleRepUse: { + setDouble(m_out.doubleDiv( + lowDouble(m_node->child1()), lowDouble(m_node->child2()))); + break; + } + + case UntypedUse: { + emitBinarySnippet<JITDivGenerator, NeedScratchFPR>(operationValueDiv); + break; + } + + default: + DFG_CRASH(m_graph, m_node, "Bad use kind"); + break; + } + } + + void compileArithMod() + { + switch (m_node->binaryUseKind()) { + case Int32Use: { + LValue numerator = lowInt32(m_node->child1()); + LValue denominator = lowInt32(m_node->child2()); + + LValue remainder; + if (shouldCheckOverflow(m_node->arithMode())) { + LBasicBlock unsafeDenominator = m_out.newBlock(); + LBasicBlock continuation = m_out.newBlock(); + + LValue adjustedDenominator = m_out.add(denominator, m_out.int32One); + m_out.branch( + m_out.above(adjustedDenominator, m_out.int32One), + usually(continuation), rarely(unsafeDenominator)); + + LBasicBlock lastNext = m_out.appendTo(unsafeDenominator, continuation); + LValue neg2ToThe31 = m_out.constInt32(-2147483647-1); + speculate(Overflow, noValue(), nullptr, m_out.isZero32(denominator)); + speculate(Overflow, noValue(), nullptr, m_out.equal(numerator, neg2ToThe31)); + m_out.jump(continuation); + + m_out.appendTo(continuation, lastNext); + LValue result = m_out.mod(numerator, denominator); + remainder = result; + } else + remainder = m_out.chillMod(numerator, denominator); + + if (shouldCheckNegativeZero(m_node->arithMode())) { + LBasicBlock negativeNumerator = m_out.newBlock(); + LBasicBlock numeratorContinuation = m_out.newBlock(); + + m_out.branch( + m_out.lessThan(numerator, m_out.int32Zero), + unsure(negativeNumerator), unsure(numeratorContinuation)); + + LBasicBlock innerLastNext = m_out.appendTo(negativeNumerator, numeratorContinuation); + + speculate(NegativeZero, noValue(), 0, m_out.isZero32(remainder)); + + 
m_out.jump(numeratorContinuation); + + m_out.appendTo(numeratorContinuation, innerLastNext); + } + + setInt32(remainder); + break; + } + + case DoubleRepUse: { + setDouble( + m_out.doubleMod(lowDouble(m_node->child1()), lowDouble(m_node->child2()))); + break; + } + + default: + DFG_CRASH(m_graph, m_node, "Bad use kind"); + break; + } + } + + void compileArithMinOrMax() + { + switch (m_node->binaryUseKind()) { + case Int32Use: { + LValue left = lowInt32(m_node->child1()); + LValue right = lowInt32(m_node->child2()); + + setInt32( + m_out.select( + m_node->op() == ArithMin + ? m_out.lessThan(left, right) + : m_out.lessThan(right, left), + left, right)); + break; + } + + case DoubleRepUse: { + LValue left = lowDouble(m_node->child1()); + LValue right = lowDouble(m_node->child2()); + + LBasicBlock notLessThan = m_out.newBlock(); + LBasicBlock continuation = m_out.newBlock(); + + Vector<ValueFromBlock, 2> results; + + results.append(m_out.anchor(left)); + m_out.branch( + m_node->op() == ArithMin + ? m_out.doubleLessThan(left, right) + : m_out.doubleGreaterThan(left, right), + unsure(continuation), unsure(notLessThan)); + + LBasicBlock lastNext = m_out.appendTo(notLessThan, continuation); + results.append(m_out.anchor(m_out.select( + m_node->op() == ArithMin + ? m_out.doubleGreaterThanOrEqual(left, right) + : m_out.doubleLessThanOrEqual(left, right), + right, m_out.constDouble(PNaN)))); + m_out.jump(continuation); + + m_out.appendTo(continuation, lastNext); + setDouble(m_out.phi(m_out.doubleType, results)); + break; + } + + default: + DFG_CRASH(m_graph, m_node, "Bad use kind"); + break; + } + } + + void compileArithAbs() + { + switch (m_node->child1().useKind()) { + case Int32Use: { + LValue value = lowInt32(m_node->child1()); + + LValue mask = m_out.aShr(value, m_out.constInt32(31)); + LValue result = m_out.bitXor(mask, m_out.add(mask, value)); + + if (shouldCheckOverflow(m_node->arithMode())) + speculate(Overflow, noValue(), 0, m_out.lessThan(result, m_out.int32Zero)); + + setInt32(result); + break; + } + + case DoubleRepUse: { + setDouble(m_out.doubleAbs(lowDouble(m_node->child1()))); + break; + } + + default: + DFG_CRASH(m_graph, m_node, "Bad use kind"); + break; + } + } + + void compileArithSin() { setDouble(m_out.doubleSin(lowDouble(m_node->child1()))); } + + void compileArithCos() { setDouble(m_out.doubleCos(lowDouble(m_node->child1()))); } + + void compileArithPow() + { + if (m_node->child2().useKind() == Int32Use) + setDouble(m_out.doublePowi(lowDouble(m_node->child1()), lowInt32(m_node->child2()))); + else { + LValue base = lowDouble(m_node->child1()); + LValue exponent = lowDouble(m_node->child2()); + + LBasicBlock integerExponentIsSmallBlock = m_out.newBlock(); + LBasicBlock integerExponentPowBlock = m_out.newBlock(); + LBasicBlock doubleExponentPowBlockEntry = m_out.newBlock(); + LBasicBlock nanExceptionExponentIsInfinity = m_out.newBlock(); + LBasicBlock nanExceptionBaseIsOne = m_out.newBlock(); + LBasicBlock powBlock = m_out.newBlock(); + LBasicBlock nanExceptionResultIsNaN = m_out.newBlock(); + LBasicBlock continuation = m_out.newBlock(); + + LValue integerExponent = m_out.doubleToInt(exponent); + LValue integerExponentConvertedToDouble = m_out.intToDouble(integerExponent); + LValue exponentIsInteger = m_out.doubleEqual(exponent, integerExponentConvertedToDouble); + m_out.branch(exponentIsInteger, unsure(integerExponentIsSmallBlock), unsure(doubleExponentPowBlockEntry)); + + LBasicBlock lastNext = m_out.appendTo(integerExponentIsSmallBlock, integerExponentPowBlock); + LValue 
integerExponentBelow1000 = m_out.below(integerExponent, m_out.constInt32(1000)); + m_out.branch(integerExponentBelow1000, usually(integerExponentPowBlock), rarely(doubleExponentPowBlockEntry)); + + m_out.appendTo(integerExponentPowBlock, doubleExponentPowBlockEntry); + ValueFromBlock powDoubleIntResult = m_out.anchor(m_out.doublePowi(base, integerExponent)); + m_out.jump(continuation); + + // If y is NaN, the result is NaN. + m_out.appendTo(doubleExponentPowBlockEntry, nanExceptionExponentIsInfinity); + LValue exponentIsNaN; + if (provenType(m_node->child2()) & SpecDoubleNaN) + exponentIsNaN = m_out.doubleNotEqualOrUnordered(exponent, exponent); + else + exponentIsNaN = m_out.booleanFalse; + m_out.branch(exponentIsNaN, rarely(nanExceptionResultIsNaN), usually(nanExceptionExponentIsInfinity)); + + // If abs(x) is 1 and y is +infinity, the result is NaN. + // If abs(x) is 1 and y is -infinity, the result is NaN. + m_out.appendTo(nanExceptionExponentIsInfinity, nanExceptionBaseIsOne); + LValue absoluteExponent = m_out.doubleAbs(exponent); + LValue absoluteExponentIsInfinity = m_out.doubleEqual(absoluteExponent, m_out.constDouble(std::numeric_limits<double>::infinity())); + m_out.branch(absoluteExponentIsInfinity, rarely(nanExceptionBaseIsOne), usually(powBlock)); + + m_out.appendTo(nanExceptionBaseIsOne, powBlock); + LValue absoluteBase = m_out.doubleAbs(base); + LValue absoluteBaseIsOne = m_out.doubleEqual(absoluteBase, m_out.constDouble(1)); + m_out.branch(absoluteBaseIsOne, unsure(nanExceptionResultIsNaN), unsure(powBlock)); + + m_out.appendTo(powBlock, nanExceptionResultIsNaN); + ValueFromBlock powResult = m_out.anchor(m_out.doublePow(base, exponent)); + m_out.jump(continuation); + + m_out.appendTo(nanExceptionResultIsNaN, continuation); + ValueFromBlock pureNan = m_out.anchor(m_out.constDouble(PNaN)); + m_out.jump(continuation); + + m_out.appendTo(continuation, lastNext); + setDouble(m_out.phi(m_out.doubleType, powDoubleIntResult, powResult, pureNan)); + } + } + + void compileArithRandom() + { + JSGlobalObject* globalObject = m_graph.globalObjectFor(m_node->origin.semantic); + + // Inlined WeakRandom::advance(). + // uint64_t x = m_low; + void* lowAddress = reinterpret_cast<uint8_t*>(globalObject) + JSGlobalObject::weakRandomOffset() + WeakRandom::lowOffset(); + LValue low = m_out.load64(m_out.absolute(lowAddress)); + // uint64_t y = m_high; + void* highAddress = reinterpret_cast<uint8_t*>(globalObject) + JSGlobalObject::weakRandomOffset() + WeakRandom::highOffset(); + LValue high = m_out.load64(m_out.absolute(highAddress)); + // m_low = y; + m_out.store64(high, m_out.absolute(lowAddress)); + + // x ^= x << 23; + LValue phase1 = m_out.bitXor(m_out.shl(low, m_out.constInt64(23)), low); + + // x ^= x >> 17; + LValue phase2 = m_out.bitXor(m_out.lShr(phase1, m_out.constInt64(17)), phase1); + + // x ^= y ^ (y >> 26); + LValue phase3 = m_out.bitXor(m_out.bitXor(high, m_out.lShr(high, m_out.constInt64(26))), phase2); + + // m_high = x; + m_out.store64(phase3, m_out.absolute(highAddress)); + + // return x + y; + LValue random64 = m_out.add(phase3, high); + + // Extract random 53bit. [0, 53] bit is safe integer number ranges in double representation. + LValue random53 = m_out.bitAnd(random64, m_out.constInt64((1ULL << 53) - 1)); + + LValue double53Integer = m_out.intToDouble(random53); + + // Convert `(53bit double integer value) / (1 << 53)` to `(53bit double integer value) * (1.0 / (1 << 53))`. 
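+ // The scale constant is exactly 2^-53 (zero mantissa, biased exponent 970), so the multiplication only rewrites the exponent: the 53-bit significand is preserved and the result lies in [0, 1).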
+ // In latter case, `1.0 / (1 << 53)` will become a double value represented as (mantissa = 0 & exp = 970, it means 1e-(2**54)). + static const double scale = 1.0 / (1ULL << 53); + + // Multiplying 1e-(2**54) with the double integer does not change anything of the mantissa part of the double integer. + // It just reduces the exp part of the given 53bit double integer. + // (Except for 0.0. This is specially handled and in this case, exp just becomes 0.) + // Now we get 53bit precision random double value in [0, 1). + LValue result = m_out.doubleMul(double53Integer, m_out.constDouble(scale)); + + setDouble(result); + } + + void compileArithRound() + { + LValue result = nullptr; + + if (producesInteger(m_node->arithRoundingMode()) && !shouldCheckNegativeZero(m_node->arithRoundingMode())) { + LValue value = lowDouble(m_node->child1()); + result = m_out.doubleFloor(m_out.doubleAdd(value, m_out.constDouble(0.5))); + } else { + LBasicBlock realPartIsMoreThanHalf = m_out.newBlock(); + LBasicBlock continuation = m_out.newBlock(); + + LValue value = lowDouble(m_node->child1()); + LValue integerValue = m_out.doubleCeil(value); + ValueFromBlock integerValueResult = m_out.anchor(integerValue); + + LValue realPart = m_out.doubleSub(integerValue, value); + + m_out.branch(m_out.doubleGreaterThanOrUnordered(realPart, m_out.constDouble(0.5)), unsure(realPartIsMoreThanHalf), unsure(continuation)); + + LBasicBlock lastNext = m_out.appendTo(realPartIsMoreThanHalf, continuation); + LValue integerValueRoundedDown = m_out.doubleSub(integerValue, m_out.constDouble(1)); + ValueFromBlock integerValueRoundedDownResult = m_out.anchor(integerValueRoundedDown); + m_out.jump(continuation); + m_out.appendTo(continuation, lastNext); + + result = m_out.phi(m_out.doubleType, integerValueResult, integerValueRoundedDownResult); + } + + if (producesInteger(m_node->arithRoundingMode())) { + LValue integerValue = convertDoubleToInt32(result, shouldCheckNegativeZero(m_node->arithRoundingMode())); + setInt32(integerValue); + } else + setDouble(result); + } + + void compileArithFloor() + { + LValue value = lowDouble(m_node->child1()); + LValue integerValue = m_out.doubleFloor(value); + if (producesInteger(m_node->arithRoundingMode())) + setInt32(convertDoubleToInt32(integerValue, shouldCheckNegativeZero(m_node->arithRoundingMode()))); + else + setDouble(integerValue); + } + + void compileArithCeil() + { + LValue value = lowDouble(m_node->child1()); + LValue integerValue = m_out.doubleCeil(value); + if (producesInteger(m_node->arithRoundingMode())) + setInt32(convertDoubleToInt32(integerValue, shouldCheckNegativeZero(m_node->arithRoundingMode()))); + else + setDouble(integerValue); + } + + void compileArithSqrt() { setDouble(m_out.doubleSqrt(lowDouble(m_node->child1()))); } + + void compileArithLog() { setDouble(m_out.doubleLog(lowDouble(m_node->child1()))); } + + void compileArithFRound() + { + setDouble(m_out.fround(lowDouble(m_node->child1()))); + } + + void compileArithNegate() + { + switch (m_node->child1().useKind()) { + case Int32Use: { + LValue value = lowInt32(m_node->child1()); + + LValue result; + if (!shouldCheckOverflow(m_node->arithMode())) + result = m_out.neg(value); + else if (!shouldCheckNegativeZero(m_node->arithMode())) { + CheckValue* check = m_out.speculateSub(m_out.int32Zero, value); + blessSpeculation(check, Overflow, noValue(), nullptr, m_origin); + result = check; + } else { + speculate(Overflow, noValue(), 0, m_out.testIsZero32(value, m_out.constInt32(0x7fffffff))); + result = m_out.neg(value); + } + + 
setInt32(result); + break; + } + + case Int52RepUse: { + if (!abstractValue(m_node->child1()).couldBeType(SpecInt52)) { + Int52Kind kind; + LValue value = lowWhicheverInt52(m_node->child1(), kind); + LValue result = m_out.neg(value); + if (shouldCheckNegativeZero(m_node->arithMode())) + speculate(NegativeZero, noValue(), 0, m_out.isZero64(result)); + setInt52(result, kind); + break; + } + + LValue value = lowInt52(m_node->child1()); + CheckValue* result = m_out.speculateSub(m_out.int64Zero, value); + blessSpeculation(result, Int52Overflow, noValue(), nullptr, m_origin); + speculate(NegativeZero, noValue(), 0, m_out.isZero64(result)); + setInt52(result); + break; + } + + case DoubleRepUse: { + setDouble(m_out.doubleNeg(lowDouble(m_node->child1()))); + break; + } + + default: + DFG_CRASH(m_graph, m_node, "Bad use kind"); + break; + } + } + + void compileBitAnd() + { + if (m_node->isBinaryUseKind(UntypedUse)) { + emitBinaryBitOpSnippet<JITBitAndGenerator>(operationValueBitAnd); + return; + } + setInt32(m_out.bitAnd(lowInt32(m_node->child1()), lowInt32(m_node->child2()))); + } + + void compileBitOr() + { + if (m_node->isBinaryUseKind(UntypedUse)) { + emitBinaryBitOpSnippet<JITBitOrGenerator>(operationValueBitOr); + return; + } + setInt32(m_out.bitOr(lowInt32(m_node->child1()), lowInt32(m_node->child2()))); + } + + void compileBitXor() + { + if (m_node->isBinaryUseKind(UntypedUse)) { + emitBinaryBitOpSnippet<JITBitXorGenerator>(operationValueBitXor); + return; + } + setInt32(m_out.bitXor(lowInt32(m_node->child1()), lowInt32(m_node->child2()))); + } + + void compileBitRShift() + { + if (m_node->isBinaryUseKind(UntypedUse)) { + emitRightShiftSnippet(JITRightShiftGenerator::SignedShift); + return; + } + setInt32(m_out.aShr( + lowInt32(m_node->child1()), + m_out.bitAnd(lowInt32(m_node->child2()), m_out.constInt32(31)))); + } + + void compileBitLShift() + { + if (m_node->isBinaryUseKind(UntypedUse)) { + emitBinaryBitOpSnippet<JITLeftShiftGenerator>(operationValueBitLShift); + return; + } + setInt32(m_out.shl( + lowInt32(m_node->child1()), + m_out.bitAnd(lowInt32(m_node->child2()), m_out.constInt32(31)))); + } + + void compileBitURShift() + { + if (m_node->isBinaryUseKind(UntypedUse)) { + emitRightShiftSnippet(JITRightShiftGenerator::UnsignedShift); + return; + } + setInt32(m_out.lShr( + lowInt32(m_node->child1()), + m_out.bitAnd(lowInt32(m_node->child2()), m_out.constInt32(31)))); + } + + void compileUInt32ToNumber() + { + LValue value = lowInt32(m_node->child1()); + + if (doesOverflow(m_node->arithMode())) { + setDouble(m_out.unsignedToDouble(value)); + return; + } + + speculate(Overflow, noValue(), 0, m_out.lessThan(value, m_out.int32Zero)); + setInt32(value); + } + + void compileCheckStructure() + { + ExitKind exitKind; + if (m_node->child1()->hasConstant()) + exitKind = BadConstantCache; + else + exitKind = BadCache; + + switch (m_node->child1().useKind()) { + case CellUse: + case KnownCellUse: { + LValue cell = lowCell(m_node->child1()); + + checkStructure( + m_out.load32(cell, m_heaps.JSCell_structureID), jsValueValue(cell), + exitKind, m_node->structureSet(), + [&] (Structure* structure) { + return weakStructureID(structure); + }); + return; + } + + case CellOrOtherUse: { + LValue value = lowJSValue(m_node->child1(), ManualOperandSpeculation); + + LBasicBlock cellCase = m_out.newBlock(); + LBasicBlock notCellCase = m_out.newBlock(); + LBasicBlock continuation = m_out.newBlock(); + + m_out.branch( + isCell(value, provenType(m_node->child1())), unsure(cellCase), unsure(notCellCase)); + + 
LBasicBlock lastNext = m_out.appendTo(cellCase, notCellCase); + checkStructure( + m_out.load32(value, m_heaps.JSCell_structureID), jsValueValue(value), + exitKind, m_node->structureSet(), + [&] (Structure* structure) { + return weakStructureID(structure); + }); + m_out.jump(continuation); + + m_out.appendTo(notCellCase, continuation); + FTL_TYPE_CHECK(jsValueValue(value), m_node->child1(), SpecCell | SpecOther, isNotOther(value)); + m_out.jump(continuation); + + m_out.appendTo(continuation, lastNext); + return; + } + + default: + DFG_CRASH(m_graph, m_node, "Bad use kind"); + return; + } + } + + void compileCheckCell() + { + LValue cell = lowCell(m_node->child1()); + + speculate( + BadCell, jsValueValue(cell), m_node->child1().node(), + m_out.notEqual(cell, weakPointer(m_node->cellOperand()->cell()))); + } + + void compileCheckBadCell() + { + terminate(BadCell); + } + + void compileCheckNotEmpty() + { + speculate(TDZFailure, noValue(), nullptr, m_out.isZero64(lowJSValue(m_node->child1()))); + } + + void compileCheckIdent() + { + UniquedStringImpl* uid = m_node->uidOperand(); + if (uid->isSymbol()) { + LValue symbol = lowSymbol(m_node->child1()); + LValue stringImpl = m_out.loadPtr(symbol, m_heaps.Symbol_privateName); + speculate(BadIdent, noValue(), nullptr, m_out.notEqual(stringImpl, m_out.constIntPtr(uid))); + } else { + LValue string = lowStringIdent(m_node->child1()); + LValue stringImpl = m_out.loadPtr(string, m_heaps.JSString_value); + speculate(BadIdent, noValue(), nullptr, m_out.notEqual(stringImpl, m_out.constIntPtr(uid))); + } + } + + void compileGetExecutable() + { + LValue cell = lowCell(m_node->child1()); + speculateFunction(m_node->child1(), cell); + setJSValue(m_out.loadPtr(cell, m_heaps.JSFunction_executable)); + } + + void compileArrayifyToStructure() + { + LValue cell = lowCell(m_node->child1()); + LValue property = !!m_node->child2() ? 
lowInt32(m_node->child2()) : 0; + + LBasicBlock unexpectedStructure = m_out.newBlock(); + LBasicBlock continuation = m_out.newBlock(); + + LValue structureID = m_out.load32(cell, m_heaps.JSCell_structureID); + + m_out.branch( + m_out.notEqual(structureID, weakStructureID(m_node->structure())), + rarely(unexpectedStructure), usually(continuation)); + + LBasicBlock lastNext = m_out.appendTo(unexpectedStructure, continuation); + + if (property) { + switch (m_node->arrayMode().type()) { + case Array::Int32: + case Array::Double: + case Array::Contiguous: + speculate( + Uncountable, noValue(), 0, + m_out.aboveOrEqual(property, m_out.constInt32(MIN_SPARSE_ARRAY_INDEX))); + break; + default: + break; + } + } + + switch (m_node->arrayMode().type()) { + case Array::Int32: + vmCall(m_out.voidType, m_out.operation(operationEnsureInt32), m_callFrame, cell); + break; + case Array::Double: + vmCall(m_out.voidType, m_out.operation(operationEnsureDouble), m_callFrame, cell); + break; + case Array::Contiguous: + vmCall(m_out.voidType, m_out.operation(operationEnsureContiguous), m_callFrame, cell); + break; + case Array::ArrayStorage: + case Array::SlowPutArrayStorage: + vmCall(m_out.voidType, m_out.operation(operationEnsureArrayStorage), m_callFrame, cell); + break; + default: + DFG_CRASH(m_graph, m_node, "Bad array type"); + break; + } + + structureID = m_out.load32(cell, m_heaps.JSCell_structureID); + speculate( + BadIndexingType, jsValueValue(cell), 0, + m_out.notEqual(structureID, weakStructureID(m_node->structure()))); + m_out.jump(continuation); + + m_out.appendTo(continuation, lastNext); + } + + void compilePutStructure() + { + m_ftlState.jitCode->common.notifyCompilingStructureTransition(m_graph.m_plan, codeBlock(), m_node); + + Structure* oldStructure = m_node->transition()->previous; + Structure* newStructure = m_node->transition()->next; + ASSERT_UNUSED(oldStructure, oldStructure->indexingType() == newStructure->indexingType()); + ASSERT(oldStructure->typeInfo().inlineTypeFlags() == newStructure->typeInfo().inlineTypeFlags()); + ASSERT(oldStructure->typeInfo().type() == newStructure->typeInfo().type()); + + LValue cell = lowCell(m_node->child1()); + m_out.store32( + weakStructureID(newStructure), + cell, m_heaps.JSCell_structureID); + } + + void compileGetById() + { + switch (m_node->child1().useKind()) { + case CellUse: { + setJSValue(getById(lowCell(m_node->child1()))); + return; + } + + case UntypedUse: { + // This is pretty weird, since we duplicate the slow path both here and in the + // code generated by the IC. We should investigate making this less bad. 
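+ // (Cells go through the IC, which has its own slow path, while notCellCase below calls operationGetByIdGeneric directly.)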
+ // https://bugs.webkit.org/show_bug.cgi?id=127830 + LValue value = lowJSValue(m_node->child1()); + + LBasicBlock cellCase = m_out.newBlock(); + LBasicBlock notCellCase = m_out.newBlock(); + LBasicBlock continuation = m_out.newBlock(); + + m_out.branch( + isCell(value, provenType(m_node->child1())), unsure(cellCase), unsure(notCellCase)); + + LBasicBlock lastNext = m_out.appendTo(cellCase, notCellCase); + ValueFromBlock cellResult = m_out.anchor(getById(value)); + m_out.jump(continuation); + + m_out.appendTo(notCellCase, continuation); + ValueFromBlock notCellResult = m_out.anchor(vmCall( + m_out.int64, m_out.operation(operationGetByIdGeneric), + m_callFrame, value, + m_out.constIntPtr(m_graph.identifiers()[m_node->identifierNumber()]))); + m_out.jump(continuation); + + m_out.appendTo(continuation, lastNext); + setJSValue(m_out.phi(m_out.int64, cellResult, notCellResult)); + return; + } + + default: + DFG_CRASH(m_graph, m_node, "Bad use kind"); + return; + } + } + + void compilePutById() + { + Node* node = m_node; + + // See above; CellUse is easier so we do only that for now. + ASSERT(node->child1().useKind() == CellUse); + + LValue base = lowCell(node->child1()); + LValue value = lowJSValue(node->child2()); + auto uid = m_graph.identifiers()[node->identifierNumber()]; + + B3::PatchpointValue* patchpoint = m_out.patchpoint(Void); + patchpoint->appendSomeRegister(base); + patchpoint->appendSomeRegister(value); + patchpoint->clobber(RegisterSet::macroScratchRegisters()); + + // FIXME: If this is a PutByIdFlush, we might want to late-clobber volatile registers. + // https://bugs.webkit.org/show_bug.cgi?id=152848 + + RefPtr<PatchpointExceptionHandle> exceptionHandle = + preparePatchpointForExceptions(patchpoint); + + State* state = &m_ftlState; + ECMAMode ecmaMode = m_graph.executableFor(node->origin.semantic)->ecmaMode(); + + patchpoint->setGenerator( + [=] (CCallHelpers& jit, const StackmapGenerationParams& params) { + AllowMacroScratchRegisterUsage allowScratch(jit); + + CallSiteIndex callSiteIndex = + state->jitCode->common.addUniqueCallSiteIndex(node->origin.semantic); + + Box<CCallHelpers::JumpList> exceptions = + exceptionHandle->scheduleExitCreation(params)->jumps(jit); + + // JS setter call ICs generated by the PutById IC will need this. + exceptionHandle->scheduleExitCreationForUnwind(params, callSiteIndex); + + auto generator = Box<JITPutByIdGenerator>::create( + jit.codeBlock(), node->origin.semantic, callSiteIndex, + params.unavailableRegisters(), JSValueRegs(params[0].gpr()), + JSValueRegs(params[1].gpr()), GPRInfo::patchpointScratchRegister, ecmaMode, + node->op() == PutByIdDirect ? 
Direct : NotDirect); + + generator->generateFastPath(jit); + CCallHelpers::Label done = jit.label(); + + params.addLatePath( + [=] (CCallHelpers& jit) { + AllowMacroScratchRegisterUsage allowScratch(jit); + + generator->slowPathJump().link(&jit); + CCallHelpers::Label slowPathBegin = jit.label(); + CCallHelpers::Call slowPathCall = callOperation( + *state, params.unavailableRegisters(), jit, node->origin.semantic, + exceptions.get(), generator->slowPathFunction(), InvalidGPRReg, + CCallHelpers::TrustedImmPtr(generator->stubInfo()), params[1].gpr(), + params[0].gpr(), CCallHelpers::TrustedImmPtr(uid)).call(); + jit.jump().linkTo(done, &jit); + + generator->reportSlowPathCall(slowPathBegin, slowPathCall); + + jit.addLinkTask( + [=] (LinkBuffer& linkBuffer) { + generator->finalize(linkBuffer); + }); + }); + }); + } + + void compileGetButterfly() + { + setStorage(loadButterflyWithBarrier(lowCell(m_node->child1()))); + } + + void compileGetButterflyReadOnly() + { + setStorage(loadButterflyReadOnly(lowCell(m_node->child1()))); + } + + void compileConstantStoragePointer() + { + setStorage(m_out.constIntPtr(m_node->storagePointer())); + } + + void compileGetIndexedPropertyStorage() + { + LValue cell = lowCell(m_node->child1()); + + if (m_node->arrayMode().type() == Array::String) { + LBasicBlock slowPath = m_out.newBlock(); + LBasicBlock continuation = m_out.newBlock(); + + LValue fastResultValue = m_out.loadPtr(cell, m_heaps.JSString_value); + ValueFromBlock fastResult = m_out.anchor(fastResultValue); + + m_out.branch( + m_out.notNull(fastResultValue), usually(continuation), rarely(slowPath)); + + LBasicBlock lastNext = m_out.appendTo(slowPath, continuation); + + ValueFromBlock slowResult = m_out.anchor( + vmCall(m_out.intPtr, m_out.operation(operationResolveRope), m_callFrame, cell)); + + m_out.jump(continuation); + + m_out.appendTo(continuation, lastNext); + + setStorage(m_out.loadPtr(m_out.phi(m_out.intPtr, fastResult, slowResult), m_heaps.StringImpl_data)); + return; + } + + setStorage(loadVectorWithBarrier(cell)); + } + + void compileCheckArray() + { + Edge edge = m_node->child1(); + LValue cell = lowCell(edge); + + if (m_node->arrayMode().alreadyChecked(m_graph, m_node, abstractValue(edge))) + return; + + speculate( + BadIndexingType, jsValueValue(cell), 0, + m_out.logicalNot(isArrayType(cell, m_node->arrayMode()))); + } + + void compileGetTypedArrayByteOffset() + { + LValue basePtr = lowCell(m_node->child1()); + + LBasicBlock simpleCase = m_out.newBlock(); + LBasicBlock wastefulCase = m_out.newBlock(); + LBasicBlock continuation = m_out.newBlock(); + + LValue mode = m_out.load32(basePtr, m_heaps.JSArrayBufferView_mode); + m_out.branch( + m_out.notEqual(mode, m_out.constInt32(WastefulTypedArray)), + unsure(simpleCase), unsure(wastefulCase)); + + LBasicBlock lastNext = m_out.appendTo(simpleCase, wastefulCase); + + ValueFromBlock simpleOut = m_out.anchor(m_out.constIntPtr(0)); + + m_out.jump(continuation); + + m_out.appendTo(wastefulCase, continuation); + + LValue vectorPtr = loadVectorReadOnly(basePtr); + LValue butterflyPtr = loadButterflyReadOnly(basePtr); + LValue arrayBufferPtr = m_out.loadPtr(butterflyPtr, m_heaps.Butterfly_arrayBuffer); + LValue dataPtr = m_out.loadPtr(arrayBufferPtr, m_heaps.ArrayBuffer_data); + + ValueFromBlock wastefulOut = m_out.anchor(m_out.sub(vectorPtr, dataPtr)); + + m_out.jump(continuation); + m_out.appendTo(continuation, lastNext); + + setInt32(m_out.castToInt32(m_out.phi(m_out.intPtr, simpleOut, wastefulOut))); + } + + void compileGetArrayLength() + { + 
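+ // The length lives in a mode-specific location: the butterfly's public length for indexed storage, the JSString length, the DirectArguments/ScopedArguments length, or the typed-array view's length.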
switch (m_node->arrayMode().type()) { + case Array::Int32: + case Array::Double: + case Array::Contiguous: { + setInt32(m_out.load32NonNegative(lowStorage(m_node->child2()), m_heaps.Butterfly_publicLength)); + return; + } + + case Array::String: { + LValue string = lowCell(m_node->child1()); + setInt32(m_out.load32NonNegative(string, m_heaps.JSString_length)); + return; + } + + case Array::DirectArguments: { + LValue arguments = lowCell(m_node->child1()); + speculate( + ExoticObjectMode, noValue(), nullptr, + m_out.notNull(m_out.loadPtr(arguments, m_heaps.DirectArguments_overrides))); + setInt32(m_out.load32NonNegative(arguments, m_heaps.DirectArguments_length)); + return; + } + + case Array::ScopedArguments: { + LValue arguments = lowCell(m_node->child1()); + speculate( + ExoticObjectMode, noValue(), nullptr, + m_out.notZero32(m_out.load8ZeroExt32(arguments, m_heaps.ScopedArguments_overrodeThings))); + setInt32(m_out.load32NonNegative(arguments, m_heaps.ScopedArguments_totalLength)); + return; + } + + default: + if (m_node->arrayMode().isSomeTypedArrayView()) { + setInt32( + m_out.load32NonNegative(lowCell(m_node->child1()), m_heaps.JSArrayBufferView_length)); + return; + } + + DFG_CRASH(m_graph, m_node, "Bad array type"); + return; + } + } + + void compileCheckInBounds() + { + speculate( + OutOfBounds, noValue(), 0, + m_out.aboveOrEqual(lowInt32(m_node->child1()), lowInt32(m_node->child2()))); + } + + void compileGetByVal() + { + switch (m_node->arrayMode().type()) { + case Array::Int32: + case Array::Contiguous: { + LValue index = lowInt32(m_node->child2()); + LValue storage = lowStorage(m_node->child3()); + + IndexedAbstractHeap& heap = m_node->arrayMode().type() == Array::Int32 ? + m_heaps.indexedInt32Properties : m_heaps.indexedContiguousProperties; + + if (m_node->arrayMode().isInBounds()) { + LValue result = m_out.load64(baseIndex(heap, storage, index, m_node->child2())); + LValue isHole = m_out.isZero64(result); + if (m_node->arrayMode().isSaneChain()) { + DFG_ASSERT( + m_graph, m_node, m_node->arrayMode().type() == Array::Contiguous); + result = m_out.select( + isHole, m_out.constInt64(JSValue::encode(jsUndefined())), result); + } else + speculate(LoadFromHole, noValue(), 0, isHole); + setJSValue(result); + return; + } + + LValue base = lowCell(m_node->child1()); + + LBasicBlock fastCase = m_out.newBlock(); + LBasicBlock slowCase = m_out.newBlock(); + LBasicBlock continuation = m_out.newBlock(); + + m_out.branch( + m_out.aboveOrEqual( + index, m_out.load32NonNegative(storage, m_heaps.Butterfly_publicLength)), + rarely(slowCase), usually(fastCase)); + + LBasicBlock lastNext = m_out.appendTo(fastCase, slowCase); + + LValue fastResultValue = m_out.load64(baseIndex(heap, storage, index, m_node->child2())); + ValueFromBlock fastResult = m_out.anchor(fastResultValue); + m_out.branch( + m_out.isZero64(fastResultValue), rarely(slowCase), usually(continuation)); + + m_out.appendTo(slowCase, continuation); + ValueFromBlock slowResult = m_out.anchor( + vmCall(m_out.int64, m_out.operation(operationGetByValArrayInt), m_callFrame, base, index)); + m_out.jump(continuation); + + m_out.appendTo(continuation, lastNext); + setJSValue(m_out.phi(m_out.int64, fastResult, slowResult)); + return; + } + + case Array::Double: { + LValue index = lowInt32(m_node->child2()); + LValue storage = lowStorage(m_node->child3()); + + IndexedAbstractHeap& heap = m_heaps.indexedDoubleProperties; + + if (m_node->arrayMode().isInBounds()) { + LValue result = m_out.loadDouble( + baseIndex(heap, storage, index, 
m_node->child2())); + + if (!m_node->arrayMode().isSaneChain()) { + speculate( + LoadFromHole, noValue(), 0, + m_out.doubleNotEqualOrUnordered(result, result)); + } + setDouble(result); + break; + } + + LValue base = lowCell(m_node->child1()); + + LBasicBlock inBounds = m_out.newBlock(); + LBasicBlock boxPath = m_out.newBlock(); + LBasicBlock slowCase = m_out.newBlock(); + LBasicBlock continuation = m_out.newBlock(); + + m_out.branch( + m_out.aboveOrEqual( + index, m_out.load32NonNegative(storage, m_heaps.Butterfly_publicLength)), + rarely(slowCase), usually(inBounds)); + + LBasicBlock lastNext = m_out.appendTo(inBounds, boxPath); + LValue doubleValue = m_out.loadDouble( + baseIndex(heap, storage, index, m_node->child2())); + m_out.branch( + m_out.doubleNotEqualOrUnordered(doubleValue, doubleValue), + rarely(slowCase), usually(boxPath)); + + m_out.appendTo(boxPath, slowCase); + ValueFromBlock fastResult = m_out.anchor(boxDouble(doubleValue)); + m_out.jump(continuation); + + m_out.appendTo(slowCase, continuation); + ValueFromBlock slowResult = m_out.anchor( + vmCall(m_out.int64, m_out.operation(operationGetByValArrayInt), m_callFrame, base, index)); + m_out.jump(continuation); + + m_out.appendTo(continuation, lastNext); + setJSValue(m_out.phi(m_out.int64, fastResult, slowResult)); + return; + } + + case Array::Undecided: { + LValue index = lowInt32(m_node->child2()); + + speculate(OutOfBounds, noValue(), m_node, m_out.lessThan(index, m_out.int32Zero)); + setJSValue(m_out.constInt64(ValueUndefined)); + return; + } + + case Array::DirectArguments: { + LValue base = lowCell(m_node->child1()); + LValue index = lowInt32(m_node->child2()); + + speculate( + ExoticObjectMode, noValue(), nullptr, + m_out.notNull(m_out.loadPtr(base, m_heaps.DirectArguments_overrides))); + speculate( + ExoticObjectMode, noValue(), nullptr, + m_out.aboveOrEqual( + index, + m_out.load32NonNegative(base, m_heaps.DirectArguments_length))); + + TypedPointer address = m_out.baseIndex( + m_heaps.DirectArguments_storage, base, m_out.zeroExtPtr(index)); + setJSValue(m_out.load64(address)); + return; + } + + case Array::ScopedArguments: { + LValue base = lowCell(m_node->child1()); + LValue index = lowInt32(m_node->child2()); + + speculate( + ExoticObjectMode, noValue(), nullptr, + m_out.aboveOrEqual( + index, + m_out.load32NonNegative(base, m_heaps.ScopedArguments_totalLength))); + + LValue table = m_out.loadPtr(base, m_heaps.ScopedArguments_table); + LValue namedLength = m_out.load32(table, m_heaps.ScopedArgumentsTable_length); + + LBasicBlock namedCase = m_out.newBlock(); + LBasicBlock overflowCase = m_out.newBlock(); + LBasicBlock continuation = m_out.newBlock(); + + m_out.branch( + m_out.aboveOrEqual(index, namedLength), unsure(overflowCase), unsure(namedCase)); + + LBasicBlock lastNext = m_out.appendTo(namedCase, overflowCase); + + LValue scope = m_out.loadPtr(base, m_heaps.ScopedArguments_scope); + LValue arguments = m_out.loadPtr(table, m_heaps.ScopedArgumentsTable_arguments); + + TypedPointer address = m_out.baseIndex( + m_heaps.scopedArgumentsTableArguments, arguments, m_out.zeroExtPtr(index)); + LValue scopeOffset = m_out.load32(address); + + speculate( + ExoticObjectMode, noValue(), nullptr, + m_out.equal(scopeOffset, m_out.constInt32(ScopeOffset::invalidOffset))); + + address = m_out.baseIndex( + m_heaps.JSEnvironmentRecord_variables, scope, m_out.zeroExtPtr(scopeOffset)); + ValueFromBlock namedResult = m_out.anchor(m_out.load64(address)); + m_out.jump(continuation); + + m_out.appendTo(overflowCase, continuation); + 
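+ // Arguments past the named parameters live in the ScopedArguments overflow storage, indexed from the end of the named range; a hole (empty value) takes the exotic-object exit.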
+ address = m_out.baseIndex( + m_heaps.ScopedArguments_overflowStorage, base, + m_out.zeroExtPtr(m_out.sub(index, namedLength))); + LValue overflowValue = m_out.load64(address); + speculate(ExoticObjectMode, noValue(), nullptr, m_out.isZero64(overflowValue)); + ValueFromBlock overflowResult = m_out.anchor(overflowValue); + m_out.jump(continuation); + + m_out.appendTo(continuation, lastNext); + setJSValue(m_out.phi(m_out.int64, namedResult, overflowResult)); + return; + } + + case Array::Generic: { + setJSValue(vmCall( + m_out.int64, m_out.operation(operationGetByVal), m_callFrame, + lowJSValue(m_node->child1()), lowJSValue(m_node->child2()))); + return; + } + + case Array::String: { + compileStringCharAt(); + return; + } + + default: { + LValue index = lowInt32(m_node->child2()); + LValue storage = lowStorage(m_node->child3()); + + TypedArrayType type = m_node->arrayMode().typedArrayType(); + + if (isTypedView(type)) { + TypedPointer pointer = TypedPointer( + m_heaps.typedArrayProperties, + m_out.add( + storage, + m_out.shl( + m_out.zeroExtPtr(index), + m_out.constIntPtr(logElementSize(type))))); + + if (isInt(type)) { + LValue result; + switch (elementSize(type)) { + case 1: + result = isSigned(type) ? m_out.load8SignExt32(pointer) : m_out.load8ZeroExt32(pointer); + break; + case 2: + result = isSigned(type) ? m_out.load16SignExt32(pointer) : m_out.load16ZeroExt32(pointer); + break; + case 4: + result = m_out.load32(pointer); + break; + default: + DFG_CRASH(m_graph, m_node, "Bad element size"); + } + + if (elementSize(type) < 4 || isSigned(type)) { + setInt32(result); + return; + } + + if (m_node->shouldSpeculateInt32()) { + speculate( + Overflow, noValue(), 0, m_out.lessThan(result, m_out.int32Zero)); + setInt32(result); + return; + } + + if (m_node->shouldSpeculateMachineInt()) { + setStrictInt52(m_out.zeroExt(result, m_out.int64)); + return; + } + + setDouble(m_out.unsignedToDouble(result)); + return; + } + + ASSERT(isFloat(type)); + + LValue result; + switch (type) { + case TypeFloat32: + result = m_out.floatToDouble(m_out.loadFloat(pointer)); + break; + case TypeFloat64: + result = m_out.loadDouble(pointer); + break; + default: + DFG_CRASH(m_graph, m_node, "Bad typed array type"); + } + + setDouble(result); + return; + } + + DFG_CRASH(m_graph, m_node, "Bad array type"); + return; + } } + } + + void compileGetMyArgumentByVal() + { + InlineCallFrame* inlineCallFrame = m_node->child1()->origin.semantic.inlineCallFrame; + + LValue index = lowInt32(m_node->child2()); + + LValue limit; + if (inlineCallFrame && !inlineCallFrame->isVarargs()) + limit = m_out.constInt32(inlineCallFrame->arguments.size() - 1); + else { + VirtualRegister argumentCountRegister; + if (!inlineCallFrame) + argumentCountRegister = VirtualRegister(JSStack::ArgumentCount); + else + argumentCountRegister = inlineCallFrame->argumentCountRegister; + limit = m_out.sub(m_out.load32(payloadFor(argumentCountRegister)), m_out.int32One); + } + + speculate(ExoticObjectMode, noValue(), 0, m_out.aboveOrEqual(index, limit)); + + TypedPointer base; + if (inlineCallFrame) { + if (inlineCallFrame->arguments.size() <= 1) { + // We should have already exited due to the bounds check, above. Just tell the + // compiler that anything dominated by this instruction is not reachable, so + // that we don't waste time generating such code. This will also plant some + // kind of crashing instruction so that if by some fluke the bounds check didn't + // work, we'll crash in an easy-to-see way. 
+ didAlreadyTerminate(); + return; + } + base = addressFor(inlineCallFrame->arguments[1].virtualRegister()); + } else + base = addressFor(virtualRegisterForArgument(1)); + + LValue pointer = m_out.baseIndex( + base.value(), m_out.zeroExt(index, m_out.intPtr), ScaleEight); + setJSValue(m_out.load64(TypedPointer(m_heaps.variables.atAnyIndex(), pointer))); + } + + void compilePutByVal() + { + Edge child1 = m_graph.varArgChild(m_node, 0); + Edge child2 = m_graph.varArgChild(m_node, 1); + Edge child3 = m_graph.varArgChild(m_node, 2); + Edge child4 = m_graph.varArgChild(m_node, 3); + Edge child5 = m_graph.varArgChild(m_node, 4); + + switch (m_node->arrayMode().type()) { + case Array::Generic: { + V_JITOperation_EJJJ operation; + if (m_node->op() == PutByValDirect) { + if (m_graph.isStrictModeFor(m_node->origin.semantic)) + operation = operationPutByValDirectStrict; + else + operation = operationPutByValDirectNonStrict; + } else { + if (m_graph.isStrictModeFor(m_node->origin.semantic)) + operation = operationPutByValStrict; + else + operation = operationPutByValNonStrict; + } + + vmCall( + m_out.voidType, m_out.operation(operation), m_callFrame, + lowJSValue(child1), lowJSValue(child2), lowJSValue(child3)); + return; + } + + default: + break; + } + + LValue base = lowCell(child1); + LValue index = lowInt32(child2); + LValue storage = lowStorage(child4); + + switch (m_node->arrayMode().type()) { + case Array::Int32: + case Array::Double: + case Array::Contiguous: { + LBasicBlock continuation = m_out.newBlock(); + LBasicBlock outerLastNext = m_out.appendTo(m_out.m_block, continuation); + + switch (m_node->arrayMode().type()) { + case Array::Int32: + case Array::Contiguous: { + LValue value = lowJSValue(child3, ManualOperandSpeculation); + + if (m_node->arrayMode().type() == Array::Int32) + FTL_TYPE_CHECK(jsValueValue(value), child3, SpecInt32, isNotInt32(value)); + + TypedPointer elementPointer = m_out.baseIndex( + m_node->arrayMode().type() == Array::Int32 ? + m_heaps.indexedInt32Properties : m_heaps.indexedContiguousProperties, + storage, m_out.zeroExtPtr(index), provenValue(child2)); + + if (m_node->op() == PutByValAlias) { + m_out.store64(value, elementPointer); + break; + } + + contiguousPutByValOutOfBounds( + codeBlock()->isStrictMode() + ? operationPutByValBeyondArrayBoundsStrict + : operationPutByValBeyondArrayBoundsNonStrict, + base, storage, index, value, continuation); + + m_out.store64(value, elementPointer); + break; + } + + case Array::Double: { + LValue value = lowDouble(child3); + + FTL_TYPE_CHECK( + doubleValue(value), child3, SpecDoubleReal, + m_out.doubleNotEqualOrUnordered(value, value)); + + TypedPointer elementPointer = m_out.baseIndex( + m_heaps.indexedDoubleProperties, storage, m_out.zeroExtPtr(index), + provenValue(child2)); + + if (m_node->op() == PutByValAlias) { + m_out.storeDouble(value, elementPointer); + break; + } + + contiguousPutByValOutOfBounds( + codeBlock()->isStrictMode() + ? 
operationPutDoubleByValBeyondArrayBoundsStrict + : operationPutDoubleByValBeyondArrayBoundsNonStrict, + base, storage, index, value, continuation); + + m_out.storeDouble(value, elementPointer); + break; + } + + default: + DFG_CRASH(m_graph, m_node, "Bad array type"); + } + + m_out.jump(continuation); + m_out.appendTo(continuation, outerLastNext); + return; + } + + default: + TypedArrayType type = m_node->arrayMode().typedArrayType(); + + if (isTypedView(type)) { + TypedPointer pointer = TypedPointer( + m_heaps.typedArrayProperties, + m_out.add( + storage, + m_out.shl( + m_out.zeroExt(index, m_out.intPtr), + m_out.constIntPtr(logElementSize(type))))); + + Output::StoreType storeType; + LValue valueToStore; + + if (isInt(type)) { + LValue intValue; + switch (child3.useKind()) { + case Int52RepUse: + case Int32Use: { + if (child3.useKind() == Int32Use) + intValue = lowInt32(child3); + else + intValue = m_out.castToInt32(lowStrictInt52(child3)); + + if (isClamped(type)) { + ASSERT(elementSize(type) == 1); + + LBasicBlock atLeastZero = m_out.newBlock(); + LBasicBlock continuation = m_out.newBlock(); + + Vector<ValueFromBlock, 2> intValues; + intValues.append(m_out.anchor(m_out.int32Zero)); + m_out.branch( + m_out.lessThan(intValue, m_out.int32Zero), + unsure(continuation), unsure(atLeastZero)); + + LBasicBlock lastNext = m_out.appendTo(atLeastZero, continuation); + + intValues.append(m_out.anchor(m_out.select( + m_out.greaterThan(intValue, m_out.constInt32(255)), + m_out.constInt32(255), + intValue))); + m_out.jump(continuation); + + m_out.appendTo(continuation, lastNext); + intValue = m_out.phi(m_out.int32, intValues); + } + break; + } + + case DoubleRepUse: { + LValue doubleValue = lowDouble(child3); + + if (isClamped(type)) { + ASSERT(elementSize(type) == 1); + + LBasicBlock atLeastZero = m_out.newBlock(); + LBasicBlock withinRange = m_out.newBlock(); + LBasicBlock continuation = m_out.newBlock(); + + Vector<ValueFromBlock, 3> intValues; + intValues.append(m_out.anchor(m_out.int32Zero)); + m_out.branch( + m_out.doubleLessThanOrUnordered(doubleValue, m_out.doubleZero), + unsure(continuation), unsure(atLeastZero)); + + LBasicBlock lastNext = m_out.appendTo(atLeastZero, withinRange); + intValues.append(m_out.anchor(m_out.constInt32(255))); + m_out.branch( + m_out.doubleGreaterThan(doubleValue, m_out.constDouble(255)), + unsure(continuation), unsure(withinRange)); + + m_out.appendTo(withinRange, continuation); + intValues.append(m_out.anchor(m_out.doubleToInt(doubleValue))); + m_out.jump(continuation); + + m_out.appendTo(continuation, lastNext); + intValue = m_out.phi(m_out.int32, intValues); + } else + intValue = doubleToInt32(doubleValue); + break; + } + + default: + DFG_CRASH(m_graph, m_node, "Bad use kind"); + } + + valueToStore = intValue; + switch (elementSize(type)) { + case 1: + storeType = Output::Store32As8; + break; + case 2: + storeType = Output::Store32As16; + break; + case 4: + storeType = Output::Store32; + break; + default: + DFG_CRASH(m_graph, m_node, "Bad element size"); + } + } else /* !isInt(type) */ { + LValue value = lowDouble(child3); + switch (type) { + case TypeFloat32: + valueToStore = m_out.doubleToFloat(value); + storeType = Output::StoreFloat; + break; + case TypeFloat64: + valueToStore = value; + storeType = Output::StoreDouble; + break; + default: + DFG_CRASH(m_graph, m_node, "Bad typed array type"); + } + } + + if (m_node->arrayMode().isInBounds() || m_node->op() == PutByValAlias) + m_out.store(valueToStore, pointer, storeType); + else { + LBasicBlock isInBounds = 
m_out.newBlock(); + LBasicBlock continuation = m_out.newBlock(); + + m_out.branch( + m_out.aboveOrEqual(index, lowInt32(child5)), + unsure(continuation), unsure(isInBounds)); + + LBasicBlock lastNext = m_out.appendTo(isInBounds, continuation); + m_out.store(valueToStore, pointer, storeType); + m_out.jump(continuation); + + m_out.appendTo(continuation, lastNext); + } + + return; + } + + DFG_CRASH(m_graph, m_node, "Bad array type"); + break; + } + } + + void compilePutAccessorById() + { + LValue base = lowCell(m_node->child1()); + LValue accessor = lowCell(m_node->child2()); + auto uid = m_graph.identifiers()[m_node->identifierNumber()]; + vmCall( + m_out.voidType, + m_out.operation(m_node->op() == PutGetterById ? operationPutGetterById : operationPutSetterById), + m_callFrame, base, m_out.constIntPtr(uid), m_out.constInt32(m_node->accessorAttributes()), accessor); + } + + void compilePutGetterSetterById() + { + LValue base = lowCell(m_node->child1()); + LValue getter = lowJSValue(m_node->child2()); + LValue setter = lowJSValue(m_node->child3()); + auto uid = m_graph.identifiers()[m_node->identifierNumber()]; + vmCall( + m_out.voidType, m_out.operation(operationPutGetterSetter), + m_callFrame, base, m_out.constIntPtr(uid), m_out.constInt32(m_node->accessorAttributes()), getter, setter); + + } + + void compilePutAccessorByVal() + { + LValue base = lowCell(m_node->child1()); + LValue subscript = lowJSValue(m_node->child2()); + LValue accessor = lowCell(m_node->child3()); + vmCall( + m_out.voidType, + m_out.operation(m_node->op() == PutGetterByVal ? operationPutGetterByVal : operationPutSetterByVal), + m_callFrame, base, subscript, m_out.constInt32(m_node->accessorAttributes()), accessor); + } + + void compileArrayPush() + { + LValue base = lowCell(m_node->child1()); + LValue storage = lowStorage(m_node->child3()); + + switch (m_node->arrayMode().type()) { + case Array::Int32: + case Array::Contiguous: + case Array::Double: { + LValue value; + Output::StoreType storeType; + + if (m_node->arrayMode().type() != Array::Double) { + value = lowJSValue(m_node->child2(), ManualOperandSpeculation); + if (m_node->arrayMode().type() == Array::Int32) { + FTL_TYPE_CHECK( + jsValueValue(value), m_node->child2(), SpecInt32, isNotInt32(value)); + } + storeType = Output::Store64; + } else { + value = lowDouble(m_node->child2()); + FTL_TYPE_CHECK( + doubleValue(value), m_node->child2(), SpecDoubleReal, + m_out.doubleNotEqualOrUnordered(value, value)); + storeType = Output::StoreDouble; + } + + IndexedAbstractHeap& heap = m_heaps.forArrayType(m_node->arrayMode().type()); + + LValue prevLength = m_out.load32(storage, m_heaps.Butterfly_publicLength); + + LBasicBlock fastPath = m_out.newBlock(); + LBasicBlock slowPath = m_out.newBlock(); + LBasicBlock continuation = m_out.newBlock(); + + m_out.branch( + m_out.aboveOrEqual( + prevLength, m_out.load32(storage, m_heaps.Butterfly_vectorLength)), + unsure(slowPath), unsure(fastPath)); + + LBasicBlock lastNext = m_out.appendTo(fastPath, slowPath); + m_out.store( + value, m_out.baseIndex(heap, storage, m_out.zeroExtPtr(prevLength)), storeType); + LValue newLength = m_out.add(prevLength, m_out.int32One); + m_out.store32(newLength, storage, m_heaps.Butterfly_publicLength); + + ValueFromBlock fastResult = m_out.anchor(boxInt32(newLength)); + m_out.jump(continuation); + + m_out.appendTo(slowPath, continuation); + LValue operation; + if (m_node->arrayMode().type() != Array::Double) + operation = m_out.operation(operationArrayPush); + else + operation = 
m_out.operation(operationArrayPushDouble); + ValueFromBlock slowResult = m_out.anchor( + vmCall(m_out.int64, operation, m_callFrame, value, base)); + m_out.jump(continuation); + + m_out.appendTo(continuation, lastNext); + setJSValue(m_out.phi(m_out.int64, fastResult, slowResult)); + return; + } + + default: + DFG_CRASH(m_graph, m_node, "Bad array type"); + return; + } + } + + void compileArrayPop() + { + LValue base = lowCell(m_node->child1()); + LValue storage = lowStorage(m_node->child2()); + + switch (m_node->arrayMode().type()) { + case Array::Int32: + case Array::Double: + case Array::Contiguous: { + IndexedAbstractHeap& heap = m_heaps.forArrayType(m_node->arrayMode().type()); + + LBasicBlock fastCase = m_out.newBlock(); + LBasicBlock slowCase = m_out.newBlock(); + LBasicBlock continuation = m_out.newBlock(); + + LValue prevLength = m_out.load32(storage, m_heaps.Butterfly_publicLength); + + Vector<ValueFromBlock, 3> results; + results.append(m_out.anchor(m_out.constInt64(JSValue::encode(jsUndefined())))); + m_out.branch( + m_out.isZero32(prevLength), rarely(continuation), usually(fastCase)); + + LBasicBlock lastNext = m_out.appendTo(fastCase, slowCase); + LValue newLength = m_out.sub(prevLength, m_out.int32One); + m_out.store32(newLength, storage, m_heaps.Butterfly_publicLength); + TypedPointer pointer = m_out.baseIndex(heap, storage, m_out.zeroExtPtr(newLength)); + if (m_node->arrayMode().type() != Array::Double) { + LValue result = m_out.load64(pointer); + m_out.store64(m_out.int64Zero, pointer); + results.append(m_out.anchor(result)); + m_out.branch( + m_out.notZero64(result), usually(continuation), rarely(slowCase)); + } else { + LValue result = m_out.loadDouble(pointer); + m_out.store64(m_out.constInt64(bitwise_cast<int64_t>(PNaN)), pointer); + results.append(m_out.anchor(boxDouble(result))); + m_out.branch( + m_out.doubleEqual(result, result), + usually(continuation), rarely(slowCase)); + } + + m_out.appendTo(slowCase, continuation); + results.append(m_out.anchor(vmCall( + m_out.int64, m_out.operation(operationArrayPopAndRecoverLength), m_callFrame, base))); + m_out.jump(continuation); + + m_out.appendTo(continuation, lastNext); + setJSValue(m_out.phi(m_out.int64, results)); + return; + } + + default: + DFG_CRASH(m_graph, m_node, "Bad array type"); + return; + } + } + + void compileCreateActivation() + { + LValue scope = lowCell(m_node->child1()); + SymbolTable* table = m_node->castOperand<SymbolTable*>(); + Structure* structure = m_graph.globalObjectFor(m_node->origin.semantic)->activationStructure(); + JSValue initializationValue = m_node->initializationValueForActivation(); + ASSERT(initializationValue.isUndefined() || initializationValue == jsTDZValue()); + if (table->singletonScope()->isStillValid()) { + LValue callResult = vmCall( + m_out.int64, + m_out.operation(operationCreateActivationDirect), m_callFrame, weakPointer(structure), + scope, weakPointer(table), m_out.constInt64(JSValue::encode(initializationValue))); + setJSValue(callResult); + return; + } + + LBasicBlock slowPath = m_out.newBlock(); + LBasicBlock continuation = m_out.newBlock(); + + LBasicBlock lastNext = m_out.insertNewBlocksBefore(slowPath); + + LValue fastObject = allocateObject<JSLexicalEnvironment>( + JSLexicalEnvironment::allocationSize(table), structure, m_out.intPtrZero, slowPath); + + // We don't need memory barriers since we just fast-created the activation, so the + // activation must be young. 
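+ // Inline initialization of the fast-allocated activation: link it into the scope
+ // chain, point it at its SymbolTable, and fill every variable slot with the
+ // initialization value (undefined or the TDZ empty value, per the assertion above).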
+ m_out.storePtr(scope, fastObject, m_heaps.JSScope_next); + m_out.storePtr(weakPointer(table), fastObject, m_heaps.JSSymbolTableObject_symbolTable); + + for (unsigned i = 0; i < table->scopeSize(); ++i) { + m_out.store64( + m_out.constInt64(JSValue::encode(initializationValue)), + fastObject, m_heaps.JSEnvironmentRecord_variables[i]); + } + + ValueFromBlock fastResult = m_out.anchor(fastObject); + m_out.jump(continuation); + + m_out.appendTo(slowPath, continuation); + LValue callResult = lazySlowPath( + [=] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> { + return createLazyCallGenerator( + operationCreateActivationDirect, locations[0].directGPR(), + CCallHelpers::TrustedImmPtr(structure), locations[1].directGPR(), + CCallHelpers::TrustedImmPtr(table), + CCallHelpers::TrustedImm64(JSValue::encode(initializationValue))); + }, + scope); + ValueFromBlock slowResult = m_out.anchor(callResult); + m_out.jump(continuation); + + m_out.appendTo(continuation, lastNext); + setJSValue(m_out.phi(m_out.intPtr, fastResult, slowResult)); + } + + void compileNewFunction() + { + ASSERT(m_node->op() == NewFunction || m_node->op() == NewArrowFunction || m_node->op() == NewGeneratorFunction); + bool isGeneratorFunction = m_node->op() == NewGeneratorFunction; + + LValue scope = lowCell(m_node->child1()); + + FunctionExecutable* executable = m_node->castOperand<FunctionExecutable*>(); + if (executable->singletonFunction()->isStillValid()) { + LValue callResult = + isGeneratorFunction ? vmCall(m_out.int64, m_out.operation(operationNewGeneratorFunction), m_callFrame, scope, weakPointer(executable)) : + vmCall(m_out.int64, m_out.operation(operationNewFunction), m_callFrame, scope, weakPointer(executable)); + setJSValue(callResult); + return; + } + + Structure* structure = + isGeneratorFunction ? m_graph.globalObjectFor(m_node->origin.semantic)->generatorFunctionStructure() : + m_graph.globalObjectFor(m_node->origin.semantic)->functionStructure(); + + LBasicBlock slowPath = m_out.newBlock(); + LBasicBlock continuation = m_out.newBlock(); + + LBasicBlock lastNext = m_out.insertNewBlocksBefore(slowPath); + + LValue fastObject = + isGeneratorFunction ? allocateObject<JSGeneratorFunction>(structure, m_out.intPtrZero, slowPath) : + allocateObject<JSFunction>(structure, m_out.intPtrZero, slowPath); + + + // We don't need memory barriers since we just fast-created the function, so it + // must be young. 
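+ // Inline initialization of the fast-allocated function: set its scope and executable
+ // and leave the rare data pointer null. If inline allocation failed, we already
+ // branched to the lazy slow path below instead.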
+ m_out.storePtr(scope, fastObject, m_heaps.JSFunction_scope); + m_out.storePtr(weakPointer(executable), fastObject, m_heaps.JSFunction_executable); + m_out.storePtr(m_out.intPtrZero, fastObject, m_heaps.JSFunction_rareData); + + ValueFromBlock fastResult = m_out.anchor(fastObject); + m_out.jump(continuation); + + m_out.appendTo(slowPath, continuation); + + Vector<LValue> slowPathArguments; + slowPathArguments.append(scope); + LValue callResult = lazySlowPath( + [=] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> { + if (isGeneratorFunction) { + return createLazyCallGenerator( + operationNewGeneratorFunctionWithInvalidatedReallocationWatchpoint, + locations[0].directGPR(), locations[1].directGPR(), + CCallHelpers::TrustedImmPtr(executable)); + } + return createLazyCallGenerator( + operationNewFunctionWithInvalidatedReallocationWatchpoint, + locations[0].directGPR(), locations[1].directGPR(), + CCallHelpers::TrustedImmPtr(executable)); + }, + slowPathArguments); + ValueFromBlock slowResult = m_out.anchor(callResult); + m_out.jump(continuation); + + m_out.appendTo(continuation, lastNext); + setJSValue(m_out.phi(m_out.intPtr, fastResult, slowResult)); + } + + void compileCreateDirectArguments() + { + // FIXME: A more effective way of dealing with the argument count and callee is to have + // them be explicit arguments to this node. + // https://bugs.webkit.org/show_bug.cgi?id=142207 + + Structure* structure = + m_graph.globalObjectFor(m_node->origin.semantic)->directArgumentsStructure(); + + unsigned minCapacity = m_graph.baselineCodeBlockFor(m_node->origin.semantic)->numParameters() - 1; + + LBasicBlock slowPath = m_out.newBlock(); + LBasicBlock continuation = m_out.newBlock(); + + LBasicBlock lastNext = m_out.insertNewBlocksBefore(slowPath); + + ArgumentsLength length = getArgumentsLength(); + + LValue fastObject; + if (length.isKnown) { + fastObject = allocateObject<DirectArguments>( + DirectArguments::allocationSize(std::max(length.known, minCapacity)), structure, + m_out.intPtrZero, slowPath); + } else { + LValue size = m_out.add( + m_out.shl(length.value, m_out.constInt32(3)), + m_out.constInt32(DirectArguments::storageOffset())); + + size = m_out.select( + m_out.aboveOrEqual(length.value, m_out.constInt32(minCapacity)), + size, m_out.constInt32(DirectArguments::allocationSize(minCapacity))); + + fastObject = allocateVariableSizedObject<DirectArguments>( + size, structure, m_out.intPtrZero, slowPath); + } + + m_out.store32(length.value, fastObject, m_heaps.DirectArguments_length); + m_out.store32(m_out.constInt32(minCapacity), fastObject, m_heaps.DirectArguments_minCapacity); + m_out.storePtr(m_out.intPtrZero, fastObject, m_heaps.DirectArguments_overrides); + + ValueFromBlock fastResult = m_out.anchor(fastObject); + m_out.jump(continuation); + + m_out.appendTo(slowPath, continuation); + LValue callResult = lazySlowPath( + [=] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> { + return createLazyCallGenerator( + operationCreateDirectArguments, locations[0].directGPR(), + CCallHelpers::TrustedImmPtr(structure), locations[1].directGPR(), + CCallHelpers::TrustedImm32(minCapacity)); + }, length.value); + ValueFromBlock slowResult = m_out.anchor(callResult); + m_out.jump(continuation); + + m_out.appendTo(continuation, lastNext); + LValue result = m_out.phi(m_out.intPtr, fastResult, slowResult); + + m_out.storePtr(getCurrentCallee(), result, m_heaps.DirectArguments_callee); + + if (length.isKnown) { + VirtualRegister start = 
AssemblyHelpers::argumentsStart(m_node->origin.semantic); + for (unsigned i = 0; i < std::max(length.known, minCapacity); ++i) { + m_out.store64( + m_out.load64(addressFor(start + i)), + result, m_heaps.DirectArguments_storage[i]); + } + } else { + LValue stackBase = getArgumentsStart(); + + LBasicBlock loop = m_out.newBlock(); + LBasicBlock end = m_out.newBlock(); + + ValueFromBlock originalLength; + if (minCapacity) { + LValue capacity = m_out.select( + m_out.aboveOrEqual(length.value, m_out.constInt32(minCapacity)), + length.value, + m_out.constInt32(minCapacity)); + LValue originalLengthValue = m_out.zeroExtPtr(capacity); + originalLength = m_out.anchor(originalLengthValue); + m_out.jump(loop); + } else { + LValue originalLengthValue = m_out.zeroExtPtr(length.value); + originalLength = m_out.anchor(originalLengthValue); + m_out.branch(m_out.isNull(originalLengthValue), unsure(end), unsure(loop)); + } + + lastNext = m_out.appendTo(loop, end); + LValue previousIndex = m_out.phi(m_out.intPtr, originalLength); + LValue index = m_out.sub(previousIndex, m_out.intPtrOne); + m_out.store64( + m_out.load64(m_out.baseIndex(m_heaps.variables, stackBase, index)), + m_out.baseIndex(m_heaps.DirectArguments_storage, result, index)); + ValueFromBlock nextIndex = m_out.anchor(index); + m_out.addIncomingToPhi(previousIndex, nextIndex); + m_out.branch(m_out.isNull(index), unsure(end), unsure(loop)); + + m_out.appendTo(end, lastNext); + } + + setJSValue(result); + } + + void compileCreateScopedArguments() + { + LValue scope = lowCell(m_node->child1()); + + LValue result = vmCall( + m_out.int64, m_out.operation(operationCreateScopedArguments), m_callFrame, + weakPointer( + m_graph.globalObjectFor(m_node->origin.semantic)->scopedArgumentsStructure()), + getArgumentsStart(), getArgumentsLength().value, getCurrentCallee(), scope); + + setJSValue(result); + } + + void compileCreateClonedArguments() + { + LValue result = vmCall( + m_out.int64, m_out.operation(operationCreateClonedArguments), m_callFrame, + weakPointer( + m_graph.globalObjectFor(m_node->origin.semantic)->outOfBandArgumentsStructure()), + getArgumentsStart(), getArgumentsLength().value, getCurrentCallee()); + + setJSValue(result); + } + + void compileCopyRest() + { + LBasicBlock doCopyRest = m_out.newBlock(); + LBasicBlock continuation = m_out.newBlock(); + + LValue arrayLength = lowInt32(m_node->child2()); + + m_out.branch( + m_out.equal(arrayLength, m_out.constInt32(0)), + unsure(continuation), unsure(doCopyRest)); + + LBasicBlock lastNext = m_out.appendTo(doCopyRest, continuation); + // Arguments: 0:exec, 1:JSCell* array, 2:arguments start, 3:number of arguments to skip, 4:array length + LValue numberOfArgumentsToSkip = m_out.constInt32(m_node->numberOfArgumentsToSkip()); + vmCall( + m_out.voidType,m_out.operation(operationCopyRest), m_callFrame, lowCell(m_node->child1()), + getArgumentsStart(), numberOfArgumentsToSkip, arrayLength); + m_out.jump(continuation); + + m_out.appendTo(continuation, lastNext); + } + + void compileGetRestLength() + { + LBasicBlock nonZeroLength = m_out.newBlock(); + LBasicBlock continuation = m_out.newBlock(); + + ValueFromBlock zeroLengthResult = m_out.anchor(m_out.constInt32(0)); + + LValue numberOfArgumentsToSkip = m_out.constInt32(m_node->numberOfArgumentsToSkip()); + LValue argumentsLength = getArgumentsLength().value; + m_out.branch(m_out.above(argumentsLength, numberOfArgumentsToSkip), + unsure(nonZeroLength), unsure(continuation)); + + LBasicBlock lastNext = m_out.appendTo(nonZeroLength, continuation); + 
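+ // The rest parameter length is argumentsLength - numberOfArgumentsToSkip whenever the
+ // caller supplied more arguments than are skipped; otherwise the zero anchored above
+ // flows through the phi. For example, a function f(a, b, ...rest) called with four
+ // arguments skips two of them and sees rest.length == 2.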
ValueFromBlock nonZeroLengthResult = m_out.anchor(m_out.sub(argumentsLength, numberOfArgumentsToSkip)); + m_out.jump(continuation); + + m_out.appendTo(continuation, lastNext); + setInt32(m_out.phi(m_out.int32, zeroLengthResult, nonZeroLengthResult)); + } + + void compileNewObject() + { + setJSValue(allocateObject(m_node->structure())); + } + + void compileNewArray() + { + // First speculate appropriately on all of the children. Do this unconditionally up here + // because some of the slow paths may otherwise forget to do it. It's sort of arguable + // that doing the speculations up here might be unprofitable for RA - so we can consider + // sinking this to below the allocation fast path if we find that this has a lot of + // register pressure. + for (unsigned operandIndex = 0; operandIndex < m_node->numChildren(); ++operandIndex) + speculate(m_graph.varArgChild(m_node, operandIndex)); + + JSGlobalObject* globalObject = m_graph.globalObjectFor(m_node->origin.semantic); + Structure* structure = globalObject->arrayStructureForIndexingTypeDuringAllocation( + m_node->indexingType()); + + if (!globalObject->isHavingABadTime() && !hasAnyArrayStorage(m_node->indexingType())) { + unsigned numElements = m_node->numChildren(); + + ArrayValues arrayValues = allocateJSArray(structure, numElements); + + for (unsigned operandIndex = 0; operandIndex < m_node->numChildren(); ++operandIndex) { + Edge edge = m_graph.varArgChild(m_node, operandIndex); + + switch (m_node->indexingType()) { + case ALL_BLANK_INDEXING_TYPES: + case ALL_UNDECIDED_INDEXING_TYPES: + DFG_CRASH(m_graph, m_node, "Bad indexing type"); + break; + + case ALL_DOUBLE_INDEXING_TYPES: + m_out.storeDouble( + lowDouble(edge), + arrayValues.butterfly, m_heaps.indexedDoubleProperties[operandIndex]); + break; + + case ALL_INT32_INDEXING_TYPES: + case ALL_CONTIGUOUS_INDEXING_TYPES: + m_out.store64( + lowJSValue(edge, ManualOperandSpeculation), + arrayValues.butterfly, + m_heaps.forIndexingType(m_node->indexingType())->at(operandIndex)); + break; + + default: + DFG_CRASH(m_graph, m_node, "Corrupt indexing type"); + break; + } + } + + setJSValue(arrayValues.array); + return; + } + + if (!m_node->numChildren()) { + setJSValue(vmCall( + m_out.int64, m_out.operation(operationNewEmptyArray), m_callFrame, + m_out.constIntPtr(structure))); + return; + } + + size_t scratchSize = sizeof(EncodedJSValue) * m_node->numChildren(); + ASSERT(scratchSize); + ScratchBuffer* scratchBuffer = vm().scratchBufferForSize(scratchSize); + EncodedJSValue* buffer = static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer()); + + for (unsigned operandIndex = 0; operandIndex < m_node->numChildren(); ++operandIndex) { + Edge edge = m_graph.varArgChild(m_node, operandIndex); + m_out.store64( + lowJSValue(edge, ManualOperandSpeculation), + m_out.absolute(buffer + operandIndex)); + } + + m_out.storePtr( + m_out.constIntPtr(scratchSize), m_out.absolute(scratchBuffer->activeLengthPtr())); + + LValue result = vmCall( + m_out.int64, m_out.operation(operationNewArray), m_callFrame, + m_out.constIntPtr(structure), m_out.constIntPtr(buffer), + m_out.constIntPtr(m_node->numChildren())); + + m_out.storePtr(m_out.intPtrZero, m_out.absolute(scratchBuffer->activeLengthPtr())); + + setJSValue(result); + } + + void compileNewArrayBuffer() + { + JSGlobalObject* globalObject = m_graph.globalObjectFor(m_node->origin.semantic); + Structure* structure = globalObject->arrayStructureForIndexingTypeDuringAllocation( + m_node->indexingType()); + + if (!globalObject->isHavingABadTime() && 
!hasAnyArrayStorage(m_node->indexingType())) { + unsigned numElements = m_node->numConstants(); + + ArrayValues arrayValues = allocateJSArray(structure, numElements); + + JSValue* data = codeBlock()->constantBuffer(m_node->startConstant()); + for (unsigned index = 0; index < m_node->numConstants(); ++index) { + int64_t value; + if (hasDouble(m_node->indexingType())) + value = bitwise_cast<int64_t>(data[index].asNumber()); + else + value = JSValue::encode(data[index]); + + m_out.store64( + m_out.constInt64(value), + arrayValues.butterfly, + m_heaps.forIndexingType(m_node->indexingType())->at(index)); + } + + setJSValue(arrayValues.array); + return; + } + + setJSValue(vmCall( + m_out.int64, m_out.operation(operationNewArrayBuffer), m_callFrame, + m_out.constIntPtr(structure), m_out.constIntPtr(m_node->startConstant()), + m_out.constIntPtr(m_node->numConstants()))); + } + + void compileNewArrayWithSize() + { + LValue publicLength = lowInt32(m_node->child1()); + + JSGlobalObject* globalObject = m_graph.globalObjectFor(m_node->origin.semantic); + Structure* structure = globalObject->arrayStructureForIndexingTypeDuringAllocation( + m_node->indexingType()); + + if (!globalObject->isHavingABadTime() && !hasAnyArrayStorage(m_node->indexingType())) { + ASSERT( + hasUndecided(structure->indexingType()) + || hasInt32(structure->indexingType()) + || hasDouble(structure->indexingType()) + || hasContiguous(structure->indexingType())); + + LBasicBlock fastCase = m_out.newBlock(); + LBasicBlock largeCase = m_out.newBlock(); + LBasicBlock failCase = m_out.newBlock(); + LBasicBlock slowCase = m_out.newBlock(); + LBasicBlock continuation = m_out.newBlock(); + + m_out.branch( + m_out.aboveOrEqual(publicLength, m_out.constInt32(MIN_ARRAY_STORAGE_CONSTRUCTION_LENGTH)), + rarely(largeCase), usually(fastCase)); + + LBasicBlock lastNext = m_out.appendTo(fastCase, largeCase); + + // We don't round up to BASE_VECTOR_LEN for new Array(blah). 
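+ // The butterfly is sized at 8 bytes per element plus room for the IndexingHeader,
+ // and the butterfly pointer is placed just past that header. For double arrays the
+ // loop below fills every slot with PNaN, the hole representation used by the
+ // Array::Double paths above.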
+ LValue vectorLength = publicLength; + + LValue payloadSize = + m_out.shl(m_out.zeroExt(vectorLength, m_out.intPtr), m_out.constIntPtr(3)); + + LValue butterflySize = m_out.add( + payloadSize, m_out.constIntPtr(sizeof(IndexingHeader))); + + LValue endOfStorage = allocateBasicStorageAndGetEnd(butterflySize, failCase); + + LValue butterfly = m_out.sub(endOfStorage, payloadSize); + + LValue object = allocateObject<JSArray>(structure, butterfly, failCase); + + m_out.store32(publicLength, butterfly, m_heaps.Butterfly_publicLength); + m_out.store32(vectorLength, butterfly, m_heaps.Butterfly_vectorLength); + + if (hasDouble(m_node->indexingType())) { + LBasicBlock initLoop = m_out.newBlock(); + LBasicBlock initDone = m_out.newBlock(); + + ValueFromBlock originalIndex = m_out.anchor(vectorLength); + ValueFromBlock originalPointer = m_out.anchor(butterfly); + m_out.branch( + m_out.notZero32(vectorLength), unsure(initLoop), unsure(initDone)); + + LBasicBlock initLastNext = m_out.appendTo(initLoop, initDone); + LValue index = m_out.phi(m_out.int32, originalIndex); + LValue pointer = m_out.phi(m_out.intPtr, originalPointer); + + m_out.store64( + m_out.constInt64(bitwise_cast<int64_t>(PNaN)), + TypedPointer(m_heaps.indexedDoubleProperties.atAnyIndex(), pointer)); + + LValue nextIndex = m_out.sub(index, m_out.int32One); + m_out.addIncomingToPhi(index, m_out.anchor(nextIndex)); + m_out.addIncomingToPhi(pointer, m_out.anchor(m_out.add(pointer, m_out.intPtrEight))); + m_out.branch( + m_out.notZero32(nextIndex), unsure(initLoop), unsure(initDone)); + + m_out.appendTo(initDone, initLastNext); + } + + ValueFromBlock fastResult = m_out.anchor(object); + m_out.jump(continuation); + + m_out.appendTo(largeCase, failCase); + ValueFromBlock largeStructure = m_out.anchor(m_out.constIntPtr( + globalObject->arrayStructureForIndexingTypeDuringAllocation(ArrayWithArrayStorage))); + m_out.jump(slowCase); + + m_out.appendTo(failCase, slowCase); + ValueFromBlock failStructure = m_out.anchor(m_out.constIntPtr(structure)); + m_out.jump(slowCase); + + m_out.appendTo(slowCase, continuation); + LValue structureValue = m_out.phi( + m_out.intPtr, largeStructure, failStructure); + LValue slowResultValue = lazySlowPath( + [=] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> { + return createLazyCallGenerator( + operationNewArrayWithSize, locations[0].directGPR(), + locations[1].directGPR(), locations[2].directGPR()); + }, + structureValue, publicLength); + ValueFromBlock slowResult = m_out.anchor(slowResultValue); + m_out.jump(continuation); + + m_out.appendTo(continuation, lastNext); + setJSValue(m_out.phi(m_out.intPtr, fastResult, slowResult)); + return; + } + + LValue structureValue = m_out.select( + m_out.aboveOrEqual(publicLength, m_out.constInt32(MIN_ARRAY_STORAGE_CONSTRUCTION_LENGTH)), + m_out.constIntPtr( + globalObject->arrayStructureForIndexingTypeDuringAllocation(ArrayWithArrayStorage)), + m_out.constIntPtr(structure)); + setJSValue(vmCall(m_out.int64, m_out.operation(operationNewArrayWithSize), m_callFrame, structureValue, publicLength)); + } + + void compileNewTypedArray() + { + TypedArrayType type = m_node->typedArrayType(); + JSGlobalObject* globalObject = m_graph.globalObjectFor(m_node->origin.semantic); + + switch (m_node->child1().useKind()) { + case Int32Use: { + Structure* structure = globalObject->typedArrayStructure(type); + + LValue size = lowInt32(m_node->child1()); + + LBasicBlock smallEnoughCase = m_out.newBlock(); + LBasicBlock nonZeroCase = m_out.newBlock(); + LBasicBlock slowCase 
= m_out.newBlock(); + LBasicBlock continuation = m_out.newBlock(); + + m_out.branch( + m_out.above(size, m_out.constInt32(JSArrayBufferView::fastSizeLimit)), + rarely(slowCase), usually(smallEnoughCase)); + + LBasicBlock lastNext = m_out.appendTo(smallEnoughCase, nonZeroCase); + + m_out.branch(m_out.notZero32(size), usually(nonZeroCase), rarely(slowCase)); + + m_out.appendTo(nonZeroCase, slowCase); + + LValue byteSize = + m_out.shl(m_out.zeroExtPtr(size), m_out.constInt32(logElementSize(type))); + if (elementSize(type) < 8) { + byteSize = m_out.bitAnd( + m_out.add(byteSize, m_out.constIntPtr(7)), + m_out.constIntPtr(~static_cast<intptr_t>(7))); + } + + LValue storage = allocateBasicStorage(byteSize, slowCase); + + LValue fastResultValue = + allocateObject<JSArrayBufferView>(structure, m_out.intPtrZero, slowCase); + + m_out.storePtr(storage, fastResultValue, m_heaps.JSArrayBufferView_vector); + m_out.store32(size, fastResultValue, m_heaps.JSArrayBufferView_length); + m_out.store32(m_out.constInt32(FastTypedArray), fastResultValue, m_heaps.JSArrayBufferView_mode); + + ValueFromBlock fastResult = m_out.anchor(fastResultValue); + m_out.jump(continuation); + + m_out.appendTo(slowCase, continuation); + + LValue slowResultValue = lazySlowPath( + [=] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> { + return createLazyCallGenerator( + operationNewTypedArrayWithSizeForType(type), locations[0].directGPR(), + CCallHelpers::TrustedImmPtr(structure), locations[1].directGPR()); + }, + size); + ValueFromBlock slowResult = m_out.anchor(slowResultValue); + m_out.jump(continuation); + + m_out.appendTo(continuation, lastNext); + setJSValue(m_out.phi(m_out.intPtr, fastResult, slowResult)); + return; + } + + case UntypedUse: { + LValue argument = lowJSValue(m_node->child1()); + + LValue result = vmCall( + m_out.intPtr, m_out.operation(operationNewTypedArrayWithOneArgumentForType(type)), + m_callFrame, weakPointer(globalObject->typedArrayStructure(type)), argument); + + setJSValue(result); + return; + } + + default: + DFG_CRASH(m_graph, m_node, "Bad use kind"); + return; + } + } + + void compileAllocatePropertyStorage() + { + LValue object = lowCell(m_node->child1()); + setStorage(allocatePropertyStorage(object, m_node->transition()->previous)); + } + + void compileReallocatePropertyStorage() + { + Transition* transition = m_node->transition(); + LValue object = lowCell(m_node->child1()); + LValue oldStorage = lowStorage(m_node->child2()); + + setStorage( + reallocatePropertyStorage( + object, oldStorage, transition->previous, transition->next)); + } + + void compileToStringOrCallStringConstructor() + { + switch (m_node->child1().useKind()) { + case StringObjectUse: { + LValue cell = lowCell(m_node->child1()); + speculateStringObjectForCell(m_node->child1(), cell); + m_interpreter.filter(m_node->child1(), SpecStringObject); + + setJSValue(m_out.loadPtr(cell, m_heaps.JSWrapperObject_internalValue)); + return; + } + + case StringOrStringObjectUse: { + LValue cell = lowCell(m_node->child1()); + LValue structureID = m_out.load32(cell, m_heaps.JSCell_structureID); + + LBasicBlock notString = m_out.newBlock(); + LBasicBlock continuation = m_out.newBlock(); + + ValueFromBlock simpleResult = m_out.anchor(cell); + m_out.branch( + m_out.equal(structureID, m_out.constInt32(vm().stringStructure->id())), + unsure(continuation), unsure(notString)); + + LBasicBlock lastNext = m_out.appendTo(notString, continuation); + speculateStringObjectForStructureID(m_node->child1(), structureID); + ValueFromBlock 
unboxedResult = m_out.anchor( + m_out.loadPtr(cell, m_heaps.JSWrapperObject_internalValue)); + m_out.jump(continuation); + + m_out.appendTo(continuation, lastNext); + setJSValue(m_out.phi(m_out.int64, simpleResult, unboxedResult)); + + m_interpreter.filter(m_node->child1(), SpecString | SpecStringObject); + return; + } + + case CellUse: + case UntypedUse: { + LValue value; + if (m_node->child1().useKind() == CellUse) + value = lowCell(m_node->child1()); + else + value = lowJSValue(m_node->child1()); + + LBasicBlock isCell = m_out.newBlock(); + LBasicBlock notString = m_out.newBlock(); + LBasicBlock continuation = m_out.newBlock(); + + LValue isCellPredicate; + if (m_node->child1().useKind() == CellUse) + isCellPredicate = m_out.booleanTrue; + else + isCellPredicate = this->isCell(value, provenType(m_node->child1())); + m_out.branch(isCellPredicate, unsure(isCell), unsure(notString)); + + LBasicBlock lastNext = m_out.appendTo(isCell, notString); + ValueFromBlock simpleResult = m_out.anchor(value); + LValue isStringPredicate; + if (m_node->child1()->prediction() & SpecString) { + isStringPredicate = isString(value, provenType(m_node->child1())); + } else + isStringPredicate = m_out.booleanFalse; + m_out.branch(isStringPredicate, unsure(continuation), unsure(notString)); + + m_out.appendTo(notString, continuation); + LValue operation; + if (m_node->child1().useKind() == CellUse) + operation = m_out.operation(m_node->op() == ToString ? operationToStringOnCell : operationCallStringConstructorOnCell); + else + operation = m_out.operation(m_node->op() == ToString ? operationToString : operationCallStringConstructor); + ValueFromBlock convertedResult = m_out.anchor(vmCall(m_out.int64, operation, m_callFrame, value)); + m_out.jump(continuation); + + m_out.appendTo(continuation, lastNext); + setJSValue(m_out.phi(m_out.int64, simpleResult, convertedResult)); + return; + } + + default: + DFG_CRASH(m_graph, m_node, "Bad use kind"); + break; + } + } + + void compileToPrimitive() + { + LValue value = lowJSValue(m_node->child1()); + + LBasicBlock isCellCase = m_out.newBlock(); + LBasicBlock isObjectCase = m_out.newBlock(); + LBasicBlock continuation = m_out.newBlock(); + + Vector<ValueFromBlock, 3> results; + + results.append(m_out.anchor(value)); + m_out.branch( + isCell(value, provenType(m_node->child1())), unsure(isCellCase), unsure(continuation)); + + LBasicBlock lastNext = m_out.appendTo(isCellCase, isObjectCase); + results.append(m_out.anchor(value)); + m_out.branch( + isObject(value, provenType(m_node->child1())), + unsure(isObjectCase), unsure(continuation)); + + m_out.appendTo(isObjectCase, continuation); + results.append(m_out.anchor(vmCall( + m_out.int64, m_out.operation(operationToPrimitive), m_callFrame, value))); + m_out.jump(continuation); + + m_out.appendTo(continuation, lastNext); + setJSValue(m_out.phi(m_out.int64, results)); + } + + void compileMakeRope() + { + LValue kids[3]; + unsigned numKids; + kids[0] = lowCell(m_node->child1()); + kids[1] = lowCell(m_node->child2()); + if (m_node->child3()) { + kids[2] = lowCell(m_node->child3()); + numKids = 3; + } else { + kids[2] = 0; + numKids = 2; + } + + LBasicBlock slowPath = m_out.newBlock(); + LBasicBlock continuation = m_out.newBlock(); + + LBasicBlock lastNext = m_out.insertNewBlocksBefore(slowPath); + + MarkedAllocator& allocator = + vm().heap.allocatorForObjectWithDestructor(sizeof(JSRopeString)); + + LValue result = allocateCell( + m_out.constIntPtr(&allocator), + vm().stringStructure.get(), + slowPath); + + 
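+ // A rope is a JSString whose StringImpl pointer is null and whose fiber slots hold
+ // the child strings. Its flags are the bitwise AND of the children's flags (so the
+ // rope is 8-bit only if every fiber is), and its length is the checked sum of the
+ // fiber lengths; overflow of that sum triggers a speculation failure.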
m_out.storePtr(m_out.intPtrZero, result, m_heaps.JSString_value); + for (unsigned i = 0; i < numKids; ++i) + m_out.storePtr(kids[i], result, m_heaps.JSRopeString_fibers[i]); + for (unsigned i = numKids; i < JSRopeString::s_maxInternalRopeLength; ++i) + m_out.storePtr(m_out.intPtrZero, result, m_heaps.JSRopeString_fibers[i]); + LValue flags = m_out.load32(kids[0], m_heaps.JSString_flags); + LValue length = m_out.load32(kids[0], m_heaps.JSString_length); + for (unsigned i = 1; i < numKids; ++i) { + flags = m_out.bitAnd(flags, m_out.load32(kids[i], m_heaps.JSString_flags)); + CheckValue* lengthCheck = m_out.speculateAdd( + length, m_out.load32(kids[i], m_heaps.JSString_length)); + blessSpeculation(lengthCheck, Uncountable, noValue(), nullptr, m_origin); + length = lengthCheck; + } + m_out.store32( + m_out.bitAnd(m_out.constInt32(JSString::Is8Bit), flags), + result, m_heaps.JSString_flags); + m_out.store32(length, result, m_heaps.JSString_length); + + ValueFromBlock fastResult = m_out.anchor(result); + m_out.jump(continuation); + + m_out.appendTo(slowPath, continuation); + LValue slowResultValue; + switch (numKids) { + case 2: + slowResultValue = lazySlowPath( + [=] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> { + return createLazyCallGenerator( + operationMakeRope2, locations[0].directGPR(), locations[1].directGPR(), + locations[2].directGPR()); + }, kids[0], kids[1]); + break; + case 3: + slowResultValue = lazySlowPath( + [=] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> { + return createLazyCallGenerator( + operationMakeRope3, locations[0].directGPR(), locations[1].directGPR(), + locations[2].directGPR(), locations[3].directGPR()); + }, kids[0], kids[1], kids[2]); + break; + default: + DFG_CRASH(m_graph, m_node, "Bad number of children"); + break; + } + ValueFromBlock slowResult = m_out.anchor(slowResultValue); + m_out.jump(continuation); + + m_out.appendTo(continuation, lastNext); + setJSValue(m_out.phi(m_out.int64, fastResult, slowResult)); + } + + void compileStringCharAt() + { + LValue base = lowCell(m_node->child1()); + LValue index = lowInt32(m_node->child2()); + LValue storage = lowStorage(m_node->child3()); + + LBasicBlock fastPath = m_out.newBlock(); + LBasicBlock slowPath = m_out.newBlock(); + LBasicBlock continuation = m_out.newBlock(); + + m_out.branch( + m_out.aboveOrEqual( + index, m_out.load32NonNegative(base, m_heaps.JSString_length)), + rarely(slowPath), usually(fastPath)); + + LBasicBlock lastNext = m_out.appendTo(fastPath, slowPath); + + LValue stringImpl = m_out.loadPtr(base, m_heaps.JSString_value); + + LBasicBlock is8Bit = m_out.newBlock(); + LBasicBlock is16Bit = m_out.newBlock(); + LBasicBlock bitsContinuation = m_out.newBlock(); + LBasicBlock bigCharacter = m_out.newBlock(); + + m_out.branch( + m_out.testIsZero32( + m_out.load32(stringImpl, m_heaps.StringImpl_hashAndFlags), + m_out.constInt32(StringImpl::flagIs8Bit())), + unsure(is16Bit), unsure(is8Bit)); + + m_out.appendTo(is8Bit, is16Bit); + + ValueFromBlock char8Bit = m_out.anchor( + m_out.load8ZeroExt32(m_out.baseIndex( + m_heaps.characters8, storage, m_out.zeroExtPtr(index), + provenValue(m_node->child2())))); + m_out.jump(bitsContinuation); + + m_out.appendTo(is16Bit, bigCharacter); + + LValue char16BitValue = m_out.load16ZeroExt32( + m_out.baseIndex( + m_heaps.characters16, storage, m_out.zeroExtPtr(index), + provenValue(m_node->child2()))); + ValueFromBlock char16Bit = m_out.anchor(char16BitValue); + m_out.branch( + m_out.aboveOrEqual(char16BitValue, 
m_out.constInt32(0x100)), + rarely(bigCharacter), usually(bitsContinuation)); + + m_out.appendTo(bigCharacter, bitsContinuation); + + Vector<ValueFromBlock, 4> results; + results.append(m_out.anchor(vmCall( + m_out.int64, m_out.operation(operationSingleCharacterString), + m_callFrame, char16BitValue))); + m_out.jump(continuation); + + m_out.appendTo(bitsContinuation, slowPath); + + LValue character = m_out.phi(m_out.int32, char8Bit, char16Bit); + + LValue smallStrings = m_out.constIntPtr(vm().smallStrings.singleCharacterStrings()); + + results.append(m_out.anchor(m_out.loadPtr(m_out.baseIndex( + m_heaps.singleCharacterStrings, smallStrings, m_out.zeroExtPtr(character))))); + m_out.jump(continuation); + + m_out.appendTo(slowPath, continuation); + + if (m_node->arrayMode().isInBounds()) { + speculate(OutOfBounds, noValue(), 0, m_out.booleanTrue); + results.append(m_out.anchor(m_out.intPtrZero)); + } else { + JSGlobalObject* globalObject = m_graph.globalObjectFor(m_node->origin.semantic); + + if (globalObject->stringPrototypeChainIsSane()) { + // FIXME: This could be captured using a Speculation mode that means + // "out-of-bounds loads return a trivial value", something like + // SaneChainOutOfBounds. + // https://bugs.webkit.org/show_bug.cgi?id=144668 + + m_graph.watchpoints().addLazily(globalObject->stringPrototype()->structure()->transitionWatchpointSet()); + m_graph.watchpoints().addLazily(globalObject->objectPrototype()->structure()->transitionWatchpointSet()); + + LBasicBlock negativeIndex = m_out.newBlock(); + + results.append(m_out.anchor(m_out.constInt64(JSValue::encode(jsUndefined())))); + m_out.branch( + m_out.lessThan(index, m_out.int32Zero), + rarely(negativeIndex), usually(continuation)); + + m_out.appendTo(negativeIndex, continuation); + } + + results.append(m_out.anchor(vmCall( + m_out.int64, m_out.operation(operationGetByValStringInt), m_callFrame, base, index))); + } + + m_out.jump(continuation); + + m_out.appendTo(continuation, lastNext); + setJSValue(m_out.phi(m_out.int64, results)); + } + + void compileStringCharCodeAt() + { + LBasicBlock is8Bit = m_out.newBlock(); + LBasicBlock is16Bit = m_out.newBlock(); + LBasicBlock continuation = m_out.newBlock(); + + LValue base = lowCell(m_node->child1()); + LValue index = lowInt32(m_node->child2()); + LValue storage = lowStorage(m_node->child3()); + + speculate( + Uncountable, noValue(), 0, + m_out.aboveOrEqual( + index, m_out.load32NonNegative(base, m_heaps.JSString_length))); + + LValue stringImpl = m_out.loadPtr(base, m_heaps.JSString_value); + + m_out.branch( + m_out.testIsZero32( + m_out.load32(stringImpl, m_heaps.StringImpl_hashAndFlags), + m_out.constInt32(StringImpl::flagIs8Bit())), + unsure(is16Bit), unsure(is8Bit)); + + LBasicBlock lastNext = m_out.appendTo(is8Bit, is16Bit); + + ValueFromBlock char8Bit = m_out.anchor( + m_out.load8ZeroExt32(m_out.baseIndex( + m_heaps.characters8, storage, m_out.zeroExtPtr(index), + provenValue(m_node->child2())))); + m_out.jump(continuation); + + m_out.appendTo(is16Bit, continuation); + + ValueFromBlock char16Bit = m_out.anchor( + m_out.load16ZeroExt32(m_out.baseIndex( + m_heaps.characters16, storage, m_out.zeroExtPtr(index), + provenValue(m_node->child2())))); + m_out.jump(continuation); + + m_out.appendTo(continuation, lastNext); + + setInt32(m_out.phi(m_out.int32, char8Bit, char16Bit)); + } + + void compileStringFromCharCode() + { + Edge childEdge = m_node->child1(); + + if (childEdge.useKind() == UntypedUse) { + LValue result = vmCall( + m_out.int64, 
m_out.operation(operationStringFromCharCodeUntyped), m_callFrame, + lowJSValue(childEdge)); + setJSValue(result); + return; + } + + DFG_ASSERT(m_graph, m_node, childEdge.useKind() == Int32Use); + + LValue value = lowInt32(childEdge); + + LBasicBlock smallIntCase = m_out.newBlock(); + LBasicBlock slowCase = m_out.newBlock(); + LBasicBlock continuation = m_out.newBlock(); + + m_out.branch( + m_out.aboveOrEqual(value, m_out.constInt32(0xff)), + rarely(slowCase), usually(smallIntCase)); + + LBasicBlock lastNext = m_out.appendTo(smallIntCase, slowCase); + + LValue smallStrings = m_out.constIntPtr(vm().smallStrings.singleCharacterStrings()); + LValue fastResultValue = m_out.loadPtr( + m_out.baseIndex(m_heaps.singleCharacterStrings, smallStrings, m_out.zeroExtPtr(value))); + ValueFromBlock fastResult = m_out.anchor(fastResultValue); + m_out.jump(continuation); + + m_out.appendTo(slowCase, continuation); + + LValue slowResultValue = vmCall( + m_out.intPtr, m_out.operation(operationStringFromCharCode), m_callFrame, value); + ValueFromBlock slowResult = m_out.anchor(slowResultValue); + m_out.jump(continuation); + + m_out.appendTo(continuation, lastNext); + + setJSValue(m_out.phi(m_out.int64, fastResult, slowResult)); + } + + void compileGetByOffset() + { + StorageAccessData& data = m_node->storageAccessData(); + + setJSValue(loadProperty( + lowStorage(m_node->child1()), data.identifierNumber, data.offset)); + } + + void compileGetGetter() + { + setJSValue(m_out.loadPtr(lowCell(m_node->child1()), m_heaps.GetterSetter_getter)); + } + + void compileGetSetter() + { + setJSValue(m_out.loadPtr(lowCell(m_node->child1()), m_heaps.GetterSetter_setter)); + } + + void compileMultiGetByOffset() + { + LValue base = lowCell(m_node->child1()); + + MultiGetByOffsetData& data = m_node->multiGetByOffsetData(); + + if (data.cases.isEmpty()) { + // Protect against creating a Phi function with zero inputs. LLVM didn't like that. + // It's not clear if this is needed anymore. + // FIXME: https://bugs.webkit.org/show_bug.cgi?id=154382 + terminate(BadCache); + return; + } + + Vector<LBasicBlock, 2> blocks(data.cases.size()); + for (unsigned i = data.cases.size(); i--;) + blocks[i] = m_out.newBlock(); + LBasicBlock exit = m_out.newBlock(); + LBasicBlock continuation = m_out.newBlock(); + + Vector<SwitchCase, 2> cases; + StructureSet baseSet; + for (unsigned i = data.cases.size(); i--;) { + MultiGetByOffsetCase getCase = data.cases[i]; + for (unsigned j = getCase.set().size(); j--;) { + Structure* structure = getCase.set()[j]; + baseSet.add(structure); + cases.append(SwitchCase(weakStructureID(structure), blocks[i], Weight(1))); + } + } + m_out.switchInstruction( + m_out.load32(base, m_heaps.JSCell_structureID), cases, exit, Weight(0)); + + LBasicBlock lastNext = m_out.m_nextBlock; + + Vector<ValueFromBlock, 2> results; + for (unsigned i = data.cases.size(); i--;) { + MultiGetByOffsetCase getCase = data.cases[i]; + GetByOffsetMethod method = getCase.method(); + + m_out.appendTo(blocks[i], i + 1 < data.cases.size() ? 
blocks[i + 1] : exit); + + LValue result; + + switch (method.kind()) { + case GetByOffsetMethod::Invalid: + RELEASE_ASSERT_NOT_REACHED(); + break; + + case GetByOffsetMethod::Constant: + result = m_out.constInt64(JSValue::encode(method.constant()->value())); + break; + + case GetByOffsetMethod::Load: + case GetByOffsetMethod::LoadFromPrototype: { + LValue propertyBase; + if (method.kind() == GetByOffsetMethod::Load) + propertyBase = base; + else + propertyBase = weakPointer(method.prototype()->value().asCell()); + if (!isInlineOffset(method.offset())) + propertyBase = loadButterflyReadOnly(propertyBase); + result = loadProperty( + propertyBase, data.identifierNumber, method.offset()); + break; + } } + + results.append(m_out.anchor(result)); + m_out.jump(continuation); + } + + m_out.appendTo(exit, continuation); + if (!m_interpreter.forNode(m_node->child1()).m_structure.isSubsetOf(baseSet)) + speculate(BadCache, noValue(), nullptr, m_out.booleanTrue); + m_out.unreachable(); + + m_out.appendTo(continuation, lastNext); + setJSValue(m_out.phi(m_out.int64, results)); + } + + void compilePutByOffset() + { + StorageAccessData& data = m_node->storageAccessData(); + + storeProperty( + lowJSValue(m_node->child3()), + lowStorage(m_node->child1()), data.identifierNumber, data.offset); + } + + void compileMultiPutByOffset() + { + LValue base = lowCell(m_node->child1()); + LValue value = lowJSValue(m_node->child2()); + + MultiPutByOffsetData& data = m_node->multiPutByOffsetData(); + + Vector<LBasicBlock, 2> blocks(data.variants.size()); + for (unsigned i = data.variants.size(); i--;) + blocks[i] = m_out.newBlock(); + LBasicBlock exit = m_out.newBlock(); + LBasicBlock continuation = m_out.newBlock(); + + Vector<SwitchCase, 2> cases; + StructureSet baseSet; + for (unsigned i = data.variants.size(); i--;) { + PutByIdVariant variant = data.variants[i]; + for (unsigned j = variant.oldStructure().size(); j--;) { + Structure* structure = variant.oldStructure()[j]; + baseSet.add(structure); + cases.append(SwitchCase(weakStructureID(structure), blocks[i], Weight(1))); + } + } + m_out.switchInstruction( + m_out.load32(base, m_heaps.JSCell_structureID), cases, exit, Weight(0)); + + LBasicBlock lastNext = m_out.m_nextBlock; + + for (unsigned i = data.variants.size(); i--;) { + m_out.appendTo(blocks[i], i + 1 < data.variants.size() ? 
blocks[i + 1] : exit); + + PutByIdVariant variant = data.variants[i]; + + checkInferredType(m_node->child2(), value, variant.requiredType()); + + LValue storage; + if (variant.kind() == PutByIdVariant::Replace) { + if (isInlineOffset(variant.offset())) + storage = base; + else + storage = loadButterflyWithBarrier(base); + } else { + m_graph.m_plan.transitions.addLazily( + codeBlock(), m_node->origin.semantic.codeOriginOwner(), + variant.oldStructureForTransition(), variant.newStructure()); + + storage = storageForTransition( + base, variant.offset(), + variant.oldStructureForTransition(), variant.newStructure()); + + ASSERT(variant.oldStructureForTransition()->indexingType() == variant.newStructure()->indexingType()); + ASSERT(variant.oldStructureForTransition()->typeInfo().inlineTypeFlags() == variant.newStructure()->typeInfo().inlineTypeFlags()); + ASSERT(variant.oldStructureForTransition()->typeInfo().type() == variant.newStructure()->typeInfo().type()); + m_out.store32( + weakStructureID(variant.newStructure()), base, m_heaps.JSCell_structureID); + } + + storeProperty(value, storage, data.identifierNumber, variant.offset()); + m_out.jump(continuation); + } + + m_out.appendTo(exit, continuation); + if (!m_interpreter.forNode(m_node->child1()).m_structure.isSubsetOf(baseSet)) + speculate(BadCache, noValue(), nullptr, m_out.booleanTrue); + m_out.unreachable(); + + m_out.appendTo(continuation, lastNext); + } + + void compileGetGlobalVariable() + { + setJSValue(m_out.load64(m_out.absolute(m_node->variablePointer()))); + } + + void compilePutGlobalVariable() + { + m_out.store64( + lowJSValue(m_node->child2()), m_out.absolute(m_node->variablePointer())); + } + + void compileNotifyWrite() + { + WatchpointSet* set = m_node->watchpointSet(); + + LBasicBlock isNotInvalidated = m_out.newBlock(); + LBasicBlock continuation = m_out.newBlock(); + + LValue state = m_out.load8ZeroExt32(m_out.absolute(set->addressOfState())); + m_out.branch( + m_out.equal(state, m_out.constInt32(IsInvalidated)), + usually(continuation), rarely(isNotInvalidated)); + + LBasicBlock lastNext = m_out.appendTo(isNotInvalidated, continuation); + + lazySlowPath( + [=] (const Vector<Location>&) -> RefPtr<LazySlowPath::Generator> { + return createLazyCallGenerator( + operationNotifyWrite, InvalidGPRReg, CCallHelpers::TrustedImmPtr(set)); + }); + m_out.jump(continuation); + + m_out.appendTo(continuation, lastNext); + } + + void compileGetCallee() + { + setJSValue(m_out.loadPtr(addressFor(JSStack::Callee))); + } + + void compileGetArgumentCount() + { + setInt32(m_out.load32(payloadFor(JSStack::ArgumentCount))); + } + + void compileGetScope() + { + setJSValue(m_out.loadPtr(lowCell(m_node->child1()), m_heaps.JSFunction_scope)); + } + + void compileSkipScope() + { + setJSValue(m_out.loadPtr(lowCell(m_node->child1()), m_heaps.JSScope_next)); + } + + void compileGetClosureVar() + { + setJSValue( + m_out.load64( + lowCell(m_node->child1()), + m_heaps.JSEnvironmentRecord_variables[m_node->scopeOffset().offset()])); + } + + void compilePutClosureVar() + { + m_out.store64( + lowJSValue(m_node->child2()), + lowCell(m_node->child1()), + m_heaps.JSEnvironmentRecord_variables[m_node->scopeOffset().offset()]); + } + + void compileGetFromArguments() + { + setJSValue( + m_out.load64( + lowCell(m_node->child1()), + m_heaps.DirectArguments_storage[m_node->capturedArgumentsOffset().offset()])); + } + + void compilePutToArguments() + { + m_out.store64( + lowJSValue(m_node->child2()), + lowCell(m_node->child1()), + 
m_heaps.DirectArguments_storage[m_node->capturedArgumentsOffset().offset()]); + } + + void compileCompareEq() + { + if (m_node->isBinaryUseKind(Int32Use) + || m_node->isBinaryUseKind(Int52RepUse) + || m_node->isBinaryUseKind(DoubleRepUse) + || m_node->isBinaryUseKind(ObjectUse) + || m_node->isBinaryUseKind(BooleanUse) + || m_node->isBinaryUseKind(SymbolUse) + || m_node->isBinaryUseKind(StringIdentUse) + || m_node->isBinaryUseKind(StringUse)) { + compileCompareStrictEq(); + return; + } + + if (m_node->isBinaryUseKind(ObjectUse, ObjectOrOtherUse)) { + compareEqObjectOrOtherToObject(m_node->child2(), m_node->child1()); + return; + } + + if (m_node->isBinaryUseKind(ObjectOrOtherUse, ObjectUse)) { + compareEqObjectOrOtherToObject(m_node->child1(), m_node->child2()); + return; + } + + if (m_node->isBinaryUseKind(UntypedUse)) { + nonSpeculativeCompare( + [&] (LValue left, LValue right) { + return m_out.equal(left, right); + }, + operationCompareEq); + return; + } + + if (m_node->child1().useKind() == OtherUse) { + ASSERT(!m_interpreter.needsTypeCheck(m_node->child1(), SpecOther)); + setBoolean(equalNullOrUndefined(m_node->child2(), AllCellsAreFalse, EqualNullOrUndefined, ManualOperandSpeculation)); + return; + } + + if (m_node->child2().useKind() == OtherUse) { + ASSERT(!m_interpreter.needsTypeCheck(m_node->child2(), SpecOther)); + setBoolean(equalNullOrUndefined(m_node->child1(), AllCellsAreFalse, EqualNullOrUndefined, ManualOperandSpeculation)); + return; + } + + DFG_CRASH(m_graph, m_node, "Bad use kinds"); + } + + void compileCompareStrictEq() + { + if (m_node->isBinaryUseKind(Int32Use)) { + setBoolean( + m_out.equal(lowInt32(m_node->child1()), lowInt32(m_node->child2()))); + return; + } + + if (m_node->isBinaryUseKind(Int52RepUse)) { + Int52Kind kind; + LValue left = lowWhicheverInt52(m_node->child1(), kind); + LValue right = lowInt52(m_node->child2(), kind); + setBoolean(m_out.equal(left, right)); + return; + } + + if (m_node->isBinaryUseKind(DoubleRepUse)) { + setBoolean( + m_out.doubleEqual(lowDouble(m_node->child1()), lowDouble(m_node->child2()))); + return; + } + + if (m_node->isBinaryUseKind(StringIdentUse)) { + setBoolean( + m_out.equal(lowStringIdent(m_node->child1()), lowStringIdent(m_node->child2()))); + return; + } + + if (m_node->isBinaryUseKind(StringUse)) { + LValue left = lowCell(m_node->child1()); + LValue right = lowCell(m_node->child2()); + + LBasicBlock notTriviallyEqualCase = m_out.newBlock(); + LBasicBlock continuation = m_out.newBlock(); + + speculateString(m_node->child1(), left); + + ValueFromBlock fastResult = m_out.anchor(m_out.booleanTrue); + m_out.branch( + m_out.equal(left, right), unsure(continuation), unsure(notTriviallyEqualCase)); + + LBasicBlock lastNext = m_out.appendTo(notTriviallyEqualCase, continuation); + + speculateString(m_node->child2(), right); + + ValueFromBlock slowResult = m_out.anchor(stringsEqual(left, right)); + m_out.jump(continuation); + + m_out.appendTo(continuation, lastNext); + setBoolean(m_out.phi(m_out.boolean, fastResult, slowResult)); + return; + } + + if (m_node->isBinaryUseKind(ObjectUse, UntypedUse)) { + setBoolean( + m_out.equal( + lowNonNullObject(m_node->child1()), + lowJSValue(m_node->child2()))); + return; + } + + if (m_node->isBinaryUseKind(UntypedUse, ObjectUse)) { + setBoolean( + m_out.equal( + lowNonNullObject(m_node->child2()), + lowJSValue(m_node->child1()))); + return; + } + + if (m_node->isBinaryUseKind(ObjectUse)) { + setBoolean( + m_out.equal( + lowNonNullObject(m_node->child1()), + 
lowNonNullObject(m_node->child2()))); + return; + } + + if (m_node->isBinaryUseKind(BooleanUse)) { + setBoolean( + m_out.equal(lowBoolean(m_node->child1()), lowBoolean(m_node->child2()))); + return; + } + + if (m_node->isBinaryUseKind(SymbolUse)) { + LValue left = lowSymbol(m_node->child1()); + LValue right = lowSymbol(m_node->child2()); + LValue leftStringImpl = m_out.loadPtr(left, m_heaps.Symbol_privateName); + LValue rightStringImpl = m_out.loadPtr(right, m_heaps.Symbol_privateName); + setBoolean(m_out.equal(leftStringImpl, rightStringImpl)); + return; + } + + if (m_node->isBinaryUseKind(MiscUse, UntypedUse) + || m_node->isBinaryUseKind(UntypedUse, MiscUse)) { + speculate(m_node->child1()); + speculate(m_node->child2()); + LValue left = lowJSValue(m_node->child1(), ManualOperandSpeculation); + LValue right = lowJSValue(m_node->child2(), ManualOperandSpeculation); + setBoolean(m_out.equal(left, right)); + return; + } + + if (m_node->isBinaryUseKind(StringIdentUse, NotStringVarUse) + || m_node->isBinaryUseKind(NotStringVarUse, StringIdentUse)) { + Edge leftEdge = m_node->childFor(StringIdentUse); + Edge rightEdge = m_node->childFor(NotStringVarUse); + + LValue left = lowStringIdent(leftEdge); + LValue rightValue = lowJSValue(rightEdge, ManualOperandSpeculation); + + LBasicBlock isCellCase = m_out.newBlock(); + LBasicBlock isStringCase = m_out.newBlock(); + LBasicBlock continuation = m_out.newBlock(); + + ValueFromBlock notCellResult = m_out.anchor(m_out.booleanFalse); + m_out.branch( + isCell(rightValue, provenType(rightEdge)), + unsure(isCellCase), unsure(continuation)); + + LBasicBlock lastNext = m_out.appendTo(isCellCase, isStringCase); + ValueFromBlock notStringResult = m_out.anchor(m_out.booleanFalse); + m_out.branch( + isString(rightValue, provenType(rightEdge)), + unsure(isStringCase), unsure(continuation)); + + m_out.appendTo(isStringCase, continuation); + LValue right = m_out.loadPtr(rightValue, m_heaps.JSString_value); + speculateStringIdent(rightEdge, rightValue, right); + ValueFromBlock isStringResult = m_out.anchor(m_out.equal(left, right)); + m_out.jump(continuation); + + m_out.appendTo(continuation, lastNext); + setBoolean(m_out.phi(m_out.boolean, notCellResult, notStringResult, isStringResult)); + return; + } + + DFG_CRASH(m_graph, m_node, "Bad use kinds"); + } + + void compileCompareStrictEqConstant() + { + JSValue constant = m_node->child2()->asJSValue(); + + setBoolean( + m_out.equal( + lowJSValue(m_node->child1()), + m_out.constInt64(JSValue::encode(constant)))); + } + + void compileCompareLess() + { + compare( + [&] (LValue left, LValue right) { + return m_out.lessThan(left, right); + }, + [&] (LValue left, LValue right) { + return m_out.doubleLessThan(left, right); + }, + operationCompareLess); + } + + void compileCompareLessEq() + { + compare( + [&] (LValue left, LValue right) { + return m_out.lessThanOrEqual(left, right); + }, + [&] (LValue left, LValue right) { + return m_out.doubleLessThanOrEqual(left, right); + }, + operationCompareLessEq); + } + + void compileCompareGreater() + { + compare( + [&] (LValue left, LValue right) { + return m_out.greaterThan(left, right); + }, + [&] (LValue left, LValue right) { + return m_out.doubleGreaterThan(left, right); + }, + operationCompareGreater); + } + + void compileCompareGreaterEq() + { + compare( + [&] (LValue left, LValue right) { + return m_out.greaterThanOrEqual(left, right); + }, + [&] (LValue left, LValue right) { + return m_out.doubleGreaterThanOrEqual(left, right); + }, + operationCompareGreaterEq); + } + + void 
compileLogicalNot() + { + setBoolean(m_out.logicalNot(boolify(m_node->child1()))); + } + + void compileCallOrConstruct() + { + Node* node = m_node; + unsigned numArgs = node->numChildren() - 1; + + LValue jsCallee = lowJSValue(m_graph.varArgChild(node, 0)); + + unsigned frameSize = JSStack::CallFrameHeaderSize + numArgs; + unsigned alignedFrameSize = WTF::roundUpToMultipleOf(stackAlignmentRegisters(), frameSize); + + // JS->JS calling convention requires that the caller allows this much space on top of stack to + // get trashed by the callee, even if not all of that space is used to pass arguments. We tell + // B3 this explicitly for two reasons: + // + // - We will only pass frameSize worth of stuff. + // - The trashed stack guarantee is logically separate from the act of passing arguments, so we + // shouldn't rely on Air to infer the trashed stack property based on the arguments it ends + // up seeing. + m_proc.requestCallArgAreaSize(alignedFrameSize); + + // Collect the arguments, since this can generate code and we want to generate it before we emit + // the call. + Vector<ConstrainedValue> arguments; + + // Make sure that the callee goes into GPR0 because that's where the slow path thunks expect the + // callee to be. + arguments.append(ConstrainedValue(jsCallee, ValueRep::reg(GPRInfo::regT0))); + + auto addArgument = [&] (LValue value, VirtualRegister reg, int offset) { + intptr_t offsetFromSP = + (reg.offset() - JSStack::CallerFrameAndPCSize) * sizeof(EncodedJSValue) + offset; + arguments.append(ConstrainedValue(value, ValueRep::stackArgument(offsetFromSP))); + }; + + addArgument(jsCallee, VirtualRegister(JSStack::Callee), 0); + addArgument(m_out.constInt32(numArgs), VirtualRegister(JSStack::ArgumentCount), PayloadOffset); + for (unsigned i = 0; i < numArgs; ++i) + addArgument(lowJSValue(m_graph.varArgChild(node, 1 + i)), virtualRegisterForArgument(i), 0); + + PatchpointValue* patchpoint = m_out.patchpoint(Int64); + patchpoint->appendVector(arguments); + + RefPtr<PatchpointExceptionHandle> exceptionHandle = + preparePatchpointForExceptions(patchpoint); + + patchpoint->clobber(RegisterSet::macroScratchRegisters()); + patchpoint->clobberLate(RegisterSet::volatileRegistersForJSCall()); + patchpoint->resultConstraint = ValueRep::reg(GPRInfo::returnValueGPR); + + CodeOrigin codeOrigin = codeOriginDescriptionOfCallSite(); + State* state = &m_ftlState; + patchpoint->setGenerator( + [=] (CCallHelpers& jit, const StackmapGenerationParams& params) { + AllowMacroScratchRegisterUsage allowScratch(jit); + CallSiteIndex callSiteIndex = state->jitCode->common.addUniqueCallSiteIndex(codeOrigin); + + exceptionHandle->scheduleExitCreationForUnwind(params, callSiteIndex); + + jit.store32( + CCallHelpers::TrustedImm32(callSiteIndex.bits()), + CCallHelpers::tagFor(VirtualRegister(JSStack::ArgumentCount))); + + CallLinkInfo* callLinkInfo = jit.codeBlock()->addCallLinkInfo(); + + CCallHelpers::DataLabelPtr targetToCheck; + CCallHelpers::Jump slowPath = jit.branchPtrWithPatch( + CCallHelpers::NotEqual, GPRInfo::regT0, targetToCheck, + CCallHelpers::TrustedImmPtr(0)); + + CCallHelpers::Call fastCall = jit.nearCall(); + CCallHelpers::Jump done = jit.jump(); + + slowPath.link(&jit); + + jit.move(CCallHelpers::TrustedImmPtr(callLinkInfo), GPRInfo::regT2); + CCallHelpers::Call slowCall = jit.nearCall(); + done.link(&jit); + + callLinkInfo->setUpCall( + node->op() == Construct ? 
CallLinkInfo::Construct : CallLinkInfo::Call, + node->origin.semantic, GPRInfo::regT0); + + jit.addPtr( + CCallHelpers::TrustedImm32(-params.proc().frameSize()), + GPRInfo::callFrameRegister, CCallHelpers::stackPointerRegister); + + jit.addLinkTask( + [=] (LinkBuffer& linkBuffer) { + MacroAssemblerCodePtr linkCall = + linkBuffer.vm().getCTIStub(linkCallThunkGenerator).code(); + linkBuffer.link(slowCall, FunctionPtr(linkCall.executableAddress())); + + callLinkInfo->setCallLocations( + linkBuffer.locationOfNearCall(slowCall), + linkBuffer.locationOf(targetToCheck), + linkBuffer.locationOfNearCall(fastCall)); + }); + }); + + setJSValue(patchpoint); + } + + void compileTailCall() + { + Node* node = m_node; + unsigned numArgs = node->numChildren() - 1; + + LValue jsCallee = lowJSValue(m_graph.varArgChild(node, 0)); + + // We want B3 to give us all of the arguments using whatever mechanism it thinks is + // convenient. The generator then shuffles those arguments into our own call frame, + // destroying our frame in the process. + + // Note that we don't have to do anything special for exceptions. A tail call is only a + // tail call if it is not inside a try block. + + Vector<ConstrainedValue> arguments; + + arguments.append(ConstrainedValue(jsCallee, ValueRep::reg(GPRInfo::regT0))); + + for (unsigned i = 0; i < numArgs; ++i) { + // Note: we could let the shuffler do boxing for us, but it's not super clear that this + // would be better. Also, if we wanted to do that, then we'd have to teach the shuffler + // that 32-bit values could land at 4-byte alignment but not 8-byte alignment. + + ConstrainedValue constrainedValue( + lowJSValue(m_graph.varArgChild(node, 1 + i)), + ValueRep::WarmAny); + arguments.append(constrainedValue); + } + + PatchpointValue* patchpoint = m_out.patchpoint(Void); + patchpoint->appendVector(arguments); + + // Prevent any of the arguments from using the scratch register. + patchpoint->clobberEarly(RegisterSet::macroScratchRegisters()); + + // We don't have to tell the patchpoint that we will clobber registers, since we won't return + // anyway. + + CodeOrigin codeOrigin = codeOriginDescriptionOfCallSite(); + State* state = &m_ftlState; + patchpoint->setGenerator( + [=] (CCallHelpers& jit, const StackmapGenerationParams& params) { + AllowMacroScratchRegisterUsage allowScratch(jit); + CallSiteIndex callSiteIndex = state->jitCode->common.addUniqueCallSiteIndex(codeOrigin); + + CallFrameShuffleData shuffleData; + shuffleData.numLocals = state->jitCode->common.frameRegisterCount; + shuffleData.callee = ValueRecovery::inGPR(GPRInfo::regT0, DataFormatJS); + + for (unsigned i = 0; i < numArgs; ++i) + shuffleData.args.append(params[1 + i].recoveryForJSValue()); + + shuffleData.setupCalleeSaveRegisters(jit.codeBlock()); + + CallLinkInfo* callLinkInfo = jit.codeBlock()->addCallLinkInfo(); + + CCallHelpers::DataLabelPtr targetToCheck; + CCallHelpers::Jump slowPath = jit.branchPtrWithPatch( + CCallHelpers::NotEqual, GPRInfo::regT0, targetToCheck, + CCallHelpers::TrustedImmPtr(0)); + + callLinkInfo->setFrameShuffleData(shuffleData); + CallFrameShuffler(jit, shuffleData).prepareForTailCall(); + + CCallHelpers::Call fastCall = jit.nearTailCall(); + + slowPath.link(&jit); + + // Yes, this is really necessary. You could throw an exception in a host call on the + // slow path. That'll route us to lookupExceptionHandler(), which unwinds starting + // with the call site index of our frame. Bad things happen if it's not set. 
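As context for the store that follows: the ArgumentCount header slot is shared, with the payload holding the argument count and the tag holding the call site index that the unwinder reads. A hypothetical layout sketch (illustrative only, not JSC's actual type):

    #include <cstdint>

    // Sketch of the shared header slot: payloadFor(ArgumentCount) reads the low half,
    // while tagFor(ArgumentCount) is overwritten with CallSiteIndex::bits() right
    // before any call that can throw, so unwinding can map this frame to a handler.
    struct ArgumentCountSlotSketch {
        uint32_t payload; // argument count, including |this|
        uint32_t tag;     // call site index bits
    };
    static_assert(sizeof(ArgumentCountSlotSketch) == sizeof(uint64_t), "one 64-bit stack slot");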
+ jit.store32( + CCallHelpers::TrustedImm32(callSiteIndex.bits()), + CCallHelpers::tagFor(VirtualRegister(JSStack::ArgumentCount))); + + CallFrameShuffler slowPathShuffler(jit, shuffleData); + slowPathShuffler.setCalleeJSValueRegs(JSValueRegs(GPRInfo::regT0)); + slowPathShuffler.prepareForSlowPath(); + + jit.move(CCallHelpers::TrustedImmPtr(callLinkInfo), GPRInfo::regT2); + CCallHelpers::Call slowCall = jit.nearCall(); + + jit.abortWithReason(JITDidReturnFromTailCall); + + callLinkInfo->setUpCall(CallLinkInfo::TailCall, codeOrigin, GPRInfo::regT0); + + jit.addLinkTask( + [=] (LinkBuffer& linkBuffer) { + MacroAssemblerCodePtr linkCall = + linkBuffer.vm().getCTIStub(linkCallThunkGenerator).code(); + linkBuffer.link(slowCall, FunctionPtr(linkCall.executableAddress())); + + callLinkInfo->setCallLocations( + linkBuffer.locationOfNearCall(slowCall), + linkBuffer.locationOf(targetToCheck), + linkBuffer.locationOfNearCall(fastCall)); + }); + }); + + m_out.unreachable(); + } + + void compileCallOrConstructVarargs() + { + Node* node = m_node; + LValue jsCallee = lowJSValue(m_node->child1()); + LValue thisArg = lowJSValue(m_node->child3()); + + LValue jsArguments = nullptr; + bool forwarding = false; + + switch (node->op()) { + case CallVarargs: + case TailCallVarargs: + case TailCallVarargsInlinedCaller: + case ConstructVarargs: + jsArguments = lowJSValue(node->child2()); + break; + case CallForwardVarargs: + case TailCallForwardVarargs: + case TailCallForwardVarargsInlinedCaller: + case ConstructForwardVarargs: + forwarding = true; + break; + default: + DFG_CRASH(m_graph, node, "bad node type"); + break; + } + + PatchpointValue* patchpoint = m_out.patchpoint(Int64); + + // Append the forms of the arguments that we will use before any clobbering happens. + patchpoint->append(jsCallee, ValueRep::reg(GPRInfo::regT0)); + if (jsArguments) + patchpoint->appendSomeRegister(jsArguments); + patchpoint->appendSomeRegister(thisArg); + + if (!forwarding) { + // Now append them again for after clobbering. Note that the compiler may ask us to use a + // different register for the late for the post-clobbering version of the value. This gives + // the compiler a chance to spill these values without having to burn any callee-saves. + patchpoint->append(jsCallee, ValueRep::LateColdAny); + patchpoint->append(jsArguments, ValueRep::LateColdAny); + patchpoint->append(thisArg, ValueRep::LateColdAny); + } + + RefPtr<PatchpointExceptionHandle> exceptionHandle = + preparePatchpointForExceptions(patchpoint); + + patchpoint->clobber(RegisterSet::macroScratchRegisters()); + patchpoint->clobberLate(RegisterSet::volatileRegistersForJSCall()); + patchpoint->resultConstraint = ValueRep::reg(GPRInfo::returnValueGPR); + + // This is the minimum amount of call arg area stack space that all JS->JS calls always have. 
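The computation on the next lines rounds the five required slots up to the platform stack alignment. A minimal standalone sketch of that rounding step (WTF::roundUpToMultipleOf is the real helper; this assumes a non-zero divisor and the usual 16-byte stack alignment):

    #include <cstddef>

    // Round a byte size up to the next multiple of the alignment.
    static inline size_t roundUpToMultipleOfSketch(size_t divisor, size_t x)
    {
        return (x + divisor - 1) / divisor * divisor;
    }
    // e.g. roundUpToMultipleOfSketch(16, 5 * 8) == 48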
+ unsigned minimumJSCallAreaSize = + sizeof(CallerFrameAndPC) + + WTF::roundUpToMultipleOf(stackAlignmentBytes(), 5 * sizeof(EncodedJSValue)); + + m_proc.requestCallArgAreaSize(minimumJSCallAreaSize); + + CodeOrigin codeOrigin = codeOriginDescriptionOfCallSite(); + State* state = &m_ftlState; + patchpoint->setGenerator( + [=] (CCallHelpers& jit, const StackmapGenerationParams& params) { + AllowMacroScratchRegisterUsage allowScratch(jit); + CallSiteIndex callSiteIndex = + state->jitCode->common.addUniqueCallSiteIndex(codeOrigin); + + Box<CCallHelpers::JumpList> exceptions = + exceptionHandle->scheduleExitCreation(params)->jumps(jit); + + exceptionHandle->scheduleExitCreationForUnwind(params, callSiteIndex); + + jit.store32( + CCallHelpers::TrustedImm32(callSiteIndex.bits()), + CCallHelpers::tagFor(VirtualRegister(JSStack::ArgumentCount))); + + CallLinkInfo* callLinkInfo = jit.codeBlock()->addCallLinkInfo(); + CallVarargsData* data = node->callVarargsData(); + + unsigned argIndex = 1; + GPRReg calleeGPR = params[argIndex++].gpr(); + ASSERT(calleeGPR == GPRInfo::regT0); + GPRReg argumentsGPR = jsArguments ? params[argIndex++].gpr() : InvalidGPRReg; + GPRReg thisGPR = params[argIndex++].gpr(); + + B3::ValueRep calleeLateRep; + B3::ValueRep argumentsLateRep; + B3::ValueRep thisLateRep; + if (!forwarding) { + // If we're not forwarding then we'll need callee, arguments, and this after we + // have potentially clobbered calleeGPR, argumentsGPR, and thisGPR. Our technique + // for this is to supply all of those operands as late uses in addition to + // specifying them as early uses. It's possible that the late use uses a spill + // while the early use uses a register, and it's possible for the late and early + // uses to use different registers. We do know that the late uses interfere with + // all volatile registers and so won't use those, but the early uses may use + // volatile registers and in the case of calleeGPR, it's pinned to regT0 so it + // definitely will. + // + // Note that we have to be super careful with these. It's possible that these + // use a shuffling of the registers used for calleeGPR, argumentsGPR, and + // thisGPR. If that happens and we do for example: + // + // calleeLateRep.emitRestore(jit, calleeGPR); + // argumentsLateRep.emitRestore(jit, calleeGPR); + // + // Then we might end up with garbage if calleeLateRep.gpr() == argumentsGPR and + // argumentsLateRep.gpr() == calleeGPR. + // + // We do a variety of things to prevent this from happening. For example, we use + // argumentsLateRep before needing the other two and after we've already stopped + // using the *GPRs. Also, we pin calleeGPR to regT0, and rely on the fact that + // the *LateReps cannot use volatile registers (so they cannot be regT0, so + // calleeGPR != argumentsLateRep.gpr() and calleeGPR != thisLateRep.gpr()). + // + // An alternative would have been to just use early uses and early-clobber all + // volatile registers. But that would force callee, arguments, and this into + // callee-save registers even if we have to spill them. We don't want spilling to + // use up three callee-saves. + // + // TL;DR: The way we use LateReps here is dangerous and barely works but achieves + // some desirable performance properties, so don't mistake the cleverness for + // elegance. + calleeLateRep = params[argIndex++]; + argumentsLateRep = params[argIndex++]; + thisLateRep = params[argIndex++]; + } + + // Get some scratch registers. 
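The block below collects every register the patchpoint already uses and then asks ScratchRegisterAllocator for temporaries outside that set. The underlying idea, as a standalone sketch with a plain bitmask standing in for RegisterSet (SketchGPR and pickScratchSketch are illustrative names, not JSC APIs):

    #include <cstdint>

    enum class SketchGPR : uint8_t { R0, R1, R2, R3, R4, R5, Invalid };

    // Return the lowest-numbered register whose bit is not set in the used mask.
    static SketchGPR pickScratchSketch(uint32_t usedMask)
    {
        for (uint8_t i = 0; i < 6; ++i) {
            if (!(usedMask & (1u << i)))
                return static_cast<SketchGPR>(i);
        }
        return SketchGPR::Invalid; // nothing free; a real allocator would have to spill
    }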
+ RegisterSet usedRegisters; + usedRegisters.merge(RegisterSet::stackRegisters()); + usedRegisters.merge(RegisterSet::reservedHardwareRegisters()); + usedRegisters.merge(RegisterSet::calleeSaveRegisters()); + usedRegisters.set(calleeGPR); + if (argumentsGPR != InvalidGPRReg) + usedRegisters.set(argumentsGPR); + usedRegisters.set(thisGPR); + if (calleeLateRep.isReg()) + usedRegisters.set(calleeLateRep.reg()); + if (argumentsLateRep.isReg()) + usedRegisters.set(argumentsLateRep.reg()); + if (thisLateRep.isReg()) + usedRegisters.set(thisLateRep.reg()); + ScratchRegisterAllocator allocator(usedRegisters); + GPRReg scratchGPR1 = allocator.allocateScratchGPR(); + GPRReg scratchGPR2 = allocator.allocateScratchGPR(); + GPRReg scratchGPR3 = forwarding ? allocator.allocateScratchGPR() : InvalidGPRReg; + RELEASE_ASSERT(!allocator.numberOfReusedRegisters()); + + auto callWithExceptionCheck = [&] (void* callee) { + jit.move(CCallHelpers::TrustedImmPtr(callee), GPRInfo::nonPreservedNonArgumentGPR); + jit.call(GPRInfo::nonPreservedNonArgumentGPR); + exceptions->append(jit.emitExceptionCheck(AssemblyHelpers::NormalExceptionCheck, AssemblyHelpers::FarJumpWidth)); + }; + + auto adjustStack = [&] (GPRReg amount) { + jit.addPtr(CCallHelpers::TrustedImm32(sizeof(CallerFrameAndPC)), amount, CCallHelpers::stackPointerRegister); + }; + + unsigned originalStackHeight = params.proc().frameSize(); + + if (forwarding) { + jit.move(CCallHelpers::TrustedImm32(originalStackHeight / sizeof(EncodedJSValue)), scratchGPR2); + + CCallHelpers::JumpList slowCase; + emitSetupVarargsFrameFastCase(jit, scratchGPR2, scratchGPR1, scratchGPR2, scratchGPR3, node->child2()->origin.semantic.inlineCallFrame, data->firstVarArgOffset, slowCase); + + CCallHelpers::Jump done = jit.jump(); + slowCase.link(&jit); + jit.setupArgumentsExecState(); + callWithExceptionCheck(bitwise_cast<void*>(operationThrowStackOverflowForVarargs)); + jit.abortWithReason(DFGVarargsThrowingPathDidNotThrow); + + done.link(&jit); + + adjustStack(scratchGPR2); + } else { + jit.move(CCallHelpers::TrustedImm32(originalStackHeight / sizeof(EncodedJSValue)), scratchGPR1); + jit.setupArgumentsWithExecState(argumentsGPR, scratchGPR1, CCallHelpers::TrustedImm32(data->firstVarArgOffset)); + callWithExceptionCheck(bitwise_cast<void*>(operationSizeFrameForVarargs)); + + jit.move(GPRInfo::returnValueGPR, scratchGPR1); + jit.move(CCallHelpers::TrustedImm32(originalStackHeight / sizeof(EncodedJSValue)), scratchGPR2); + argumentsLateRep.emitRestore(jit, argumentsGPR); + emitSetVarargsFrame(jit, scratchGPR1, false, scratchGPR2, scratchGPR2); + jit.addPtr(CCallHelpers::TrustedImm32(-minimumJSCallAreaSize), scratchGPR2, CCallHelpers::stackPointerRegister); + jit.setupArgumentsWithExecState(scratchGPR2, argumentsGPR, CCallHelpers::TrustedImm32(data->firstVarArgOffset), scratchGPR1); + callWithExceptionCheck(bitwise_cast<void*>(operationSetupVarargsFrame)); + + adjustStack(GPRInfo::returnValueGPR); + + calleeLateRep.emitRestore(jit, GPRInfo::regT0); + + // This may not emit code if thisGPR got a callee-save. Also, we're guaranteed + // that thisGPR != GPRInfo::regT0 because regT0 interferes with it. 
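The long comment above warns that restoring the late representations in the wrong order can clobber a source register before it is read; this code sidesteps the hazard with careful ordering and by pinning the callee to regT0. The general-purpose alternative is a parallel move, sketched standalone below (not what this function does):

    #include <array>
    #include <cstddef>

    // Read every source before writing any destination, so a destination that is also
    // another move's source is never clobbered early.
    template<size_t N>
    static void parallelMoveSketch(std::array<int, N>& regs, const std::array<size_t, N>& sourceOf)
    {
        std::array<int, N> snapshot = regs;  // capture all sources first
        for (size_t i = 0; i < N; ++i)
            regs[i] = snapshot[sourceOf[i]]; // then perform the writes
    }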
+ thisLateRep.emitRestore(jit, thisGPR); + } + + jit.store64(GPRInfo::regT0, CCallHelpers::calleeFrameSlot(JSStack::Callee)); + jit.store64(thisGPR, CCallHelpers::calleeArgumentSlot(0)); + + CallLinkInfo::CallType callType; + if (node->op() == ConstructVarargs || node->op() == ConstructForwardVarargs) + callType = CallLinkInfo::ConstructVarargs; + else if (node->op() == TailCallVarargs || node->op() == TailCallForwardVarargs) + callType = CallLinkInfo::TailCallVarargs; + else + callType = CallLinkInfo::CallVarargs; + + bool isTailCall = CallLinkInfo::callModeFor(callType) == CallMode::Tail; + + CCallHelpers::DataLabelPtr targetToCheck; + CCallHelpers::Jump slowPath = jit.branchPtrWithPatch( + CCallHelpers::NotEqual, GPRInfo::regT0, targetToCheck, + CCallHelpers::TrustedImmPtr(0)); + + CCallHelpers::Call fastCall; + CCallHelpers::Jump done; + + if (isTailCall) { + jit.emitRestoreCalleeSaves(); + jit.prepareForTailCallSlow(); + fastCall = jit.nearTailCall(); + } else { + fastCall = jit.nearCall(); + done = jit.jump(); + } + + slowPath.link(&jit); + + if (isTailCall) + jit.emitRestoreCalleeSaves(); + jit.move(CCallHelpers::TrustedImmPtr(callLinkInfo), GPRInfo::regT2); + CCallHelpers::Call slowCall = jit.nearCall(); + + if (isTailCall) + jit.abortWithReason(JITDidReturnFromTailCall); + else + done.link(&jit); + + callLinkInfo->setUpCall(callType, node->origin.semantic, GPRInfo::regT0); + + jit.addPtr( + CCallHelpers::TrustedImm32(-originalStackHeight), + GPRInfo::callFrameRegister, CCallHelpers::stackPointerRegister); + + jit.addLinkTask( + [=] (LinkBuffer& linkBuffer) { + MacroAssemblerCodePtr linkCall = + linkBuffer.vm().getCTIStub(linkCallThunkGenerator).code(); + linkBuffer.link(slowCall, FunctionPtr(linkCall.executableAddress())); + + callLinkInfo->setCallLocations( + linkBuffer.locationOfNearCall(slowCall), + linkBuffer.locationOf(targetToCheck), + linkBuffer.locationOfNearCall(fastCall)); + }); + }); + + switch (node->op()) { + case TailCallVarargs: + case TailCallForwardVarargs: + m_out.unreachable(); + break; + + default: + setJSValue(patchpoint); + break; + } + } + + void compileLoadVarargs() + { + LoadVarargsData* data = m_node->loadVarargsData(); + LValue jsArguments = lowJSValue(m_node->child1()); + + LValue length = vmCall( + m_out.int32, m_out.operation(operationSizeOfVarargs), m_callFrame, jsArguments, + m_out.constInt32(data->offset)); + + // FIXME: There is a chance that we will call an effectful length property twice. This is safe + // from the standpoint of the VM's integrity, but it's subtly wrong from a spec compliance + // standpoint. The best solution would be one where we can exit *into* the op_call_varargs right + // past the sizing. + // https://bugs.webkit.org/show_bug.cgi?id=141448 + + LValue lengthIncludingThis = m_out.add(length, m_out.int32One); + speculate( + VarargsOverflow, noValue(), nullptr, + m_out.above(lengthIncludingThis, m_out.constInt32(data->limit))); + + m_out.store32(lengthIncludingThis, payloadFor(data->machineCount)); + + // FIXME: This computation is rather silly. If operationLaodVarargs just took a pointer instead + // of a VirtualRegister, we wouldn't have to do this. 
+ // https://bugs.webkit.org/show_bug.cgi?id=141660 + LValue machineStart = m_out.lShr( + m_out.sub(addressFor(data->machineStart.offset()).value(), m_callFrame), + m_out.constIntPtr(3)); + + vmCall( + m_out.voidType, m_out.operation(operationLoadVarargs), m_callFrame, + m_out.castToInt32(machineStart), jsArguments, m_out.constInt32(data->offset), + length, m_out.constInt32(data->mandatoryMinimum)); + } + + void compileForwardVarargs() + { + LoadVarargsData* data = m_node->loadVarargsData(); + InlineCallFrame* inlineCallFrame = m_node->child1()->origin.semantic.inlineCallFrame; + + LValue length = getArgumentsLength(inlineCallFrame).value; + LValue lengthIncludingThis = m_out.add(length, m_out.constInt32(1 - data->offset)); + + speculate( + VarargsOverflow, noValue(), nullptr, + m_out.above(lengthIncludingThis, m_out.constInt32(data->limit))); + + m_out.store32(lengthIncludingThis, payloadFor(data->machineCount)); + + LValue sourceStart = getArgumentsStart(inlineCallFrame); + LValue targetStart = addressFor(data->machineStart).value(); + + LBasicBlock undefinedLoop = m_out.newBlock(); + LBasicBlock mainLoopEntry = m_out.newBlock(); + LBasicBlock mainLoop = m_out.newBlock(); + LBasicBlock continuation = m_out.newBlock(); + + LValue lengthAsPtr = m_out.zeroExtPtr(length); + LValue loopBoundValue = m_out.constIntPtr(data->mandatoryMinimum); + ValueFromBlock loopBound = m_out.anchor(loopBoundValue); + m_out.branch( + m_out.above(loopBoundValue, lengthAsPtr), unsure(undefinedLoop), unsure(mainLoopEntry)); + + LBasicBlock lastNext = m_out.appendTo(undefinedLoop, mainLoopEntry); + LValue previousIndex = m_out.phi(m_out.intPtr, loopBound); + LValue currentIndex = m_out.sub(previousIndex, m_out.intPtrOne); + m_out.store64( + m_out.constInt64(JSValue::encode(jsUndefined())), + m_out.baseIndex(m_heaps.variables, targetStart, currentIndex)); + ValueFromBlock nextIndex = m_out.anchor(currentIndex); + m_out.addIncomingToPhi(previousIndex, nextIndex); + m_out.branch( + m_out.above(currentIndex, lengthAsPtr), unsure(undefinedLoop), unsure(mainLoopEntry)); + + m_out.appendTo(mainLoopEntry, mainLoop); + loopBound = m_out.anchor(lengthAsPtr); + m_out.branch(m_out.notNull(lengthAsPtr), unsure(mainLoop), unsure(continuation)); + + m_out.appendTo(mainLoop, continuation); + previousIndex = m_out.phi(m_out.intPtr, loopBound); + currentIndex = m_out.sub(previousIndex, m_out.intPtrOne); + LValue value = m_out.load64( + m_out.baseIndex( + m_heaps.variables, sourceStart, + m_out.add(currentIndex, m_out.constIntPtr(data->offset)))); + m_out.store64(value, m_out.baseIndex(m_heaps.variables, targetStart, currentIndex)); + nextIndex = m_out.anchor(currentIndex); + m_out.addIncomingToPhi(previousIndex, nextIndex); + m_out.branch(m_out.isNull(currentIndex), unsure(continuation), unsure(mainLoop)); + + m_out.appendTo(continuation, lastNext); + } + + void compileJump() + { + m_out.jump(lowBlock(m_node->targetBlock())); + } + + void compileBranch() + { + m_out.branch( + boolify(m_node->child1()), + WeightedTarget( + lowBlock(m_node->branchData()->taken.block), + m_node->branchData()->taken.count), + WeightedTarget( + lowBlock(m_node->branchData()->notTaken.block), + m_node->branchData()->notTaken.count)); + } + + void compileSwitch() + { + SwitchData* data = m_node->switchData(); + switch (data->kind) { + case SwitchImm: { + Vector<ValueFromBlock, 2> intValues; + LBasicBlock switchOnInts = m_out.newBlock(); + + LBasicBlock lastNext = m_out.appendTo(m_out.m_block, switchOnInts); + + switch (m_node->child1().useKind()) { + case 
Int32Use: { + intValues.append(m_out.anchor(lowInt32(m_node->child1()))); + m_out.jump(switchOnInts); + break; + } + + case UntypedUse: { + LBasicBlock isInt = m_out.newBlock(); + LBasicBlock isNotInt = m_out.newBlock(); + LBasicBlock isDouble = m_out.newBlock(); + + LValue boxedValue = lowJSValue(m_node->child1()); + m_out.branch(isNotInt32(boxedValue), unsure(isNotInt), unsure(isInt)); + + LBasicBlock innerLastNext = m_out.appendTo(isInt, isNotInt); + + intValues.append(m_out.anchor(unboxInt32(boxedValue))); + m_out.jump(switchOnInts); + + m_out.appendTo(isNotInt, isDouble); + m_out.branch( + isCellOrMisc(boxedValue, provenType(m_node->child1())), + usually(lowBlock(data->fallThrough.block)), rarely(isDouble)); + + m_out.appendTo(isDouble, innerLastNext); + LValue doubleValue = unboxDouble(boxedValue); + LValue intInDouble = m_out.doubleToInt(doubleValue); + intValues.append(m_out.anchor(intInDouble)); + m_out.branch( + m_out.doubleEqual(m_out.intToDouble(intInDouble), doubleValue), + unsure(switchOnInts), unsure(lowBlock(data->fallThrough.block))); + break; + } + + default: + DFG_CRASH(m_graph, m_node, "Bad use kind"); + break; + } + + m_out.appendTo(switchOnInts, lastNext); + buildSwitch(data, m_out.int32, m_out.phi(m_out.int32, intValues)); + return; + } + + case SwitchChar: { + LValue stringValue; + + // FIXME: We should use something other than unsure() for the branch weight + // of the fallThrough block. The main challenge is just that we have multiple + // branches to fallThrough but a single count, so we would need to divvy it up + // among the different lowered branches. + // https://bugs.webkit.org/show_bug.cgi?id=129082 + + switch (m_node->child1().useKind()) { + case StringUse: { + stringValue = lowString(m_node->child1()); + break; + } + + case UntypedUse: { + LValue unboxedValue = lowJSValue(m_node->child1()); + + LBasicBlock isCellCase = m_out.newBlock(); + LBasicBlock isStringCase = m_out.newBlock(); + + m_out.branch( + isNotCell(unboxedValue, provenType(m_node->child1())), + unsure(lowBlock(data->fallThrough.block)), unsure(isCellCase)); + + LBasicBlock lastNext = m_out.appendTo(isCellCase, isStringCase); + LValue cellValue = unboxedValue; + m_out.branch( + isNotString(cellValue, provenType(m_node->child1())), + unsure(lowBlock(data->fallThrough.block)), unsure(isStringCase)); + + m_out.appendTo(isStringCase, lastNext); + stringValue = cellValue; + break; + } + + default: + DFG_CRASH(m_graph, m_node, "Bad use kind"); + break; + } + + LBasicBlock lengthIs1 = m_out.newBlock(); + LBasicBlock needResolution = m_out.newBlock(); + LBasicBlock resolved = m_out.newBlock(); + LBasicBlock is8Bit = m_out.newBlock(); + LBasicBlock is16Bit = m_out.newBlock(); + LBasicBlock continuation = m_out.newBlock(); + + m_out.branch( + m_out.notEqual( + m_out.load32NonNegative(stringValue, m_heaps.JSString_length), + m_out.int32One), + unsure(lowBlock(data->fallThrough.block)), unsure(lengthIs1)); + + LBasicBlock lastNext = m_out.appendTo(lengthIs1, needResolution); + Vector<ValueFromBlock, 2> values; + LValue fastValue = m_out.loadPtr(stringValue, m_heaps.JSString_value); + values.append(m_out.anchor(fastValue)); + m_out.branch(m_out.isNull(fastValue), rarely(needResolution), usually(resolved)); + + m_out.appendTo(needResolution, resolved); + values.append(m_out.anchor( + vmCall(m_out.intPtr, m_out.operation(operationResolveRope), m_callFrame, stringValue))); + m_out.jump(resolved); + + m_out.appendTo(resolved, is8Bit); + LValue value = m_out.phi(m_out.intPtr, values); + LValue characterData = 
m_out.loadPtr(value, m_heaps.StringImpl_data); + m_out.branch( + m_out.testNonZero32( + m_out.load32(value, m_heaps.StringImpl_hashAndFlags), + m_out.constInt32(StringImpl::flagIs8Bit())), + unsure(is8Bit), unsure(is16Bit)); + + Vector<ValueFromBlock, 2> characters; + m_out.appendTo(is8Bit, is16Bit); + characters.append(m_out.anchor(m_out.load8ZeroExt32(characterData, m_heaps.characters8[0]))); + m_out.jump(continuation); + + m_out.appendTo(is16Bit, continuation); + characters.append(m_out.anchor(m_out.load16ZeroExt32(characterData, m_heaps.characters16[0]))); + m_out.jump(continuation); + + m_out.appendTo(continuation, lastNext); + buildSwitch(data, m_out.int32, m_out.phi(m_out.int32, characters)); + return; + } + + case SwitchString: { + switch (m_node->child1().useKind()) { + case StringIdentUse: { + LValue stringImpl = lowStringIdent(m_node->child1()); + + Vector<SwitchCase> cases; + for (unsigned i = 0; i < data->cases.size(); ++i) { + LValue value = m_out.constIntPtr(data->cases[i].value.stringImpl()); + LBasicBlock block = lowBlock(data->cases[i].target.block); + Weight weight = Weight(data->cases[i].target.count); + cases.append(SwitchCase(value, block, weight)); + } + + m_out.switchInstruction( + stringImpl, cases, lowBlock(data->fallThrough.block), + Weight(data->fallThrough.count)); + return; + } + + case StringUse: { + switchString(data, lowString(m_node->child1())); + return; + } + + case UntypedUse: { + LValue value = lowJSValue(m_node->child1()); + + LBasicBlock isCellBlock = m_out.newBlock(); + LBasicBlock isStringBlock = m_out.newBlock(); + + m_out.branch( + isCell(value, provenType(m_node->child1())), + unsure(isCellBlock), unsure(lowBlock(data->fallThrough.block))); + + LBasicBlock lastNext = m_out.appendTo(isCellBlock, isStringBlock); + + m_out.branch( + isString(value, provenType(m_node->child1())), + unsure(isStringBlock), unsure(lowBlock(data->fallThrough.block))); + + m_out.appendTo(isStringBlock, lastNext); + + switchString(data, value); + return; + } + + default: + DFG_CRASH(m_graph, m_node, "Bad use kind"); + return; + } + return; + } + + case SwitchCell: { + LValue cell; + switch (m_node->child1().useKind()) { + case CellUse: { + cell = lowCell(m_node->child1()); + break; + } + + case UntypedUse: { + LValue value = lowJSValue(m_node->child1()); + LBasicBlock cellCase = m_out.newBlock(); + m_out.branch( + isCell(value, provenType(m_node->child1())), + unsure(cellCase), unsure(lowBlock(data->fallThrough.block))); + m_out.appendTo(cellCase); + cell = value; + break; + } + + default: + DFG_CRASH(m_graph, m_node, "Bad use kind"); + return; + } + + buildSwitch(m_node->switchData(), m_out.intPtr, cell); + return; + } } + + DFG_CRASH(m_graph, m_node, "Bad switch kind"); + } + + void compileReturn() + { + m_out.ret(lowJSValue(m_node->child1())); + } + + void compileForceOSRExit() + { + terminate(InadequateCoverage); + } + + void compileThrow() + { + terminate(Uncountable); + } + + void compileInvalidationPoint() + { + if (verboseCompilationEnabled()) + dataLog(" Invalidation point with availability: ", availabilityMap(), "\n"); + + DFG_ASSERT(m_graph, m_node, m_origin.exitOK); + + PatchpointValue* patchpoint = m_out.patchpoint(Void); + OSRExitDescriptor* descriptor = appendOSRExitDescriptor(noValue(), nullptr); + NodeOrigin origin = m_origin; + patchpoint->appendColdAnys(buildExitArguments(descriptor, origin.forExit, noValue())); + + State* state = &m_ftlState; + + patchpoint->setGenerator( + [=] (CCallHelpers& jit, const B3::StackmapGenerationParams& params) { + // The 
MacroAssembler knows more about this than B3 does. The watchpointLabel() method + // will ensure that this is followed by a nop shadow but only when this is actually + // necessary. + CCallHelpers::Label label = jit.watchpointLabel(); + + RefPtr<OSRExitHandle> handle = descriptor->emitOSRExitLater( + *state, UncountableInvalidation, origin, params); + + RefPtr<JITCode> jitCode = state->jitCode.get(); + + jit.addLinkTask( + [=] (LinkBuffer& linkBuffer) { + JumpReplacement jumpReplacement( + linkBuffer.locationOf(label), + linkBuffer.locationOf(handle->label)); + jitCode->common.jumpReplacements.append(jumpReplacement); + }); + }); + + // Set some obvious things. + patchpoint->effects.terminal = false; + patchpoint->effects.writesLocalState = false; + patchpoint->effects.readsLocalState = false; + + // This is how we tell B3 about the possibility of jump replacement. + patchpoint->effects.exitsSideways = true; + + // It's not possible for some prior branch to determine the safety of this operation. It's always + // fine to execute this on some path that wouldn't have originally executed it before + // optimization. + patchpoint->effects.controlDependent = false; + + // If this falls through then it won't write anything. + patchpoint->effects.writes = HeapRange(); + + // When this abruptly terminates, it could read any heap location. + patchpoint->effects.reads = HeapRange::top(); + } + + void compileIsUndefined() + { + setBoolean(equalNullOrUndefined(m_node->child1(), AllCellsAreFalse, EqualUndefined)); + } + + void compileIsBoolean() + { + setBoolean(isBoolean(lowJSValue(m_node->child1()), provenType(m_node->child1()))); + } + + void compileIsNumber() + { + setBoolean(isNumber(lowJSValue(m_node->child1()), provenType(m_node->child1()))); + } + + void compileIsString() + { + LValue value = lowJSValue(m_node->child1()); + + LBasicBlock isCellCase = m_out.newBlock(); + LBasicBlock continuation = m_out.newBlock(); + + ValueFromBlock notCellResult = m_out.anchor(m_out.booleanFalse); + m_out.branch( + isCell(value, provenType(m_node->child1())), unsure(isCellCase), unsure(continuation)); + + LBasicBlock lastNext = m_out.appendTo(isCellCase, continuation); + ValueFromBlock cellResult = m_out.anchor(isString(value, provenType(m_node->child1()))); + m_out.jump(continuation); + + m_out.appendTo(continuation, lastNext); + setBoolean(m_out.phi(m_out.boolean, notCellResult, cellResult)); + } + + void compileIsObject() + { + LValue value = lowJSValue(m_node->child1()); + + LBasicBlock isCellCase = m_out.newBlock(); + LBasicBlock continuation = m_out.newBlock(); + + ValueFromBlock notCellResult = m_out.anchor(m_out.booleanFalse); + m_out.branch( + isCell(value, provenType(m_node->child1())), unsure(isCellCase), unsure(continuation)); + + LBasicBlock lastNext = m_out.appendTo(isCellCase, continuation); + ValueFromBlock cellResult = m_out.anchor(isObject(value, provenType(m_node->child1()))); + m_out.jump(continuation); + + m_out.appendTo(continuation, lastNext); + setBoolean(m_out.phi(m_out.boolean, notCellResult, cellResult)); + } + + void compileIsObjectOrNull() + { + JSGlobalObject* globalObject = m_graph.globalObjectFor(m_node->origin.semantic); + + Edge child = m_node->child1(); + LValue value = lowJSValue(child); + + LBasicBlock cellCase = m_out.newBlock(); + LBasicBlock notFunctionCase = m_out.newBlock(); + LBasicBlock objectCase = m_out.newBlock(); + LBasicBlock slowPath = m_out.newBlock(); + LBasicBlock notCellCase = m_out.newBlock(); + LBasicBlock continuation = m_out.newBlock(); + + 
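For orientation, the diamond of blocks being set up here computes roughly the following predicate; a plain C++ model of the fast paths (SketchTag is an illustrative stand-in for the value's type, and the "exotic for typeof" case is resolved by a lazy slow-path call in the real lowering):

    enum class SketchTag { Null, Undefined, Boolean, Number, String, Symbol, Function, Object };

    // Rough model of IsObjectOrNull: null is true, functions and non-object values are
    // false, ordinary objects are true; objects that are exotic for typeof defer to the
    // slow path (modeled by the extra flag).
    static bool isObjectOrNullSketch(SketchTag tag, bool exoticForTypeof, bool slowPathSaysObject)
    {
        if (tag == SketchTag::Null)
            return true;
        if (tag != SketchTag::Object)
            return false;
        if (exoticForTypeof)
            return slowPathSaysObject;
        return true;
    }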
m_out.branch(isCell(value, provenType(child)), unsure(cellCase), unsure(notCellCase)); + + LBasicBlock lastNext = m_out.appendTo(cellCase, notFunctionCase); + ValueFromBlock isFunctionResult = m_out.anchor(m_out.booleanFalse); + m_out.branch( + isFunction(value, provenType(child)), + unsure(continuation), unsure(notFunctionCase)); + + m_out.appendTo(notFunctionCase, objectCase); + ValueFromBlock notObjectResult = m_out.anchor(m_out.booleanFalse); + m_out.branch( + isObject(value, provenType(child)), + unsure(objectCase), unsure(continuation)); + + m_out.appendTo(objectCase, slowPath); + ValueFromBlock objectResult = m_out.anchor(m_out.booleanTrue); + m_out.branch( + isExoticForTypeof(value, provenType(child)), + rarely(slowPath), usually(continuation)); + + m_out.appendTo(slowPath, notCellCase); + LValue slowResultValue = lazySlowPath( + [=] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> { + return createLazyCallGenerator( + operationObjectIsObject, locations[0].directGPR(), + CCallHelpers::TrustedImmPtr(globalObject), locations[1].directGPR()); + }, value); + ValueFromBlock slowResult = m_out.anchor(m_out.notZero64(slowResultValue)); + m_out.jump(continuation); + + m_out.appendTo(notCellCase, continuation); + LValue notCellResultValue = m_out.equal(value, m_out.constInt64(JSValue::encode(jsNull()))); + ValueFromBlock notCellResult = m_out.anchor(notCellResultValue); + m_out.jump(continuation); + + m_out.appendTo(continuation, lastNext); + LValue result = m_out.phi( + m_out.boolean, + isFunctionResult, notObjectResult, objectResult, slowResult, notCellResult); + setBoolean(result); + } + + void compileIsFunction() + { + JSGlobalObject* globalObject = m_graph.globalObjectFor(m_node->origin.semantic); + + Edge child = m_node->child1(); + LValue value = lowJSValue(child); + + LBasicBlock cellCase = m_out.newBlock(); + LBasicBlock notFunctionCase = m_out.newBlock(); + LBasicBlock slowPath = m_out.newBlock(); + LBasicBlock continuation = m_out.newBlock(); + + ValueFromBlock notCellResult = m_out.anchor(m_out.booleanFalse); + m_out.branch( + isCell(value, provenType(child)), unsure(cellCase), unsure(continuation)); + + LBasicBlock lastNext = m_out.appendTo(cellCase, notFunctionCase); + ValueFromBlock functionResult = m_out.anchor(m_out.booleanTrue); + m_out.branch( + isFunction(value, provenType(child)), + unsure(continuation), unsure(notFunctionCase)); + + m_out.appendTo(notFunctionCase, slowPath); + ValueFromBlock objectResult = m_out.anchor(m_out.booleanFalse); + m_out.branch( + isExoticForTypeof(value, provenType(child)), + rarely(slowPath), usually(continuation)); + + m_out.appendTo(slowPath, continuation); + LValue slowResultValue = lazySlowPath( + [=] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> { + return createLazyCallGenerator( + operationObjectIsFunction, locations[0].directGPR(), + CCallHelpers::TrustedImmPtr(globalObject), locations[1].directGPR()); + }, value); + ValueFromBlock slowResult = m_out.anchor(m_out.notNull(slowResultValue)); + m_out.jump(continuation); + + m_out.appendTo(continuation, lastNext); + LValue result = m_out.phi( + m_out.boolean, notCellResult, functionResult, objectResult, slowResult); + setBoolean(result); + } + + void compileTypeOf() + { + Edge child = m_node->child1(); + LValue value = lowJSValue(child); + + LBasicBlock continuation = m_out.newBlock(); + LBasicBlock lastNext = m_out.insertNewBlocksBefore(continuation); + + Vector<ValueFromBlock> results; + + buildTypeOf( + child, value, + [&] (TypeofType 
type) { + results.append(m_out.anchor(weakPointer(vm().smallStrings.typeString(type)))); + m_out.jump(continuation); + }); + + m_out.appendTo(continuation, lastNext); + setJSValue(m_out.phi(m_out.int64, results)); + } + + void compileIn() + { + Node* node = m_node; + Edge base = node->child2(); + LValue cell = lowCell(base); + if (JSString* string = node->child1()->dynamicCastConstant<JSString*>()) { + if (string->tryGetValueImpl() && string->tryGetValueImpl()->isAtomic()) { + UniquedStringImpl* str = bitwise_cast<UniquedStringImpl*>(string->tryGetValueImpl()); + B3::PatchpointValue* patchpoint = m_out.patchpoint(Int64); + patchpoint->appendSomeRegister(cell); + patchpoint->clobber(RegisterSet::macroScratchRegisters()); + + State* state = &m_ftlState; + patchpoint->setGenerator( + [=] (CCallHelpers& jit, const StackmapGenerationParams& params) { + AllowMacroScratchRegisterUsage allowScratch(jit); + + GPRReg baseGPR = params[1].gpr(); + GPRReg resultGPR = params[0].gpr(); + + StructureStubInfo* stubInfo = + jit.codeBlock()->addStubInfo(AccessType::In); + stubInfo->callSiteIndex = + state->jitCode->common.addCodeOrigin(node->origin.semantic); + stubInfo->codeOrigin = node->origin.semantic; + stubInfo->patch.baseGPR = static_cast<int8_t>(baseGPR); + stubInfo->patch.valueGPR = static_cast<int8_t>(resultGPR); + stubInfo->patch.usedRegisters = params.unavailableRegisters(); + + CCallHelpers::PatchableJump jump = jit.patchableJump(); + CCallHelpers::Label done = jit.label(); + + params.addLatePath( + [=] (CCallHelpers& jit) { + AllowMacroScratchRegisterUsage allowScratch(jit); + + jump.m_jump.link(&jit); + CCallHelpers::Label slowPathBegin = jit.label(); + CCallHelpers::Call slowPathCall = callOperation( + *state, params.unavailableRegisters(), jit, + node->origin.semantic, nullptr, operationInOptimize, + resultGPR, CCallHelpers::TrustedImmPtr(stubInfo), baseGPR, + CCallHelpers::TrustedImmPtr(str)).call(); + jit.jump().linkTo(done, &jit); + + jit.addLinkTask( + [=] (LinkBuffer& linkBuffer) { + CodeLocationCall callReturnLocation = + linkBuffer.locationOf(slowPathCall); + stubInfo->patch.deltaCallToDone = + CCallHelpers::differenceBetweenCodePtr( + callReturnLocation, + linkBuffer.locationOf(done)); + stubInfo->patch.deltaCallToJump = + CCallHelpers::differenceBetweenCodePtr( + callReturnLocation, + linkBuffer.locationOf(jump)); + stubInfo->callReturnLocation = callReturnLocation; + stubInfo->patch.deltaCallToSlowCase = + CCallHelpers::differenceBetweenCodePtr( + callReturnLocation, + linkBuffer.locationOf(slowPathBegin)); + }); + }); + }); + + setJSValue(patchpoint); + return; + } + } + + setJSValue(vmCall(m_out.int64, m_out.operation(operationGenericIn), m_callFrame, cell, lowJSValue(m_node->child1()))); + } + + void compileOverridesHasInstance() + { + JSFunction* defaultHasInstanceFunction = jsCast<JSFunction*>(m_node->cellOperand()->value()); + + LValue constructor = lowCell(m_node->child1()); + LValue hasInstance = lowJSValue(m_node->child2()); + + LBasicBlock defaultHasInstance = m_out.newBlock(); + LBasicBlock continuation = m_out.newBlock(); + + // Unlike in the DFG, we don't worry about cleaning this code up for the case where we have proven the hasInstanceValue is a constant as B3 should fix it for us. 
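The test emitted just below reduces to a two-step decision; a standalone sketch (the bit parameter stands in for the ImplementsDefaultHasInstance type-info flag):

    #include <cstdint>

    // OverridesHasInstance is true when the Symbol.hasInstance value is not the default
    // function, or when the constructor's type-info flags lack ImplementsDefaultHasInstance.
    static bool overridesHasInstanceSketch(const void* hasInstanceValue, const void* defaultHasInstanceFunction,
        uint8_t typeInfoFlags, uint8_t implementsDefaultHasInstanceBit)
    {
        if (hasInstanceValue != defaultHasInstanceFunction)
            return true;
        return !(typeInfoFlags & implementsDefaultHasInstanceBit);
    }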
+ + ASSERT(!m_node->child2().node()->isCellConstant() || defaultHasInstanceFunction == m_node->child2().node()->asCell()); + + ValueFromBlock notDefaultHasInstanceResult = m_out.anchor(m_out.booleanTrue); + m_out.branch(m_out.notEqual(hasInstance, m_out.constIntPtr(defaultHasInstanceFunction)), unsure(continuation), unsure(defaultHasInstance)); + + LBasicBlock lastNext = m_out.appendTo(defaultHasInstance, continuation); + ValueFromBlock implementsDefaultHasInstanceResult = m_out.anchor(m_out.testIsZero32( + m_out.load8ZeroExt32(constructor, m_heaps.JSCell_typeInfoFlags), + m_out.constInt32(ImplementsDefaultHasInstance))); + m_out.jump(continuation); + + m_out.appendTo(continuation, lastNext); + setBoolean(m_out.phi(m_out.boolean, implementsDefaultHasInstanceResult, notDefaultHasInstanceResult)); + } + + void compileCheckTypeInfoFlags() + { + speculate( + BadTypeInfoFlags, noValue(), 0, + m_out.testIsZero32( + m_out.load8ZeroExt32(lowCell(m_node->child1()), m_heaps.JSCell_typeInfoFlags), + m_out.constInt32(m_node->typeInfoOperand()))); + } + + void compileInstanceOf() + { + LValue cell; + + if (m_node->child1().useKind() == UntypedUse) + cell = lowJSValue(m_node->child1()); + else + cell = lowCell(m_node->child1()); + + LValue prototype = lowCell(m_node->child2()); + + LBasicBlock isCellCase = m_out.newBlock(); + LBasicBlock loop = m_out.newBlock(); + LBasicBlock notYetInstance = m_out.newBlock(); + LBasicBlock continuation = m_out.newBlock(); + + LValue condition; + if (m_node->child1().useKind() == UntypedUse) + condition = isCell(cell, provenType(m_node->child1())); + else + condition = m_out.booleanTrue; + + ValueFromBlock notCellResult = m_out.anchor(m_out.booleanFalse); + m_out.branch(condition, unsure(isCellCase), unsure(continuation)); + + LBasicBlock lastNext = m_out.appendTo(isCellCase, loop); + + speculate(BadType, noValue(), 0, isNotObject(prototype, provenType(m_node->child2()))); + + ValueFromBlock originalValue = m_out.anchor(cell); + m_out.jump(loop); + + m_out.appendTo(loop, notYetInstance); + LValue value = m_out.phi(m_out.int64, originalValue); + LValue structure = loadStructure(value); + LValue currentPrototype = m_out.load64(structure, m_heaps.Structure_prototype); + ValueFromBlock isInstanceResult = m_out.anchor(m_out.booleanTrue); + m_out.branch( + m_out.equal(currentPrototype, prototype), + unsure(continuation), unsure(notYetInstance)); + + m_out.appendTo(notYetInstance, continuation); + ValueFromBlock notInstanceResult = m_out.anchor(m_out.booleanFalse); + m_out.addIncomingToPhi(value, m_out.anchor(currentPrototype)); + m_out.branch(isCell(currentPrototype), unsure(loop), unsure(continuation)); + + m_out.appendTo(continuation, lastNext); + setBoolean( + m_out.phi(m_out.boolean, notCellResult, isInstanceResult, notInstanceResult)); + } + + void compileInstanceOfCustom() + { + LValue value = lowJSValue(m_node->child1()); + LValue constructor = lowCell(m_node->child2()); + LValue hasInstance = lowJSValue(m_node->child3()); + + setBoolean(m_out.logicalNot(m_out.equal(m_out.constInt32(0), vmCall(m_out.int32, m_out.operation(operationInstanceOfCustom), m_callFrame, value, constructor, hasInstance)))); + } + + void compileCountExecution() + { + TypedPointer counter = m_out.absolute(m_node->executionCounter()->address()); + m_out.store64(m_out.add(m_out.load64(counter), m_out.constInt64(1)), counter); + } + + void compileStoreBarrier() + { + emitStoreBarrier(lowCell(m_node->child1())); + } + + void compileHasIndexedProperty() + { + switch (m_node->arrayMode().type()) { + 
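Both array cases below share one shape: an optional bounds check against the butterfly's publicLength, then a hole test on the slot (a zero word for Int32/Contiguous, a self-unequal double for Double), with operationHasIndexedProperty as the fallback. A standalone sketch of the contiguous fast path (names are illustrative):

    #include <cstdint>

    // In-bounds and non-empty means the property is present; any other outcome is
    // answered by the slow-path call in the real code.
    static bool hasIndexedPropertyFastSketch(const uint64_t* butterfly, uint32_t publicLength,
        uint32_t index, bool& needsSlowPath)
    {
        if (index >= publicLength) {
            needsSlowPath = true;
            return false;
        }
        uint64_t slot = butterfly[index];
        needsSlowPath = !slot; // an empty (zero) slot is a hole
        return slot != 0;
    }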
case Array::Int32: + case Array::Contiguous: { + LValue base = lowCell(m_node->child1()); + LValue index = lowInt32(m_node->child2()); + LValue storage = lowStorage(m_node->child3()); + + IndexedAbstractHeap& heap = m_node->arrayMode().type() == Array::Int32 ? + m_heaps.indexedInt32Properties : m_heaps.indexedContiguousProperties; + + LBasicBlock checkHole = m_out.newBlock(); + LBasicBlock slowCase = m_out.newBlock(); + LBasicBlock continuation = m_out.newBlock(); + + if (!m_node->arrayMode().isInBounds()) { + m_out.branch( + m_out.aboveOrEqual( + index, m_out.load32NonNegative(storage, m_heaps.Butterfly_publicLength)), + rarely(slowCase), usually(checkHole)); + } else + m_out.jump(checkHole); + + LBasicBlock lastNext = m_out.appendTo(checkHole, slowCase); + LValue checkHoleResultValue = + m_out.notZero64(m_out.load64(baseIndex(heap, storage, index, m_node->child2()))); + ValueFromBlock checkHoleResult = m_out.anchor(checkHoleResultValue); + m_out.branch(checkHoleResultValue, usually(continuation), rarely(slowCase)); + + m_out.appendTo(slowCase, continuation); + ValueFromBlock slowResult = m_out.anchor(m_out.equal( + m_out.constInt64(JSValue::encode(jsBoolean(true))), + vmCall(m_out.int64, m_out.operation(operationHasIndexedProperty), m_callFrame, base, index))); + m_out.jump(continuation); + + m_out.appendTo(continuation, lastNext); + setBoolean(m_out.phi(m_out.boolean, checkHoleResult, slowResult)); + return; + } + case Array::Double: { + LValue base = lowCell(m_node->child1()); + LValue index = lowInt32(m_node->child2()); + LValue storage = lowStorage(m_node->child3()); + + IndexedAbstractHeap& heap = m_heaps.indexedDoubleProperties; + + LBasicBlock checkHole = m_out.newBlock(); + LBasicBlock slowCase = m_out.newBlock(); + LBasicBlock continuation = m_out.newBlock(); + + if (!m_node->arrayMode().isInBounds()) { + m_out.branch( + m_out.aboveOrEqual( + index, m_out.load32NonNegative(storage, m_heaps.Butterfly_publicLength)), + rarely(slowCase), usually(checkHole)); + } else + m_out.jump(checkHole); + + LBasicBlock lastNext = m_out.appendTo(checkHole, slowCase); + LValue doubleValue = m_out.loadDouble(baseIndex(heap, storage, index, m_node->child2())); + LValue checkHoleResultValue = m_out.doubleEqual(doubleValue, doubleValue); + ValueFromBlock checkHoleResult = m_out.anchor(checkHoleResultValue); + m_out.branch(checkHoleResultValue, usually(continuation), rarely(slowCase)); + + m_out.appendTo(slowCase, continuation); + ValueFromBlock slowResult = m_out.anchor(m_out.equal( + m_out.constInt64(JSValue::encode(jsBoolean(true))), + vmCall(m_out.int64, m_out.operation(operationHasIndexedProperty), m_callFrame, base, index))); + m_out.jump(continuation); + + m_out.appendTo(continuation, lastNext); + setBoolean(m_out.phi(m_out.boolean, checkHoleResult, slowResult)); + return; + } + + default: + RELEASE_ASSERT_NOT_REACHED(); + return; + } + } + + void compileHasGenericProperty() + { + LValue base = lowJSValue(m_node->child1()); + LValue property = lowCell(m_node->child2()); + setJSValue(vmCall(m_out.int64, m_out.operation(operationHasGenericProperty), m_callFrame, base, property)); + } + + void compileHasStructureProperty() + { + LValue base = lowJSValue(m_node->child1()); + LValue property = lowString(m_node->child2()); + LValue enumerator = lowCell(m_node->child3()); + + LBasicBlock correctStructure = m_out.newBlock(); + LBasicBlock wrongStructure = m_out.newBlock(); + LBasicBlock continuation = m_out.newBlock(); + + m_out.branch(m_out.notEqual( + m_out.load32(base, m_heaps.JSCell_structureID), 
+ m_out.load32(enumerator, m_heaps.JSPropertyNameEnumerator_cachedStructureID)), + rarely(wrongStructure), usually(correctStructure)); + + LBasicBlock lastNext = m_out.appendTo(correctStructure, wrongStructure); + ValueFromBlock correctStructureResult = m_out.anchor(m_out.booleanTrue); + m_out.jump(continuation); + + m_out.appendTo(wrongStructure, continuation); + ValueFromBlock wrongStructureResult = m_out.anchor( + m_out.equal( + m_out.constInt64(JSValue::encode(jsBoolean(true))), + vmCall(m_out.int64, m_out.operation(operationHasGenericProperty), m_callFrame, base, property))); + m_out.jump(continuation); + + m_out.appendTo(continuation, lastNext); + setBoolean(m_out.phi(m_out.boolean, correctStructureResult, wrongStructureResult)); + } + + void compileGetDirectPname() + { + LValue base = lowCell(m_graph.varArgChild(m_node, 0)); + LValue property = lowCell(m_graph.varArgChild(m_node, 1)); + LValue index = lowInt32(m_graph.varArgChild(m_node, 2)); + LValue enumerator = lowCell(m_graph.varArgChild(m_node, 3)); + + LBasicBlock checkOffset = m_out.newBlock(); + LBasicBlock inlineLoad = m_out.newBlock(); + LBasicBlock outOfLineLoad = m_out.newBlock(); + LBasicBlock slowCase = m_out.newBlock(); + LBasicBlock continuation = m_out.newBlock(); + + m_out.branch(m_out.notEqual( + m_out.load32(base, m_heaps.JSCell_structureID), + m_out.load32(enumerator, m_heaps.JSPropertyNameEnumerator_cachedStructureID)), + rarely(slowCase), usually(checkOffset)); + + LBasicBlock lastNext = m_out.appendTo(checkOffset, inlineLoad); + m_out.branch(m_out.aboveOrEqual(index, m_out.load32(enumerator, m_heaps.JSPropertyNameEnumerator_cachedInlineCapacity)), + unsure(outOfLineLoad), unsure(inlineLoad)); + + m_out.appendTo(inlineLoad, outOfLineLoad); + ValueFromBlock inlineResult = m_out.anchor( + m_out.load64(m_out.baseIndex(m_heaps.properties.atAnyNumber(), + base, m_out.zeroExt(index, m_out.int64), ScaleEight, JSObject::offsetOfInlineStorage()))); + m_out.jump(continuation); + + m_out.appendTo(outOfLineLoad, slowCase); + LValue storage = loadButterflyReadOnly(base); + LValue realIndex = m_out.signExt32To64( + m_out.neg(m_out.sub(index, m_out.load32(enumerator, m_heaps.JSPropertyNameEnumerator_cachedInlineCapacity)))); + int32_t offsetOfFirstProperty = static_cast<int32_t>(offsetInButterfly(firstOutOfLineOffset)) * sizeof(EncodedJSValue); + ValueFromBlock outOfLineResult = m_out.anchor( + m_out.load64(m_out.baseIndex(m_heaps.properties.atAnyNumber(), storage, realIndex, ScaleEight, offsetOfFirstProperty))); + m_out.jump(continuation); + + m_out.appendTo(slowCase, continuation); + ValueFromBlock slowCaseResult = m_out.anchor( + vmCall(m_out.int64, m_out.operation(operationGetByVal), m_callFrame, base, property)); + m_out.jump(continuation); + + m_out.appendTo(continuation, lastNext); + setJSValue(m_out.phi(m_out.int64, inlineResult, outOfLineResult, slowCaseResult)); + } + + void compileGetEnumerableLength() + { + LValue enumerator = lowCell(m_node->child1()); + setInt32(m_out.load32(enumerator, m_heaps.JSPropertyNameEnumerator_indexLength)); + } + + void compileGetPropertyEnumerator() + { + LValue base = lowCell(m_node->child1()); + setJSValue(vmCall(m_out.int64, m_out.operation(operationGetPropertyEnumerator), m_callFrame, base)); + } + + void compileGetEnumeratorStructurePname() + { + LValue enumerator = lowCell(m_node->child1()); + LValue index = lowInt32(m_node->child2()); + + LBasicBlock inBounds = m_out.newBlock(); + LBasicBlock outOfBounds = m_out.newBlock(); + LBasicBlock continuation = m_out.newBlock(); + + 
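The two nearly identical lowerings in this area (structure pnames here, generic pnames next) implement a bounds-checked read from the enumerator's cached property-name vector, yielding the null sentinel when the index is out of range; in plain C++ terms (illustrative names):

    #include <cstdint>

    // Indices below the end marker load a cached name pointer; anything else produces
    // the null sentinel (ValueNull in the real code).
    static const void* enumeratorPnameSketch(const void* const* cachedNames, uint32_t index, uint32_t endIndex)
    {
        if (index < endIndex)
            return cachedNames[index];
        return nullptr;
    }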
m_out.branch(m_out.below(index, m_out.load32(enumerator, m_heaps.JSPropertyNameEnumerator_endStructurePropertyIndex)), + usually(inBounds), rarely(outOfBounds)); + + LBasicBlock lastNext = m_out.appendTo(inBounds, outOfBounds); + LValue storage = m_out.loadPtr(enumerator, m_heaps.JSPropertyNameEnumerator_cachedPropertyNamesVector); + ValueFromBlock inBoundsResult = m_out.anchor( + m_out.loadPtr(m_out.baseIndex(m_heaps.JSPropertyNameEnumerator_cachedPropertyNamesVectorContents, storage, m_out.zeroExtPtr(index)))); + m_out.jump(continuation); + + m_out.appendTo(outOfBounds, continuation); + ValueFromBlock outOfBoundsResult = m_out.anchor(m_out.constInt64(ValueNull)); + m_out.jump(continuation); + + m_out.appendTo(continuation, lastNext); + setJSValue(m_out.phi(m_out.int64, inBoundsResult, outOfBoundsResult)); + } + + void compileGetEnumeratorGenericPname() + { + LValue enumerator = lowCell(m_node->child1()); + LValue index = lowInt32(m_node->child2()); + + LBasicBlock inBounds = m_out.newBlock(); + LBasicBlock outOfBounds = m_out.newBlock(); + LBasicBlock continuation = m_out.newBlock(); + + m_out.branch(m_out.below(index, m_out.load32(enumerator, m_heaps.JSPropertyNameEnumerator_endGenericPropertyIndex)), + usually(inBounds), rarely(outOfBounds)); + + LBasicBlock lastNext = m_out.appendTo(inBounds, outOfBounds); + LValue storage = m_out.loadPtr(enumerator, m_heaps.JSPropertyNameEnumerator_cachedPropertyNamesVector); + ValueFromBlock inBoundsResult = m_out.anchor( + m_out.loadPtr(m_out.baseIndex(m_heaps.JSPropertyNameEnumerator_cachedPropertyNamesVectorContents, storage, m_out.zeroExtPtr(index)))); + m_out.jump(continuation); + + m_out.appendTo(outOfBounds, continuation); + ValueFromBlock outOfBoundsResult = m_out.anchor(m_out.constInt64(ValueNull)); + m_out.jump(continuation); + + m_out.appendTo(continuation, lastNext); + setJSValue(m_out.phi(m_out.int64, inBoundsResult, outOfBoundsResult)); + } + + void compileToIndexString() + { + LValue index = lowInt32(m_node->child1()); + setJSValue(vmCall(m_out.int64, m_out.operation(operationToIndexString), m_callFrame, index)); + } + + void compileCheckStructureImmediate() + { + LValue structure = lowCell(m_node->child1()); + checkStructure( + structure, noValue(), BadCache, m_node->structureSet(), + [this] (Structure* structure) { + return weakStructure(structure); + }); + } + + void compileMaterializeNewObject() + { + ObjectMaterializationData& data = m_node->objectMaterializationData(); + + // Lower the values first, to avoid creating values inside a control flow diamond. + + Vector<LValue, 8> values; + for (unsigned i = 0; i < data.m_properties.size(); ++i) + values.append(lowJSValue(m_graph.varArgChild(m_node, 1 + i))); + + const StructureSet& set = m_node->structureSet(); + + Vector<LBasicBlock, 1> blocks(set.size()); + for (unsigned i = set.size(); i--;) + blocks[i] = m_out.newBlock(); + LBasicBlock dummyDefault = m_out.newBlock(); + LBasicBlock outerContinuation = m_out.newBlock(); + + Vector<SwitchCase, 1> cases(set.size()); + for (unsigned i = set.size(); i--;) + cases[i] = SwitchCase(weakStructure(set[i]), blocks[i], Weight(1)); + m_out.switchInstruction( + lowCell(m_graph.varArgChild(m_node, 0)), cases, dummyDefault, Weight(0)); + + LBasicBlock outerLastNext = m_out.m_nextBlock; + + Vector<ValueFromBlock, 1> results; + + for (unsigned i = set.size(); i--;) { + m_out.appendTo(blocks[i], i + 1 < set.size() ? 
blocks[i + 1] : dummyDefault); + + Structure* structure = set[i]; + + LValue object; + LValue butterfly; + + if (structure->outOfLineCapacity()) { + size_t allocationSize = JSFinalObject::allocationSize(structure->inlineCapacity()); + MarkedAllocator* allocator = &vm().heap.allocatorForObjectWithoutDestructor(allocationSize); + + LBasicBlock slowPath = m_out.newBlock(); + LBasicBlock continuation = m_out.newBlock(); + + LBasicBlock lastNext = m_out.insertNewBlocksBefore(slowPath); + + LValue endOfStorage = allocateBasicStorageAndGetEnd( + m_out.constIntPtr(structure->outOfLineCapacity() * sizeof(JSValue)), + slowPath); + + LValue fastButterflyValue = m_out.add( + m_out.constIntPtr(sizeof(IndexingHeader)), endOfStorage); + + LValue fastObjectValue = allocateObject( + m_out.constIntPtr(allocator), structure, fastButterflyValue, slowPath); + + ValueFromBlock fastObject = m_out.anchor(fastObjectValue); + ValueFromBlock fastButterfly = m_out.anchor(fastButterflyValue); + m_out.jump(continuation); + + m_out.appendTo(slowPath, continuation); + + LValue slowObjectValue = lazySlowPath( + [=] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> { + return createLazyCallGenerator( + operationNewObjectWithButterfly, locations[0].directGPR(), + CCallHelpers::TrustedImmPtr(structure)); + }); + ValueFromBlock slowObject = m_out.anchor(slowObjectValue); + ValueFromBlock slowButterfly = m_out.anchor( + m_out.loadPtr(slowObjectValue, m_heaps.JSObject_butterfly)); + + m_out.jump(continuation); + + m_out.appendTo(continuation, lastNext); + + object = m_out.phi(m_out.intPtr, fastObject, slowObject); + butterfly = m_out.phi(m_out.intPtr, fastButterfly, slowButterfly); + } else { + // In the easy case where we can do a one-shot allocation, we simply allocate the + // object to directly have the desired structure. + object = allocateObject(structure); + butterfly = nullptr; // Don't have one, don't need one. + } + + for (PropertyMapEntry entry : structure->getPropertiesConcurrently()) { + for (unsigned i = data.m_properties.size(); i--;) { + PhantomPropertyValue value = data.m_properties[i]; + if (m_graph.identifiers()[value.m_identifierNumber] != entry.key) + continue; + + LValue base = isInlineOffset(entry.offset) ? 
object : butterfly; + storeProperty(values[i], base, value.m_identifierNumber, entry.offset); + break; + } + } + + results.append(m_out.anchor(object)); + m_out.jump(outerContinuation); + } + + m_out.appendTo(dummyDefault, outerContinuation); + m_out.unreachable(); + + m_out.appendTo(outerContinuation, outerLastNext); + setJSValue(m_out.phi(m_out.intPtr, results)); + } + + void compileMaterializeCreateActivation() + { + ObjectMaterializationData& data = m_node->objectMaterializationData(); + + Vector<LValue, 8> values; + for (unsigned i = 0; i < data.m_properties.size(); ++i) + values.append(lowJSValue(m_graph.varArgChild(m_node, 2 + i))); + + LValue scope = lowCell(m_graph.varArgChild(m_node, 1)); + SymbolTable* table = m_node->castOperand<SymbolTable*>(); + ASSERT(table == m_graph.varArgChild(m_node, 0)->castConstant<SymbolTable*>()); + Structure* structure = m_graph.globalObjectFor(m_node->origin.semantic)->activationStructure(); + + LBasicBlock slowPath = m_out.newBlock(); + LBasicBlock continuation = m_out.newBlock(); + + LBasicBlock lastNext = m_out.insertNewBlocksBefore(slowPath); + + LValue fastObject = allocateObject<JSLexicalEnvironment>( + JSLexicalEnvironment::allocationSize(table), structure, m_out.intPtrZero, slowPath); + + m_out.storePtr(scope, fastObject, m_heaps.JSScope_next); + m_out.storePtr(weakPointer(table), fastObject, m_heaps.JSSymbolTableObject_symbolTable); + + + ValueFromBlock fastResult = m_out.anchor(fastObject); + m_out.jump(continuation); + + m_out.appendTo(slowPath, continuation); + // We ensure allocation sinking explictly sets bottom values for all field members. + // Therefore, it doesn't matter what JSValue we pass in as the initialization value + // because all fields will be overwritten. + // FIXME: It may be worth creating an operation that calls a constructor on JSLexicalEnvironment that + // doesn't initialize every slot because we are guaranteed to do that here. + LValue callResult = lazySlowPath( + [=] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> { + return createLazyCallGenerator( + operationCreateActivationDirect, locations[0].directGPR(), + CCallHelpers::TrustedImmPtr(structure), locations[1].directGPR(), + CCallHelpers::TrustedImmPtr(table), + CCallHelpers::TrustedImm64(JSValue::encode(jsUndefined()))); + }, scope); + ValueFromBlock slowResult = m_out.anchor(callResult); + m_out.jump(continuation); + + m_out.appendTo(continuation, lastNext); + LValue activation = m_out.phi(m_out.intPtr, fastResult, slowResult); + RELEASE_ASSERT(data.m_properties.size() == table->scopeSize()); + for (unsigned i = 0; i < data.m_properties.size(); ++i) { + m_out.store64(values[i], + activation, + m_heaps.JSEnvironmentRecord_variables[data.m_properties[i].m_identifierNumber]); + } + + if (validationEnabled()) { + // Validate to make sure every slot in the scope has one value. 
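+ // That is, every ScopeOffset in the SymbolTable must show up as some property's
+ // m_identifierNumber, since that is the variable slot the loop above stored into.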
+ ConcurrentJITLocker locker(table->m_lock); + for (auto iter = table->begin(locker), end = table->end(locker); iter != end; ++iter) { + bool found = false; + for (unsigned i = 0; i < data.m_properties.size(); ++i) { + if (iter->value.scopeOffset().offset() == data.m_properties[i].m_identifierNumber) { + found = true; + break; + } + } + ASSERT_UNUSED(found, found); + } + } + + setJSValue(activation); + } + + void compileCheckWatchdogTimer() + { + LBasicBlock timerDidFire = m_out.newBlock(); + LBasicBlock continuation = m_out.newBlock(); + + LValue state = m_out.load8ZeroExt32(m_out.absolute(vm().watchdog()->timerDidFireAddress())); + m_out.branch(m_out.isZero32(state), + usually(continuation), rarely(timerDidFire)); + + LBasicBlock lastNext = m_out.appendTo(timerDidFire, continuation); + + lazySlowPath( + [=] (const Vector<Location>&) -> RefPtr<LazySlowPath::Generator> { + return createLazyCallGenerator(operationHandleWatchdogTimer, InvalidGPRReg); + }); + m_out.jump(continuation); + + m_out.appendTo(continuation, lastNext); + } + + void compileRegExpExec() + { + if (m_node->child1().useKind() == CellUse + && m_node->child2().useKind() == CellUse) { + LValue base = lowCell(m_node->child1()); + LValue argument = lowCell(m_node->child2()); + setJSValue( + vmCall(Int64, m_out.operation(operationRegExpExec), m_callFrame, base, argument)); + return; + } + + LValue base = lowJSValue(m_node->child1()); + LValue argument = lowJSValue(m_node->child2()); + setJSValue( + vmCall(Int64, m_out.operation(operationRegExpExecGeneric), m_callFrame, base, argument)); + } + + void compileRegExpTest() + { + if (m_node->child1().useKind() == CellUse + && m_node->child2().useKind() == CellUse) { + LValue base = lowCell(m_node->child1()); + LValue argument = lowCell(m_node->child2()); + setBoolean( + vmCall(Int32, m_out.operation(operationRegExpTest), m_callFrame, base, argument)); + return; + } + + LValue base = lowJSValue(m_node->child1()); + LValue argument = lowJSValue(m_node->child2()); + setBoolean( + vmCall(Int32, m_out.operation(operationRegExpTestGeneric), m_callFrame, base, argument)); + } + + void compileNewRegexp() + { + // FIXME: We really should be able to inline code that uses NewRegexp. That means not + // reaching into the CodeBlock here. 
+ // https://bugs.webkit.org/show_bug.cgi?id=154808 + + LValue result = vmCall( + pointerType(), + m_out.operation(operationNewRegexp), m_callFrame, + m_out.constIntPtr(codeBlock()->regexp(m_node->regexpIndex()))); + + setJSValue(result); + } + + void compileStringReplace() + { + if (m_node->child1().useKind() == StringUse + && m_node->child2().useKind() == RegExpObjectUse + && m_node->child3().useKind() == StringUse) { + + if (JSString* replace = m_node->child3()->dynamicCastConstant<JSString*>()) { + if (!replace->length()) { + LValue string = lowString(m_node->child1()); + LValue regExp = lowCell(m_node->child2()); + speculateRegExpObject(m_node->child2(), regExp); + + LValue result = vmCall( + Int64, m_out.operation(operationStringProtoFuncReplaceRegExpEmptyStr), + m_callFrame, string, regExp); + + setJSValue(result); + return; + } + } + + LValue string = lowString(m_node->child1()); + LValue regExp = lowCell(m_node->child2()); + speculateRegExpObject(m_node->child2(), regExp); + LValue replace = lowString(m_node->child3()); + + LValue result = vmCall( + Int64, m_out.operation(operationStringProtoFuncReplaceRegExpString), + m_callFrame, string, regExp, replace); + + setJSValue(result); + return; + } + + LValue result = vmCall( + Int64, m_out.operation(operationStringProtoFuncReplaceGeneric), m_callFrame, + lowJSValue(m_node->child1()), lowJSValue(m_node->child2()), + lowJSValue(m_node->child3())); + + setJSValue(result); + } + + LValue didOverflowStack() + { + // This does a very simple leaf function analysis. The invariant of FTL call + // frames is that the caller had already done enough of a stack check to + // prove that this call frame has enough stack to run, and also enough stack + // to make runtime calls. So, we only need to stack check when making calls + // to other JS functions. If we don't find such calls then we don't need to + // do any stack checks. 
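+ // GetById and PutById count as calls here too, presumably because their inline caches can
+ // end up invoking JS getters and setters.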
+ + for (BlockIndex blockIndex = 0; blockIndex < m_graph.numBlocks(); ++blockIndex) { + DFG::BasicBlock* block = m_graph.block(blockIndex); + if (!block) + continue; + + for (unsigned nodeIndex = block->size(); nodeIndex--;) { + Node* node = block->at(nodeIndex); + + switch (node->op()) { + case GetById: + case PutById: + case Call: + case Construct: + return m_out.below( + m_callFrame, + m_out.loadPtr( + m_out.absolute(vm().addressOfFTLStackLimit()))); + + default: + break; + } + } + } + + return m_out.booleanFalse; + } + + struct ArgumentsLength { + ArgumentsLength() + : isKnown(false) + , known(UINT_MAX) + , value(nullptr) + { + } + + bool isKnown; + unsigned known; + LValue value; + }; + ArgumentsLength getArgumentsLength(InlineCallFrame* inlineCallFrame) + { + ArgumentsLength length; + + if (inlineCallFrame && !inlineCallFrame->isVarargs()) { + length.known = inlineCallFrame->arguments.size() - 1; + length.isKnown = true; + length.value = m_out.constInt32(length.known); + } else { + length.known = UINT_MAX; + length.isKnown = false; + + VirtualRegister argumentCountRegister; + if (!inlineCallFrame) + argumentCountRegister = VirtualRegister(JSStack::ArgumentCount); + else + argumentCountRegister = inlineCallFrame->argumentCountRegister; + length.value = m_out.sub(m_out.load32(payloadFor(argumentCountRegister)), m_out.int32One); + } + + return length; + } + + ArgumentsLength getArgumentsLength() + { + return getArgumentsLength(m_node->origin.semantic.inlineCallFrame); + } + + LValue getCurrentCallee() + { + if (InlineCallFrame* frame = m_node->origin.semantic.inlineCallFrame) { + if (frame->isClosureCall) + return m_out.loadPtr(addressFor(frame->calleeRecovery.virtualRegister())); + return weakPointer(frame->calleeRecovery.constant().asCell()); + } + return m_out.loadPtr(addressFor(JSStack::Callee)); + } + + LValue getArgumentsStart(InlineCallFrame* inlineCallFrame) + { + VirtualRegister start = AssemblyHelpers::argumentsStart(inlineCallFrame); + return addressFor(start).value(); + } + + LValue getArgumentsStart() + { + return getArgumentsStart(m_node->origin.semantic.inlineCallFrame); + } + + template<typename Functor> + void checkStructure( + LValue structureDiscriminant, const FormattedValue& formattedValue, ExitKind exitKind, + const StructureSet& set, const Functor& weakStructureDiscriminant) + { + if (set.isEmpty()) { + terminate(exitKind); + return; + } + + if (set.size() == 1) { + speculate( + exitKind, formattedValue, 0, + m_out.notEqual(structureDiscriminant, weakStructureDiscriminant(set[0]))); + return; + } + + LBasicBlock continuation = m_out.newBlock(); + + LBasicBlock lastNext = m_out.insertNewBlocksBefore(continuation); + for (unsigned i = 0; i < set.size() - 1; ++i) { + LBasicBlock nextStructure = m_out.newBlock(); + m_out.branch( + m_out.equal(structureDiscriminant, weakStructureDiscriminant(set[i])), + unsure(continuation), unsure(nextStructure)); + m_out.appendTo(nextStructure); + } + + speculate( + exitKind, formattedValue, 0, + m_out.notEqual(structureDiscriminant, weakStructureDiscriminant(set.last()))); + + m_out.jump(continuation); + m_out.appendTo(continuation, lastNext); + } + + LValue numberOrNotCellToInt32(Edge edge, LValue value) + { + LBasicBlock intCase = m_out.newBlock(); + LBasicBlock notIntCase = m_out.newBlock(); + LBasicBlock doubleCase = 0; + LBasicBlock notNumberCase = 0; + if (edge.useKind() == NotCellUse) { + doubleCase = m_out.newBlock(); + notNumberCase = m_out.newBlock(); + } + LBasicBlock continuation = m_out.newBlock(); + + 
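+ // Int32 values unbox directly and doubles go through doubleToInt32(). For NotCellUse, any
+ // remaining non-number is compared against jsBoolean(true): true becomes 1, everything else 0.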
Vector<ValueFromBlock> results; + + m_out.branch(isNotInt32(value), unsure(notIntCase), unsure(intCase)); + + LBasicBlock lastNext = m_out.appendTo(intCase, notIntCase); + results.append(m_out.anchor(unboxInt32(value))); + m_out.jump(continuation); + + if (edge.useKind() == NumberUse) { + m_out.appendTo(notIntCase, continuation); + FTL_TYPE_CHECK(jsValueValue(value), edge, SpecBytecodeNumber, isCellOrMisc(value)); + results.append(m_out.anchor(doubleToInt32(unboxDouble(value)))); + m_out.jump(continuation); + } else { + m_out.appendTo(notIntCase, doubleCase); + m_out.branch( + isCellOrMisc(value, provenType(edge)), unsure(notNumberCase), unsure(doubleCase)); + + m_out.appendTo(doubleCase, notNumberCase); + results.append(m_out.anchor(doubleToInt32(unboxDouble(value)))); + m_out.jump(continuation); + + m_out.appendTo(notNumberCase, continuation); + + FTL_TYPE_CHECK(jsValueValue(value), edge, ~SpecCell, isCell(value)); + + LValue specialResult = m_out.select( + m_out.equal(value, m_out.constInt64(JSValue::encode(jsBoolean(true)))), + m_out.int32One, m_out.int32Zero); + results.append(m_out.anchor(specialResult)); + m_out.jump(continuation); + } + + m_out.appendTo(continuation, lastNext); + return m_out.phi(m_out.int32, results); + } + + void checkInferredType(Edge edge, LValue value, const InferredType::Descriptor& type) + { + // This cannot use FTL_TYPE_CHECK or typeCheck() because it is called partially, as in a node like: + // + // MultiPutByOffset(...) + // + // may be lowered to: + // + // switch (object->structure) { + // case 42: + // checkInferredType(..., type1); + // ... + // break; + // case 43: + // checkInferredType(..., type2); + // ... + // break; + // } + // + // where type1 and type2 are different. Using typeCheck() would mean that the edge would be + // filtered by type1 & type2, instead of type1 | type2. 
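+ // Hence every case below calls speculate() directly rather than typeCheck(), leaving the
+ // edge's proven-type filter untouched.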
+ + switch (type.kind()) { + case InferredType::Bottom: + speculate(BadType, jsValueValue(value), edge.node(), m_out.booleanTrue); + return; + + case InferredType::Boolean: + speculate(BadType, jsValueValue(value), edge.node(), isNotBoolean(value, provenType(edge))); + return; + + case InferredType::Other: + speculate(BadType, jsValueValue(value), edge.node(), isNotOther(value, provenType(edge))); + return; + + case InferredType::Int32: + speculate(BadType, jsValueValue(value), edge.node(), isNotInt32(value, provenType(edge))); + return; + + case InferredType::Number: + speculate(BadType, jsValueValue(value), edge.node(), isNotNumber(value, provenType(edge))); + return; + + case InferredType::String: + speculate(BadType, jsValueValue(value), edge.node(), isNotCell(value, provenType(edge))); + speculate(BadType, jsValueValue(value), edge.node(), isNotString(value, provenType(edge))); + return; + + case InferredType::Symbol: + speculate(BadType, jsValueValue(value), edge.node(), isNotCell(value, provenType(edge))); + speculate(BadType, jsValueValue(value), edge.node(), isNotSymbol(value, provenType(edge))); + return; + + case InferredType::ObjectWithStructure: + speculate(BadType, jsValueValue(value), edge.node(), isNotCell(value, provenType(edge))); + if (!abstractValue(edge).m_structure.isSubsetOf(StructureSet(type.structure()))) { + speculate( + BadType, jsValueValue(value), edge.node(), + m_out.notEqual( + m_out.load32(value, m_heaps.JSCell_structureID), + weakStructureID(type.structure()))); + } + return; + + case InferredType::ObjectWithStructureOrOther: { + LBasicBlock cellCase = m_out.newBlock(); + LBasicBlock notCellCase = m_out.newBlock(); + LBasicBlock continuation = m_out.newBlock(); + + m_out.branch(isCell(value, provenType(edge)), unsure(cellCase), unsure(notCellCase)); + + LBasicBlock lastNext = m_out.appendTo(cellCase, notCellCase); + + if (!abstractValue(edge).m_structure.isSubsetOf(StructureSet(type.structure()))) { + speculate( + BadType, jsValueValue(value), edge.node(), + m_out.notEqual( + m_out.load32(value, m_heaps.JSCell_structureID), + weakStructureID(type.structure()))); + } + + m_out.jump(continuation); + + m_out.appendTo(notCellCase, continuation); + + speculate( + BadType, jsValueValue(value), edge.node(), + isNotOther(value, provenType(edge) & ~SpecCell)); + + m_out.jump(continuation); + + m_out.appendTo(continuation, lastNext); + return; + } + + case InferredType::Object: + speculate(BadType, jsValueValue(value), edge.node(), isNotCell(value, provenType(edge))); + speculate(BadType, jsValueValue(value), edge.node(), isNotObject(value, provenType(edge))); + return; + + case InferredType::ObjectOrOther: { + LBasicBlock cellCase = m_out.newBlock(); + LBasicBlock notCellCase = m_out.newBlock(); + LBasicBlock continuation = m_out.newBlock(); + + m_out.branch(isCell(value, provenType(edge)), unsure(cellCase), unsure(notCellCase)); + + LBasicBlock lastNext = m_out.appendTo(cellCase, notCellCase); + + speculate( + BadType, jsValueValue(value), edge.node(), + isNotObject(value, provenType(edge) & SpecCell)); + + m_out.jump(continuation); + + m_out.appendTo(notCellCase, continuation); + + speculate( + BadType, jsValueValue(value), edge.node(), + isNotOther(value, provenType(edge) & ~SpecCell)); + + m_out.jump(continuation); + + m_out.appendTo(continuation, lastNext); + return; + } + + case InferredType::Top: + return; + } + + DFG_CRASH(m_graph, m_node, "Bad inferred type"); + } + + LValue loadProperty(LValue storage, unsigned identifierNumber, PropertyOffset offset) + { 
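+ // The caller passes either the object itself (for inline offsets) or its butterfly (for
+ // out-of-line ones); addressOfProperty() below turns the PropertyOffset into the matching
+ // byte offset relative to that base.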
+ return m_out.load64(addressOfProperty(storage, identifierNumber, offset)); + } + + void storeProperty( + LValue value, LValue storage, unsigned identifierNumber, PropertyOffset offset) + { + m_out.store64(value, addressOfProperty(storage, identifierNumber, offset)); + } + + TypedPointer addressOfProperty( + LValue storage, unsigned identifierNumber, PropertyOffset offset) + { + return m_out.address( + m_heaps.properties[identifierNumber], storage, offsetRelativeToBase(offset)); + } + + LValue storageForTransition( + LValue object, PropertyOffset offset, + Structure* previousStructure, Structure* nextStructure) + { + if (isInlineOffset(offset)) + return object; + + if (previousStructure->outOfLineCapacity() == nextStructure->outOfLineCapacity()) + return loadButterflyWithBarrier(object); + + LValue result; + if (!previousStructure->outOfLineCapacity()) + result = allocatePropertyStorage(object, previousStructure); + else { + result = reallocatePropertyStorage( + object, loadButterflyWithBarrier(object), + previousStructure, nextStructure); + } + + emitStoreBarrier(object); + + return result; + } + + LValue allocatePropertyStorage(LValue object, Structure* previousStructure) + { + if (previousStructure->couldHaveIndexingHeader()) { + return vmCall( + m_out.intPtr, + m_out.operation( + operationReallocateButterflyToHavePropertyStorageWithInitialCapacity), + m_callFrame, object); + } + + LValue result = allocatePropertyStorageWithSizeImpl(initialOutOfLineCapacity); + m_out.storePtr(result, object, m_heaps.JSObject_butterfly); + return result; + } + + LValue reallocatePropertyStorage( + LValue object, LValue oldStorage, Structure* previous, Structure* next) + { + size_t oldSize = previous->outOfLineCapacity(); + size_t newSize = oldSize * outOfLineGrowthFactor; + + ASSERT_UNUSED(next, newSize == next->outOfLineCapacity()); + + if (previous->couldHaveIndexingHeader()) { + LValue newAllocSize = m_out.constIntPtr(newSize); + return vmCall(m_out.intPtr, m_out.operation(operationReallocateButterflyToGrowPropertyStorage), m_callFrame, object, newAllocSize); + } + + LValue result = allocatePropertyStorageWithSizeImpl(newSize); + + ptrdiff_t headerSize = -sizeof(IndexingHeader) - sizeof(void*); + ptrdiff_t endStorage = headerSize - static_cast<ptrdiff_t>(oldSize * sizeof(JSValue)); + + for (ptrdiff_t offset = headerSize; offset > endStorage; offset -= sizeof(void*)) { + LValue loaded = + m_out.loadPtr(m_out.address(m_heaps.properties.atAnyNumber(), oldStorage, offset)); + m_out.storePtr(loaded, m_out.address(m_heaps.properties.atAnyNumber(), result, offset)); + } + + m_out.storePtr(result, m_out.address(object, m_heaps.JSObject_butterfly)); + + return result; + } + + LValue allocatePropertyStorageWithSizeImpl(size_t sizeInValues) + { + LBasicBlock slowPath = m_out.newBlock(); + LBasicBlock continuation = m_out.newBlock(); + + LBasicBlock lastNext = m_out.insertNewBlocksBefore(slowPath); + + LValue endOfStorage = allocateBasicStorageAndGetEnd( + m_out.constIntPtr(sizeInValues * sizeof(JSValue)), slowPath); + + ValueFromBlock fastButterfly = m_out.anchor( + m_out.add(m_out.constIntPtr(sizeof(IndexingHeader)), endOfStorage)); + + m_out.jump(continuation); + + m_out.appendTo(slowPath, continuation); + + LValue slowButterflyValue; + if (sizeInValues == initialOutOfLineCapacity) { + slowButterflyValue = lazySlowPath( + [=] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> { + return createLazyCallGenerator( + operationAllocatePropertyStorageWithInitialCapacity, + 
locations[0].directGPR()); + }); + } else { + slowButterflyValue = lazySlowPath( + [=] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> { + return createLazyCallGenerator( + operationAllocatePropertyStorage, locations[0].directGPR(), + CCallHelpers::TrustedImmPtr(sizeInValues)); + }); + } + ValueFromBlock slowButterfly = m_out.anchor(slowButterflyValue); + + m_out.jump(continuation); + + m_out.appendTo(continuation, lastNext); + + return m_out.phi(m_out.intPtr, fastButterfly, slowButterfly); + } + + LValue getById(LValue base) + { + Node* node = m_node; + UniquedStringImpl* uid = m_graph.identifiers()[node->identifierNumber()]; + + B3::PatchpointValue* patchpoint = m_out.patchpoint(Int64); + patchpoint->appendSomeRegister(base); + + // FIXME: If this is a GetByIdFlush, we might get some performance boost if we claim that it + // clobbers volatile registers late. It's not necessary for correctness, though, since the + // IC code is super smart about saving registers. + // https://bugs.webkit.org/show_bug.cgi?id=152848 + + patchpoint->clobber(RegisterSet::macroScratchRegisters()); + + RefPtr<PatchpointExceptionHandle> exceptionHandle = + preparePatchpointForExceptions(patchpoint); + + State* state = &m_ftlState; + patchpoint->setGenerator( + [=] (CCallHelpers& jit, const StackmapGenerationParams& params) { + AllowMacroScratchRegisterUsage allowScratch(jit); + + CallSiteIndex callSiteIndex = + state->jitCode->common.addUniqueCallSiteIndex(node->origin.semantic); + + // This is the direct exit target for operation calls. + Box<CCallHelpers::JumpList> exceptions = + exceptionHandle->scheduleExitCreation(params)->jumps(jit); + + // This is the exit for call IC's created by the getById for getters. We don't have + // to do anything weird other than call this, since it will associate the exit with + // the callsite index. 
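+ // (In the patchpoint below, params[0] holds the result registers and params[1] the base
+ // cell appended via appendSomeRegister() above.)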
+ exceptionHandle->scheduleExitCreationForUnwind(params, callSiteIndex); + + auto generator = Box<JITGetByIdGenerator>::create( + jit.codeBlock(), node->origin.semantic, callSiteIndex, + params.unavailableRegisters(), JSValueRegs(params[1].gpr()), + JSValueRegs(params[0].gpr())); + + generator->generateFastPath(jit); + CCallHelpers::Label done = jit.label(); + + params.addLatePath( + [=] (CCallHelpers& jit) { + AllowMacroScratchRegisterUsage allowScratch(jit); + + generator->slowPathJump().link(&jit); + CCallHelpers::Label slowPathBegin = jit.label(); + CCallHelpers::Call slowPathCall = callOperation( + *state, params.unavailableRegisters(), jit, node->origin.semantic, + exceptions.get(), operationGetByIdOptimize, params[0].gpr(), + CCallHelpers::TrustedImmPtr(generator->stubInfo()), params[1].gpr(), + CCallHelpers::TrustedImmPtr(uid)).call(); + jit.jump().linkTo(done, &jit); + + generator->reportSlowPathCall(slowPathBegin, slowPathCall); + + jit.addLinkTask( + [=] (LinkBuffer& linkBuffer) { + generator->finalize(linkBuffer); + }); + }); + }); + + return patchpoint; + } + + LValue loadButterflyWithBarrier(LValue object) + { + return copyBarrier( + object, m_out.loadPtr(object, m_heaps.JSObject_butterfly), operationGetButterfly); + } + + LValue loadVectorWithBarrier(LValue object) + { + LValue fastResultValue = m_out.loadPtr(object, m_heaps.JSArrayBufferView_vector); + return copyBarrier( + fastResultValue, + [&] () -> LValue { + LBasicBlock slowPath = m_out.newBlock(); + LBasicBlock continuation = m_out.newBlock(); + + ValueFromBlock fastResult = m_out.anchor(fastResultValue); + m_out.branch(isFastTypedArray(object), rarely(slowPath), usually(continuation)); + + LBasicBlock lastNext = m_out.appendTo(slowPath, continuation); + + LValue slowResultValue = lazySlowPath( + [=] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> { + return createLazyCallGenerator( + operationGetArrayBufferVector, locations[0].directGPR(), + locations[1].directGPR()); + }, object); + ValueFromBlock slowResult = m_out.anchor(slowResultValue); + m_out.jump(continuation); + + m_out.appendTo(continuation, lastNext); + return m_out.phi(m_out.intPtr, fastResult, slowResult); + }); + } + + LValue copyBarrier(LValue object, LValue pointer, P_JITOperation_EC slowPathFunction) + { + return copyBarrier( + pointer, + [&] () -> LValue { + return lazySlowPath( + [=] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> { + return createLazyCallGenerator( + slowPathFunction, locations[0].directGPR(), locations[1].directGPR()); + }, object); + }); + } + + template<typename Functor> + LValue copyBarrier(LValue pointer, const Functor& functor) + { + LBasicBlock slowPath = m_out.newBlock(); + LBasicBlock continuation = m_out.newBlock(); + + ValueFromBlock fastResult = m_out.anchor(pointer); + m_out.branch(isInToSpace(pointer), usually(continuation), rarely(slowPath)); + + LBasicBlock lastNext = m_out.appendTo(slowPath, continuation); + + ValueFromBlock slowResult = m_out.anchor(functor()); + m_out.jump(continuation); + + m_out.appendTo(continuation, lastNext); + return m_out.phi(m_out.intPtr, fastResult, slowResult); + } + + LValue isInToSpace(LValue pointer) + { + return m_out.testIsZeroPtr(pointer, m_out.constIntPtr(CopyBarrierBase::spaceBits)); + } + + LValue loadButterflyReadOnly(LValue object) + { + return removeSpaceBits(m_out.loadPtr(object, m_heaps.JSObject_butterfly)); + } + + LValue loadVectorReadOnly(LValue object) + { + LValue fastResultValue = m_out.loadPtr(object, 
m_heaps.JSArrayBufferView_vector); + + LBasicBlock possiblyFromSpace = m_out.newBlock(); + LBasicBlock continuation = m_out.newBlock(); + + ValueFromBlock fastResult = m_out.anchor(fastResultValue); + + m_out.branch(isInToSpace(fastResultValue), usually(continuation), rarely(possiblyFromSpace)); + + LBasicBlock lastNext = m_out.appendTo(possiblyFromSpace, continuation); + + LValue slowResultValue = m_out.select( + isFastTypedArray(object), removeSpaceBits(fastResultValue), fastResultValue); + ValueFromBlock slowResult = m_out.anchor(slowResultValue); + m_out.jump(continuation); + + m_out.appendTo(continuation, lastNext); + + return m_out.phi(m_out.intPtr, fastResult, slowResult); + } + + LValue removeSpaceBits(LValue storage) + { + return m_out.bitAnd( + storage, m_out.constIntPtr(~static_cast<intptr_t>(CopyBarrierBase::spaceBits))); + } + + LValue isFastTypedArray(LValue object) + { + return m_out.equal( + m_out.load32(object, m_heaps.JSArrayBufferView_mode), + m_out.constInt32(FastTypedArray)); + } + + TypedPointer baseIndex(IndexedAbstractHeap& heap, LValue storage, LValue index, Edge edge, ptrdiff_t offset = 0) + { + return m_out.baseIndex( + heap, storage, m_out.zeroExtPtr(index), provenValue(edge), offset); + } + + template<typename IntFunctor, typename DoubleFunctor> + void compare( + const IntFunctor& intFunctor, const DoubleFunctor& doubleFunctor, + S_JITOperation_EJJ helperFunction) + { + if (m_node->isBinaryUseKind(Int32Use)) { + LValue left = lowInt32(m_node->child1()); + LValue right = lowInt32(m_node->child2()); + setBoolean(intFunctor(left, right)); + return; + } + + if (m_node->isBinaryUseKind(Int52RepUse)) { + Int52Kind kind; + LValue left = lowWhicheverInt52(m_node->child1(), kind); + LValue right = lowInt52(m_node->child2(), kind); + setBoolean(intFunctor(left, right)); + return; + } + + if (m_node->isBinaryUseKind(DoubleRepUse)) { + LValue left = lowDouble(m_node->child1()); + LValue right = lowDouble(m_node->child2()); + setBoolean(doubleFunctor(left, right)); + return; + } + + if (m_node->isBinaryUseKind(UntypedUse)) { + nonSpeculativeCompare(intFunctor, helperFunction); + return; + } + + DFG_CRASH(m_graph, m_node, "Bad use kinds"); + } + + void compareEqObjectOrOtherToObject(Edge leftChild, Edge rightChild) + { + LValue rightCell = lowCell(rightChild); + LValue leftValue = lowJSValue(leftChild, ManualOperandSpeculation); + + speculateTruthyObject(rightChild, rightCell, SpecObject); + + LBasicBlock leftCellCase = m_out.newBlock(); + LBasicBlock leftNotCellCase = m_out.newBlock(); + LBasicBlock continuation = m_out.newBlock(); + + m_out.branch( + isCell(leftValue, provenType(leftChild)), + unsure(leftCellCase), unsure(leftNotCellCase)); + + LBasicBlock lastNext = m_out.appendTo(leftCellCase, leftNotCellCase); + speculateTruthyObject(leftChild, leftValue, SpecObject | (~SpecCell)); + ValueFromBlock cellResult = m_out.anchor(m_out.equal(rightCell, leftValue)); + m_out.jump(continuation); + + m_out.appendTo(leftNotCellCase, continuation); + FTL_TYPE_CHECK( + jsValueValue(leftValue), leftChild, SpecOther | SpecCell, isNotOther(leftValue)); + ValueFromBlock notCellResult = m_out.anchor(m_out.booleanFalse); + m_out.jump(continuation); + + m_out.appendTo(continuation, lastNext); + setBoolean(m_out.phi(m_out.boolean, cellResult, notCellResult)); + } + + void speculateTruthyObject(Edge edge, LValue cell, SpeculatedType filter) + { + if (masqueradesAsUndefinedWatchpointIsStillValid()) { + FTL_TYPE_CHECK(jsValueValue(cell), edge, filter, isNotObject(cell)); + return; + } + + 
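+ // The watchpoint is no longer intact: do the same object check, then also make sure the
+ // cell's type-info flags do not have MasqueradesAsUndefined set.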
FTL_TYPE_CHECK(jsValueValue(cell), edge, filter, isNotObject(cell)); + speculate( + BadType, jsValueValue(cell), edge.node(), + m_out.testNonZero32( + m_out.load8ZeroExt32(cell, m_heaps.JSCell_typeInfoFlags), + m_out.constInt32(MasqueradesAsUndefined))); + } + + template<typename IntFunctor> + void nonSpeculativeCompare(const IntFunctor& intFunctor, S_JITOperation_EJJ helperFunction) + { + LValue left = lowJSValue(m_node->child1()); + LValue right = lowJSValue(m_node->child2()); + + LBasicBlock leftIsInt = m_out.newBlock(); + LBasicBlock fastPath = m_out.newBlock(); + LBasicBlock slowPath = m_out.newBlock(); + LBasicBlock continuation = m_out.newBlock(); + + m_out.branch(isNotInt32(left), rarely(slowPath), usually(leftIsInt)); + + LBasicBlock lastNext = m_out.appendTo(leftIsInt, fastPath); + m_out.branch(isNotInt32(right), rarely(slowPath), usually(fastPath)); + + m_out.appendTo(fastPath, slowPath); + ValueFromBlock fastResult = m_out.anchor(intFunctor(unboxInt32(left), unboxInt32(right))); + m_out.jump(continuation); + + m_out.appendTo(slowPath, continuation); + ValueFromBlock slowResult = m_out.anchor(m_out.notNull(vmCall( + m_out.intPtr, m_out.operation(helperFunction), m_callFrame, left, right))); + m_out.jump(continuation); + + m_out.appendTo(continuation, lastNext); + setBoolean(m_out.phi(m_out.boolean, fastResult, slowResult)); + } + + LValue stringsEqual(LValue leftJSString, LValue rightJSString) + { + LBasicBlock notTriviallyUnequalCase = m_out.newBlock(); + LBasicBlock notEmptyCase = m_out.newBlock(); + LBasicBlock leftReadyCase = m_out.newBlock(); + LBasicBlock rightReadyCase = m_out.newBlock(); + LBasicBlock left8BitCase = m_out.newBlock(); + LBasicBlock right8BitCase = m_out.newBlock(); + LBasicBlock loop = m_out.newBlock(); + LBasicBlock bytesEqual = m_out.newBlock(); + LBasicBlock trueCase = m_out.newBlock(); + LBasicBlock falseCase = m_out.newBlock(); + LBasicBlock slowCase = m_out.newBlock(); + LBasicBlock continuation = m_out.newBlock(); + + LValue length = m_out.load32(leftJSString, m_heaps.JSString_length); + + m_out.branch( + m_out.notEqual(length, m_out.load32(rightJSString, m_heaps.JSString_length)), + unsure(falseCase), unsure(notTriviallyUnequalCase)); + + LBasicBlock lastNext = m_out.appendTo(notTriviallyUnequalCase, notEmptyCase); + + m_out.branch(m_out.isZero32(length), unsure(trueCase), unsure(notEmptyCase)); + + m_out.appendTo(notEmptyCase, leftReadyCase); + + LValue left = m_out.loadPtr(leftJSString, m_heaps.JSString_value); + LValue right = m_out.loadPtr(rightJSString, m_heaps.JSString_value); + + m_out.branch(m_out.notNull(left), usually(leftReadyCase), rarely(slowCase)); + + m_out.appendTo(leftReadyCase, rightReadyCase); + + m_out.branch(m_out.notNull(right), usually(rightReadyCase), rarely(slowCase)); + + m_out.appendTo(rightReadyCase, left8BitCase); + + m_out.branch( + m_out.testIsZero32( + m_out.load32(left, m_heaps.StringImpl_hashAndFlags), + m_out.constInt32(StringImpl::flagIs8Bit())), + unsure(slowCase), unsure(left8BitCase)); + + m_out.appendTo(left8BitCase, right8BitCase); + + m_out.branch( + m_out.testIsZero32( + m_out.load32(right, m_heaps.StringImpl_hashAndFlags), + m_out.constInt32(StringImpl::flagIs8Bit())), + unsure(slowCase), unsure(right8BitCase)); + + m_out.appendTo(right8BitCase, loop); + + LValue leftData = m_out.loadPtr(left, m_heaps.StringImpl_data); + LValue rightData = m_out.loadPtr(right, m_heaps.StringImpl_data); + + ValueFromBlock indexAtStart = m_out.anchor(length); + + m_out.jump(loop); + + m_out.appendTo(loop, bytesEqual); + + 
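+ // The loop below walks the index down from length to zero, comparing one 8-bit character per
+ // iteration; the phi at the loop head also receives the decremented index from bytesEqual.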
LValue indexAtLoopTop = m_out.phi(m_out.int32, indexAtStart); + LValue indexInLoop = m_out.sub(indexAtLoopTop, m_out.int32One); + + LValue leftByte = m_out.load8ZeroExt32( + m_out.baseIndex(m_heaps.characters8, leftData, m_out.zeroExtPtr(indexInLoop))); + LValue rightByte = m_out.load8ZeroExt32( + m_out.baseIndex(m_heaps.characters8, rightData, m_out.zeroExtPtr(indexInLoop))); + + m_out.branch(m_out.notEqual(leftByte, rightByte), unsure(falseCase), unsure(bytesEqual)); + + m_out.appendTo(bytesEqual, trueCase); + + ValueFromBlock indexForNextIteration = m_out.anchor(indexInLoop); + m_out.addIncomingToPhi(indexAtLoopTop, indexForNextIteration); + m_out.branch(m_out.notZero32(indexInLoop), unsure(loop), unsure(trueCase)); + + m_out.appendTo(trueCase, falseCase); + + ValueFromBlock trueResult = m_out.anchor(m_out.booleanTrue); + m_out.jump(continuation); + + m_out.appendTo(falseCase, slowCase); + + ValueFromBlock falseResult = m_out.anchor(m_out.booleanFalse); + m_out.jump(continuation); + + m_out.appendTo(slowCase, continuation); + + LValue slowResultValue = vmCall( + m_out.int64, m_out.operation(operationCompareStringEq), m_callFrame, + leftJSString, rightJSString); + ValueFromBlock slowResult = m_out.anchor(unboxBoolean(slowResultValue)); + m_out.jump(continuation); + + m_out.appendTo(continuation, lastNext); + return m_out.phi(m_out.boolean, trueResult, falseResult, slowResult); + } + + enum ScratchFPRUsage { + DontNeedScratchFPR, + NeedScratchFPR + }; + template<typename BinaryArithOpGenerator, ScratchFPRUsage scratchFPRUsage = DontNeedScratchFPR> + void emitBinarySnippet(J_JITOperation_EJJ slowPathFunction) + { + Node* node = m_node; + + LValue left = lowJSValue(node->child1()); + LValue right = lowJSValue(node->child2()); + + SnippetOperand leftOperand(m_state.forNode(node->child1()).resultType()); + SnippetOperand rightOperand(m_state.forNode(node->child2()).resultType()); + + PatchpointValue* patchpoint = m_out.patchpoint(Int64); + patchpoint->appendSomeRegister(left); + patchpoint->appendSomeRegister(right); + patchpoint->append(m_tagMask, ValueRep::reg(GPRInfo::tagMaskRegister)); + patchpoint->append(m_tagTypeNumber, ValueRep::reg(GPRInfo::tagTypeNumberRegister)); + RefPtr<PatchpointExceptionHandle> exceptionHandle = + preparePatchpointForExceptions(patchpoint); + patchpoint->numGPScratchRegisters = 1; + patchpoint->numFPScratchRegisters = 2; + if (scratchFPRUsage == NeedScratchFPR) + patchpoint->numFPScratchRegisters++; + patchpoint->clobber(RegisterSet::macroScratchRegisters()); + State* state = &m_ftlState; + patchpoint->setGenerator( + [=] (CCallHelpers& jit, const StackmapGenerationParams& params) { + AllowMacroScratchRegisterUsage allowScratch(jit); + + Box<CCallHelpers::JumpList> exceptions = + exceptionHandle->scheduleExitCreation(params)->jumps(jit); + + auto generator = Box<BinaryArithOpGenerator>::create( + leftOperand, rightOperand, JSValueRegs(params[0].gpr()), + JSValueRegs(params[1].gpr()), JSValueRegs(params[2].gpr()), + params.fpScratch(0), params.fpScratch(1), params.gpScratch(0), + scratchFPRUsage == NeedScratchFPR ? 
params.fpScratch(2) : InvalidFPRReg); + + generator->generateFastPath(jit); + + if (generator->didEmitFastPath()) { + generator->endJumpList().link(&jit); + CCallHelpers::Label done = jit.label(); + + params.addLatePath( + [=] (CCallHelpers& jit) { + AllowMacroScratchRegisterUsage allowScratch(jit); + + generator->slowPathJumpList().link(&jit); + callOperation( + *state, params.unavailableRegisters(), jit, node->origin.semantic, + exceptions.get(), slowPathFunction, params[0].gpr(), + params[1].gpr(), params[2].gpr()); + jit.jump().linkTo(done, &jit); + }); + } else { + callOperation( + *state, params.unavailableRegisters(), jit, node->origin.semantic, + exceptions.get(), slowPathFunction, params[0].gpr(), params[1].gpr(), + params[2].gpr()); + } + }); + + setJSValue(patchpoint); + } + + template<typename BinaryBitOpGenerator> + void emitBinaryBitOpSnippet(J_JITOperation_EJJ slowPathFunction) + { + Node* node = m_node; + + // FIXME: Make this do exceptions. + // https://bugs.webkit.org/show_bug.cgi?id=151686 + + LValue left = lowJSValue(node->child1()); + LValue right = lowJSValue(node->child2()); + + SnippetOperand leftOperand(m_state.forNode(node->child1()).resultType()); + SnippetOperand rightOperand(m_state.forNode(node->child2()).resultType()); + + PatchpointValue* patchpoint = m_out.patchpoint(Int64); + patchpoint->appendSomeRegister(left); + patchpoint->appendSomeRegister(right); + patchpoint->append(m_tagMask, ValueRep::reg(GPRInfo::tagMaskRegister)); + patchpoint->append(m_tagTypeNumber, ValueRep::reg(GPRInfo::tagTypeNumberRegister)); + RefPtr<PatchpointExceptionHandle> exceptionHandle = + preparePatchpointForExceptions(patchpoint); + patchpoint->numGPScratchRegisters = 1; + patchpoint->clobber(RegisterSet::macroScratchRegisters()); + State* state = &m_ftlState; + patchpoint->setGenerator( + [=] (CCallHelpers& jit, const StackmapGenerationParams& params) { + AllowMacroScratchRegisterUsage allowScratch(jit); + + Box<CCallHelpers::JumpList> exceptions = + exceptionHandle->scheduleExitCreation(params)->jumps(jit); + + auto generator = Box<BinaryBitOpGenerator>::create( + leftOperand, rightOperand, JSValueRegs(params[0].gpr()), + JSValueRegs(params[1].gpr()), JSValueRegs(params[2].gpr()), params.gpScratch(0)); + + generator->generateFastPath(jit); + generator->endJumpList().link(&jit); + CCallHelpers::Label done = jit.label(); + + params.addLatePath( + [=] (CCallHelpers& jit) { + AllowMacroScratchRegisterUsage allowScratch(jit); + + generator->slowPathJumpList().link(&jit); + callOperation( + *state, params.unavailableRegisters(), jit, node->origin.semantic, + exceptions.get(), slowPathFunction, params[0].gpr(), + params[1].gpr(), params[2].gpr()); + jit.jump().linkTo(done, &jit); + }); + }); + + setJSValue(patchpoint); + } + + void emitRightShiftSnippet(JITRightShiftGenerator::ShiftType shiftType) + { + Node* node = m_node; + + // FIXME: Make this do exceptions. 
+ // https://bugs.webkit.org/show_bug.cgi?id=151686 + + LValue left = lowJSValue(node->child1()); + LValue right = lowJSValue(node->child2()); + + SnippetOperand leftOperand(m_state.forNode(node->child1()).resultType()); + SnippetOperand rightOperand(m_state.forNode(node->child2()).resultType()); + + PatchpointValue* patchpoint = m_out.patchpoint(Int64); + patchpoint->appendSomeRegister(left); + patchpoint->appendSomeRegister(right); + patchpoint->append(m_tagMask, ValueRep::reg(GPRInfo::tagMaskRegister)); + patchpoint->append(m_tagTypeNumber, ValueRep::reg(GPRInfo::tagTypeNumberRegister)); + RefPtr<PatchpointExceptionHandle> exceptionHandle = + preparePatchpointForExceptions(patchpoint); + patchpoint->numGPScratchRegisters = 1; + patchpoint->numFPScratchRegisters = 1; + patchpoint->clobber(RegisterSet::macroScratchRegisters()); + State* state = &m_ftlState; + patchpoint->setGenerator( + [=] (CCallHelpers& jit, const StackmapGenerationParams& params) { + AllowMacroScratchRegisterUsage allowScratch(jit); + + Box<CCallHelpers::JumpList> exceptions = + exceptionHandle->scheduleExitCreation(params)->jumps(jit); + + auto generator = Box<JITRightShiftGenerator>::create( + leftOperand, rightOperand, JSValueRegs(params[0].gpr()), + JSValueRegs(params[1].gpr()), JSValueRegs(params[2].gpr()), + params.fpScratch(0), params.gpScratch(0), InvalidFPRReg, shiftType); + + generator->generateFastPath(jit); + generator->endJumpList().link(&jit); + CCallHelpers::Label done = jit.label(); + + params.addLatePath( + [=] (CCallHelpers& jit) { + AllowMacroScratchRegisterUsage allowScratch(jit); + + generator->slowPathJumpList().link(&jit); + + J_JITOperation_EJJ slowPathFunction = + shiftType == JITRightShiftGenerator::SignedShift + ? operationValueBitRShift : operationValueBitURShift; + + callOperation( + *state, params.unavailableRegisters(), jit, node->origin.semantic, + exceptions.get(), slowPathFunction, params[0].gpr(), + params[1].gpr(), params[2].gpr()); + jit.jump().linkTo(done, &jit); + }); + }); + + setJSValue(patchpoint); + } + + LValue allocateCell(LValue allocator, LBasicBlock slowPath) + { + LBasicBlock success = m_out.newBlock(); + + LValue result; + LValue condition; + if (Options::forceGCSlowPaths()) { + result = m_out.intPtrZero; + condition = m_out.booleanFalse; + } else { + result = m_out.loadPtr( + allocator, m_heaps.MarkedAllocator_freeListHead); + condition = m_out.notNull(result); + } + m_out.branch(condition, usually(success), rarely(slowPath)); + + m_out.appendTo(success); + + m_out.storePtr( + m_out.loadPtr(result, m_heaps.JSCell_freeListNext), + allocator, m_heaps.MarkedAllocator_freeListHead); + + return result; + } + + void storeStructure(LValue object, Structure* structure) + { + m_out.store32(m_out.constInt32(structure->id()), object, m_heaps.JSCell_structureID); + m_out.store32( + m_out.constInt32(structure->objectInitializationBlob()), + object, m_heaps.JSCell_usefulBytes); + } + + LValue allocateCell(LValue allocator, Structure* structure, LBasicBlock slowPath) + { + LValue result = allocateCell(allocator, slowPath); + storeStructure(result, structure); + return result; + } + + LValue allocateObject( + LValue allocator, Structure* structure, LValue butterfly, LBasicBlock slowPath) + { + LValue result = allocateCell(allocator, structure, slowPath); + m_out.storePtr(butterfly, result, m_heaps.JSObject_butterfly); + return result; + } + + template<typename ClassType> + LValue allocateObject( + size_t size, Structure* structure, LValue butterfly, LBasicBlock slowPath) + { + 
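+ // Picks the MarkedAllocator for ClassType and the requested size up front, then shares the
+ // generic cell-allocation fast path, which bails to slowPath when the free list is empty.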
MarkedAllocator* allocator = &vm().heap.allocatorForObjectOfType<ClassType>(size); + return allocateObject(m_out.constIntPtr(allocator), structure, butterfly, slowPath); + } + + template<typename ClassType> + LValue allocateObject(Structure* structure, LValue butterfly, LBasicBlock slowPath) + { + return allocateObject<ClassType>( + ClassType::allocationSize(0), structure, butterfly, slowPath); + } + + template<typename ClassType> + LValue allocateVariableSizedObject( + LValue size, Structure* structure, LValue butterfly, LBasicBlock slowPath) + { + static_assert(!(MarkedSpace::preciseStep & (MarkedSpace::preciseStep - 1)), "MarkedSpace::preciseStep must be a power of two."); + static_assert(!(MarkedSpace::impreciseStep & (MarkedSpace::impreciseStep - 1)), "MarkedSpace::impreciseStep must be a power of two."); + + LValue subspace = m_out.constIntPtr(&vm().heap.subspaceForObjectOfType<ClassType>()); + + LBasicBlock smallCaseBlock = m_out.newBlock(); + LBasicBlock largeOrOversizeCaseBlock = m_out.newBlock(); + LBasicBlock largeCaseBlock = m_out.newBlock(); + LBasicBlock continuation = m_out.newBlock(); + + LValue uproundedSize = m_out.add(size, m_out.constInt32(MarkedSpace::preciseStep - 1)); + LValue isSmall = m_out.below(uproundedSize, m_out.constInt32(MarkedSpace::preciseCutoff)); + m_out.branch(isSmall, unsure(smallCaseBlock), unsure(largeOrOversizeCaseBlock)); + + LBasicBlock lastNext = m_out.appendTo(smallCaseBlock, largeOrOversizeCaseBlock); + TypedPointer address = m_out.baseIndex( + m_heaps.MarkedSpace_Subspace_preciseAllocators, subspace, + m_out.zeroExtPtr(m_out.lShr(uproundedSize, m_out.constInt32(getLSBSet(MarkedSpace::preciseStep))))); + ValueFromBlock smallAllocator = m_out.anchor(address.value()); + m_out.jump(continuation); + + m_out.appendTo(largeOrOversizeCaseBlock, largeCaseBlock); + m_out.branch( + m_out.below(uproundedSize, m_out.constInt32(MarkedSpace::impreciseCutoff)), + usually(largeCaseBlock), rarely(slowPath)); + + m_out.appendTo(largeCaseBlock, continuation); + address = m_out.baseIndex( + m_heaps.MarkedSpace_Subspace_impreciseAllocators, subspace, + m_out.zeroExtPtr(m_out.lShr(uproundedSize, m_out.constInt32(getLSBSet(MarkedSpace::impreciseStep))))); + ValueFromBlock largeAllocator = m_out.anchor(address.value()); + m_out.jump(continuation); + + m_out.appendTo(continuation, lastNext); + LValue allocator = m_out.phi(m_out.intPtr, smallAllocator, largeAllocator); + + return allocateObject(allocator, structure, butterfly, slowPath); + } + + // Returns a pointer to the end of the allocation. 
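+ // This is a bump allocation out of the copied space: m_currentRemaining is decremented by
+ // size, and if the result would go negative the slow path runs instead.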
+ LValue allocateBasicStorageAndGetEnd(LValue size, LBasicBlock slowPath) + { + CopiedAllocator& allocator = vm().heap.storageAllocator(); + + LBasicBlock success = m_out.newBlock(); + + LValue remaining = m_out.loadPtr(m_out.absolute(&allocator.m_currentRemaining)); + LValue newRemaining = m_out.sub(remaining, size); + + m_out.branch( + m_out.lessThan(newRemaining, m_out.intPtrZero), + rarely(slowPath), usually(success)); + + m_out.appendTo(success); + + m_out.storePtr(newRemaining, m_out.absolute(&allocator.m_currentRemaining)); + return m_out.sub( + m_out.loadPtr(m_out.absolute(&allocator.m_currentPayloadEnd)), newRemaining); + } + + LValue allocateBasicStorage(LValue size, LBasicBlock slowPath) + { + return m_out.sub(allocateBasicStorageAndGetEnd(size, slowPath), size); + } + + LValue allocateObject(Structure* structure) + { + size_t allocationSize = JSFinalObject::allocationSize(structure->inlineCapacity()); + MarkedAllocator* allocator = &vm().heap.allocatorForObjectWithoutDestructor(allocationSize); + + LBasicBlock slowPath = m_out.newBlock(); + LBasicBlock continuation = m_out.newBlock(); + + LBasicBlock lastNext = m_out.insertNewBlocksBefore(slowPath); + + ValueFromBlock fastResult = m_out.anchor(allocateObject( + m_out.constIntPtr(allocator), structure, m_out.intPtrZero, slowPath)); + + m_out.jump(continuation); + + m_out.appendTo(slowPath, continuation); + + LValue slowResultValue = lazySlowPath( + [=] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> { + return createLazyCallGenerator( + operationNewObject, locations[0].directGPR(), + CCallHelpers::TrustedImmPtr(structure)); + }); + ValueFromBlock slowResult = m_out.anchor(slowResultValue); + m_out.jump(continuation); + + m_out.appendTo(continuation, lastNext); + return m_out.phi(m_out.intPtr, fastResult, slowResult); + } + + struct ArrayValues { + ArrayValues() + : array(0) + , butterfly(0) + { + } + + ArrayValues(LValue array, LValue butterfly) + : array(array) + , butterfly(butterfly) + { + } + + LValue array; + LValue butterfly; + }; + ArrayValues allocateJSArray( + Structure* structure, unsigned numElements, LBasicBlock slowPath) + { + ASSERT( + hasUndecided(structure->indexingType()) + || hasInt32(structure->indexingType()) + || hasDouble(structure->indexingType()) + || hasContiguous(structure->indexingType())); + + unsigned vectorLength = std::max(BASE_VECTOR_LEN, numElements); + + LValue endOfStorage = allocateBasicStorageAndGetEnd( + m_out.constIntPtr(sizeof(JSValue) * vectorLength + sizeof(IndexingHeader)), + slowPath); + + LValue butterfly = m_out.sub( + endOfStorage, m_out.constIntPtr(sizeof(JSValue) * vectorLength)); + + LValue object = allocateObject<JSArray>( + structure, butterfly, slowPath); + + m_out.store32(m_out.constInt32(numElements), butterfly, m_heaps.Butterfly_publicLength); + m_out.store32(m_out.constInt32(vectorLength), butterfly, m_heaps.Butterfly_vectorLength); + + if (hasDouble(structure->indexingType())) { + for (unsigned i = numElements; i < vectorLength; ++i) { + m_out.store64( + m_out.constInt64(bitwise_cast<int64_t>(PNaN)), + butterfly, m_heaps.indexedDoubleProperties[i]); + } + } + + return ArrayValues(object, butterfly); + } + + ArrayValues allocateJSArray(Structure* structure, unsigned numElements) + { + LBasicBlock slowPath = m_out.newBlock(); + LBasicBlock continuation = m_out.newBlock(); + + LBasicBlock lastNext = m_out.insertNewBlocksBefore(slowPath); + + ArrayValues fastValues = allocateJSArray(structure, numElements, slowPath); + ValueFromBlock fastArray = 
m_out.anchor(fastValues.array); + ValueFromBlock fastButterfly = m_out.anchor(fastValues.butterfly); + + m_out.jump(continuation); + + m_out.appendTo(slowPath, continuation); + + LValue slowArrayValue = lazySlowPath( + [=] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> { + return createLazyCallGenerator( + operationNewArrayWithSize, locations[0].directGPR(), + CCallHelpers::TrustedImmPtr(structure), CCallHelpers::TrustedImm32(numElements)); + }); + ValueFromBlock slowArray = m_out.anchor(slowArrayValue); + ValueFromBlock slowButterfly = m_out.anchor( + m_out.loadPtr(slowArrayValue, m_heaps.JSObject_butterfly)); + + m_out.jump(continuation); + + m_out.appendTo(continuation, lastNext); + + return ArrayValues( + m_out.phi(m_out.intPtr, fastArray, slowArray), + m_out.phi(m_out.intPtr, fastButterfly, slowButterfly)); + } + + LValue boolify(Edge edge) + { + switch (edge.useKind()) { + case BooleanUse: + case KnownBooleanUse: + return lowBoolean(edge); + case Int32Use: + return m_out.notZero32(lowInt32(edge)); + case DoubleRepUse: + return m_out.doubleNotEqualAndOrdered(lowDouble(edge), m_out.doubleZero); + case ObjectOrOtherUse: + return m_out.logicalNot( + equalNullOrUndefined( + edge, CellCaseSpeculatesObject, SpeculateNullOrUndefined, + ManualOperandSpeculation)); + case StringUse: { + LValue stringValue = lowString(edge); + LValue length = m_out.load32NonNegative(stringValue, m_heaps.JSString_length); + return m_out.notEqual(length, m_out.int32Zero); + } + case StringOrOtherUse: { + LValue value = lowJSValue(edge, ManualOperandSpeculation); + + LBasicBlock cellCase = m_out.newBlock(); + LBasicBlock notCellCase = m_out.newBlock(); + LBasicBlock continuation = m_out.newBlock(); + + m_out.branch(isCell(value, provenType(edge)), unsure(cellCase), unsure(notCellCase)); + + LBasicBlock lastNext = m_out.appendTo(cellCase, notCellCase); + + FTL_TYPE_CHECK(jsValueValue(value), edge, (~SpecCell) | SpecString, isNotString(value)); + LValue length = m_out.load32NonNegative(value, m_heaps.JSString_length); + ValueFromBlock cellResult = m_out.anchor(m_out.notEqual(length, m_out.int32Zero)); + m_out.jump(continuation); + + m_out.appendTo(notCellCase, continuation); + + FTL_TYPE_CHECK(jsValueValue(value), edge, SpecCell | SpecOther, isNotOther(value)); + ValueFromBlock notCellResult = m_out.anchor(m_out.booleanFalse); + m_out.jump(continuation); + m_out.appendTo(continuation, lastNext); + + return m_out.phi(Int32, cellResult, notCellResult); + } + case UntypedUse: { + LValue value = lowJSValue(edge); + + // Implements the following control flow structure: + // if (value is cell) { + // if (value is string) + // result = !!value->length + // else { + // do evil things for masquerades-as-undefined + // result = true + // } + // } else if (value is int32) { + // result = !!unboxInt32(value) + // } else if (value is number) { + // result = !!unboxDouble(value) + // } else { + // result = value == jsTrue + // } + + LBasicBlock cellCase = m_out.newBlock(); + LBasicBlock stringCase = m_out.newBlock(); + LBasicBlock notStringCase = m_out.newBlock(); + LBasicBlock notCellCase = m_out.newBlock(); + LBasicBlock int32Case = m_out.newBlock(); + LBasicBlock notInt32Case = m_out.newBlock(); + LBasicBlock doubleCase = m_out.newBlock(); + LBasicBlock notDoubleCase = m_out.newBlock(); + LBasicBlock continuation = m_out.newBlock(); + + Vector<ValueFromBlock> results; + + m_out.branch(isCell(value, provenType(edge)), unsure(cellCase), unsure(notCellCase)); + + LBasicBlock lastNext = 
m_out.appendTo(cellCase, stringCase); + m_out.branch( + isString(value, provenType(edge) & SpecCell), + unsure(stringCase), unsure(notStringCase)); + + m_out.appendTo(stringCase, notStringCase); + LValue nonEmptyString = m_out.notZero32( + m_out.load32NonNegative(value, m_heaps.JSString_length)); + results.append(m_out.anchor(nonEmptyString)); + m_out.jump(continuation); + + m_out.appendTo(notStringCase, notCellCase); + LValue isTruthyObject; + if (masqueradesAsUndefinedWatchpointIsStillValid()) + isTruthyObject = m_out.booleanTrue; + else { + LBasicBlock masqueradesCase = m_out.newBlock(); + + results.append(m_out.anchor(m_out.booleanTrue)); + + m_out.branch( + m_out.testIsZero32( + m_out.load8ZeroExt32(value, m_heaps.JSCell_typeInfoFlags), + m_out.constInt32(MasqueradesAsUndefined)), + usually(continuation), rarely(masqueradesCase)); + + m_out.appendTo(masqueradesCase); + + isTruthyObject = m_out.notEqual( + m_out.constIntPtr(m_graph.globalObjectFor(m_node->origin.semantic)), + m_out.loadPtr(loadStructure(value), m_heaps.Structure_globalObject)); + } + results.append(m_out.anchor(isTruthyObject)); + m_out.jump(continuation); + + m_out.appendTo(notCellCase, int32Case); + m_out.branch( + isInt32(value, provenType(edge) & ~SpecCell), + unsure(int32Case), unsure(notInt32Case)); + + m_out.appendTo(int32Case, notInt32Case); + results.append(m_out.anchor(m_out.notZero32(unboxInt32(value)))); + m_out.jump(continuation); + + m_out.appendTo(notInt32Case, doubleCase); + m_out.branch( + isNumber(value, provenType(edge) & ~SpecCell), + unsure(doubleCase), unsure(notDoubleCase)); + + m_out.appendTo(doubleCase, notDoubleCase); + LValue doubleIsTruthy = m_out.doubleNotEqualAndOrdered( + unboxDouble(value), m_out.constDouble(0)); + results.append(m_out.anchor(doubleIsTruthy)); + m_out.jump(continuation); + + m_out.appendTo(notDoubleCase, continuation); + LValue miscIsTruthy = m_out.equal( + value, m_out.constInt64(JSValue::encode(jsBoolean(true)))); + results.append(m_out.anchor(miscIsTruthy)); + m_out.jump(continuation); + + m_out.appendTo(continuation, lastNext); + return m_out.phi(m_out.boolean, results); + } + default: + DFG_CRASH(m_graph, m_node, "Bad use kind"); + return 0; + } + } + + enum StringOrObjectMode { + AllCellsAreFalse, + CellCaseSpeculatesObject + }; + enum EqualNullOrUndefinedMode { + EqualNull, + EqualUndefined, + EqualNullOrUndefined, + SpeculateNullOrUndefined + }; + LValue equalNullOrUndefined( + Edge edge, StringOrObjectMode cellMode, EqualNullOrUndefinedMode primitiveMode, + OperandSpeculationMode operandMode = AutomaticOperandSpeculation) + { + bool validWatchpoint = masqueradesAsUndefinedWatchpointIsStillValid(); + + LValue value = lowJSValue(edge, operandMode); + + LBasicBlock cellCase = m_out.newBlock(); + LBasicBlock primitiveCase = m_out.newBlock(); + LBasicBlock continuation = m_out.newBlock(); + + m_out.branch(isNotCell(value, provenType(edge)), unsure(primitiveCase), unsure(cellCase)); + + LBasicBlock lastNext = m_out.appendTo(cellCase, primitiveCase); + + Vector<ValueFromBlock, 3> results; + + switch (cellMode) { + case AllCellsAreFalse: + break; + case CellCaseSpeculatesObject: + FTL_TYPE_CHECK( + jsValueValue(value), edge, (~SpecCell) | SpecObject, isNotObject(value)); + break; + } + + if (validWatchpoint) { + results.append(m_out.anchor(m_out.booleanFalse)); + m_out.jump(continuation); + } else { + LBasicBlock masqueradesCase = + m_out.newBlock(); + + results.append(m_out.anchor(m_out.booleanFalse)); + + m_out.branch( + m_out.testNonZero32( + 
m_out.load8ZeroExt32(value, m_heaps.JSCell_typeInfoFlags), + m_out.constInt32(MasqueradesAsUndefined)), + rarely(masqueradesCase), usually(continuation)); + + m_out.appendTo(masqueradesCase, primitiveCase); + + LValue structure = loadStructure(value); + + results.append(m_out.anchor( + m_out.equal( + m_out.constIntPtr(m_graph.globalObjectFor(m_node->origin.semantic)), + m_out.loadPtr(structure, m_heaps.Structure_globalObject)))); + m_out.jump(continuation); + } + + m_out.appendTo(primitiveCase, continuation); + + LValue primitiveResult; + switch (primitiveMode) { + case EqualNull: + primitiveResult = m_out.equal(value, m_out.constInt64(ValueNull)); + break; + case EqualUndefined: + primitiveResult = m_out.equal(value, m_out.constInt64(ValueUndefined)); + break; + case EqualNullOrUndefined: + primitiveResult = isOther(value, provenType(edge)); + break; + case SpeculateNullOrUndefined: + FTL_TYPE_CHECK( + jsValueValue(value), edge, SpecCell | SpecOther, isNotOther(value)); + primitiveResult = m_out.booleanTrue; + break; + } + results.append(m_out.anchor(primitiveResult)); + m_out.jump(continuation); + + m_out.appendTo(continuation, lastNext); + + return m_out.phi(m_out.boolean, results); + } + + template<typename FunctionType> + void contiguousPutByValOutOfBounds( + FunctionType slowPathFunction, LValue base, LValue storage, LValue index, LValue value, + LBasicBlock continuation) + { + LValue isNotInBounds = m_out.aboveOrEqual( + index, m_out.load32NonNegative(storage, m_heaps.Butterfly_publicLength)); + if (!m_node->arrayMode().isInBounds()) { + LBasicBlock notInBoundsCase = + m_out.newBlock(); + LBasicBlock performStore = + m_out.newBlock(); + + m_out.branch(isNotInBounds, unsure(notInBoundsCase), unsure(performStore)); + + LBasicBlock lastNext = m_out.appendTo(notInBoundsCase, performStore); + + LValue isOutOfBounds = m_out.aboveOrEqual( + index, m_out.load32NonNegative(storage, m_heaps.Butterfly_vectorLength)); + + if (!m_node->arrayMode().isOutOfBounds()) + speculate(OutOfBounds, noValue(), 0, isOutOfBounds); + else { + LBasicBlock outOfBoundsCase = + m_out.newBlock(); + LBasicBlock holeCase = + m_out.newBlock(); + + m_out.branch(isOutOfBounds, rarely(outOfBoundsCase), usually(holeCase)); + + LBasicBlock innerLastNext = m_out.appendTo(outOfBoundsCase, holeCase); + + vmCall( + m_out.voidType, m_out.operation(slowPathFunction), + m_callFrame, base, index, value); + + m_out.jump(continuation); + + m_out.appendTo(holeCase, innerLastNext); + } + + m_out.store32( + m_out.add(index, m_out.int32One), + storage, m_heaps.Butterfly_publicLength); + + m_out.jump(performStore); + m_out.appendTo(performStore, lastNext); + } + } + + void buildSwitch(SwitchData* data, LType type, LValue switchValue) + { + ASSERT(type == m_out.intPtr || type == m_out.int32); + + Vector<SwitchCase> cases; + for (unsigned i = 0; i < data->cases.size(); ++i) { + SwitchCase newCase; + + if (type == m_out.intPtr) { + newCase = SwitchCase(m_out.constIntPtr(data->cases[i].value.switchLookupValue(data->kind)), + lowBlock(data->cases[i].target.block), Weight(data->cases[i].target.count)); + } else if (type == m_out.int32) { + newCase = SwitchCase(m_out.constInt32(data->cases[i].value.switchLookupValue(data->kind)), + lowBlock(data->cases[i].target.block), Weight(data->cases[i].target.count)); + } else + CRASH(); + + cases.append(newCase); + } + + m_out.switchInstruction( + switchValue, cases, + lowBlock(data->fallThrough.block), Weight(data->fallThrough.count)); + } + + void switchString(SwitchData* data, LValue string) + { + 
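+        // Decide whether we can compile this as a binary switch over the case strings'
+        // characters: every case string must be 8-bit and no longer than
+        // Options::maximumBinaryStringSwitchCaseLength(), and the combined length must stay
+        // under Options::maximumBinaryStringSwitchTotalLength(). Otherwise we fall back to
+        // switchStringSlow(), which asks the runtime to consult the bytecode's string jump
+        // table.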
bool canDoBinarySwitch = true; + unsigned totalLength = 0; + + for (DFG::SwitchCase myCase : data->cases) { + StringImpl* string = myCase.value.stringImpl(); + if (!string->is8Bit()) { + canDoBinarySwitch = false; + break; + } + if (string->length() > Options::maximumBinaryStringSwitchCaseLength()) { + canDoBinarySwitch = false; + break; + } + totalLength += string->length(); + } + + if (!canDoBinarySwitch || totalLength > Options::maximumBinaryStringSwitchTotalLength()) { + switchStringSlow(data, string); + return; + } + + LValue stringImpl = m_out.loadPtr(string, m_heaps.JSString_value); + LValue length = m_out.load32(string, m_heaps.JSString_length); + + LBasicBlock hasImplBlock = m_out.newBlock(); + LBasicBlock is8BitBlock = m_out.newBlock(); + LBasicBlock slowBlock = m_out.newBlock(); + + m_out.branch(m_out.isNull(stringImpl), unsure(slowBlock), unsure(hasImplBlock)); + + LBasicBlock lastNext = m_out.appendTo(hasImplBlock, is8BitBlock); + + m_out.branch( + m_out.testIsZero32( + m_out.load32(stringImpl, m_heaps.StringImpl_hashAndFlags), + m_out.constInt32(StringImpl::flagIs8Bit())), + unsure(slowBlock), unsure(is8BitBlock)); + + m_out.appendTo(is8BitBlock, slowBlock); + + LValue buffer = m_out.loadPtr(stringImpl, m_heaps.StringImpl_data); + + // FIXME: We should propagate branch weight data to the cases of this switch. + // https://bugs.webkit.org/show_bug.cgi?id=144368 + + Vector<StringSwitchCase> cases; + for (DFG::SwitchCase myCase : data->cases) + cases.append(StringSwitchCase(myCase.value.stringImpl(), lowBlock(myCase.target.block))); + std::sort(cases.begin(), cases.end()); + switchStringRecurse(data, buffer, length, cases, 0, 0, cases.size(), 0, false); + + m_out.appendTo(slowBlock, lastNext); + switchStringSlow(data, string); + } + + // The code for string switching is based closely on the same code in the DFG backend. While it + // would be nice to reduce the amount of similar-looking code, it seems like this is one of + // those algorithms where factoring out the common bits would result in more code than just + // duplicating. 
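+    //
+    // As a purely hypothetical illustration (these case strings are made up and do not come
+    // from any real switch), suppose the sorted cases are "add", "and" and "array". The
+    // recursion first checks that length >= 3 and that string[0] == 'a', then emits a
+    // character switch on string[1]:
+    //
+    //     switch (string[1]) {
+    //     case 'd': // only "add" can match; check that length == 3 and string[2] == 'd'
+    //     case 'n': // only "and" can match; same shape of checks
+    //     case 'r': // only "array" can match; check the remaining characters and the length
+    //     default: goto fallThrough;
+    //     }
+    //
+    // Each arm recurses with numChecked advanced past the switched character.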
+ + struct StringSwitchCase { + StringSwitchCase() { } + + StringSwitchCase(StringImpl* string, LBasicBlock target) + : string(string) + , target(target) + { + } + + bool operator<(const StringSwitchCase& other) const + { + return stringLessThan(*string, *other.string); + } + + StringImpl* string; + LBasicBlock target; + }; + + struct CharacterCase { + CharacterCase() + : character(0) + , begin(0) + , end(0) + { + } + + CharacterCase(LChar character, unsigned begin, unsigned end) + : character(character) + , begin(begin) + , end(end) + { + } + + bool operator<(const CharacterCase& other) const + { + return character < other.character; + } + + LChar character; + unsigned begin; + unsigned end; + }; + + void switchStringRecurse( + SwitchData* data, LValue buffer, LValue length, const Vector<StringSwitchCase>& cases, + unsigned numChecked, unsigned begin, unsigned end, unsigned alreadyCheckedLength, + unsigned checkedExactLength) + { + LBasicBlock fallThrough = lowBlock(data->fallThrough.block); + + if (begin == end) { + m_out.jump(fallThrough); + return; + } + + unsigned minLength = cases[begin].string->length(); + unsigned commonChars = minLength; + bool allLengthsEqual = true; + for (unsigned i = begin + 1; i < end; ++i) { + unsigned myCommonChars = numChecked; + unsigned limit = std::min(cases[begin].string->length(), cases[i].string->length()); + for (unsigned j = numChecked; j < limit; ++j) { + if (cases[begin].string->at(j) != cases[i].string->at(j)) + break; + myCommonChars++; + } + commonChars = std::min(commonChars, myCommonChars); + if (minLength != cases[i].string->length()) + allLengthsEqual = false; + minLength = std::min(minLength, cases[i].string->length()); + } + + if (checkedExactLength) { + DFG_ASSERT(m_graph, m_node, alreadyCheckedLength == minLength); + DFG_ASSERT(m_graph, m_node, allLengthsEqual); + } + + DFG_ASSERT(m_graph, m_node, minLength >= commonChars); + + if (!allLengthsEqual && alreadyCheckedLength < minLength) + m_out.check(m_out.below(length, m_out.constInt32(minLength)), unsure(fallThrough)); + if (allLengthsEqual && (alreadyCheckedLength < minLength || !checkedExactLength)) + m_out.check(m_out.notEqual(length, m_out.constInt32(minLength)), unsure(fallThrough)); + + for (unsigned i = numChecked; i < commonChars; ++i) { + m_out.check( + m_out.notEqual( + m_out.load8ZeroExt32(buffer, m_heaps.characters8[i]), + m_out.constInt32(static_cast<uint16_t>(cases[begin].string->at(i)))), + unsure(fallThrough)); + } + + if (minLength == commonChars) { + // This is the case where one of the cases is a prefix of all of the other cases. + // We've already checked that the input string is a prefix of all of the cases, + // so we just check length to jump to that case. + + DFG_ASSERT(m_graph, m_node, cases[begin].string->length() == commonChars); + for (unsigned i = begin + 1; i < end; ++i) + DFG_ASSERT(m_graph, m_node, cases[i].string->length() > commonChars); + + if (allLengthsEqual) { + DFG_ASSERT(m_graph, m_node, end == begin + 1); + m_out.jump(cases[begin].target); + return; + } + + m_out.check( + m_out.equal(length, m_out.constInt32(commonChars)), + unsure(cases[begin].target)); + + // We've checked if the length is >= minLength, and then we checked if the length is + // == commonChars. We get to this point if it is >= minLength but not == commonChars. + // Hence we know that it now must be > minLength, i.e. that it's >= minLength + 1. 
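+            // That is why the recursive call below drops cases[begin] (the prefix case we just
+            // dispatched on) and passes minLength + 1 as the length bound that has already been
+            // established.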
+ switchStringRecurse( + data, buffer, length, cases, commonChars, begin + 1, end, minLength + 1, false); + return; + } + + // At this point we know that the string is longer than commonChars, and we've only verified + // commonChars. Use a binary switch on the next unchecked character, i.e. + // string[commonChars]. + + DFG_ASSERT(m_graph, m_node, end >= begin + 2); + + LValue uncheckedChar = m_out.load8ZeroExt32(buffer, m_heaps.characters8[commonChars]); + + Vector<CharacterCase> characterCases; + CharacterCase currentCase(cases[begin].string->at(commonChars), begin, begin + 1); + for (unsigned i = begin + 1; i < end; ++i) { + LChar currentChar = cases[i].string->at(commonChars); + if (currentChar != currentCase.character) { + currentCase.end = i; + characterCases.append(currentCase); + currentCase = CharacterCase(currentChar, i, i + 1); + } else + currentCase.end = i + 1; + } + characterCases.append(currentCase); + + Vector<LBasicBlock> characterBlocks; + for (unsigned i = characterCases.size(); i--;) + characterBlocks.append(m_out.newBlock()); + + Vector<SwitchCase> switchCases; + for (unsigned i = 0; i < characterCases.size(); ++i) { + if (i) + DFG_ASSERT(m_graph, m_node, characterCases[i - 1].character < characterCases[i].character); + switchCases.append(SwitchCase( + m_out.constInt32(characterCases[i].character), characterBlocks[i], Weight())); + } + m_out.switchInstruction(uncheckedChar, switchCases, fallThrough, Weight()); + + LBasicBlock lastNext = m_out.m_nextBlock; + characterBlocks.append(lastNext); // Makes it convenient to set nextBlock. + for (unsigned i = 0; i < characterCases.size(); ++i) { + m_out.appendTo(characterBlocks[i], characterBlocks[i + 1]); + switchStringRecurse( + data, buffer, length, cases, commonChars + 1, + characterCases[i].begin, characterCases[i].end, minLength, allLengthsEqual); + } + + DFG_ASSERT(m_graph, m_node, m_out.m_nextBlock == lastNext); + } + + void switchStringSlow(SwitchData* data, LValue string) + { + // FIXME: We ought to be able to use computed gotos here. We would save the labels of the + // blocks we want to jump to, and then request their addresses after compilation completes. + // https://bugs.webkit.org/show_bug.cgi?id=144369 + + LValue branchOffset = vmCall( + m_out.int32, m_out.operation(operationSwitchStringAndGetBranchOffset), + m_callFrame, m_out.constIntPtr(data->switchTableIndex), string); + + StringJumpTable& table = codeBlock()->stringSwitchJumpTable(data->switchTableIndex); + + Vector<SwitchCase> cases; + std::unordered_set<int32_t> alreadyHandled; // These may be negative, or zero, or probably other stuff, too. We don't want to mess with HashSet's corner cases and we don't really care about throughput here. + for (unsigned i = 0; i < data->cases.size(); ++i) { + // FIXME: The fact that we're using the bytecode's switch table means that the + // following DFG IR transformation would be invalid. + // + // Original code: + // switch (v) { + // case "foo": + // case "bar": + // things(); + // break; + // default: + // break; + // } + // + // New code: + // switch (v) { + // case "foo": + // instrumentFoo(); + // goto _things; + // case "bar": + // instrumentBar(); + // _things: + // things(); + // break; + // default: + // break; + // } + // + // Luckily, we don't currently do any such transformation. But it's kind of silly that + // this is an issue. 
+ // https://bugs.webkit.org/show_bug.cgi?id=144635 + + DFG::SwitchCase myCase = data->cases[i]; + StringJumpTable::StringOffsetTable::iterator iter = + table.offsetTable.find(myCase.value.stringImpl()); + DFG_ASSERT(m_graph, m_node, iter != table.offsetTable.end()); + + if (!alreadyHandled.insert(iter->value.branchOffset).second) + continue; + + cases.append(SwitchCase( + m_out.constInt32(iter->value.branchOffset), + lowBlock(myCase.target.block), Weight(myCase.target.count))); + } + + m_out.switchInstruction( + branchOffset, cases, lowBlock(data->fallThrough.block), + Weight(data->fallThrough.count)); + } + + // Calls the functor at the point of code generation where we know what the result type is. + // You can emit whatever code you like at that point. Expects you to terminate the basic block. + // When buildTypeOf() returns, it will have terminated all basic blocks that it created. So, if + // you aren't using this as the terminator of a high-level block, you should create your own + // contination and set it as the nextBlock (m_out.insertNewBlocksBefore(continuation)) before + // calling this. For example: + // + // LBasicBlock continuation = m_out.newBlock(); + // LBasicBlock lastNext = m_out.insertNewBlocksBefore(continuation); + // buildTypeOf( + // child, value, + // [&] (TypeofType type) { + // do things; + // m_out.jump(continuation); + // }); + // m_out.appendTo(continuation, lastNext); + template<typename Functor> + void buildTypeOf(Edge child, LValue value, const Functor& functor) + { + JSGlobalObject* globalObject = m_graph.globalObjectFor(m_node->origin.semantic); + + // Implements the following branching structure: + // + // if (is cell) { + // if (is object) { + // if (is function) { + // return function; + // } else if (doesn't have call trap and doesn't masquerade as undefined) { + // return object + // } else { + // return slowPath(); + // } + // } else if (is string) { + // return string + // } else { + // return symbol + // } + // } else if (is number) { + // return number + // } else if (is null) { + // return object + // } else if (is boolean) { + // return boolean + // } else { + // return undefined + // } + + LBasicBlock cellCase = m_out.newBlock(); + LBasicBlock objectCase = m_out.newBlock(); + LBasicBlock functionCase = m_out.newBlock(); + LBasicBlock notFunctionCase = m_out.newBlock(); + LBasicBlock reallyObjectCase = m_out.newBlock(); + LBasicBlock slowPath = m_out.newBlock(); + LBasicBlock unreachable = m_out.newBlock(); + LBasicBlock notObjectCase = m_out.newBlock(); + LBasicBlock stringCase = m_out.newBlock(); + LBasicBlock symbolCase = m_out.newBlock(); + LBasicBlock notCellCase = m_out.newBlock(); + LBasicBlock numberCase = m_out.newBlock(); + LBasicBlock notNumberCase = m_out.newBlock(); + LBasicBlock notNullCase = m_out.newBlock(); + LBasicBlock booleanCase = m_out.newBlock(); + LBasicBlock undefinedCase = m_out.newBlock(); + + m_out.branch(isCell(value, provenType(child)), unsure(cellCase), unsure(notCellCase)); + + LBasicBlock lastNext = m_out.appendTo(cellCase, objectCase); + m_out.branch(isObject(value, provenType(child)), unsure(objectCase), unsure(notObjectCase)); + + m_out.appendTo(objectCase, functionCase); + m_out.branch( + isFunction(value, provenType(child) & SpecObject), + unsure(functionCase), unsure(notFunctionCase)); + + m_out.appendTo(functionCase, notFunctionCase); + functor(TypeofType::Function); + + m_out.appendTo(notFunctionCase, reallyObjectCase); + m_out.branch( + isExoticForTypeof(value, provenType(child) & (SpecObject - 
SpecFunction)), + rarely(slowPath), usually(reallyObjectCase)); + + m_out.appendTo(reallyObjectCase, slowPath); + functor(TypeofType::Object); + + m_out.appendTo(slowPath, unreachable); + LValue result = lazySlowPath( + [=] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> { + return createLazyCallGenerator( + operationTypeOfObjectAsTypeofType, locations[0].directGPR(), + CCallHelpers::TrustedImmPtr(globalObject), locations[1].directGPR()); + }, value); + Vector<SwitchCase, 3> cases; + cases.append(SwitchCase(m_out.constInt32(static_cast<int32_t>(TypeofType::Undefined)), undefinedCase)); + cases.append(SwitchCase(m_out.constInt32(static_cast<int32_t>(TypeofType::Object)), reallyObjectCase)); + cases.append(SwitchCase(m_out.constInt32(static_cast<int32_t>(TypeofType::Function)), functionCase)); + m_out.switchInstruction(m_out.castToInt32(result), cases, unreachable, Weight()); + + m_out.appendTo(unreachable, notObjectCase); + m_out.unreachable(); + + m_out.appendTo(notObjectCase, stringCase); + m_out.branch( + isString(value, provenType(child) & (SpecCell - SpecObject)), + unsure(stringCase), unsure(symbolCase)); + + m_out.appendTo(stringCase, symbolCase); + functor(TypeofType::String); + + m_out.appendTo(symbolCase, notCellCase); + functor(TypeofType::Symbol); + + m_out.appendTo(notCellCase, numberCase); + m_out.branch( + isNumber(value, provenType(child) & ~SpecCell), + unsure(numberCase), unsure(notNumberCase)); + + m_out.appendTo(numberCase, notNumberCase); + functor(TypeofType::Number); + + m_out.appendTo(notNumberCase, notNullCase); + LValue isNull; + if (provenType(child) & SpecOther) + isNull = m_out.equal(value, m_out.constInt64(ValueNull)); + else + isNull = m_out.booleanFalse; + m_out.branch(isNull, unsure(reallyObjectCase), unsure(notNullCase)); + + m_out.appendTo(notNullCase, booleanCase); + m_out.branch( + isBoolean(value, provenType(child) & ~(SpecCell | SpecFullNumber)), + unsure(booleanCase), unsure(undefinedCase)); + + m_out.appendTo(booleanCase, undefinedCase); + functor(TypeofType::Boolean); + + m_out.appendTo(undefinedCase, lastNext); + functor(TypeofType::Undefined); + } + + LValue doubleToInt32(LValue doubleValue, double low, double high, bool isSigned = true) + { + LBasicBlock greatEnough = m_out.newBlock(); + LBasicBlock withinRange = m_out.newBlock(); + LBasicBlock slowPath = m_out.newBlock(); + LBasicBlock continuation = m_out.newBlock(); + + Vector<ValueFromBlock, 2> results; + + m_out.branch( + m_out.doubleGreaterThanOrEqual(doubleValue, m_out.constDouble(low)), + unsure(greatEnough), unsure(slowPath)); + + LBasicBlock lastNext = m_out.appendTo(greatEnough, withinRange); + m_out.branch( + m_out.doubleLessThanOrEqual(doubleValue, m_out.constDouble(high)), + unsure(withinRange), unsure(slowPath)); + + m_out.appendTo(withinRange, slowPath); + LValue fastResult; + if (isSigned) + fastResult = m_out.doubleToInt(doubleValue); + else + fastResult = m_out.doubleToUInt(doubleValue); + results.append(m_out.anchor(fastResult)); + m_out.jump(continuation); + + m_out.appendTo(slowPath, continuation); + results.append(m_out.anchor(m_out.call(m_out.int32, m_out.operation(toInt32), doubleValue))); + m_out.jump(continuation); + + m_out.appendTo(continuation, lastNext); + return m_out.phi(m_out.int32, results); + } + + LValue doubleToInt32(LValue doubleValue) + { + if (Output::hasSensibleDoubleToInt()) + return sensibleDoubleToInt32(doubleValue); + + double limit = pow(2, 31) - 1; + return doubleToInt32(doubleValue, -limit, limit); + } + + LValue 
sensibleDoubleToInt32(LValue doubleValue) + { + LBasicBlock slowPath = m_out.newBlock(); + LBasicBlock continuation = m_out.newBlock(); + + LValue fastResultValue = m_out.doubleToInt(doubleValue); + ValueFromBlock fastResult = m_out.anchor(fastResultValue); + m_out.branch( + m_out.equal(fastResultValue, m_out.constInt32(0x80000000)), + rarely(slowPath), usually(continuation)); + + LBasicBlock lastNext = m_out.appendTo(slowPath, continuation); + ValueFromBlock slowResult = m_out.anchor( + m_out.call(m_out.int32, m_out.operation(toInt32), doubleValue)); + m_out.jump(continuation); + + m_out.appendTo(continuation, lastNext); + return m_out.phi(m_out.int32, fastResult, slowResult); + } + + // This is a mechanism for creating a code generator that fills in a gap in the code using our + // own MacroAssembler. This is useful for slow paths that involve a lot of code and we don't want + // to pay the price of B3 optimizing it. A lazy slow path will only be generated if it actually + // executes. On the other hand, a lazy slow path always incurs the cost of two additional jumps. + // Also, the lazy slow path's register allocation state is slaved to whatever B3 did, so you + // have to use a ScratchRegisterAllocator to try to use some unused registers and you may have + // to spill to top of stack if there aren't enough registers available. + // + // Lazy slow paths involve three different stages of execution. Each stage has unique + // capabilities and knowledge. The stages are: + // + // 1) DFG->B3 lowering, i.e. code that runs in this phase. Lowering is the last time you will + // have access to LValues. If there is an LValue that needs to be fed as input to a lazy slow + // path, then you must pass it as an argument here (as one of the varargs arguments after the + // functor). But, lowering doesn't know which registers will be used for those LValues. Hence + // you pass a lambda to lazySlowPath() and that lambda will run during stage (2): + // + // 2) FTLCompile.cpp's fixFunctionBasedOnStackMaps. This code is the only stage at which we know + // the mapping from arguments passed to this method in (1) and the registers that B3 + // selected for those arguments. You don't actually want to generate any code here, since then + // the slow path wouldn't actually be lazily generated. Instead, you want to save the + // registers being used for the arguments and defer code generation to stage (3) by creating + // and returning a LazySlowPath::Generator: + // + // 3) LazySlowPath's generate() method. This code runs in response to the lazy slow path + // executing for the first time. It will call the generator you created in stage (2). + // + // Note that each time you invoke stage (1), stage (2) may be invoked zero, one, or many times. + // Stage (2) will usually be invoked once for stage (1). But, B3 may kill the code, in which + // case stage (2) won't run. B3 may duplicate the code (for example via tail duplication), + // leading to many calls to your stage (2) lambda. Stage (3) may be called zero or once for each + // stage (2). It will be called zero times if the slow path never runs. This is what you hope for + // whenever you use the lazySlowPath() mechanism. + // + // A typical use of lazySlowPath() will look like the example below, which just creates a slow + // path that adds some value to the input and returns it. + // + // // Stage (1) is here. This is your last chance to figure out which LValues to use as inputs. + // // Notice how we pass "input" as an argument to lazySlowPath(). 
+ // LValue input = ...; + // int addend = ...; + // LValue output = lazySlowPath( + // [=] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> { + // // Stage (2) is here. This is your last chance to figure out which registers are used + // // for which values. Location zero is always the return value. You can ignore it if + // // you don't want to return anything. Location 1 is the register for the first + // // argument to the lazySlowPath(), i.e. "input". Note that the Location object could + // // also hold an FPR, if you are passing a double. + // GPRReg outputGPR = locations[0].directGPR(); + // GPRReg inputGPR = locations[1].directGPR(); + // return LazySlowPath::createGenerator( + // [=] (CCallHelpers& jit, LazySlowPath::GenerationParams& params) { + // // Stage (3) is here. This is when you generate code. You have access to the + // // registers you collected in stage (2) because this lambda closes over those + // // variables (outputGPR and inputGPR). You also have access to whatever extra + // // data you collected in stage (1), such as the addend in this case. + // jit.add32(TrustedImm32(addend), inputGPR, outputGPR); + // // You have to end by jumping to done. There is nothing to fall through to. + // // You can also jump to the exception handler (see LazySlowPath.h for more + // // info). Note that currently you cannot OSR exit. + // params.doneJumps.append(jit.jump()); + // }); + // }, + // input); + // + // You can basically pass as many inputs as you like, either using this varargs form, or by + // passing a Vector of LValues. + // + // Note that if your slow path is only doing a call, you can use the createLazyCallGenerator() + // helper. For example: + // + // LValue input = ...; + // LValue output = lazySlowPath( + // [=] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> { + // return createLazyCallGenerator( + // operationDoThings, locations[0].directGPR(), locations[1].directGPR()); + // }, input); + // + // Finally, note that all of the lambdas - both the stage (2) lambda and the stage (3) lambda - + // run after the function that created them returns. Hence, you should not use by-reference + // capture (i.e. [&]) in any of these lambdas. + template<typename Functor, typename... ArgumentTypes> + LValue lazySlowPath(const Functor& functor, ArgumentTypes... arguments) + { + return lazySlowPath(functor, Vector<LValue>{ arguments... 
}); + } + + template<typename Functor> + LValue lazySlowPath(const Functor& functor, const Vector<LValue>& userArguments) + { + CodeOrigin origin = m_node->origin.semantic; + + PatchpointValue* result = m_out.patchpoint(B3::Int64); + for (LValue arg : userArguments) + result->append(ConstrainedValue(arg, B3::ValueRep::SomeRegister)); + + RefPtr<PatchpointExceptionHandle> exceptionHandle = + preparePatchpointForExceptions(result); + + result->clobber(RegisterSet::macroScratchRegisters()); + State* state = &m_ftlState; + + result->setGenerator( + [=] (CCallHelpers& jit, const StackmapGenerationParams& params) { + Vector<Location> locations; + for (const B3::ValueRep& rep : params) + locations.append(Location::forValueRep(rep)); + + RefPtr<LazySlowPath::Generator> generator = functor(locations); + + CCallHelpers::PatchableJump patchableJump = jit.patchableJump(); + CCallHelpers::Label done = jit.label(); + + RegisterSet usedRegisters = params.unavailableRegisters(); + + RefPtr<ExceptionTarget> exceptionTarget = + exceptionHandle->scheduleExitCreation(params); + + // FIXME: As part of handling exceptions, we need to create a concrete OSRExit here. + // Doing so should automagically register late paths that emit exit thunks. + + params.addLatePath( + [=] (CCallHelpers& jit) { + AllowMacroScratchRegisterUsage allowScratch(jit); + patchableJump.m_jump.link(&jit); + unsigned index = state->jitCode->lazySlowPaths.size(); + state->jitCode->lazySlowPaths.append(nullptr); + jit.pushToSaveImmediateWithoutTouchingRegisters( + CCallHelpers::TrustedImm32(index)); + CCallHelpers::Jump generatorJump = jit.jump(); + + // Note that so long as we're here, we don't really know if our late path + // runs before or after any other late paths that we might depend on, like + // the exception thunk. 
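+                            // Everything that the link task below needs (the patchable jump, the
+                            // done label, the generator, and the slow path index) is captured by
+                            // value, since this late path lambda will have returned by the time
+                            // the LinkBuffer runs it.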
+ + RefPtr<JITCode> jitCode = state->jitCode; + VM* vm = &state->graph.m_vm; + + jit.addLinkTask( + [=] (LinkBuffer& linkBuffer) { + linkBuffer.link( + generatorJump, CodeLocationLabel( + vm->getCTIStub( + lazySlowPathGenerationThunkGenerator).code())); + + CodeLocationJump linkedPatchableJump = CodeLocationJump( + linkBuffer.locationOf(patchableJump)); + CodeLocationLabel linkedDone = linkBuffer.locationOf(done); + + CallSiteIndex callSiteIndex = + jitCode->common.addUniqueCallSiteIndex(origin); + + std::unique_ptr<LazySlowPath> lazySlowPath = + std::make_unique<LazySlowPath>( + linkedPatchableJump, linkedDone, + exceptionTarget->label(linkBuffer), usedRegisters, + callSiteIndex, generator); + + jitCode->lazySlowPaths[index] = WTFMove(lazySlowPath); + }); + }); + }); + return result; + } + + void speculate( + ExitKind kind, FormattedValue lowValue, Node* highValue, LValue failCondition) + { + appendOSRExit(kind, lowValue, highValue, failCondition, m_origin); + } + + void terminate(ExitKind kind) + { + speculate(kind, noValue(), nullptr, m_out.booleanTrue); + didAlreadyTerminate(); + } + + void didAlreadyTerminate() + { + m_state.setIsValid(false); + } + + void typeCheck( + FormattedValue lowValue, Edge highValue, SpeculatedType typesPassedThrough, + LValue failCondition, ExitKind exitKind = BadType) + { + appendTypeCheck(lowValue, highValue, typesPassedThrough, failCondition, exitKind); + } + + void appendTypeCheck( + FormattedValue lowValue, Edge highValue, SpeculatedType typesPassedThrough, + LValue failCondition, ExitKind exitKind) + { + if (!m_interpreter.needsTypeCheck(highValue, typesPassedThrough)) + return; + ASSERT(mayHaveTypeCheck(highValue.useKind())); + appendOSRExit(exitKind, lowValue, highValue.node(), failCondition, m_origin); + m_interpreter.filter(highValue, typesPassedThrough); + } + + LValue lowInt32(Edge edge, OperandSpeculationMode mode = AutomaticOperandSpeculation) + { + ASSERT_UNUSED(mode, mode == ManualOperandSpeculation || (edge.useKind() == Int32Use || edge.useKind() == KnownInt32Use)); + + if (edge->hasConstant()) { + JSValue value = edge->asJSValue(); + if (!value.isInt32()) { + terminate(Uncountable); + return m_out.int32Zero; + } + return m_out.constInt32(value.asInt32()); + } + + LoweredNodeValue value = m_int32Values.get(edge.node()); + if (isValid(value)) + return value.value(); + + value = m_strictInt52Values.get(edge.node()); + if (isValid(value)) + return strictInt52ToInt32(edge, value.value()); + + value = m_int52Values.get(edge.node()); + if (isValid(value)) + return strictInt52ToInt32(edge, int52ToStrictInt52(value.value())); + + value = m_jsValueValues.get(edge.node()); + if (isValid(value)) { + LValue boxedResult = value.value(); + FTL_TYPE_CHECK( + jsValueValue(boxedResult), edge, SpecInt32, isNotInt32(boxedResult)); + LValue result = unboxInt32(boxedResult); + setInt32(edge.node(), result); + return result; + } + + DFG_ASSERT(m_graph, m_node, !(provenType(edge) & SpecInt32)); + terminate(Uncountable); + return m_out.int32Zero; + } + + enum Int52Kind { StrictInt52, Int52 }; + LValue lowInt52(Edge edge, Int52Kind kind) + { + DFG_ASSERT(m_graph, m_node, edge.useKind() == Int52RepUse); + + LoweredNodeValue value; + + switch (kind) { + case Int52: + value = m_int52Values.get(edge.node()); + if (isValid(value)) + return value.value(); + + value = m_strictInt52Values.get(edge.node()); + if (isValid(value)) + return strictInt52ToInt52(value.value()); + break; + + case StrictInt52: + value = m_strictInt52Values.get(edge.node()); + if (isValid(value)) + 
return value.value(); + + value = m_int52Values.get(edge.node()); + if (isValid(value)) + return int52ToStrictInt52(value.value()); + break; + } + + DFG_ASSERT(m_graph, m_node, !provenType(edge)); + terminate(Uncountable); + return m_out.int64Zero; + } + + LValue lowInt52(Edge edge) + { + return lowInt52(edge, Int52); + } + + LValue lowStrictInt52(Edge edge) + { + return lowInt52(edge, StrictInt52); + } + + bool betterUseStrictInt52(Node* node) + { + return !isValid(m_int52Values.get(node)); + } + bool betterUseStrictInt52(Edge edge) + { + return betterUseStrictInt52(edge.node()); + } + template<typename T> + Int52Kind bestInt52Kind(T node) + { + return betterUseStrictInt52(node) ? StrictInt52 : Int52; + } + Int52Kind opposite(Int52Kind kind) + { + switch (kind) { + case Int52: + return StrictInt52; + case StrictInt52: + return Int52; + } + DFG_CRASH(m_graph, m_node, "Bad use kind"); + return Int52; + } + + LValue lowWhicheverInt52(Edge edge, Int52Kind& kind) + { + kind = bestInt52Kind(edge); + return lowInt52(edge, kind); + } + + LValue lowCell(Edge edge, OperandSpeculationMode mode = AutomaticOperandSpeculation) + { + DFG_ASSERT(m_graph, m_node, mode == ManualOperandSpeculation || DFG::isCell(edge.useKind())); + + if (edge->op() == JSConstant) { + JSValue value = edge->asJSValue(); + if (!value.isCell()) { + terminate(Uncountable); + return m_out.intPtrZero; + } + return m_out.constIntPtr(value.asCell()); + } + + LoweredNodeValue value = m_jsValueValues.get(edge.node()); + if (isValid(value)) { + LValue uncheckedValue = value.value(); + FTL_TYPE_CHECK( + jsValueValue(uncheckedValue), edge, SpecCell, isNotCell(uncheckedValue)); + return uncheckedValue; + } + + DFG_ASSERT(m_graph, m_node, !(provenType(edge) & SpecCell)); + terminate(Uncountable); + return m_out.intPtrZero; + } + + LValue lowObject(Edge edge, OperandSpeculationMode mode = AutomaticOperandSpeculation) + { + ASSERT_UNUSED(mode, mode == ManualOperandSpeculation || edge.useKind() == ObjectUse); + + LValue result = lowCell(edge, mode); + speculateObject(edge, result); + return result; + } + + LValue lowString(Edge edge, OperandSpeculationMode mode = AutomaticOperandSpeculation) + { + ASSERT_UNUSED(mode, mode == ManualOperandSpeculation || edge.useKind() == StringUse || edge.useKind() == KnownStringUse || edge.useKind() == StringIdentUse); + + LValue result = lowCell(edge, mode); + speculateString(edge, result); + return result; + } + + LValue lowStringIdent(Edge edge, OperandSpeculationMode mode = AutomaticOperandSpeculation) + { + ASSERT_UNUSED(mode, mode == ManualOperandSpeculation || edge.useKind() == StringIdentUse); + + LValue string = lowString(edge, mode); + LValue stringImpl = m_out.loadPtr(string, m_heaps.JSString_value); + speculateStringIdent(edge, string, stringImpl); + return stringImpl; + } + + LValue lowSymbol(Edge edge, OperandSpeculationMode mode = AutomaticOperandSpeculation) + { + ASSERT_UNUSED(mode, mode == ManualOperandSpeculation || edge.useKind() == SymbolUse); + + LValue result = lowCell(edge, mode); + speculateSymbol(edge, result); + return result; + } + + LValue lowNonNullObject(Edge edge, OperandSpeculationMode mode = AutomaticOperandSpeculation) + { + ASSERT_UNUSED(mode, mode == ManualOperandSpeculation || edge.useKind() == ObjectUse); + + LValue result = lowCell(edge, mode); + speculateNonNullObject(edge, result); + return result; + } + + LValue lowBoolean(Edge edge, OperandSpeculationMode mode = AutomaticOperandSpeculation) + { + ASSERT_UNUSED(mode, mode == ManualOperandSpeculation || edge.useKind() 
== BooleanUse || edge.useKind() == KnownBooleanUse); + + if (edge->hasConstant()) { + JSValue value = edge->asJSValue(); + if (!value.isBoolean()) { + terminate(Uncountable); + return m_out.booleanFalse; + } + return m_out.constBool(value.asBoolean()); + } + + LoweredNodeValue value = m_booleanValues.get(edge.node()); + if (isValid(value)) + return value.value(); + + value = m_jsValueValues.get(edge.node()); + if (isValid(value)) { + LValue unboxedResult = value.value(); + FTL_TYPE_CHECK( + jsValueValue(unboxedResult), edge, SpecBoolean, isNotBoolean(unboxedResult)); + LValue result = unboxBoolean(unboxedResult); + setBoolean(edge.node(), result); + return result; + } + + DFG_ASSERT(m_graph, m_node, !(provenType(edge) & SpecBoolean)); + terminate(Uncountable); + return m_out.booleanFalse; + } + + LValue lowDouble(Edge edge) + { + DFG_ASSERT(m_graph, m_node, isDouble(edge.useKind())); + + LoweredNodeValue value = m_doubleValues.get(edge.node()); + if (isValid(value)) + return value.value(); + DFG_ASSERT(m_graph, m_node, !provenType(edge)); + terminate(Uncountable); + return m_out.doubleZero; + } + + LValue lowJSValue(Edge edge, OperandSpeculationMode mode = AutomaticOperandSpeculation) + { + DFG_ASSERT(m_graph, m_node, mode == ManualOperandSpeculation || edge.useKind() == UntypedUse); + DFG_ASSERT(m_graph, m_node, !isDouble(edge.useKind())); + DFG_ASSERT(m_graph, m_node, edge.useKind() != Int52RepUse); + + if (edge->hasConstant()) + return m_out.constInt64(JSValue::encode(edge->asJSValue())); + + LoweredNodeValue value = m_jsValueValues.get(edge.node()); + if (isValid(value)) + return value.value(); + + value = m_int32Values.get(edge.node()); + if (isValid(value)) { + LValue result = boxInt32(value.value()); + setJSValue(edge.node(), result); + return result; + } + + value = m_booleanValues.get(edge.node()); + if (isValid(value)) { + LValue result = boxBoolean(value.value()); + setJSValue(edge.node(), result); + return result; + } + + DFG_CRASH(m_graph, m_node, "Value not defined"); + return 0; + } + + LValue lowStorage(Edge edge) + { + LoweredNodeValue value = m_storageValues.get(edge.node()); + if (isValid(value)) + return value.value(); + + LValue result = lowCell(edge); + setStorage(edge.node(), result); + return result; + } + + LValue strictInt52ToInt32(Edge edge, LValue value) + { + LValue result = m_out.castToInt32(value); + FTL_TYPE_CHECK( + noValue(), edge, SpecInt32, + m_out.notEqual(m_out.signExt32To64(result), value)); + setInt32(edge.node(), result); + return result; + } + + LValue strictInt52ToDouble(LValue value) + { + return m_out.intToDouble(value); + } + + LValue strictInt52ToJSValue(LValue value) + { + LBasicBlock isInt32 = m_out.newBlock(); + LBasicBlock isDouble = m_out.newBlock(); + LBasicBlock continuation = m_out.newBlock(); + + Vector<ValueFromBlock, 2> results; + + LValue int32Value = m_out.castToInt32(value); + m_out.branch( + m_out.equal(m_out.signExt32To64(int32Value), value), + unsure(isInt32), unsure(isDouble)); + + LBasicBlock lastNext = m_out.appendTo(isInt32, isDouble); + + results.append(m_out.anchor(boxInt32(int32Value))); + m_out.jump(continuation); + + m_out.appendTo(isDouble, continuation); + + results.append(m_out.anchor(boxDouble(m_out.intToDouble(value)))); + m_out.jump(continuation); + + m_out.appendTo(continuation, lastNext); + return m_out.phi(m_out.int64, results); + } + + LValue strictInt52ToInt52(LValue value) + { + return m_out.shl(value, m_out.constInt64(JSValue::int52ShiftAmount)); + } + + LValue int52ToStrictInt52(LValue value) + { + 
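+        // The plain Int52 format keeps the value shifted left by JSValue::int52ShiftAmount,
+        // while StrictInt52 is the unshifted integer (see strictInt52ToInt52() above). Undoing
+        // that shift has to preserve the sign, hence the arithmetic right shift.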
return m_out.aShr(value, m_out.constInt64(JSValue::int52ShiftAmount)); + } + + LValue isInt32(LValue jsValue, SpeculatedType type = SpecFullTop) + { + if (LValue proven = isProvenValue(type, SpecInt32)) + return proven; + return m_out.aboveOrEqual(jsValue, m_tagTypeNumber); + } + LValue isNotInt32(LValue jsValue, SpeculatedType type = SpecFullTop) + { + if (LValue proven = isProvenValue(type, ~SpecInt32)) + return proven; + return m_out.below(jsValue, m_tagTypeNumber); + } + LValue unboxInt32(LValue jsValue) + { + return m_out.castToInt32(jsValue); + } + LValue boxInt32(LValue value) + { + return m_out.add(m_out.zeroExt(value, m_out.int64), m_tagTypeNumber); + } + + LValue isCellOrMisc(LValue jsValue, SpeculatedType type = SpecFullTop) + { + if (LValue proven = isProvenValue(type, SpecCell | SpecMisc)) + return proven; + return m_out.testIsZero64(jsValue, m_tagTypeNumber); + } + LValue isNotCellOrMisc(LValue jsValue, SpeculatedType type = SpecFullTop) + { + if (LValue proven = isProvenValue(type, ~(SpecCell | SpecMisc))) + return proven; + return m_out.testNonZero64(jsValue, m_tagTypeNumber); + } + + LValue unboxDouble(LValue jsValue) + { + return m_out.bitCast(m_out.add(jsValue, m_tagTypeNumber), m_out.doubleType); + } + LValue boxDouble(LValue doubleValue) + { + return m_out.sub(m_out.bitCast(doubleValue, m_out.int64), m_tagTypeNumber); + } + + LValue jsValueToStrictInt52(Edge edge, LValue boxedValue) + { + LBasicBlock intCase = m_out.newBlock(); + LBasicBlock doubleCase = m_out.newBlock(); + LBasicBlock continuation = m_out.newBlock(); + + LValue isNotInt32; + if (!m_interpreter.needsTypeCheck(edge, SpecInt32)) + isNotInt32 = m_out.booleanFalse; + else if (!m_interpreter.needsTypeCheck(edge, ~SpecInt32)) + isNotInt32 = m_out.booleanTrue; + else + isNotInt32 = this->isNotInt32(boxedValue); + m_out.branch(isNotInt32, unsure(doubleCase), unsure(intCase)); + + LBasicBlock lastNext = m_out.appendTo(intCase, doubleCase); + + ValueFromBlock intToInt52 = m_out.anchor( + m_out.signExt32To64(unboxInt32(boxedValue))); + m_out.jump(continuation); + + m_out.appendTo(doubleCase, continuation); + + LValue possibleResult = m_out.call( + m_out.int64, m_out.operation(operationConvertBoxedDoubleToInt52), boxedValue); + FTL_TYPE_CHECK( + jsValueValue(boxedValue), edge, SpecInt32 | SpecInt52AsDouble, + m_out.equal(possibleResult, m_out.constInt64(JSValue::notInt52))); + + ValueFromBlock doubleToInt52 = m_out.anchor(possibleResult); + m_out.jump(continuation); + + m_out.appendTo(continuation, lastNext); + + return m_out.phi(m_out.int64, intToInt52, doubleToInt52); + } + + LValue doubleToStrictInt52(Edge edge, LValue value) + { + LValue possibleResult = m_out.call( + m_out.int64, m_out.operation(operationConvertDoubleToInt52), value); + FTL_TYPE_CHECK_WITH_EXIT_KIND(Int52Overflow, + doubleValue(value), edge, SpecInt52AsDouble, + m_out.equal(possibleResult, m_out.constInt64(JSValue::notInt52))); + + return possibleResult; + } + + LValue convertDoubleToInt32(LValue value, bool shouldCheckNegativeZero) + { + LValue integerValue = m_out.doubleToInt(value); + LValue integerValueConvertedToDouble = m_out.intToDouble(integerValue); + LValue valueNotConvertibleToInteger = m_out.doubleNotEqualOrUnordered(value, integerValueConvertedToDouble); + speculate(Overflow, FormattedValue(DataFormatDouble, value), m_node, valueNotConvertibleToInteger); + + if (shouldCheckNegativeZero) { + LBasicBlock valueIsZero = m_out.newBlock(); + LBasicBlock continuation = m_out.newBlock(); + m_out.branch(m_out.isZero32(integerValue), 
unsure(valueIsZero), unsure(continuation)); + + LBasicBlock lastNext = m_out.appendTo(valueIsZero, continuation); + + LValue doubleBitcastToInt64 = m_out.bitCast(value, m_out.int64); + LValue signBitSet = m_out.lessThan(doubleBitcastToInt64, m_out.constInt64(0)); + + speculate(NegativeZero, FormattedValue(DataFormatDouble, value), m_node, signBitSet); + m_out.jump(continuation); + m_out.appendTo(continuation, lastNext); + } + return integerValue; + } + + LValue isNumber(LValue jsValue, SpeculatedType type = SpecFullTop) + { + if (LValue proven = isProvenValue(type, SpecFullNumber)) + return proven; + return isNotCellOrMisc(jsValue); + } + LValue isNotNumber(LValue jsValue, SpeculatedType type = SpecFullTop) + { + if (LValue proven = isProvenValue(type, ~SpecFullNumber)) + return proven; + return isCellOrMisc(jsValue); + } + + LValue isNotCell(LValue jsValue, SpeculatedType type = SpecFullTop) + { + if (LValue proven = isProvenValue(type, ~SpecCell)) + return proven; + return m_out.testNonZero64(jsValue, m_tagMask); + } + + LValue isCell(LValue jsValue, SpeculatedType type = SpecFullTop) + { + if (LValue proven = isProvenValue(type, SpecCell)) + return proven; + return m_out.testIsZero64(jsValue, m_tagMask); + } + + LValue isNotMisc(LValue value, SpeculatedType type = SpecFullTop) + { + if (LValue proven = isProvenValue(type, ~SpecMisc)) + return proven; + return m_out.above(value, m_out.constInt64(TagBitTypeOther | TagBitBool | TagBitUndefined)); + } + + LValue isMisc(LValue value, SpeculatedType type = SpecFullTop) + { + if (LValue proven = isProvenValue(type, SpecMisc)) + return proven; + return m_out.logicalNot(isNotMisc(value)); + } + + LValue isNotBoolean(LValue jsValue, SpeculatedType type = SpecFullTop) + { + if (LValue proven = isProvenValue(type, ~SpecBoolean)) + return proven; + return m_out.testNonZero64( + m_out.bitXor(jsValue, m_out.constInt64(ValueFalse)), + m_out.constInt64(~1)); + } + LValue isBoolean(LValue jsValue, SpeculatedType type = SpecFullTop) + { + if (LValue proven = isProvenValue(type, SpecBoolean)) + return proven; + return m_out.logicalNot(isNotBoolean(jsValue)); + } + LValue unboxBoolean(LValue jsValue) + { + // We want to use a cast that guarantees that B3 knows that even the integer + // value is just 0 or 1. But for now we do it the dumb way. 
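+        // ValueTrue and ValueFalse differ only in their low bit, so masking with 1 and
+        // testing for non-zero recovers the boolean.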
+ return m_out.notZero64(m_out.bitAnd(jsValue, m_out.constInt64(1))); + } + LValue boxBoolean(LValue value) + { + return m_out.select( + value, m_out.constInt64(ValueTrue), m_out.constInt64(ValueFalse)); + } + + LValue isNotOther(LValue value, SpeculatedType type = SpecFullTop) + { + if (LValue proven = isProvenValue(type, ~SpecOther)) + return proven; + return m_out.notEqual( + m_out.bitAnd(value, m_out.constInt64(~TagBitUndefined)), + m_out.constInt64(ValueNull)); + } + LValue isOther(LValue value, SpeculatedType type = SpecFullTop) + { + if (LValue proven = isProvenValue(type, SpecOther)) + return proven; + return m_out.equal( + m_out.bitAnd(value, m_out.constInt64(~TagBitUndefined)), + m_out.constInt64(ValueNull)); + } + + LValue isProvenValue(SpeculatedType provenType, SpeculatedType wantedType) + { + if (!(provenType & ~wantedType)) + return m_out.booleanTrue; + if (!(provenType & wantedType)) + return m_out.booleanFalse; + return nullptr; + } + + void speculate(Edge edge) + { + switch (edge.useKind()) { + case UntypedUse: + break; + case KnownInt32Use: + case KnownStringUse: + case KnownPrimitiveUse: + case DoubleRepUse: + case Int52RepUse: + ASSERT(!m_interpreter.needsTypeCheck(edge)); + break; + case Int32Use: + speculateInt32(edge); + break; + case CellUse: + speculateCell(edge); + break; + case CellOrOtherUse: + speculateCellOrOther(edge); + break; + case KnownCellUse: + ASSERT(!m_interpreter.needsTypeCheck(edge)); + break; + case MachineIntUse: + speculateMachineInt(edge); + break; + case ObjectUse: + speculateObject(edge); + break; + case FunctionUse: + speculateFunction(edge); + break; + case ObjectOrOtherUse: + speculateObjectOrOther(edge); + break; + case FinalObjectUse: + speculateFinalObject(edge); + break; + case RegExpObjectUse: + speculateRegExpObject(edge); + break; + case StringUse: + speculateString(edge); + break; + case StringOrOtherUse: + speculateStringOrOther(edge); + break; + case StringIdentUse: + speculateStringIdent(edge); + break; + case SymbolUse: + speculateSymbol(edge); + break; + case StringObjectUse: + speculateStringObject(edge); + break; + case StringOrStringObjectUse: + speculateStringOrStringObject(edge); + break; + case NumberUse: + speculateNumber(edge); + break; + case RealNumberUse: + speculateRealNumber(edge); + break; + case DoubleRepRealUse: + speculateDoubleRepReal(edge); + break; + case DoubleRepMachineIntUse: + speculateDoubleRepMachineInt(edge); + break; + case BooleanUse: + speculateBoolean(edge); + break; + case NotStringVarUse: + speculateNotStringVar(edge); + break; + case NotCellUse: + speculateNotCell(edge); + break; + case OtherUse: + speculateOther(edge); + break; + case MiscUse: + speculateMisc(edge); + break; + default: + DFG_CRASH(m_graph, m_node, "Unsupported speculation use kind"); + } + } + + void speculate(Node*, Edge edge) + { + speculate(edge); + } + + void speculateInt32(Edge edge) + { + lowInt32(edge); + } + + void speculateCell(Edge edge) + { + lowCell(edge); + } + + void speculateCellOrOther(Edge edge) + { + LValue value = lowJSValue(edge, ManualOperandSpeculation); + + LBasicBlock isNotCell = m_out.newBlock(); + LBasicBlock continuation = m_out.newBlock(); + + m_out.branch(isCell(value, provenType(edge)), unsure(continuation), unsure(isNotCell)); + + LBasicBlock lastNext = m_out.appendTo(isNotCell, continuation); + FTL_TYPE_CHECK(jsValueValue(value), edge, SpecCell | SpecOther, isNotOther(value)); + m_out.jump(continuation); + + m_out.appendTo(continuation, lastNext); + } + + void speculateMachineInt(Edge edge) + { 
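+        // A machine int is either a boxed int32 or a double that converts exactly to an
+        // integer in the int52 range; jsValueToStrictInt52() emits both checks and exits for
+        // anything else.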
+ if (!m_interpreter.needsTypeCheck(edge)) + return; + + jsValueToStrictInt52(edge, lowJSValue(edge, ManualOperandSpeculation)); + } + + LValue isObject(LValue cell, SpeculatedType type = SpecFullTop) + { + if (LValue proven = isProvenValue(type & SpecCell, SpecObject)) + return proven; + return m_out.aboveOrEqual( + m_out.load8ZeroExt32(cell, m_heaps.JSCell_typeInfoType), + m_out.constInt32(ObjectType)); + } + + LValue isNotObject(LValue cell, SpeculatedType type = SpecFullTop) + { + if (LValue proven = isProvenValue(type & SpecCell, ~SpecObject)) + return proven; + return m_out.below( + m_out.load8ZeroExt32(cell, m_heaps.JSCell_typeInfoType), + m_out.constInt32(ObjectType)); + } + + LValue isNotString(LValue cell, SpeculatedType type = SpecFullTop) + { + if (LValue proven = isProvenValue(type & SpecCell, ~SpecString)) + return proven; + return m_out.notEqual( + m_out.load32(cell, m_heaps.JSCell_structureID), + m_out.constInt32(vm().stringStructure->id())); + } + + LValue isString(LValue cell, SpeculatedType type = SpecFullTop) + { + if (LValue proven = isProvenValue(type & SpecCell, SpecString)) + return proven; + return m_out.equal( + m_out.load32(cell, m_heaps.JSCell_structureID), + m_out.constInt32(vm().stringStructure->id())); + } + + LValue isNotSymbol(LValue cell, SpeculatedType type = SpecFullTop) + { + if (LValue proven = isProvenValue(type & SpecCell, ~SpecSymbol)) + return proven; + return m_out.notEqual( + m_out.load32(cell, m_heaps.JSCell_structureID), + m_out.constInt32(vm().symbolStructure->id())); + } + + LValue isArrayType(LValue cell, ArrayMode arrayMode) + { + switch (arrayMode.type()) { + case Array::Int32: + case Array::Double: + case Array::Contiguous: { + LValue indexingType = m_out.load8ZeroExt32(cell, m_heaps.JSCell_indexingType); + + switch (arrayMode.arrayClass()) { + case Array::OriginalArray: + DFG_CRASH(m_graph, m_node, "Unexpected original array"); + return 0; + + case Array::Array: + return m_out.equal( + m_out.bitAnd(indexingType, m_out.constInt32(IsArray | IndexingShapeMask)), + m_out.constInt32(IsArray | arrayMode.shapeMask())); + + case Array::NonArray: + case Array::OriginalNonArray: + return m_out.equal( + m_out.bitAnd(indexingType, m_out.constInt32(IsArray | IndexingShapeMask)), + m_out.constInt32(arrayMode.shapeMask())); + + case Array::PossiblyArray: + return m_out.equal( + m_out.bitAnd(indexingType, m_out.constInt32(IndexingShapeMask)), + m_out.constInt32(arrayMode.shapeMask())); + } + + DFG_CRASH(m_graph, m_node, "Corrupt array class"); + } + + case Array::DirectArguments: + return m_out.equal( + m_out.load8ZeroExt32(cell, m_heaps.JSCell_typeInfoType), + m_out.constInt32(DirectArgumentsType)); + + case Array::ScopedArguments: + return m_out.equal( + m_out.load8ZeroExt32(cell, m_heaps.JSCell_typeInfoType), + m_out.constInt32(ScopedArgumentsType)); + + default: + return m_out.equal( + m_out.load8ZeroExt32(cell, m_heaps.JSCell_typeInfoType), + m_out.constInt32(typeForTypedArrayType(arrayMode.typedArrayType()))); + } + } + + LValue isFunction(LValue cell, SpeculatedType type = SpecFullTop) + { + if (LValue proven = isProvenValue(type & SpecCell, SpecFunction)) + return proven; + return isType(cell, JSFunctionType); + } + LValue isNotFunction(LValue cell, SpeculatedType type = SpecFullTop) + { + if (LValue proven = isProvenValue(type & SpecCell, ~SpecFunction)) + return proven; + return isNotType(cell, JSFunctionType); + } + + LValue isExoticForTypeof(LValue cell, SpeculatedType type = SpecFullTop) + { + if (!(type & SpecObjectOther)) + return 
m_out.booleanFalse; + return m_out.testNonZero32( + m_out.load8ZeroExt32(cell, m_heaps.JSCell_typeInfoFlags), + m_out.constInt32(MasqueradesAsUndefined | TypeOfShouldCallGetCallData)); + } + + LValue isType(LValue cell, JSType type) + { + return m_out.equal( + m_out.load8ZeroExt32(cell, m_heaps.JSCell_typeInfoType), + m_out.constInt32(type)); + } + + LValue isNotType(LValue cell, JSType type) + { + return m_out.logicalNot(isType(cell, type)); + } + + void speculateObject(Edge edge, LValue cell) + { + FTL_TYPE_CHECK(jsValueValue(cell), edge, SpecObject, isNotObject(cell)); + } + + void speculateObject(Edge edge) + { + speculateObject(edge, lowCell(edge)); + } + + void speculateFunction(Edge edge, LValue cell) + { + FTL_TYPE_CHECK(jsValueValue(cell), edge, SpecFunction, isNotFunction(cell)); + } + + void speculateFunction(Edge edge) + { + speculateFunction(edge, lowCell(edge)); + } + + void speculateObjectOrOther(Edge edge) + { + if (!m_interpreter.needsTypeCheck(edge)) + return; + + LValue value = lowJSValue(edge, ManualOperandSpeculation); + + LBasicBlock cellCase = m_out.newBlock(); + LBasicBlock primitiveCase = m_out.newBlock(); + LBasicBlock continuation = m_out.newBlock(); + + m_out.branch(isNotCell(value, provenType(edge)), unsure(primitiveCase), unsure(cellCase)); + + LBasicBlock lastNext = m_out.appendTo(cellCase, primitiveCase); + + FTL_TYPE_CHECK( + jsValueValue(value), edge, (~SpecCell) | SpecObject, isNotObject(value)); + + m_out.jump(continuation); + + m_out.appendTo(primitiveCase, continuation); + + FTL_TYPE_CHECK( + jsValueValue(value), edge, SpecCell | SpecOther, isNotOther(value)); + + m_out.jump(continuation); + + m_out.appendTo(continuation, lastNext); + } + + void speculateFinalObject(Edge edge, LValue cell) + { + FTL_TYPE_CHECK( + jsValueValue(cell), edge, SpecFinalObject, isNotType(cell, FinalObjectType)); + } + + void speculateFinalObject(Edge edge) + { + speculateFinalObject(edge, lowCell(edge)); + } + + void speculateRegExpObject(Edge edge, LValue cell) + { + FTL_TYPE_CHECK( + jsValueValue(cell), edge, SpecRegExpObject, isNotType(cell, RegExpObjectType)); + } + + void speculateRegExpObject(Edge edge) + { + speculateRegExpObject(edge, lowCell(edge)); + } + + void speculateString(Edge edge, LValue cell) + { + FTL_TYPE_CHECK(jsValueValue(cell), edge, SpecString | ~SpecCell, isNotString(cell)); + } + + void speculateString(Edge edge) + { + speculateString(edge, lowCell(edge)); + } + + void speculateStringOrOther(Edge edge, LValue value) + { + LBasicBlock cellCase = m_out.newBlock(); + LBasicBlock notCellCase = m_out.newBlock(); + LBasicBlock continuation = m_out.newBlock(); + + m_out.branch(isCell(value, provenType(edge)), unsure(cellCase), unsure(notCellCase)); + + LBasicBlock lastNext = m_out.appendTo(cellCase, notCellCase); + + FTL_TYPE_CHECK(jsValueValue(value), edge, (~SpecCell) | SpecString, isNotString(value)); + + m_out.jump(continuation); + m_out.appendTo(notCellCase, continuation); + + FTL_TYPE_CHECK(jsValueValue(value), edge, SpecCell | SpecOther, isNotOther(value)); + + m_out.jump(continuation); + m_out.appendTo(continuation, lastNext); + } + + void speculateStringOrOther(Edge edge) + { + speculateStringOrOther(edge, lowJSValue(edge, ManualOperandSpeculation)); + } + + void speculateStringIdent(Edge edge, LValue string, LValue stringImpl) + { + if (!m_interpreter.needsTypeCheck(edge, SpecStringIdent | ~SpecString)) + return; + + speculate(BadType, jsValueValue(string), edge.node(), m_out.isNull(stringImpl)); + speculate( + BadType, jsValueValue(string), 
edge.node(), + m_out.testIsZero32( + m_out.load32(stringImpl, m_heaps.StringImpl_hashAndFlags), + m_out.constInt32(StringImpl::flagIsAtomic()))); + m_interpreter.filter(edge, SpecStringIdent | ~SpecString); + } + + void speculateStringIdent(Edge edge) + { + lowStringIdent(edge); + } + + void speculateStringObject(Edge edge) + { + if (!m_interpreter.needsTypeCheck(edge, SpecStringObject)) + return; + + speculateStringObjectForCell(edge, lowCell(edge)); + m_interpreter.filter(edge, SpecStringObject); + } + + void speculateStringOrStringObject(Edge edge) + { + if (!m_interpreter.needsTypeCheck(edge, SpecString | SpecStringObject)) + return; + + LBasicBlock notString = m_out.newBlock(); + LBasicBlock continuation = m_out.newBlock(); + + LValue structureID = m_out.load32(lowCell(edge), m_heaps.JSCell_structureID); + m_out.branch( + m_out.equal(structureID, m_out.constInt32(vm().stringStructure->id())), + unsure(continuation), unsure(notString)); + + LBasicBlock lastNext = m_out.appendTo(notString, continuation); + speculateStringObjectForStructureID(edge, structureID); + m_out.jump(continuation); + + m_out.appendTo(continuation, lastNext); + + m_interpreter.filter(edge, SpecString | SpecStringObject); + } + + void speculateStringObjectForCell(Edge edge, LValue cell) + { + speculateStringObjectForStructureID(edge, m_out.load32(cell, m_heaps.JSCell_structureID)); + } + + void speculateStringObjectForStructureID(Edge edge, LValue structureID) + { + Structure* stringObjectStructure = + m_graph.globalObjectFor(m_node->origin.semantic)->stringObjectStructure(); + + if (abstractStructure(edge).isSubsetOf(StructureSet(stringObjectStructure))) + return; + + speculate( + NotStringObject, noValue(), 0, + m_out.notEqual(structureID, weakStructureID(stringObjectStructure))); + } + + void speculateSymbol(Edge edge, LValue cell) + { + FTL_TYPE_CHECK(jsValueValue(cell), edge, SpecSymbol | ~SpecCell, isNotSymbol(cell)); + } + + void speculateSymbol(Edge edge) + { + speculateSymbol(edge, lowCell(edge)); + } + + void speculateNonNullObject(Edge edge, LValue cell) + { + FTL_TYPE_CHECK(jsValueValue(cell), edge, SpecObject, isNotObject(cell)); + if (masqueradesAsUndefinedWatchpointIsStillValid()) + return; + + speculate( + BadType, jsValueValue(cell), edge.node(), + m_out.testNonZero32( + m_out.load8ZeroExt32(cell, m_heaps.JSCell_typeInfoFlags), + m_out.constInt32(MasqueradesAsUndefined))); + } + + void speculateNumber(Edge edge) + { + LValue value = lowJSValue(edge, ManualOperandSpeculation); + FTL_TYPE_CHECK(jsValueValue(value), edge, SpecBytecodeNumber, isNotNumber(value)); + } + + void speculateRealNumber(Edge edge) + { + // Do an early return here because lowDouble() can create a lot of control flow. + if (!m_interpreter.needsTypeCheck(edge)) + return; + + LValue value = lowJSValue(edge, ManualOperandSpeculation); + LValue doubleValue = unboxDouble(value); + + LBasicBlock intCase = m_out.newBlock(); + LBasicBlock continuation = m_out.newBlock(); + + m_out.branch( + m_out.doubleEqual(doubleValue, doubleValue), + usually(continuation), rarely(intCase)); + + LBasicBlock lastNext = m_out.appendTo(intCase, continuation); + + typeCheck( + jsValueValue(value), m_node->child1(), SpecBytecodeRealNumber, + isNotInt32(value, provenType(m_node->child1()) & ~SpecFullDouble)); + m_out.jump(continuation); + + m_out.appendTo(continuation, lastNext); + } + + void speculateDoubleRepReal(Edge edge) + { + // Do an early return here because lowDouble() can create a lot of control flow. 
+ if (!m_interpreter.needsTypeCheck(edge)) + return; + + LValue value = lowDouble(edge); + FTL_TYPE_CHECK( + doubleValue(value), edge, SpecDoubleReal, + m_out.doubleNotEqualOrUnordered(value, value)); + } + + void speculateDoubleRepMachineInt(Edge edge) + { + if (!m_interpreter.needsTypeCheck(edge)) + return; + + doubleToStrictInt52(edge, lowDouble(edge)); + } + + void speculateBoolean(Edge edge) + { + lowBoolean(edge); + } + + void speculateNotStringVar(Edge edge) + { + if (!m_interpreter.needsTypeCheck(edge, ~SpecStringVar)) + return; + + LValue value = lowJSValue(edge, ManualOperandSpeculation); + + LBasicBlock isCellCase = m_out.newBlock(); + LBasicBlock isStringCase = m_out.newBlock(); + LBasicBlock continuation = m_out.newBlock(); + + m_out.branch(isCell(value, provenType(edge)), unsure(isCellCase), unsure(continuation)); + + LBasicBlock lastNext = m_out.appendTo(isCellCase, isStringCase); + m_out.branch(isString(value, provenType(edge)), unsure(isStringCase), unsure(continuation)); + + m_out.appendTo(isStringCase, continuation); + speculateStringIdent(edge, value, m_out.loadPtr(value, m_heaps.JSString_value)); + m_out.jump(continuation); + + m_out.appendTo(continuation, lastNext); + } + + void speculateNotCell(Edge edge) + { + if (!m_interpreter.needsTypeCheck(edge)) + return; + + LValue value = lowJSValue(edge, ManualOperandSpeculation); + typeCheck(jsValueValue(value), edge, ~SpecCell, isCell(value)); + } + + void speculateOther(Edge edge) + { + if (!m_interpreter.needsTypeCheck(edge)) + return; + + LValue value = lowJSValue(edge, ManualOperandSpeculation); + typeCheck(jsValueValue(value), edge, SpecOther, isNotOther(value)); + } + + void speculateMisc(Edge edge) + { + if (!m_interpreter.needsTypeCheck(edge)) + return; + + LValue value = lowJSValue(edge, ManualOperandSpeculation); + typeCheck(jsValueValue(value), edge, SpecMisc, isNotMisc(value)); + } + + bool masqueradesAsUndefinedWatchpointIsStillValid() + { + return m_graph.masqueradesAsUndefinedWatchpointIsStillValid(m_node->origin.semantic); + } + + LValue loadCellState(LValue base) + { + return m_out.load8ZeroExt32(base, m_heaps.JSCell_cellState); + } + + void emitStoreBarrier(LValue base) + { + LBasicBlock slowPath = m_out.newBlock(); + LBasicBlock continuation = m_out.newBlock(); + + m_out.branch( + m_out.notZero32(loadCellState(base)), usually(continuation), rarely(slowPath)); + + LBasicBlock lastNext = m_out.appendTo(slowPath, continuation); + + // We emit the store barrier slow path lazily. In a lot of cases, this will never fire. And + // when it does fire, it makes sense for us to generate this code using our JIT rather than + // wasting B3's time optimizing it. + lazySlowPath( + [=] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> { + GPRReg baseGPR = locations[1].directGPR(); + + return LazySlowPath::createGenerator( + [=] (CCallHelpers& jit, LazySlowPath::GenerationParams& params) { + RegisterSet usedRegisters = params.lazySlowPath->usedRegisters(); + ScratchRegisterAllocator scratchRegisterAllocator(usedRegisters); + scratchRegisterAllocator.lock(baseGPR); + + GPRReg scratch1 = scratchRegisterAllocator.allocateScratchGPR(); + GPRReg scratch2 = scratchRegisterAllocator.allocateScratchGPR(); + + ScratchRegisterAllocator::PreservedState preservedState = + scratchRegisterAllocator.preserveReusedRegistersByPushing(jit, ScratchRegisterAllocator::ExtraStackSpace::SpaceForCCall); + + // We've already saved these, so when we make a slow path call, we don't have + // to save them again. 
+ usedRegisters.exclude(RegisterSet(scratch1, scratch2)); + + WriteBarrierBuffer& writeBarrierBuffer = jit.vm()->heap.writeBarrierBuffer(); + jit.load32(writeBarrierBuffer.currentIndexAddress(), scratch2); + CCallHelpers::Jump needToFlush = jit.branch32( + CCallHelpers::AboveOrEqual, scratch2, + CCallHelpers::TrustedImm32(writeBarrierBuffer.capacity())); + + jit.add32(CCallHelpers::TrustedImm32(1), scratch2); + jit.store32(scratch2, writeBarrierBuffer.currentIndexAddress()); + + jit.move(CCallHelpers::TrustedImmPtr(writeBarrierBuffer.buffer()), scratch1); + jit.storePtr( + baseGPR, + CCallHelpers::BaseIndex( + scratch1, scratch2, CCallHelpers::ScalePtr, + static_cast<int32_t>(-sizeof(void*)))); + + scratchRegisterAllocator.restoreReusedRegistersByPopping(jit, preservedState); + + params.doneJumps.append(jit.jump()); + + needToFlush.link(&jit); + callOperation( + usedRegisters, jit, params.lazySlowPath->callSiteIndex(), + params.exceptionJumps, operationFlushWriteBarrierBuffer, InvalidGPRReg, + baseGPR); + scratchRegisterAllocator.restoreReusedRegistersByPopping(jit, preservedState); + params.doneJumps.append(jit.jump()); + }); + }, + base); + m_out.jump(continuation); + + m_out.appendTo(continuation, lastNext); + } + + template<typename... Args> + LValue vmCall(LType type, LValue function, Args... args) + { + callPreflight(); + LValue result = m_out.call(type, function, args...); + callCheck(); + return result; + } + + void callPreflight(CodeOrigin codeOrigin) + { + CallSiteIndex callSiteIndex = m_ftlState.jitCode->common.addCodeOrigin(codeOrigin); + m_out.store32( + m_out.constInt32(callSiteIndex.bits()), + tagFor(JSStack::ArgumentCount)); + } + + void callPreflight() + { + callPreflight(codeOriginDescriptionOfCallSite()); + } + + CodeOrigin codeOriginDescriptionOfCallSite() const + { + CodeOrigin codeOrigin = m_node->origin.semantic; + if (m_node->op() == TailCallInlinedCaller + || m_node->op() == TailCallVarargsInlinedCaller + || m_node->op() == TailCallForwardVarargsInlinedCaller) { + // This case arises when you have a situation like this: + // foo makes a call to bar, bar is inlined in foo. bar makes a call + // to baz and baz is inlined in bar. And then baz makes a tail-call to jaz, + // and jaz is inlined in baz. We want the callframe for jaz to appear to + // have caller be bar. 
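The assignment that follows relies on getCallerSkippingTailCalls() to pick the reported caller. As a rough, standalone illustration of that walk (hypothetical, simplified types; InlineFrameSketch and callerSkippingTailCallsSketch are illustrative names, not JSC API):

struct InlineFrameSketch {
    InlineFrameSketch* caller;   // null for the outermost (machine) frame
    bool enteredViaTailCall;     // true if this frame was entered by a tail call
};

inline InlineFrameSketch* callerSkippingTailCallsSketch(InlineFrameSketch* frame)
{
    // Keep walking up while the current frame reused its caller's slot via a
    // tail call; the first frame reached by an ordinary call is the one that
    // should appear as the caller (bar, in the foo/bar/baz/jaz example above).
    InlineFrameSketch* caller = frame->caller;
    while (caller && frame->enteredViaTailCall) {
        frame = caller;
        caller = frame->caller;
    }
    return caller;
}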
+ codeOrigin = *codeOrigin.inlineCallFrame->getCallerSkippingTailCalls(); + } + + return codeOrigin; + } + + void callCheck() + { + if (Options::useExceptionFuzz()) + m_out.call(m_out.voidType, m_out.operation(operationExceptionFuzz), m_callFrame); + + LValue exception = m_out.load64(m_out.absolute(vm().addressOfException())); + LValue hadException = m_out.notZero64(exception); + + CodeOrigin opCatchOrigin; + HandlerInfo* exceptionHandler; + if (m_graph.willCatchExceptionInMachineFrame(m_origin.forExit, opCatchOrigin, exceptionHandler)) { + bool exitOK = true; + bool isExceptionHandler = true; + appendOSRExit( + ExceptionCheck, noValue(), nullptr, hadException, + m_origin.withForExitAndExitOK(opCatchOrigin, exitOK), isExceptionHandler); + return; + } + + LBasicBlock continuation = m_out.newBlock(); + + m_out.branch( + hadException, rarely(m_handleExceptions), usually(continuation)); + + m_out.appendTo(continuation); + } + + RefPtr<PatchpointExceptionHandle> preparePatchpointForExceptions(PatchpointValue* value) + { + CodeOrigin opCatchOrigin; + HandlerInfo* exceptionHandler; + bool willCatchException = m_graph.willCatchExceptionInMachineFrame(m_origin.forExit, opCatchOrigin, exceptionHandler); + if (!willCatchException) + return PatchpointExceptionHandle::defaultHandle(m_ftlState); + + if (verboseCompilationEnabled()) { + dataLog(" Patchpoint exception OSR exit #", m_ftlState.jitCode->osrExitDescriptors.size(), " with availability: ", availabilityMap(), "\n"); + if (!m_availableRecoveries.isEmpty()) + dataLog(" Available recoveries: ", listDump(m_availableRecoveries), "\n"); + } + + bool exitOK = true; + NodeOrigin origin = m_origin.withForExitAndExitOK(opCatchOrigin, exitOK); + + OSRExitDescriptor* exitDescriptor = appendOSRExitDescriptor(noValue(), nullptr); + + // Compute the offset into the StackmapGenerationParams where we will find the exit arguments + // we are about to append. We need to account for both the children we've already added, and + // for the possibility of a result value if the patchpoint is not void. + unsigned offset = value->numChildren(); + if (value->type() != Void) + offset++; + + // Use LateColdAny to ensure that the stackmap arguments interfere with the patchpoint's + // result and with any late-clobbered registers. 
+ value->appendVectorWithRep( + buildExitArguments(exitDescriptor, opCatchOrigin, noValue()), + ValueRep::LateColdAny); + + return PatchpointExceptionHandle::create( + m_ftlState, exitDescriptor, origin, offset, *exceptionHandler); + } + + LBasicBlock lowBlock(DFG::BasicBlock* block) + { + return m_blocks.get(block); + } + + OSRExitDescriptor* appendOSRExitDescriptor(FormattedValue lowValue, Node* highValue) + { + return &m_ftlState.jitCode->osrExitDescriptors.alloc( + lowValue.format(), m_graph.methodOfGettingAValueProfileFor(highValue), + availabilityMap().m_locals.numberOfArguments(), + availabilityMap().m_locals.numberOfLocals()); + } + + void appendOSRExit( + ExitKind kind, FormattedValue lowValue, Node* highValue, LValue failCondition, + NodeOrigin origin, bool isExceptionHandler = false) + { + if (verboseCompilationEnabled()) { + dataLog(" OSR exit #", m_ftlState.jitCode->osrExitDescriptors.size(), " with availability: ", availabilityMap(), "\n"); + if (!m_availableRecoveries.isEmpty()) + dataLog(" Available recoveries: ", listDump(m_availableRecoveries), "\n"); + } + + DFG_ASSERT(m_graph, m_node, origin.exitOK); + + if (doOSRExitFuzzing() && !isExceptionHandler) { + LValue numberOfFuzzChecks = m_out.add( + m_out.load32(m_out.absolute(&g_numberOfOSRExitFuzzChecks)), + m_out.int32One); + + m_out.store32(numberOfFuzzChecks, m_out.absolute(&g_numberOfOSRExitFuzzChecks)); + + if (unsigned atOrAfter = Options::fireOSRExitFuzzAtOrAfter()) { + failCondition = m_out.bitOr( + failCondition, + m_out.aboveOrEqual(numberOfFuzzChecks, m_out.constInt32(atOrAfter))); + } + if (unsigned at = Options::fireOSRExitFuzzAt()) { + failCondition = m_out.bitOr( + failCondition, + m_out.equal(numberOfFuzzChecks, m_out.constInt32(at))); + } + } + + if (failCondition == m_out.booleanFalse) + return; + + blessSpeculation( + m_out.speculate(failCondition), kind, lowValue, highValue, origin); + } + + void blessSpeculation(CheckValue* value, ExitKind kind, FormattedValue lowValue, Node* highValue, NodeOrigin origin) + { + OSRExitDescriptor* exitDescriptor = appendOSRExitDescriptor(lowValue, highValue); + + value->appendColdAnys(buildExitArguments(exitDescriptor, origin.forExit, lowValue)); + + State* state = &m_ftlState; + value->setGenerator( + [=] (CCallHelpers& jit, const B3::StackmapGenerationParams& params) { + exitDescriptor->emitOSRExit( + *state, kind, origin, jit, params, 0); + }); + } + + StackmapArgumentList buildExitArguments( + OSRExitDescriptor* exitDescriptor, CodeOrigin exitOrigin, FormattedValue lowValue, + unsigned offsetOfExitArgumentsInStackmapLocations = 0) + { + StackmapArgumentList result; + buildExitArguments( + exitDescriptor, exitOrigin, result, lowValue, offsetOfExitArgumentsInStackmapLocations); + return result; + } + + void buildExitArguments( + OSRExitDescriptor* exitDescriptor, CodeOrigin exitOrigin, StackmapArgumentList& arguments, FormattedValue lowValue, + unsigned offsetOfExitArgumentsInStackmapLocations = 0) + { + if (!!lowValue) + arguments.append(lowValue.value()); + + AvailabilityMap availabilityMap = this->availabilityMap(); + availabilityMap.pruneByLiveness(m_graph, exitOrigin); + + HashMap<Node*, ExitTimeObjectMaterialization*> map; + availabilityMap.forEachAvailability( + [&] (Availability availability) { + if (!availability.shouldUseNode()) + return; + + Node* node = availability.node(); + if (!node->isPhantomAllocation()) + return; + + auto result = map.add(node, nullptr); + if (result.isNewEntry) { + result.iterator->value = + 
exitDescriptor->m_materializations.add(node->op(), node->origin.semantic); + } + }); + + for (unsigned i = 0; i < exitDescriptor->m_values.size(); ++i) { + int operand = exitDescriptor->m_values.operandForIndex(i); + + Availability availability = availabilityMap.m_locals[i]; + + if (Options::validateFTLOSRExitLiveness()) { + DFG_ASSERT( + m_graph, m_node, + (!(availability.isDead() && m_graph.isLiveInBytecode(VirtualRegister(operand), exitOrigin))) || m_graph.m_plan.mode == FTLForOSREntryMode); + } + ExitValue exitValue = exitValueForAvailability(arguments, map, availability); + if (exitValue.hasIndexInStackmapLocations()) + exitValue.adjustStackmapLocationsIndexByOffset(offsetOfExitArgumentsInStackmapLocations); + exitDescriptor->m_values[i] = exitValue; + } + + for (auto heapPair : availabilityMap.m_heap) { + Node* node = heapPair.key.base(); + ExitTimeObjectMaterialization* materialization = map.get(node); + ExitValue exitValue = exitValueForAvailability(arguments, map, heapPair.value); + if (exitValue.hasIndexInStackmapLocations()) + exitValue.adjustStackmapLocationsIndexByOffset(offsetOfExitArgumentsInStackmapLocations); + materialization->add( + heapPair.key.descriptor(), + exitValue); + } + + if (verboseCompilationEnabled()) { + dataLog(" Exit values: ", exitDescriptor->m_values, "\n"); + if (!exitDescriptor->m_materializations.isEmpty()) { + dataLog(" Materializations: \n"); + for (ExitTimeObjectMaterialization* materialization : exitDescriptor->m_materializations) + dataLog(" ", pointerDump(materialization), "\n"); + } + } + } + + ExitValue exitValueForAvailability( + StackmapArgumentList& arguments, const HashMap<Node*, ExitTimeObjectMaterialization*>& map, + Availability availability) + { + FlushedAt flush = availability.flushedAt(); + switch (flush.format()) { + case DeadFlush: + case ConflictingFlush: + if (availability.hasNode()) + return exitValueForNode(arguments, map, availability.node()); + + // This means that the value is dead. It could be dead in bytecode or it could have + // been killed by our DCE, which can sometimes kill things even if they were live in + // bytecode. + return ExitValue::dead(); + + case FlushedJSValue: + case FlushedCell: + case FlushedBoolean: + return ExitValue::inJSStack(flush.virtualRegister()); + + case FlushedInt32: + return ExitValue::inJSStackAsInt32(flush.virtualRegister()); + + case FlushedInt52: + return ExitValue::inJSStackAsInt52(flush.virtualRegister()); + + case FlushedDouble: + return ExitValue::inJSStackAsDouble(flush.virtualRegister()); + } + + DFG_CRASH(m_graph, m_node, "Invalid flush format"); + return ExitValue::dead(); + } + + ExitValue exitValueForNode( + StackmapArgumentList& arguments, const HashMap<Node*, ExitTimeObjectMaterialization*>& map, + Node* node) + { + // NOTE: In FTL->B3, we cannot generate code here, because m_output is positioned after the + // stackmap value. Like all values, the stackmap value cannot use a child that is defined after + // it. + + ASSERT(node->shouldGenerate()); + ASSERT(node->hasResult()); + + if (node) { + switch (node->op()) { + case BottomValue: + // This might arise in object materializations. I actually doubt that it would, + // but it seems worthwhile to be conservative. 
+ return ExitValue::dead(); + + case JSConstant: + case Int52Constant: + case DoubleConstant: + return ExitValue::constant(node->asJSValue()); + + default: + if (node->isPhantomAllocation()) + return ExitValue::materializeNewObject(map.get(node)); + break; + } + } + + for (unsigned i = 0; i < m_availableRecoveries.size(); ++i) { + AvailableRecovery recovery = m_availableRecoveries[i]; + if (recovery.node() != node) + continue; + ExitValue result = ExitValue::recovery( + recovery.opcode(), arguments.size(), arguments.size() + 1, + recovery.format()); + arguments.append(recovery.left()); + arguments.append(recovery.right()); + return result; + } + + LoweredNodeValue value = m_int32Values.get(node); + if (isValid(value)) + return exitArgument(arguments, DataFormatInt32, value.value()); + + value = m_int52Values.get(node); + if (isValid(value)) + return exitArgument(arguments, DataFormatInt52, value.value()); + + value = m_strictInt52Values.get(node); + if (isValid(value)) + return exitArgument(arguments, DataFormatStrictInt52, value.value()); + + value = m_booleanValues.get(node); + if (isValid(value)) + return exitArgument(arguments, DataFormatBoolean, value.value()); + + value = m_jsValueValues.get(node); + if (isValid(value)) + return exitArgument(arguments, DataFormatJS, value.value()); + + value = m_doubleValues.get(node); + if (isValid(value)) + return exitArgument(arguments, DataFormatDouble, value.value()); + + DFG_CRASH(m_graph, m_node, toCString("Cannot find value for node: ", node).data()); + return ExitValue::dead(); + } + + ExitValue exitArgument(StackmapArgumentList& arguments, DataFormat format, LValue value) + { + ExitValue result = ExitValue::exitArgument(ExitArgument(format, arguments.size())); + arguments.append(value); + return result; + } + + ExitValue exitValueForTailCall(StackmapArgumentList& arguments, Node* node) + { + ASSERT(node->shouldGenerate()); + ASSERT(node->hasResult()); + + switch (node->op()) { + case JSConstant: + case Int52Constant: + case DoubleConstant: + return ExitValue::constant(node->asJSValue()); + + default: + break; + } + + LoweredNodeValue value = m_jsValueValues.get(node); + if (isValid(value)) + return exitArgument(arguments, DataFormatJS, value.value()); + + value = m_int32Values.get(node); + if (isValid(value)) + return exitArgument(arguments, DataFormatJS, boxInt32(value.value())); + + value = m_booleanValues.get(node); + if (isValid(value)) + return exitArgument(arguments, DataFormatJS, boxBoolean(value.value())); + + // Doubles and Int52 have been converted by ValueRep() + DFG_CRASH(m_graph, m_node, toCString("Cannot find value for node: ", node).data()); + } + + bool doesKill(Edge edge) + { + if (edge.doesNotKill()) + return false; + + if (edge->hasConstant()) + return false; + + return true; + } + + void addAvailableRecovery( + Node* node, RecoveryOpcode opcode, LValue left, LValue right, DataFormat format) + { + m_availableRecoveries.append(AvailableRecovery(node, opcode, left, right, format)); + } + + void addAvailableRecovery( + Edge edge, RecoveryOpcode opcode, LValue left, LValue right, DataFormat format) + { + addAvailableRecovery(edge.node(), opcode, left, right, format); + } + + void setInt32(Node* node, LValue value) + { + m_int32Values.set(node, LoweredNodeValue(value, m_highBlock)); + } + void setInt52(Node* node, LValue value) + { + m_int52Values.set(node, LoweredNodeValue(value, m_highBlock)); + } + void setStrictInt52(Node* node, LValue value) + { + m_strictInt52Values.set(node, LoweredNodeValue(value, m_highBlock)); + } + 
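These setX() helpers record, for each DFG node, the B3 value it lowered to together with the block that produced it; the isValid() helper further down only reuses a recorded value when that block dominates the block doing the lookup. A minimal standalone sketch of that caching discipline, assuming hypothetical types (LoweringCacheSketch and friends are illustrative, not the JSC classes) and a dominance query supplied by the caller:

#include <unordered_map>

struct BlockSketch;
struct ValueSketch;

struct LoweredValueSketch {
    ValueSketch* value = nullptr;
    BlockSketch* definingBlock = nullptr;
};

struct LoweringCacheSketch {
    // One cached lowering per node, keyed by the node's identity.
    std::unordered_map<const void*, LoweredValueSketch> values;

    void set(const void* node, ValueSketch* value, BlockSketch* block)
    {
        values[node] = { value, block };
    }

    // dominates(a, b) is assumed to answer whether block a dominates block b.
    ValueSketch* getIfUsable(const void* node, BlockSketch* useBlock,
        bool (*dominates)(BlockSketch*, BlockSketch*)) const
    {
        auto it = values.find(node);
        if (it == values.end() || !it->second.value)
            return nullptr;
        if (!dominates(it->second.definingBlock, useBlock))
            return nullptr; // the cached lowering does not dominate this use
        return it->second.value;
    }
};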
void setInt52(Node* node, LValue value, Int52Kind kind) + { + switch (kind) { + case Int52: + setInt52(node, value); + return; + + case StrictInt52: + setStrictInt52(node, value); + return; + } + + DFG_CRASH(m_graph, m_node, "Corrupt int52 kind"); + } + void setJSValue(Node* node, LValue value) + { + m_jsValueValues.set(node, LoweredNodeValue(value, m_highBlock)); + } + void setBoolean(Node* node, LValue value) + { + m_booleanValues.set(node, LoweredNodeValue(value, m_highBlock)); + } + void setStorage(Node* node, LValue value) + { + m_storageValues.set(node, LoweredNodeValue(value, m_highBlock)); + } + void setDouble(Node* node, LValue value) + { + m_doubleValues.set(node, LoweredNodeValue(value, m_highBlock)); + } + + void setInt32(LValue value) + { + setInt32(m_node, value); + } + void setInt52(LValue value) + { + setInt52(m_node, value); + } + void setStrictInt52(LValue value) + { + setStrictInt52(m_node, value); + } + void setInt52(LValue value, Int52Kind kind) + { + setInt52(m_node, value, kind); + } + void setJSValue(LValue value) + { + setJSValue(m_node, value); + } + void setBoolean(LValue value) + { + setBoolean(m_node, value); + } + void setStorage(LValue value) + { + setStorage(m_node, value); + } + void setDouble(LValue value) + { + setDouble(m_node, value); + } + + bool isValid(const LoweredNodeValue& value) + { + if (!value) + return false; + if (!m_graph.m_dominators->dominates(value.block(), m_highBlock)) + return false; + return true; + } + + void addWeakReference(JSCell* target) + { + m_graph.m_plan.weakReferences.addLazily(target); + } + + LValue loadStructure(LValue value) + { + LValue tableIndex = m_out.load32(value, m_heaps.JSCell_structureID); + LValue tableBase = m_out.loadPtr( + m_out.absolute(vm().heap.structureIDTable().base())); + TypedPointer address = m_out.baseIndex( + m_heaps.structureTable, tableBase, m_out.zeroExtPtr(tableIndex)); + return m_out.loadPtr(address); + } + + LValue weakPointer(JSCell* pointer) + { + addWeakReference(pointer); + return m_out.constIntPtr(pointer); + } + + LValue weakStructureID(Structure* structure) + { + addWeakReference(structure); + return m_out.constInt32(structure->id()); + } + + LValue weakStructure(Structure* structure) + { + return weakPointer(structure); + } + + TypedPointer addressFor(LValue base, int operand, ptrdiff_t offset = 0) + { + return m_out.address(base, m_heaps.variables[operand], offset); + } + TypedPointer payloadFor(LValue base, int operand) + { + return addressFor(base, operand, PayloadOffset); + } + TypedPointer tagFor(LValue base, int operand) + { + return addressFor(base, operand, TagOffset); + } + TypedPointer addressFor(int operand, ptrdiff_t offset = 0) + { + return addressFor(VirtualRegister(operand), offset); + } + TypedPointer addressFor(VirtualRegister operand, ptrdiff_t offset = 0) + { + if (operand.isLocal()) + return addressFor(m_captured, operand.offset(), offset); + return addressFor(m_callFrame, operand.offset(), offset); + } + TypedPointer payloadFor(int operand) + { + return payloadFor(VirtualRegister(operand)); + } + TypedPointer payloadFor(VirtualRegister operand) + { + return addressFor(operand, PayloadOffset); + } + TypedPointer tagFor(int operand) + { + return tagFor(VirtualRegister(operand)); + } + TypedPointer tagFor(VirtualRegister operand) + { + return addressFor(operand, TagOffset); + } + + AbstractValue abstractValue(Node* node) + { + return m_state.forNode(node); + } + AbstractValue abstractValue(Edge edge) + { + return abstractValue(edge.node()); + } + + SpeculatedType 
provenType(Node* node) + { + return abstractValue(node).m_type; + } + SpeculatedType provenType(Edge edge) + { + return provenType(edge.node()); + } + + JSValue provenValue(Node* node) + { + return abstractValue(node).m_value; + } + JSValue provenValue(Edge edge) + { + return provenValue(edge.node()); + } + + StructureAbstractValue abstractStructure(Node* node) + { + return abstractValue(node).m_structure; + } + StructureAbstractValue abstractStructure(Edge edge) + { + return abstractStructure(edge.node()); + } + +#if ENABLE(MASM_PROBE) + void probe(std::function<void (CCallHelpers::ProbeContext*)> probeFunc) + { + UNUSED_PARAM(probeFunc); + } +#endif + + void crash() + { + crash(m_highBlock->index, m_node->index()); + } + void crash(BlockIndex blockIndex, unsigned nodeIndex) + { +#if ASSERT_DISABLED + m_out.call(m_out.voidType, m_out.operation(ftlUnreachable)); + UNUSED_PARAM(blockIndex); + UNUSED_PARAM(nodeIndex); +#else + m_out.call( + m_out.voidType, + m_out.constIntPtr(ftlUnreachable), + m_out.constIntPtr(codeBlock()), m_out.constInt32(blockIndex), + m_out.constInt32(nodeIndex)); +#endif + m_out.unreachable(); + } + + AvailabilityMap& availabilityMap() { return m_availabilityCalculator.m_availability; } + + VM& vm() { return m_graph.m_vm; } + CodeBlock* codeBlock() { return m_graph.m_codeBlock; } + + Graph& m_graph; + State& m_ftlState; + AbstractHeapRepository m_heaps; + Output m_out; + Procedure& m_proc; + + LBasicBlock m_prologue; + LBasicBlock m_handleExceptions; + HashMap<DFG::BasicBlock*, LBasicBlock> m_blocks; + + LValue m_callFrame; + LValue m_captured; + LValue m_tagTypeNumber; + LValue m_tagMask; + + HashMap<Node*, LoweredNodeValue> m_int32Values; + HashMap<Node*, LoweredNodeValue> m_strictInt52Values; + HashMap<Node*, LoweredNodeValue> m_int52Values; + HashMap<Node*, LoweredNodeValue> m_jsValueValues; + HashMap<Node*, LoweredNodeValue> m_booleanValues; + HashMap<Node*, LoweredNodeValue> m_storageValues; + HashMap<Node*, LoweredNodeValue> m_doubleValues; + + // This is a bit of a hack. It prevents B3 from having to do CSE on loading of arguments. + // It's nice to have these optimizations on our end because we can guarantee them a bit better. + // Probably also saves B3 compile time. + HashMap<Node*, LValue> m_loadedArgumentValues; + + HashMap<Node*, LValue> m_phis; + + LocalOSRAvailabilityCalculator m_availabilityCalculator; + + Vector<AvailableRecovery, 3> m_availableRecoveries; + + InPlaceAbstractState m_state; + AbstractInterpreter<InPlaceAbstractState> m_interpreter; + DFG::BasicBlock* m_highBlock; + DFG::BasicBlock* m_nextHighBlock; + LBasicBlock m_nextLowBlock; + + NodeOrigin m_origin; + unsigned m_nodeIndex; + Node* m_node; +}; + +} // anonymous namespace + +void lowerDFGToB3(State& state) +{ + LowerDFGToB3 lowering(state); + lowering.lower(); +} + +} } // namespace JSC::FTL + +#endif // ENABLE(FTL_JIT) + diff --git a/Source/JavaScriptCore/ftl/FTLLowerDFGToB3.h b/Source/JavaScriptCore/ftl/FTLLowerDFGToB3.h new file mode 100644 index 000000000..f1c3a81e3 --- /dev/null +++ b/Source/JavaScriptCore/ftl/FTLLowerDFGToB3.h @@ -0,0 +1,43 @@ +/* + * Copyright (C) 2013 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef FTLLowerDFGToB3_h +#define FTLLowerDFGToB3_h + +#if ENABLE(FTL_JIT) + +#include "DFGGraph.h" +#include "FTLState.h" + +namespace JSC { namespace FTL { + +void lowerDFGToB3(State&); + +} } // namespace JSC::FTL + +#endif // ENABLE(FTL_JIT) + +#endif // FTLLowerDFGToB3_h + diff --git a/Source/JavaScriptCore/ftl/FTLLoweredNodeValue.h b/Source/JavaScriptCore/ftl/FTLLoweredNodeValue.h new file mode 100644 index 000000000..fc60e567f --- /dev/null +++ b/Source/JavaScriptCore/ftl/FTLLoweredNodeValue.h @@ -0,0 +1,76 @@ +/* + * Copyright (C) 2013 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef FTLLoweredNodeValue_h +#define FTLLoweredNodeValue_h + +#if ENABLE(FTL_JIT) + +#include "DFGBasicBlock.h" +#include "FTLAbbreviatedTypes.h" + +namespace JSC { namespace FTL { + +// Represents the act of having lowered a DFG::Node to an LValue, and records the +// DFG::BasicBlock that did the lowering. The LValue is what we use most often, but +// we need to verify that we're in a block that is dominated by the one that did +// the lowering. 
We're guaranteed that for each DFG::Node, there will be one +// LoweredNodeValue that always dominates all uses of the DFG::Node; but there may +// be others that don't dominate and we're effectively doing opportunistic GVN on +// the lowering code. + +class LoweredNodeValue { +public: + LoweredNodeValue() + : m_value(0) + , m_block(0) + { + } + + LoweredNodeValue(LValue value, DFG::BasicBlock* block) + : m_value(value) + , m_block(block) + { + ASSERT(m_value); + ASSERT(m_block); + } + + bool isSet() const { return !!m_value; } + bool operator!() const { return !isSet(); } + + LValue value() const { return m_value; } + DFG::BasicBlock* block() const { return m_block; } + +private: + LValue m_value; + DFG::BasicBlock* m_block; +}; + +} } // namespace JSC::FTL + +#endif // ENABLE(FTL_JIT) + +#endif // FTLLoweredNodeValue_h + diff --git a/Source/JavaScriptCore/ftl/FTLOSREntry.cpp b/Source/JavaScriptCore/ftl/FTLOSREntry.cpp new file mode 100644 index 000000000..601b46094 --- /dev/null +++ b/Source/JavaScriptCore/ftl/FTLOSREntry.cpp @@ -0,0 +1,114 @@ +/* + * Copyright (C) 2013, 2014 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "config.h" +#include "FTLOSREntry.h" + +#include "CallFrame.h" +#include "CodeBlock.h" +#include "DFGJITCode.h" +#include "FTLForOSREntryJITCode.h" +#include "JSStackInlines.h" +#include "OperandsInlines.h" +#include "JSCInlines.h" + +#if ENABLE(FTL_JIT) + +namespace JSC { namespace FTL { + +SUPPRESS_ASAN +void* prepareOSREntry( + ExecState* exec, CodeBlock* dfgCodeBlock, CodeBlock* entryCodeBlock, + unsigned bytecodeIndex, unsigned streamIndex) +{ + VM& vm = exec->vm(); + CodeBlock* baseline = dfgCodeBlock->baselineVersion(); + ExecutableBase* executable = dfgCodeBlock->ownerExecutable(); + DFG::JITCode* dfgCode = dfgCodeBlock->jitCode()->dfg(); + ForOSREntryJITCode* entryCode = entryCodeBlock->jitCode()->ftlForOSREntry(); + + if (Options::verboseOSR()) { + dataLog( + "FTL OSR from ", *dfgCodeBlock, " to ", *entryCodeBlock, " at bc#", + bytecodeIndex, ".\n"); + } + + if (bytecodeIndex) + jsCast<ScriptExecutable*>(executable)->setDidTryToEnterInLoop(true); + + if (bytecodeIndex != entryCode->bytecodeIndex()) { + if (Options::verboseOSR()) + dataLog(" OSR failed because we don't have an entrypoint for bc#", bytecodeIndex, "; ours is for bc#", entryCode->bytecodeIndex(), "\n"); + return 0; + } + + Operands<JSValue> values; + dfgCode->reconstruct( + exec, dfgCodeBlock, CodeOrigin(bytecodeIndex), streamIndex, values); + + if (Options::verboseOSR()) + dataLog(" Values at entry: ", values, "\n"); + + for (int argument = values.numberOfArguments(); argument--;) { + JSValue valueOnStack = exec->r(virtualRegisterForArgument(argument).offset()).asanUnsafeJSValue(); + JSValue reconstructedValue = values.argument(argument); + if (valueOnStack == reconstructedValue || !argument) + continue; + dataLog("Mismatch between reconstructed values and the the value on the stack for argument arg", argument, " for ", *entryCodeBlock, " at bc#", bytecodeIndex, ":\n"); + dataLog(" Value on stack: ", valueOnStack, "\n"); + dataLog(" Reconstructed value: ", reconstructedValue, "\n"); + RELEASE_ASSERT_NOT_REACHED(); + } + + RELEASE_ASSERT( + static_cast<int>(values.numberOfLocals()) == baseline->m_numCalleeLocals); + + EncodedJSValue* scratch = static_cast<EncodedJSValue*>( + entryCode->entryBuffer()->dataBuffer()); + + for (int local = values.numberOfLocals(); local--;) + scratch[local] = JSValue::encode(values.local(local)); + + int stackFrameSize = entryCode->common.requiredRegisterCountForExecutionAndExit(); + if (!vm.interpreter->stack().ensureCapacityFor(&exec->registers()[virtualRegisterForLocal(stackFrameSize - 1).offset()])) { + if (Options::verboseOSR()) + dataLog(" OSR failed because stack growth failed.\n"); + return 0; + } + + exec->setCodeBlock(entryCodeBlock); + + void* result = entryCode->addressForCall(ArityCheckNotRequired).executableAddress(); + if (Options::verboseOSR()) + dataLog(" Entry will succeed, going to address", RawPointer(result), "\n"); + + return result; +} + +} } // namespace JSC::FTL + +#endif // ENABLE(FTL_JIT) + + diff --git a/Source/JavaScriptCore/ftl/FTLOSREntry.h b/Source/JavaScriptCore/ftl/FTLOSREntry.h new file mode 100644 index 000000000..3e2c11e43 --- /dev/null +++ b/Source/JavaScriptCore/ftl/FTLOSREntry.h @@ -0,0 +1,47 @@ +/* + * Copyright (C) 2013 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef FTLOSREntry_h +#define FTLOSREntry_h + +#if ENABLE(FTL_JIT) + +namespace JSC { + +class CodeBlock; +class ExecState; + +namespace FTL { + +void* prepareOSREntry( + ExecState*, CodeBlock* dfgCodeBlock, CodeBlock* entryCodeBlock, unsigned bytecodeIndex, + unsigned streamIndex); + +} } // namespace JSC::FTL + +#endif // ENABLE(FTL_JIT) + +#endif // FTLOSREntry_h + diff --git a/Source/JavaScriptCore/ftl/FTLOSRExit.cpp b/Source/JavaScriptCore/ftl/FTLOSRExit.cpp new file mode 100644 index 000000000..a9cd3b90d --- /dev/null +++ b/Source/JavaScriptCore/ftl/FTLOSRExit.cpp @@ -0,0 +1,120 @@ +/* + * Copyright (C) 2013, 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "config.h" +#include "FTLOSRExit.h" + +#if ENABLE(FTL_JIT) + +#include "AirGenerationContext.h" +#include "B3StackmapGenerationParams.h" +#include "B3StackmapValue.h" +#include "CodeBlock.h" +#include "DFGBasicBlock.h" +#include "DFGNode.h" +#include "FTLExitArgument.h" +#include "FTLJITCode.h" +#include "FTLLocation.h" +#include "FTLState.h" +#include "JSCInlines.h" + +namespace JSC { namespace FTL { + +using namespace B3; +using namespace DFG; + +OSRExitDescriptor::OSRExitDescriptor( + DataFormat profileDataFormat, MethodOfGettingAValueProfile valueProfile, + unsigned numberOfArguments, unsigned numberOfLocals) + : m_profileDataFormat(profileDataFormat) + , m_valueProfile(valueProfile) + , m_values(numberOfArguments, numberOfLocals) +{ +} + +void OSRExitDescriptor::validateReferences(const TrackedReferences& trackedReferences) +{ + for (unsigned i = m_values.size(); i--;) + m_values[i].validateReferences(trackedReferences); + + for (ExitTimeObjectMaterialization* materialization : m_materializations) + materialization->validateReferences(trackedReferences); +} + +RefPtr<OSRExitHandle> OSRExitDescriptor::emitOSRExit( + State& state, ExitKind exitKind, const NodeOrigin& nodeOrigin, CCallHelpers& jit, + const StackmapGenerationParams& params, unsigned offset) +{ + RefPtr<OSRExitHandle> handle = + prepareOSRExitHandle(state, exitKind, nodeOrigin, params, offset); + handle->emitExitThunk(state, jit); + return handle; +} + +RefPtr<OSRExitHandle> OSRExitDescriptor::emitOSRExitLater( + State& state, ExitKind exitKind, const NodeOrigin& nodeOrigin, + const StackmapGenerationParams& params, unsigned offset) +{ + RefPtr<OSRExitHandle> handle = + prepareOSRExitHandle(state, exitKind, nodeOrigin, params, offset); + params.addLatePath( + [handle, &state] (CCallHelpers& jit) { + handle->emitExitThunk(state, jit); + }); + return handle; +} + +RefPtr<OSRExitHandle> OSRExitDescriptor::prepareOSRExitHandle( + State& state, ExitKind exitKind, const NodeOrigin& nodeOrigin, + const StackmapGenerationParams& params, unsigned offset) +{ + unsigned index = state.jitCode->osrExit.size(); + OSRExit& exit = state.jitCode->osrExit.alloc( + this, exitKind, nodeOrigin.forExit, nodeOrigin.semantic); + RefPtr<OSRExitHandle> handle = adoptRef(new OSRExitHandle(index, exit)); + for (unsigned i = offset; i < params.size(); ++i) + exit.m_valueReps.append(params[i]); + exit.m_valueReps.shrinkToFit(); + return handle; +} + +OSRExit::OSRExit( + OSRExitDescriptor* descriptor, + ExitKind exitKind, CodeOrigin codeOrigin, CodeOrigin codeOriginForExitProfile) + : OSRExitBase(exitKind, codeOrigin, codeOriginForExitProfile) + , m_descriptor(descriptor) +{ +} + +CodeLocationJump OSRExit::codeLocationForRepatch(CodeBlock* ftlCodeBlock) const +{ + UNUSED_PARAM(ftlCodeBlock); + return m_patchableJump; +} + +} } // namespace JSC::FTL + +#endif // ENABLE(FTL_JIT) + diff --git a/Source/JavaScriptCore/ftl/FTLOSRExit.h b/Source/JavaScriptCore/ftl/FTLOSRExit.h new file mode 100644 index 000000000..903527af0 --- /dev/null +++ b/Source/JavaScriptCore/ftl/FTLOSRExit.h @@ -0,0 +1,143 @@ +/* + * Copyright (C) 2013-2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef FTLOSRExit_h +#define FTLOSRExit_h + +#if ENABLE(FTL_JIT) + +#include "B3ValueRep.h" +#include "CodeOrigin.h" +#include "DFGExitProfile.h" +#include "DFGNodeOrigin.h" +#include "DFGOSRExitBase.h" +#include "FTLAbbreviatedTypes.h" +#include "FTLExitTimeObjectMaterialization.h" +#include "FTLExitValue.h" +#include "FTLFormattedValue.h" +#include "FTLOSRExitHandle.h" +#include "FTLStackmapArgumentList.h" +#include "HandlerInfo.h" +#include "MethodOfGettingAValueProfile.h" +#include "Operands.h" +#include "Reg.h" +#include "ValueProfile.h" +#include "VirtualRegister.h" + +namespace JSC { + +class TrackedReferences; + +namespace B3 { +class StackmapGenerationParams; +namespace Air { +struct GenerationContext; +} // namespace Air +} // namespace B3 + +namespace DFG { +struct NodeOrigin; +} // namespace DFG; + +namespace FTL { + +class State; +struct OSRExitDescriptorImpl; +struct OSRExitHandle; + +struct OSRExitDescriptor { + OSRExitDescriptor( + DataFormat profileDataFormat, MethodOfGettingAValueProfile, + unsigned numberOfArguments, unsigned numberOfLocals); + + // The first argument to the exit call may be a value we wish to profile. + // If that's the case, the format will be not Invalid and we'll have a + // method of getting a value profile. Note that all of the ExitArgument's + // are already aware of this possible off-by-one, so there is no need to + // correct them. + DataFormat m_profileDataFormat; + MethodOfGettingAValueProfile m_valueProfile; + + Operands<ExitValue> m_values; + Bag<ExitTimeObjectMaterialization> m_materializations; + + void validateReferences(const TrackedReferences&); + + // Call this once we have a place to emit the OSR exit jump and we have data about how the state + // should be recovered. This effectively emits code that does the exit, though the code is really a + // patchable jump and we emit the real code lazily. The description of how to emit the real code is + // up to the OSRExit object, which this creates. Note that it's OK to drop the OSRExitHandle object + // on the ground. It contains information that is mostly not useful if you use this API, since after + // this call, the OSRExit is simply ready to go. + RefPtr<OSRExitHandle> emitOSRExit( + State&, ExitKind, const DFG::NodeOrigin&, CCallHelpers&, const B3::StackmapGenerationParams&, + unsigned offset = 0); + + // In some cases you want an OSRExit to come into existence, but you don't want to emit it right now. + // This will emit the OSR exit in a late path. 
You can't be sure exactly when that will happen, but + // you know that it will be done by the time late path emission is done. So, a linker task will + // surely happen after that. You can use the OSRExitHandle to retrieve the exit's label. + // + // This API is meant to be used for things like exception handling, where some patchpoint wants to + // have a place to jump to for OSR exit. It doesn't care where that OSR exit is emitted so long as it + // eventually gets access to its label. + RefPtr<OSRExitHandle> emitOSRExitLater( + State&, ExitKind, const DFG::NodeOrigin&, const B3::StackmapGenerationParams&, + unsigned offset = 0); + +private: + // This is the low-level interface. It will create a handle representing the desire to emit code for + // an OSR exit. You can call OSRExitHandle::emitExitThunk() once you have a place to emit it. Note + // that the above two APIs are written in terms of this and OSRExitHandle::emitExitThunk(). + RefPtr<OSRExitHandle> prepareOSRExitHandle( + State&, ExitKind, const DFG::NodeOrigin&, const B3::StackmapGenerationParams&, + unsigned offset = 0); +}; + +struct OSRExit : public DFG::OSRExitBase { + OSRExit( + OSRExitDescriptor*, ExitKind, + CodeOrigin, CodeOrigin codeOriginForExitProfile + ); + + OSRExitDescriptor* m_descriptor; + MacroAssemblerCodeRef m_code; + // This tells us where to place a jump. + CodeLocationJump m_patchableJump; + Vector<B3::ValueRep> m_valueReps; + + CodeLocationJump codeLocationForRepatch(CodeBlock* ftlCodeBlock) const; + void considerAddingAsFrequentExitSite(CodeBlock* profiledCodeBlock) + { + OSRExitBase::considerAddingAsFrequentExitSite(profiledCodeBlock, ExitFromFTL); + } +}; + +} } // namespace JSC::FTL + +#endif // ENABLE(FTL_JIT) + +#endif // FTLOSRExit_h + diff --git a/Source/JavaScriptCore/ftl/FTLOSRExitCompiler.cpp b/Source/JavaScriptCore/ftl/FTLOSRExitCompiler.cpp new file mode 100644 index 000000000..473192211 --- /dev/null +++ b/Source/JavaScriptCore/ftl/FTLOSRExitCompiler.cpp @@ -0,0 +1,587 @@ +/* + * Copyright (C) 2013-2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "config.h" +#include "FTLOSRExitCompiler.h" + +#if ENABLE(FTL_JIT) + +#include "DFGOSRExitCompilerCommon.h" +#include "DFGOSRExitPreparation.h" +#include "FTLExitArgumentForOperand.h" +#include "FTLJITCode.h" +#include "FTLLocation.h" +#include "FTLOSRExit.h" +#include "FTLOperations.h" +#include "FTLState.h" +#include "FTLSaveRestore.h" +#include "LinkBuffer.h" +#include "MaxFrameExtentForSlowPathCall.h" +#include "OperandsInlines.h" +#include "JSCInlines.h" + +namespace JSC { namespace FTL { + +using namespace DFG; + +static void reboxAccordingToFormat( + DataFormat format, AssemblyHelpers& jit, GPRReg value, GPRReg scratch1, GPRReg scratch2) +{ + switch (format) { + case DataFormatInt32: { + jit.zeroExtend32ToPtr(value, value); + jit.or64(GPRInfo::tagTypeNumberRegister, value); + break; + } + + case DataFormatInt52: { + jit.rshift64(AssemblyHelpers::TrustedImm32(JSValue::int52ShiftAmount), value); + jit.moveDoubleTo64(FPRInfo::fpRegT0, scratch2); + jit.boxInt52(value, value, scratch1, FPRInfo::fpRegT0); + jit.move64ToDouble(scratch2, FPRInfo::fpRegT0); + break; + } + + case DataFormatStrictInt52: { + jit.moveDoubleTo64(FPRInfo::fpRegT0, scratch2); + jit.boxInt52(value, value, scratch1, FPRInfo::fpRegT0); + jit.move64ToDouble(scratch2, FPRInfo::fpRegT0); + break; + } + + case DataFormatBoolean: { + jit.zeroExtend32ToPtr(value, value); + jit.or32(MacroAssembler::TrustedImm32(ValueFalse), value); + break; + } + + case DataFormatJS: { + // Done already! + break; + } + + case DataFormatDouble: { + jit.moveDoubleTo64(FPRInfo::fpRegT0, scratch1); + jit.move64ToDouble(value, FPRInfo::fpRegT0); + jit.purifyNaN(FPRInfo::fpRegT0); + jit.boxDouble(FPRInfo::fpRegT0, value); + jit.move64ToDouble(scratch1, FPRInfo::fpRegT0); + break; + } + + default: + RELEASE_ASSERT_NOT_REACHED(); + break; + } +} + +static void compileRecovery( + CCallHelpers& jit, const ExitValue& value, + Vector<B3::ValueRep>& valueReps, + char* registerScratch, + const HashMap<ExitTimeObjectMaterialization*, EncodedJSValue*>& materializationToPointer) +{ + switch (value.kind()) { + case ExitValueDead: + jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsUndefined())), GPRInfo::regT0); + break; + + case ExitValueConstant: + jit.move(MacroAssembler::TrustedImm64(JSValue::encode(value.constant())), GPRInfo::regT0); + break; + + case ExitValueArgument: + Location::forValueRep(valueReps[value.exitArgument().argument()]).restoreInto( + jit, registerScratch, GPRInfo::regT0); + break; + + case ExitValueInJSStack: + case ExitValueInJSStackAsInt32: + case ExitValueInJSStackAsInt52: + case ExitValueInJSStackAsDouble: + jit.load64(AssemblyHelpers::addressFor(value.virtualRegister()), GPRInfo::regT0); + break; + + case ExitValueRecovery: + Location::forValueRep(valueReps[value.rightRecoveryArgument()]).restoreInto( + jit, registerScratch, GPRInfo::regT1); + Location::forValueRep(valueReps[value.leftRecoveryArgument()]).restoreInto( + jit, registerScratch, GPRInfo::regT0); + switch (value.recoveryOpcode()) { + case AddRecovery: + switch (value.recoveryFormat()) { + case DataFormatInt32: + jit.add32(GPRInfo::regT1, GPRInfo::regT0); + break; + case DataFormatInt52: + jit.add64(GPRInfo::regT1, GPRInfo::regT0); + break; + default: + RELEASE_ASSERT_NOT_REACHED(); + break; + } + break; + case SubRecovery: + switch (value.recoveryFormat()) { + case DataFormatInt32: + jit.sub32(GPRInfo::regT1, GPRInfo::regT0); + break; + case DataFormatInt52: + jit.sub64(GPRInfo::regT1, GPRInfo::regT0); + break; + default: + 
RELEASE_ASSERT_NOT_REACHED(); + break; + } + break; + default: + RELEASE_ASSERT_NOT_REACHED(); + break; + } + break; + + case ExitValueMaterializeNewObject: + jit.loadPtr(materializationToPointer.get(value.objectMaterialization()), GPRInfo::regT0); + break; + + default: + RELEASE_ASSERT_NOT_REACHED(); + break; + } + + reboxAccordingToFormat( + value.dataFormat(), jit, GPRInfo::regT0, GPRInfo::regT1, GPRInfo::regT2); +} + +static void compileStub( + unsigned exitID, JITCode* jitCode, OSRExit& exit, VM* vm, CodeBlock* codeBlock) +{ + // This code requires framePointerRegister is the same as callFrameRegister + static_assert(MacroAssembler::framePointerRegister == GPRInfo::callFrameRegister, "MacroAssembler::framePointerRegister and GPRInfo::callFrameRegister must be the same"); + + CCallHelpers jit(vm, codeBlock); + + // The first thing we need to do is restablish our frame in the case of an exception. + if (exit.isGenericUnwindHandler()) { + RELEASE_ASSERT(vm->callFrameForCatch); // The first time we hit this exit, like at all other times, this field should be non-null. + jit.restoreCalleeSavesFromVMCalleeSavesBuffer(); + jit.loadPtr(vm->addressOfCallFrameForCatch(), MacroAssembler::framePointerRegister); + jit.addPtr(CCallHelpers::TrustedImm32(codeBlock->stackPointerOffset() * sizeof(Register)), + MacroAssembler::framePointerRegister, CCallHelpers::stackPointerRegister); + + // Do a pushToSave because that's what the exit compiler below expects the stack + // to look like because that's the last thing the ExitThunkGenerator does. The code + // below doesn't actually use the value that was pushed, but it does rely on the + // general shape of the stack being as it is in the non-exception OSR case. + jit.pushToSaveImmediateWithoutTouchingRegisters(CCallHelpers::TrustedImm32(0xbadbeef)); + } + + // We need scratch space to save all registers, to build up the JS stack, to deal with unwind + // fixup, pointers to all of the objects we materialize, and the elements inside those objects + // that we materialize. + + // Figure out how much space we need for those object allocations. + unsigned numMaterializations = 0; + size_t maxMaterializationNumArguments = 0; + for (ExitTimeObjectMaterialization* materialization : exit.m_descriptor->m_materializations) { + numMaterializations++; + + maxMaterializationNumArguments = std::max( + maxMaterializationNumArguments, + materialization->properties().size()); + } + + ScratchBuffer* scratchBuffer = vm->scratchBufferForSize( + sizeof(EncodedJSValue) * ( + exit.m_descriptor->m_values.size() + numMaterializations + maxMaterializationNumArguments) + + requiredScratchMemorySizeInBytes() + + codeBlock->calleeSaveRegisters()->size() * sizeof(uint64_t)); + EncodedJSValue* scratch = scratchBuffer ? 
static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer()) : 0; + EncodedJSValue* materializationPointers = scratch + exit.m_descriptor->m_values.size(); + EncodedJSValue* materializationArguments = materializationPointers + numMaterializations; + char* registerScratch = bitwise_cast<char*>(materializationArguments + maxMaterializationNumArguments); + uint64_t* unwindScratch = bitwise_cast<uint64_t*>(registerScratch + requiredScratchMemorySizeInBytes()); + + HashMap<ExitTimeObjectMaterialization*, EncodedJSValue*> materializationToPointer; + unsigned materializationCount = 0; + for (ExitTimeObjectMaterialization* materialization : exit.m_descriptor->m_materializations) { + materializationToPointer.add( + materialization, materializationPointers + materializationCount++); + } + + auto recoverValue = [&] (const ExitValue& value) { + compileRecovery( + jit, value, + exit.m_valueReps, + registerScratch, materializationToPointer); + }; + + // Note that we come in here, the stack used to be as B3 left it except that someone called pushToSave(). + // We don't care about the value they saved. But, we do appreciate the fact that they did it, because we use + // that slot for saveAllRegisters(). + + saveAllRegisters(jit, registerScratch); + + // Bring the stack back into a sane form and assert that it's sane. + jit.popToRestore(GPRInfo::regT0); + jit.checkStackPointerAlignment(); + + if (vm->m_perBytecodeProfiler && jitCode->dfgCommon()->compilation) { + Profiler::Database& database = *vm->m_perBytecodeProfiler; + Profiler::Compilation* compilation = jitCode->dfgCommon()->compilation.get(); + + Profiler::OSRExit* profilerExit = compilation->addOSRExit( + exitID, Profiler::OriginStack(database, codeBlock, exit.m_codeOrigin), + exit.m_kind, exit.m_kind == UncountableInvalidation); + jit.add64(CCallHelpers::TrustedImm32(1), CCallHelpers::AbsoluteAddress(profilerExit->counterAddress())); + } + + // The remaining code assumes that SP/FP are in the same state that they were in the FTL's + // call frame. + + // Get the call frame and tag thingies. + // Restore the exiting function's callFrame value into a regT4 + jit.move(MacroAssembler::TrustedImm64(TagTypeNumber), GPRInfo::tagTypeNumberRegister); + jit.move(MacroAssembler::TrustedImm64(TagMask), GPRInfo::tagMaskRegister); + + // Do some value profiling. + if (exit.m_descriptor->m_profileDataFormat != DataFormatNone) { + Location::forValueRep(exit.m_valueReps[0]).restoreInto(jit, registerScratch, GPRInfo::regT0); + reboxAccordingToFormat( + exit.m_descriptor->m_profileDataFormat, jit, GPRInfo::regT0, GPRInfo::regT1, GPRInfo::regT2); + + if (exit.m_kind == BadCache || exit.m_kind == BadIndexingType) { + CodeOrigin codeOrigin = exit.m_codeOriginForExitProfile; + if (ArrayProfile* arrayProfile = jit.baselineCodeBlockFor(codeOrigin)->getArrayProfile(codeOrigin.bytecodeIndex)) { + jit.load32(MacroAssembler::Address(GPRInfo::regT0, JSCell::structureIDOffset()), GPRInfo::regT1); + jit.store32(GPRInfo::regT1, arrayProfile->addressOfLastSeenStructureID()); + jit.load8(MacroAssembler::Address(GPRInfo::regT0, JSCell::indexingTypeOffset()), GPRInfo::regT1); + jit.move(MacroAssembler::TrustedImm32(1), GPRInfo::regT2); + jit.lshift32(GPRInfo::regT1, GPRInfo::regT2); + jit.or32(GPRInfo::regT2, MacroAssembler::AbsoluteAddress(arrayProfile->addressOfArrayModes())); + } + } + + if (!!exit.m_descriptor->m_valueProfile) + jit.store64(GPRInfo::regT0, exit.m_descriptor->m_valueProfile.getSpecFailBucket(0)); + } + + // Materialize all objects. 
Don't materialize an object until all + // of the objects it needs have been materialized. We break cycles + // by populating objects late - we only consider an object as + // needing another object if the later is needed for the + // allocation of the former. + + HashSet<ExitTimeObjectMaterialization*> toMaterialize; + for (ExitTimeObjectMaterialization* materialization : exit.m_descriptor->m_materializations) + toMaterialize.add(materialization); + + while (!toMaterialize.isEmpty()) { + unsigned previousToMaterializeSize = toMaterialize.size(); + + Vector<ExitTimeObjectMaterialization*> worklist; + worklist.appendRange(toMaterialize.begin(), toMaterialize.end()); + for (ExitTimeObjectMaterialization* materialization : worklist) { + // Check if we can do anything about this right now. + bool allGood = true; + for (ExitPropertyValue value : materialization->properties()) { + if (!value.value().isObjectMaterialization()) + continue; + if (!value.location().neededForMaterialization()) + continue; + if (toMaterialize.contains(value.value().objectMaterialization())) { + // Gotta skip this one, since it needs a + // materialization that hasn't been materialized. + allGood = false; + break; + } + } + if (!allGood) + continue; + + // All systems go for materializing the object. First we + // recover the values of all of its fields and then we + // call a function to actually allocate the beast. + // We only recover the fields that are needed for the allocation. + for (unsigned propertyIndex = materialization->properties().size(); propertyIndex--;) { + const ExitPropertyValue& property = materialization->properties()[propertyIndex]; + if (!property.location().neededForMaterialization()) + continue; + + recoverValue(property.value()); + jit.storePtr(GPRInfo::regT0, materializationArguments + propertyIndex); + } + + // This call assumes that we don't pass arguments on the stack. + jit.setupArgumentsWithExecState( + CCallHelpers::TrustedImmPtr(materialization), + CCallHelpers::TrustedImmPtr(materializationArguments)); + jit.move(CCallHelpers::TrustedImmPtr(bitwise_cast<void*>(operationMaterializeObjectInOSR)), GPRInfo::nonArgGPR0); + jit.call(GPRInfo::nonArgGPR0); + jit.storePtr(GPRInfo::returnValueGPR, materializationToPointer.get(materialization)); + + // Let everyone know that we're done. + toMaterialize.remove(materialization); + } + + // We expect progress! This ensures that we crash rather than looping infinitely if there + // is something broken about this fixpoint. Or, this could happen if we ever violate the + // "materializations form a DAG" rule. + RELEASE_ASSERT(toMaterialize.size() < previousToMaterializeSize); + } + + // Now that all the objects have been allocated, we populate them + // with the correct values. This time we can recover all the + // fields, including those that are only needed for the allocation. 
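+    // (This is the second half of the allocate-then-populate scheme: every
+    // materialization now has an address in materializationToPointer, so a recovered
+    // field that refers to another materialized object just loads that pointer, which
+    // is what lets cyclic object graphs work.)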
+ for (ExitTimeObjectMaterialization* materialization : exit.m_descriptor->m_materializations) { + for (unsigned propertyIndex = materialization->properties().size(); propertyIndex--;) { + recoverValue(materialization->properties()[propertyIndex].value()); + jit.storePtr(GPRInfo::regT0, materializationArguments + propertyIndex); + } + + // This call assumes that we don't pass arguments on the stack + jit.setupArgumentsWithExecState( + CCallHelpers::TrustedImmPtr(materialization), + CCallHelpers::TrustedImmPtr(materializationToPointer.get(materialization)), + CCallHelpers::TrustedImmPtr(materializationArguments)); + jit.move(CCallHelpers::TrustedImmPtr(bitwise_cast<void*>(operationPopulateObjectInOSR)), GPRInfo::nonArgGPR0); + jit.call(GPRInfo::nonArgGPR0); + } + + // Save all state from wherever the exit data tells us it was, into the appropriate place in + // the scratch buffer. This also does the reboxing. + + for (unsigned index = exit.m_descriptor->m_values.size(); index--;) { + recoverValue(exit.m_descriptor->m_values[index]); + jit.store64(GPRInfo::regT0, scratch + index); + } + + // Henceforth we make it look like the exiting function was called through a register + // preservation wrapper. This implies that FP must be nudged down by a certain amount. Then + // we restore the various things according to either exit.m_descriptor->m_values or by copying from the + // old frame, and finally we save the various callee-save registers into where the + // restoration thunk would restore them from. + + // Before we start messing with the frame, we need to set aside any registers that the + // FTL code was preserving. + for (unsigned i = codeBlock->calleeSaveRegisters()->size(); i--;) { + RegisterAtOffset entry = codeBlock->calleeSaveRegisters()->at(i); + jit.load64( + MacroAssembler::Address(MacroAssembler::framePointerRegister, entry.offset()), + GPRInfo::regT0); + jit.store64(GPRInfo::regT0, unwindScratch + i); + } + + jit.load32(CCallHelpers::payloadFor(JSStack::ArgumentCount), GPRInfo::regT2); + + // Let's say that the FTL function had failed its arity check. In that case, the stack will + // contain some extra stuff. + // + // We compute the padded stack space: + // + // paddedStackSpace = roundUp(codeBlock->numParameters - regT2 + 1) + // + // The stack will have regT2 + CallFrameHeaderSize stuff. + // We want to make the stack look like this, from higher addresses down: + // + // - argument padding + // - actual arguments + // - call frame header + + // This code assumes that we're dealing with FunctionCode. + RELEASE_ASSERT(codeBlock->codeType() == FunctionCode); + + jit.add32( + MacroAssembler::TrustedImm32(-codeBlock->numParameters()), GPRInfo::regT2, + GPRInfo::regT3); + MacroAssembler::Jump arityIntact = jit.branch32( + MacroAssembler::GreaterThanOrEqual, GPRInfo::regT3, MacroAssembler::TrustedImm32(0)); + jit.neg32(GPRInfo::regT3); + jit.add32(MacroAssembler::TrustedImm32(1 + stackAlignmentRegisters() - 1), GPRInfo::regT3); + jit.and32(MacroAssembler::TrustedImm32(-stackAlignmentRegisters()), GPRInfo::regT3); + jit.add32(GPRInfo::regT3, GPRInfo::regT2); + arityIntact.link(&jit); + + CodeBlock* baselineCodeBlock = jit.baselineCodeBlockFor(exit.m_codeOrigin); + + // First set up SP so that our data doesn't get clobbered by signals. 
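+    // (The delta below reserves room for all of the exit's locals, the baseline
+    // callee-save virtual registers, and the largest slow-path call frame, rounded up
+    // to the stack alignment. Being generous only wastes a little stack; being too
+    // small would leave the frame slots we are about to write exposed to signal
+    // handlers.)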
+ unsigned conservativeStackDelta = + (exit.m_descriptor->m_values.numberOfLocals() + baselineCodeBlock->calleeSaveSpaceAsVirtualRegisters()) * sizeof(Register) + + maxFrameExtentForSlowPathCall; + conservativeStackDelta = WTF::roundUpToMultipleOf( + stackAlignmentBytes(), conservativeStackDelta); + jit.addPtr( + MacroAssembler::TrustedImm32(-conservativeStackDelta), + MacroAssembler::framePointerRegister, MacroAssembler::stackPointerRegister); + jit.checkStackPointerAlignment(); + + RegisterSet allFTLCalleeSaves = RegisterSet::ftlCalleeSaveRegisters(); + RegisterAtOffsetList* baselineCalleeSaves = baselineCodeBlock->calleeSaveRegisters(); + RegisterAtOffsetList* vmCalleeSaves = vm->getAllCalleeSaveRegisterOffsets(); + RegisterSet vmCalleeSavesToSkip = RegisterSet::stackRegisters(); + if (exit.isExceptionHandler()) + jit.move(CCallHelpers::TrustedImmPtr(vm->calleeSaveRegistersBuffer), GPRInfo::regT1); + + for (Reg reg = Reg::first(); reg <= Reg::last(); reg = reg.next()) { + if (!allFTLCalleeSaves.get(reg)) { + if (exit.isExceptionHandler()) + RELEASE_ASSERT(!vmCalleeSaves->find(reg)); + continue; + } + unsigned unwindIndex = codeBlock->calleeSaveRegisters()->indexOf(reg); + RegisterAtOffset* baselineRegisterOffset = baselineCalleeSaves->find(reg); + RegisterAtOffset* vmCalleeSave = nullptr; + if (exit.isExceptionHandler()) + vmCalleeSave = vmCalleeSaves->find(reg); + + if (reg.isGPR()) { + GPRReg regToLoad = baselineRegisterOffset ? GPRInfo::regT0 : reg.gpr(); + RELEASE_ASSERT(regToLoad != GPRInfo::regT1); + + if (unwindIndex == UINT_MAX) { + // The FTL compilation didn't preserve this register. This means that it also + // didn't use the register. So its value at the beginning of OSR exit should be + // preserved by the thunk. Luckily, we saved all registers into the register + // scratch buffer, so we can restore them from there. + jit.load64(registerScratch + offsetOfReg(reg), regToLoad); + } else { + // The FTL compilation preserved the register. Its new value is therefore + // irrelevant, but we can get the value that was preserved by using the unwind + // data. We've already copied all unwind-able preserved registers into the unwind + // scratch buffer, so we can get it from there. + jit.load64(unwindScratch + unwindIndex, regToLoad); + } + + if (baselineRegisterOffset) + jit.store64(regToLoad, MacroAssembler::Address(MacroAssembler::framePointerRegister, baselineRegisterOffset->offset())); + if (vmCalleeSave && !vmCalleeSavesToSkip.get(vmCalleeSave->reg())) + jit.store64(regToLoad, MacroAssembler::Address(GPRInfo::regT1, vmCalleeSave->offset())); + } else { + FPRReg fpRegToLoad = baselineRegisterOffset ? 
FPRInfo::fpRegT0 : reg.fpr(); + + if (unwindIndex == UINT_MAX) + jit.loadDouble(MacroAssembler::TrustedImmPtr(registerScratch + offsetOfReg(reg)), fpRegToLoad); + else + jit.loadDouble(MacroAssembler::TrustedImmPtr(unwindScratch + unwindIndex), fpRegToLoad); + + if (baselineRegisterOffset) + jit.storeDouble(fpRegToLoad, MacroAssembler::Address(MacroAssembler::framePointerRegister, baselineRegisterOffset->offset())); + if (vmCalleeSave && !vmCalleeSavesToSkip.get(vmCalleeSave->reg())) + jit.storeDouble(fpRegToLoad, MacroAssembler::Address(GPRInfo::regT1, vmCalleeSave->offset())); + } + } + + if (exit.isExceptionHandler()) { + RegisterAtOffset* vmCalleeSave = vmCalleeSaves->find(GPRInfo::tagTypeNumberRegister); + jit.store64(GPRInfo::tagTypeNumberRegister, MacroAssembler::Address(GPRInfo::regT1, vmCalleeSave->offset())); + + vmCalleeSave = vmCalleeSaves->find(GPRInfo::tagMaskRegister); + jit.store64(GPRInfo::tagMaskRegister, MacroAssembler::Address(GPRInfo::regT1, vmCalleeSave->offset())); + } + + size_t baselineVirtualRegistersForCalleeSaves = baselineCodeBlock->calleeSaveSpaceAsVirtualRegisters(); + + // Now get state out of the scratch buffer and place it back into the stack. The values are + // already reboxed so we just move them. + for (unsigned index = exit.m_descriptor->m_values.size(); index--;) { + VirtualRegister reg = exit.m_descriptor->m_values.virtualRegisterForIndex(index); + + if (reg.isLocal() && reg.toLocal() < static_cast<int>(baselineVirtualRegistersForCalleeSaves)) + continue; + + jit.load64(scratch + index, GPRInfo::regT0); + jit.store64(GPRInfo::regT0, AssemblyHelpers::addressFor(reg)); + } + + handleExitCounts(jit, exit); + reifyInlinedCallFrames(jit, exit); + adjustAndJumpToTarget(jit, exit); + + LinkBuffer patchBuffer(*vm, jit, codeBlock); + exit.m_code = FINALIZE_CODE_IF( + shouldDumpDisassembly() || Options::verboseOSR() || Options::verboseFTLOSRExit(), + patchBuffer, + ("FTL OSR exit #%u (%s, %s) from %s, with operands = %s", + exitID, toCString(exit.m_codeOrigin).data(), + exitKindToString(exit.m_kind), toCString(*codeBlock).data(), + toCString(ignoringContext<DumpContext>(exit.m_descriptor->m_values)).data()) + ); +} + +extern "C" void* compileFTLOSRExit(ExecState* exec, unsigned exitID) +{ + SamplingRegion samplingRegion("FTL OSR Exit Compilation"); + + if (shouldDumpDisassembly() || Options::verboseOSR() || Options::verboseFTLOSRExit()) + dataLog("Compiling OSR exit with exitID = ", exitID, "\n"); + + if (exec->vm().callFrameForCatch) + RELEASE_ASSERT(exec->vm().callFrameForCatch == exec); + + CodeBlock* codeBlock = exec->codeBlock(); + + ASSERT(codeBlock); + ASSERT(codeBlock->jitType() == JITCode::FTLJIT); + + VM* vm = &exec->vm(); + + // It's sort of preferable that we don't GC while in here. Anyways, doing so wouldn't + // really be profitable. 
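+    // (We are about to take a reference into jitCode->osrExit and walk the exit's
+    // descriptor, so keeping the collector out of the way for the duration of stub
+    // compilation is the simplest thing to do.)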
+ DeferGCForAWhile deferGC(vm->heap); + + JITCode* jitCode = codeBlock->jitCode()->ftl(); + OSRExit& exit = jitCode->osrExit[exitID]; + + if (shouldDumpDisassembly() || Options::verboseOSR() || Options::verboseFTLOSRExit()) { + dataLog(" Owning block: ", pointerDump(codeBlock), "\n"); + dataLog(" Origin: ", exit.m_codeOrigin, "\n"); + if (exit.m_codeOriginForExitProfile != exit.m_codeOrigin) + dataLog(" Origin for exit profile: ", exit.m_codeOriginForExitProfile, "\n"); + dataLog(" Current call site index: ", exec->callSiteIndex().bits(), "\n"); + dataLog(" Exit is exception handler: ", exit.isExceptionHandler(), "\n"); + dataLog(" Is unwind handler: ", exit.isGenericUnwindHandler(), "\n"); + dataLog(" Exit values: ", exit.m_descriptor->m_values, "\n"); + dataLog(" Value reps: ", listDump(exit.m_valueReps), "\n"); + if (!exit.m_descriptor->m_materializations.isEmpty()) { + dataLog(" Materializations:\n"); + for (ExitTimeObjectMaterialization* materialization : exit.m_descriptor->m_materializations) + dataLog(" ", pointerDump(materialization), "\n"); + } + } + + prepareCodeOriginForOSRExit(exec, exit.m_codeOrigin); + + compileStub(exitID, jitCode, exit, vm, codeBlock); + + MacroAssembler::repatchJump( + exit.codeLocationForRepatch(codeBlock), CodeLocationLabel(exit.m_code.code())); + + return exit.m_code.code().executableAddress(); +} + +} } // namespace JSC::FTL + +#endif // ENABLE(FTL_JIT) + diff --git a/Source/JavaScriptCore/ftl/FTLOSRExitCompiler.h b/Source/JavaScriptCore/ftl/FTLOSRExitCompiler.h new file mode 100644 index 000000000..d26b81417 --- /dev/null +++ b/Source/JavaScriptCore/ftl/FTLOSRExitCompiler.h @@ -0,0 +1,46 @@ +/* + * Copyright (C) 2013 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef FTLOSRExitCompiler_h +#define FTLOSRExitCompiler_h + +#if ENABLE(FTL_JIT) + +#include "DFGOperations.h" + +namespace JSC { namespace FTL { + +// Takes the call frame and the index of the exit, and returns the address to +// jump to. 
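+// The exit's patchable jump initially targets the OSR exit generation thunk, which is
+// what lands here the first time a given exit fires. compileFTLOSRExit repatches that
+// jump to the freshly compiled stub, so the same exit never comes back through here.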
+extern "C" { +void* JIT_OPERATION compileFTLOSRExit(ExecState*, unsigned exitID) WTF_INTERNAL; +} + +} } // namespace JSC::FTL + +#endif // ENABLE(FTL_JIT) + +#endif // FTLOSRExitCompiler_h + diff --git a/Source/JavaScriptCore/ftl/FTLOSRExitHandle.cpp b/Source/JavaScriptCore/ftl/FTLOSRExitHandle.cpp new file mode 100644 index 000000000..11942b766 --- /dev/null +++ b/Source/JavaScriptCore/ftl/FTLOSRExitHandle.cpp @@ -0,0 +1,62 @@ +/* + * Copyright (C) 2015-2016 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "FTLOSRExitHandle.h" + +#if ENABLE(FTL_JIT) + +#include "FTLOSRExit.h" +#include "FTLState.h" +#include "FTLThunks.h" +#include "LinkBuffer.h" +#include "ProfilerCompilation.h" + +namespace JSC { namespace FTL { + +void OSRExitHandle::emitExitThunk(State& state, CCallHelpers& jit) +{ + Profiler::Compilation* compilation = state.graph.compilation(); + CCallHelpers::Label myLabel = jit.label(); + label = myLabel; + jit.pushToSaveImmediateWithoutTouchingRegisters(CCallHelpers::TrustedImm32(index)); + CCallHelpers::PatchableJump jump = jit.patchableJump(); + RefPtr<OSRExitHandle> self = this; + jit.addLinkTask( + [self, jump, myLabel, compilation] (LinkBuffer& linkBuffer) { + self->exit.m_patchableJump = CodeLocationJump(linkBuffer.locationOf(jump)); + + linkBuffer.link( + jump.m_jump, + CodeLocationLabel(linkBuffer.vm().getCTIStub(osrExitGenerationThunkGenerator).code())); + if (compilation) + compilation->addOSRExitSite({ linkBuffer.locationOf(myLabel).executableAddress() }); + }); +} + +} } // namespace JSC::FTL + +#endif // ENABLE(FTL_JIT) + diff --git a/Source/JavaScriptCore/ftl/FTLOSRExitHandle.h b/Source/JavaScriptCore/ftl/FTLOSRExitHandle.h new file mode 100644 index 000000000..254a6b096 --- /dev/null +++ b/Source/JavaScriptCore/ftl/FTLOSRExitHandle.h @@ -0,0 +1,67 @@ +/* + * Copyright (C) 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef FTLOSRExitHandle_h +#define FTLOSRExitHandle_h + +#include "DFGCommon.h" + +#if ENABLE(FTL_JIT) + +#include "CCallHelpers.h" +#include <wtf/ThreadSafeRefCounted.h> + +namespace JSC { namespace FTL { + +class State; +struct OSRExit; + +// This is an object that stores some interesting data about an OSR exit. It's expected that you will +// scrape this data from this object by the time compilation finishes. +struct OSRExitHandle : public ThreadSafeRefCounted<OSRExitHandle> { + OSRExitHandle(unsigned index, OSRExit& exit) + : index(index) + , exit(exit) + { + } + + unsigned index; + OSRExit& exit; + + // This is the label at which the OSR exit jump lives. This will get populated once the OSR exit + // emits its jump. This happens immediately when you call OSRExit::appendOSRExit(). It happens at + // some time during late path emission if you do OSRExit::appendOSRExitLater(). + CCallHelpers::Label label; + + // This emits the exit thunk and populates 'label'. + void emitExitThunk(State&, CCallHelpers&); +}; + +} } // namespace JSC::FTL + +#endif // ENABLE(FTL_JIT) + +#endif // FTLOSRExitHandle_h + diff --git a/Source/JavaScriptCore/ftl/FTLOperations.cpp b/Source/JavaScriptCore/ftl/FTLOperations.cpp new file mode 100644 index 000000000..45c5bb9d5 --- /dev/null +++ b/Source/JavaScriptCore/ftl/FTLOperations.cpp @@ -0,0 +1,378 @@ +/* + * Copyright (C) 2014, 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. 
OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "FTLOperations.h" + +#if ENABLE(FTL_JIT) + +#include "ClonedArguments.h" +#include "DirectArguments.h" +#include "FTLJITCode.h" +#include "FTLLazySlowPath.h" +#include "InlineCallFrame.h" +#include "JSCInlines.h" +#include "JSGeneratorFunction.h" +#include "JSLexicalEnvironment.h" + +namespace JSC { namespace FTL { + +using namespace JSC::DFG; + +extern "C" JSCell* JIT_OPERATION operationNewObjectWithButterfly(ExecState* exec, Structure* structure) +{ + VM& vm = exec->vm(); + NativeCallFrameTracer tracer(&vm, exec); + + Butterfly* butterfly = Butterfly::create( + vm, nullptr, 0, structure->outOfLineCapacity(), false, IndexingHeader(), 0); + + JSObject* result = JSFinalObject::create(exec, structure, butterfly); + result->butterfly(); // Ensure that the butterfly is in to-space. + return result; +} + +extern "C" void JIT_OPERATION operationPopulateObjectInOSR( + ExecState* exec, ExitTimeObjectMaterialization* materialization, + EncodedJSValue* encodedValue, EncodedJSValue* values) +{ + VM& vm = exec->vm(); + CodeBlock* codeBlock = exec->codeBlock(); + + // We cannot GC. We've got pointers in evil places. + // FIXME: We are not doing anything that can GC here, and this is + // probably unnecessary. + DeferGCForAWhile deferGC(vm.heap); + + switch (materialization->type()) { + case PhantomNewObject: { + JSFinalObject* object = jsCast<JSFinalObject*>(JSValue::decode(*encodedValue)); + Structure* structure = object->structure(); + + // Figure out what the heck to populate the object with. Use + // getPropertiesConcurrently() because that happens to be + // lower-level and more convenient. It doesn't change the + // materialization of the property table. We want to have + // minimal visible effects on the system. Also, don't mind + // that this is O(n^2). It doesn't matter. We only get here + // from OSR exit. 
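+        // (Concretely, the outer loop walks the structure's property table while the
+        // inner loop searches the recovered values for a NamedPropertyPLoc entry whose
+        // identifier matches that property's key.)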
+ for (PropertyMapEntry entry : structure->getPropertiesConcurrently()) { + for (unsigned i = materialization->properties().size(); i--;) { + const ExitPropertyValue& property = materialization->properties()[i]; + if (property.location().kind() != NamedPropertyPLoc) + continue; + if (codeBlock->identifier(property.location().info()).impl() != entry.key) + continue; + + object->putDirect(vm, entry.offset, JSValue::decode(values[i])); + } + } + break; + } + + case PhantomNewFunction: + case PhantomNewGeneratorFunction: + case PhantomDirectArguments: + case PhantomClonedArguments: + // Those are completely handled by operationMaterializeObjectInOSR + break; + + case PhantomCreateActivation: { + JSLexicalEnvironment* activation = jsCast<JSLexicalEnvironment*>(JSValue::decode(*encodedValue)); + + // Figure out what to populate the activation with + for (unsigned i = materialization->properties().size(); i--;) { + const ExitPropertyValue& property = materialization->properties()[i]; + if (property.location().kind() != ClosureVarPLoc) + continue; + + activation->variableAt(ScopeOffset(property.location().info())).set(exec->vm(), activation, JSValue::decode(values[i])); + } + + break; + } + + + default: + RELEASE_ASSERT_NOT_REACHED(); + break; + + } +} + +extern "C" JSCell* JIT_OPERATION operationMaterializeObjectInOSR( + ExecState* exec, ExitTimeObjectMaterialization* materialization, EncodedJSValue* values) +{ + VM& vm = exec->vm(); + + // We cannot GC. We've got pointers in evil places. + DeferGCForAWhile deferGC(vm.heap); + + switch (materialization->type()) { + case PhantomNewObject: { + // Figure out what the structure is + Structure* structure = nullptr; + for (unsigned i = materialization->properties().size(); i--;) { + const ExitPropertyValue& property = materialization->properties()[i]; + if (property.location() != PromotedLocationDescriptor(StructurePLoc)) + continue; + + structure = jsCast<Structure*>(JSValue::decode(values[i])); + break; + } + RELEASE_ASSERT(structure); + + JSFinalObject* result = JSFinalObject::create(vm, structure); + + // The real values will be put subsequently by + // operationPopulateNewObjectInOSR. We can't fill them in + // now, because they may not be available yet (typically + // because we have a cyclic dependency graph). + + // We put a dummy value here in order to avoid super-subtle + // GC-and-OSR-exit crashes in case we have a bug and some + // field is, for any reason, not filled later. + // We use a random-ish number instead of a sensible value like + // undefined to make possible bugs easier to track. 
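+        // (The value 19723 itself carries no meaning; it is just easier to spot in a
+        // heap dump or crash log than undefined would be if a field is ever left
+        // unfilled.)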
+ for (PropertyMapEntry entry : structure->getPropertiesConcurrently()) + result->putDirect(vm, entry.offset, jsNumber(19723)); + + return result; + } + + case PhantomNewFunction: + case PhantomNewGeneratorFunction: { + // Figure out what the executable and activation are + FunctionExecutable* executable = nullptr; + JSScope* activation = nullptr; + for (unsigned i = materialization->properties().size(); i--;) { + const ExitPropertyValue& property = materialization->properties()[i]; + if (property.location() == PromotedLocationDescriptor(FunctionExecutablePLoc)) + executable = jsCast<FunctionExecutable*>(JSValue::decode(values[i])); + if (property.location() == PromotedLocationDescriptor(FunctionActivationPLoc)) + activation = jsCast<JSScope*>(JSValue::decode(values[i])); + } + RELEASE_ASSERT(executable && activation); + + if (materialization->type() == PhantomNewFunction) + return JSFunction::createWithInvalidatedReallocationWatchpoint(vm, executable, activation); + ASSERT(materialization->type() == PhantomNewGeneratorFunction); + return JSGeneratorFunction::createWithInvalidatedReallocationWatchpoint(vm, executable, activation); + } + + case PhantomCreateActivation: { + // Figure out what the scope and symbol table are + JSScope* scope = nullptr; + SymbolTable* table = nullptr; + for (unsigned i = materialization->properties().size(); i--;) { + const ExitPropertyValue& property = materialization->properties()[i]; + if (property.location() == PromotedLocationDescriptor(ActivationScopePLoc)) + scope = jsCast<JSScope*>(JSValue::decode(values[i])); + else if (property.location() == PromotedLocationDescriptor(ActivationSymbolTablePLoc)) + table = jsCast<SymbolTable*>(JSValue::decode(values[i])); + } + RELEASE_ASSERT(scope); + RELEASE_ASSERT(table); + + CodeBlock* codeBlock = baselineCodeBlockForOriginAndBaselineCodeBlock( + materialization->origin(), exec->codeBlock()); + Structure* structure = codeBlock->globalObject()->activationStructure(); + + // It doesn't matter what values we initialize as bottom values inside the activation constructor because + // activation sinking will set bottom values for each slot. + // FIXME: Slight optimization would be to create a constructor that doesn't initialize all slots. + JSLexicalEnvironment* result = JSLexicalEnvironment::create(vm, structure, scope, table, jsUndefined()); + + RELEASE_ASSERT(materialization->properties().size() - 2 == table->scopeSize()); + + // The real values will be put subsequently by + // operationPopulateNewObjectInOSR. See the PhantomNewObject + // case for details. + for (unsigned i = materialization->properties().size(); i--;) { + const ExitPropertyValue& property = materialization->properties()[i]; + if (property.location().kind() != ClosureVarPLoc) + continue; + + result->variableAt(ScopeOffset(property.location().info())).set( + exec->vm(), result, jsNumber(29834)); + } + + if (validationEnabled()) { + // Validate to make sure every slot in the scope has one value. 
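+            // (Two checks follow: every scope offset in the symbol table must be
+            // covered by some ClosureVarPLoc property, and the number of ClosureVarPLoc
+            // properties must equal the symbol table's scope size.)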
+ ConcurrentJITLocker locker(table->m_lock); + for (auto iter = table->begin(locker), end = table->end(locker); iter != end; ++iter) { + bool found = false; + for (unsigned i = materialization->properties().size(); i--;) { + const ExitPropertyValue& property = materialization->properties()[i]; + if (property.location().kind() != ClosureVarPLoc) + continue; + if (ScopeOffset(property.location().info()) == iter->value.scopeOffset()) { + found = true; + break; + } + } + ASSERT_UNUSED(found, found); + } + unsigned numberOfClosureVarPloc = 0; + for (unsigned i = materialization->properties().size(); i--;) { + const ExitPropertyValue& property = materialization->properties()[i]; + if (property.location().kind() == ClosureVarPLoc) + numberOfClosureVarPloc++; + } + ASSERT(numberOfClosureVarPloc == table->scopeSize()); + } + + return result; + } + + case PhantomDirectArguments: + case PhantomClonedArguments: { + if (!materialization->origin().inlineCallFrame) { + switch (materialization->type()) { + case PhantomDirectArguments: + return DirectArguments::createByCopying(exec); + case PhantomClonedArguments: + return ClonedArguments::createWithMachineFrame(exec, exec, ArgumentsMode::Cloned); + default: + RELEASE_ASSERT_NOT_REACHED(); + return nullptr; + } + } + + // First figure out the argument count. If there isn't one then we represent the machine frame. + unsigned argumentCount = 0; + if (materialization->origin().inlineCallFrame->isVarargs()) { + for (unsigned i = materialization->properties().size(); i--;) { + const ExitPropertyValue& property = materialization->properties()[i]; + if (property.location() != PromotedLocationDescriptor(ArgumentCountPLoc)) + continue; + + argumentCount = JSValue::decode(values[i]).asUInt32(); + RELEASE_ASSERT(argumentCount); + break; + } + RELEASE_ASSERT(argumentCount); + } else + argumentCount = materialization->origin().inlineCallFrame->arguments.size(); + + JSFunction* callee = nullptr; + if (materialization->origin().inlineCallFrame->isClosureCall) { + for (unsigned i = materialization->properties().size(); i--;) { + const ExitPropertyValue& property = materialization->properties()[i]; + if (property.location() != PromotedLocationDescriptor(ArgumentsCalleePLoc)) + continue; + + callee = jsCast<JSFunction*>(JSValue::decode(values[i])); + break; + } + } else + callee = materialization->origin().inlineCallFrame->calleeConstant(); + RELEASE_ASSERT(callee); + + CodeBlock* codeBlock = baselineCodeBlockForOriginAndBaselineCodeBlock( + materialization->origin(), exec->codeBlock()); + + // We have an inline frame and we have all of the data we need to recreate it. + switch (materialization->type()) { + case PhantomDirectArguments: { + unsigned length = argumentCount - 1; + unsigned capacity = std::max(length, static_cast<unsigned>(codeBlock->numParameters() - 1)); + DirectArguments* result = DirectArguments::create( + vm, codeBlock->globalObject()->directArgumentsStructure(), length, capacity); + result->callee().set(vm, result, callee); + for (unsigned i = materialization->properties().size(); i--;) { + const ExitPropertyValue& property = materialization->properties()[i]; + if (property.location().kind() != ArgumentPLoc) + continue; + + unsigned index = property.location().info(); + if (index >= capacity) + continue; + + // We don't want to use setIndexQuickly(), since that's only for the passed-in + // arguments but sometimes the number of named arguments is greater. For + // example: + // + // function foo(a, b, c) { ... 
} + // foo(); + // + // setIndexQuickly() would fail for indices 0, 1, 2 - but we need to recover + // those here. + result->argument(DirectArgumentsOffset(index)).set( + vm, result, JSValue::decode(values[i])); + } + return result; + } + case PhantomClonedArguments: { + unsigned length = argumentCount - 1; + ClonedArguments* result = ClonedArguments::createEmpty( + vm, codeBlock->globalObject()->outOfBandArgumentsStructure(), callee); + + for (unsigned i = materialization->properties().size(); i--;) { + const ExitPropertyValue& property = materialization->properties()[i]; + if (property.location().kind() != ArgumentPLoc) + continue; + + unsigned index = property.location().info(); + if (index >= length) + continue; + result->putDirectIndex(exec, index, JSValue::decode(values[i])); + } + + result->putDirect(vm, vm.propertyNames->length, jsNumber(length)); + return result; + } + default: + RELEASE_ASSERT_NOT_REACHED(); + return nullptr; + } + } + + default: + RELEASE_ASSERT_NOT_REACHED(); + return nullptr; + } +} + +extern "C" void* JIT_OPERATION compileFTLLazySlowPath(ExecState* exec, unsigned index) +{ + VM& vm = exec->vm(); + + // We cannot GC. We've got pointers in evil places. + DeferGCForAWhile deferGC(vm.heap); + + CodeBlock* codeBlock = exec->codeBlock(); + JITCode* jitCode = codeBlock->jitCode()->ftl(); + + LazySlowPath& lazySlowPath = *jitCode->lazySlowPaths[index]; + lazySlowPath.generate(codeBlock); + + return lazySlowPath.stub().code().executableAddress(); +} + +} } // namespace JSC::FTL + +#endif // ENABLE(FTL_JIT) + diff --git a/Source/JavaScriptCore/ftl/FTLOperations.h b/Source/JavaScriptCore/ftl/FTLOperations.h new file mode 100644 index 000000000..4f38c243a --- /dev/null +++ b/Source/JavaScriptCore/ftl/FTLOperations.h @@ -0,0 +1,57 @@ +/* + * Copyright (C) 2014 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef FTLOperations_h +#define FTLOperations_h + +#if ENABLE(FTL_JIT) + +#include "DFGOperations.h" +#include "FTLExitTimeObjectMaterialization.h" + +namespace JSC { namespace FTL { + +class LazySlowPath; + +extern "C" { + +JSCell* JIT_OPERATION operationNewObjectWithButterfly(ExecState*, Structure*) WTF_INTERNAL; + +JSCell* JIT_OPERATION operationMaterializeObjectInOSR( + ExecState*, ExitTimeObjectMaterialization*, EncodedJSValue*) WTF_INTERNAL; + +void JIT_OPERATION operationPopulateObjectInOSR( + ExecState*, ExitTimeObjectMaterialization*, EncodedJSValue*, EncodedJSValue*) WTF_INTERNAL; + +void* JIT_OPERATION compileFTLLazySlowPath(ExecState*, unsigned) WTF_INTERNAL; + +} // extern "C" + +} } // namespace JSC::DFG + +#endif // ENABLE(FTL_JIT) + +#endif // FTLOperations_h + diff --git a/Source/JavaScriptCore/ftl/FTLOutput.cpp b/Source/JavaScriptCore/ftl/FTLOutput.cpp new file mode 100644 index 000000000..c4f2861ad --- /dev/null +++ b/Source/JavaScriptCore/ftl/FTLOutput.cpp @@ -0,0 +1,302 @@ +/* + * Copyright (C) 2013-2016 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "config.h" +#include "FTLOutput.h" + +#if ENABLE(FTL_JIT) + +#include "B3MathExtras.h" +#include "B3StackmapGenerationParams.h" + +namespace JSC { namespace FTL { + +using namespace B3; + +Output::Output(State& state) + : m_proc(*state.proc) +{ +} + +Output::~Output() +{ +} + +void Output::initialize(AbstractHeapRepository& heaps) +{ + m_heaps = &heaps; +} + +LBasicBlock Output::newBlock() +{ + LBasicBlock result = m_proc.addBlock(m_frequency); + + if (!m_nextBlock) + m_blockOrder.append(result); + else + m_blockOrder.insertBefore(m_nextBlock, result); + + return result; +} + +void Output::applyBlockOrder() +{ + m_proc.setBlockOrder(m_blockOrder); +} + +LBasicBlock Output::appendTo(LBasicBlock block, LBasicBlock nextBlock) +{ + appendTo(block); + return insertNewBlocksBefore(nextBlock); +} + +void Output::appendTo(LBasicBlock block) +{ + m_block = block; +} + +SlotBaseValue* Output::lockedStackSlot(size_t bytes) +{ + return m_block->appendNew<SlotBaseValue>(m_proc, origin(), m_proc.addStackSlot(bytes)); +} + +LValue Output::neg(LValue value) +{ + return m_block->appendNew<Value>(m_proc, B3::Neg, origin(), value); +} + +LValue Output::bitNot(LValue value) +{ + return m_block->appendNew<B3::Value>(m_proc, B3::BitXor, origin(), + value, + m_block->appendIntConstant(m_proc, origin(), value->type(), -1)); +} + +LValue Output::logicalNot(LValue value) +{ + return m_block->appendNew<B3::Value>(m_proc, B3::Equal, origin(), value, int32Zero); +} + +LValue Output::load(TypedPointer pointer, LType type) +{ + LValue load = m_block->appendNew<MemoryValue>(m_proc, Load, type, origin(), pointer.value()); + m_heaps->decorateMemory(pointer.heap(), load); + return load; +} + +LValue Output::doublePowi(LValue x, LValue y) +{ + // FIXME: powDoubleInt32() should be inlined here since Output knows about block layout and + // should be involved in any operation that creates blocks. 
+ // https://bugs.webkit.org/show_bug.cgi?id=152223 + auto result = powDoubleInt32(m_proc, m_block, origin(), x, y); + m_block = result.first; + return result.second; +} + +bool Output::hasSensibleDoubleToInt() +{ + return optimizeForX86(); +} + +LValue Output::doubleToInt(LValue value) +{ + PatchpointValue* result = patchpoint(Int32); + result->append(value, ValueRep::SomeRegister); + result->setGenerator( + [] (CCallHelpers& jit, const StackmapGenerationParams& params) { + jit.truncateDoubleToInt32(params[1].fpr(), params[0].gpr()); + }); + result->effects = Effects::none(); + return result; +} + +LValue Output::doubleToUInt(LValue value) +{ + PatchpointValue* result = patchpoint(Int32); + result->append(value, ValueRep::SomeRegister); + result->setGenerator( + [] (CCallHelpers& jit, const StackmapGenerationParams& params) { + jit.truncateDoubleToUint32(params[1].fpr(), params[0].gpr()); + }); + result->effects = Effects::none(); + return result; +} + +LValue Output::unsignedToDouble(LValue value) +{ + return intToDouble(zeroExt(value, Int64)); +} + +LValue Output::load8SignExt32(TypedPointer pointer) +{ + LValue load = m_block->appendNew<MemoryValue>(m_proc, Load8S, Int32, origin(), pointer.value()); + m_heaps->decorateMemory(pointer.heap(), load); + return load; +} + +LValue Output::load8ZeroExt32(TypedPointer pointer) +{ + LValue load = m_block->appendNew<MemoryValue>(m_proc, Load8Z, Int32, origin(), pointer.value()); + m_heaps->decorateMemory(pointer.heap(), load); + return load; +} + +LValue Output::load16SignExt32(TypedPointer pointer) +{ + LValue load = m_block->appendNew<MemoryValue>(m_proc, Load16S, Int32, origin(), pointer.value()); + m_heaps->decorateMemory(pointer.heap(), load); + return load; +} + +LValue Output::load16ZeroExt32(TypedPointer pointer) +{ + LValue load = m_block->appendNew<MemoryValue>(m_proc, Load16Z, Int32, origin(), pointer.value()); + m_heaps->decorateMemory(pointer.heap(), load); + return load; +} + +void Output::store(LValue value, TypedPointer pointer) +{ + LValue store = m_block->appendNew<MemoryValue>(m_proc, Store, origin(), value, pointer.value()); + m_heaps->decorateMemory(pointer.heap(), store); +} + +void Output::store32As8(LValue value, TypedPointer pointer) +{ + LValue store = m_block->appendNew<MemoryValue>(m_proc, Store8, origin(), value, pointer.value()); + m_heaps->decorateMemory(pointer.heap(), store); +} + +void Output::store32As16(LValue value, TypedPointer pointer) +{ + LValue store = m_block->appendNew<MemoryValue>(m_proc, Store16, origin(), value, pointer.value()); + m_heaps->decorateMemory(pointer.heap(), store); +} + +LValue Output::baseIndex(LValue base, LValue index, Scale scale, ptrdiff_t offset) +{ + LValue accumulatedOffset; + + switch (scale) { + case ScaleOne: + accumulatedOffset = index; + break; + case ScaleTwo: + accumulatedOffset = shl(index, intPtrOne); + break; + case ScaleFour: + accumulatedOffset = shl(index, intPtrTwo); + break; + case ScaleEight: + case ScalePtr: + accumulatedOffset = shl(index, intPtrThree); + break; + } + + if (offset) + accumulatedOffset = add(accumulatedOffset, constIntPtr(offset)); + + return add(base, accumulatedOffset); +} + +void Output::branch(LValue condition, LBasicBlock taken, Weight takenWeight, LBasicBlock notTaken, Weight notTakenWeight) +{ + m_block->appendNew<ControlValue>( + m_proc, B3::Branch, origin(), condition, + FrequentedBlock(taken, takenWeight.frequencyClass()), + FrequentedBlock(notTaken, notTakenWeight.frequencyClass())); +} + +void Output::check(LValue condition, 
WeightedTarget taken, Weight notTakenWeight) +{ + LBasicBlock continuation = newBlock(); + branch(condition, taken, WeightedTarget(continuation, notTakenWeight)); + appendTo(continuation); +} + +void Output::check(LValue condition, WeightedTarget taken) +{ + check(condition, taken, taken.weight().inverse()); +} + +LValue Output::load(TypedPointer pointer, LoadType type) +{ + switch (type) { + case Load8SignExt32: + return load8SignExt32(pointer); + case Load8ZeroExt32: + return load8ZeroExt32(pointer); + case Load16SignExt32: + return load8SignExt32(pointer); + case Load16ZeroExt32: + return load8ZeroExt32(pointer); + case Load32: + return load32(pointer); + case Load64: + return load64(pointer); + case LoadPtr: + return loadPtr(pointer); + case LoadFloat: + return loadFloat(pointer); + case LoadDouble: + return loadDouble(pointer); + } + RELEASE_ASSERT_NOT_REACHED(); + return nullptr; +} + +void Output::store(LValue value, TypedPointer pointer, StoreType type) +{ + switch (type) { + case Store32As8: + store32As8(value, pointer); + return; + case Store32As16: + store32As16(value, pointer); + return; + case Store32: + store32(value, pointer); + return; + case Store64: + store64(value, pointer); + return; + case StorePtr: + storePtr(value, pointer); + return; + case StoreFloat: + storeFloat(value, pointer); + return; + case StoreDouble: + storeDouble(value, pointer); + return; + } + RELEASE_ASSERT_NOT_REACHED(); +} + +} } // namespace JSC::FTL + +#endif // ENABLE(FTL_JIT) + diff --git a/Source/JavaScriptCore/ftl/FTLOutput.h b/Source/JavaScriptCore/ftl/FTLOutput.h new file mode 100644 index 000000000..dd50c2411 --- /dev/null +++ b/Source/JavaScriptCore/ftl/FTLOutput.h @@ -0,0 +1,531 @@ +/* + * Copyright (C) 2013-2016 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef FTLOutput_h +#define FTLOutput_h + +#include "DFGCommon.h" + +#if ENABLE(FTL_JIT) + +#include "B3ArgumentRegValue.h" +#include "B3BasicBlockInlines.h" +#include "B3CCallValue.h" +#include "B3Compilation.h" +#include "B3Const32Value.h" +#include "B3ConstPtrValue.h" +#include "B3ControlValue.h" +#include "B3MemoryValue.h" +#include "B3Procedure.h" +#include "B3SlotBaseValue.h" +#include "B3SwitchValue.h" +#include "B3UpsilonValue.h" +#include "B3ValueInlines.h" +#include "FTLAbbreviatedTypes.h" +#include "FTLAbstractHeapRepository.h" +#include "FTLCommonValues.h" +#include "FTLState.h" +#include "FTLSwitchCase.h" +#include "FTLTypedPointer.h" +#include "FTLValueFromBlock.h" +#include "FTLWeight.h" +#include "FTLWeightedTarget.h" +#include <wtf/OrderMaker.h> +#include <wtf/StringPrintStream.h> + +// FIXME: remove this once everything can be generated through B3. +#if COMPILER(GCC_OR_CLANG) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wmissing-noreturn" +#pragma GCC diagnostic ignored "-Wunused-parameter" +#endif // COMPILER(GCC_OR_CLANG) + +namespace JSC { + +namespace DFG { struct Node; } + +namespace FTL { + +enum Scale { ScaleOne, ScaleTwo, ScaleFour, ScaleEight, ScalePtr }; + +class Output : public CommonValues { +public: + Output(State&); + ~Output(); + + void initialize(AbstractHeapRepository&); + + void setFrequency(double value) + { + m_frequency = value; + } + + LBasicBlock newBlock(); + + LBasicBlock insertNewBlocksBefore(LBasicBlock nextBlock) + { + LBasicBlock lastNextBlock = m_nextBlock; + m_nextBlock = nextBlock; + return lastNextBlock; + } + + void applyBlockOrder(); + + LBasicBlock appendTo(LBasicBlock, LBasicBlock nextBlock); + void appendTo(LBasicBlock); + + void setOrigin(DFG::Node* node) { m_origin = node; } + B3::Origin origin() { return B3::Origin(m_origin); } + + LValue framePointer() { return m_block->appendNew<B3::Value>(m_proc, B3::FramePointer, origin()); } + + B3::SlotBaseValue* lockedStackSlot(size_t bytes); + + LValue constBool(bool value) { return m_block->appendNew<B3::Const32Value>(m_proc, origin(), value); } + LValue constInt32(int32_t value) { return m_block->appendNew<B3::Const32Value>(m_proc, origin(), value); } + template<typename T> + LValue constIntPtr(T* value) { return m_block->appendNew<B3::ConstPtrValue>(m_proc, origin(), value); } + template<typename T> + LValue constIntPtr(T value) { return m_block->appendNew<B3::ConstPtrValue>(m_proc, origin(), value); } + LValue constInt64(int64_t value) { return m_block->appendNew<B3::Const64Value>(m_proc, origin(), value); } + LValue constDouble(double value) { return m_block->appendNew<B3::ConstDoubleValue>(m_proc, origin(), value); } + + LValue phi(LType type) { return m_block->appendNew<B3::Value>(m_proc, B3::Phi, type, origin()); } + template<typename... Params> + LValue phi(LType, ValueFromBlock, Params... theRest); + template<typename VectorType> + LValue phi(LType, const VectorType&); + void addIncomingToPhi(LValue phi, ValueFromBlock); + template<typename... Params> + void addIncomingToPhi(LValue phi, ValueFromBlock, Params... 
theRest); + + LValue add(LValue left, LValue right) { return m_block->appendNew<B3::Value>(m_proc, B3::Add, origin(), left, right); } + LValue sub(LValue left, LValue right) { return m_block->appendNew<B3::Value>(m_proc, B3::Sub, origin(), left, right); } + LValue mul(LValue left, LValue right) { return m_block->appendNew<B3::Value>(m_proc, B3::Mul, origin(), left, right); } + LValue div(LValue left, LValue right) { return m_block->appendNew<B3::Value>(m_proc, B3::Div, origin(), left, right); } + LValue chillDiv(LValue left, LValue right) { return m_block->appendNew<B3::Value>(m_proc, B3::ChillDiv, origin(), left, right); } + LValue mod(LValue left, LValue right) { return m_block->appendNew<B3::Value>(m_proc, B3::Mod, origin(), left, right); } + LValue chillMod(LValue left, LValue right) { return m_block->appendNew<B3::Value>(m_proc, B3::ChillMod, origin(), left, right); } + LValue neg(LValue); + + LValue doubleAdd(LValue left, LValue right) { return m_block->appendNew<B3::Value>(m_proc, B3::Add, origin(), left, right); } + LValue doubleSub(LValue left, LValue right) { return m_block->appendNew<B3::Value>(m_proc, B3::Sub, origin(), left, right); } + LValue doubleMul(LValue left, LValue right) { return m_block->appendNew<B3::Value>(m_proc, B3::Mul, origin(), left, right); } + LValue doubleDiv(LValue left, LValue right) { return m_block->appendNew<B3::Value>(m_proc, B3::Div, origin(), left, right); } + LValue doubleMod(LValue left, LValue right) { return m_block->appendNew<B3::Value>(m_proc, B3::Mod, origin(), left, right); } + LValue doubleNeg(LValue value) { return neg(value); } + + LValue bitAnd(LValue left, LValue right) { return m_block->appendNew<B3::Value>(m_proc, B3::BitAnd, origin(), left, right); } + LValue bitOr(LValue left, LValue right) { return m_block->appendNew<B3::Value>(m_proc, B3::BitOr, origin(), left, right); } + LValue bitXor(LValue left, LValue right) { return m_block->appendNew<B3::Value>(m_proc, B3::BitXor, origin(), left, right); } + LValue shl(LValue left, LValue right) { return m_block->appendNew<B3::Value>(m_proc, B3::Shl, origin(), left, castToInt32(right)); } + LValue aShr(LValue left, LValue right) { return m_block->appendNew<B3::Value>(m_proc, B3::SShr, origin(), left, castToInt32(right)); } + LValue lShr(LValue left, LValue right) { return m_block->appendNew<B3::Value>(m_proc, B3::ZShr, origin(), left, castToInt32(right)); } + LValue bitNot(LValue); + LValue logicalNot(LValue); + + LValue ctlz32(LValue operand) { return m_block->appendNew<B3::Value>(m_proc, B3::Clz, origin(), operand); } + LValue addWithOverflow32(LValue left, LValue right) { CRASH(); } + LValue subWithOverflow32(LValue left, LValue right) { CRASH(); } + LValue mulWithOverflow32(LValue left, LValue right) { CRASH(); } + LValue addWithOverflow64(LValue left, LValue right) { CRASH(); } + LValue subWithOverflow64(LValue left, LValue right) { CRASH(); } + LValue mulWithOverflow64(LValue left, LValue right) { CRASH(); } + LValue doubleAbs(LValue value) { return m_block->appendNew<B3::Value>(m_proc, B3::Abs, origin(), value); } + LValue doubleCeil(LValue operand) { return m_block->appendNew<B3::Value>(m_proc, B3::Ceil, origin(), operand); } + LValue doubleFloor(LValue operand) { return m_block->appendNew<B3::Value>(m_proc, B3::Floor, origin(), operand); } + + LValue doubleSin(LValue value) + { + double (*sinDouble)(double) = sin; + return callWithoutSideEffects(B3::Double, sinDouble, value); + } + LValue doubleCos(LValue value) + { + double (*cosDouble)(double) = cos; + return 
callWithoutSideEffects(B3::Double, cosDouble, value); + } + + LValue doublePow(LValue xOperand, LValue yOperand) + { + double (*powDouble)(double, double) = pow; + return callWithoutSideEffects(B3::Double, powDouble, xOperand, yOperand); + } + + LValue doublePowi(LValue xOperand, LValue yOperand); + + LValue doubleSqrt(LValue value) { return m_block->appendNew<B3::Value>(m_proc, B3::Sqrt, origin(), value); } + + LValue doubleLog(LValue value) + { + double (*logDouble)(double) = log; + return callWithoutSideEffects(B3::Double, logDouble, value); + } + + static bool hasSensibleDoubleToInt(); + LValue doubleToInt(LValue); + LValue doubleToUInt(LValue); + + LValue signExt32To64(LValue value) { return m_block->appendNew<B3::Value>(m_proc, B3::SExt32, origin(), value); } + LValue zeroExt(LValue value, LType type) + { + if (value->type() == type) + return value; + return m_block->appendNew<B3::Value>(m_proc, B3::ZExt32, origin(), value); + } + LValue zeroExtPtr(LValue value) { return zeroExt(value, B3::Int64); } + LValue intToDouble(LValue value) { return m_block->appendNew<B3::Value>(m_proc, B3::IToD, origin(), value); } + LValue unsignedToDouble(LValue); + LValue castToInt32(LValue value) + { + return value->type() == B3::Int32 ? value : + m_block->appendNew<B3::Value>(m_proc, B3::Trunc, origin(), value); + } + LValue doubleToFloat(LValue value) { return m_block->appendNew<B3::Value>(m_proc, B3::DoubleToFloat, origin(), value); } + LValue floatToDouble(LValue value) { return m_block->appendNew<B3::Value>(m_proc, B3::FloatToDouble, origin(), value); } + LValue bitCast(LValue, LType); + LValue fround(LValue doubleValue); + + LValue load(TypedPointer, LType); + void store(LValue, TypedPointer); + + LValue load8SignExt32(TypedPointer); + LValue load8ZeroExt32(TypedPointer); + LValue load16SignExt32(TypedPointer); + LValue load16ZeroExt32(TypedPointer); + LValue load32(TypedPointer pointer) { return load(pointer, B3::Int32); } + LValue load64(TypedPointer pointer) { return load(pointer, B3::Int64); } + LValue loadPtr(TypedPointer pointer) { return load(pointer, B3::pointerType()); } + LValue loadFloat(TypedPointer pointer) { return load(pointer, B3::Float); } + LValue loadDouble(TypedPointer pointer) { return load(pointer, B3::Double); } + void store32As8(LValue value, TypedPointer pointer); + void store32As16(LValue value, TypedPointer pointer); + void store32(LValue value, TypedPointer pointer) + { + ASSERT(value->type() == B3::Int32); + store(value, pointer); + } + void store64(LValue value, TypedPointer pointer) + { + ASSERT(value->type() == B3::Int64); + store(value, pointer); + } + void storePtr(LValue value, TypedPointer pointer) + { + ASSERT(value->type() == B3::pointerType()); + store(value, pointer); + } + void storeFloat(LValue value, TypedPointer pointer) + { + ASSERT(value->type() == B3::Float); + store(value, pointer); + } + void storeDouble(LValue value, TypedPointer pointer) + { + ASSERT(value->type() == B3::Double); + store(value, pointer); + } + + enum LoadType { + Load8SignExt32, + Load8ZeroExt32, + Load16SignExt32, + Load16ZeroExt32, + Load32, + Load64, + LoadPtr, + LoadFloat, + LoadDouble + }; + + LValue load(TypedPointer, LoadType); + + enum StoreType { + Store32As8, + Store32As16, + Store32, + Store64, + StorePtr, + StoreFloat, + StoreDouble + }; + + void store(LValue, TypedPointer, StoreType); + + LValue addPtr(LValue value, ptrdiff_t immediate = 0) + { + if (!immediate) + return value; + return add(value, constIntPtr(immediate)); + } + + // Construct an address by offsetting 
base by the requested amount and ascribing + // the requested abstract heap to it. + TypedPointer address(const AbstractHeap& heap, LValue base, ptrdiff_t offset = 0) + { + return TypedPointer(heap, addPtr(base, offset)); + } + // Construct an address by offsetting base by the amount specified by the field, + // and optionally an additional amount (use this with care), and then creating + // a TypedPointer with the given field as the heap. + TypedPointer address(LValue base, const AbstractHeap& field, ptrdiff_t offset = 0) + { + return address(field, base, offset + field.offset()); + } + + LValue baseIndex(LValue base, LValue index, Scale, ptrdiff_t offset = 0); + + TypedPointer baseIndex(const AbstractHeap& heap, LValue base, LValue index, Scale scale, ptrdiff_t offset = 0) + { + return TypedPointer(heap, baseIndex(base, index, scale, offset)); + } + TypedPointer baseIndex(IndexedAbstractHeap& heap, LValue base, LValue index, JSValue indexAsConstant = JSValue(), ptrdiff_t offset = 0) + { + return heap.baseIndex(*this, base, index, indexAsConstant, offset); + } + + TypedPointer absolute(void* address) + { + return TypedPointer(m_heaps->absolute[address], constIntPtr(address)); + } + + LValue load8SignExt32(LValue base, const AbstractHeap& field) { return load8SignExt32(address(base, field)); } + LValue load8ZeroExt32(LValue base, const AbstractHeap& field) { return load8ZeroExt32(address(base, field)); } + LValue load16SignExt32(LValue base, const AbstractHeap& field) { return load16SignExt32(address(base, field)); } + LValue load16ZeroExt32(LValue base, const AbstractHeap& field) { return load16ZeroExt32(address(base, field)); } + LValue load32(LValue base, const AbstractHeap& field) { return load32(address(base, field)); } + LValue load64(LValue base, const AbstractHeap& field) { return load64(address(base, field)); } + LValue loadPtr(LValue base, const AbstractHeap& field) { return loadPtr(address(base, field)); } + LValue loadDouble(LValue base, const AbstractHeap& field) { return loadDouble(address(base, field)); } + void store32(LValue value, LValue base, const AbstractHeap& field) { store32(value, address(base, field)); } + void store64(LValue value, LValue base, const AbstractHeap& field) { store64(value, address(base, field)); } + void storePtr(LValue value, LValue base, const AbstractHeap& field) { storePtr(value, address(base, field)); } + void storeDouble(LValue value, LValue base, const AbstractHeap& field) { storeDouble(value, address(base, field)); } + + // FIXME: Explore adding support for value range constraints to B3. Maybe it could be as simple as having + // a load instruction that guarantees that its result is non-negative. 
+ // https://bugs.webkit.org/show_bug.cgi?id=151458 + void ascribeRange(LValue, const ValueRange&) { } + LValue nonNegative32(LValue loadInstruction) { return loadInstruction; } + LValue load32NonNegative(TypedPointer pointer) { return load32(pointer); } + LValue load32NonNegative(LValue base, const AbstractHeap& field) { return load32(base, field); } + + LValue equal(LValue left, LValue right) { return m_block->appendNew<B3::Value>(m_proc, B3::Equal, origin(), left, right); } + LValue notEqual(LValue left, LValue right) { return m_block->appendNew<B3::Value>(m_proc, B3::NotEqual, origin(), left, right); } + LValue above(LValue left, LValue right) { return m_block->appendNew<B3::Value>(m_proc, B3::Above, origin(), left, right); } + LValue aboveOrEqual(LValue left, LValue right) { return m_block->appendNew<B3::Value>(m_proc, B3::AboveEqual, origin(), left, right); } + LValue below(LValue left, LValue right) { return m_block->appendNew<B3::Value>(m_proc, B3::Below, origin(), left, right); } + LValue belowOrEqual(LValue left, LValue right) { return m_block->appendNew<B3::Value>(m_proc, B3::BelowEqual, origin(), left, right); } + LValue greaterThan(LValue left, LValue right) { return m_block->appendNew<B3::Value>(m_proc, B3::GreaterThan, origin(), left, right); } + LValue greaterThanOrEqual(LValue left, LValue right) { return m_block->appendNew<B3::Value>(m_proc, B3::GreaterEqual, origin(), left, right); } + LValue lessThan(LValue left, LValue right) { return m_block->appendNew<B3::Value>(m_proc, B3::LessThan, origin(), left, right); } + LValue lessThanOrEqual(LValue left, LValue right) { return m_block->appendNew<B3::Value>(m_proc, B3::LessEqual, origin(), left, right); } + + LValue doubleEqual(LValue left, LValue right) { return m_block->appendNew<B3::Value>(m_proc, B3::Equal, origin(), left, right); } + LValue doubleEqualOrUnordered(LValue left, LValue right) { return m_block->appendNew<B3::Value>(m_proc, B3::EqualOrUnordered, origin(), left, right); } + LValue doubleNotEqualOrUnordered(LValue left, LValue right) { return m_block->appendNew<B3::Value>(m_proc, B3::NotEqual, origin(), left, right); } + LValue doubleLessThan(LValue left, LValue right) { return m_block->appendNew<B3::Value>(m_proc, B3::LessThan, origin(), left, right); } + LValue doubleLessThanOrEqual(LValue left, LValue right) { return m_block->appendNew<B3::Value>(m_proc, B3::LessEqual, origin(), left, right); } + LValue doubleGreaterThan(LValue left, LValue right) { return m_block->appendNew<B3::Value>(m_proc, B3::GreaterThan, origin(), left, right); } + LValue doubleGreaterThanOrEqual(LValue left, LValue right) { return m_block->appendNew<B3::Value>(m_proc, B3::GreaterEqual, origin(), left, right); } + LValue doubleNotEqualAndOrdered(LValue left, LValue right) { return logicalNot(doubleEqualOrUnordered(left, right)); } + LValue doubleLessThanOrUnordered(LValue left, LValue right) { return logicalNot(doubleGreaterThanOrEqual(left, right)); } + LValue doubleLessThanOrEqualOrUnordered(LValue left, LValue right) { return logicalNot(doubleGreaterThan(left, right)); } + LValue doubleGreaterThanOrUnordered(LValue left, LValue right) { return logicalNot(doubleLessThanOrEqual(left, right)); } + LValue doubleGreaterThanOrEqualOrUnordered(LValue left, LValue right) { return logicalNot(doubleLessThan(left, right)); } + + LValue isZero32(LValue value) { return m_block->appendNew<B3::Value>(m_proc, B3::Equal, origin(), value, int32Zero); } + LValue notZero32(LValue value) { return m_block->appendNew<B3::Value>(m_proc, B3::NotEqual, 
origin(), value, int32Zero); } + LValue isZero64(LValue value) { return m_block->appendNew<B3::Value>(m_proc, B3::Equal, origin(), value, int64Zero); } + LValue notZero64(LValue value) { return m_block->appendNew<B3::Value>(m_proc, B3::NotEqual, origin(), value, int64Zero); } + LValue isNull(LValue value) { return isZero64(value); } + LValue notNull(LValue value) { return notZero64(value); } + + LValue testIsZero32(LValue value, LValue mask) { return isZero32(bitAnd(value, mask)); } + LValue testNonZero32(LValue value, LValue mask) { return notZero32(bitAnd(value, mask)); } + LValue testIsZero64(LValue value, LValue mask) { return isZero64(bitAnd(value, mask)); } + LValue testNonZero64(LValue value, LValue mask) { return notZero64(bitAnd(value, mask)); } + LValue testIsZeroPtr(LValue value, LValue mask) { return isNull(bitAnd(value, mask)); } + LValue testNonZeroPtr(LValue value, LValue mask) { return notNull(bitAnd(value, mask)); } + + LValue select(LValue value, LValue taken, LValue notTaken) { return m_block->appendNew<B3::Value>(m_proc, B3::Select, origin(), value, taken, notTaken); } + LValue extractValue(LValue aggVal, unsigned index) { CRASH(); } + + template<typename VectorType> + LValue call(LType type, LValue function, const VectorType& vector) + { + B3::CCallValue* result = m_block->appendNew<B3::CCallValue>(m_proc, type, origin(), function); + result->children().appendVector(vector); + return result; + } + LValue call(LType type, LValue function) { return m_block->appendNew<B3::CCallValue>(m_proc, type, origin(), function); } + LValue call(LType type, LValue function, LValue arg1) { return m_block->appendNew<B3::CCallValue>(m_proc, type, origin(), function, arg1); } + template<typename... Args> + LValue call(LType type, LValue function, LValue arg1, Args... args) { return m_block->appendNew<B3::CCallValue>(m_proc, type, origin(), function, arg1, args...); } + + template<typename FunctionType> + LValue operation(FunctionType function) { return constIntPtr(bitwise_cast<void*>(function)); } + + void jump(LBasicBlock destination) { m_block->appendNew<B3::ControlValue>(m_proc, B3::Jump, origin(), B3::FrequentedBlock(destination)); } + void branch(LValue condition, LBasicBlock taken, Weight takenWeight, LBasicBlock notTaken, Weight notTakenWeight); + void branch(LValue condition, WeightedTarget taken, WeightedTarget notTaken) + { + branch(condition, taken.target(), taken.weight(), notTaken.target(), notTaken.weight()); + } + + // Branches to an already-created handler if true, "falls through" if false. Fall-through is + // simulated by creating a continuation for you. + void check(LValue condition, WeightedTarget taken, Weight notTakenWeight); + + // Same as check(), but uses Weight::inverse() to compute the notTakenWeight. 
+ void check(LValue condition, WeightedTarget taken); + + template<typename VectorType> + void switchInstruction(LValue value, const VectorType& cases, LBasicBlock fallThrough, Weight fallThroughWeight) + { + B3::SwitchValue* switchValue = m_block->appendNew<B3::SwitchValue>( + m_proc, origin(), value, B3::FrequentedBlock(fallThrough)); + for (const SwitchCase& switchCase : cases) { + int64_t value = switchCase.value()->asInt(); + B3::FrequentedBlock target(switchCase.target(), switchCase.weight().frequencyClass()); + switchValue->appendCase(B3::SwitchCase(value, target)); + } + } + + void ret(LValue value) { m_block->appendNew<B3::ControlValue>(m_proc, B3::Return, origin(), value); } + + void unreachable() { m_block->appendNew<B3::ControlValue>(m_proc, B3::Oops, origin()); } + + B3::CheckValue* speculate(LValue value) + { + return m_block->appendNew<B3::CheckValue>(m_proc, B3::Check, origin(), value); + } + + B3::CheckValue* speculateAdd(LValue left, LValue right) + { + return m_block->appendNew<B3::CheckValue>(m_proc, B3::CheckAdd, origin(), left, right); + } + + B3::CheckValue* speculateSub(LValue left, LValue right) + { + return m_block->appendNew<B3::CheckValue>(m_proc, B3::CheckSub, origin(), left, right); + } + + B3::CheckValue* speculateMul(LValue left, LValue right) + { + return m_block->appendNew<B3::CheckValue>(m_proc, B3::CheckMul, origin(), left, right); + } + + B3::PatchpointValue* patchpoint(LType type) + { + return m_block->appendNew<B3::PatchpointValue>(m_proc, type, origin()); + } + + void trap() + { + m_block->appendNew<B3::ControlValue>(m_proc, B3::Oops, origin()); + } + + ValueFromBlock anchor(LValue value) + { + B3::UpsilonValue* upsilon = m_block->appendNew<B3::UpsilonValue>(m_proc, origin(), value); + return ValueFromBlock(upsilon, m_block); + } + +#if PLATFORM(COCOA) +#pragma mark - States +#endif + B3::Procedure& m_proc; + + DFG::Node* m_origin { nullptr }; + LBasicBlock m_block { nullptr }; + LBasicBlock m_nextBlock { nullptr }; + + AbstractHeapRepository* m_heaps; + + double m_frequency { 1 }; + +private: + OrderMaker<LBasicBlock> m_blockOrder; + + template<typename Function, typename... Args> + LValue callWithoutSideEffects(B3::Type type, Function function, LValue arg1, Args... args) + { + return m_block->appendNew<B3::CCallValue>(m_proc, type, origin(), B3::Effects::none(), + m_block->appendNew<B3::ConstPtrValue>(m_proc, origin(), bitwise_cast<void*>(function)), + arg1, args...); + } + +}; + +template<typename... Params> +inline LValue Output::phi(LType type, ValueFromBlock value, Params... theRest) +{ + LValue phiNode = phi(type); + addIncomingToPhi(phiNode, value, theRest...); + return phiNode; +} + +template<typename VectorType> +inline LValue Output::phi(LType type, const VectorType& vector) +{ + LValue phiNode = phi(type); + for (const ValueFromBlock& valueFromBlock : vector) + addIncomingToPhi(phiNode, valueFromBlock); + return phiNode; +} + +inline void Output::addIncomingToPhi(LValue phi, ValueFromBlock value) +{ + value.value()->as<B3::UpsilonValue>()->setPhi(phi); +} + +template<typename... Params> +inline void Output::addIncomingToPhi(LValue phi, ValueFromBlock value, Params... 
theRest) +{ + addIncomingToPhi(phi, value); + addIncomingToPhi(phi, theRest...); +} + +inline LValue Output::bitCast(LValue value, LType type) +{ + ASSERT_UNUSED(type, type == int64 || type == doubleType); + return m_block->appendNew<B3::Value>(m_proc, B3::BitwiseCast, origin(), value); +} + +inline LValue Output::fround(LValue doubleValue) +{ + return floatToDouble(doubleToFloat(doubleValue)); +} + +#if COMPILER(GCC_OR_CLANG) +#pragma GCC diagnostic pop +#endif // COMPILER(GCC_OR_CLANG) + +} } // namespace JSC::FTL + +#endif // ENABLE(FTL_JIT) + +#endif // FTLOutput_h diff --git a/Source/JavaScriptCore/ftl/FTLPatchpointExceptionHandle.cpp b/Source/JavaScriptCore/ftl/FTLPatchpointExceptionHandle.cpp new file mode 100644 index 000000000..6dcc5ee87 --- /dev/null +++ b/Source/JavaScriptCore/ftl/FTLPatchpointExceptionHandle.cpp @@ -0,0 +1,121 @@ +/* + * Copyright (C) 2016 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "FTLPatchpointExceptionHandle.h" + +#if ENABLE(FTL_JIT) + +#include "B3StackmapGenerationParams.h" +#include "FTLExceptionTarget.h" +#include "FTLOSRExit.h" +#include "FTLOSRExitHandle.h" +#include "FTLState.h" + +namespace JSC { namespace FTL { + +using namespace DFG; + +RefPtr<PatchpointExceptionHandle> PatchpointExceptionHandle::create( + State& state, OSRExitDescriptor* descriptor, NodeOrigin origin, unsigned offset, + const HandlerInfo& handler) +{ + return adoptRef(new PatchpointExceptionHandle(state, descriptor, origin, offset, handler)); +} + +RefPtr<PatchpointExceptionHandle> PatchpointExceptionHandle::defaultHandle(State& state) +{ + if (!state.defaultExceptionHandle) { + state.defaultExceptionHandle = adoptRef( + new PatchpointExceptionHandle(state, nullptr, NodeOrigin(), 0, HandlerInfo())); + } + return state.defaultExceptionHandle; +} + +PatchpointExceptionHandle::~PatchpointExceptionHandle() +{ +} + +RefPtr<ExceptionTarget> PatchpointExceptionHandle::scheduleExitCreation( + const B3::StackmapGenerationParams& params) +{ + if (!m_descriptor) { + // NOTE: This object could be a singleton, however usually we toss the ExceptionHandler + // object shortly after creation. 
+ bool isDefaultHandler = true; + return adoptRef( + new ExceptionTarget(isDefaultHandler, m_state.exceptionHandler, nullptr)); + } + bool isDefaultHandler = false; + return adoptRef(new ExceptionTarget(isDefaultHandler, { }, createHandle(ExceptionCheck, params))); +} + +void PatchpointExceptionHandle::scheduleExitCreationForUnwind( + const B3::StackmapGenerationParams& params, CallSiteIndex callSiteIndex) +{ + if (!m_descriptor) + return; + + RefPtr<OSRExitHandle> handle = createHandle(GenericUnwind, params); + + handle->exit.m_exceptionHandlerCallSiteIndex = callSiteIndex; + + HandlerInfo handler = m_handler; + params.addLatePath( + [handle, handler, callSiteIndex] (CCallHelpers& jit) { + CodeBlock* codeBlock = jit.codeBlock(); + jit.addLinkTask( + [=] (LinkBuffer& linkBuffer) { + HandlerInfo newHandler = handler; + newHandler.start = callSiteIndex.bits(); + newHandler.end = callSiteIndex.bits() + 1; + newHandler.nativeCode = linkBuffer.locationOf(handle->label); + codeBlock->appendExceptionHandler(newHandler); + }); + }); +} + +PatchpointExceptionHandle::PatchpointExceptionHandle( + State& state, OSRExitDescriptor* descriptor, NodeOrigin origin, unsigned offset, + const HandlerInfo& handler) + : m_state(state) + , m_descriptor(descriptor) + , m_origin(origin) + , m_offset(offset) + , m_handler(handler) +{ +} + +RefPtr<OSRExitHandle> PatchpointExceptionHandle::createHandle( + ExitKind kind, const B3::StackmapGenerationParams& params) +{ + return m_descriptor->emitOSRExitLater( + m_state, kind, m_origin, params, m_offset); +} + +} } // namespace JSC::FTL + +#endif // ENABLE(FTL_JIT) + diff --git a/Source/JavaScriptCore/ftl/FTLPatchpointExceptionHandle.h b/Source/JavaScriptCore/ftl/FTLPatchpointExceptionHandle.h new file mode 100644 index 000000000..8664aa16b --- /dev/null +++ b/Source/JavaScriptCore/ftl/FTLPatchpointExceptionHandle.h @@ -0,0 +1,106 @@ +/* + * Copyright (C) 2016 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef FTLPatchpointExceptionHandle_h +#define FTLPatchpointExceptionHandle_h + +#include "DFGCommon.h" + +#if ENABLE(FTL_JIT) + +#include "DFGNodeOrigin.h" +#include "ExitKind.h" +#include "HandlerInfo.h" +#include <wtf/Ref.h> +#include <wtf/ThreadSafeRefCounted.h> + +namespace JSC { + +namespace B3 { +class StackmapGenerationParams; +} // namespace B3 + +namespace FTL { + +class ExceptionTarget; +class State; +struct OSRExitDescriptor; +struct OSRExitHandle; + +class PatchpointExceptionHandle : public ThreadSafeRefCounted<PatchpointExceptionHandle> { +public: + static RefPtr<PatchpointExceptionHandle> create( + State&, OSRExitDescriptor*, DFG::NodeOrigin, unsigned offset, const HandlerInfo&); + + static RefPtr<PatchpointExceptionHandle> defaultHandle(State&); + + ~PatchpointExceptionHandle(); + + // Note that you can use this handle to schedule any number of exits. This capability is here for + // two reasons: + // + // - B3 code duplication. B3 could take a patchpoint and turn it into multiple patchpoints if it + // duplicates code. Duplicating code is legal since you can do it without changing the behavior + // of the program. One example is tail duplication. Another is jump threading. Yet another is + // path specialization. You will have one PatchpointExceptionHandle per patchpoint you create + // during DFG->B3 lowering, and that patchpoint will have a generator that calls + // handle->scheduleBlah(). That generator will be called multiple times if your patchpoint got + // duplicated. + // + // - Combination of unwind and non-unwind exception handlers inside one patchpoint. A GetById may + // need both an exception handler that serves as an unwind target and an exception handler that + // is branched to directly for operation calls emitted inside the patchpoint. In that case, + // you'll call both scheduleExitCreation() and scheduleExitCreationForUnwind() on the same + // handle. + + // Schedules the creation of an OSR exit jump destination. You don't know when this will be + // created, but it will happen before linking. You can link jumps to it during link time. That's + // why this returns an ExceptionTarget. That will contain the jump destination (target->label()) + // at link time. + RefPtr<ExceptionTarget> scheduleExitCreation(const B3::StackmapGenerationParams&); + + // Schedules the creation of an OSR exit jump destination, and ensures that it gets associated + // with the handler for some callsite index. + void scheduleExitCreationForUnwind(const B3::StackmapGenerationParams&, CallSiteIndex); + +private: + PatchpointExceptionHandle( + State&, OSRExitDescriptor*, DFG::NodeOrigin, unsigned offset, const HandlerInfo&); + + RefPtr<OSRExitHandle> createHandle(ExitKind, const B3::StackmapGenerationParams&); + + State& m_state; + OSRExitDescriptor* m_descriptor; + DFG::NodeOrigin m_origin; + unsigned m_offset; + HandlerInfo m_handler; +}; + +} } // namespace JSC::FTL + +#endif // ENABLE(FTL_JIT) + +#endif // FTLPatchpointExceptionHandle_h + diff --git a/Source/JavaScriptCore/ftl/FTLRecoveryOpcode.cpp b/Source/JavaScriptCore/ftl/FTLRecoveryOpcode.cpp new file mode 100644 index 000000000..70770dd81 --- /dev/null +++ b/Source/JavaScriptCore/ftl/FTLRecoveryOpcode.cpp @@ -0,0 +1,51 @@ +/* + * Copyright (C) 2014 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "FTLRecoveryOpcode.h" + +#if ENABLE(FTL_JIT) + +#include <wtf/PrintStream.h> + +namespace WTF { + +void printInternal(PrintStream& out, JSC::FTL::RecoveryOpcode opcode) +{ + switch (opcode) { + case JSC::FTL::AddRecovery: + out.print("Add"); + return; + case JSC::FTL::SubRecovery: + out.print("Sub"); + return; + } + RELEASE_ASSERT_NOT_REACHED(); +} + +} // namespace WTF + +#endif // ENABLE(FTL_JIT) + diff --git a/Source/JavaScriptCore/ftl/FTLRecoveryOpcode.h b/Source/JavaScriptCore/ftl/FTLRecoveryOpcode.h new file mode 100644 index 000000000..43a755a94 --- /dev/null +++ b/Source/JavaScriptCore/ftl/FTLRecoveryOpcode.h @@ -0,0 +1,50 @@ +/* + * Copyright (C) 2014 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef FTLRecoveryOpcode_h +#define FTLRecoveryOpcode_h + +#if ENABLE(FTL_JIT) + +namespace JSC { namespace FTL { + +enum RecoveryOpcode { + AddRecovery, + SubRecovery +}; + +} } // namespace JSC::FTL + +namespace WTF { + +class PrintStream; +void printInternal(PrintStream&, JSC::FTL::RecoveryOpcode); + +} // namespace WTF + +#endif // ENABLE(FTL_JIT) + +#endif // FTLRecoveryOpcode_h + diff --git a/Source/JavaScriptCore/ftl/FTLSaveRestore.cpp b/Source/JavaScriptCore/ftl/FTLSaveRestore.cpp new file mode 100644 index 000000000..c752fd22f --- /dev/null +++ b/Source/JavaScriptCore/ftl/FTLSaveRestore.cpp @@ -0,0 +1,149 @@ +/* + * Copyright (C) 2013, 2014 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "FTLSaveRestore.h" + +#if ENABLE(FTL_JIT) + +#include "FPRInfo.h" +#include "GPRInfo.h" +#include "MacroAssembler.h" +#include "RegisterSet.h" + +namespace JSC { namespace FTL { + +static size_t bytesForGPRs() +{ + return MacroAssembler::numberOfRegisters() * sizeof(int64_t); +} + +static size_t bytesForFPRs() +{ + // FIXME: It might be worthwhile saving the full state of the FP registers, at some point. + // Right now we don't need this since we only do the save/restore just prior to OSR exit, and + // OSR exit will be guaranteed to only need the double portion of the FP registers. 
+ return MacroAssembler::numberOfFPRegisters() * sizeof(double); +} + +size_t requiredScratchMemorySizeInBytes() +{ + return bytesForGPRs() + bytesForFPRs(); +} + +size_t offsetOfGPR(GPRReg reg) +{ + return MacroAssembler::registerIndex(reg) * sizeof(int64_t); +} + +size_t offsetOfFPR(FPRReg reg) +{ + return bytesForGPRs() + MacroAssembler::fpRegisterIndex(reg) * sizeof(double); +} + +size_t offsetOfReg(Reg reg) +{ + if (reg.isGPR()) + return offsetOfGPR(reg.gpr()); + return offsetOfFPR(reg.fpr()); +} + +namespace { + +struct Regs { + Regs() + { + special = RegisterSet::stackRegisters(); + special.merge(RegisterSet::reservedHardwareRegisters()); + + first = MacroAssembler::firstRegister(); + while (special.get(first)) + first = MacroAssembler::nextRegister(first); + second = MacroAssembler::nextRegister(first); + while (special.get(second)) + second = MacroAssembler::nextRegister(second); + } + + RegisterSet special; + GPRReg first; + GPRReg second; +}; + +} // anonymous namespace + +void saveAllRegisters(MacroAssembler& jit, char* scratchMemory) +{ + Regs regs; + + // Get the first register out of the way, so that we can use it as a pointer. + jit.poke64(regs.first, 0); + jit.move(MacroAssembler::TrustedImmPtr(scratchMemory), regs.first); + + // Get all of the other GPRs out of the way. + for (MacroAssembler::RegisterID reg = regs.second; reg <= MacroAssembler::lastRegister(); reg = MacroAssembler::nextRegister(reg)) { + if (regs.special.get(reg)) + continue; + jit.store64(reg, MacroAssembler::Address(regs.first, offsetOfGPR(reg))); + } + + // Restore the first register into the second one and save it. + jit.peek64(regs.second, 0); + jit.store64(regs.second, MacroAssembler::Address(regs.first, offsetOfGPR(regs.first))); + + // Finally save all FPR's. + for (MacroAssembler::FPRegisterID reg = MacroAssembler::firstFPRegister(); reg <= MacroAssembler::lastFPRegister(); reg = MacroAssembler::nextFPRegister(reg)) { + if (regs.special.get(reg)) + continue; + jit.storeDouble(reg, MacroAssembler::Address(regs.first, offsetOfFPR(reg))); + } +} + +void restoreAllRegisters(MacroAssembler& jit, char* scratchMemory) +{ + Regs regs; + + // Give ourselves a pointer to the scratch memory. + jit.move(MacroAssembler::TrustedImmPtr(scratchMemory), regs.first); + + // Restore all FPR's. + for (MacroAssembler::FPRegisterID reg = MacroAssembler::firstFPRegister(); reg <= MacroAssembler::lastFPRegister(); reg = MacroAssembler::nextFPRegister(reg)) { + if (regs.special.get(reg)) + continue; + jit.loadDouble(MacroAssembler::Address(regs.first, offsetOfFPR(reg)), reg); + } + + for (MacroAssembler::RegisterID reg = regs.second; reg <= MacroAssembler::lastRegister(); reg = MacroAssembler::nextRegister(reg)) { + if (regs.special.get(reg)) + continue; + jit.load64(MacroAssembler::Address(regs.first, offsetOfGPR(reg)), reg); + } + + jit.load64(MacroAssembler::Address(regs.first, offsetOfGPR(regs.first)), regs.first); +} + +} } // namespace JSC::FTL + +#endif // ENABLE(FTL_JIT) + diff --git a/Source/JavaScriptCore/ftl/FTLSaveRestore.h b/Source/JavaScriptCore/ftl/FTLSaveRestore.h new file mode 100644 index 000000000..720c76c3e --- /dev/null +++ b/Source/JavaScriptCore/ftl/FTLSaveRestore.h @@ -0,0 +1,59 @@ +/* + * Copyright (C) 2013 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef FTLSaveRestore_h +#define FTLSaveRestore_h + +#if ENABLE(FTL_JIT) + +#include "FPRInfo.h" +#include "GPRInfo.h" +#include "Reg.h" + +namespace JSC { + +class MacroAssembler; + +namespace FTL { + +size_t requiredScratchMemorySizeInBytes(); + +size_t offsetOfReg(Reg); +size_t offsetOfGPR(GPRReg); +size_t offsetOfFPR(FPRReg); + +// Assumes that top-of-stack can be used as a pointer-sized scratchpad. Saves all of +// the registers into the scratch buffer such that RegisterID * sizeof(int64_t) is the +// offset of every register. +void saveAllRegisters(MacroAssembler& jit, char* scratchMemory); + +void restoreAllRegisters(MacroAssembler& jit, char* scratchMemory); + +} } // namespace JSC::FTL + +#endif // ENABLE(FTL_JIT) + +#endif // FTLSaveRestore_h + diff --git a/Source/JavaScriptCore/ftl/FTLSlowPathCall.cpp b/Source/JavaScriptCore/ftl/FTLSlowPathCall.cpp new file mode 100644 index 000000000..eeba48274 --- /dev/null +++ b/Source/JavaScriptCore/ftl/FTLSlowPathCall.cpp @@ -0,0 +1,148 @@ +/* + * Copyright (C) 2013-2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "config.h" +#include "FTLSlowPathCall.h" + +#if ENABLE(FTL_JIT) + +#include "CCallHelpers.h" +#include "FTLState.h" +#include "FTLThunks.h" +#include "GPRInfo.h" +#include "JSCInlines.h" + +namespace JSC { namespace FTL { + +// This code relies on us being 64-bit. FTL is currently always 64-bit. +static const size_t wordSize = 8; + +SlowPathCallContext::SlowPathCallContext( + RegisterSet usedRegisters, CCallHelpers& jit, unsigned numArgs, GPRReg returnRegister) + : m_jit(jit) + , m_numArgs(numArgs) + , m_returnRegister(returnRegister) +{ + // We don't care that you're using callee-save, stack, or hardware registers. + usedRegisters.exclude(RegisterSet::stackRegisters()); + usedRegisters.exclude(RegisterSet::reservedHardwareRegisters()); + usedRegisters.exclude(RegisterSet::calleeSaveRegisters()); + + // The return register doesn't need to be saved. + if (m_returnRegister != InvalidGPRReg) + usedRegisters.clear(m_returnRegister); + + size_t stackBytesNeededForReturnAddress = wordSize; + + m_offsetToSavingArea = + (std::max(m_numArgs, NUMBER_OF_ARGUMENT_REGISTERS) - NUMBER_OF_ARGUMENT_REGISTERS) * wordSize; + + for (unsigned i = std::min(NUMBER_OF_ARGUMENT_REGISTERS, numArgs); i--;) + m_argumentRegisters.set(GPRInfo::toArgumentRegister(i)); + m_callingConventionRegisters.merge(m_argumentRegisters); + if (returnRegister != InvalidGPRReg) + m_callingConventionRegisters.set(GPRInfo::returnValueGPR); + m_callingConventionRegisters.filter(usedRegisters); + + unsigned numberOfCallingConventionRegisters = + m_callingConventionRegisters.numberOfSetRegisters(); + + size_t offsetToThunkSavingArea = + m_offsetToSavingArea + + numberOfCallingConventionRegisters * wordSize; + + m_stackBytesNeeded = + offsetToThunkSavingArea + + stackBytesNeededForReturnAddress + + (usedRegisters.numberOfSetRegisters() - numberOfCallingConventionRegisters) * wordSize; + + m_stackBytesNeeded = (m_stackBytesNeeded + stackAlignmentBytes() - 1) & ~(stackAlignmentBytes() - 1); + + m_jit.subPtr(CCallHelpers::TrustedImm32(m_stackBytesNeeded), CCallHelpers::stackPointerRegister); + + m_thunkSaveSet = usedRegisters; + + // This relies on all calling convention registers also being temp registers. 
+ unsigned stackIndex = 0; + for (unsigned i = GPRInfo::numberOfRegisters; i--;) { + GPRReg reg = GPRInfo::toRegister(i); + if (!m_callingConventionRegisters.get(reg)) + continue; + m_jit.storePtr(reg, CCallHelpers::Address(CCallHelpers::stackPointerRegister, m_offsetToSavingArea + (stackIndex++) * wordSize)); + m_thunkSaveSet.clear(reg); + } + + m_offset = offsetToThunkSavingArea; +} + +SlowPathCallContext::~SlowPathCallContext() +{ + if (m_returnRegister != InvalidGPRReg) + m_jit.move(GPRInfo::returnValueGPR, m_returnRegister); + + unsigned stackIndex = 0; + for (unsigned i = GPRInfo::numberOfRegisters; i--;) { + GPRReg reg = GPRInfo::toRegister(i); + if (!m_callingConventionRegisters.get(reg)) + continue; + m_jit.loadPtr(CCallHelpers::Address(CCallHelpers::stackPointerRegister, m_offsetToSavingArea + (stackIndex++) * wordSize), reg); + } + + m_jit.addPtr(CCallHelpers::TrustedImm32(m_stackBytesNeeded), CCallHelpers::stackPointerRegister); +} + +SlowPathCallKey SlowPathCallContext::keyWithTarget(void* callTarget) const +{ + return SlowPathCallKey(m_thunkSaveSet, callTarget, m_argumentRegisters, m_offset); +} + +SlowPathCall SlowPathCallContext::makeCall(void* callTarget) +{ + SlowPathCall result = SlowPathCall(m_jit.call(), keyWithTarget(callTarget)); + + m_jit.addLinkTask( + [result] (LinkBuffer& linkBuffer) { + VM& vm = linkBuffer.vm(); + + MacroAssemblerCodeRef thunk = + vm.ftlThunks->getSlowPathCallThunk(vm, result.key()); + + linkBuffer.link(result.call(), CodeLocationLabel(thunk.code())); + }); + + return result; +} + +CallSiteIndex callSiteIndexForCodeOrigin(State& state, CodeOrigin codeOrigin) +{ + if (codeOrigin) + return state.jitCode->common.addCodeOrigin(codeOrigin); + return CallSiteIndex(); +} + +} } // namespace JSC::FTL + +#endif // ENABLE(FTL_JIT) + diff --git a/Source/JavaScriptCore/ftl/FTLSlowPathCall.h b/Source/JavaScriptCore/ftl/FTLSlowPathCall.h new file mode 100644 index 000000000..442ffe9b2 --- /dev/null +++ b/Source/JavaScriptCore/ftl/FTLSlowPathCall.h @@ -0,0 +1,129 @@ +/* + * Copyright (C) 2013-2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef FTLSlowPathCall_h +#define FTLSlowPathCall_h + +#if ENABLE(FTL_JIT) + +#include "CCallHelpers.h" +#include "FTLSlowPathCallKey.h" +#include "JITOperations.h" +#include "StructureStubInfo.h" + +namespace JSC { namespace FTL { + +class State; + +class SlowPathCall { +public: + SlowPathCall() { } + + SlowPathCall(MacroAssembler::Call call, const SlowPathCallKey& key) + : m_call(call) + , m_key(key) + { + } + + MacroAssembler::Call call() const { return m_call; } + SlowPathCallKey key() const { return m_key; } + +private: + MacroAssembler::Call m_call; + SlowPathCallKey m_key; +}; + +// This will be an RAII thingy that will set up the necessary stack sizes and offsets and such. +class SlowPathCallContext { +public: + SlowPathCallContext(RegisterSet usedRegisters, CCallHelpers&, unsigned numArgs, GPRReg returnRegister); + ~SlowPathCallContext(); + + // NOTE: The call that this returns is already going to be linked by the JIT using addLinkTask(), + // so there is no need for you to link it yourself. + SlowPathCall makeCall(void* callTarget); + +private: + SlowPathCallKey keyWithTarget(void* callTarget) const; + + RegisterSet m_argumentRegisters; + RegisterSet m_callingConventionRegisters; + CCallHelpers& m_jit; + unsigned m_numArgs; + GPRReg m_returnRegister; + size_t m_offsetToSavingArea; + size_t m_stackBytesNeeded; + RegisterSet m_thunkSaveSet; + ptrdiff_t m_offset; +}; + +template<typename... ArgumentTypes> +SlowPathCall callOperation( + const RegisterSet& usedRegisters, CCallHelpers& jit, CCallHelpers::JumpList* exceptionTarget, + FunctionPtr function, GPRReg resultGPR, ArgumentTypes... arguments) +{ + SlowPathCall call; + { + SlowPathCallContext context(usedRegisters, jit, sizeof...(ArgumentTypes) + 1, resultGPR); + jit.setupArgumentsWithExecState(arguments...); + call = context.makeCall(function.value()); + } + if (exceptionTarget) + exceptionTarget->append(jit.emitExceptionCheck()); + return call; +} + +template<typename... ArgumentTypes> +SlowPathCall callOperation( + const RegisterSet& usedRegisters, CCallHelpers& jit, CallSiteIndex callSiteIndex, + CCallHelpers::JumpList* exceptionTarget, FunctionPtr function, GPRReg resultGPR, + ArgumentTypes... arguments) +{ + if (callSiteIndex) { + jit.store32( + CCallHelpers::TrustedImm32(callSiteIndex.bits()), + CCallHelpers::tagFor(JSStack::ArgumentCount)); + } + return callOperation(usedRegisters, jit, exceptionTarget, function, resultGPR, arguments...); +} + +CallSiteIndex callSiteIndexForCodeOrigin(State&, CodeOrigin); + +template<typename... ArgumentTypes> +SlowPathCall callOperation( + State& state, const RegisterSet& usedRegisters, CCallHelpers& jit, CodeOrigin codeOrigin, + CCallHelpers::JumpList* exceptionTarget, FunctionPtr function, GPRReg result, ArgumentTypes... arguments) +{ + return callOperation( + usedRegisters, jit, callSiteIndexForCodeOrigin(state, codeOrigin), exceptionTarget, function, + result, arguments...); +} + +} } // namespace JSC::FTL + +#endif // ENABLE(FTL_JIT) + +#endif // FTLSlowPathCall_h + diff --git a/Source/JavaScriptCore/ftl/FTLSlowPathCallKey.cpp b/Source/JavaScriptCore/ftl/FTLSlowPathCallKey.cpp new file mode 100644 index 000000000..4cc835d82 --- /dev/null +++ b/Source/JavaScriptCore/ftl/FTLSlowPathCallKey.cpp @@ -0,0 +1,41 @@ +/* + * Copyright (C) 2013 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "FTLSlowPathCallKey.h" + +#if ENABLE(FTL_JIT) + +namespace JSC { namespace FTL { + +void SlowPathCallKey::dump(PrintStream& out) const +{ + out.print("<usedRegisters = ", m_usedRegisters, ", offset = ", m_offset, ", callTarget = ", RawPointer(m_callTarget), ", argumentRegisters = ", m_argumentRegisters, ">"); +} + +} } // namespace JSC::FTL + +#endif // ENABLE(FTL_JIT) + diff --git a/Source/JavaScriptCore/ftl/FTLSlowPathCallKey.h b/Source/JavaScriptCore/ftl/FTLSlowPathCallKey.h new file mode 100644 index 000000000..22f87107d --- /dev/null +++ b/Source/JavaScriptCore/ftl/FTLSlowPathCallKey.h @@ -0,0 +1,136 @@ +/* + * Copyright (C) 2013 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef FTLSlowPathCallKey_h +#define FTLSlowPathCallKey_h + +#if ENABLE(FTL_JIT) + +#include "RegisterSet.h" + +namespace JSC { namespace FTL { + +// This is used for creating some sanity in slow-path calls out of the FTL's inline +// caches. The idea is that we don't want all of the register save/restore stuff to +// be generated at each IC site. 
Instead, the IC slow path call site will just save +// the registers needed for the arguments. It will arrange for there to be enough +// space on top of stack to save the remaining registers and the return PC. Then it +// will call a shared thunk that will save the remaining registers. That thunk needs +// to know the stack offset at which things get saved along with the call target. + +// Note that the offset is *not including* the return PC that would be pushed on X86. + +class SlowPathCallKey { +public: + SlowPathCallKey() + : m_callTarget(0) + , m_offset(0) + { + } + + SlowPathCallKey( + const RegisterSet& set, void* callTarget, const RegisterSet& argumentRegisters, + ptrdiff_t offset) + : m_usedRegisters(set) + , m_callTarget(callTarget) + , m_argumentRegisters(argumentRegisters) + , m_offset(offset) + { + } + + const RegisterSet& usedRegisters() const { return m_usedRegisters; } + void* callTarget() const { return m_callTarget; } + const RegisterSet& argumentRegisters() const { return m_argumentRegisters; } + ptrdiff_t offset() const { return m_offset; } + + SlowPathCallKey withCallTarget(void* callTarget) + { + return SlowPathCallKey(usedRegisters(), callTarget, argumentRegisters(), offset()); + } + + void dump(PrintStream&) const; + + enum EmptyValueTag { EmptyValue }; + enum DeletedValueTag { DeletedValue }; + + SlowPathCallKey(EmptyValueTag) + : m_usedRegisters(RegisterSet::EmptyValue) + , m_callTarget(0) + , m_offset(0) + { + } + + SlowPathCallKey(DeletedValueTag) + : m_usedRegisters(RegisterSet::DeletedValue) + , m_callTarget(0) + , m_offset(0) + { + } + + bool isEmptyValue() const { return m_usedRegisters.isEmptyValue(); } + bool isDeletedValue() const { return m_usedRegisters.isDeletedValue(); } + + bool operator==(const SlowPathCallKey& other) const + { + return m_usedRegisters == other.m_usedRegisters + && m_callTarget == other.m_callTarget + && m_offset == other.m_offset; + } + unsigned hash() const + { + return m_usedRegisters.hash() + PtrHash<void*>::hash(m_callTarget) + m_offset; + } + +private: + RegisterSet m_usedRegisters; + void* m_callTarget; + RegisterSet m_argumentRegisters; + ptrdiff_t m_offset; +}; + +struct SlowPathCallKeyHash { + static unsigned hash(const SlowPathCallKey& key) { return key.hash(); } + static bool equal(const SlowPathCallKey& a, const SlowPathCallKey& b) { return a == b; } + static const bool safeToCompareToEmptyOrDeleted = false; +}; + +} } // namespace JSC::FTL + +namespace WTF { + +template<typename T> struct DefaultHash; +template<> struct DefaultHash<JSC::FTL::SlowPathCallKey> { + typedef JSC::FTL::SlowPathCallKeyHash Hash; +}; + +template<typename T> struct HashTraits; +template<> struct HashTraits<JSC::FTL::SlowPathCallKey> : public CustomHashTraits<JSC::FTL::SlowPathCallKey> { }; + +} // namespace WTF + +#endif // ENABLE(FTL_JIT) + +#endif // FTLSlowPathCallKey_h + diff --git a/Source/JavaScriptCore/ftl/FTLStackmapArgumentList.h b/Source/JavaScriptCore/ftl/FTLStackmapArgumentList.h new file mode 100644 index 000000000..9caa2622b --- /dev/null +++ b/Source/JavaScriptCore/ftl/FTLStackmapArgumentList.h @@ -0,0 +1,43 @@ +/* + * Copyright (C) 2013-2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef FTLStackmapArgumentList_h +#define FTLStackmapArgumentList_h + +#if ENABLE(FTL_JIT) + +#include "FTLAbbreviatedTypes.h" +#include <wtf/Vector.h> + +namespace JSC { namespace FTL { + +typedef Vector<LValue, 16> StackmapArgumentList; + +} } // namespace JSC::FTL + +#endif // ENABLE(FTL_JIT) + +#endif // FTLStackmapArgumentList_h + diff --git a/Source/JavaScriptCore/ftl/FTLState.cpp b/Source/JavaScriptCore/ftl/FTLState.cpp new file mode 100644 index 000000000..2b90aa44a --- /dev/null +++ b/Source/JavaScriptCore/ftl/FTLState.cpp @@ -0,0 +1,84 @@ +/* + * Copyright (C) 2013, 2015-2016 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "config.h" +#include "FTLState.h" + +#if ENABLE(FTL_JIT) + +#include "CodeBlockWithJITType.h" +#include "FTLForOSREntryJITCode.h" +#include "FTLJITCode.h" +#include "FTLJITFinalizer.h" +#include "FTLPatchpointExceptionHandle.h" +#include <stdio.h> + +namespace JSC { namespace FTL { + +using namespace B3; +using namespace DFG; + +State::State(Graph& graph) + : graph(graph) + , generatedFunction(0) +{ + switch (graph.m_plan.mode) { + case FTLMode: { + jitCode = adoptRef(new JITCode()); + break; + } + case FTLForOSREntryMode: { + RefPtr<ForOSREntryJITCode> code = adoptRef(new ForOSREntryJITCode()); + code->initializeEntryBuffer(graph.m_vm, graph.m_profiledBlock->m_numCalleeLocals); + code->setBytecodeIndex(graph.m_plan.osrEntryBytecodeIndex); + jitCode = code; + break; + } + default: + RELEASE_ASSERT_NOT_REACHED(); + break; + } + + graph.m_plan.finalizer = std::make_unique<JITFinalizer>(graph.m_plan); + finalizer = static_cast<JITFinalizer*>(graph.m_plan.finalizer.get()); + + proc = std::make_unique<Procedure>(); + + proc->setOriginPrinter( + [] (PrintStream& out, B3::Origin origin) { + out.print("DFG:", bitwise_cast<Node*>(origin.data())); + }); + + proc->setFrontendData(&graph); +} + +State::~State() +{ +} + +} } // namespace JSC::FTL + +#endif // ENABLE(FTL_JIT) + diff --git a/Source/JavaScriptCore/ftl/FTLState.h b/Source/JavaScriptCore/ftl/FTLState.h new file mode 100644 index 000000000..023b8d34b --- /dev/null +++ b/Source/JavaScriptCore/ftl/FTLState.h @@ -0,0 +1,89 @@ +/* + * Copyright (C) 2013-2016 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef FTLState_h +#define FTLState_h + +#if ENABLE(FTL_JIT) + +#include "B3Procedure.h" +#include "DFGCommon.h" +#include "DFGGraph.h" +#include "FTLAbbreviatedTypes.h" +#include "FTLGeneratedFunction.h" +#include "FTLJITCode.h" +#include "FTLJITFinalizer.h" +#include <wtf/Box.h> +#include <wtf/Noncopyable.h> + +namespace JSC { + +namespace B3 { +class PatchpointValue; +class StackSlot; +} // namespace B3 + +namespace FTL { + +class PatchpointExceptionHandle; + +inline bool verboseCompilationEnabled() +{ + return DFG::verboseCompilationEnabled(DFG::FTLMode); +} + +inline bool shouldDumpDisassembly() +{ + return DFG::shouldDumpDisassembly(DFG::FTLMode); +} + +class State { + WTF_MAKE_NONCOPYABLE(State); + +public: + State(DFG::Graph& graph); + ~State(); + + // None of these things is owned by State. It is the responsibility of + // FTL phases to properly manage the lifecycle of the module and function. + DFG::Graph& graph; + std::unique_ptr<B3::Procedure> proc; + bool allocationFailed { false }; // Throw out the compilation once B3 returns. + RefPtr<JITCode> jitCode; + GeneratedFunction generatedFunction; + JITFinalizer* finalizer; + // Top-level exception handler. Jump here if you know that you have to genericUnwind() and there + // are no applicable catch blocks anywhere in the Graph. + RefPtr<PatchpointExceptionHandle> defaultExceptionHandle; + Box<CCallHelpers::Label> exceptionHandler { Box<CCallHelpers::Label>::create() }; + B3::StackSlot* capturedValue { nullptr }; +}; + +} } // namespace JSC::FTL + +#endif // ENABLE(FTL_JIT) + +#endif // FTLState_h + diff --git a/Source/JavaScriptCore/ftl/FTLSwitchCase.h b/Source/JavaScriptCore/ftl/FTLSwitchCase.h new file mode 100644 index 000000000..cd6bc078d --- /dev/null +++ b/Source/JavaScriptCore/ftl/FTLSwitchCase.h @@ -0,0 +1,66 @@ +/* + * Copyright (C) 2013, 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef FTLSwitchCase_h +#define FTLSwitchCase_h + +#if ENABLE(FTL_JIT) + +#include "FTLAbbreviatedTypes.h" +#include "FTLWeight.h" + +namespace JSC { namespace FTL { + +class SwitchCase { +public: + SwitchCase() + : m_value(nullptr) + , m_target(nullptr) + { + } + + SwitchCase(LValue value, LBasicBlock target, Weight weight = Weight()) + : m_value(value) + , m_target(target) + , m_weight(weight) + { + } + + LValue value() const { return m_value; } + LBasicBlock target() const { return m_target; } + Weight weight() const { return m_weight; } + +private: + LValue m_value; + LBasicBlock m_target; + Weight m_weight; +}; + +} } // namespace JSC::FTL + +#endif // ENABLE(FTL_JIT) + +#endif // FTLSwitchCase_h + diff --git a/Source/JavaScriptCore/ftl/FTLThunks.cpp b/Source/JavaScriptCore/ftl/FTLThunks.cpp new file mode 100644 index 000000000..cfbc75b7c --- /dev/null +++ b/Source/JavaScriptCore/ftl/FTLThunks.cpp @@ -0,0 +1,233 @@ +/* + * Copyright (C) 2013 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "FTLThunks.h" + +#if ENABLE(FTL_JIT) + +#include "AssemblyHelpers.h" +#include "DFGOSRExitCompilerCommon.h" +#include "FPRInfo.h" +#include "FTLOSRExitCompiler.h" +#include "FTLOperations.h" +#include "FTLSaveRestore.h" +#include "GPRInfo.h" +#include "LinkBuffer.h" + +namespace JSC { namespace FTL { + +using namespace DFG; + +enum class FrameAndStackAdjustmentRequirement { + Needed, + NotNeeded +}; + +static MacroAssemblerCodeRef genericGenerationThunkGenerator( + VM* vm, FunctionPtr generationFunction, const char* name, unsigned extraPopsToRestore, FrameAndStackAdjustmentRequirement frameAndStackAdjustmentRequirement) +{ + AssemblyHelpers jit(vm, 0); + + if (frameAndStackAdjustmentRequirement == FrameAndStackAdjustmentRequirement::Needed) { + // This needs to happen before we use the scratch buffer because this function also uses the scratch buffer. + adjustFrameAndStackInOSRExitCompilerThunk<FTL::JITCode>(jit, vm, JITCode::FTLJIT); + } + + // Note that the "return address" will be the ID that we pass to the generation function. + + ptrdiff_t stackMisalignment = MacroAssembler::pushToSaveByteOffset(); + + // Pretend that we're a C call frame. 
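+ // Establish an ordinary prologue (push fp, then fp = sp), presumably so anything
+ // that walks the stack while the generation function runs sees a well-formed frame,
+ // and keep stackMisalignment up to date so the stack can be re-aligned below.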
+ jit.pushToSave(MacroAssembler::framePointerRegister); + jit.move(MacroAssembler::stackPointerRegister, MacroAssembler::framePointerRegister); + stackMisalignment += MacroAssembler::pushToSaveByteOffset(); + + // Now create ourselves enough stack space to give saveAllRegisters() a scratch slot. + unsigned numberOfRequiredPops = 0; + do { + jit.pushToSave(GPRInfo::regT0); + stackMisalignment += MacroAssembler::pushToSaveByteOffset(); + numberOfRequiredPops++; + } while (stackMisalignment % stackAlignmentBytes()); + + ScratchBuffer* scratchBuffer = vm->scratchBufferForSize(requiredScratchMemorySizeInBytes()); + char* buffer = static_cast<char*>(scratchBuffer->dataBuffer()); + + saveAllRegisters(jit, buffer); + + // Tell GC mark phase how much of the scratch buffer is active during call. + jit.move(MacroAssembler::TrustedImmPtr(scratchBuffer->activeLengthPtr()), GPRInfo::nonArgGPR0); + jit.storePtr(MacroAssembler::TrustedImmPtr(requiredScratchMemorySizeInBytes()), GPRInfo::nonArgGPR0); + + jit.loadPtr(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); + jit.peek( + GPRInfo::argumentGPR1, + (stackMisalignment - MacroAssembler::pushToSaveByteOffset()) / sizeof(void*)); + MacroAssembler::Call functionCall = jit.call(); + + // At this point we want to make a tail call to what was returned to us in the + // returnValueGPR. But at the same time as we do this, we must restore all registers. + // The way we will accomplish this is by arranging to have the tail call target in the + // return address "slot" (be it a register or the stack). + + jit.move(GPRInfo::returnValueGPR, GPRInfo::regT0); + + // Make sure we tell the GC that we're not using the scratch buffer anymore. + jit.move(MacroAssembler::TrustedImmPtr(scratchBuffer->activeLengthPtr()), GPRInfo::regT1); + jit.storePtr(MacroAssembler::TrustedImmPtr(0), GPRInfo::regT1); + + // Prepare for tail call. + while (numberOfRequiredPops--) + jit.popToRestore(GPRInfo::regT1); + jit.popToRestore(MacroAssembler::framePointerRegister); + + // When we came in here, there was an additional thing pushed to the stack. Some clients want it + // popped before proceeding. + while (extraPopsToRestore--) + jit.popToRestore(GPRInfo::regT1); + + // Put the return address wherever the return instruction wants it. On all platforms, this + // ensures that the return address is out of the way of register restoration. 
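+ // regT0 still holds the pointer that the generation function returned (moved out of
+ // returnValueGPR above), so parking it in the return-address slot means the ret below
+ // effectively tail-calls into the newly generated code once all registers are restored.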
+ jit.restoreReturnAddressBeforeReturn(GPRInfo::regT0); + + restoreAllRegisters(jit, buffer); + + jit.ret(); + + LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID); + patchBuffer.link(functionCall, generationFunction); + return FINALIZE_CODE(patchBuffer, ("%s", name)); +} + +MacroAssemblerCodeRef osrExitGenerationThunkGenerator(VM* vm) +{ + unsigned extraPopsToRestore = 0; + return genericGenerationThunkGenerator( + vm, compileFTLOSRExit, "FTL OSR exit generation thunk", extraPopsToRestore, FrameAndStackAdjustmentRequirement::Needed); +} + +MacroAssemblerCodeRef lazySlowPathGenerationThunkGenerator(VM* vm) +{ + unsigned extraPopsToRestore = 1; + return genericGenerationThunkGenerator( + vm, compileFTLLazySlowPath, "FTL lazy slow path generation thunk", extraPopsToRestore, FrameAndStackAdjustmentRequirement::NotNeeded); +} + +static void registerClobberCheck(AssemblyHelpers& jit, RegisterSet dontClobber) +{ + if (!Options::clobberAllRegsInFTLICSlowPath()) + return; + + RegisterSet clobber = RegisterSet::allRegisters(); + clobber.exclude(RegisterSet::reservedHardwareRegisters()); + clobber.exclude(RegisterSet::stackRegisters()); + clobber.exclude(RegisterSet::calleeSaveRegisters()); + clobber.exclude(dontClobber); + + GPRReg someGPR; + for (Reg reg = Reg::first(); reg <= Reg::last(); reg = reg.next()) { + if (!clobber.get(reg) || !reg.isGPR()) + continue; + + jit.move(AssemblyHelpers::TrustedImm32(0x1337beef), reg.gpr()); + someGPR = reg.gpr(); + } + + for (Reg reg = Reg::first(); reg <= Reg::last(); reg = reg.next()) { + if (!clobber.get(reg) || !reg.isFPR()) + continue; + + jit.move64ToDouble(someGPR, reg.fpr()); + } +} + +MacroAssemblerCodeRef slowPathCallThunkGenerator(VM& vm, const SlowPathCallKey& key) +{ + AssemblyHelpers jit(&vm, 0); + + // We want to save the given registers at the given offset, then we want to save the + // old return address somewhere past that offset, and then finally we want to make the + // call. 
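+ // The used GPRs are saved first, then the used FPRs, at increasing offsets starting
+ // just above key.offset(); the caller's return PC is stashed at key.offset() itself and
+ // reloaded just before the final return. The extra slot skipped on x86 matches the note
+ // in FTLSlowPathCallKey.h that the key's offset does not include the return PC pushed
+ // by the call.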
+ + size_t currentOffset = key.offset() + sizeof(void*); + +#if CPU(X86) || CPU(X86_64) + currentOffset += sizeof(void*); +#endif + + for (MacroAssembler::RegisterID reg = MacroAssembler::firstRegister(); reg <= MacroAssembler::lastRegister(); reg = static_cast<MacroAssembler::RegisterID>(reg + 1)) { + if (!key.usedRegisters().get(reg)) + continue; + jit.storePtr(reg, AssemblyHelpers::Address(MacroAssembler::stackPointerRegister, currentOffset)); + currentOffset += sizeof(void*); + } + + for (MacroAssembler::FPRegisterID reg = MacroAssembler::firstFPRegister(); reg <= MacroAssembler::lastFPRegister(); reg = static_cast<MacroAssembler::FPRegisterID>(reg + 1)) { + if (!key.usedRegisters().get(reg)) + continue; + jit.storeDouble(reg, AssemblyHelpers::Address(MacroAssembler::stackPointerRegister, currentOffset)); + currentOffset += sizeof(double); + } + + jit.preserveReturnAddressAfterCall(GPRInfo::nonArgGPR0); + jit.storePtr(GPRInfo::nonArgGPR0, AssemblyHelpers::Address(MacroAssembler::stackPointerRegister, key.offset())); + + registerClobberCheck(jit, key.argumentRegisters()); + + AssemblyHelpers::Call call = jit.call(); + + jit.loadPtr(AssemblyHelpers::Address(MacroAssembler::stackPointerRegister, key.offset()), GPRInfo::nonPreservedNonReturnGPR); + jit.restoreReturnAddressBeforeReturn(GPRInfo::nonPreservedNonReturnGPR); + + for (MacroAssembler::FPRegisterID reg = MacroAssembler::lastFPRegister(); ; reg = static_cast<MacroAssembler::FPRegisterID>(reg - 1)) { + if (key.usedRegisters().get(reg)) { + currentOffset -= sizeof(double); + jit.loadDouble(AssemblyHelpers::Address(MacroAssembler::stackPointerRegister, currentOffset), reg); + } + if (reg == MacroAssembler::firstFPRegister()) + break; + } + + for (MacroAssembler::RegisterID reg = MacroAssembler::lastRegister(); ; reg = static_cast<MacroAssembler::RegisterID>(reg - 1)) { + if (key.usedRegisters().get(reg)) { + currentOffset -= sizeof(void*); + jit.loadPtr(AssemblyHelpers::Address(MacroAssembler::stackPointerRegister, currentOffset), reg); + } + if (reg == MacroAssembler::firstRegister()) + break; + } + + jit.ret(); + + LinkBuffer patchBuffer(vm, jit, GLOBAL_THUNK_ID); + patchBuffer.link(call, FunctionPtr(key.callTarget())); + return FINALIZE_CODE(patchBuffer, ("FTL slow path call thunk for %s", toCString(key).data())); +} + +} } // namespace JSC::FTL + +#endif // ENABLE(FTL_JIT) + diff --git a/Source/JavaScriptCore/ftl/FTLThunks.h b/Source/JavaScriptCore/ftl/FTLThunks.h new file mode 100644 index 000000000..68a9e8f3b --- /dev/null +++ b/Source/JavaScriptCore/ftl/FTLThunks.h @@ -0,0 +1,99 @@ +/* + * Copyright (C) 2013 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. 
OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef FTLThunks_h +#define FTLThunks_h + +#if ENABLE(FTL_JIT) + +#include "FTLLocation.h" +#include "FTLSlowPathCallKey.h" +#include "MacroAssemblerCodeRef.h" +#include <wtf/HashMap.h> + +namespace JSC { + +class VM; + +namespace FTL { + +MacroAssemblerCodeRef osrExitGenerationThunkGenerator(VM*); +MacroAssemblerCodeRef lazySlowPathGenerationThunkGenerator(VM*); +MacroAssemblerCodeRef slowPathCallThunkGenerator(VM&, const SlowPathCallKey&); + +template<typename KeyTypeArgument> +struct ThunkMap { + typedef KeyTypeArgument KeyType; + typedef HashMap<KeyType, MacroAssemblerCodeRef> ToThunkMap; + typedef HashMap<MacroAssemblerCodePtr, KeyType> FromThunkMap; + + ToThunkMap m_toThunk; + FromThunkMap m_fromThunk; +}; + +template<typename MapType, typename GeneratorType> +MacroAssemblerCodeRef generateIfNecessary( + VM& vm, MapType& map, const typename MapType::KeyType& key, GeneratorType generator) +{ + typename MapType::ToThunkMap::iterator iter = map.m_toThunk.find(key); + if (iter != map.m_toThunk.end()) + return iter->value; + + MacroAssemblerCodeRef result = generator(vm, key); + map.m_toThunk.add(key, result); + map.m_fromThunk.add(result.code(), key); + return result; +} + +template<typename MapType> +typename MapType::KeyType keyForThunk(MapType& map, MacroAssemblerCodePtr ptr) +{ + typename MapType::FromThunkMap::iterator iter = map.m_fromThunk.find(ptr); + RELEASE_ASSERT(iter != map.m_fromThunk.end()); + return iter->value; +} + +class Thunks { +public: + MacroAssemblerCodeRef getSlowPathCallThunk(VM& vm, const SlowPathCallKey& key) + { + return generateIfNecessary( + vm, m_slowPathCallThunks, key, slowPathCallThunkGenerator); + } + + SlowPathCallKey keyForSlowPathCallThunk(MacroAssemblerCodePtr ptr) + { + return keyForThunk(m_slowPathCallThunks, ptr); + } + +private: + ThunkMap<SlowPathCallKey> m_slowPathCallThunks; +}; + +} } // namespace JSC::FTL + +#endif // ENABLE(FTL_JIT) + +#endif // FTLTHunks_h diff --git a/Source/JavaScriptCore/ftl/FTLTypedPointer.h b/Source/JavaScriptCore/ftl/FTLTypedPointer.h new file mode 100644 index 000000000..e39c3d86a --- /dev/null +++ b/Source/JavaScriptCore/ftl/FTLTypedPointer.h @@ -0,0 +1,69 @@ +/* + * Copyright (C) 2013 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. 
OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef FTLTypedPointer_h +#define FTLTypedPointer_h + +#if ENABLE(FTL_JIT) + +#include "FTLAbbreviatedTypes.h" +#include "FTLAbstractHeap.h" + +namespace JSC { namespace FTL { + +class TypedPointer { +public: + TypedPointer() + : m_heap(0) + , m_value(0) + { + } + + TypedPointer(const AbstractHeap& heap, LValue value) + : m_heap(&heap) + , m_value(value) + { + } + + bool operator!() const + { + ASSERT(!m_heap == !m_value); + return !m_heap; + } + + const AbstractHeap* heap() const { return m_heap; } + LValue value() const { return m_value; } + +private: + const AbstractHeap* m_heap; + LValue m_value; +}; + +} } // namespace JSC::FTL + +#endif // ENABLE(FTL_JIT) + +#endif // FTLTypedPointer_h + diff --git a/Source/JavaScriptCore/ftl/FTLValueFromBlock.h b/Source/JavaScriptCore/ftl/FTLValueFromBlock.h new file mode 100644 index 000000000..31246fd22 --- /dev/null +++ b/Source/JavaScriptCore/ftl/FTLValueFromBlock.h @@ -0,0 +1,62 @@ +/* + * Copyright (C) 2013 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef FTLValueFromBlock_h +#define FTLValueFromBlock_h + +#if ENABLE(FTL_JIT) + +#include "FTLAbbreviatedTypes.h" + +namespace JSC { namespace FTL { + +class ValueFromBlock { +public: + ValueFromBlock() + : m_value(0) + , m_block(0) + { + } + + ValueFromBlock(LValue value, LBasicBlock block) + : m_value(value) + , m_block(block) + { + } + + LValue value() const { return m_value; } + LBasicBlock block() const { return m_block; } + +private: + LValue m_value; + LBasicBlock m_block; +}; + +} } // namespace JSC::FTL + +#endif // ENABLE(FTL_JIT) + +#endif // FTLValueFromBlock_h + diff --git a/Source/JavaScriptCore/ftl/FTLValueRange.cpp b/Source/JavaScriptCore/ftl/FTLValueRange.cpp new file mode 100644 index 000000000..d46ddafc1 --- /dev/null +++ b/Source/JavaScriptCore/ftl/FTLValueRange.cpp @@ -0,0 +1,41 @@ +/* + * Copyright (C) 2014 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "FTLValueRange.h" + +#if ENABLE(FTL_JIT) + +namespace JSC { namespace FTL { + +void ValueRange::decorateInstruction(LValue loadInstruction, unsigned rangeKind) const +{ + UNUSED_PARAM(loadInstruction); + UNUSED_PARAM(rangeKind); +} + +} } // namespace JSC::FTL + +#endif // ENABLE(FTL_JIT) diff --git a/Source/JavaScriptCore/ftl/FTLValueRange.h b/Source/JavaScriptCore/ftl/FTLValueRange.h new file mode 100644 index 000000000..a7cfae91a --- /dev/null +++ b/Source/JavaScriptCore/ftl/FTLValueRange.h @@ -0,0 +1,64 @@ +/* + * Copyright (C) 2014 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. 
OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef FTLValueRange_h +#define FTLValueRange_h + +#if ENABLE(FTL_JIT) + +#include "FTLAbbreviatedTypes.h" + +namespace JSC { namespace FTL { + +class ValueRange { +public: + ValueRange() + : m_begin(0) + , m_end(0) + { + } + + ValueRange(LValue begin, LValue end) + : m_begin(begin) + , m_end(end) + { + } + + LValue begin() const { return m_begin; } + LValue end() const { return m_end; } + + void decorateInstruction(LValue loadInstruction, unsigned rangeKind) const; + +private: + LValue m_begin; + LValue m_end; +}; + +} } // namespace JSC::FTL + +#endif // ENABLE(FTL_JIT) + +#endif // FTLValueRange_h + diff --git a/Source/JavaScriptCore/ftl/FTLWeight.h b/Source/JavaScriptCore/ftl/FTLWeight.h new file mode 100644 index 000000000..c5094afdf --- /dev/null +++ b/Source/JavaScriptCore/ftl/FTLWeight.h @@ -0,0 +1,75 @@ +/* + * Copyright (C) 2014 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef FTLWeight_h +#define FTLWeight_h + +#if ENABLE(FTL_JIT) + +#include "B3FrequencyClass.h" +#include <wtf/MathExtras.h> +#include <wtf/StdLibExtras.h> + +namespace JSC { namespace FTL { + +class Weight { +public: + Weight() + : m_value(std::numeric_limits<float>::quiet_NaN()) + { + } + + explicit Weight(float value) + : m_value(value) + { + } + + bool isSet() const { return m_value == m_value; } + bool operator!() const { return !isSet(); } + + float value() const { return m_value; } + + B3::FrequencyClass frequencyClass() const { return value() ? B3::FrequencyClass::Normal : B3::FrequencyClass::Rare; } + + // Inverse weight for a two-target branch. 
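+ // An unset weight stays unset. Otherwise a non-zero ("usually taken") weight maps to
+ // zero and a zero ("rarely taken") weight maps to one, so swapping the two targets of
+ // a branch keeps its profile meaningful.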
+ Weight inverse() const + { + if (!isSet()) + return Weight(); + if (value()) + return Weight(0); + return Weight(1); + } + +private: + float m_value; +}; + +} } // namespace JSC::FTL + +#endif // ENABLE(FTL_JIT) + +#endif // FTLWeight_h + diff --git a/Source/JavaScriptCore/ftl/FTLWeightedTarget.h b/Source/JavaScriptCore/ftl/FTLWeightedTarget.h new file mode 100644 index 000000000..dab5a7e41 --- /dev/null +++ b/Source/JavaScriptCore/ftl/FTLWeightedTarget.h @@ -0,0 +1,88 @@ +/* + * Copyright (C) 2014 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef FTLWeightedTarget_h +#define FTLWeightedTarget_h + +#if ENABLE(FTL_JIT) + +#include "FTLAbbreviatedTypes.h" +#include "FTLWeight.h" + +namespace JSC { namespace FTL { + +class WeightedTarget { +public: + WeightedTarget() + : m_target(nullptr) + { + } + + WeightedTarget(LBasicBlock target, Weight weight) + : m_target(target) + , m_weight(weight) + { + } + + WeightedTarget(LBasicBlock target, float weight) + : m_target(target) + , m_weight(weight) + { + } + + LBasicBlock target() const { return m_target; } + Weight weight() const { return m_weight; } + +private: + LBasicBlock m_target; + Weight m_weight; +}; + +// Helpers for creating weighted targets for statically known (or unknown) branch +// profiles. + +inline WeightedTarget usually(LBasicBlock block) +{ + return WeightedTarget(block, 1); +} + +inline WeightedTarget rarely(LBasicBlock block) +{ + return WeightedTarget(block, 0); +} + +// Currently in B3 this is the equivalent of "usually", but we like to make the distinction in +// case we ever make B3 support proper branch weights. We used to do that in LLVM. +inline WeightedTarget unsure(LBasicBlock block) +{ + return WeightedTarget(block, Weight()); +} + +} } // namespace JSC::FTL + +#endif // ENABLE(FTL_JIT) + +#endif // FTLWeightedTarget_h + |
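
FTLThunks.h above keeps two hash maps in sync so that a slow-path thunk can be fetched by its SlowPathCallKey and, later, the key recovered from the thunk's code pointer via keyForThunk. The following is a minimal standalone sketch of that bidirectional caching pattern, not the WebKit implementation: it substitutes std::unordered_map for WTF::HashMap and uses placeholder types (FakeKey, FakeCodeRef, ThunkCache) purely for illustration.

// Sketch of the ThunkMap / generateIfNecessary pattern from FTLThunks.h,
// using standard-library containers and placeholder types.
#include <cstddef>
#include <cstdint>
#include <functional>
#include <iostream>
#include <unordered_map>

struct FakeCodeRef {
    uintptr_t entryPoint = 0; // stands in for MacroAssemblerCodePtr
};

struct FakeKey {
    uintptr_t callTarget = 0; // stands in for SlowPathCallKey::callTarget()
    ptrdiff_t offset = 0;     // stands in for SlowPathCallKey::offset()
    bool operator==(const FakeKey& other) const
    {
        return callTarget == other.callTarget && offset == other.offset;
    }
};

struct FakeKeyHash {
    size_t operator()(const FakeKey& key) const
    {
        // Mirrors the additive field combination in SlowPathCallKey::hash().
        return std::hash<uintptr_t>()(key.callTarget) + static_cast<size_t>(key.offset);
    }
};

class ThunkCache {
public:
    // Returns the cached thunk for the key, generating and remembering one if needed.
    template<typename Generator>
    FakeCodeRef getOrGenerate(const FakeKey& key, const Generator& generate)
    {
        auto it = m_toThunk.find(key);
        if (it != m_toThunk.end())
            return it->second;
        FakeCodeRef result = generate(key);
        m_toThunk.emplace(key, result);
        m_fromThunk.emplace(result.entryPoint, key); // keep the reverse map in sync
        return result;
    }

    // Recovers the key from a thunk's entry point; mirrors FTL::keyForThunk.
    FakeKey keyForThunk(uintptr_t entryPoint) const { return m_fromThunk.at(entryPoint); }

private:
    std::unordered_map<FakeKey, FakeCodeRef, FakeKeyHash> m_toThunk;
    std::unordered_map<uintptr_t, FakeKey> m_fromThunk;
};

int main()
{
    ThunkCache cache;
    uintptr_t nextStub = 0x1000;
    auto generate = [&](const FakeKey&) { return FakeCodeRef { nextStub += 0x100 }; };

    FakeCodeRef a = cache.getOrGenerate({ 0xabc, 8 }, generate);
    FakeCodeRef b = cache.getOrGenerate({ 0xabc, 8 }, generate); // cache hit: no new stub
    std::cout << (a.entryPoint == b.entryPoint) << "\n";         // prints 1
    std::cout << cache.keyForThunk(a.entryPoint).offset << "\n"; // prints 8
}

Keeping the reverse map updated at insertion time is what lets a code-pointer lookup such as keyForSlowPathCallThunk answer in constant time instead of scanning every generated thunk.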