summaryrefslogtreecommitdiff
path: root/Source/JavaScriptCore/dfg/DFGStrengthReductionPhase.cpp
diff options
context:
space:
mode:
authorLorry Tar Creator <lorry-tar-importer@lorry>2016-05-24 08:28:08 +0000
committerLorry Tar Creator <lorry-tar-importer@lorry>2016-05-24 08:28:08 +0000
commita4e969f4965059196ca948db781e52f7cfebf19e (patch)
tree6ca352808c8fdc52006a0f33f6ae3c593b23867d /Source/JavaScriptCore/dfg/DFGStrengthReductionPhase.cpp
parent41386e9cb918eed93b3f13648cbef387e371e451 (diff)
downloadWebKitGtk-tarball-a4e969f4965059196ca948db781e52f7cfebf19e.tar.gz
webkitgtk-2.12.3webkitgtk-2.12.3
Diffstat (limited to 'Source/JavaScriptCore/dfg/DFGStrengthReductionPhase.cpp')
-rw-r--r--Source/JavaScriptCore/dfg/DFGStrengthReductionPhase.cpp277
1 files changed, 218 insertions, 59 deletions
diff --git a/Source/JavaScriptCore/dfg/DFGStrengthReductionPhase.cpp b/Source/JavaScriptCore/dfg/DFGStrengthReductionPhase.cpp
index 3aa991c48..acfad6521 100644
--- a/Source/JavaScriptCore/dfg/DFGStrengthReductionPhase.cpp
+++ b/Source/JavaScriptCore/dfg/DFGStrengthReductionPhase.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2013-2015 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -28,12 +28,15 @@
#if ENABLE(DFG_JIT)
+#include "DFGAbstractHeap.h"
+#include "DFGClobberize.h"
#include "DFGGraph.h"
#include "DFGInsertionSet.h"
#include "DFGPhase.h"
#include "DFGPredictionPropagationPhase.h"
#include "DFGVariableAccessDataDump.h"
-#include "Operations.h"
+#include "JSCInlines.h"
+#include <cstdlib>
namespace JSC { namespace DFG {
@@ -70,73 +73,224 @@ private:
{
switch (m_node->op()) {
case BitOr:
- if (m_node->child1()->isConstant()) {
- JSValue op1 = m_graph.valueOfJSConstant(m_node->child1().node());
- if (op1.isInt32() && !op1.asInt32()) {
- convertToIdentityOverChild2();
+ handleCommutativity();
+
+ if (m_node->child1().useKind() != UntypedUse && m_node->child2()->isInt32Constant() && !m_node->child2()->asInt32()) {
+ convertToIdentityOverChild1();
+ break;
+ }
+ break;
+
+ case BitXor:
+ case BitAnd:
+ handleCommutativity();
+ break;
+
+ case BitLShift:
+ case BitRShift:
+ case BitURShift:
+ if (m_node->child1().useKind() != UntypedUse && m_node->child2()->isInt32Constant() && !(m_node->child2()->asInt32() & 0x1f)) {
+ convertToIdentityOverChild1();
+ break;
+ }
+ break;
+
+ case UInt32ToNumber:
+ if (m_node->child1()->op() == BitURShift
+ && m_node->child1()->child2()->isInt32Constant()
+ && (m_node->child1()->child2()->asInt32() & 0x1f)
+ && m_node->arithMode() != Arith::DoOverflow) {
+ m_node->convertToIdentity();
+ m_changed = true;
+ break;
+ }
+ break;
+
+ case ArithAdd:
+ handleCommutativity();
+
+ if (m_node->child2()->isInt32Constant() && !m_node->child2()->asInt32()) {
+ convertToIdentityOverChild1();
+ break;
+ }
+ break;
+
+ case ArithMul: {
+ handleCommutativity();
+ Edge& child2 = m_node->child2();
+ if (child2->isNumberConstant() && child2->asNumber() == 2) {
+ switch (m_node->binaryUseKind()) {
+ case DoubleRepUse:
+ // It is always valuable to get rid of a double multiplication by 2.
+ // We won't have half-register dependency issues on x86 and we won't have to load the constants.
+ m_node->setOp(ArithAdd);
+ child2.setNode(m_node->child1().node());
+ m_changed = true;
+ break;
+#if USE(JSVALUE64)
+ case Int52RepUse:
+#endif
+ case Int32Use:
+ // For integers, we can only convert compatible modes.
+ // ArithAdd does, for example, handle the negative zero check.
+ if (m_node->arithMode() == Arith::CheckOverflow || m_node->arithMode() == Arith::Unchecked) {
+ m_node->setOp(ArithAdd);
+ child2.setNode(m_node->child1().node());
+ m_changed = true;
+ }
+ break;
+ default:
break;
}
}
- if (m_node->child2()->isConstant()) {
- JSValue op2 = m_graph.valueOfJSConstant(m_node->child2().node());
- if (op2.isInt32() && !op2.asInt32()) {
- convertToIdentityOverChild1();
+ break;
+ }
+ case ArithSub:
+ if (m_node->child2()->isInt32Constant()
+ && m_node->isBinaryUseKind(Int32Use)) {
+ int32_t value = m_node->child2()->asInt32();
+ if (-value != value) {
+ m_node->setOp(ArithAdd);
+ m_node->child2().setNode(
+ m_insertionSet.insertConstant(
+ m_nodeIndex, m_node->origin, jsNumber(-value)));
+ m_changed = true;
break;
}
}
break;
-
- case BitLShift:
- case BitRShift:
- case BitURShift:
- if (m_node->child2()->isConstant()) {
- JSValue op2 = m_graph.valueOfJSConstant(m_node->child2().node());
- if (op2.isInt32() && !(op2.asInt32() & 0x1f)) {
+
+ case ArithPow:
+ if (m_node->child2()->isNumberConstant()) {
+ double yOperandValue = m_node->child2()->asNumber();
+ if (yOperandValue == 1) {
convertToIdentityOverChild1();
- break;
+ } else if (yOperandValue == 0.5) {
+ m_insertionSet.insertCheck(m_nodeIndex, m_node);
+ m_node->convertToArithSqrt();
+ m_changed = true;
}
}
break;
+
+ case ArithMod:
+ // On Integers
+ // In: ArithMod(ArithMod(x, const1), const2)
+ // Out: Identity(ArithMod(x, const1))
+ // if |const1| <= |const2|.
+ if (m_node->binaryUseKind() == Int32Use
+ && m_node->child2()->isInt32Constant()
+ && m_node->child1()->op() == ArithMod
+ && m_node->child1()->binaryUseKind() == Int32Use
+ && m_node->child1()->child2()->isInt32Constant()
+ && std::abs(m_node->child1()->child2()->asInt32()) <= std::abs(m_node->child2()->asInt32())) {
+ convertToIdentityOverChild1();
+ }
+ break;
+
+ case ValueRep:
+ case Int52Rep:
+ case DoubleRep: {
+ // This short-circuits circuitous conversions, like ValueRep(DoubleRep(value)) or
+ // even more complicated things. Like, it can handle a beast like
+ // ValueRep(DoubleRep(Int52Rep(value))).
- case UInt32ToNumber:
- if (m_node->child1()->op() == BitURShift
- && m_node->child1()->child2()->isConstant()) {
- JSValue shiftAmount = m_graph.valueOfJSConstant(
- m_node->child1()->child2().node());
- if (shiftAmount.isInt32() && (shiftAmount.asInt32() & 0x1f)) {
+ // The only speculation that we would do beyond validating that we have a type that
+ // can be represented in a certain way is an Int32 check that would appear on Int52Rep
+ // nodes. For now, if we see this and the final type we want is an Int52, we use it
+ // as an excuse not to fold. The only thing we would need is an Int52RepInt32Use kind.
+ bool hadInt32Check = false;
+ if (m_node->op() == Int52Rep) {
+ if (m_node->child1().useKind() != Int32Use)
+ break;
+ hadInt32Check = true;
+ }
+ for (Node* node = m_node->child1().node(); ; node = node->child1().node()) {
+ if (canonicalResultRepresentation(node->result()) ==
+ canonicalResultRepresentation(m_node->result())) {
+ m_insertionSet.insertCheck(m_nodeIndex, m_node);
+ if (hadInt32Check) {
+ // FIXME: Consider adding Int52RepInt32Use or even DoubleRepInt32Use,
+ // which would be super weird. The latter would only arise in some
+ // seriously circuitous conversions.
+ if (canonicalResultRepresentation(node->result()) != NodeResultJS)
+ break;
+
+ m_insertionSet.insertCheck(
+ m_nodeIndex, m_node->origin, Edge(node, Int32Use));
+ }
+ m_node->child1() = node->defaultEdge();
m_node->convertToIdentity();
m_changed = true;
break;
}
+
+ switch (node->op()) {
+ case Int52Rep:
+ if (node->child1().useKind() != Int32Use)
+ break;
+ hadInt32Check = true;
+ continue;
+
+ case DoubleRep:
+ case ValueRep:
+ continue;
+
+ default:
+ break;
+ }
+ break;
}
break;
+ }
- case GetArrayLength:
- if (JSArrayBufferView* view = m_graph.tryGetFoldableViewForChild1(m_node))
- foldTypedArrayPropertyToConstant(view, jsNumber(view->length()));
- break;
+ case Flush: {
+ ASSERT(m_graph.m_form != SSA);
- case GetTypedArrayByteOffset:
- if (JSArrayBufferView* view = m_graph.tryGetFoldableView(m_node->child1().node()))
- foldTypedArrayPropertyToConstant(view, jsNumber(view->byteOffset()));
- break;
+ Node* setLocal = nullptr;
+ VirtualRegister local = m_node->local();
- case GetIndexedPropertyStorage:
- if (JSArrayBufferView* view = m_graph.tryGetFoldableViewForChild1(m_node)) {
- if (view->mode() != FastTypedArray) {
- prepareToFoldTypedArray(view);
- m_node->convertToConstantStoragePointer(view->vector());
- m_changed = true;
+ for (unsigned i = m_nodeIndex; i--;) {
+ Node* node = m_block->at(i);
+ if (node->op() == SetLocal && node->local() == local) {
+ setLocal = node;
break;
- } else {
- // FIXME: It would be awesome to be able to fold the property storage for
- // these GC-allocated typed arrays. For now it doesn't matter because the
- // most common use-cases for constant typed arrays involve large arrays with
- // aliased buffer views.
- // https://bugs.webkit.org/show_bug.cgi?id=125425
}
+ if (accessesOverlap(m_graph, node, AbstractHeap(Stack, local)))
+ break;
+ }
+
+ if (!setLocal)
+ break;
+
+ // The Flush should become a PhantomLocal at this point. This means that we want the
+ // local's value during OSR, but we don't care if the value is stored to the stack. CPS
+ // rethreading can canonicalize PhantomLocals for us.
+ m_node->convertFlushToPhantomLocal();
+ m_graph.dethread();
+ m_changed = true;
+ break;
+ }
+
+ // FIXME: we should probably do this in constant folding but this currently relies on an OSR exit rule.
+ // https://bugs.webkit.org/show_bug.cgi?id=154832
+ case OverridesHasInstance: {
+ if (!m_node->child2().node()->isCellConstant())
+ break;
+
+ if (m_node->child2().node()->asCell() != m_graph.globalObjectFor(m_node->origin.semantic)->functionProtoHasInstanceSymbolFunction()) {
+ m_graph.convertToConstant(m_node, jsBoolean(true));
+ m_changed = true;
+
+ } else if (!m_graph.hasExitSite(m_node->origin.semantic, BadTypeInfoFlags)) {
+ // We optimistically assume that we will not see a function that has a custom instanceof operation as they should be rare.
+ m_insertionSet.insertNode(m_nodeIndex, SpecNone, CheckTypeInfoFlags, m_node->origin, OpInfo(ImplementsDefaultHasInstance), Edge(m_node->child1().node(), CellUse));
+ m_graph.convertToConstant(m_node, jsBoolean(false));
+ m_changed = true;
}
+
break;
+ }
default:
break;
@@ -145,8 +299,7 @@ private:
void convertToIdentityOverChild(unsigned childIndex)
{
- m_insertionSet.insertNode(
- m_nodeIndex, SpecNone, Phantom, m_node->codeOrigin, m_node->children);
+ m_insertionSet.insertCheck(m_nodeIndex, m_node);
m_node->children.removeEdge(childIndex ^ 1);
m_node->convertToIdentity();
m_changed = true;
@@ -162,20 +315,26 @@ private:
convertToIdentityOverChild(1);
}
- void foldTypedArrayPropertyToConstant(JSArrayBufferView* view, JSValue constant)
+ void handleCommutativity()
{
- prepareToFoldTypedArray(view);
- m_graph.convertToConstant(m_node, constant);
- m_changed = true;
- }
-
- void prepareToFoldTypedArray(JSArrayBufferView* view)
- {
- m_insertionSet.insertNode(
- m_nodeIndex, SpecNone, TypedArrayWatchpoint, m_node->codeOrigin,
- OpInfo(view));
- m_insertionSet.insertNode(
- m_nodeIndex, SpecNone, Phantom, m_node->codeOrigin, m_node->children);
+ // If the right side is a constant then there is nothing left to do.
+ if (m_node->child2()->hasConstant())
+ return;
+
+ // This case ensures that optimizations that look for x + const don't also have
+ // to look for const + x.
+ if (m_node->child1()->hasConstant()) {
+ std::swap(m_node->child1(), m_node->child2());
+ m_changed = true;
+ return;
+ }
+
+ // This case ensures that CSE is commutativity-aware.
+ if (m_node->child1().node() > m_node->child2().node()) {
+ std::swap(m_node->child1(), m_node->child2());
+ m_changed = true;
+ return;
+ }
}
InsertionSet m_insertionSet;