aboutsummaryrefslogtreecommitdiff
path: root/src/armnn/optimizations
diff options
context:
space:
mode:
authortelsoa01 <telmo.soares@arm.com>2018-03-09 14:13:49 +0000
committertelsoa01 <telmo.soares@arm.com>2018-03-09 14:13:49 +0000
commit4fcda0101ec3d110c1d6d7bee5c83416b645528a (patch)
treec9a70aeb2887006160c1b3d265c27efadb7bdbae /src/armnn/optimizations
downloadarmnn-4fcda0101ec3d110c1d6d7bee5c83416b645528a.tar.gz
Release 18.02
Change-Id: Id3c11dc5ee94ef664374a988fcc6901e9a232fa6
Diffstat (limited to 'src/armnn/optimizations')
-rw-r--r--src/armnn/optimizations/All.hpp11
-rw-r--r--src/armnn/optimizations/MovePermuteUp.hpp82
-rw-r--r--src/armnn/optimizations/Optimization.hpp123
-rw-r--r--src/armnn/optimizations/OptimizeConsecutiveReshapes.hpp60
-rw-r--r--src/armnn/optimizations/OptimizeInversePermutes.hpp40
-rw-r--r--src/armnn/optimizations/PermuteAsReshape.hpp70
-rw-r--r--src/armnn/optimizations/SquashEqualSiblings.hpp57
7 files changed, 443 insertions, 0 deletions
diff --git a/src/armnn/optimizations/All.hpp b/src/armnn/optimizations/All.hpp
new file mode 100644
index 0000000000..70f78d44af
--- /dev/null
+++ b/src/armnn/optimizations/All.hpp
@@ -0,0 +1,11 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+#pragma once
+
+#include "OptimizeInversePermutes.hpp"
+#include "PermuteAsReshape.hpp"
+#include "OptimizeConsecutiveReshapes.hpp"
+#include "SquashEqualSiblings.hpp"
+#include "MovePermuteUp.hpp"
diff --git a/src/armnn/optimizations/MovePermuteUp.hpp b/src/armnn/optimizations/MovePermuteUp.hpp
new file mode 100644
index 0000000000..8c59986762
--- /dev/null
+++ b/src/armnn/optimizations/MovePermuteUp.hpp
@@ -0,0 +1,82 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+#pragma once
+
+#include "Optimization.hpp"
+#include "Permute.hpp"
+
+namespace armnn
+{
+namespace optimizations
+{
+class MovePermuteUpImpl
+{
+public:
+ /// Run for every connection between a base Layer (any) and a child PermuteLayer. If the type
+ /// of the base layer allows it, it moves the permutation to the inputs of the base layer.
+ /// I.e., adds equivalent permutations before the inputs of the base layer and moves the
+ /// connections in the output of the child permute layer to the output of the base layer.
+ void Run(Graph& graph, InputSlot& connection) const
+ {
+ OutputSlot& baseOutput = *connection.GetConnectedOutputSlot();
+
+ // Only move the permute when it is the sole consumer of the base layer's output;
+ // otherwise the other consumers would start observing a permuted tensor.
+ if (baseOutput.GetNumConnections() == 1U)
+ {
+ Layer& base = baseOutput.GetOwningLayer();
+
+ if (CanMovePermuteToInputs(base))
+ {
+ auto permute = boost::polymorphic_downcast<PermuteLayer*>(&connection.GetOwningLayer());
+ const PermutationVector& perm = permute->GetPermutation();
+
+ // Insert an equivalent permute before every input of the base layer.
+ for (auto baseInput = base.BeginInputSlots(); baseInput != base.EndInputSlots(); ++baseInput)
+ {
+ // Insert new permute layer.
+ const std::string name = std::string("moved_up-") + permute->GetName();
+ PermuteLayer& permLayer = *graph.InsertNewLayer<PermuteLayer>(*baseInput, perm, name.c_str());
+
+ // Set output tensor info for the new layer.
+ OutputSlot& parentOutput = *permLayer.GetInputSlot(0).GetConnectedOutputSlot();
+ const TensorInfo permOutInfo = armnnUtils::Permuted(parentOutput.GetTensorInfo(), perm);
+ permLayer.GetOutputHandler().SetTensorInfo(permOutInfo);
+ }
+
+ // Set permuted output tensor info
+ // (the base layer now produces the permuted tensor, so it takes over the
+ // child permute layer's output tensor info).
+ const TensorInfo& childOutInfo = permute->GetOutputHandler().GetTensorInfo();
+ base.GetOutputHandler().SetTensorInfo(childOutInfo);
+
+ // Bypass permute. It will be removed as it's left unconnected.
+ permute->GetOutputSlot().MoveAllConnections(base.GetOutputSlot());
+ }
+ }
+ }
+
+protected:
+ // Not used stand-alone: instantiated only as the Wrapped base of the
+ // OptimizeForConnection adaptor below.
+ MovePermuteUpImpl() = default;
+ ~MovePermuteUpImpl() = default;
+
+private:
+ /// Whitelist of layer types for which applying the permutation to the inputs is
+ /// equivalent to applying it to the output (element-wise / data-layout-agnostic
+ /// layers), so the permute can legally be moved above the base layer.
+ static bool CanMovePermuteToInputs(const Layer& base)
+ {
+ switch (base.GetType())
+ {
+ case LayerType::Activation:
+ case LayerType::Addition:
+ case LayerType::FakeQuantization:
+ case LayerType::Floor:
+ case LayerType::MemCopy:
+ case LayerType::Multiplication:
+ return true;
+ default:
+ return false;
+ }
+ }
+};
+
+using MovePermuteUp = OptimizeForConnection<Layer, PermuteLayer, MovePermuteUpImpl>;
+
+} // namespace optimizations
+} // namespace armnn
diff --git a/src/armnn/optimizations/Optimization.hpp b/src/armnn/optimizations/Optimization.hpp
new file mode 100644
index 0000000000..89e03ff88d
--- /dev/null
+++ b/src/armnn/optimizations/Optimization.hpp
@@ -0,0 +1,123 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+#pragma once
+
+#include "Graph.hpp"
+#include "LayersFwd.hpp"
+
+namespace armnn
+{
+
+class Optimization
+{
+public:
+ /// Applies the optimization to the layer at graph position pos.
+ virtual void Run(Graph& graph, Graph::Iterator& pos) const = 0;
+protected:
+ // Protected non-virtual destructor: instances are never deleted through this base.
+ ~Optimization() = default;
+};
+
+// Wrappers
+// The implementation of the following wrappers make use of the CRTP C++ idiom
+// (curiously recurring template pattern).
+// For details, see https://en.wikipedia.org/wiki/Curiously_recurring_template_pattern
+
+/// Wrapper Optimization base class that calls Wrapped::Run for every layer of type BaseType.
+/// - Wrapped class mustn't remove the base layer.
+/// - Base layer is removed if left unconnected after applying the wrapped optimization.
+template <typename BaseType, typename Wrapped>
+class OptimizeForTypeImpl : public armnn::Optimization, public Wrapped
+{
+public:
+ // Inherit the wrapped implementation's constructors (CRTP-style mixin).
+ using Wrapped::Wrapped;
+
+ void Run(Graph& graph, Graph::Iterator& pos) const override
+ {
+ Layer* const base = *pos;
+
+ // Only dispatch to the wrapped optimization when the layer at pos is of the
+ // target type; LayerEnumOf maps the layer class to its LayerType enum value,
+ // which makes the subsequent downcast safe.
+ if (base->GetType() == LayerEnumOf<BaseType>())
+ {
+ Wrapped::Run(graph, *boost::polymorphic_downcast<BaseType*>(base));
+ }
+ }
+
+protected:
+ // Non-virtual destructor: delete only through the concrete OptimizeForType below.
+ ~OptimizeForTypeImpl() = default;
+};
+
+/// Specialization that calls Wrapped::Run for any layer type
+template <typename Wrapped>
+class OptimizeForTypeImpl<Layer, Wrapped> : public armnn::Optimization, public Wrapped
+{
+public:
+ using Wrapped::Wrapped;
+
+ void Run(Graph& graph, Graph::Iterator& pos) const override
+ {
+ // Every layer is a Layer, so no type check or downcast is needed here.
+ Wrapped::Run(graph, **pos);
+ }
+
+protected:
+ ~OptimizeForTypeImpl() = default;
+};
+
+/// Concrete, instantiable wrapper; see OptimizeForTypeImpl for the Run semantics.
+template <typename BaseType, typename Wrapped>
+class OptimizeForType final : public OptimizeForTypeImpl<BaseType, Wrapped>
+{
+public:
+ using OptimizeForTypeImpl<BaseType, Wrapped>::OptimizeForTypeImpl;
+};
+
+/// Wrapper Optimization class that calls Wrapped::Run for every connection BaseType -> ChildType.
+/// - Wrapped class mustn't remove the base layer.
+/// - Wrapped class mustn't affect existing connections in the same output. It might add new ones.
+/// - Base and children layers are removed if left unconnected after applying the wrapped optimization.
+template <typename BaseType, typename ChildType, typename Wrapped>
+class OptimizeForConnectionImpl : public Wrapped
+{
+public:
+ using Wrapped::Wrapped;
+
+ /// Called (via OptimizeForTypeImpl) once per BaseType layer: runs the wrapped
+ /// optimization for each output connection whose consuming layer is of type
+ /// ChildType, then erases any consumers left unconnected by the optimization.
+ void Run(Graph& graph, BaseType& base) const
+ {
+ for (auto output = base.BeginOutputSlots(); output != base.EndOutputSlots(); ++output)
+ {
+ // NOTE(review): per the class contract, Wrapped::Run may add new connections
+ // to this output; assumes iterating GetConnections() stays valid while that
+ // happens — confirm against OutputSlot's implementation.
+ for (auto&& childInput : output->GetConnections())
+ {
+ if (childInput->GetOwningLayer().GetType() == LayerEnumOf<ChildType>())
+ {
+ Wrapped::Run(graph, *childInput);
+ }
+ }
+
+ // Remove unconnected children
+ // (the index only advances when the current child is kept: erasing a child
+ // also removes its connection from this output, shifting the rest down).
+ for (unsigned int i = 0; i < output->GetNumConnections();)
+ {
+ Layer* child = &output->GetConnection(i)->GetOwningLayer();
+
+ if (child->IsOutputUnconnected())
+ {
+ graph.EraseLayer(child);
+ }
+ else
+ {
+ ++i;
+ }
+ }
+ }
+ }
+
+protected:
+ ~OptimizeForConnectionImpl() = default;
+};
+
+/// Concrete wrapper that applies Wrapped to every BaseType -> ChildType connection,
+/// combining the per-type and per-connection adaptors above.
+template <typename BaseType, typename ChildType, typename Wrapped>
+class OptimizeForConnection final
+ : public OptimizeForTypeImpl<BaseType, OptimizeForConnectionImpl<BaseType, ChildType, Wrapped>>
+{
+public:
+ using OptimizeForTypeImpl<BaseType, OptimizeForConnectionImpl<BaseType, ChildType, Wrapped>>::OptimizeForTypeImpl;
+};
+
+} // namespace armnn
diff --git a/src/armnn/optimizations/OptimizeConsecutiveReshapes.hpp b/src/armnn/optimizations/OptimizeConsecutiveReshapes.hpp
new file mode 100644
index 0000000000..deb49c6884
--- /dev/null
+++ b/src/armnn/optimizations/OptimizeConsecutiveReshapes.hpp
@@ -0,0 +1,60 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+#pragma once
+
+#include "Optimization.hpp"
+
+namespace armnn
+{
+namespace optimizations
+{
+
+class OptimizeConsecutiveReshapesImpl
+{
+public:
+ /// Run for every connection between a base ReshapeLayer and a child ReshapeLayer.
+ /// Inserts an equivalent ReshapeLayer that bypasses both for that connection.
+ void Run(Graph& graph, InputSlot& connection) const
+ {
+ auto& base = connection.GetConnectedOutputSlot()->GetOwningLayer();
+ auto& child = connection.GetOwningLayer();
+
+ BOOST_ASSERT(base.GetType() == LayerType::Reshape);
+ BOOST_ASSERT(child.GetType() == LayerType::Reshape);
+
+ OutputSlot* parentOut = base.GetInputSlot(0).GetConnectedOutputSlot();
+
+ // Shape feeding the base reshape, and shape produced by the child reshape.
+ const TensorInfo& inInfo = parentOut->GetTensorInfo();
+ const TensorInfo& outInfo = child.GetOutputHandler().GetTensorInfo();
+
+ if (inInfo.GetShape() != outInfo.GetShape())
+ {
+ // Insert equivalent reshape before base layer
+ const std::string name = std::string("merged-") + base.GetName() + std::string("-with-") + child.GetName();
+ const ReshapeDescriptor descriptor{outInfo.GetShape()};
+ auto& newReshape = *graph.InsertNewLayer<ReshapeLayer>(base.GetInputSlot(0), descriptor, name.c_str());
+ // Set tensor info for new layer
+ newReshape.GetOutputHandler().SetTensorInfo(outInfo);
+ // Reconnect base with original parent
+ newReshape.GetOutputSlot().MoveAllConnections(*parentOut);
+ // Parent is now the new layer
+ parentOut = &newReshape.GetOutputSlot();
+ }
+ // else: the shapes match, so the two reshapes cancel out and the child's
+ // consumers can be fed directly from the original parent.
+
+ // Move connections in child output to parent layer.
+ // Child layer will be removed as it's left unconnected.
+ // Base layer will be removed if left unconnected.
+ child.GetOutputSlot().MoveAllConnections(*parentOut);
+ }
+
+protected:
+ // Only constructible via the OptimizeForConnection wrapper that derives from it.
+ OptimizeConsecutiveReshapesImpl() = default;
+ ~OptimizeConsecutiveReshapesImpl() = default;
+};
+
+using OptimizeConsecutiveReshapes = OptimizeForConnection<ReshapeLayer, ReshapeLayer, OptimizeConsecutiveReshapesImpl>;
+
+} // namespace optimizations
+} // namespace armnn
diff --git a/src/armnn/optimizations/OptimizeInversePermutes.hpp b/src/armnn/optimizations/OptimizeInversePermutes.hpp
new file mode 100644
index 0000000000..63820cb7d3
--- /dev/null
+++ b/src/armnn/optimizations/OptimizeInversePermutes.hpp
@@ -0,0 +1,40 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+#pragma once
+
+#include "Optimization.hpp"
+
+namespace armnn
+{
+namespace optimizations
+{
+
+class OptimizeInversePermutesImpl
+{
+public:
+ /// Run for every connection between a base PermuteLayer and a child PermuteLayer.
+ /// Bypasses both layers for that connection if one is the inverse of the other.
+ void Run(Graph& graph, InputSlot& connection) const
+ {
+ Layer& base = connection.GetConnectedOutputSlot()->GetOwningLayer();
+ auto child = boost::polymorphic_downcast<PermuteLayer*>(&connection.GetOwningLayer());
+
+ if (child->IsInverse(*boost::polymorphic_downcast<PermuteLayer*>(&base)))
+ {
+ // The two permutes compose to the identity, so the child's consumers can be
+ // reconnected directly to whatever feeds the base permute.
+ // Bypass both layers. Child will be removed as it's left unconnected.
+ // Base layer will be removed if left unconnected.
+ child->GetOutputSlot().MoveAllConnections(*base.GetInputSlot(0).GetConnectedOutputSlot());
+ }
+ }
+
+protected:
+ OptimizeInversePermutesImpl() = default;
+ ~OptimizeInversePermutesImpl() = default;
+};
+
+using OptimizeInversePermutes = OptimizeForConnection<PermuteLayer, PermuteLayer, OptimizeInversePermutesImpl>;
+
+} // namespace optimizations
+} // namespace armnn
diff --git a/src/armnn/optimizations/PermuteAsReshape.hpp b/src/armnn/optimizations/PermuteAsReshape.hpp
new file mode 100644
index 0000000000..a8e4c2df5e
--- /dev/null
+++ b/src/armnn/optimizations/PermuteAsReshape.hpp
@@ -0,0 +1,70 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+#pragma once
+
+#include "Optimization.hpp"
+
+namespace armnn
+{
+namespace optimizations
+{
+
+class PermuteAsReshapeImpl
+{
+public:
+ /// Run for every PermuteLayer. Replaces it with a ReshapeLayer if they are equivalent.
+ void Run(Graph& graph, PermuteLayer& permute) const
+ {
+ if (IsReshape(permute))
+ {
+ const TensorInfo& outInfo = permute.GetOutputHandler().GetTensorInfo();
+
+ const std::string name = std::string("as_reshape-") + permute.GetName();
+ const ReshapeDescriptor descriptor{outInfo.GetShape()};
+ // Insert so layers don't need to be re-sorted
+ auto reshape = graph.InsertNewLayer<ReshapeLayer>(permute.GetInputSlot(0), descriptor, name.c_str());
+ reshape->GetOutputHandler().SetTensorInfo(outInfo);
+
+ // Bypass permute. It will be deleted since it's left unconnected.
+ permute.GetOutputSlot().MoveAllConnections(reshape->GetOutputSlot());
+ }
+ }
+
+protected:
+ PermuteAsReshapeImpl() = default;
+ ~PermuteAsReshapeImpl() = default;
+
+private:
+ /// A permute is equivalent to a reshape when the dimensions with extent > 1 keep
+ /// their relative order under the permutation: extent-1 dimensions carry no data,
+ /// so moving them changes only the shape, not the element layout in memory.
+ static bool IsReshape(const PermuteLayer& layer)
+ {
+ const TensorShape& outShape = layer.GetOutputHandler().GetTensorInfo().GetShape();
+ const PermutationVector& permutation = layer.GetPermutation();
+
+ const unsigned int numDimensions = permutation.GetSize();
+
+ // Skip leading dimensions of extent 1; they cannot affect element order.
+ unsigned int lastGtOne = 0;
+ while ((lastGtOne < numDimensions) && (outShape[(permutation[lastGtOne])] == 1U))
+ {
+ ++lastGtOne;
+ }
+
+ // Verify each later extent->1 dimension maps after the previous one, i.e. the
+ // permutation preserves the relative order of all dimensions with extent > 1.
+ bool isReshape = true;
+ for (unsigned int i = lastGtOne + 1U; isReshape && (i < numDimensions); ++i)
+ {
+ if (outShape[permutation[i]] > 1U)
+ {
+ isReshape = permutation[lastGtOne] < permutation[i];
+ lastGtOne = i;
+ }
+ }
+
+ return isReshape;
+ }
+};
+
+using PermuteAsReshape = OptimizeForType<PermuteLayer, PermuteAsReshapeImpl>;
+
+} // namespace optimizations
+} // namespace armnn
diff --git a/src/armnn/optimizations/SquashEqualSiblings.hpp b/src/armnn/optimizations/SquashEqualSiblings.hpp
new file mode 100644
index 0000000000..2dfe91fdcc
--- /dev/null
+++ b/src/armnn/optimizations/SquashEqualSiblings.hpp
@@ -0,0 +1,57 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+#pragma once
+
+#include "Optimization.hpp"
+
+namespace armnn
+{
+namespace optimizations
+{
+
+template <typename Comparable>
+class SquashEqualSiblingsImpl
+{
+public:
+ /// Run for every connection between a base Layer (any) and a child ComparableLayer.
+ /// For all siblings of the child layer that compare equal to it, bypasses and removes
+ /// them. I.e., moves the connections in the outputs of the siblings to the outputs of
+ /// the child layer, so the siblings are left unconnected (and later removed).
+ void Run(Graph& graph, InputSlot& connection) const
+ {
+ auto& child = connection.GetOwningLayer();
+
+ // Skip a child that is already unconnected: it was presumably squashed into an
+ // earlier sibling (or is pending removal), so nothing should be moved onto it.
+ if (!child.IsOutputUnconnected())
+ {
+ OutputSlot& baseOutput = *connection.GetConnectedOutputSlot();
+ auto& comparableChild = *boost::polymorphic_downcast<Comparable*>(&child);
+
+ for (auto&& it : baseOutput.GetConnections())
+ {
+ Layer& sibling = it->GetOwningLayer();
+ if ((&sibling != &child) && comparableChild.IsEqual(sibling))
+ {
+ // Bypass sibling. It will be removed as it's left unconnected.
+ // Pairs the sibling's output slots with the child's in order; assumes
+ // equal layers have the same number of outputs — TODO confirm IsEqual
+ // guarantees this.
+ auto siblingOut = sibling.BeginOutputSlots();
+ for (auto childOut = child.BeginOutputSlots(); childOut != child.EndOutputSlots(); ++childOut)
+ {
+ siblingOut->MoveAllConnections(*childOut);
+ ++siblingOut;
+ }
+ }
+ }
+ }
+ }
+
+protected:
+ SquashEqualSiblingsImpl() = default;
+ ~SquashEqualSiblingsImpl() = default;
+};
+
+using SquashEqualPermuteSiblings = OptimizeForConnection<Layer, PermuteLayer, SquashEqualSiblingsImpl<PermuteLayer>>;
+using SquashEqualReshapeSiblings = OptimizeForConnection<Layer, ReshapeLayer, SquashEqualSiblingsImpl<ReshapeLayer>>;
+
+} // namespace optimizations
+} // namespace armnn