author     surmeh01 <surabhi.mehta@arm.com>    2018-03-29 16:29:27 +0100
committer  surmeh01 <surabhi.mehta@arm.com>    2018-03-29 16:29:27 +0100
commit     bceff2fb3fc68bb0aa88b886900c34b77340c826 (patch)
tree       d867d3e090d58d3012dfbbac456e9ea8c7f789bc /src/armnn
parent     4fcda0101ec3d110c1d6d7bee5c83416b645528a (diff)
download   armnn-bceff2fb3fc68bb0aa88b886900c34b77340c826.tar.gz
Release 18.03
Diffstat (limited to 'src/armnn')
-rw-r--r--  src/armnn/Graph.cpp | 77
-rw-r--r--  src/armnn/Graph.hpp | 100
-rw-r--r--  src/armnn/Layer.cpp | 11
-rw-r--r--  src/armnn/Layer.hpp | 10
-rw-r--r--  src/armnn/Layers.cpp | 69
-rw-r--r--  src/armnn/Layers.hpp | 9
-rw-r--r--  src/armnn/Network.cpp | 7
-rw-r--r--  src/armnn/Network.hpp | 1
-rw-r--r--  src/armnn/Optimizer.cpp | 26
-rw-r--r--  src/armnn/Optimizer.hpp | 5
-rw-r--r--  src/armnn/Runtime.cpp | 30
-rw-r--r--  src/armnn/Runtime.hpp | 2
-rw-r--r--  src/armnn/SerializeLayerParameters.cpp | 156
-rw-r--r--  src/armnn/SerializeLayerParameters.hpp | 73
-rw-r--r--  src/armnn/backends/ArmComputeTensorUtils.cpp | 7
-rw-r--r--  src/armnn/backends/ClWorkloadFactory.cpp | 67
-rw-r--r--  src/armnn/backends/ClWorkloadFactory.hpp | 11
-rw-r--r--  src/armnn/backends/NeonLayerSupport.cpp | 26
-rw-r--r--  src/armnn/backends/NeonWorkloadFactory.cpp | 2
-rw-r--r--  src/armnn/backends/NeonWorkloads.hpp | 2
-rw-r--r--  src/armnn/backends/NeonWorkloads/NeonConvolution2dBaseWorkload.cpp | 4
-rw-r--r--  src/armnn/backends/NeonWorkloads/NeonConvolution2dBaseWorkload.hpp | 2
-rw-r--r--  src/armnn/backends/NeonWorkloads/NeonConvolution2dFloat32Workload.cpp | 7
-rw-r--r--  src/armnn/backends/NeonWorkloads/NeonConvolution2dUint8Workload.cpp | 33
-rw-r--r--  src/armnn/backends/NeonWorkloads/NeonConvolution2dUint8Workload.hpp | 27
-rw-r--r--  src/armnn/backends/RefWorkloads/Addition.cpp | 6
-rw-r--r--  src/armnn/backends/RefWorkloads/Merger.hpp | 1
-rw-r--r--  src/armnn/backends/RefWorkloads/Multiplication.cpp | 42
-rw-r--r--  src/armnn/backends/RefWorkloads/Multiplication.hpp | 12
-rw-r--r--  src/armnn/backends/RefWorkloads/Pooling2d.cpp | 4
-rw-r--r--  src/armnn/backends/RefWorkloads/RefMultiplicationFloat32Workload.cpp | 7
-rw-r--r--  src/armnn/backends/RefWorkloads/RefMultiplicationUint8Workload.cpp | 7
-rw-r--r--  src/armnn/backends/RefWorkloads/Splitter.hpp | 1
-rw-r--r--  src/armnn/backends/WorkloadData.cpp | 17
-rw-r--r--  src/armnn/backends/test/ArmComputeCl.cpp | 13
-rw-r--r--  src/armnn/backends/test/ArmComputeNeon.cpp | 10
-rw-r--r--  src/armnn/backends/test/LayerTests.cpp | 322
-rw-r--r--  src/armnn/backends/test/LayerTests.hpp | 9
-rw-r--r--  src/armnn/backends/test/PermuteTestImpl.hpp | 104
-rw-r--r--  src/armnn/backends/test/Pooling2dTestImpl.hpp | 77
-rw-r--r--  src/armnn/backends/test/Reference.cpp | 11
-rw-r--r--  src/armnn/optimizations/Optimization.hpp | 27
-rw-r--r--  src/armnn/optimizations/OptimizeConsecutiveReshapes.hpp | 4
-rw-r--r--  src/armnn/optimizations/SquashEqualSiblings.hpp | 28
-rw-r--r--  src/armnn/test/Network_test.cpp | 58
-rw-r--r--  src/armnn/test/OptimizerTests.cpp | 334
-rw-r--r--  src/armnn/test/RuntimeTests.cpp | 15
47 files changed, 1645 insertions(+), 228 deletions(-)
diff --git a/src/armnn/Graph.cpp b/src/armnn/Graph.cpp
index 97f702e50f..af3b17ea8b 100644
--- a/src/armnn/Graph.cpp
+++ b/src/armnn/Graph.cpp
@@ -14,6 +14,9 @@
#include <boost/format.hpp>
#include <unordered_map>
+#include <DotSerializer.hpp>
+#include <sstream>
+
namespace armnn
{
@@ -71,6 +74,80 @@ Status Graph::Print() const
return Status::Success;
}
+Status Graph::SerializeToDot(std::ostream& stream)
+{
+ {
+ DotGraph graph(stream, "Optimized");
+
+ {
+ // Default node attributes:
+ DotDefaults nodes(stream, "node");
+ nodes.GetAttributeSet()
+ .AddAttribute("shape", "record");
+ }
+
+ {
+ // Default edge attributes:
+ DotDefaults edges(stream, "edge");
+ edges.GetAttributeSet()
+ .AddAttribute("fontsize", 8)
+ .AddAttribute("fontcolor", "blue")
+ .AddAttribute("fontname", "arial-bold");
+ }
+
+ // First declare the nodes
+ for (auto&& layer : m_Layers)
+ {
+ DotNode node(stream, layer->GetGuid(), GetLayerTypeAsCString(layer->GetType()));
+ // Extract the layer parameters
+ ParameterStringifyFunction extractParams = [&node](const std::string & name, const std::string & value){
+ node.GetContents().AddContent(name + " : " + value);
+ };
+ layer->SerializeLayerParameters(extractParams);
+ }
+
+ // Second declare the edges
+ for (auto&& layer : m_Layers)
+ {
+ LayerGuid toId = layer->GetGuid();
+
+ for (unsigned int i=0;i<layer->GetNumInputSlots(); i++)
+ {
+ OutputSlot* outputSlot = static_cast<OutputSlot*>(layer->GetInputSlot(i).GetConnection());
+ LayerGuid fromId = outputSlot->GetOwningLayer().GetGuid();
+ DotEdge edge(stream, fromId, toId);
+
+ // Now Print the tensor shape on the edge
+ {
+ // Construct the label attribute with HTML markup
+ std::stringstream ss;
+ {
+ ss << "< [";
+ const TensorShape& shape = outputSlot->GetTensorInfo().GetShape();
+ for (unsigned int i = 0; i < shape.GetNumDimensions(); i++)
+ {
+ if (i != 0)
+ {
+ ss << ",";
+ }
+ ss << shape[i];
+ }
+ ss << "] >";
+ }
+
+ edge.GetAttributeSet().AddAttribute("label", ss);
+ }
+ }
+ }
+ }
+
+ if (stream.bad())
+ {
+ return Status::Failure;
+ }
+ return Status::Success;
+}
+
Status Graph::AllocateDynamicBuffers()
{
for (auto&& layer : m_Layers)
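A minimal usage sketch for the new Graph::SerializeToDot entry point added above; it assumes 'graph' has had InferTensorInfos() run, so every connected OutputSlot can report a shape:

    #include "Graph.hpp"   // internal armnn header declaring Graph::SerializeToDot
    #include <iostream>
    #include <sstream>

    void DumpGraphAsDot(armnn::Graph& graph)
    {
        std::stringstream ss;
        if (graph.SerializeToDot(ss) == armnn::Status::Success)
        {
            // The stream now holds Graphviz text: one record-shaped node per layer,
            // labelled with its serialized parameters, plus one edge per connection,
            // labelled with the producing slot's tensor shape.
            std::cout << ss.str();
        }
    }
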
diff --git a/src/armnn/Graph.hpp b/src/armnn/Graph.hpp
index 8888034197..34aefbf085 100644
--- a/src/armnn/Graph.hpp
+++ b/src/armnn/Graph.hpp
@@ -92,6 +92,8 @@ public:
Status Print() const;
+ Status SerializeToDot(std::ostream& stream);
+
/// Adds a new layer of type LaterType to the graph constructed with the arguments passed.
template <typename LayerT, typename... Args>
LayerT* AddLayer(Args&&... args);
@@ -121,6 +123,11 @@ public:
/// Return const iterator pointing to end of list. Lowercase for range-based for loops.
ConstIterator end() const { return {m_Layers.end(), &PtrCast<const Layer>}; }
+ /// Return const iterator pointing to begin of list. Lowercase for range-based for loops.
+ ConstIterator cbegin() const { return begin(); }
+ /// Return const iterator pointing to end of list. Lowercase for range-based for loops.
+ ConstIterator cend() const { return end(); }
+
/// Sort layers in topological order and return this.
Graph& TopologicalSort() { const_cast<const Graph*>(this)->TopologicalSort(); return *this; }
const Graph& TopologicalSort() const;
@@ -154,13 +161,27 @@ private:
template <typename LayerT>
class LayerInGraph;
+ Iterator ForwardToEndOfInputs(Iterator it) const
+ {
+ while ((it != m_Layers.end()) && ((*it)->GetType() == LayerType::Input))
+ {
+ ++it;
+ }
+ return it;
+ }
+
+ Iterator RewindToBeginOfOutputs(Iterator it) const
+ {
+ while ((it != m_Layers.begin()) && ((*std::prev(it))->GetType() == LayerType::Output))
+ {
+ --it;
+ }
+ return it;
+ }
+
/// Get the position of a layer in the graph.
Iterator GetPosInGraph(Layer& layer);
- /// Adds a new layer of type LaterType to the graph constructed with the arguments passed.
- template <typename LayerT, typename... Args>
- LayerInGraph<LayerT>* AddLayerImpl(Iterator insertBefore, Args&&... args);
-
std::unordered_set<LayerBindingId> m_InputIds;
std::unordered_set<LayerBindingId> m_OutputIds;
std::unordered_map<const Layer*, Iterator> m_PosInGraphMap;
@@ -197,8 +218,19 @@ class Graph::LayerInGraph final : public LayerInGraphBase<LayerT>
{
public:
template <typename... Args>
+ LayerInGraph(Graph& graph, Args&&... args)
+ : LayerInGraphBase<LayerT>(graph,
+ // Insert at the back of the intermediate layers (before outputs).
+ std::prev(graph.end(), IteratorDifference(graph.GetNumOutputs())),
+ std::forward<Args>(args)...)
+ {
+ }
+ template <typename... Args>
LayerInGraph(Graph& graph, Iterator insertBefore, Args&&... args)
- : LayerInGraphBase<LayerT>(graph, insertBefore, std::forward<Args>(args)...)
+ : LayerInGraphBase<LayerT>(graph,
+ // Make sure it's inserted after all inputs and before all outputs.
+ graph.ForwardToEndOfInputs(graph.RewindToBeginOfOutputs(insertBefore)),
+ std::forward<Args>(args)...)
{
}
};
@@ -209,8 +241,11 @@ class Graph::LayerInGraph<InputLayer> final : public LayerInGraphBase<InputLayer
{
public:
template <typename... Args>
- LayerInGraph(Graph& graph, Iterator insertBefore, Args&&... args)
- : LayerInGraphBase<InputLayer>(graph, insertBefore, std::forward<Args>(args)...)
+ LayerInGraph(Graph& graph, Args&&... args)
+ : LayerInGraphBase<InputLayer>(graph,
+ // Always add to the back of the inputs.
+ std::next(graph.begin(), IteratorDifference(graph.GetNumInputs())),
+ std::forward<Args>(args)...)
{
const bool isNewId = m_Graph.m_InputIds.emplace(GetBindingId()).second;
if (!isNewId)
@@ -218,6 +253,12 @@ public:
throw InvalidArgumentException("A layer already exists with the specified id");
}
}
+ template <typename... Args>
+ LayerInGraph(Graph& graph, Iterator insertBefore, Args&&... args)
+ // Ignore insertBefore. Always add to the back of the inputs.
+ : LayerInGraph(graph, std::forward<Args>(args)...)
+ {
+ }
~LayerInGraph() override
{
const size_t numErased = m_Graph.m_InputIds.erase(GetBindingId());
@@ -232,8 +273,11 @@ class Graph::LayerInGraph<OutputLayer> final : public LayerInGraphBase<OutputLay
{
public:
template <typename... Args>
- LayerInGraph(Graph& graph, Iterator insertBefore, Args&&... args)
- : LayerInGraphBase<OutputLayer>(graph, insertBefore, std::forward<Args>(args)...)
+ LayerInGraph(Graph& graph, Args&&... args)
+ : LayerInGraphBase<OutputLayer>(graph,
+ // Always add to the back of the outputs.
+ graph.end(),
+ std::forward<Args>(args)...)
{
const bool isNewId = m_Graph.m_OutputIds.emplace(GetBindingId()).second;
if (!isNewId)
@@ -257,42 +301,22 @@ inline Graph::Iterator Graph::GetPosInGraph(Layer& layer)
}
template <typename LayerT, typename... Args>
-inline Graph::LayerInGraph<LayerT>* Graph::AddLayerImpl(Iterator insertBefore, Args&&... args)
-{
- return new LayerInGraph<LayerT>(*this, insertBefore, std::forward<Args>(args)...);
-}
-
-/// Inputs are inserted at the front of the list, to keep the order correct if the list is sorted.
-/// Outputs are inserted at the back of the list, to keep the order correct if the list is sorted.
-/// Other layers are inserted before existing outputs, so the latter remain at the back of the list.
-template <typename LayerT, typename... Args>
inline LayerT* Graph::AddLayer(Args&&... args)
{
- switch (LayerEnumOf<LayerT>())
- {
- case LayerType::Input:
- {
- return AddLayerImpl<LayerT>(begin(), std::forward<Args>(args)...);
- }
- case LayerType::Output:
- {
- return AddLayerImpl<LayerT>(end(), std::forward<Args>(args)...);
- }
- default:
- {
- m_LayersInOrder = false;
- const auto pos = std::prev(end(), IteratorDifference(GetNumOutputs()));
- return AddLayerImpl<LayerT>(pos, std::forward<Args>(args)...);
- }
- }
+ m_LayersInOrder = m_LayersInOrder &&
+ ((LayerEnumOf<LayerT>() == LayerType::Input) || (LayerEnumOf<LayerT>() == LayerType::Output));
+ return new LayerInGraph<LayerT>(*this, std::forward<Args>(args)...);
}
template <typename LayerT, typename... Args>
inline LayerT* Graph::InsertNewLayer(InputSlot& insertBefore, Args&&... args)
{
- // Insert before the child layer so topological order is kept.
- const Iterator pos = GetPosInGraph(insertBefore.GetOwningLayer());
- LayerT* const layer = AddLayerImpl<LayerT>(pos, std::forward<Args>(args)...);
+ // Insert after the parent if any, or before the child otherwise, so topological order is kept.
+ OutputSlot* parentOut = insertBefore.GetConnectedOutputSlot();
+ const Iterator pos = (parentOut != nullptr)
+ ? std::next(GetPosInGraph(parentOut->GetOwningLayer()))
+ : GetPosInGraph(insertBefore.GetOwningLayer());
+ LayerT* const layer = new LayerInGraph<LayerT>(*this, pos, std::forward<Args>(args)...);
insertBefore.Insert(*layer);
return layer;
}
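The reworked AddLayer relies on the new LayerInGraph constructors to keep the layer list partitioned as inputs, then other layers, then outputs, whatever order layers are added in. A small sketch of the resulting ordering (the individual layer constructor arguments shown are assumptions for illustration):

    armnn::Graph graph;
    graph.AddLayer<armnn::OutputLayer>(0, "output");   // always appended at the back of the outputs
    graph.AddLayer<armnn::InputLayer>(0, "input");     // always placed at the back of the inputs, i.e. up front
    graph.AddLayer<armnn::AdditionLayer>("add");       // inserted before the outputs
    // Iteration order is now: input, add, output.
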
diff --git a/src/armnn/Layer.cpp b/src/armnn/Layer.cpp
index 20a8ba4926..fcf0656aeb 100644
--- a/src/armnn/Layer.cpp
+++ b/src/armnn/Layer.cpp
@@ -18,7 +18,6 @@ namespace armnn
void InputSlot::Insert(Layer& layer)
{
- BOOST_ASSERT(layer.GetNumInputSlots() <= 1);
BOOST_ASSERT(layer.GetNumOutputSlots() == 1);
OutputSlot* const prevSlot = GetConnectedOutputSlot();
@@ -115,11 +114,21 @@ void OutputSlot::ValidateConnectionIndex(unsigned int index) const
}
}
+namespace {
+LayerGuid GenerateLayerGuid()
+{
+ //Note: Not thread safe.
+ static LayerGuid newGuid=0;
+ return newGuid++;
+}
+} //namespace
+
Layer::Layer(unsigned int numInputSlots, unsigned int numOutputSlots, LayerType type, const char* name)
: m_OutputHandlers(numOutputSlots)
, m_LayerName(name ? name : "")
, m_Type(type)
, m_ComputeDevice(Compute::Undefined)
+, m_Guid(GenerateLayerGuid())
{
m_InputSlots.reserve(numInputSlots);
for (unsigned int i = 0; i < numInputSlots; ++i)
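Because CloneBase (see Layers.cpp below) copies the guid onto cloned layers, the guid can be used to correlate a layer in an optimized graph with the layer it was cloned from. A sketch, assuming 'optimized' is the graph produced from an 'original' graph:

    for (auto&& layer : optimized)
    {
        armnn::LayerGuid guid = layer->GetGuid();
        // ... look up the layer carrying the same guid in 'original', e.g. to map
        // results back to the network as the user built it.
    }
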
diff --git a/src/armnn/Layer.hpp b/src/armnn/Layer.hpp
index 1160f0ab09..f9f2f22bea 100644
--- a/src/armnn/Layer.hpp
+++ b/src/armnn/Layer.hpp
@@ -10,6 +10,7 @@
#include "backends/WorkloadDataCollector.hpp"
#include "backends/WorkloadInfo.hpp"
#include "InternalTypes.hpp"
+#include "SerializeLayerParameters.hpp"
#include <armnn/Types.hpp>
#include <armnn/Tensor.hpp>
@@ -218,6 +219,10 @@ public:
virtual void ValidateTensorShapesFromInputs() = 0;
+ /// Helper to serialize the layer parameters to string
+ /// (currently used in DotSerializer and company)
+ virtual void SerializeLayerParameters(ParameterStringifyFunction & fn) const {}
+
// IConnectableLayer
const char* GetName() const override { return m_LayerName.c_str(); }
@@ -230,6 +235,9 @@ public:
const OutputSlot& GetOutputSlot(unsigned int index = 0) const override { return m_OutputSlots.at(index); }
OutputSlot& GetOutputSlot(unsigned int index = 0) override { return m_OutputSlots.at(index); }
+ void SetGuid(LayerGuid guid) { m_Guid = guid; }
+ LayerGuid GetGuid() const final { return m_Guid; }
+
protected:
// Graph needs access to the virtual destructor
friend class Graph;
@@ -281,6 +289,8 @@ private:
/// Used for sorting
mutable LayerPriority m_Priority = 0;
mutable bool m_Visiting = false;
+
+ LayerGuid m_Guid;
};
// A layer user-provided data can be bound to (e.g. inputs, outputs)
diff --git a/src/armnn/Layers.cpp b/src/armnn/Layers.cpp
index ddbc7d222c..48a02aba9c 100644
--- a/src/armnn/Layers.cpp
+++ b/src/armnn/Layers.cpp
@@ -11,6 +11,8 @@
#include "Permute.hpp"
+#include <queue>
+
namespace armnn
{
@@ -21,6 +23,7 @@ LayerType* Layer::CloneBase(Graph& graph, Params&& ... params) const
LayerType* const layer = graph.AddLayer<LayerType>(std::forward<Params>(params)...);
layer->SetComputeDevice(m_ComputeDevice);
+ layer->SetGuid(GetGuid());
return layer;
}
@@ -82,12 +85,11 @@ void AdditionLayer::ValidateTensorShapesFromInputs()
unsigned int dim1 = input1.GetShape()[i];
if (dim0 != dim1)
{
- BOOST_ASSERT_MSG(dim0 == 1 || dim1 == 1, "Dimensions should either match or one should be one length");
+ BOOST_ASSERT_MSG(dim0 == 1 || dim1 == 1, "Dimensions should either match or one should be of size 1.");
}
}
#endif
-
for (unsigned int i = 0; i < numDims; i++)
{
unsigned int dim0 = input0.GetShape()[i];
@@ -439,14 +441,31 @@ void MergerLayer::CreateTensorHandles(Graph& graph, const IWorkloadFactory& fact
m_OutputHandlers[0].CreateTensorHandles(factory);
if (factory.SupportsSubTensors())
{
- const unsigned int numInputSlots = GetNumInputSlots();
- for (unsigned int i = 0; i < numInputSlots; ++i)
+ std::queue<MergerLayer*> m_MergerLayers;
+
+ m_MergerLayers.push(this);
+ while (!m_MergerLayers.empty())
{
- OutputHandler& outputHandler = GetInputSlot(i).GetConnectedOutputSlot()->GetOutputHandler();
+ MergerLayer* currentLayer = m_MergerLayers.front();
+ ITensorHandle* parentTensor = currentLayer->GetOutputHandler(0).GetData();
- outputHandler.SetData(factory.CreateSubTensorHandle(*m_OutputHandlers[0].GetData(),
- outputHandler.GetTensorInfo().GetShape(),
- m_Param.GetViewOrigin(i)));
+ m_MergerLayers.pop();
+
+ const unsigned int numInputSlots = currentLayer->GetNumInputSlots();
+ for (unsigned int i = 0; i < numInputSlots; ++i)
+ {
+ OutputSlot* slot = currentLayer->GetInputSlot(i).GetConnectedOutputSlot();
+ OutputHandler& outputHandler = slot->GetOutputHandler();
+ outputHandler.SetData(factory.CreateSubTensorHandle(*parentTensor,
+ outputHandler.GetTensorInfo().GetShape(),
+ currentLayer->m_Param.GetViewOrigin(i)));
+
+ Layer& inputLayer = slot->GetOwningLayer();
+ if (inputLayer.GetType() == LayerType::Merger)
+ {
+ m_MergerLayers.push(boost::polymorphic_downcast<MergerLayer*>(&inputLayer));
+ }
+ }
}
}
}
@@ -568,12 +587,36 @@ MultiplicationLayer* MultiplicationLayer::Clone(Graph& graph) const
void MultiplicationLayer::ValidateTensorShapesFromInputs()
{
- ConditionalThrow<LayerValidationException>(GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() ==
- GetInputSlot(1).GetConnection()->GetTensorInfo().GetShape(),
- "MultiplicationLayer: Inputs must match");
+ auto& input0 = GetInputSlot(0).GetConnection()->GetTensorInfo();
+ auto& input1 = GetInputSlot(1).GetConnection()->GetTensorInfo();
+
+ // Get the max of the inputs
+ BOOST_ASSERT(input0.GetNumDimensions() == input1.GetNumDimensions());
+ unsigned int numDims = input0.GetNumDimensions();
+ std::vector<unsigned int> dims(numDims);
+
+ // validate inputs are broadcast compatible
+#if !NDEBUG
+ for (unsigned int i = 0; i < numDims; i++)
+ {
+ unsigned int dim0 = input0.GetShape()[i];
+ unsigned int dim1 = input1.GetShape()[i];
+ if (dim0 != dim1)
+ {
+ BOOST_ASSERT_MSG(dim0 == 1 || dim1 == 1, "Dimensions should either match or one should be of size 1.");
+ }
+ }
+#endif
- TensorInfo infoOut(GetInputSlot(0).GetConnection()->GetTensorInfo());
- ConditionalThrow<LayerValidationException>(GetOutputSlot(0).ValidateTensorShape(infoOut.GetShape()),
+ for (unsigned int i = 0; i < numDims; i++)
+ {
+ unsigned int dim0 = input0.GetShape()[i];
+ unsigned int dim1 = input1.GetShape()[i];
+ dims[i] = std::max(dim0, dim1);
+ }
+
+ TensorShape outShape(numDims, dims.data());
+ ConditionalThrow<LayerValidationException>(GetOutputSlot(0).ValidateTensorShape(outShape),
"MultiplicationLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.");
}
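Worked example of the shape inference above for broadcast-compatible inputs (each dimension either matches or is 1 on one side; the output takes the per-dimension maximum):

    input0 shape : [1, 4, 1, 5]
    input1 shape : [3, 4, 2, 5]
    inferred out : [3, 4, 2, 5]
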
diff --git a/src/armnn/Layers.hpp b/src/armnn/Layers.hpp
index 5a1e3ca063..cb460e125f 100644
--- a/src/armnn/Layers.hpp
+++ b/src/armnn/Layers.hpp
@@ -22,10 +22,17 @@ template <typename Parameters>
class LayerWithParameters : public Layer
{
public:
- typedef Parameters DescriptorType;
+ using DescriptorType = Parameters;
const Parameters& GetParameters() const { return m_Param; }
+ /// Helper to serialize the layer parameters to string
+ /// (currently used in DotSerializer and company)
+ void SerializeLayerParameters(ParameterStringifyFunction & fn) const
+ {
+ StringifyLayerParameters<Parameters>::Serialize(fn, m_Param);
+ }
+
protected:
LayerWithParameters(unsigned int numInputSlots,
unsigned int numOutputSlots,
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index 4ee68b3c48..77390cb0a4 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -58,6 +58,11 @@ Status OptimizedNetwork::PrintGraph()
return Status::Success;
}
+Status OptimizedNetwork::SerializeToDot(std::ostream& stream) const
+{
+ return m_Graph->SerializeToDot(stream);
+}
+
IOptimizedNetworkPtr Optimize(const INetwork& inNetwork, const DeviceSpec& deviceSpec)
{
const Network& network = *boost::polymorphic_downcast<const Network*>(&inNetwork);
@@ -65,7 +70,7 @@ IOptimizedNetworkPtr Optimize(const INetwork& inNetwork, const DeviceSpec& devic
OptimizedNetwork* optNet = new OptimizedNetwork(std::move(graph));
- Optimizer::Get().Optimize(optNet->GetGraph());
+ Optimizer::Optimize(optNet->GetGraph());
// Infer the tensor infos for all output slots. Throws an exception on failure.
optNet->GetGraph().InferTensorInfos();
diff --git a/src/armnn/Network.hpp b/src/armnn/Network.hpp
index de0c1ecf2f..4eb67b1a15 100644
--- a/src/armnn/Network.hpp
+++ b/src/armnn/Network.hpp
@@ -135,6 +135,7 @@ public:
~OptimizedNetwork();
Status PrintGraph() override;
+ Status SerializeToDot(std::ostream& stream) const override;
Graph& GetGraph() { return *m_Graph; }
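A hedged end-to-end sketch of the new SerializeToDot override on the optimized network; 'net' (an INetwork built elsewhere) and 'deviceSpec' are assumed to exist:

    #include <fstream>

    armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(*net, deviceSpec);
    std::ofstream dotFile("network.dot");
    if (optNet->SerializeToDot(dotFile) != armnn::Status::Success)
    {
        // the stream went bad while writing
    }
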
diff --git a/src/armnn/Optimizer.cpp b/src/armnn/Optimizer.cpp
index 85b9f2803c..9b76c7fa72 100644
--- a/src/armnn/Optimizer.cpp
+++ b/src/armnn/Optimizer.cpp
@@ -8,7 +8,7 @@
namespace armnn
{
-const Optimizer& Optimizer::Get()
+Optimizer::Optimizer()
{
// Add optimizations here
static optimizations::SquashEqualPermuteSiblings squashEqualPermuteSiblings;
@@ -19,28 +19,26 @@ const Optimizer& Optimizer::Get()
static optimizations::OptimizeConsecutiveReshapes optimizeConsecutiveReshapes;
// Set optimizations in desired order
- static const Optimizer optimizer({
- &squashEqualPermuteSiblings,
- &squashEqualReshapeSiblings,
- &optimizeInversePermutes,
- &movePermuteUp,
- &permuteAsReshape,
- &optimizeConsecutiveReshapes,
- });
-
- return optimizer;
+ m_Optimizations = {&squashEqualPermuteSiblings,
+ &squashEqualReshapeSiblings,
+ &optimizeInversePermutes,
+ &movePermuteUp,
+ &permuteAsReshape,
+ &optimizeConsecutiveReshapes,
+ };
}
-void Optimizer::Optimize(Graph& graph) const
+void Optimizer::Optimize(Graph& graph)
{
+ Optimizer optimizer;
auto it = graph.TopologicalSort().end();
// Call TopologicalSort() in every iteration to re-order the list in case layers where added/removed.
while (it != graph.TopologicalSort().begin())
{
--it;
- for (auto&& optimization : m_Optimizations)
+ for (auto&& optimization : optimizer.m_Optimizations)
{
- optimization->Run(graph, it);
+ optimization->Run(graph, **it);
if ((*it)->IsOutputUnconnected())
{
diff --git a/src/armnn/Optimizer.hpp b/src/armnn/Optimizer.hpp
index 262f264c28..1f5ed026fb 100644
--- a/src/armnn/Optimizer.hpp
+++ b/src/armnn/Optimizer.hpp
@@ -15,14 +15,13 @@ class Optimization;
class Optimizer
{
public:
- static const Optimizer& Get();
- void Optimize(Graph& graph) const;
+ static void Optimize(Graph& graph);
private:
~Optimizer() = default;
- Optimizer(std::initializer_list<Optimization*> optimizations) : m_Optimizations(optimizations) {}
+ Optimizer();
std::vector<Optimization*> m_Optimizations;
};
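With this change the optimizer is no longer obtained as a singleton; callers go through the static entry point, as Network.cpp above now does (sketch; 'graph' is an armnn::Graph):

    armnn::Optimizer::Optimize(graph);   // replaces Optimizer::Get().Optimize(graph)
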
diff --git a/src/armnn/Runtime.cpp b/src/armnn/Runtime.cpp
index ea6d19bd31..e0d6a9add0 100644
--- a/src/armnn/Runtime.cpp
+++ b/src/armnn/Runtime.cpp
@@ -9,6 +9,7 @@
#ifdef ARMCOMPUTECL_ENABLED
#include <arm_compute/core/CL/OpenCL.h>
#include <arm_compute/core/CL/CLKernelLibrary.h>
+#include <arm_compute/runtime/CL/CLScheduler.h>
#endif
#include <boost/log/trivial.hpp>
@@ -58,18 +59,26 @@ Status Runtime::LoadNetwork(NetworkId& networkIdOut, IOptimizedNetworkPtr inNetw
m_LoadedNetworks[networkIdOut] = std::move(loadedNetwork);
return Status::Success;
-
}
Status Runtime::UnloadNetwork(NetworkId networkId)
{
+#ifdef ARMCOMPUTECL_ENABLED
+ if (arm_compute::CLScheduler::get().context()() != NULL)
+ {
+ arm_compute::CLScheduler::get().sync();
+ }
+#endif
if (m_LoadedNetworks.erase(networkId) == 0)
{
BOOST_LOG_TRIVIAL(warning) << "WARNING: Runtime::UnloadNetwork(): " << networkId << " not found!";
return Status::Failure;
}
#ifdef ARMCOMPUTECL_ENABLED
- arm_compute::CLKernelLibrary::get().clear_programs_cache();
+ if (arm_compute::CLScheduler::get().context()() != NULL && m_LoadedNetworks.empty())
+ {
+ m_WorkloadFactories.m_GpuAcc.get()->LoadOpenClRuntime();
+ }
#endif
BOOST_LOG_TRIVIAL(debug) << "Runtime::UnloadNetwork(): Unloaded network with ID: " << networkId;
return Status::Success;
@@ -87,11 +96,24 @@ Runtime::Runtime(const CreationOptions& options)
m_WorkloadFactories.m_CpuRef = make_shared<RefWorkloadFactory>(
options.m_DefaultComputeDevice == Compute::CpuRef ? true : options.m_UseCpuRefAsFallback);
m_WorkloadFactories.m_CpuAcc = make_shared<NeonWorkloadFactory>();
- m_WorkloadFactories.m_GpuAcc = make_shared<ClWorkloadFactory>();
+ m_WorkloadFactories.m_GpuAcc = make_shared<ClWorkloadFactory>(options.m_ClTunedParameters);
if (options.m_DefaultComputeDevice == Compute::GpuAcc)
{
- m_WorkloadFactories.m_GpuAcc.get()->LoadOpenClRuntime(options.m_ClTunedParameters);
+ m_WorkloadFactories.m_GpuAcc.get()->LoadOpenClRuntime();
+ }
+}
+
+Runtime::~Runtime()
+{
+ std::vector<int> networkIDs;
+ std::transform(m_LoadedNetworks.begin(), m_LoadedNetworks.end(),
+ std::back_inserter(networkIDs),
+ [](const auto &pair) { return pair.first; });
+
+ for (auto networkID : networkIDs)
+ {
+ UnloadNetwork(networkID);
}
}
diff --git a/src/armnn/Runtime.hpp b/src/armnn/Runtime.hpp
index d3f3a578f3..86fd48d6d2 100644
--- a/src/armnn/Runtime.hpp
+++ b/src/armnn/Runtime.hpp
@@ -56,6 +56,8 @@ public:
/// it cannot be setup for some reason.
Runtime(const CreationOptions& options);
+ ~Runtime();
+
private:
friend void RuntimeLoadedNetworksReserve(armnn::Runtime* runtime); // see RuntimeTests.cpp
diff --git a/src/armnn/SerializeLayerParameters.cpp b/src/armnn/SerializeLayerParameters.cpp
new file mode 100644
index 0000000000..e8c2bba29b
--- /dev/null
+++ b/src/armnn/SerializeLayerParameters.cpp
@@ -0,0 +1,156 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+#include "SerializeLayerParameters.hpp"
+#include <armnn/TypesUtils.hpp>
+#include <string>
+#include <iostream>
+#include <sstream>
+
+namespace armnn
+{
+
+void
+StringifyLayerParameters<PermuteDescriptor>::Serialize(ParameterStringifyFunction & fn,
+ const PermuteDescriptor & desc)
+{
+ std::stringstream ss;
+ ss << "[";
+ bool addComma = false;
+ for (auto it=desc.m_DimMappings.begin(); it!= desc.m_DimMappings.end(); ++it)
+ {
+ if (addComma)
+ {
+ ss << ",";
+ }
+ ss << *it;
+ addComma = true;
+ }
+ ss << "]";
+
+ fn("DimMappings",ss.str());
+}
+
+void
+StringifyLayerParameters<ReshapeDescriptor>::Serialize(ParameterStringifyFunction & fn,
+ const ReshapeDescriptor & desc)
+{
+ std::stringstream ss;
+ ss << "[";
+ bool addComma = false;
+ for (unsigned int i=0; i<desc.m_TargetShape.GetNumDimensions(); ++i)
+ {
+ if (addComma)
+ {
+ ss << ",";
+ }
+ ss << desc.m_TargetShape[i];
+ addComma = true;
+ }
+ ss << "]";
+
+ fn("TargetShape",ss.str());
+}
+
+void
+StringifyLayerParameters<ActivationDescriptor>::Serialize(ParameterStringifyFunction & fn,
+ const ActivationDescriptor & desc)
+{
+ fn("Function",GetActivationFunctionAsCString(desc.m_Function));
+ fn("A",std::to_string(desc.m_A));
+ fn("B",std::to_string(desc.m_B));
+}
+
+void
+StringifyLayerParameters<Convolution2dDescriptor>::Serialize(ParameterStringifyFunction & fn,
+ const Convolution2dDescriptor & desc)
+{
+ {
+ std::stringstream ss;
+ ss << "(" << desc.m_PadTop << "," << desc.m_PadLeft
+ << "," << desc.m_PadBottom << "," << desc.m_PadRight << ")";
+ fn("Padding(T,L,B,R)",ss.str());
+ }
+
+ {
+ std::stringstream ss;
+ ss << "(" << desc.m_StrideX << "," << desc.m_StrideY << ")";
+ fn("Stride(X,Y)", ss.str());
+ }
+
+ fn("BiasEnabled",(desc.m_BiasEnabled?"true":"false"));
+}
+
+void
+StringifyLayerParameters<BatchNormalizationDescriptor>::Serialize(ParameterStringifyFunction & fn,
+ const BatchNormalizationDescriptor & desc)
+{
+ fn("Eps",std::to_string(desc.m_Eps));
+}
+
+void
+StringifyLayerParameters<DepthwiseConvolution2dDescriptor>::Serialize(ParameterStringifyFunction & fn,
+ const DepthwiseConvolution2dDescriptor & desc)
+{
+ {
+ std::stringstream ss;
+ ss << "(" << desc.m_PadTop << "," << desc.m_PadLeft
+ << "," << desc.m_PadBottom << "," << desc.m_PadRight << ")";
+ fn("Padding(T,L,B,R)",ss.str());
+ }
+
+ {
+ std::stringstream ss;
+ ss << "(" << desc.m_StrideX << "," << desc.m_StrideY << ")";
+ fn("Stride(X,Y)", ss.str());
+ }
+
+ fn("BiasEnabled",(desc.m_BiasEnabled?"true":"false"));
+}
+
+void
+StringifyLayerParameters<Pooling2dDescriptor>::Serialize(ParameterStringifyFunction & fn,
+ const Pooling2dDescriptor & desc)
+{
+ fn("Type", GetPoolingAlgorithmAsCString(desc.m_PoolType));
+ {
+ std::stringstream ss;
+ ss << "(" << desc.m_PadTop << "," << desc.m_PadLeft
+ << "," << desc.m_PadBottom << "," << desc.m_PadRight << ")";
+ fn("Padding(T,L,B,R)",ss.str());
+ }
+
+ {
+ std::stringstream ss;
+ ss << "(" << desc.m_PoolWidth << "," << desc.m_PoolHeight << ")";
+ fn("(Width,Height)",ss.str());
+ }
+
+ {
+ std::stringstream ss;
+ ss << "(" << desc.m_StrideX << "," << desc.m_StrideY << ")";
+ fn("Stride(X,Y)", ss.str());
+ }
+
+ fn("OutputShapeRounding", GetOutputShapeRoundingAsCString(desc.m_OutputShapeRounding));
+ fn("PaddingMethod", GetPaddingMethodAsCString(desc.m_PaddingMethod));
+}
+
+void
+StringifyLayerParameters<SoftmaxDescriptor>::Serialize(ParameterStringifyFunction & fn,
+ const SoftmaxDescriptor & desc)
+{
+ fn("Beta", std::to_string(desc.m_Beta));
+}
+
+void
+StringifyLayerParameters<FullyConnectedDescriptor>::Serialize(ParameterStringifyFunction & fn,
+ const FullyConnectedDescriptor & desc)
+{
+ fn("BiasEnabled", (desc.m_BiasEnabled?"true":"false"));
+ fn("TransposeWeightMatrix", (desc.m_TransposeWeightMatrix?"true":"false"));
+}
+
+
+}
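A small sketch of consuming the ParameterStringifyFunction hook declared in the header below; it mirrors the lambda used by Graph::SerializeToDot above, and 'layer' is assumed to be a Layer* obtained while iterating a graph:

    #include <map>
    #include <string>

    std::map<std::string, std::string> params;
    armnn::ParameterStringifyFunction collect =
        [&params](const std::string& name, const std::string& value)
        {
            params[name] = value;   // e.g. "Stride(X,Y)" -> "(1,1)"
        };
    layer->SerializeLayerParameters(collect);
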
diff --git a/src/armnn/SerializeLayerParameters.hpp b/src/armnn/SerializeLayerParameters.hpp
new file mode 100644
index 0000000000..b00816067d
--- /dev/null
+++ b/src/armnn/SerializeLayerParameters.hpp
@@ -0,0 +1,73 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+#pragma once
+
+#include <string>
+#include <functional>
+#include <armnn/Descriptors.hpp>
+
+namespace armnn
+{
+
+using ParameterStringifyFunction = std::function<void(const std::string & name, const std::string & value)>;
+
+///
+/// StringifyLayerParameters allows serializing layer parameters to string.
+/// The default implementation is a no-op because this operation is considered
+/// non-vital for ArmNN and thus we allow adding new layer parameters without
+/// supplying the corresponding stringify functionality.
+///
+template <typename LayerParameter>
+struct StringifyLayerParameters
+{
+ static void Serialize(ParameterStringifyFunction &, const LayerParameter &) {}
+};
+
+template <> struct StringifyLayerParameters<PermuteDescriptor>
+{
+ static void Serialize(ParameterStringifyFunction & fn, const PermuteDescriptor & desc);
+};
+
+template <> struct StringifyLayerParameters<ReshapeDescriptor>
+{
+ static void Serialize(ParameterStringifyFunction & fn, const ReshapeDescriptor & desc);
+};
+
+template <> struct StringifyLayerParameters<ActivationDescriptor>
+{
+ static void Serialize(ParameterStringifyFunction & fn, const ActivationDescriptor & desc);
+};
+
+template <> struct StringifyLayerParameters<Convolution2dDescriptor>
+{
+ static void Serialize(ParameterStringifyFunction & fn, const Convolution2dDescriptor & desc);
+};
+
+template <> struct StringifyLayerParameters<BatchNormalizationDescriptor>
+{
+ static void Serialize(ParameterStringifyFunction & fn, const BatchNormalizationDescriptor & desc);
+};
+
+template <> struct StringifyLayerParameters<DepthwiseConvolution2dDescriptor>
+{
+ static void Serialize(ParameterStringifyFunction & fn, const DepthwiseConvolution2dDescriptor & desc);
+};
+
+template <> struct StringifyLayerParameters<Pooling2dDescriptor>
+{
+ static void Serialize(ParameterStringifyFunction & fn, const Pooling2dDescriptor & desc);
+};
+
+template <> struct StringifyLayerParameters<SoftmaxDescriptor>
+{
+ static void Serialize(ParameterStringifyFunction & fn, const SoftmaxDescriptor & desc);
+};
+
+template <> struct StringifyLayerParameters<FullyConnectedDescriptor>
+{
+ static void Serialize(ParameterStringifyFunction & fn, const FullyConnectedDescriptor & desc);
+};
+
+} \ No newline at end of file
diff --git a/src/armnn/backends/ArmComputeTensorUtils.cpp b/src/armnn/backends/ArmComputeTensorUtils.cpp
index 9f21c41a2f..f88ed2b4c3 100644
--- a/src/armnn/backends/ArmComputeTensorUtils.cpp
+++ b/src/armnn/backends/ArmComputeTensorUtils.cpp
@@ -78,6 +78,7 @@ arm_compute::PoolingLayerInfo BuildArmComputePoolingLayerInfo(const Pooling2dDes
using arm_compute::DimensionRoundingType;
using arm_compute::PadStrideInfo;
using arm_compute::PoolingLayerInfo;
+ using arm_compute::Size2D;
// Resolve ARM Compute layer parameters
const PoolingType poolingType = ConvertPoolingAlgorithmToAclPoolingType(descriptor.m_PoolType);
@@ -94,7 +95,9 @@ arm_compute::PoolingLayerInfo BuildArmComputePoolingLayerInfo(const Pooling2dDes
const bool excludePadding = (descriptor.m_PaddingMethod == PaddingMethod::Exclude);
- return arm_compute::PoolingLayerInfo(poolingType, descriptor.m_PoolWidth, padStrideInfo, excludePadding);
+ const Size2D poolSize(descriptor.m_PoolWidth, descriptor.m_PoolHeight);
+
+ return arm_compute::PoolingLayerInfo(poolingType, poolSize, padStrideInfo, excludePadding);
}
arm_compute::NormalizationLayerInfo BuildArmComputeNormalizationLayerInfo(const NormalizationDescriptor& descriptor)
@@ -114,7 +117,7 @@ arm_compute::PermutationVector BuildArmComputePermutationVector(const armnn::Per
arm_compute::PermutationVector aclPerm;
unsigned int start = 0;
- while ((start == perm[start]) && (start < perm.GetSize()))
+ while ((start < perm.GetSize()) && (start == perm[start]))
{
++start;
}
diff --git a/src/armnn/backends/ClWorkloadFactory.cpp b/src/armnn/backends/ClWorkloadFactory.cpp
index 4e565a05d7..6af657b6b4 100644
--- a/src/armnn/backends/ClWorkloadFactory.cpp
+++ b/src/armnn/backends/ClWorkloadFactory.cpp
@@ -35,24 +35,62 @@ bool ClWorkloadFactory::IsLayerSupported(const Layer& layer, DataType dataType,
#ifdef ARMCOMPUTECL_ENABLED
-void ClWorkloadFactory::LoadOpenClRuntime(IClTunedParameters* clTunedParameters)
+ClWorkloadFactory::ClWorkloadFactory(IClTunedParameters* clTunedParameters):
+ m_clTunedParameters(boost::polymorphic_downcast<ClTunedParameters*>(clTunedParameters))
{
- ClTunedParameters* clTunedParametersImpl = boost::polymorphic_downcast<ClTunedParameters*>(clTunedParameters);
+ try
+ {
+ std::vector<cl::Platform> platforms;
+ cl::Platform::get(&platforms);
+
+ // Select default platform as the first element
+ cl::Platform::setDefault(platforms[0]);
+
+ std::vector<cl::Device> devices;
+ platforms[0].getDevices(CL_DEVICE_TYPE_GPU, &devices);
+
+ // Select default device as the first element
+ cl::Device::setDefault(devices[0]);
+ }
+ catch (const cl::Error& clError)
+ {
+ throw ClRuntimeUnavailableException(boost::str(boost::format(
+ "Could not initialize the CL runtime. Error description: %1%. CL error code: %2%"
+ ) % clError.what() % clError.err()));
+ }
+
+ // Remove the use of global CL context
+ cl::Context::setDefault(cl::Context{});
+ BOOST_ASSERT(cl::Context::getDefault()() == NULL);
- cl::Device device;
+ // Remove the use of global CL command queue
+ cl::CommandQueue::setDefault(cl::CommandQueue{});
+ BOOST_ASSERT(cl::CommandQueue::getDefault()() == NULL);
+}
+
+ClWorkloadFactory::~ClWorkloadFactory()
+{
+}
+
+void ClWorkloadFactory::LoadOpenClRuntime()
+{
+ cl::Device device = cl::Device::getDefault();
cl::Context context;
cl::CommandQueue commandQueue;
try
{
- device = cl::Device::getDefault();
- context = cl::Context::getDefault();
+ arm_compute::CLKernelLibrary::get().clear_programs_cache();
+ arm_compute::CLScheduler::get().init(context, commandQueue, device);
+ arm_compute::CLKernelLibrary::get().init(".", context, device);
+
+ context = cl::Context(device);
bool enableProfiling = false;
#if ARMNN_PROFILING_ENABLED
enableProfiling = true;
#endif
- if (clTunedParametersImpl && clTunedParametersImpl->m_Mode == IClTunedParameters::Mode::UpdateTunedParameters)
+ if (m_clTunedParameters && m_clTunedParameters->m_Mode == IClTunedParameters::Mode::UpdateTunedParameters)
{
enableProfiling = true; // Needed for the CLTuner to work.
}
@@ -65,7 +103,7 @@ void ClWorkloadFactory::LoadOpenClRuntime(IClTunedParameters* clTunedParameters)
else
{
// Use default queue
- commandQueue = cl::CommandQueue::getDefault();
+ commandQueue = cl::CommandQueue(context, device);
}
}
catch (const cl::Error& clError)
@@ -79,9 +117,9 @@ void ClWorkloadFactory::LoadOpenClRuntime(IClTunedParameters* clTunedParameters)
arm_compute::CLKernelLibrary::get().init(".", context, device);
arm_compute::ICLTuner* tuner = nullptr;
- if (clTunedParameters)
+ if (m_clTunedParameters)
{
- tuner = &clTunedParametersImpl->m_Tuner;
+ tuner = &m_clTunedParameters->m_Tuner;
}
arm_compute::CLScheduler::get().init(context, commandQueue, device, tuner);
}
@@ -266,7 +304,16 @@ std::unique_ptr<IWorkload> ClWorkloadFactory::CreateFloor(const FloorQueueDescri
#else // #if ARMCOMPUTECL_ENABLED
-void ClWorkloadFactory::LoadOpenClRuntime(IClTunedParameters* clTunedParameters)
+ClWorkloadFactory::ClWorkloadFactory(IClTunedParameters* clTunedParameters)
+{
+ // No CL support
+}
+
+ClWorkloadFactory::~ClWorkloadFactory()
+{
+}
+
+void ClWorkloadFactory::LoadOpenClRuntime()
{
// No CL support
}
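The tuned-parameters object now travels through the factory constructor rather than LoadOpenClRuntime. Sketch, assuming 'tunedParams' is an IClTunedParameters* created elsewhere (nullptr keeps the default behaviour):

    armnn::ClWorkloadFactory factory(tunedParams);
    factory.LoadOpenClRuntime();
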
diff --git a/src/armnn/backends/ClWorkloadFactory.hpp b/src/armnn/backends/ClWorkloadFactory.hpp
index 2477e23eeb..e1e66c050b 100644
--- a/src/armnn/backends/ClWorkloadFactory.hpp
+++ b/src/armnn/backends/ClWorkloadFactory.hpp
@@ -23,18 +23,22 @@ namespace armnn
{
class IClTunedParameters;
+class ClTunedParameters;
// ARM Compute OpenCL workload factory
class ClWorkloadFactory : public IWorkloadFactory
{
public:
- virtual ~ClWorkloadFactory(){};
+
+ ClWorkloadFactory(IClTunedParameters* clTunedParameters = nullptr);
+
+ virtual ~ClWorkloadFactory();
virtual Compute GetCompute() const override { return Compute::GpuAcc; }
static bool IsLayerSupported(const Layer& layer, DataType dataType, std::string& outReasonIfUnsupported);
- void LoadOpenClRuntime(IClTunedParameters* clTunedParameters = nullptr);
+ void LoadOpenClRuntime();
virtual bool SupportsSubTensors() const override { return true; }
@@ -109,6 +113,9 @@ public:
virtual std::unique_ptr<IWorkload> CreateFloor(const FloorQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+
+private:
+ ClTunedParameters* m_clTunedParameters;
};
class ClTunedParameters : public IClTunedParameters
diff --git a/src/armnn/backends/NeonLayerSupport.cpp b/src/armnn/backends/NeonLayerSupport.cpp
index 382b15e277..d8a3366775 100644
--- a/src/armnn/backends/NeonLayerSupport.cpp
+++ b/src/armnn/backends/NeonLayerSupport.cpp
@@ -71,6 +71,22 @@ bool IsNeonDirectConvolutionPreferred(const TensorInfo& weightInfo, const Convol
return preferDirectConvolution;
}
+bool IsNeonMultiplicationParamsSupported(std::string* reasonIfUnsupported,
+ const TensorInfo& info0,
+ const TensorInfo& info1)
+{
+ if (info0.GetShape() == info1.GetShape())
+ {
+ return true;
+ }
+
+ if (reasonIfUnsupported)
+ {
+ *reasonIfUnsupported = "Multiplication on Neon does not support implicit broadcast.";
+ }
+ return false;
+}
+
bool IsNeonNormalizationDescParamsSupported(std::string* reasonIfUnsupported, const NormalizationDescriptor& parameters)
{
if (parameters.m_NormMethodType != NormalizationAlgorithmMethod::LocalBrightness)
@@ -233,7 +249,7 @@ bool IsConvolution2dSupportedNeon(const TensorInfo& input,
return IsSupportedForDataTypeNeon(reasonIfUnsupported,
input.GetDataType(),
&TrueFunc<>,
- &FalseFuncU8<>);
+ &TrueFunc<>);
}
bool IsDepthwiseConvolutionSupportedNeon(const TensorInfo& input,
@@ -293,11 +309,13 @@ bool IsMultiplicationSupportedNeon(const TensorInfo& input0,
const TensorInfo& input1,
std::string* reasonIfUnsupported)
{
- ignore_unused(input1);
return IsSupportedForDataTypeNeon(reasonIfUnsupported,
input0.GetDataType(),
- &TrueFunc<>,
- &FalseFuncU8<>);
+ &IsNeonMultiplicationParamsSupported,
+ &FalseFuncU8<const TensorInfo&, const TensorInfo&>,
+ input0,
+ input1
+ );
}
bool IsNormalizationSupportedNeon(const TensorInfo& input,
diff --git a/src/armnn/backends/NeonWorkloadFactory.cpp b/src/armnn/backends/NeonWorkloadFactory.cpp
index 384284114f..0f65a3dcd7 100644
--- a/src/armnn/backends/NeonWorkloadFactory.cpp
+++ b/src/armnn/backends/NeonWorkloadFactory.cpp
@@ -112,7 +112,7 @@ std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreatePooling2d(const Poo
std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateConvolution2d(
const Convolution2dQueueDescriptor& descriptor, const WorkloadInfo& info) const
{
- return MakeWorkload<NeonConvolution2dFloat32Workload, NullWorkload>(descriptor, info);
+ return MakeWorkload<NeonConvolution2dFloat32Workload, NeonConvolution2dUint8Workload>(descriptor, info);
}
std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateDepthwiseConvolution2d(
diff --git a/src/armnn/backends/NeonWorkloads.hpp b/src/armnn/backends/NeonWorkloads.hpp
index 7e9e885adc..83a3e9fd9b 100644
--- a/src/armnn/backends/NeonWorkloads.hpp
+++ b/src/armnn/backends/NeonWorkloads.hpp
@@ -13,7 +13,9 @@
#include "backends/NeonWorkloads/NeonBatchNormalizationFloat32Workload.hpp"
#include "backends/NeonWorkloads/NeonConstantFloat32Workload.hpp"
#include "backends/NeonWorkloads/NeonConstantUint8Workload.hpp"
+#include "backends/NeonWorkloads/NeonConvolution2dBaseWorkload.hpp"
#include "backends/NeonWorkloads/NeonConvolution2dFloat32Workload.hpp"
+#include "backends/NeonWorkloads/NeonConvolution2dUint8Workload.hpp"
#include "backends/NeonWorkloads/NeonDepthwiseConvolutionFloat32Workload.hpp"
#include "backends/NeonWorkloads/NeonDepthwiseConvolutionUint8Workload.hpp"
#include "backends/NeonWorkloads/NeonFloorFloat32Workload.hpp"
diff --git a/src/armnn/backends/NeonWorkloads/NeonConvolution2dBaseWorkload.cpp b/src/armnn/backends/NeonWorkloads/NeonConvolution2dBaseWorkload.cpp
index 5099965a24..10c96d82a6 100644
--- a/src/armnn/backends/NeonWorkloads/NeonConvolution2dBaseWorkload.cpp
+++ b/src/armnn/backends/NeonWorkloads/NeonConvolution2dBaseWorkload.cpp
@@ -73,10 +73,6 @@ NeonConvolution2dBaseWorkload<dataType>::NeonConvolution2dBaseWorkload(const Con
using Type = ResolveType<dataType>;
InitialiseArmComputeTensorData(m_KernelTensor, m_Data.m_Weight->template GetConstTensor<Type>());
- if (m_Data.m_Parameters.m_BiasEnabled)
- {
- InitialiseArmComputeTensorData(m_BiasTensor, m_Data.m_Bias->template GetConstTensor<Type>());
- }
}
// Generate known implementations for linker
diff --git a/src/armnn/backends/NeonWorkloads/NeonConvolution2dBaseWorkload.hpp b/src/armnn/backends/NeonWorkloads/NeonConvolution2dBaseWorkload.hpp
index 37740511ba..98d075a5ea 100644
--- a/src/armnn/backends/NeonWorkloads/NeonConvolution2dBaseWorkload.hpp
+++ b/src/armnn/backends/NeonWorkloads/NeonConvolution2dBaseWorkload.hpp
@@ -3,6 +3,8 @@
// See LICENSE file in the project root for full license information.
//
+#pragma once
+
#include <backends/Workload.hpp>
#include <backends/NeonWorkloadUtils.hpp>
diff --git a/src/armnn/backends/NeonWorkloads/NeonConvolution2dFloat32Workload.cpp b/src/armnn/backends/NeonWorkloads/NeonConvolution2dFloat32Workload.cpp
index b4650ac011..a8c5c63683 100644
--- a/src/armnn/backends/NeonWorkloads/NeonConvolution2dFloat32Workload.cpp
+++ b/src/armnn/backends/NeonWorkloads/NeonConvolution2dFloat32Workload.cpp
@@ -15,7 +15,12 @@ using namespace armcomputetensorutils;
NeonConvolution2dFloat32Workload::NeonConvolution2dFloat32Workload(const Convolution2dQueueDescriptor& descriptor,
const WorkloadInfo& info)
: NeonConvolution2dBaseWorkload(descriptor, info)
-{}
+{
+ if (m_Data.m_Parameters.m_BiasEnabled)
+ {
+ InitialiseArmComputeTensorData(m_BiasTensor, m_Data.m_Bias->template GetConstTensor<float>());
+ }
+}
void NeonConvolution2dFloat32Workload::Execute() const
diff --git a/src/armnn/backends/NeonWorkloads/NeonConvolution2dUint8Workload.cpp b/src/armnn/backends/NeonWorkloads/NeonConvolution2dUint8Workload.cpp
new file mode 100644
index 0000000000..ae20522361
--- /dev/null
+++ b/src/armnn/backends/NeonWorkloads/NeonConvolution2dUint8Workload.cpp
@@ -0,0 +1,33 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+
+#include "NeonConvolution2dUint8Workload.hpp"
+
+
+namespace armnn
+{
+NeonConvolution2dUint8Workload::NeonConvolution2dUint8Workload(const Convolution2dQueueDescriptor& descriptor,
+ const WorkloadInfo& info)
+ : NeonConvolution2dBaseWorkload(descriptor, info)
+{
+ if (m_Data.m_Parameters.m_BiasEnabled)
+ {
+ InitialiseArmComputeTensorData(m_BiasTensor, m_Data.m_Bias->template GetConstTensor<int32_t>());
+ }
+}
+
+
+void NeonConvolution2dUint8Workload::Execute() const
+{
+ ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuAcc, NeonConvolution2dUint8Workload_Execute);
+ m_ConvolutionLayer->run();
+}
+
+void NeonConvolution2dUint8Workload::ValidateData() const
+{
+ m_Data.ValidateInputsOutputs("NeonConvolution2dUint8Workload", 1, 1);
+}
+
+} //namespace armnn
\ No newline at end of file
diff --git a/src/armnn/backends/NeonWorkloads/NeonConvolution2dUint8Workload.hpp b/src/armnn/backends/NeonWorkloads/NeonConvolution2dUint8Workload.hpp
new file mode 100644
index 0000000000..319d574b1e
--- /dev/null
+++ b/src/armnn/backends/NeonWorkloads/NeonConvolution2dUint8Workload.hpp
@@ -0,0 +1,27 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+
+#pragma once
+
+#include "NeonConvolution2dBaseWorkload.hpp"
+
+namespace armnn
+{
+
+class NeonConvolution2dUint8Workload : public NeonConvolution2dBaseWorkload<DataType::QuantisedAsymm8>
+{
+public:
+ NeonConvolution2dUint8Workload(const Convolution2dQueueDescriptor& descriptor, const WorkloadInfo& info);
+
+ virtual void ValidateData() const override;
+ virtual void Execute() const override;
+private:
+};
+
+} //namespace armnnn
+
+
+
+
diff --git a/src/armnn/backends/RefWorkloads/Addition.cpp b/src/armnn/backends/RefWorkloads/Addition.cpp
index c26f82ecc2..6d53a702e4 100644
--- a/src/armnn/backends/RefWorkloads/Addition.cpp
+++ b/src/armnn/backends/RefWorkloads/Addition.cpp
@@ -8,9 +8,6 @@
#include <functional>
-namespace armnn
-{
-
namespace
{
@@ -24,6 +21,9 @@ void ElementwiseAddition(unsigned int numElements, const float* inData0, const f
} // namespace
+namespace armnn
+{
+
void Addition(const TensorShape& inShape0,
const TensorShape& inShape1,
const TensorShape& outShape,
diff --git a/src/armnn/backends/RefWorkloads/Merger.hpp b/src/armnn/backends/RefWorkloads/Merger.hpp
index 9695e457e2..476ced76be 100644
--- a/src/armnn/backends/RefWorkloads/Merger.hpp
+++ b/src/armnn/backends/RefWorkloads/Merger.hpp
@@ -39,6 +39,7 @@ void Merger(const MergerQueueDescriptor& data)
//split view extents are defined by the size of (the corresponding) input tensor
const TensorInfo& inputInfo = GetTensorInfo(data.m_Inputs[viewIdx]);
+ BOOST_ASSERT(inputInfo.GetNumDimensions() == outputInfo0.GetNumDimensions());
// check all dimensions to see if this element is inside the given input view
bool insideView = true;
diff --git a/src/armnn/backends/RefWorkloads/Multiplication.cpp b/src/armnn/backends/RefWorkloads/Multiplication.cpp
index 7f558d83c5..47c0f1cef1 100644
--- a/src/armnn/backends/RefWorkloads/Multiplication.cpp
+++ b/src/armnn/backends/RefWorkloads/Multiplication.cpp
@@ -4,18 +4,48 @@
//
#include "Multiplication.hpp"
+#include "Broadcast.hpp"
-namespace armnn
+#include <functional>
+
+namespace
{
-void Multiplication(const float* in0,
- const float* in1,
- unsigned int numElements,
- float* out)
+void ElementwiseMultiplication(unsigned int numElements,
+ const float* inData0,
+ const float* inData1,
+ float* outData)
{
for (unsigned int i = 0; i < numElements; ++i)
{
- out[i] = in0[i] * in1[i];
+ outData[i] = inData0[i] * inData1[i];
+ }
+}
+
+} // namespace
+
+namespace armnn
+{
+
+void Multiplication(const TensorShape& inShape0,
+ const TensorShape& inShape1,
+ const TensorShape& outShape,
+ const float* inData0,
+ const float* inData1,
+ float* outData)
+{
+ if (inShape0 == inShape1)
+ {
+ ElementwiseMultiplication(inShape0.GetNumElements(), inData0, inData1, outData);
+ }
+ else
+ {
+ BroadcastLoop(inShape0, inShape1, outShape).Unroll(
+ std::multiplies<float>(),
+ 0,
+ inData0,
+ inData1,
+ outData);
}
}
diff --git a/src/armnn/backends/RefWorkloads/Multiplication.hpp b/src/armnn/backends/RefWorkloads/Multiplication.hpp
index d0b033e7ec..54fcac51c1 100644
--- a/src/armnn/backends/RefWorkloads/Multiplication.hpp
+++ b/src/armnn/backends/RefWorkloads/Multiplication.hpp
@@ -5,12 +5,16 @@
#pragma once
+#include <armnn/Tensor.hpp>
+
namespace armnn
{
-void Multiplication(const float* in0,
- const float* in1,
- unsigned int numElements,
- float* out);
+void Multiplication(const TensorShape& inShape0,
+ const TensorShape& inShape1,
+ const TensorShape& outShape,
+ const float* inData0,
+ const float* inData1,
+ float* outData);
} //namespace armnn
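A minimal sketch of calling the reworked reference Multiplication with broadcasting; the shapes and data are illustrative only:

    #include <vector>

    const unsigned int dims0[] = {1, 2, 2, 2};   // batch dimension of 1 will be broadcast
    const unsigned int dims1[] = {2, 2, 2, 2};
    armnn::TensorShape shape0(4, dims0);
    armnn::TensorShape shape1(4, dims1);
    armnn::TensorShape outShape(4, dims1);

    std::vector<float> in0(shape0.GetNumElements(), 2.0f);
    std::vector<float> in1(shape1.GetNumElements(), 3.0f);
    std::vector<float> out(outShape.GetNumElements());

    armnn::Multiplication(shape0, shape1, outShape, in0.data(), in1.data(), out.data());
    // Every element of 'out' is now 6.0f; in0 was broadcast across the first dimension.
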
diff --git a/src/armnn/backends/RefWorkloads/Pooling2d.cpp b/src/armnn/backends/RefWorkloads/Pooling2d.cpp
index 6d15d8a436..a643e67690 100644
--- a/src/armnn/backends/RefWorkloads/Pooling2d.cpp
+++ b/src/armnn/backends/RefWorkloads/Pooling2d.cpp
@@ -186,8 +186,8 @@ void Pooling2d(const float* in,
// Clamp the pooling region inside the valid input area (which includes the padding).
// This is necessary because the final pooling in a row may overlap beyond the padding.
- hend = std::min(hend, heightInput + padRight);
- wend = std::min(wend, widthInput + padBottom);
+ hend = std::min(hend, heightInput + padBottom);
+ wend = std::min(wend, widthInput + padRight);
float result = defaultInitializer;
float poolAreaSize = boost::numeric_cast<float>((hend - hstart) * (wend - wstart));
diff --git a/src/armnn/backends/RefWorkloads/RefMultiplicationFloat32Workload.cpp b/src/armnn/backends/RefWorkloads/RefMultiplicationFloat32Workload.cpp
index ed68b1f6db..d7c54d9aad 100644
--- a/src/armnn/backends/RefWorkloads/RefMultiplicationFloat32Workload.cpp
+++ b/src/armnn/backends/RefWorkloads/RefMultiplicationFloat32Workload.cpp
@@ -17,12 +17,15 @@ void RefMultiplicationFloat32Workload::Execute() const
{
ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefMultiplicationFloat32Workload_Execute");
- const TensorInfo& inputInfo0 = GetTensorInfo(m_Data.m_Inputs[0]);
+ const TensorShape& inShape0 = GetTensorInfo(m_Data.m_Inputs[0]).GetShape();
+ const TensorShape& inShape1 = GetTensorInfo(m_Data.m_Inputs[1]).GetShape();
+ const TensorShape& outShape = GetTensorInfo(m_Data.m_Outputs[0]).GetShape();
float* outputData = GetOutputTensorDataFloat(0, m_Data);
const float* inputData0 = GetInputTensorDataFloat(0, m_Data);
const float* inputData1 = GetInputTensorDataFloat(1, m_Data);
- Multiplication(inputData0, inputData1, inputInfo0.GetNumElements(), outputData);
+
+ Multiplication(inShape0, inShape1, outShape, inputData0, inputData1, outputData);
}
} //namespace armnn
diff --git a/src/armnn/backends/RefWorkloads/RefMultiplicationUint8Workload.cpp b/src/armnn/backends/RefWorkloads/RefMultiplicationUint8Workload.cpp
index 2e6f0e6c8b..d5c4afd87c 100644
--- a/src/armnn/backends/RefWorkloads/RefMultiplicationUint8Workload.cpp
+++ b/src/armnn/backends/RefWorkloads/RefMultiplicationUint8Workload.cpp
@@ -27,10 +27,9 @@ void RefMultiplicationUint8Workload::Execute() const
auto dequant1 = Dequantize(GetInputTensorDataU8(1, m_Data), inputInfo1);
std::vector<float> results(outputInfo.GetNumElements());
- Multiplication(dequant0.data(),
- dequant1.data(),
- inputInfo0.GetNumElements(),
- results.data());
+ Multiplication(
+ inputInfo0.GetShape(), inputInfo1.GetShape(), outputInfo.GetShape(),
+ dequant0.data(), dequant1.data(),results.data());
Quantize(GetOutputTensorDataU8(0, m_Data), results.data(), outputInfo);
}
diff --git a/src/armnn/backends/RefWorkloads/Splitter.hpp b/src/armnn/backends/RefWorkloads/Splitter.hpp
index 67f6c100f9..74c4cb4e18 100644
--- a/src/armnn/backends/RefWorkloads/Splitter.hpp
+++ b/src/armnn/backends/RefWorkloads/Splitter.hpp
@@ -41,6 +41,7 @@ void Splitter(const SplitterQueueDescriptor& data)
//split view extents are defined by the size of (the corresponding) input tensor
const TensorInfo& outputInfo = GetTensorInfo(data.m_Outputs[viewIdx]);
+ BOOST_ASSERT(outputInfo.GetNumDimensions() == inputInfo0.GetNumDimensions());
// check all dimensions to see if this element is inside the given input view
bool insideView = true;
diff --git a/src/armnn/backends/WorkloadData.cpp b/src/armnn/backends/WorkloadData.cpp
index 96a37802f1..c951fc5d8d 100644
--- a/src/armnn/backends/WorkloadData.cpp
+++ b/src/armnn/backends/WorkloadData.cpp
@@ -502,16 +502,13 @@ void MultiplicationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) c
{
ValidateTwoInputs(workloadInfo, "MultiplicationQueueDescriptor");
ValidateSingleOutput(workloadInfo, "MultiplicationQueueDescriptor");
- ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
- workloadInfo.m_InputTensorInfos[1],
- "MultiplicationQueueDescriptor",
- "first input",
- "second input");
- ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
- workloadInfo.m_OutputTensorInfos[0],
- "MultiplicationQueueDescriptor",
- "input",
- "output");
+
+ ValidateBroadcastTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
+ workloadInfo.m_InputTensorInfos[1],
+ workloadInfo.m_OutputTensorInfos[0],
+ "MultiplicationQueueDescriptor",
+ "first input",
+ "second input");
}
void BatchNormalizationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
diff --git a/src/armnn/backends/test/ArmComputeCl.cpp b/src/armnn/backends/test/ArmComputeCl.cpp
index 5933cebc80..c45a82db63 100644
--- a/src/armnn/backends/test/ArmComputeCl.cpp
+++ b/src/armnn/backends/test/ArmComputeCl.cpp
@@ -103,7 +103,7 @@ ARMNN_AUTO_TEST_CASE(IgnorePaddingSimpleAveragePooling2d, IgnorePaddingSimpleAve
ARMNN_AUTO_TEST_CASE(IgnorePaddingSimpleAveragePooling2dUint8, IgnorePaddingSimpleAveragePooling2dUint8Test)
ARMNN_AUTO_TEST_CASE(IgnorePaddingSimpleAveragePooling2dNoPadding, IgnorePaddingSimpleAveragePooling2dNoPaddingTest)
ARMNN_AUTO_TEST_CASE(IgnorePaddingSimpleAveragePooling2dNoPaddingUint8,
- IgnorePaddingSimpleAveragePooling2dNoPaddingUint8Test)
+ IgnorePaddingSimpleAveragePooling2dNoPaddingUint8Test)
ARMNN_AUTO_TEST_CASE(IgnorePaddingAveragePooling2dSize3, IgnorePaddingAveragePooling2dSize3Test)
ARMNN_AUTO_TEST_CASE(IgnorePaddingAveragePooling2dSize3Uint8, IgnorePaddingAveragePooling2dSize3Uint8Test)
@@ -114,6 +114,12 @@ ARMNN_AUTO_TEST_CASE(UNSUPPORTED_IgnorePaddingL2Pooling2dSize3Uint8, IgnorePaddi
ARMNN_AUTO_TEST_CASE(SimpleAveragePooling2d, SimpleAveragePooling2dTest)
ARMNN_AUTO_TEST_CASE(SimpleAveragePooling2dUint8, SimpleAveragePooling2dUint8Test)
+ARMNN_AUTO_TEST_CASE(IgnorePaddingAveragePooling2dSize3x2Stride2x2,
+ IgnorePaddingAveragePooling2dSize3x2Stride2x2Test,
+ false)
+ARMNN_AUTO_TEST_CASE(IgnorePaddingAveragePooling2dSize3x2Stride2x2NoPadding,
+ IgnorePaddingAveragePooling2dSize3x2Stride2x2Test,
+ true)
ARMNN_AUTO_TEST_CASE(LargeTensorsAveragePooling2d, LargeTensorsAveragePooling2dTest)
ARMNN_AUTO_TEST_CASE(LargeTensorsAveragePooling2dUint8, LargeTensorsAveragePooling2dUint8Test)
@@ -136,6 +142,8 @@ ARMNN_AUTO_TEST_CASE(AddBroadcast1Element, AdditionBroadcast1ElementTest)
// Mul
ARMNN_AUTO_TEST_CASE(SimpleMultiplication, MultiplicationTest)
+ARMNN_AUTO_TEST_CASE(MultiplicationBroadcast1Element, MultiplicationBroadcast1ElementTest)
+ARMNN_AUTO_TEST_CASE(MultiplicationBroadcast1DVector, MultiplicationBroadcast1DVectorTest)
// Batch Norm
ARMNN_AUTO_TEST_CASE(BatchNorm, BatchNormTest)
@@ -194,6 +202,9 @@ ARMNN_AUTO_TEST_CASE(SimpleReshapeUint8, SimpleReshapeUint8Test)
// Permute
ARMNN_AUTO_TEST_CASE(SimplePermuteFloat32, SimplePermuteFloat32Test)
ARMNN_AUTO_TEST_CASE(SimplePermuteUint8, SimplePermuteUint8Test)
+ARMNN_AUTO_TEST_CASE(PermuteFloat32ValueSet1, PermuteFloat32ValueSet1Test)
+ARMNN_AUTO_TEST_CASE(PermuteFloat32ValueSet2, PermuteFloat32ValueSet2Test)
+ARMNN_AUTO_TEST_CASE(PermuteFloat32ValueSet3, PermuteFloat32ValueSet3Test)
// ============================================================================
// COMPARE tests
diff --git a/src/armnn/backends/test/ArmComputeNeon.cpp b/src/armnn/backends/test/ArmComputeNeon.cpp
index dd8a668940..a81b7cdcd7 100644
--- a/src/armnn/backends/test/ArmComputeNeon.cpp
+++ b/src/armnn/backends/test/ArmComputeNeon.cpp
@@ -141,6 +141,7 @@ ARMNN_AUTO_TEST_CASE(SimpleMaxPooling2dSize3x3Stride2x4, SimpleMaxPooling2dSize3
ARMNN_AUTO_TEST_CASE(SimpleMaxPooling2dSize3x3Stride2x4Uint8, SimpleMaxPooling2dSize3x3Stride2x4Uint8Test, true)
ARMNN_AUTO_TEST_CASE(SimpleAveragePooling2d, SimpleAveragePooling2dTest)
ARMNN_AUTO_TEST_CASE(SimpleAveragePooling2dUint8, SimpleAveragePooling2dUint8Test)
+
ARMNN_AUTO_TEST_CASE(LargeTensorsAveragePooling2d, LargeTensorsAveragePooling2dTest)
ARMNN_AUTO_TEST_CASE(LargeTensorsAveragePooling2dUint8, LargeTensorsAveragePooling2dUint8Test)
@@ -170,6 +171,11 @@ ARMNN_AUTO_TEST_CASE(IgnorePaddingSimpleAveragePooling2dNoPaddingUint8,
IgnorePaddingSimpleAveragePooling2dNoPaddingUint8Test)
ARMNN_AUTO_TEST_CASE(IgnorePaddingAveragePooling2dSize3, IgnorePaddingAveragePooling2dSize3Test)
ARMNN_AUTO_TEST_CASE(IgnorePaddingAveragePooling2dSize3Uint8, IgnorePaddingAveragePooling2dSize3Uint8Test)
+ARMNN_AUTO_TEST_CASE(IgnorePaddingAveragePooling2dSize3x2Stride2x2,
+ IgnorePaddingAveragePooling2dSize3x2Stride2x2Test, false)
+ARMNN_AUTO_TEST_CASE(IgnorePaddingAveragePooling2dSize3x2Stride2x2NoPadding,
+ IgnorePaddingAveragePooling2dSize3x2Stride2x2Test,
+ true)
ARMNN_AUTO_TEST_CASE(IgnorePaddingSimpleL2Pooling2d, IgnorePaddingSimpleL2Pooling2dTest)
ARMNN_AUTO_TEST_CASE(UNSUPPORTED_IgnorePaddingSimpleL2Pooling2dUint8, IgnorePaddingSimpleL2Pooling2dUint8Test)
@@ -281,6 +287,10 @@ ARMNN_AUTO_TEST_CASE(SimpleReshapeUint8, SimpleReshapeUint8Test)
// Permute
ARMNN_AUTO_TEST_CASE(SimplePermuteFloat32, SimplePermuteFloat32Test)
ARMNN_AUTO_TEST_CASE(SimplePermuteUint8, SimplePermuteUint8Test)
+ARMNN_AUTO_TEST_CASE(PermuteFloat32ValueSet1, PermuteFloat32ValueSet1Test)
+ARMNN_AUTO_TEST_CASE(PermuteFloat32ValueSet2, PermuteFloat32ValueSet2Test)
+ARMNN_AUTO_TEST_CASE(PermuteFloat32ValueSet3, PermuteFloat32ValueSet3Test)
+
// ============================================================================
// COMPARE tests
diff --git a/src/armnn/backends/test/LayerTests.cpp b/src/armnn/backends/test/LayerTests.cpp
index 76681f9a93..9eed2dbf78 100644
--- a/src/armnn/backends/test/LayerTests.cpp
+++ b/src/armnn/backends/test/LayerTests.cpp
@@ -1005,31 +1005,22 @@ LayerTestResult<float,4> CompareAdditionTest(armnn::IWorkloadFactory& workloadFa
return ret;
}
-LayerTestResult<float,4> MultiplicationTest(armnn::IWorkloadFactory& workloadFactory)
-{
- const unsigned int width = 2;
- const unsigned int height = 2;
- const unsigned int channelCount = 2;
- const unsigned int batchSize = 2;
-
- armnn::TensorInfo inputTensorInfo0;
- armnn::TensorInfo inputTensorInfo1;
- armnn::TensorInfo outputTensorInfo;
-
- constexpr unsigned int shape[] = { batchSize, channelCount, height, width };
- constexpr std::size_t dimensionCount = std::extent<decltype(shape)>::value;
-
- inputTensorInfo0 = armnn::TensorInfo(dimensionCount, shape, armnn::DataType::Float32);
- inputTensorInfo1 = armnn::TensorInfo(dimensionCount, shape, armnn::DataType::Float32);
- outputTensorInfo = armnn::TensorInfo(dimensionCount, shape, armnn::DataType::Float32);
-
- auto input0 = MakeTensor<float, 4>(inputTensorInfo0, std::vector<float>({
- 1, 1, 1, 1, 2, 2, 2, 2,
- 3, 3, 3, 3, 4, 4, 4, 4 }));
-
- auto input1 = MakeTensor<float, 4>(inputTensorInfo1, std::vector<float>({
- 2, 2, 2, 2, 3, 3, 3, 3,
- 4, 4, 4, 4, 5, 5, 5, 5 }));
+namespace {
+LayerTestResult<float,4> MultiplicationTestHelper(armnn::IWorkloadFactory& workloadFactory,
+ const unsigned int shape0[4],
+ const std::vector<float> & values0,
+ const unsigned int shape1[4],
+ const std::vector<float> & values1,
+ const unsigned int outShape[4],
+ const std::vector<float> & outValues)
+{
+ const size_t dimensionCount = 4;
+ armnn::TensorInfo inputTensorInfo0{dimensionCount, shape0, armnn::DataType::Float32};
+ armnn::TensorInfo inputTensorInfo1{dimensionCount, shape1, armnn::DataType::Float32};
+ armnn::TensorInfo outputTensorInfo{dimensionCount, outShape, armnn::DataType::Float32};
+
+ auto input0 = MakeTensor<float, 4>(inputTensorInfo0, values0);
+ auto input1 = MakeTensor<float, 4>(inputTensorInfo1, values1);
LayerTestResult<float,4> ret(outputTensorInfo);
@@ -1056,11 +1047,84 @@ LayerTestResult<float,4> MultiplicationTest(armnn::IWorkloadFactory& workloadFac
CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
- ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo, std::vector<float>({
+ ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outValues);
+ return ret;
+}
+} // anonymous namespace
+
+
+LayerTestResult<float,4> MultiplicationTest(armnn::IWorkloadFactory& workloadFactory)
+{
+ const unsigned int width = 2;
+ const unsigned int height = 2;
+ const unsigned int channelCount = 2;
+ const unsigned int batchSize = 2;
+
+ unsigned int shape[] = { batchSize, channelCount, height, width };
+
+ std::vector<float> input0({
+ 1, 1, 1, 1, 2, 2, 2, 2,
+ 3, 3, 3, 3, 4, 4, 4, 4 });
+
+ std::vector<float> input1({
+ 2, 2, 2, 2, 3, 3, 3, 3,
+ 4, 4, 4, 4, 5, 5, 5, 5 });
+
+ std::vector<float> output({
2, 2, 2, 2, 6, 6, 6, 6,
- 12, 12, 12, 12, 20, 20, 20, 20 }));
+ 12, 12, 12, 12, 20, 20, 20, 20 });
- return ret;
+ return MultiplicationTestHelper(workloadFactory,
+ shape,
+ input0,
+ shape,
+ input1,
+ shape,
+ output);
+}
+
+LayerTestResult<float, 4> MultiplicationBroadcast1ElementTest(armnn::IWorkloadFactory& workloadFactory)
+{
+ unsigned int shape0[] = { 1, 2, 2, 2 };
+ std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
+
+ unsigned int shape1[] = { 1, 1, 1, 1 };
+ std::vector<float> input1({ 2 });
+
+ std::vector<float> output({ 2, 4, 6, 8, 10, 12, 14, 16});
+
+ return MultiplicationTestHelper(workloadFactory,
+ shape0,
+ input0,
+ shape1,
+ input1,
+ shape0,
+ output);
+}
+
+LayerTestResult<float, 4> MultiplicationBroadcast1DVectorTest(armnn::IWorkloadFactory& workloadFactory)
+{
+ unsigned int shape0[] = { 1, 3, 3, 2 };
+ std::vector<float> input0({
+ 1, 2, 3, 4, 5, 6,
+ 7, 8, 9, 10, 11, 12,
+ 13, 14, 15, 16, 17, 18});
+
+ unsigned int shape1[] = { 1, 1, 1, 2 };
+ std::vector<float> input1({ 1, 2 });
+
+ std::vector<float> output({
+ 1, 4, 3, 8, 5, 12,
+ 7, 16, 9, 20, 11, 24,
+ 13, 28, 15, 32, 17, 36});
+
+ return MultiplicationTestHelper(workloadFactory,
+ shape0,
+ input0,
+ shape1,
+ input1,
+ shape0,
+ output);
}
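For reference, the broadcasting rule these new tests exercise is that an input dimension of size 1 is repeated along the matching dimension of the other input. Below is a minimal, self-contained sketch of that behaviour (my own illustration, not ArmNN's reference kernel), reproducing the expected values of MultiplicationBroadcast1DVectorTest:

#include <cassert>
#include <vector>

// Multiplies two NCHW tensors stored as flat vectors, broadcasting any dimension of
// size 1 (valid when each dimension is either 1 or equal to the output dimension).
std::vector<float> BroadcastMul(const unsigned int s0[4], const std::vector<float>& v0,
                                const unsigned int s1[4], const std::vector<float>& v1,
                                const unsigned int out[4])
{
    std::vector<float> result(out[0] * out[1] * out[2] * out[3]);
    auto index = [](const unsigned int s[4], unsigned int n, unsigned int c,
                    unsigned int h, unsigned int w)
    {
        // The modulo clamps a coordinate to 0 for broadcast (size-1) dimensions.
        return ((n % s[0]) * s[1] + (c % s[1])) * s[2] * s[3] + (h % s[2]) * s[3] + (w % s[3]);
    };
    for (unsigned int n = 0; n < out[0]; ++n)
    for (unsigned int c = 0; c < out[1]; ++c)
    for (unsigned int h = 0; h < out[2]; ++h)
    for (unsigned int w = 0; w < out[3]; ++w)
    {
        result[((n * out[1] + c) * out[2] + h) * out[3] + w] =
            v0[index(s0, n, c, h, w)] * v1[index(s1, n, c, h, w)];
    }
    return result;
}

int main()
{
    const unsigned int shape0[] = { 1, 3, 3, 2 };
    const unsigned int shape1[] = { 1, 1, 1, 2 };
    std::vector<float> in0 = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18 };
    std::vector<float> in1 = { 1, 2 };
    std::vector<float> out = BroadcastMul(shape0, in0, shape1, in1, shape0);
    assert(out[1] == 4 && out[17] == 36); // matches MultiplicationBroadcast1DVectorTest
    return 0;
}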
LayerTestResult<float,4> CompareMultiplicationTest(armnn::IWorkloadFactory& workloadFactory,
@@ -3253,69 +3317,59 @@ LayerTestResult<uint8_t, 4> AdditionUint8Test(armnn::IWorkloadFactory& workloadF
return result;
}
-LayerTestResult<uint8_t, 4> MultiplicationUint8Test(armnn::IWorkloadFactory& workloadFactory)
+namespace
{
- unsigned int batchSize = 1;
- unsigned int channels = 2;
- unsigned int height = 2;
- unsigned int width = 3;
+LayerTestResult<uint8_t, 4> MultiplicationUint8TestHelper(armnn::IWorkloadFactory& workloadFactory,
+ const unsigned int shape0[4],
+ const std::vector<uint8_t> & values0,
+ float scale0,
+ int32_t offset0,
+ const unsigned int shape1[4],
+ const std::vector<uint8_t> & values1,
+ float scale1,
+ int32_t offset1,
+ const unsigned int outShape[4],
+ const std::vector<uint8_t> & outValues,
+ float outScale,
+ int32_t outOffset)
+{
+ armnn::TensorInfo inputTensorInfo0(4, shape0, armnn::DataType::QuantisedAsymm8);
+ armnn::TensorInfo inputTensorInfo1(4, shape1, armnn::DataType::QuantisedAsymm8);
+ armnn::TensorInfo outputTensorInfo(4, outShape, armnn::DataType::QuantisedAsymm8);
- armnn::TensorInfo inputTensorInfo1, inputTensorInfo2;
- armnn::TensorInfo outputTensorInfo;
+ inputTensorInfo0.SetQuantizationScale(scale0);
+ inputTensorInfo0.SetQuantizationOffset(offset0);
- const unsigned int shape[] = { batchSize, channels, height, width };
- inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::QuantisedAsymm8);
- inputTensorInfo1.SetQuantizationScale(4.0f);
- inputTensorInfo1.SetQuantizationOffset(1);
+ inputTensorInfo1.SetQuantizationScale(scale1);
+ inputTensorInfo1.SetQuantizationOffset(offset1);
- inputTensorInfo2 = armnn::TensorInfo(4, shape, armnn::DataType::QuantisedAsymm8);
- inputTensorInfo2.SetQuantizationScale(3.0f);
- inputTensorInfo2.SetQuantizationOffset(-2);
+ outputTensorInfo.SetQuantizationScale(outScale);
+ outputTensorInfo.SetQuantizationOffset(outOffset);
- outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::QuantisedAsymm8);
- outputTensorInfo.SetQuantizationScale(1366.255f); // Scale/offset chosen to have output values out of range
- outputTensorInfo.SetQuantizationOffset(-5);
+ auto input0 = MakeTensor<uint8_t, 4>(inputTensorInfo0, values0);
+ auto input1 = MakeTensor<uint8_t, 4>(inputTensorInfo1, values1);
- // See dequantized values to the right
- auto input1 = MakeTensor<uint8_t, 4>(inputTensorInfo1, std::vector<uint8_t>(
- {
- 62, 37, 3, 172, 13, 111, // 244, 144, 8, 684, 48, 440,
- 188, 20, 73, 31, 23, 31 // 748, 76, 288, 120, 88, 120
- }));
-
- // See dequantized values to the right
- auto input2 = MakeTensor<uint8_t, 4>(inputTensorInfo1, std::vector<uint8_t>(
- {
- 126, 240, 252, 183, 121, 247, // 384, 726, 762, 555, 369, 747,
- 48, 115, 151, 79, 78, 97 // 150, 351, 459, 243, 240, 297
- }));
-
- // See dequantized values to the right
LayerTestResult<uint8_t, 4> result(outputTensorInfo);
- result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>(
- {
- 64, 72, 0, 255, 8, 236, // 93696, 104544, 6096(clamped), 379620(clamped), 17712, 328680,
- 77, 15, 92, 16, 10, 21, // 112200, 26676, 132192, 29160, 21120, 35640
- }));
+ result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, outValues);
+ std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
- std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
armnn::MultiplicationQueueDescriptor data;
armnn::WorkloadInfo info;
- AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
- AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
+ AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
+ AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMultiplication(data, info);
+ inputHandle0->Allocate();
inputHandle1->Allocate();
- inputHandle2->Allocate();
outputHandle->Allocate();
+ CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
- CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);
workload->Execute();
@@ -3323,6 +3377,113 @@ LayerTestResult<uint8_t, 4> MultiplicationUint8Test(armnn::IWorkloadFactory& wor
return result;
}
+} // anonymous namespace
+
+LayerTestResult<uint8_t, 4> MultiplicationUint8Test(armnn::IWorkloadFactory& workloadFactory)
+{
+ unsigned int batchSize = 1;
+ unsigned int channels = 2;
+ unsigned int height = 2;
+ unsigned int width = 3;
+ const unsigned int shape[] = { batchSize, channels, height, width };
+
+ // See dequantized values to the right
+ std::vector<uint8_t> input0({
+ 62, 37, 3, 172, 13, 111, // 244, 144, 8, 684, 48, 440,
+ 188, 20, 73, 31, 23, 31 // 748, 76, 288, 120, 88, 120
+ });
+
+ // See dequantized values to the right
+ std::vector<uint8_t> input1({
+ 126, 240, 252, 183, 121, 247, // 384, 726, 762, 555, 369, 747,
+ 48, 115, 151, 79, 78, 97 // 150, 351, 459, 243, 240, 297
+ });
+
+ // See dequantized values to the right
+ std::vector<uint8_t> output(
+ {
+ 64, 72, 0, 255, 8, 236, // 93696, 104544, 6096(clamped), 379620(clamped), 17712, 328680,
+ 77, 15, 92, 16, 10, 21, // 112200, 26676, 132192, 29160, 21120, 35640
+ });
+
+ return MultiplicationUint8TestHelper(workloadFactory,
+ shape,
+ input0,
+ 4.0f,
+ 1,
+ shape,
+ input1,
+ 3.0f,
+ -2,
+ shape,
+ output,
+ 1366.255f, // Scale/offset chosen to have output values out of range
+ -5);
+}
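The scale and offset values above are easier to follow with the quantisation arithmetic written out. Below is a small sketch of my own (assuming round-to-nearest requantisation, which reproduces the expected values): dequantise with real = scale * (q - offset), multiply in float, requantise with the output scale and offset, and clamp to the uint8 range, which is where the clamped outputs 0 and 255 in the expected data come from.

#include <algorithm>
#include <cassert>
#include <cmath>
#include <cstdint>

float Dequantize(uint8_t q, float scale, int32_t offset)
{
    return scale * (static_cast<int32_t>(q) - offset);
}

uint8_t Quantize(float value, float scale, int32_t offset)
{
    int32_t q = static_cast<int32_t>(std::round(value / scale)) + offset;
    return static_cast<uint8_t>(std::min(255, std::max(0, q)));
}

int main()
{
    // First element of MultiplicationUint8Test: 62 (scale 4, offset 1) * 126 (scale 3, offset -2).
    float product = Dequantize(62, 4.0f, 1) * Dequantize(126, 3.0f, -2); // 244 * 384 = 93696
    assert(Quantize(product, 1366.255f, -5) == 64);

    // Fourth element saturates: 684 * 555 = 379620 requantises above 255 and is clamped.
    product = Dequantize(172, 4.0f, 1) * Dequantize(183, 3.0f, -2);
    assert(Quantize(product, 1366.255f, -5) == 255);
    return 0;
}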
+
+LayerTestResult<uint8_t, 4> MultiplicationBroadcast1ElementUint8Test(armnn::IWorkloadFactory& workloadFactory)
+{
+ const unsigned int shape0[] = { 1, 2, 2, 3 };
+ const unsigned int shape1[] = { 1, 1, 1, 1 };
+
+ std::vector<uint8_t> input0({
+ 1, 2, 3, 4, 5, 6,
+ 7, 8, 9, 10, 11, 12
+ });
+
+ std::vector<uint8_t> input1({2});
+
+ std::vector<uint8_t> output({
+ 2, 4, 6, 8, 10, 12,
+ 14, 16, 18, 20, 22, 24
+ });
+
+ return MultiplicationUint8TestHelper(workloadFactory,
+ shape0,
+ input0,
+ 1.0f,
+ 0,
+ shape1,
+ input1,
+ 1.0f,
+ 0,
+ shape0,
+ output,
+ 1.0f,
+ 0);
+}
+
+LayerTestResult<uint8_t, 4> MultiplicationBroadcast1DVectorUint8Test(armnn::IWorkloadFactory& workloadFactory)
+{
+ const unsigned int shape0[] = { 1, 2, 2, 3 };
+ const unsigned int shape1[] = { 1, 1, 1, 3 };
+
+ std::vector<uint8_t> input0({
+ 1, 2, 3, 4, 5, 6,
+ 7, 8, 9, 10, 11, 12
+ });
+
+ std::vector<uint8_t> input1({1, 2, 3});
+
+ std::vector<uint8_t> output({
+ 1, 4, 9, 4, 10, 18,
+ 7, 16, 27, 10, 22, 36
+ });
+
+ return MultiplicationUint8TestHelper(workloadFactory,
+ shape0,
+ input0,
+ 1.0f,
+ 0,
+ shape1,
+ input1,
+ 1.0f,
+ 0,
+ shape0,
+ output,
+ 1.0f,
+ 0);
+}
LayerTestResult<uint8_t, 4> ResizeBilinearNopUint8Test(armnn::IWorkloadFactory& workloadFactory)
{
@@ -3702,6 +3863,12 @@ LayerTestResult<uint8_t, 4> SimpleAveragePooling2dUint8Test(armnn::IWorkloadFact
return SimpleAveragePooling2dTestCommon<uint8_t>(workloadFactory, 0.5, -1);
}
+LayerTestResult<float, 4> IgnorePaddingAveragePooling2dSize3x2Stride2x2Test(armnn::IWorkloadFactory& workloadFactory,
+ bool forceNoPadding)
+{
+ return IgnorePaddingAveragePooling2dSize3x2Stride2x2TestCommon<float>(workloadFactory, forceNoPadding);
+}
+
LayerTestResult<float, 4> LargeTensorsAveragePooling2dTest(armnn::IWorkloadFactory& workloadFactory)
{
return LargeTensorsAveragePooling2dTestCommon<float>(workloadFactory);
@@ -3882,3 +4049,18 @@ LayerTestResult<uint8_t, 4> SimplePermuteUint8Test(armnn::IWorkloadFactory& work
{
return SimplePermuteUint8TestCommon(workloadFactory);
};
+
+LayerTestResult<float, 4> PermuteFloat32ValueSet1Test(armnn::IWorkloadFactory& workloadFactory)
+{
+ return PermuteFloat32ValueSet1TestCommon(workloadFactory);
+};
+
+LayerTestResult<float, 4> PermuteFloat32ValueSet2Test(armnn::IWorkloadFactory& workloadFactory)
+{
+ return PermuteFloat32ValueSet2TestCommon(workloadFactory);
+};
+
+LayerTestResult<float, 4> PermuteFloat32ValueSet3Test(armnn::IWorkloadFactory& workloadFactory)
+{
+ return PermuteFloat32ValueSet3TestCommon(workloadFactory);
+};
diff --git a/src/armnn/backends/test/LayerTests.hpp b/src/armnn/backends/test/LayerTests.hpp
index fc0c9c7b14..36e73e461c 100644
--- a/src/armnn/backends/test/LayerTests.hpp
+++ b/src/armnn/backends/test/LayerTests.hpp
@@ -82,6 +82,8 @@ LayerTestResult<uint8_t, 4> IgnorePaddingMaxPooling2dSize3Uint8Test(armnn::IWork
LayerTestResult<float, 4> SimpleAveragePooling2dTest(armnn::IWorkloadFactory& workloadFactory);
LayerTestResult<uint8_t, 4> SimpleAveragePooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<float, 4> IgnorePaddingAveragePooling2dSize3x2Stride2x2Test(armnn::IWorkloadFactory& workloadFactory,
+ bool forceNoPadding);
LayerTestResult<float, 4> IgnorePaddingSimpleAveragePooling2dTest(armnn::IWorkloadFactory& workloadFactory);
LayerTestResult<uint8_t, 4> IgnorePaddingSimpleAveragePooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory);
LayerTestResult<float, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingTest(armnn::IWorkloadFactory& workloadFactory);
@@ -187,6 +189,8 @@ LayerTestResult<float, 4> CompareActivationTest(armnn::IWorkloadFactory& worklo
unsigned int batchSize);
LayerTestResult<float, 4> MultiplicationTest(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<float, 4> MultiplicationBroadcast1ElementTest(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<float, 4> MultiplicationBroadcast1DVectorTest(armnn::IWorkloadFactory& workloadFactory);
LayerTestResult<float, 4> CompareMultiplicationTest(armnn::IWorkloadFactory& workloadFactory,
armnn::IWorkloadFactory& refWorkloadFactory);
@@ -260,6 +264,8 @@ LayerTestResult<uint8_t, 2> CompareSoftmaxUint8Test(armnn::IWorkloadFactory& wor
float beta);
LayerTestResult<uint8_t, 4> MultiplicationUint8Test(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<uint8_t, 4> MultiplicationBroadcast1ElementUint8Test(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<uint8_t, 4> MultiplicationBroadcast1DVectorUint8Test(armnn::IWorkloadFactory& workloadFactory);
LayerTestResult<uint8_t, 4> SimpleConvolution2d3x5Uint8Test(armnn::IWorkloadFactory& workloadFactory,
bool biasEnabled);
@@ -303,3 +309,6 @@ LayerTestResult<float, 2> FullyConnectedLargeTest(armnn::IWorkloadFactory& workl
LayerTestResult<float, 4> SimplePermuteFloat32Test(armnn::IWorkloadFactory& workloadFactory);
LayerTestResult<uint8_t, 4> SimplePermuteUint8Test(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<float, 4> PermuteFloat32ValueSet1Test(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<float, 4> PermuteFloat32ValueSet2Test(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<float, 4> PermuteFloat32ValueSet3Test(armnn::IWorkloadFactory& workloadFactory);
diff --git a/src/armnn/backends/test/PermuteTestImpl.hpp b/src/armnn/backends/test/PermuteTestImpl.hpp
index 4eafa1a211..4ecffedc91 100644
--- a/src/armnn/backends/test/PermuteTestImpl.hpp
+++ b/src/armnn/backends/test/PermuteTestImpl.hpp
@@ -119,3 +119,107 @@ LayerTestResult<uint8_t, 4> SimplePermuteUint8TestCommon(armnn::IWorkloadFactory
return SimplePermuteTestImpl<uint8_t>(workloadFactory, descriptor, inputTensorInfo,
outputTensorInfo, input, outputExpected);
}
+
+LayerTestResult<float, 4>
+PermuteFloat32ValueSet1TestCommon(armnn::IWorkloadFactory& workloadFactory)
+{
+ armnn::TensorInfo inputTensorInfo;
+ armnn::TensorInfo outputTensorInfo;
+
+ unsigned int inputShape[] = { 1, 2, 2, 3 };
+ unsigned int outputShape[] = { 1, 3, 2, 2 };
+
+ armnn::PermuteDescriptor descriptor;
+ descriptor.m_DimMappings = {0U, 2U, 3U, 1U};
+
+ inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32);
+ outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::DataType::Float32);
+
+ std::vector<float> input = std::vector<float>(
+ {
+ 1.0f, 2.0f, 3.0f,
+ 11.0f, 12.0f, 13.0f,
+ 21.0f, 22.0f, 23.0f,
+ 31.0f, 32.0f, 33.0f,
+ });
+
+ std::vector<float> outputExpected = std::vector<float>(
+ {
+ 1.0f, 11.0f, 21.0f, 31.0f,
+ 2.0f, 12.0f, 22.0f, 32.0f,
+ 3.0f, 13.0f, 23.0f, 33.0f,
+ });
+
+ return SimplePermuteTestImpl<float>(workloadFactory, descriptor, inputTensorInfo,
+ outputTensorInfo, input, outputExpected);
+}
+
+LayerTestResult<float, 4>
+PermuteFloat32ValueSet2TestCommon(armnn::IWorkloadFactory& workloadFactory)
+{
+ armnn::TensorInfo inputTensorInfo;
+ armnn::TensorInfo outputTensorInfo;
+
+ unsigned int inputShape[] = { 1, 3, 2, 2 };
+ unsigned int outputShape[] = { 1, 2, 2, 3 };
+
+ armnn::PermuteDescriptor descriptor;
+ descriptor.m_DimMappings = {0U, 3U, 1U, 2U};
+
+ inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32);
+ outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::DataType::Float32);
+
+ std::vector<float> input = std::vector<float>(
+ {
+ 1.0f, 11.0f, 21.0f, 31.0f,
+ 2.0f, 12.0f, 22.0f, 32.0f,
+ 3.0f, 13.0f, 23.0f, 33.0f,
+ });
+
+ std::vector<float> outputExpected = std::vector<float>(
+ {
+ 1.0f, 2.0f, 3.0f,
+ 11.0f, 12.0f, 13.0f,
+ 21.0f, 22.0f, 23.0f,
+ 31.0f, 32.0f, 33.0f,
+ });
+
+ return SimplePermuteTestImpl<float>(workloadFactory, descriptor, inputTensorInfo,
+ outputTensorInfo, input, outputExpected);
+}
+
+LayerTestResult<float, 4>
+PermuteFloat32ValueSet3TestCommon(armnn::IWorkloadFactory& workloadFactory)
+{
+ armnn::TensorInfo inputTensorInfo;
+ armnn::TensorInfo outputTensorInfo;
+
+ unsigned int inputShape[] = { 1, 2, 3, 3 };
+ unsigned int outputShape[] = { 1, 3, 2, 3 };
+
+ armnn::PermuteDescriptor descriptor;
+ descriptor.m_DimMappings = {0U, 2U, 3U, 1U};
+
+ inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32);
+ outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::DataType::Float32);
+
+ std::vector<float> input = std::vector<float>(
+ {
+ 1.0f, 2.0f, 3.0f,
+ 11.0f, 12.0f, 13.0f,
+ 21.0f, 22.0f, 23.0f,
+ 31.0f, 32.0f, 33.0f,
+ 41.0f, 42.0f, 43.0f,
+ 51.0f, 52.0f, 53.0f,
+ });
+
+ std::vector<float> outputExpected = std::vector<float>(
+ {
+ 1.0f, 11.0f, 21.0f, 31.0f, 41.0f, 51.0f,
+ 2.0f, 12.0f, 22.0f, 32.0f, 42.0f, 52.0f,
+ 3.0f, 13.0f, 23.0f, 33.0f, 43.0f, 53.0f,
+ });
+
+ return SimplePermuteTestImpl<float>(workloadFactory, descriptor, inputTensorInfo,
+ outputTensorInfo, input, outputExpected);
+}
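As I read these tests, PermuteDescriptor::m_DimMappings gives, for each input dimension i, the output dimension it moves to, so the output shape satisfies outputShape[mappings[i]] == inputShape[i]. A tiny standalone sketch (hypothetical helper, not part of ArmNN) that checks the shapes used above:

#include <array>
#include <cassert>

std::array<unsigned int, 4> PermuteShape(const std::array<unsigned int, 4>& inputShape,
                                         const std::array<unsigned int, 4>& mappings)
{
    std::array<unsigned int, 4> outputShape{};
    for (unsigned int i = 0; i < 4; ++i)
    {
        // Input dimension i lands at output position mappings[i].
        outputShape[mappings[i]] = inputShape[i];
    }
    return outputShape;
}

int main()
{
    // PermuteFloat32ValueSet1TestCommon: {1,2,2,3} with mappings {0,2,3,1} -> {1,3,2,2}.
    assert((PermuteShape({1, 2, 2, 3}, {0, 2, 3, 1}) == std::array<unsigned int, 4>{1, 3, 2, 2}));
    // PermuteFloat32ValueSet3TestCommon: {1,2,3,3} with mappings {0,2,3,1} -> {1,3,2,3}.
    assert((PermuteShape({1, 2, 3, 3}, {0, 2, 3, 1}) == std::array<unsigned int, 4>{1, 3, 2, 3}));
    return 0;
}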
diff --git a/src/armnn/backends/test/Pooling2dTestImpl.hpp b/src/armnn/backends/test/Pooling2dTestImpl.hpp
index fc84ddb2ca..ab9fd6d6fb 100644
--- a/src/armnn/backends/test/Pooling2dTestImpl.hpp
+++ b/src/armnn/backends/test/Pooling2dTestImpl.hpp
@@ -720,6 +720,83 @@ LayerTestResult<T, 4> SimpleMaxPooling2dSize2x2Stride2x2TestCommon(armnn::IWorkl
return SimplePooling2dTestImpl<T>(workloadFactory, descriptor, qScale, qOffset, input, outputExpected);
}
+//
+// Tests average pooling with the following parameters:
+//
+// Pooling size: 3x2
+// Stride: (2,2)
+// input size: 3x2
+// channels: 1
+// batch size: 1
+//
+template<typename T>
+LayerTestResult<T, 4> IgnorePaddingAveragePooling2dSize3x2Stride2x2TestCommon(
+ armnn::IWorkloadFactory& workloadFactory,
+ bool forceNoPadding,
+ float qScale = 1.0f,
+ int32_t qOffset = 0)
+{
+ armnn::Pooling2dDescriptor descriptor;
+ descriptor.m_PoolType = armnn::PoolingAlgorithm::Average;
+ descriptor.m_PoolWidth = 3;
+ descriptor.m_PoolHeight = 2;
+ descriptor.m_StrideX = 2;
+ descriptor.m_StrideY = 2;
+ descriptor.m_PadLeft = (forceNoPadding) ? 0 : 1;
+ descriptor.m_PadRight = descriptor.m_PadLeft;
+ descriptor.m_PadTop = 0;
+ descriptor.m_PadBottom = 0;
+ descriptor.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
+ descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;
+
+ unsigned int inputWidth = 3;
+ unsigned int inputHeight = 2;
+ unsigned int outputWidth =
+ (inputWidth + descriptor.m_PadLeft + descriptor.m_PadRight + descriptor.m_StrideX - descriptor.m_PoolWidth) /
+ descriptor.m_StrideX;
+ unsigned int outputHeight =
+ (inputHeight + descriptor.m_PadTop + descriptor.m_PadBottom + descriptor.m_StrideY - descriptor.m_PoolHeight) /
+ descriptor.m_StrideY;
+ unsigned int channels = 1;
+ unsigned int batchSize = 1;
+
+ std::vector<float> inputData = {
+ 3.0f, 6.0f, 9.0f,
+ 12.0f, 15.0f, 18.0f,
+ };
+
+ std::vector<float> expectedOutputDataWithPadding = {
+ 6.0f, 8.0f,
+ };
+
+ std::vector<float> expectedOutputDataNoPadding = {
+ 10.5f,
+ };
+
+ armnn::TensorInfo inputTensorInfo({ batchSize, channels, inputHeight, inputWidth }, armnn::GetDataType<T>());
+
+ // Scale and offset should match input - we're just calculating average values.
+ armnn::TensorInfo outputTensorInfo({ batchSize, channels, outputHeight, outputWidth }, armnn::GetDataType<T>());
+
+ // Set quantization parameters if the requested type is a quantized type.
+ if(armnn::IsQuantizedType<T>())
+ {
+ inputTensorInfo.SetQuantizationScale(qScale);
+ inputTensorInfo.SetQuantizationOffset(qOffset);
+ outputTensorInfo.SetQuantizationScale(qScale);
+ outputTensorInfo.SetQuantizationOffset(qOffset);
+ }
+
+ auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, inputData));
+
+ auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
+ forceNoPadding ? QuantizedVector<T>(qScale, qOffset, expectedOutputDataNoPadding) :
+ QuantizedVector<T>(qScale, qOffset, expectedOutputDataWithPadding));
+
+ return SimplePooling2dTestImpl<T>(workloadFactory, descriptor, qScale, qOffset, input, outputExpected);
+}
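Worked through by hand (my own check of the formula above, assuming IgnoreValue padding contributes zeros that still count towards the divisor): with padding (m_PadLeft = m_PadRight = 1) the output is (3+1+1+2-3)/2 x (2+0+0+2-2)/2 = 2x1, and the two 3x2 windows average (0+3+6+0+12+15)/6 = 6.0 and (6+9+0+15+18+0)/6 = 8.0; with forceNoPadding the output is 1x1 and the single window averages (3+6+9+12+15+18)/6 = 10.5. These match expectedOutputDataWithPadding and expectedOutputDataNoPadding.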
+
+
template<typename T>
LayerTestResult<T, 4> IgnorePaddingSimpleMaxPooling2dTestCommon(armnn::IWorkloadFactory& workloadFactory,
float qScale = 1.0f,
diff --git a/src/armnn/backends/test/Reference.cpp b/src/armnn/backends/test/Reference.cpp
index 87d82f1781..89e5db8e43 100644
--- a/src/armnn/backends/test/Reference.cpp
+++ b/src/armnn/backends/test/Reference.cpp
@@ -76,6 +76,10 @@ ARMNN_AUTO_TEST_CASE(IgnorePaddingL2Pooling2dSize3Uint8, IgnorePaddingL2Pooling2
ARMNN_AUTO_TEST_CASE(SimpleAveragePooling2d, SimpleAveragePooling2dTest)
ARMNN_AUTO_TEST_CASE(SimpleAveragePooling2dUint8, SimpleAveragePooling2dUint8Test)
+ARMNN_AUTO_TEST_CASE(IgnorePaddingAveragePooling2dSize3x2Stride2x2,
+ IgnorePaddingAveragePooling2dSize3x2Stride2x2Test, false)
+ARMNN_AUTO_TEST_CASE(IgnorePaddingAveragePooling2dSize3x2Stride2x2NoPadding,
+ IgnorePaddingAveragePooling2dSize3x2Stride2x2Test, true)
ARMNN_AUTO_TEST_CASE(LargeTensorsAveragePooling2d, LargeTensorsAveragePooling2dTest)
ARMNN_AUTO_TEST_CASE(LargeTensorsAveragePooling2dUint8, LargeTensorsAveragePooling2dUint8Test)
@@ -158,7 +162,11 @@ ARMNN_AUTO_TEST_CASE(AddBroadcast1ElementUint8, AdditionBroadcast1ElementUint8Te
// Mul
ARMNN_AUTO_TEST_CASE(SimpleMultiplication, MultiplicationTest)
+ARMNN_AUTO_TEST_CASE(MultiplicationBroadcast1Element, MultiplicationBroadcast1ElementTest)
+ARMNN_AUTO_TEST_CASE(MultiplicationBroadcast1DVector, MultiplicationBroadcast1DVectorTest)
ARMNN_AUTO_TEST_CASE(MultiplicationUint8, MultiplicationUint8Test)
+ARMNN_AUTO_TEST_CASE(MultiplicationBroadcast1ElementUint8, MultiplicationBroadcast1ElementUint8Test)
+ARMNN_AUTO_TEST_CASE(MultiplicationBroadcast1DVectorUint8, MultiplicationBroadcast1DVectorUint8Test)
// Batch Norm
ARMNN_AUTO_TEST_CASE(BatchNorm, BatchNormTest)
@@ -227,5 +235,8 @@ ARMNN_AUTO_TEST_CASE(SimpleReshapeUint8, SimpleReshapeUint8Test)
// Permute
ARMNN_AUTO_TEST_CASE(SimplePermuteFloat32, SimplePermuteFloat32Test)
ARMNN_AUTO_TEST_CASE(SimplePermuteUint8, SimplePermuteUint8Test)
+ARMNN_AUTO_TEST_CASE(PermuteFloat32ValueSet1, PermuteFloat32ValueSet1Test)
+ARMNN_AUTO_TEST_CASE(PermuteFloat32ValueSet2, PermuteFloat32ValueSet2Test)
+ARMNN_AUTO_TEST_CASE(PermuteFloat32ValueSet3, PermuteFloat32ValueSet3Test)
BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/armnn/optimizations/Optimization.hpp b/src/armnn/optimizations/Optimization.hpp
index 89e03ff88d..f81071891b 100644
--- a/src/armnn/optimizations/Optimization.hpp
+++ b/src/armnn/optimizations/Optimization.hpp
@@ -13,7 +13,7 @@ namespace armnn
class Optimization
{
public:
- virtual void Run(Graph& graph, Graph::Iterator& pos) const = 0;
+ virtual void Run(Graph& graph, Layer& base) const = 0;
protected:
~Optimization() = default;
};
@@ -23,22 +23,20 @@ protected:
// (curiously recurring template pattern).
// For details, see https://en.wikipedia.org/wiki/Curiously_recurring_template_pattern
-/// Wrapper Optimization base class that calls Wrapped::Run for every layer of type BaseType.
-/// - Wrapped class mustn't remove the base layer.
-/// - Base layer is removed if left unconnected after applying the wrapped optimization.
+/// Wrapper Optimization base class that calls Wrapped::Run() for every layer of type BaseType.
+/// - Wrapped class mustn't remove the base layer. The optimizer will remove it if left unconnected
+/// after applying each optimization.
template <typename BaseType, typename Wrapped>
class OptimizeForTypeImpl : public armnn::Optimization, public Wrapped
{
public:
using Wrapped::Wrapped;
- void Run(Graph& graph, Graph::Iterator& pos) const override
+ void Run(Graph& graph, Layer& base) const override
{
- Layer* const base = *pos;
-
- if (base->GetType() == LayerEnumOf<BaseType>())
+ if (base.GetType() == LayerEnumOf<BaseType>())
{
- Wrapped::Run(graph, *boost::polymorphic_downcast<BaseType*>(base));
+ Wrapped::Run(graph, *boost::polymorphic_downcast<BaseType*>(&base));
}
}
@@ -46,16 +44,16 @@ protected:
~OptimizeForTypeImpl() = default;
};
-/// Specialization that calls Wrapped::Run for any layer type
+/// Specialization that calls Wrapped::Run() for any layer type
template <typename Wrapped>
class OptimizeForTypeImpl<Layer, Wrapped> : public armnn::Optimization, public Wrapped
{
public:
using Wrapped::Wrapped;
- void Run(Graph& graph, Graph::Iterator& pos) const override
+ void Run(Graph& graph, Layer& base) const override
{
- Wrapped::Run(graph, **pos);
+ Wrapped::Run(graph, base);
}
protected:
@@ -70,9 +68,10 @@ public:
};
/// Wrapper Optimization class that calls Wrapped::Run for every connection BaseType -> ChildType.
-/// - Wrapped class mustn't remove the base layer.
+/// - Wrapped class mustn't remove the base layer. The optimizer will remove it if left unconnected
+/// after applying each optimization.
/// - Wrapped class mustn't affect existing connections in the same output. It might add new ones.
-/// - Base and children layers are removed if left unconnected after applying the wrapped optimization.
+/// - Children layers are removed if left unconnected after applying the wrapped optimization.
template <typename BaseType, typename ChildType, typename Wrapped>
class OptimizeForConnectionImpl : public Wrapped
{
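For readers unfamiliar with the pattern, below is a stripped-down sketch of the wrapping described in the updated comments, using hypothetical stand-in types (MyGraph, MyLayer, LayerKind) rather than the real ArmNN classes. The typed wrapper forwards to Wrapped::Run only when the visited layer has the expected type; removing layers left unconnected afterwards remains the optimizer's responsibility, as the comments now state.

#include <iostream>

enum class LayerKind { Input, Permute, Output };

struct MyGraph {};
struct MyLayer { LayerKind kind; };
struct MyPermuteLayer : MyLayer {};

struct MyOptimization
{
    virtual void Run(MyGraph& graph, MyLayer& base) const = 0;
    virtual ~MyOptimization() = default;
};

// Wrapped must provide: void Run(MyGraph&, MyPermuteLayer&) const
template <typename Wrapped>
struct OptimizeForPermute : MyOptimization, Wrapped
{
    void Run(MyGraph& graph, MyLayer& base) const override
    {
        if (base.kind == LayerKind::Permute)
        {
            Wrapped::Run(graph, static_cast<MyPermuteLayer&>(base));
        }
    }
};

struct PrintPermute
{
    void Run(MyGraph&, MyPermuteLayer&) const { std::cout << "visited a permute\n"; }
};

int main()
{
    OptimizeForPermute<PrintPermute> opt;
    MyGraph graph;
    MyPermuteLayer permute;
    permute.kind = LayerKind::Permute;
    MyLayer input;
    input.kind = LayerKind::Input;
    opt.Run(graph, permute); // forwards to PrintPermute::Run
    opt.Run(graph, input);   // type does not match, nothing happens
    return 0;
}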
diff --git a/src/armnn/optimizations/OptimizeConsecutiveReshapes.hpp b/src/armnn/optimizations/OptimizeConsecutiveReshapes.hpp
index deb49c6884..9a926a57a4 100644
--- a/src/armnn/optimizations/OptimizeConsecutiveReshapes.hpp
+++ b/src/armnn/optimizations/OptimizeConsecutiveReshapes.hpp
@@ -18,8 +18,8 @@ public:
/// Inserts an equivalent ReshapeLayer that bypasses both for that connection.
void Run(Graph& graph, InputSlot& connection) const
{
- auto& base = connection.GetConnectedOutputSlot()->GetOwningLayer();
- auto& child = connection.GetOwningLayer();
+ Layer& base = connection.GetConnectedOutputSlot()->GetOwningLayer();
+ Layer& child = connection.GetOwningLayer();
BOOST_ASSERT(base.GetType() == LayerType::Reshape);
BOOST_ASSERT(child.GetType() == LayerType::Reshape);
diff --git a/src/armnn/optimizations/SquashEqualSiblings.hpp b/src/armnn/optimizations/SquashEqualSiblings.hpp
index 2dfe91fdcc..c5ce28e723 100644
--- a/src/armnn/optimizations/SquashEqualSiblings.hpp
+++ b/src/armnn/optimizations/SquashEqualSiblings.hpp
@@ -26,19 +26,29 @@ public:
if (!child.IsOutputUnconnected())
{
OutputSlot& baseOutput = *connection.GetConnectedOutputSlot();
- auto& comparableChild = *boost::polymorphic_downcast<Comparable*>(&child);
- for (auto&& it : baseOutput.GetConnections())
+ if (baseOutput.GetNumConnections() > 1)
{
- Layer& sibling = it->GetOwningLayer();
- if ((&sibling != &child) && comparableChild.IsEqual(sibling))
+ auto& comparableChild = *boost::polymorphic_downcast<Comparable*>(&child);
+
+ Layer* lowestPriorityChild = &child;
+ for (auto&& it : baseOutput.GetConnections())
{
- // Bypass sibling. It will be removed as it's left unconnected.
- auto siblingOut = sibling.BeginOutputSlots();
- for (auto childOut = child.BeginOutputSlots(); childOut != child.EndOutputSlots(); ++childOut)
+ Layer* sibling = &it->GetOwningLayer();
+ if ((sibling != lowestPriorityChild) && comparableChild.IsEqual(*sibling))
{
- siblingOut->MoveAllConnections(*childOut);
- ++siblingOut;
+ if (sibling->GetPriority() < lowestPriorityChild->GetPriority())
+ {
+ std::swap(sibling, lowestPriorityChild);
+ }
+ // Bypass sibling. It will be removed as it's left unconnected.
+ auto siblingOut = sibling->BeginOutputSlots();
+ for (auto lowestPriorityChildOut = lowestPriorityChild->BeginOutputSlots();
+ lowestPriorityChildOut != lowestPriorityChild->EndOutputSlots(); ++lowestPriorityChildOut)
+ {
+ siblingOut->MoveAllConnections(*lowestPriorityChildOut);
+ ++siblingOut;
+ }
}
}
}
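As I read the new logic: the scan is now skipped when the base output has only one connection, and among a group of equal siblings the layer whose GetPriority() value is lowest is kept as the survivor, with every other equal sibling's output connections moved onto it; the bypassed siblings are then left unconnected and removed by the optimizer.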
diff --git a/src/armnn/test/Network_test.cpp b/src/armnn/test/Network_test.cpp
index 523d47b169..057caa0505 100644
--- a/src/armnn/test/Network_test.cpp
+++ b/src/armnn/test/Network_test.cpp
@@ -29,6 +29,64 @@ bool AreAllLayerInputSlotsConnected(const armnn::IConnectableLayer& layer)
BOOST_AUTO_TEST_SUITE(Network)
+BOOST_AUTO_TEST_CASE(LayerGuids)
+{
+ armnn::Network net;
+ armnn::LayerGuid inputId = net.AddInputLayer(0)->GetGuid();
+ armnn::LayerGuid addId = net.AddAdditionLayer()->GetGuid();
+ armnn::LayerGuid outputId = net.AddOutputLayer(0)->GetGuid();
+
+ BOOST_TEST(inputId != addId);
+ BOOST_TEST(addId != outputId);
+ BOOST_TEST(inputId != outputId);
+}
+
+BOOST_AUTO_TEST_CASE(SerializeToDot)
+{
+ armnn::Network net;
+
+ // define layers
+ auto input = net.AddInputLayer(0);
+ auto add = net.AddAdditionLayer();
+ auto output = net.AddOutputLayer(0);
+
+ // connect layers
+ input->GetOutputSlot(0).Connect(add->GetInputSlot(0));
+ input->GetOutputSlot(0).Connect(add->GetInputSlot(1));
+ add->GetOutputSlot(0).Connect(output->GetInputSlot(0));
+
+ armnn::TensorShape shape({4});
+ armnn::TensorInfo info(shape, armnn::DataType::Float32);
+ input->GetOutputSlot(0).SetTensorInfo(info);
+ add->GetOutputSlot(0).SetTensorInfo(info);
+
+ armnn::DeviceSpec spec;
+ spec.DefaultComputeDevice = armnn::Compute::CpuAcc;
+ armnn::IOptimizedNetworkPtr optimizedNet = armnn::Optimize(net, spec);
+
+ std::ostringstream ss;
+ optimizedNet->SerializeToDot(ss);
+
+ auto inputId = input->GetGuid();
+ auto addId = add->GetGuid();
+ auto outputId = output->GetGuid();
+
+ std::stringstream expected;
+ expected <<
+ "digraph Optimized {\n"
+ " node [shape=\"record\"];\n"
+ " edge [fontsize=8 fontcolor=\"blue\" fontname=\"arial-bold\"];\n"
+ " " << inputId << " [label=\"{Input}\"];\n"
+ " " << addId << " [label=\"{Addition}\"];\n"
+ " " << outputId << " [label=\"{Output}\"];\n"
+ " " << inputId << " -> " << addId << " [label=< [4] >];\n"
+ " " << inputId << " -> " << addId << " [label=< [4] >];\n"
+ " " << addId << " -> " << outputId << " [label=< [4] >];\n"
+ "}\n";
+
+ BOOST_TEST(ss.str() == expected.str());
+}
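The same SerializeToDot entry point exercised by this test can be used to dump a network for inspection. A small usage sketch based only on the calls above (the file name and the assumption that the network was built as in the test are illustrative); the resulting .dot file can be rendered offline, for example with Graphviz:

#include <fstream>
#include "armnn/ArmNN.hpp"

void DumpOptimizedGraph(armnn::Network& net)
{
    armnn::DeviceSpec spec;
    spec.DefaultComputeDevice = armnn::Compute::CpuAcc;
    armnn::IOptimizedNetworkPtr optimizedNet = armnn::Optimize(net, spec);

    // Write the optimized graph in dot format for offline inspection.
    std::ofstream dotFile("optimized.dot");
    optimizedNet->SerializeToDot(dotFile);
}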
+
BOOST_AUTO_TEST_CASE(NetworkBasic)
{
armnn::Network net;
diff --git a/src/armnn/test/OptimizerTests.cpp b/src/armnn/test/OptimizerTests.cpp
new file mode 100644
index 0000000000..da26fba76e
--- /dev/null
+++ b/src/armnn/test/OptimizerTests.cpp
@@ -0,0 +1,334 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+#include <boost/test/unit_test.hpp>
+
+#include "armnn/ArmNN.hpp"
+#include "Graph.hpp"
+#include "Optimizer.hpp"
+
+namespace
+{
+template <typename LayerT>
+bool IsLayerOfType(const armnn::Layer* const layer)
+{
+ return (layer->GetType() == armnn::LayerEnumOf<LayerT>());
+}
+
+bool CheckSequence(const armnn::Graph::ConstIterator first, const armnn::Graph::ConstIterator last)
+{
+ return (first == last);
+}
+
+/// Checks that each unary function in Us evaluates to true for the corresponding layer in the sequence [first, last)
+template <typename U, typename... Us>
+bool CheckSequence(const armnn::Graph::ConstIterator first,
+ const armnn::Graph::ConstIterator last,
+ U&& u,
+ Us&&... us)
+{
+ return u(*first) && CheckSequence(std::next(first), last, us...);
+}
+}
+
+BOOST_AUTO_TEST_SUITE(Optimizer)
+
+BOOST_AUTO_TEST_CASE(OptimizeInversePermutes)
+{
+ armnn::Graph graph;
+
+ auto output = graph.AddLayer<armnn::OutputLayer>(0, "output");
+
+ graph.InsertNewLayer<armnn::InputLayer>(output->GetInputSlot(0), 0, "input");
+
+ // Insert two permutes, one the inverse of the other
+ graph.InsertNewLayer<armnn::PermuteLayer>(output->GetInputSlot(0),
+ armnn::PermuteDescriptor({0, 2, 3, 1}),
+ "perm0231");
+ graph.InsertNewLayer<armnn::PermuteLayer>(output->GetInputSlot(0),
+ armnn::PermuteDescriptor({0, 3, 1, 2}),
+ "perm0312");
+
+ BOOST_TEST(CheckSequence(graph.cbegin(),
+ graph.cend(),
+ &IsLayerOfType<armnn::InputLayer>,
+ &IsLayerOfType<armnn::PermuteLayer>,
+ &IsLayerOfType<armnn::PermuteLayer>,
+ &IsLayerOfType<armnn::OutputLayer>));
+
+ armnn::Optimizer::Optimize(graph);
+
+ // The permutes are removed
+ BOOST_TEST(CheckSequence(graph.cbegin(),
+ graph.cend(),
+ &IsLayerOfType<armnn::InputLayer>,
+ &IsLayerOfType<armnn::OutputLayer>));
+}
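A quick standalone check (my own, not part of the test) that the two mappings really are inverses, which is what lets the optimizer drop both layers:

#include <array>
#include <cassert>

int main()
{
    std::array<unsigned int, 4> first  = {0, 2, 3, 1}; // perm0231
    std::array<unsigned int, 4> second = {0, 3, 1, 2}; // perm0312
    for (unsigned int i = 0; i < 4; ++i)
    {
        // Dimension i is sent to first[i] by the first permute, then to second[first[i]].
        assert(second[first[i]] == i);
    }
    return 0;
}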
+
+BOOST_AUTO_TEST_CASE(MovePermuteUp)
+{
+ const armnn::TensorInfo info({ 1, 5, 2, 3 }, armnn::DataType::Float32);
+ const armnn::TensorInfo permuted({ 1, 3, 5, 2 }, armnn::DataType::Float32);
+
+ armnn::Graph graph;
+
+ armnn::LayerBindingId inputId = 0;
+
+ armnn::Layer* head = graph.AddLayer<armnn::OutputLayer>(0, "output");
+
+ // Insert permute
+ head = graph.InsertNewLayer<armnn::PermuteLayer>(head->GetInputSlot(0),
+ armnn::PermuteDescriptor({ 0, 2, 3, 1 }), "");
+ head->GetOutputHandler().SetTensorInfo(permuted);
+
+ // Insert layers that don't care about data format
+ head = graph.InsertNewLayer<armnn::ActivationLayer>(head->GetInputSlot(0),
+ armnn::ActivationDescriptor{}, "");
+ head->GetOutputHandler().SetTensorInfo(info);
+
+ head = graph.InsertNewLayer<armnn::AdditionLayer>(head->GetInputSlot(0), "");
+ head->GetOutputHandler().SetTensorInfo(info);
+
+ // Insert input for 2nd input of Addition
+ graph.InsertNewLayer<armnn::InputLayer>(head->GetInputSlot(1), inputId++, "")
+ ->GetOutputHandler().SetTensorInfo(info);
+
+ head = graph.InsertNewLayer<armnn::FakeQuantizationLayer>(head->GetInputSlot(0),
+ armnn::FakeQuantizationDescriptor{}, "");
+ head->GetOutputHandler().SetTensorInfo(info);
+
+ head = graph.InsertNewLayer<armnn::FloorLayer>(head->GetInputSlot(0), "");
+ head->GetOutputHandler().SetTensorInfo(info);
+
+ head = graph.InsertNewLayer<armnn::MemCopyLayer>(head->GetInputSlot(0), "");
+ head->GetOutputHandler().SetTensorInfo(info);
+
+ head = graph.InsertNewLayer<armnn::MultiplicationLayer>(head->GetInputSlot(0), "");
+ head->GetOutputHandler().SetTensorInfo(info);
+
+ // Insert input for 2nd input of Multiplication
+ graph.InsertNewLayer<armnn::InputLayer>(head->GetInputSlot(1), inputId++, "")
+ ->GetOutputHandler().SetTensorInfo(info);
+
+ // Insert input
+ graph.InsertNewLayer<armnn::InputLayer>(head->GetInputSlot(0), inputId++, "")
+ ->GetOutputHandler().SetTensorInfo(info);
+
+ BOOST_TEST(CheckSequence(graph.cbegin(),
+ graph.cend(),
+ &IsLayerOfType<armnn::InputLayer>,
+ &IsLayerOfType<armnn::InputLayer>,
+ &IsLayerOfType<armnn::InputLayer>,
+ &IsLayerOfType<armnn::MultiplicationLayer>,
+ &IsLayerOfType<armnn::MemCopyLayer>,
+ &IsLayerOfType<armnn::FloorLayer>,
+ &IsLayerOfType<armnn::FakeQuantizationLayer>,
+ &IsLayerOfType<armnn::AdditionLayer>,
+ &IsLayerOfType<armnn::ActivationLayer>,
+ &IsLayerOfType<armnn::PermuteLayer>,
+ &IsLayerOfType<armnn::OutputLayer>));
+
+ armnn::Optimizer::Optimize(graph);
+
+ // The permute is moved to the top. New permutes are inserted for the layers with multiple inputs
+ BOOST_TEST(CheckSequence(graph.cbegin(),
+ graph.cend(),
+ &IsLayerOfType<armnn::InputLayer>,
+ &IsLayerOfType<armnn::InputLayer>,
+ &IsLayerOfType<armnn::InputLayer>,
+ &IsLayerOfType<armnn::PermuteLayer>,
+ &IsLayerOfType<armnn::PermuteLayer>,
+ &IsLayerOfType<armnn::PermuteLayer>,
+ &IsLayerOfType<armnn::MultiplicationLayer>,
+ &IsLayerOfType<armnn::MemCopyLayer>,
+ &IsLayerOfType<armnn::FloorLayer>,
+ &IsLayerOfType<armnn::FakeQuantizationLayer>,
+ &IsLayerOfType<armnn::AdditionLayer>,
+ &IsLayerOfType<armnn::ActivationLayer>,
+ &IsLayerOfType<armnn::OutputLayer>));
+}
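As I read this test, the optimization relies on the listed layers being layout-agnostic: permuting their output is equivalent to permuting each of their inputs first, so the permute can be pushed towards the inputs, with an extra permute inserted for the second operand of Addition and Multiplication, giving it more chances to cancel against other permutes or to be folded away later.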
+
+BOOST_AUTO_TEST_CASE(PermuteAsReshape)
+{
+ armnn::Graph graph;
+
+ const armnn::TensorInfo infoIn({ 1, 2, 3, 1 }, armnn::DataType::Float32);
+ const armnn::TensorInfo infoOut({ 1, 1, 2, 3 }, armnn::DataType::Float32);
+
+ auto output = graph.AddLayer<armnn::OutputLayer>(0, "output");
+
+ graph.InsertNewLayer<armnn::InputLayer>(output->GetInputSlot(0), 0, "input")
+ ->GetOutputHandler().SetTensorInfo(infoIn);
+
+ // Insert permute
+ graph.InsertNewLayer<armnn::PermuteLayer>(output->GetInputSlot(0),
+ armnn::PermuteDescriptor({ 0, 2, 3, 1 }), "")
+ ->GetOutputHandler().SetTensorInfo(infoOut);
+
+ BOOST_TEST(CheckSequence(graph.cbegin(),
+ graph.cend(),
+ &IsLayerOfType<armnn::InputLayer>,
+ &IsLayerOfType<armnn::PermuteLayer>,
+ &IsLayerOfType<armnn::OutputLayer>));
+
+ armnn::Optimizer::Optimize(graph);
+
+ // The permute is replaced by an equivalent reshape.
+
+ auto checkReshape = [&infoOut](const armnn::Layer* const layer) -> bool
+ {
+ const auto reshapeLayer = static_cast<const armnn::ReshapeLayer*>(layer);
+ return IsLayerOfType<armnn::ReshapeLayer>(layer) &&
+ (reshapeLayer->GetParameters().m_TargetShape == infoOut.GetShape()) &&
+ (reshapeLayer->GetOutputHandler().GetTensorInfo().GetShape() == infoOut.GetShape());
+ };
+
+ BOOST_TEST(CheckSequence(graph.cbegin(),
+ graph.cend(),
+ &IsLayerOfType<armnn::InputLayer>,
+ checkReshape,
+ &IsLayerOfType<armnn::OutputLayer>));
+}
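The substitution is valid here because the only dimension the mapping moves is the trailing size-1 dimension of { 1, 2, 3, 1 }: the linear element order is unchanged, so the permute degenerates to a plain reshape to { 1, 1, 2, 3 }.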
+
+BOOST_AUTO_TEST_CASE(OptimizeConsecutiveReshapes)
+{
+ armnn::Graph graph;
+
+ const armnn::TensorInfo info0({ 1, 2, 3, 5 }, armnn::DataType::Float32);
+
+ auto output = graph.AddLayer<armnn::OutputLayer>(0, "output");
+ auto input = graph.InsertNewLayer<armnn::InputLayer>(output->GetInputSlot(0), 0, "input");
+
+ input->GetOutputHandler().SetTensorInfo(info0);
+
+ {
+ // Insert two reshapes
+ const armnn::TensorInfo info1({1, 30, 1, 1}, armnn::DataType::Float32);
+ const armnn::TensorInfo info2({1, 2, 1, 15}, armnn::DataType::Float32);
+
+ auto reshape1 = graph.InsertNewLayer<armnn::ReshapeLayer>(output->GetInputSlot(0),
+ armnn::ReshapeDescriptor{ info1.GetShape() },
+ "reshape1");
+ auto reshape2 = graph.InsertNewLayer<armnn::ReshapeLayer>(output->GetInputSlot(0),
+ armnn::ReshapeDescriptor{ info2.GetShape() },
+ "reshape2");
+
+ reshape1->GetOutputHandler().SetTensorInfo(info1);
+ reshape2->GetOutputHandler().SetTensorInfo(info2);
+
+ BOOST_TEST(CheckSequence(graph.cbegin(),
+ graph.cend(),
+ &IsLayerOfType<armnn::InputLayer>,
+ &IsLayerOfType<armnn::ReshapeLayer>,
+ &IsLayerOfType<armnn::ReshapeLayer>,
+ &IsLayerOfType<armnn::OutputLayer>));
+
+ armnn::Optimizer::Optimize(graph);
+
+ auto checkReshape = [&info2](const armnn::Layer* const layer) -> bool
+ {
+ const auto reshapeLayer = static_cast<const armnn::ReshapeLayer*>(layer);
+ return IsLayerOfType<armnn::ReshapeLayer>(layer) &&
+ (reshapeLayer->GetParameters().m_TargetShape == info2.GetShape()) &&
+ (reshapeLayer->GetOutputHandler().GetTensorInfo().GetShape() == info2.GetShape());
+ };
+
+ // The two reshapes are replaced by a single equivalent reshape
+ BOOST_TEST(CheckSequence(graph.cbegin(),
+ graph.cend(),
+ &IsLayerOfType<armnn::InputLayer>,
+ checkReshape,
+ &IsLayerOfType<armnn::OutputLayer>));
+ }
+
+ {
+ // Insert a reshape to the input shape
+ auto reshapeToIn = graph.InsertNewLayer<armnn::ReshapeLayer>(output->GetInputSlot(0),
+ armnn::ReshapeDescriptor{ info0.GetShape() },
+ "reshapeToIn");
+
+ reshapeToIn->GetOutputHandler().SetTensorInfo(info0);
+
+ armnn::Optimizer::Optimize(graph);
+
+ // The two reshapes are removed
+ BOOST_TEST(CheckSequence(graph.cbegin(),
+ graph.cend(),
+ &IsLayerOfType<armnn::InputLayer>,
+ &IsLayerOfType<armnn::OutputLayer>));
+ }
+}
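Two facts the test relies on: consecutive reshapes are equivalent to a single reshape to the final target shape, and a reshape whose target shape equals the shape feeding it is a no-op. That is why the first block collapses to one ReshapeLayer and the second block ends with both reshapes removed.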
+
+BOOST_AUTO_TEST_CASE(SquashEqualSiblings)
+{
+ armnn::Graph graph;
+
+ armnn::LayerBindingId outputId = 0;
+
+ const armnn::TensorInfo info({ 1, 2, 3, 5 }, armnn::DataType::Float32);
+ const armnn::TensorInfo permuted({ 1, 5, 2, 3 }, armnn::DataType::Float32);
+
+ auto input = graph.AddLayer<armnn::InputLayer>(0, "input");
+ input->GetOutputSlot().SetTensorInfo(info);
+
+ // Insert equal permutes, equal reshapes and something else
+ const armnn::PermuteDescriptor permDesc({ 0, 2, 3, 1 });
+ const armnn::ReshapeDescriptor reshapeDesc{ { 1, 3, 1, 5 } };
+
+ armnn::Layer* layer;
+
+ layer = graph.AddLayer<armnn::PermuteLayer>(permDesc, "");
+ layer->GetOutputSlot().SetTensorInfo(permuted);
+ layer->GetOutputSlot().Connect(graph.AddLayer<armnn::OutputLayer>(outputId++, "")->GetInputSlot(0));
+ input->GetOutputSlot().Connect(layer->GetInputSlot(0));
+
+ layer = graph.AddLayer<armnn::ReshapeLayer>(reshapeDesc, "");
+ layer->GetOutputSlot().Connect(graph.AddLayer<armnn::OutputLayer>(outputId++, "")->GetInputSlot(0));
+ input->GetOutputSlot().Connect(layer->GetInputSlot(0));
+
+ layer = graph.AddLayer<armnn::FloorLayer>("");
+ layer->GetOutputSlot().Connect(graph.AddLayer<armnn::OutputLayer>(outputId++, "")->GetInputSlot(0));
+ input->GetOutputSlot().Connect(layer->GetInputSlot(0));
+
+ layer = graph.AddLayer<armnn::ReshapeLayer>(reshapeDesc, "");
+ layer->GetOutputSlot().Connect(graph.AddLayer<armnn::OutputLayer>(outputId++, "")->GetInputSlot(0));
+ input->GetOutputSlot().Connect(layer->GetInputSlot(0));
+
+ layer = graph.AddLayer<armnn::PermuteLayer>(permDesc, "");
+ layer->GetOutputSlot().SetTensorInfo(permuted);
+ layer->GetOutputSlot().Connect(graph.AddLayer<armnn::OutputLayer>(outputId++, "")->GetInputSlot(0));
+ input->GetOutputSlot().Connect(layer->GetInputSlot(0));
+
+ BOOST_TEST(CheckSequence(graph.cbegin(),
+ graph.cend(),
+ &IsLayerOfType<armnn::InputLayer>,
+ &IsLayerOfType<armnn::PermuteLayer>,
+ &IsLayerOfType<armnn::ReshapeLayer>,
+ &IsLayerOfType<armnn::FloorLayer>,
+ &IsLayerOfType<armnn::ReshapeLayer>,
+ &IsLayerOfType<armnn::PermuteLayer>,
+ &IsLayerOfType<armnn::OutputLayer>,
+ &IsLayerOfType<armnn::OutputLayer>,
+ &IsLayerOfType<armnn::OutputLayer>,
+ &IsLayerOfType<armnn::OutputLayer>,
+ &IsLayerOfType<armnn::OutputLayer>));
+
+ armnn::Optimizer::Optimize(graph);
+
+ // The permutes and reshapes are squashed.
+
+ BOOST_TEST(CheckSequence(graph.cbegin(),
+ graph.cend(),
+ &IsLayerOfType<armnn::InputLayer>,
+ &IsLayerOfType<armnn::PermuteLayer>,
+ &IsLayerOfType<armnn::ReshapeLayer>,
+ &IsLayerOfType<armnn::FloorLayer>,
+ &IsLayerOfType<armnn::OutputLayer>,
+ &IsLayerOfType<armnn::OutputLayer>,
+ &IsLayerOfType<armnn::OutputLayer>,
+ &IsLayerOfType<armnn::OutputLayer>,
+ &IsLayerOfType<armnn::OutputLayer>));
+}
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/armnn/test/RuntimeTests.cpp b/src/armnn/test/RuntimeTests.cpp
index 117df5e55a..e42d71c37d 100644
--- a/src/armnn/test/RuntimeTests.cpp
+++ b/src/armnn/test/RuntimeTests.cpp
@@ -115,7 +115,7 @@ BOOST_AUTO_TEST_CASE(RuntimeMemoryUsage)
BOOST_TEST(leakedBefore == leakedAfter);
// Add a reasonable threshold before and after running valgrind with the ACL clear cache function.
- BOOST_TEST(reachableAfter - reachableBefore < 30000);
+ BOOST_TEST(static_cast<long>(reachableAfter) - static_cast<long>(reachableBefore) < 1024);
// these are needed because VALGRIND_COUNT_LEAKS is a macro that assigns to the parameters
// so they are assigned to, but still considered unused, causing a warning
@@ -178,7 +178,18 @@ BOOST_AUTO_TEST_CASE(RuntimeMemoryLeak)
// if we're not running under Valgrind, these vars will have been initialised to 0, so this will always pass
BOOST_TEST(leakedBefore == leakedAfter);
- BOOST_TEST(reachableBefore == reachableAfter);
+
+ #if defined(ARMCOMPUTECL_ENABLED)
+ // reachableBefore == reachableAfter should hold, but on OpenCL with Android we are still
+ // not entirely able to control the memory in the OpenCL driver. Testing is showing that
+ // after this test (which clears all OpenCL memory) we are clearing a little bit more than
+ // we expect, probably depending on the order in which other tests are run.
+ BOOST_TEST(reachableBefore - reachableAfter <= 24);
+ #else
+ BOOST_TEST(reachableBefore == reachableAfter);
+ #endif
+
+ BOOST_TEST(reachableBefore >= reachableAfter);
// these are needed because VALGRIND_COUNT_LEAKS is a macro that assigns to the parameters
// so they are assigned to, but still considered unused, causing a warning