aboutsummaryrefslogtreecommitdiff
path: root/src/armnn/layers
diff options
context:
space:
mode:
authorJim Flynn <jim.flynn@arm.com>2019-05-22 14:24:13 +0100
committerJim Flynn <jim.flynn@arm.com>2019-05-28 17:50:33 +0100
commite242f2dc646f41e9162aaaf74e057ce39fcb92df (patch)
treed6c49b559c34d1d306b1e901501dded1c18f71c5 /src/armnn/layers
parent2f2778f36e59537bbd47fb8b21e73c6c5a949584 (diff)
downloadarmnn-e242f2dc646f41e9162aaaf74e057ce39fcb92df.tar.gz
IVGCVSW-3119 Rename MergerLayer to ConcatLayer
!android-nn-driver:1210

Change-Id: I940b3b9e421c92bfd55ae996f7bc54ac077f2604
Signed-off-by: Jim Flynn <jim.flynn@arm.com>
Diffstat (limited to 'src/armnn/layers')
-rw-r--r--src/armnn/layers/ConcatLayer.cpp (renamed from src/armnn/layers/MergerLayer.cpp)60
-rw-r--r--src/armnn/layers/ConcatLayer.hpp55
-rw-r--r--src/armnn/layers/MergerLayer.hpp50
3 files changed, 87 insertions, 78 deletions
diff --git a/src/armnn/layers/MergerLayer.cpp b/src/armnn/layers/ConcatLayer.cpp
index 9dbfdcc35d..1d2641cd60 100644
--- a/src/armnn/layers/MergerLayer.cpp
+++ b/src/armnn/layers/ConcatLayer.cpp
@@ -2,7 +2,7 @@
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
-#include "MergerLayer.hpp"
+#include "ConcatLayer.hpp"
#include "LayerCloneBase.hpp"
#include <armnn/TypesUtils.hpp>
@@ -14,14 +14,14 @@
namespace armnn
{
-MergerLayer::MergerLayer(const OriginsDescriptor& param, const char* name)
- : LayerWithParameters(param.GetNumViews(), 1, LayerType::Merger, param, name)
+ConcatLayer::ConcatLayer(const OriginsDescriptor& param, const char* name)
+ : LayerWithParameters(param.GetNumViews(), 1, LayerType::Concat, param, name)
{
}
-std::unique_ptr<IWorkload> MergerLayer::CreateWorkload(const Graph& graph, const IWorkloadFactory& factory) const
+std::unique_ptr<IWorkload> ConcatLayer::CreateWorkload(const Graph& graph, const IWorkloadFactory& factory) const
{
- MergerQueueDescriptor descriptor;
+ ConcatQueueDescriptor descriptor;
// Copies the view origins to the descriptor.
descriptor.m_ViewOrigins.reserve(m_Param.GetNumViews());
@@ -34,24 +34,24 @@ std::unique_ptr<IWorkload> MergerLayer::CreateWorkload(const Graph& graph, const
return factory.CreateConcat(descriptor, PrepInfoAndDesc(descriptor, graph));
}
-void MergerLayer::CreateTensorHandles(Graph& graph, const IWorkloadFactory& factory)
+void ConcatLayer::CreateTensorHandles(Graph& graph, const IWorkloadFactory& factory)
{
- //If sub tensors are supported then the merger
+ //If sub tensors are supported then the concat
//just needs to make sure that the outputs of the prev layer
- //are made subtensors of the output of the merger layer.
+ //are made subtensors of the output of the concat layer.
m_OutputHandlers[0].CreateTensorHandles(factory);
if (factory.SupportsSubTensors())
{
- std::queue<MergerLayer*> m_MergerLayers;
+ std::queue<ConcatLayer*> m_ConcatLayers;
- m_MergerLayers.push(this);
- while (!m_MergerLayers.empty())
+ m_ConcatLayers.push(this);
+ while (!m_ConcatLayers.empty())
{
- MergerLayer* currentLayer = m_MergerLayers.front();
+ ConcatLayer* currentLayer = m_ConcatLayers.front();
ITensorHandle* parentTensor = currentLayer->GetOutputHandler(0).GetData();
const TensorInfo& parentInfo = currentLayer->GetOutputHandler(0).GetTensorInfo();
- m_MergerLayers.pop();
+ m_ConcatLayers.pop();
const unsigned int numInputSlots = currentLayer->GetNumInputSlots();
@@ -99,14 +99,14 @@ void MergerLayer::CreateTensorHandles(Graph& graph, const IWorkloadFactory& fact
OutputSlot* slot = currentLayer->GetInputSlot(i).GetConnectedOutputSlot();
OutputHandler& outputHandler = slot->GetOutputHandler();
- BOOST_ASSERT_MSG(subTensor, "MergerLayer: Expected a valid sub-tensor for substitution.");
+ BOOST_ASSERT_MSG(subTensor, "ConcatLayer: Expected a valid sub-tensor for substitution.");
outputHandler.SetData(std::move(subTensor));
Layer& inputLayer = slot->GetOwningLayer();
- if (inputLayer.GetType() == LayerType::Merger)
+ if (inputLayer.GetType() == LayerType::Concat)
{
- // Continue with the substitution if the connected inputs are also merger layers
- m_MergerLayers.push(boost::polymorphic_downcast<MergerLayer*>(&inputLayer));
+ // Continue with the substitution if the connected inputs are also concat layers
+ m_ConcatLayers.push(boost::polymorphic_downcast<ConcatLayer*>(&inputLayer));
}
++i;
}
@@ -114,12 +114,12 @@ void MergerLayer::CreateTensorHandles(Graph& graph, const IWorkloadFactory& fact
}
}
-MergerLayer* MergerLayer::Clone(Graph& graph) const
+ConcatLayer* ConcatLayer::Clone(Graph& graph) const
{
- return CloneBase<MergerLayer>(graph, m_Param, GetName());
+ return CloneBase<ConcatLayer>(graph, m_Param, GetName());
}
-std::vector<TensorShape> MergerLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
+std::vector<TensorShape> ConcatLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
BOOST_ASSERT(inputShapes.size() == m_Param.GetNumViews());
@@ -129,7 +129,7 @@ std::vector<TensorShape> MergerLayer::InferOutputShapes(const std::vector<Tensor
auto& inputShape = inputShapes[i];
ConditionalThrowIfNotEqual<LayerValidationException>(
- "MergerLayer: Num Dimensions must match all inputs.",
+ "ConcatLayer: Num Dimensions must match all inputs.",
numDims,
inputShape.GetNumDimensions());
}
@@ -151,7 +151,7 @@ std::vector<TensorShape> MergerLayer::InferOutputShapes(const std::vector<Tensor
// Checks that the bounding box starts at the origin.
if (!std::all_of(extentMin.begin(), extentMin.end(), [](unsigned int s) { return s == 0; }))
{
- throw LayerValidationException("MergerLayer: there is no view that starts at the origin");
+ throw LayerValidationException("ConcatLayer: there is no view that starts at the origin");
}
// Checks that there are no overlaps of views (this would lead to undefined output at those locations).
@@ -182,7 +182,7 @@ std::vector<TensorShape> MergerLayer::InferOutputShapes(const std::vector<Tensor
}
if (allAxesOverlap)
{
- throw LayerValidationException("MergerLayer: Some views overlap.");
+ throw LayerValidationException("ConcatLayer: Some views overlap.");
}
}
}
@@ -202,18 +202,18 @@ std::vector<TensorShape> MergerLayer::InferOutputShapes(const std::vector<Tensor
}
ConditionalThrowIfNotEqual<LayerValidationException>(
- "MergerLayer: there are some gaps between views",
+ "ConcatLayer: there are some gaps between views",
totalViewsVolume,
outputVolume);
return std::vector<TensorShape>({ TensorShape({numDims, extentMax.data()}) });
}
-void MergerLayer::ValidateTensorShapesFromInputs()
+void ConcatLayer::ValidateTensorShapesFromInputs()
{
- // Validates Merger layer.
+ // Validates Concat layer.
ConditionalThrowIfNotEqual<LayerValidationException>(
- "MergerLayer: Num Inputs must match num views.",
+ "ConcatLayer: Num Inputs must match num views.",
m_Param.GetNumViews(),
GetNumInputSlots());
@@ -230,14 +230,14 @@ void MergerLayer::ValidateTensorShapesFromInputs()
BOOST_ASSERT(inferredShapes.size() == 1);
ConditionalThrowIfNotEqual<LayerValidationException>(
- "MergerLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
+ "ConcatLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
GetOutputSlot(0).GetTensorInfo().GetShape(),
inferredShapes[0]);
}
-void MergerLayer::Accept(ILayerVisitor& visitor) const
+void ConcatLayer::Accept(ILayerVisitor& visitor) const
{
- visitor.VisitMergerLayer(this, GetParameters(), GetName());
+ visitor.VisitConcatLayer(this, GetParameters(), GetName());
}
} // namespace armnn
diff --git a/src/armnn/layers/ConcatLayer.hpp b/src/armnn/layers/ConcatLayer.hpp
new file mode 100644
index 0000000000..4268291916
--- /dev/null
+++ b/src/armnn/layers/ConcatLayer.hpp
@@ -0,0 +1,55 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include "LayerWithParameters.hpp"
+
+namespace armnn
+{
+
+/// This layer represents a merge operation.
+class ConcatLayer : public LayerWithParameters<OriginsDescriptor>
+{
+public:
+ /// Makes a workload for the Concat type.
+ /// @param [in] graph The graph where this layer can be found.
+ /// @param [in] factory The workload factory which will create the workload.
+ /// @return A pointer to the created workload, or nullptr if not created.
+ virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
+ const IWorkloadFactory& factory) const override;
+
+ /// Set the outputs to be appropriate sub tensors of the input if sub tensors are supported
+ /// otherwise creates tensor handlers.
+ /// @param [in] graph The graph where this layer can be found.
+ /// @param [in] factory The workload factory which will create the workload.
+ virtual void CreateTensorHandles(Graph& graph, const IWorkloadFactory& factory) override;
+
+ /// Creates a dynamically-allocated copy of this layer.
+ /// @param [in] graph The graph into which this layer is being cloned.
+ ConcatLayer* Clone(Graph& graph) const override;
+
+ /// Check if the input tensor shape(s)
+ /// will lead to a valid configuration of @ref ConcatLayer.
+ void ValidateTensorShapesFromInputs() override;
+
+ /// By default returns inputShapes if the number of inputs are equal to number of outputs,
+ /// otherwise infers the output shapes from given input shapes and layer properties.
+ /// @param [in] inputShapes The input shapes layer has.
+ /// @return A vector to the inferred output shape.
+ std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
+
+ void Accept(ILayerVisitor& visitor) const override;
+
+protected:
+ /// Constructor to create a ConcatLayer.
+ /// @param [in] param OriginsDescriptor to configure the concat operation.
+ /// @param [in] name Optional name for the layer.
+ ConcatLayer(const OriginsDescriptor& param, const char* name);
+
+ /// Default destructor
+ ~ConcatLayer() = default;
+};
+
+} // namespace
diff --git a/src/armnn/layers/MergerLayer.hpp b/src/armnn/layers/MergerLayer.hpp
index 6f0c1489d4..32710609eb 100644
--- a/src/armnn/layers/MergerLayer.hpp
+++ b/src/armnn/layers/MergerLayer.hpp
@@ -4,52 +4,6 @@
//
#pragma once
-#include "LayerWithParameters.hpp"
+#include "ConcatLayer.hpp"
-namespace armnn
-{
-
-/// This layer represents a merge operation.
-class MergerLayer : public LayerWithParameters<OriginsDescriptor>
-{
-public:
- /// Makes a workload for the Merger type.
- /// @param [in] graph The graph where this layer can be found.
- /// @param [in] factory The workload factory which will create the workload.
- /// @return A pointer to the created workload, or nullptr if not created.
- virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
- const IWorkloadFactory& factory) const override;
-
- /// Set the outputs to be appropriate sub tensors of the input if sub tensors are supported
- /// otherwise creates tensor handlers.
- /// @param [in] graph The graph where this layer can be found.
- /// @param [in] factory The workload factory which will create the workload.
- virtual void CreateTensorHandles(Graph& graph, const IWorkloadFactory& factory) override;
-
- /// Creates a dynamically-allocated copy of this layer.
- /// @param [in] graph The graph into which this layer is being cloned.
- MergerLayer* Clone(Graph& graph) const override;
-
- /// Check if the input tensor shape(s)
- /// will lead to a valid configuration of @ref MergerLayer.
- void ValidateTensorShapesFromInputs() override;
-
- /// By default returns inputShapes if the number of inputs are equal to number of outputs,
- /// otherwise infers the output shapes from given input shapes and layer properties.
- /// @param [in] inputShapes The input shapes layer has.
- /// @return A vector to the inferred output shape.
- std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
-
- void Accept(ILayerVisitor& visitor) const override;
-
-protected:
- /// Constructor to create a MergerLayer.
- /// @param [in] param OriginsDescriptor to configure the merger operation.
- /// @param [in] name Optional name for the layer.
- MergerLayer(const OriginsDescriptor& param, const char* name);
-
- /// Default destructor
- ~MergerLayer() = default;
-};
-
-} // namespace
+using MergerLayer = ConcatLayer;
\ No newline at end of file