author     Jim Flynn <jim.flynn@arm.com>  2019-05-22 14:24:13 +0100
committer  Jim Flynn <jim.flynn@arm.com>  2019-05-28 17:50:33 +0100
commit     e242f2dc646f41e9162aaaf74e057ce39fcb92df (patch)
tree       d6c49b559c34d1d306b1e901501dded1c18f71c5 /src/armnn
parent     2f2778f36e59537bbd47fb8b21e73c6c5a949584 (diff)
IVGCVSW-3119 Rename MergerLayer to ConcatLayer
!android-nn-driver:1210

Change-Id: I940b3b9e421c92bfd55ae996f7bc54ac077f2604
Signed-off-by: Jim Flynn <jim.flynn@arm.com>
Diffstat (limited to 'src/armnn')
-rw-r--r--  src/armnn/DynamicQuantizationVisitor.cpp                              |  4
-rw-r--r--  src/armnn/DynamicQuantizationVisitor.hpp                              |  8
-rw-r--r--  src/armnn/InternalTypes.cpp                                           |  2
-rw-r--r--  src/armnn/InternalTypes.hpp                                           |  2
-rw-r--r--  src/armnn/LayerSupport.cpp                                            |  6
-rw-r--r--  src/armnn/LayersFwd.hpp                                               |  4
-rw-r--r--  src/armnn/LoadedNetwork.cpp                                           |  2
-rw-r--r--  src/armnn/Network.cpp                                                 | 10
-rw-r--r--  src/armnn/Network.hpp                                                 |  4
-rw-r--r--  src/armnn/QuantizerVisitor.cpp                                        | 18
-rw-r--r--  src/armnn/QuantizerVisitor.hpp                                        |  8
-rw-r--r--  src/armnn/StaticRangeVisitor.cpp                                      | 36
-rw-r--r--  src/armnn/StaticRangeVisitor.hpp                                      |  8
-rw-r--r--  src/armnn/layers/ConcatLayer.cpp (renamed from src/armnn/layers/MergerLayer.cpp)  | 60
-rw-r--r--  src/armnn/layers/ConcatLayer.hpp                                      | 55
-rw-r--r--  src/armnn/layers/MergerLayer.hpp                                      | 50
-rw-r--r--  src/armnn/test/CreateWorkload.hpp                                     | 78
-rw-r--r--  src/armnn/test/GraphTests.cpp                                         | 12
-rw-r--r--  src/armnn/test/NetworkTests.cpp                                       | 28
-rw-r--r--  src/armnn/test/QuantizerTest.cpp                                      | 30
-rw-r--r--  src/armnn/test/SubgraphViewTests.cpp                                  | 64
-rw-r--r--  src/armnn/test/TestNameAndDescriptorLayerVisitor.cpp                  | 24
-rw-r--r--  src/armnn/test/TestNameAndDescriptorLayerVisitor.hpp                  | 42
23 files changed, 277 insertions, 278 deletions
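
For client code, the rename boils down to one call-site change. Below is a minimal migration sketch against the public INetwork API; the binding ids, layer names and the 2-view/4-dimension descriptor are illustrative, not taken from the commit:

    // Hypothetical migration example: build a two-input concat with the
    // renamed API. AddConcatLayer() replaces the deprecated AddMergerLayer().
    #include <armnn/Descriptors.hpp>
    #include <armnn/INetwork.hpp>

    void BuildConcat(armnn::INetwork& net)
    {
        // ConcatDescriptor is an alias of OriginsDescriptor: 2 views, 4 dims.
        armnn::OriginsDescriptor concatDesc(2, 4);
        concatDesc.SetConcatAxis(1);

        armnn::IConnectableLayer* input0 = net.AddInputLayer(0, "input0");
        armnn::IConnectableLayer* input1 = net.AddInputLayer(1, "input1");
        // Pre-rename code called net.AddMergerLayer(concatDesc, "merger").
        armnn::IConnectableLayer* concat = net.AddConcatLayer(concatDesc, "concat");

        input0->GetOutputSlot(0).Connect(concat->GetInputSlot(0));
        input1->GetOutputSlot(0).Connect(concat->GetInputSlot(1));
    }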
diff --git a/src/armnn/DynamicQuantizationVisitor.cpp b/src/armnn/DynamicQuantizationVisitor.cpp
index 9b33fb7642..d4e0c9006c 100644
--- a/src/armnn/DynamicQuantizationVisitor.cpp
+++ b/src/armnn/DynamicQuantizationVisitor.cpp
@@ -242,8 +242,8 @@ void DynamicQuantizationVisitor::VisitConstantLayer(const IConnectableLayer* lay
SetRange(layer, 0, min, max);
}
-void DynamicQuantizationVisitor::VisitMergerLayer(const IConnectableLayer* layer,
- const OriginsDescriptor& mergerDescriptor,
+void DynamicQuantizationVisitor::VisitConcatLayer(const IConnectableLayer* layer,
+ const ConcatDescriptor& originsDescriptor,
const char* name)
{
float min = std::numeric_limits<float>::max();
diff --git a/src/armnn/DynamicQuantizationVisitor.hpp b/src/armnn/DynamicQuantizationVisitor.hpp
index 6d430f1142..43768fd077 100644
--- a/src/armnn/DynamicQuantizationVisitor.hpp
+++ b/src/armnn/DynamicQuantizationVisitor.hpp
@@ -71,14 +71,14 @@ public:
const SoftmaxDescriptor& softmaxDescriptor,
const char* name = nullptr) override;
+ void VisitConcatLayer(const IConnectableLayer* layer,
+ const ConcatDescriptor& originsDescriptor,
+ const char* name = nullptr) override;
+
void VisitConstantLayer(const IConnectableLayer* layer,
const ConstTensor& input,
const char* name = nullptr) override;
- void VisitMergerLayer(const IConnectableLayer* layer,
- const OriginsDescriptor& mergerDescriptor,
- const char* name = nullptr) override;
-
void VisitReshapeLayer(const IConnectableLayer* layer,
const ReshapeDescriptor& reshapeDescriptor,
const char* name = nullptr) override;
diff --git a/src/armnn/InternalTypes.cpp b/src/armnn/InternalTypes.cpp
index a811706dfe..47a6f60534 100644
--- a/src/armnn/InternalTypes.cpp
+++ b/src/armnn/InternalTypes.cpp
@@ -18,6 +18,7 @@ char const* GetLayerTypeAsCString(LayerType type)
case LayerType::Addition: return "Addition";
case LayerType::BatchNormalization: return "BatchNormalization";
case LayerType::BatchToSpaceNd: return "BatchToSpaceNd";
+ case LayerType::Concat: return "Concat";
case LayerType::Constant: return "Constant";
case LayerType::ConvertFp16ToFp32: return "ConvertFp16ToFp32";
case LayerType::ConvertFp32ToFp16: return "ConvertFp32ToFp16";
@@ -40,7 +41,6 @@ char const* GetLayerTypeAsCString(LayerType type)
case LayerType::Mean: return "Mean";
case LayerType::MemCopy: return "MemCopy";
case LayerType::Merge: return "Merge";
- case LayerType::Merger: return "Merger";
case LayerType::Minimum: return "Minimum";
case LayerType::Multiplication: return "Multiplication";
case LayerType::Normalization: return "Normalization";
diff --git a/src/armnn/InternalTypes.hpp b/src/armnn/InternalTypes.hpp
index 5765b5bcf1..9a215e6d39 100644
--- a/src/armnn/InternalTypes.hpp
+++ b/src/armnn/InternalTypes.hpp
@@ -18,6 +18,7 @@ enum class LayerType
Addition,
BatchNormalization,
BatchToSpaceNd,
+ Concat,
Constant,
ConvertFp16ToFp32,
ConvertFp32ToFp16,
@@ -40,7 +41,6 @@ enum class LayerType
Mean,
MemCopy,
Merge,
- Merger,
Minimum,
Multiplication,
Normalization,
diff --git a/src/armnn/LayerSupport.cpp b/src/armnn/LayerSupport.cpp
index 5867fab039..5324e5f2fe 100644
--- a/src/armnn/LayerSupport.cpp
+++ b/src/armnn/LayerSupport.cpp
@@ -137,9 +137,9 @@ bool IsConcatSupported(const BackendId& backend,
char* reasonIfUnsupported,
size_t reasonIfUnsupportedMaxLength)
{
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- return IsMergerSupported(backend, inputs, output, descriptor, reasonIfUnsupported, reasonIfUnsupportedMaxLength);
- ARMNN_NO_DEPRECATE_WARN_END
+ BOOST_ASSERT(inputs.size() > 0);
+
+ FORWARD_LAYER_SUPPORT_FUNC(backend, IsConcatSupported, inputs, output, descriptor);
}
bool IsConstantSupported(const BackendId& backend,
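
IsConcatSupported now dispatches straight to the backend via FORWARD_LAYER_SUPPORT_FUNC rather than detouring through the deprecated IsMergerSupported. A hedged sketch of a direct support query follows; the exact free-function parameter types are assumed from the LayerSupport.hpp of this era, and CpuRef is just an example backend:

    // Assumed signature: backend id, vector of input TensorInfo pointers,
    // output TensorInfo, descriptor, and an optional reason buffer.
    #include <armnn/Descriptors.hpp>
    #include <armnn/LayerSupport.hpp>
    #include <armnn/Tensor.hpp>
    #include <vector>

    bool CheckConcatSupport(const armnn::TensorInfo& in0,
                            const armnn::TensorInfo& in1,
                            const armnn::TensorInfo& out,
                            const armnn::OriginsDescriptor& desc)
    {
        std::vector<const armnn::TensorInfo*> inputs{ &in0, &in1 };
        char reason[1024];
        // After this change the call forwards directly to the backend's
        // IsConcatSupported instead of the deprecated IsMergerSupported().
        return armnn::IsConcatSupported(armnn::Compute::CpuRef, inputs, out, desc,
                                        reason, sizeof(reason));
    }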
diff --git a/src/armnn/LayersFwd.hpp b/src/armnn/LayersFwd.hpp
index 31cfa66896..40330f2234 100644
--- a/src/armnn/LayersFwd.hpp
+++ b/src/armnn/LayersFwd.hpp
@@ -10,6 +10,7 @@
#include "layers/AdditionLayer.hpp"
#include "layers/BatchNormalizationLayer.hpp"
#include "layers/BatchToSpaceNdLayer.hpp"
+#include "layers/ConcatLayer.hpp"
#include "layers/ConstantLayer.hpp"
#include "layers/ConvertFp16ToFp32Layer.hpp"
#include "layers/ConvertFp32ToFp16Layer.hpp"
@@ -32,7 +33,6 @@
#include "layers/MeanLayer.hpp"
#include "layers/MemCopyLayer.hpp"
#include "layers/MergeLayer.hpp"
-#include "layers/MergerLayer.hpp"
#include "layers/MinimumLayer.hpp"
#include "layers/MultiplicationLayer.hpp"
#include "layers/NormalizationLayer.hpp"
@@ -83,6 +83,7 @@ DECLARE_LAYER(Activation)
DECLARE_LAYER(Addition)
DECLARE_LAYER(BatchNormalization)
DECLARE_LAYER(BatchToSpaceNd)
+DECLARE_LAYER(Concat)
DECLARE_LAYER(Constant)
DECLARE_LAYER(ConvertFp16ToFp32)
DECLARE_LAYER(ConvertFp32ToFp16)
@@ -105,7 +106,6 @@ DECLARE_LAYER(Maximum)
DECLARE_LAYER(Mean)
DECLARE_LAYER(MemCopy)
DECLARE_LAYER(Merge)
-DECLARE_LAYER(Merger)
DECLARE_LAYER(Minimum)
DECLARE_LAYER(Multiplication)
DECLARE_LAYER(Normalization)
diff --git a/src/armnn/LoadedNetwork.cpp b/src/armnn/LoadedNetwork.cpp
index 7f00dbee87..3c7dfb07a9 100644
--- a/src/armnn/LoadedNetwork.cpp
+++ b/src/armnn/LoadedNetwork.cpp
@@ -82,7 +82,7 @@ LoadedNetwork::LoadedNetwork(std::unique_ptr<OptimizedNetwork> net)
//First create tensor handlers, backends and workload factories.
//Handlers are created before workloads are.
//Because workload creation can modify some of the handlers,
- //(for example the splitter and merger layers).
+ //(for example the splitter and concat layers).
for (auto&& layer : order)
{
auto const& backend = layer->GetBackendId();
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index b80e0e7eec..6bd365bab8 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -603,12 +603,10 @@ IConnectableLayer* Network::AddFullyConnectedLayer(const FullyConnectedDescripto
return AddFullyConnectedLayerImpl(fullyConnectedDescriptor, weights, optionalBiases, name);
}
-IConnectableLayer* Network::AddConcatLayer(const OriginsDescriptor& mergerDescriptor,
+IConnectableLayer* Network::AddConcatLayer(const ConcatDescriptor& concatDescriptor,
const char* name)
{
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- return AddMergerLayer(mergerDescriptor, name);
- ARMNN_NO_DEPRECATE_WARN_END
+ return m_Graph->AddLayer<ConcatLayer>(concatDescriptor, name);
}
IConnectableLayer* Network::AddConvolution2dLayerImpl(const Convolution2dDescriptor& convolution2dDescriptor,
@@ -766,10 +764,10 @@ IConnectableLayer* Network::AddMinimumLayer(const char* name)
return m_Graph->AddLayer<MinimumLayer>(name);
}
-IConnectableLayer* Network::AddMergerLayer(const OriginsDescriptor& mergerDescriptor,
+IConnectableLayer* Network::AddMergerLayer(const MergerDescriptor& mergerDescriptor,
const char* name)
{
- return m_Graph->AddLayer<MergerLayer>(mergerDescriptor, name);
+ return AddConcatLayer(mergerDescriptor, name);
}
IConnectableLayer* Network::AddAdditionLayer(const char* name)
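
Note the delegation has flipped direction: before this change AddConcatLayer forwarded to AddMergerLayer; now the deprecated AddMergerLayer forwards to AddConcatLayer, so both entry points create a ConcatLayer node. A sketch against the public API (the Deprecated.hpp include path is an assumption; the warn-suppression macros are the same ones used in the tests below):

    #include <armnn/Deprecated.hpp>
    #include <armnn/Descriptors.hpp>
    #include <armnn/INetwork.hpp>

    void BothPathsCreateConcat()
    {
        armnn::INetworkPtr net = armnn::INetwork::Create();
        armnn::OriginsDescriptor desc(2, 4);

        // New entry point: creates a ConcatLayer node directly.
        net->AddConcatLayer(desc, "c0");

        ARMNN_NO_DEPRECATE_WARN_BEGIN
        // Deprecated entry point: now forwards to AddConcatLayer().
        net->AddMergerLayer(desc, "c1");
        ARMNN_NO_DEPRECATE_WARN_END
    }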
diff --git a/src/armnn/Network.hpp b/src/armnn/Network.hpp
index d26c2864ff..52a27141c5 100644
--- a/src/armnn/Network.hpp
+++ b/src/armnn/Network.hpp
@@ -37,7 +37,7 @@ public:
IConnectableLayer* AddBatchToSpaceNdLayer(const BatchToSpaceNdDescriptor& batchToSpaceNdDescriptor,
const char* name = nullptr) override;
- IConnectableLayer* AddConcatLayer(const OriginsDescriptor& mergerDescriptor,
+ IConnectableLayer* AddConcatLayer(const ConcatDescriptor& concatDescriptor,
const char* name = nullptr) override;
IConnectableLayer* AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor,
@@ -119,7 +119,7 @@ public:
const char* name = nullptr) override;
ARMNN_DEPRECATED_MSG("Use AddConcatLayer instead")
- IConnectableLayer* AddMergerLayer(const OriginsDescriptor& mergerDescriptor,
+ IConnectableLayer* AddMergerLayer(const MergerDescriptor& mergerDescriptor,
const char* name = nullptr) override;
IConnectableLayer* AddAdditionLayer(const char* name = nullptr) override;
diff --git a/src/armnn/QuantizerVisitor.cpp b/src/armnn/QuantizerVisitor.cpp
index f30ab5247c..47ddc4ed29 100644
--- a/src/armnn/QuantizerVisitor.cpp
+++ b/src/armnn/QuantizerVisitor.cpp
@@ -122,6 +122,15 @@ void QuantizerVisitor::VisitBatchToSpaceNdLayer(const IConnectableLayer* layer,
SetQuantizedInputConnections(layer, newLayer);
}
+void QuantizerVisitor::VisitConcatLayer(const IConnectableLayer* layer,
+ const OriginsDescriptor& originsDescriptor,
+ const char* name)
+{
+ IConnectableLayer* newLayer = m_QuantizedNetwork->AddConcatLayer(originsDescriptor, name);
+ RecordLayer(layer, newLayer);
+ SetQuantizedInputConnections(layer, newLayer);
+}
+
void QuantizerVisitor::VisitConstantLayer(const IConnectableLayer* layer,
const ConstTensor& input,
const char* name)
@@ -238,15 +247,6 @@ void QuantizerVisitor::VisitMeanLayer(const IConnectableLayer* layer,
SetQuantizedInputConnections(layer, newLayer);
}
-void QuantizerVisitor::VisitMergerLayer(const IConnectableLayer* layer,
- const OriginsDescriptor& mergerDescriptor,
- const char* name)
-{
- IConnectableLayer* newLayer = m_QuantizedNetwork->AddConcatLayer(mergerDescriptor, name);
- RecordLayer(layer, newLayer);
- SetQuantizedInputConnections(layer, newLayer);
-}
-
void QuantizerVisitor::VisitMultiplicationLayer(const IConnectableLayer* layer,
const char* name)
{
diff --git a/src/armnn/QuantizerVisitor.hpp b/src/armnn/QuantizerVisitor.hpp
index 5d00e31fba..6e5609df02 100644
--- a/src/armnn/QuantizerVisitor.hpp
+++ b/src/armnn/QuantizerVisitor.hpp
@@ -50,6 +50,10 @@ public:
const BatchToSpaceNdDescriptor& batchToSpaceNdDescriptor,
const char* name = nullptr) override;
+ void VisitConcatLayer(const IConnectableLayer* layer,
+ const OriginsDescriptor& originsDescriptor,
+ const char* name = nullptr) override;
+
void VisitConstantLayer(const IConnectableLayer* layer,
const ConstTensor& input,
const char* name = nullptr) override;
@@ -78,10 +82,6 @@ public:
const MeanDescriptor& meanDescriptor,
const char* name = nullptr) override;
- void VisitMergerLayer(const IConnectableLayer* layer,
- const OriginsDescriptor& mergerDescriptor,
- const char* name = nullptr) override;
-
void VisitMultiplicationLayer(const IConnectableLayer* layer,
const char* name = nullptr) override;
diff --git a/src/armnn/StaticRangeVisitor.cpp b/src/armnn/StaticRangeVisitor.cpp
index 815730be31..d437a99931 100644
--- a/src/armnn/StaticRangeVisitor.cpp
+++ b/src/armnn/StaticRangeVisitor.cpp
@@ -152,6 +152,24 @@ void StaticRangeVisitor::VisitSoftmaxLayer(const IConnectableLayer* layer,
SetRange(layer, 0, 0.f, 1.f);
}
+void StaticRangeVisitor::VisitConcatLayer(const IConnectableLayer* layer,
+ const OriginsDescriptor& originsDescriptor,
+ const char* name)
+{
+ float min = std::numeric_limits<float>::max();
+ float max = std::numeric_limits<float>::lowest();
+ for (unsigned int i = 0; i < layer->GetNumInputSlots(); ++i)
+ {
+ const IOutputSlot* outputSlot = layer->GetInputSlot(i).GetConnection();
+ LayerGuid layerId = outputSlot->GetOwningLayerGuid();
+ unsigned int slotIndex = outputSlot->CalculateIndexOnOwner();
+ RangeTracker::MinMaxRange range = m_RangeTracker.GetRange(layerId, slotIndex);
+ min = std::min(min, range.first);
+ max = std::max(max, range.second);
+ }
+ SetRange(layer, 0, min, max);
+}
+
void StaticRangeVisitor::VisitConstantLayer(const IConnectableLayer* layer,
const ConstTensor& input,
const char* name)
@@ -180,24 +198,6 @@ void StaticRangeVisitor::VisitConstantLayer(const IConnectableLayer* layer,
SetRange(layer, 0, min, max);
}
-void StaticRangeVisitor::VisitMergerLayer(const IConnectableLayer* layer,
- const OriginsDescriptor& mergerDescriptor,
- const char* name)
-{
- float min = std::numeric_limits<float>::max();
- float max = std::numeric_limits<float>::lowest();
- for (unsigned int i = 0; i < layer->GetNumInputSlots(); ++i)
- {
- const IOutputSlot* outputSlot = layer->GetInputSlot(i).GetConnection();
- LayerGuid layerId = outputSlot->GetOwningLayerGuid();
- unsigned int slotIndex = outputSlot->CalculateIndexOnOwner();
- RangeTracker::MinMaxRange range = m_RangeTracker.GetRange(layerId, slotIndex);
- min = std::min(min, range.first);
- max = std::max(max, range.second);
- }
- SetRange(layer, 0, min, max);
-}
-
void StaticRangeVisitor::VisitReshapeLayer(const IConnectableLayer* layer,
const ReshapeDescriptor& reshapeDescriptor,
const char* name)
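
The moved VisitConcatLayer body is logic-preserving: because concatenation copies values without transforming them, the output quantization range is simply the union of the input ranges. A standalone illustration of that computation; MinMaxRange mirrors RangeTracker::MinMaxRange and the helper name is hypothetical:

    #include <algorithm>
    #include <limits>
    #include <utility>
    #include <vector>

    using MinMaxRange = std::pair<float, float>;

    // Output range of a concat = elementwise min/max over all input ranges.
    MinMaxRange MergeInputRanges(const std::vector<MinMaxRange>& inputRanges)
    {
        float min = std::numeric_limits<float>::max();
        float max = std::numeric_limits<float>::lowest();
        for (const MinMaxRange& r : inputRanges)
        {
            min = std::min(min, r.first);
            max = std::max(max, r.second);
        }
        return { min, max };
    }

    // MergeInputRanges({{-1.f, 2.f}, {0.f, 5.f}}) yields {-1.f, 5.f}:
    // concatenation cannot produce values outside the union of its inputs.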
diff --git a/src/armnn/StaticRangeVisitor.hpp b/src/armnn/StaticRangeVisitor.hpp
index 8f2e698a7e..a393a8e18f 100644
--- a/src/armnn/StaticRangeVisitor.hpp
+++ b/src/armnn/StaticRangeVisitor.hpp
@@ -71,14 +71,14 @@ public:
const SoftmaxDescriptor& softmaxDescriptor,
const char* name = nullptr) override;
+ void VisitConcatLayer(const IConnectableLayer* layer,
+ const OriginsDescriptor& originsDescriptor,
+ const char* name = nullptr) override;
+
void VisitConstantLayer(const IConnectableLayer* layer,
const ConstTensor& input,
const char* name = nullptr) override;
- void VisitMergerLayer(const IConnectableLayer* layer,
- const OriginsDescriptor& mergerDescriptor,
- const char* name = nullptr) override;
-
void VisitReshapeLayer(const IConnectableLayer* layer,
const ReshapeDescriptor& reshapeDescriptor,
const char* name = nullptr) override;
diff --git a/src/armnn/layers/MergerLayer.cpp b/src/armnn/layers/ConcatLayer.cpp
index 9dbfdcc35d..1d2641cd60 100644
--- a/src/armnn/layers/MergerLayer.cpp
+++ b/src/armnn/layers/ConcatLayer.cpp
@@ -2,7 +2,7 @@
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
-#include "MergerLayer.hpp"
+#include "ConcatLayer.hpp"
#include "LayerCloneBase.hpp"
#include <armnn/TypesUtils.hpp>
@@ -14,14 +14,14 @@
namespace armnn
{
-MergerLayer::MergerLayer(const OriginsDescriptor& param, const char* name)
- : LayerWithParameters(param.GetNumViews(), 1, LayerType::Merger, param, name)
+ConcatLayer::ConcatLayer(const OriginsDescriptor& param, const char* name)
+ : LayerWithParameters(param.GetNumViews(), 1, LayerType::Concat, param, name)
{
}
-std::unique_ptr<IWorkload> MergerLayer::CreateWorkload(const Graph& graph, const IWorkloadFactory& factory) const
+std::unique_ptr<IWorkload> ConcatLayer::CreateWorkload(const Graph& graph, const IWorkloadFactory& factory) const
{
- MergerQueueDescriptor descriptor;
+ ConcatQueueDescriptor descriptor;
// Copies the view origins to the descriptor.
descriptor.m_ViewOrigins.reserve(m_Param.GetNumViews());
@@ -34,24 +34,24 @@ std::unique_ptr<IWorkload> MergerLayer::CreateWorkload(const Graph& graph, const
return factory.CreateConcat(descriptor, PrepInfoAndDesc(descriptor, graph));
}
-void MergerLayer::CreateTensorHandles(Graph& graph, const IWorkloadFactory& factory)
+void ConcatLayer::CreateTensorHandles(Graph& graph, const IWorkloadFactory& factory)
{
- //If sub tensors are supported then the merger
+ //If sub tensors are supported then the concat
//just needs to make sure that the outputs of the prev layer
- //are made subtensors of the output of the merger layer.
+ //are made subtensors of the output of the concat layer.
m_OutputHandlers[0].CreateTensorHandles(factory);
if (factory.SupportsSubTensors())
{
- std::queue<MergerLayer*> m_MergerLayers;
+ std::queue<ConcatLayer*> m_ConcatLayers;
- m_MergerLayers.push(this);
- while (!m_MergerLayers.empty())
+ m_ConcatLayers.push(this);
+ while (!m_ConcatLayers.empty())
{
- MergerLayer* currentLayer = m_MergerLayers.front();
+ ConcatLayer* currentLayer = m_ConcatLayers.front();
ITensorHandle* parentTensor = currentLayer->GetOutputHandler(0).GetData();
const TensorInfo& parentInfo = currentLayer->GetOutputHandler(0).GetTensorInfo();
- m_MergerLayers.pop();
+ m_ConcatLayers.pop();
const unsigned int numInputSlots = currentLayer->GetNumInputSlots();
@@ -99,14 +99,14 @@ void MergerLayer::CreateTensorHandles(Graph& graph, const IWorkloadFactory& fact
OutputSlot* slot = currentLayer->GetInputSlot(i).GetConnectedOutputSlot();
OutputHandler& outputHandler = slot->GetOutputHandler();
- BOOST_ASSERT_MSG(subTensor, "MergerLayer: Expected a valid sub-tensor for substitution.");
+ BOOST_ASSERT_MSG(subTensor, "ConcatLayer: Expected a valid sub-tensor for substitution.");
outputHandler.SetData(std::move(subTensor));
Layer& inputLayer = slot->GetOwningLayer();
- if (inputLayer.GetType() == LayerType::Merger)
+ if (inputLayer.GetType() == LayerType::Concat)
{
- // Continue with the substitution if the connected inputs are also merger layers
- m_MergerLayers.push(boost::polymorphic_downcast<MergerLayer*>(&inputLayer));
+ // Continue with the substitution if the connected inputs are also concat layers
+ m_ConcatLayers.push(boost::polymorphic_downcast<ConcatLayer*>(&inputLayer));
}
++i;
}
@@ -114,12 +114,12 @@ void MergerLayer::CreateTensorHandles(Graph& graph, const IWorkloadFactory& fact
}
}
-MergerLayer* MergerLayer::Clone(Graph& graph) const
+ConcatLayer* ConcatLayer::Clone(Graph& graph) const
{
- return CloneBase<MergerLayer>(graph, m_Param, GetName());
+ return CloneBase<ConcatLayer>(graph, m_Param, GetName());
}
-std::vector<TensorShape> MergerLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
+std::vector<TensorShape> ConcatLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
BOOST_ASSERT(inputShapes.size() == m_Param.GetNumViews());
@@ -129,7 +129,7 @@ std::vector<TensorShape> MergerLayer::InferOutputShapes(const std::vector<Tensor
auto& inputShape = inputShapes[i];
ConditionalThrowIfNotEqual<LayerValidationException>(
- "MergerLayer: Num Dimensions must match all inputs.",
+ "ConcatLayer: Num Dimensions must match all inputs.",
numDims,
inputShape.GetNumDimensions());
}
@@ -151,7 +151,7 @@ std::vector<TensorShape> MergerLayer::InferOutputShapes(const std::vector<Tensor
// Checks that the bounding box starts at the origin.
if (!std::all_of(extentMin.begin(), extentMin.end(), [](unsigned int s) { return s == 0; }))
{
- throw LayerValidationException("MergerLayer: there is no view that starts at the origin");
+ throw LayerValidationException("ConcatLayer: there is no view that starts at the origin");
}
// Checks that there are no overlaps of views (this would lead to undefined output at those locations).
@@ -182,7 +182,7 @@ std::vector<TensorShape> MergerLayer::InferOutputShapes(const std::vector<Tensor
}
if (allAxesOverlap)
{
- throw LayerValidationException("MergerLayer: Some views overlap.");
+ throw LayerValidationException("ConcatLayer: Some views overlap.");
}
}
}
@@ -202,18 +202,18 @@ std::vector<TensorShape> MergerLayer::InferOutputShapes(const std::vector<Tensor
}
ConditionalThrowIfNotEqual<LayerValidationException>(
- "MergerLayer: there are some gaps between views",
+ "ConcatLayer: there are some gaps between views",
totalViewsVolume,
outputVolume);
return std::vector<TensorShape>({ TensorShape({numDims, extentMax.data()}) });
}
-void MergerLayer::ValidateTensorShapesFromInputs()
+void ConcatLayer::ValidateTensorShapesFromInputs()
{
- // Validates Merger layer.
+ // Validates Concat layer.
ConditionalThrowIfNotEqual<LayerValidationException>(
- "MergerLayer: Num Inputs must match num views.",
+ "ConcatLayer: Num Inputs must match num views.",
m_Param.GetNumViews(),
GetNumInputSlots());
@@ -230,14 +230,14 @@ void MergerLayer::ValidateTensorShapesFromInputs()
BOOST_ASSERT(inferredShapes.size() == 1);
ConditionalThrowIfNotEqual<LayerValidationException>(
- "MergerLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
+ "ConcatLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
GetOutputSlot(0).GetTensorInfo().GetShape(),
inferredShapes[0]);
}
-void MergerLayer::Accept(ILayerVisitor& visitor) const
+void ConcatLayer::Accept(ILayerVisitor& visitor) const
{
- visitor.VisitMergerLayer(this, GetParameters(), GetName());
+ visitor.VisitConcatLayer(this, GetParameters(), GetName());
}
} // namespace armnn
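
CreateTensorHandles above likewise keeps the pre-rename behaviour: when the backend supports sub-tensors, each input's output handler is replaced by a sub-tensor of the concat's own output, and inputs that are themselves concat layers are queued so the substitution continues up the chain. A stripped-down sketch of that traversal pattern; ConcatNode and FoldNestedConcats are hypothetical stand-ins for the armnn types:

    #include <queue>
    #include <vector>

    struct ConcatNode
    {
        std::vector<ConcatNode*> nestedConcatInputs; // inputs that are concats
    };

    void FoldNestedConcats(ConcatNode* root)
    {
        std::queue<ConcatNode*> work;
        work.push(root);
        while (!work.empty())
        {
            ConcatNode* current = work.front();
            work.pop();
            // Real code: carve a sub-tensor out of current's output tensor at
            // each view origin and install it as that input's output handler.
            for (ConcatNode* child : current->nestedConcatInputs)
            {
                work.push(child); // continue substitution through nested concats
            }
        }
    }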
diff --git a/src/armnn/layers/ConcatLayer.hpp b/src/armnn/layers/ConcatLayer.hpp
new file mode 100644
index 0000000000..4268291916
--- /dev/null
+++ b/src/armnn/layers/ConcatLayer.hpp
@@ -0,0 +1,55 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include "LayerWithParameters.hpp"
+
+namespace armnn
+{
+
+/// This layer represents a merge operation.
+class ConcatLayer : public LayerWithParameters<OriginsDescriptor>
+{
+public:
+ /// Makes a workload for the Concat type.
+ /// @param [in] graph The graph where this layer can be found.
+ /// @param [in] factory The workload factory which will create the workload.
+ /// @return A pointer to the created workload, or nullptr if not created.
+ virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
+ const IWorkloadFactory& factory) const override;
+
+ /// Set the outputs to be appropriate sub tensors of the input if sub tensors are supported
+ /// otherwise creates tensor handlers.
+ /// @param [in] graph The graph where this layer can be found.
+ /// @param [in] factory The workload factory which will create the workload.
+ virtual void CreateTensorHandles(Graph& graph, const IWorkloadFactory& factory) override;
+
+ /// Creates a dynamically-allocated copy of this layer.
+ /// @param [in] graph The graph into which this layer is being cloned.
+ ConcatLayer* Clone(Graph& graph) const override;
+
+ /// Check if the input tensor shape(s)
+ /// will lead to a valid configuration of @ref ConcatLayer.
+ void ValidateTensorShapesFromInputs() override;
+
+ /// By default returns inputShapes if the number of inputs are equal to number of outputs,
+ /// otherwise infers the output shapes from given input shapes and layer properties.
+ /// @param [in] inputShapes The input shapes layer has.
+ /// @return A vector to the inferred output shape.
+ std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
+
+ void Accept(ILayerVisitor& visitor) const override;
+
+protected:
+ /// Constructor to create a ConcatLayer.
+ /// @param [in] param OriginsDescriptor to configure the concat operation.
+ /// @param [in] name Optional name for the layer.
+ ConcatLayer(const OriginsDescriptor& param, const char* name);
+
+ /// Default destructor
+ ~ConcatLayer() = default;
+};
+
+} // namespace
diff --git a/src/armnn/layers/MergerLayer.hpp b/src/armnn/layers/MergerLayer.hpp
index 6f0c1489d4..32710609eb 100644
--- a/src/armnn/layers/MergerLayer.hpp
+++ b/src/armnn/layers/MergerLayer.hpp
@@ -4,52 +4,6 @@
//
#pragma once
-#include "LayerWithParameters.hpp"
+#include "ConcatLayer.hpp"
-namespace armnn
-{
-
-/// This layer represents a merge operation.
-class MergerLayer : public LayerWithParameters<OriginsDescriptor>
-{
-public:
- /// Makes a workload for the Merger type.
- /// @param [in] graph The graph where this layer can be found.
- /// @param [in] factory The workload factory which will create the workload.
- /// @return A pointer to the created workload, or nullptr if not created.
- virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
- const IWorkloadFactory& factory) const override;
-
- /// Set the outputs to be appropriate sub tensors of the input if sub tensors are supported
- /// otherwise creates tensor handlers.
- /// @param [in] graph The graph where this layer can be found.
- /// @param [in] factory The workload factory which will create the workload.
- virtual void CreateTensorHandles(Graph& graph, const IWorkloadFactory& factory) override;
-
- /// Creates a dynamically-allocated copy of this layer.
- /// @param [in] graph The graph into which this layer is being cloned.
- MergerLayer* Clone(Graph& graph) const override;
-
- /// Check if the input tensor shape(s)
- /// will lead to a valid configuration of @ref MergerLayer.
- void ValidateTensorShapesFromInputs() override;
-
- /// By default returns inputShapes if the number of inputs are equal to number of outputs,
- /// otherwise infers the output shapes from given input shapes and layer properties.
- /// @param [in] inputShapes The input shapes layer has.
- /// @return A vector to the inferred output shape.
- std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
-
- void Accept(ILayerVisitor& visitor) const override;
-
-protected:
- /// Constructor to create a MergerLayer.
- /// @param [in] param OriginsDescriptor to configure the merger operation.
- /// @param [in] name Optional name for the layer.
- MergerLayer(const OriginsDescriptor& param, const char* name);
-
- /// Default destructor
- ~MergerLayer() = default;
-};
-
-} // namespace
+using MergerLayer = ConcatLayer;
\ No newline at end of file
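
With the alias in place, MergerLayer.hpp becomes a pure compatibility shim. A minimal sketch of what it preserves, assuming (as in the tests in this commit) a Graph to add layers to; the helper function is hypothetical:

    #include <boost/assert.hpp>
    #include "Graph.hpp"
    #include "layers/MergerLayer.hpp"

    namespace armnn
    {
    // Legacy code naming MergerLayer keeps compiling through the alias,
    // but the node it creates now reports LayerType::Concat.
    void AddLegacyMerger(Graph& graph)
    {
        OriginsDescriptor desc(2);
        Layer* layer = graph.AddLayer<MergerLayer>(desc, "merger");
        BOOST_ASSERT(layer->GetType() == LayerType::Concat);
    }
    } // namespace armnn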
diff --git a/src/armnn/test/CreateWorkload.hpp b/src/armnn/test/CreateWorkload.hpp
index be52eadb57..135a4421cd 100644
--- a/src/armnn/test/CreateWorkload.hpp
+++ b/src/armnn/test/CreateWorkload.hpp
@@ -706,10 +706,10 @@ std::unique_ptr<SplitterWorkload>
return workload;
}
-/// This function constructs a graph with both a splitter and a merger, and returns a pair of the workloads.
-template<typename SplitterWorkload, typename MergerWorkload, armnn::DataType DataType>
-std::pair<std::unique_ptr<SplitterWorkload>, std::unique_ptr<MergerWorkload>>
- CreateSplitterMergerWorkloadTest(armnn::IWorkloadFactory& factory, armnn::Graph& graph)
+/// This function constructs a graph with both a splitter and a concat, and returns a pair of the workloads.
+template<typename SplitterWorkload, typename ConcatWorkload, armnn::DataType DataType>
+std::pair<std::unique_ptr<SplitterWorkload>, std::unique_ptr<ConcatWorkload>>
+ CreateSplitterConcatWorkloadTest(armnn::IWorkloadFactory &factory, armnn::Graph &graph)
{
armnn::TensorInfo inputTensorInfo({ 1, 2, 100, 10 }, DataType);
@@ -733,41 +733,41 @@ std::pair<std::unique_ptr<SplitterWorkload>, std::unique_ptr<MergerWorkload>>
Layer* const splitter = graph.AddLayer<SplitterLayer>(splitterViews, "splitter");
BOOST_TEST_CHECKPOINT("created splitter layer");
- armnn::OriginsDescriptor mergerViews(2);
- mergerViews.SetViewOriginCoord(0, 0, 0);
- mergerViews.SetViewOriginCoord(0, 1, 1);
- mergerViews.SetViewOriginCoord(0, 2, 0);
- mergerViews.SetViewOriginCoord(0, 3, 0);
+ armnn::OriginsDescriptor concatViews(2);
+ concatViews.SetViewOriginCoord(0, 0, 0);
+ concatViews.SetViewOriginCoord(0, 1, 1);
+ concatViews.SetViewOriginCoord(0, 2, 0);
+ concatViews.SetViewOriginCoord(0, 3, 0);
- mergerViews.SetViewOriginCoord(1, 0, 0);
- mergerViews.SetViewOriginCoord(1, 1, 0);
- mergerViews.SetViewOriginCoord(1, 2, 0);
- mergerViews.SetViewOriginCoord(1, 3, 0);
+ concatViews.SetViewOriginCoord(1, 0, 0);
+ concatViews.SetViewOriginCoord(1, 1, 0);
+ concatViews.SetViewOriginCoord(1, 2, 0);
+ concatViews.SetViewOriginCoord(1, 3, 0);
- Layer* const merger = graph.AddLayer<MergerLayer>(mergerViews, "merger");
- BOOST_TEST_CHECKPOINT("created merger layer");
+ Layer* const concat = graph.AddLayer<ConcatLayer>(concatViews, "concat");
+ BOOST_TEST_CHECKPOINT("created concat layer");
Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
// Adds connections.
Connect(input, splitter, inputTensorInfo, 0, 0);
BOOST_TEST_CHECKPOINT("connect input to splitter");
- Connect(splitter, merger, splitTensorInfo1, 0, 1); // The splitter & merger are connected up.
- BOOST_TEST_CHECKPOINT("connect splitter[0] to merger[1]");
- Connect(splitter, merger, splitTensorInfo2, 1, 0); // So that the outputs are flipped round.
- BOOST_TEST_CHECKPOINT("connect splitter[1] to merger[0]");
- Connect(merger, output, inputTensorInfo, 0, 0);
- BOOST_TEST_CHECKPOINT("connect merger to output");
+ Connect(splitter, concat, splitTensorInfo1, 0, 1); // The splitter & concat are connected up.
+ BOOST_TEST_CHECKPOINT("connect splitter[0] to concat[1]");
+ Connect(splitter, concat, splitTensorInfo2, 1, 0); // So that the outputs are flipped round.
+ BOOST_TEST_CHECKPOINT("connect splitter[1] to concat[0]");
+ Connect(concat, output, inputTensorInfo, 0, 0);
+ BOOST_TEST_CHECKPOINT("connect concat to output");
CreateTensorHandles(graph, factory);
BOOST_TEST_CHECKPOINT("created tensor handles");
auto workloadSplitter = MakeAndCheckWorkload<SplitterWorkload>(*splitter, graph, factory);
BOOST_TEST_CHECKPOINT("created splitter workload");
- auto workloadMerger = MakeAndCheckWorkload<MergerWorkload>(*merger, graph, factory);
- BOOST_TEST_CHECKPOINT("created merger workload");
+ auto workloadConcat = MakeAndCheckWorkload<ConcatWorkload>(*concat, graph, factory);
+ BOOST_TEST_CHECKPOINT("created concat workload");
- return {std::move(workloadSplitter), std::move(workloadMerger)};
+ return {std::move(workloadSplitter), std::move(workloadConcat)};
}
@@ -1053,10 +1053,10 @@ std::unique_ptr<MeanWorkload> CreateMeanWorkloadTest(armnn::IWorkloadFactory& fa
return workload;
}
-template<typename MergerWorkload, armnn::DataType DataType>
-std::unique_ptr<MergerWorkload> CreateMergerWorkloadTest(armnn::IWorkloadFactory& factory,
- armnn::Graph& graph,
- const armnn::TensorShape& outputShape,
+template<typename ConcatWorkload, armnn::DataType DataType>
+std::unique_ptr<ConcatWorkload> CreateConcatWorkloadTest(armnn::IWorkloadFactory &factory,
+ armnn::Graph &graph,
+ const armnn::TensorShape &outputShape,
unsigned int concatAxis)
{
armnn::TensorInfo inputTensorInfo({ 2, 3, 2, 5 }, DataType);
@@ -1073,26 +1073,26 @@ std::unique_ptr<MergerWorkload> CreateMergerWorkloadTest(armnn::IWorkloadFactory
inputShapes.end(),
concatAxis);
- Layer* const merger = graph.AddLayer<MergerLayer>(descriptor, "merger");
- BOOST_TEST_CHECKPOINT("created merger layer");
+ Layer* const concat = graph.AddLayer<ConcatLayer>(descriptor, "concat");
+ BOOST_TEST_CHECKPOINT("created concat layer");
Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
// Adds connections.
- Connect(input0, merger, inputTensorInfo, 0, 0);
- BOOST_TEST_CHECKPOINT("connect input0 to merger");
- Connect(input1, merger, inputTensorInfo, 0, 1);
- BOOST_TEST_CHECKPOINT("connect input1 to merger");
- Connect(merger, output, outputTensorInfo, 0, 0);
- BOOST_TEST_CHECKPOINT("connect merger to output");
+ Connect(input0, concat, inputTensorInfo, 0, 0);
+ BOOST_TEST_CHECKPOINT("connect input0 to concat");
+ Connect(input1, concat, inputTensorInfo, 0, 1);
+ BOOST_TEST_CHECKPOINT("connect input1 to concat");
+ Connect(concat, output, outputTensorInfo, 0, 0);
+ BOOST_TEST_CHECKPOINT("connect concat to output");
CreateTensorHandles(graph, factory);
BOOST_TEST_CHECKPOINT("created tensor handles");
- auto workloadMerger = MakeAndCheckWorkload<MergerWorkload>(*merger, graph, factory);
- BOOST_TEST_CHECKPOINT("created merger workload");
+ auto workloadConcat = MakeAndCheckWorkload<ConcatWorkload>(*concat, graph, factory);
+ BOOST_TEST_CHECKPOINT("created concat workload");
- return std::move(workloadMerger);
+ return std::move(workloadConcat);
}
template <typename PreCompiledWorkload, armnn::DataType dataType>
diff --git a/src/armnn/test/GraphTests.cpp b/src/armnn/test/GraphTests.cpp
index cca4653509..0777d98d3a 100644
--- a/src/armnn/test/GraphTests.cpp
+++ b/src/armnn/test/GraphTests.cpp
@@ -454,18 +454,18 @@ struct CopyLayersFixture
convLayer1->GetOutputSlot(0).Connect(convLayer2->GetInputSlot(0));
- armnn::OriginsDescriptor mergerDefaults(2);
- Layer* const mergerLayer = AddLayer<MergerLayer>(mergerDefaults, "merger");
- mergerLayer->SetBackendId(armnn::Compute::CpuRef);
+ armnn::OriginsDescriptor concatDefaults(2);
+ Layer* const concatLayer = AddLayer<ConcatLayer>(concatDefaults, "concat");
+ concatLayer->SetBackendId(armnn::Compute::CpuRef);
- convLayer1->GetOutputSlot(0).Connect(mergerLayer->GetInputSlot(0));
- convLayer2->GetOutputSlot(0).Connect(mergerLayer->GetInputSlot(1));
+ convLayer1->GetOutputSlot(0).Connect(concatLayer->GetInputSlot(0));
+ convLayer2->GetOutputSlot(0).Connect(concatLayer->GetInputSlot(1));
armnn::ActivationDescriptor activationDefaults;
Layer* const actLayer = AddLayer<ActivationLayer>(activationDefaults, "act");
actLayer->SetBackendId(armnn::Compute::CpuRef);
- mergerLayer->GetOutputSlot(0).Connect(actLayer->GetInputSlot(0));
+ concatLayer->GetOutputSlot(0).Connect(actLayer->GetInputSlot(0));
armnn::SoftmaxDescriptor softmaxDefaults;
Layer* const softmaxLayer = AddLayer<SoftmaxLayer>(softmaxDefaults, "softmax");
diff --git a/src/armnn/test/NetworkTests.cpp b/src/armnn/test/NetworkTests.cpp
index 47fd67b8d4..14b67a1f4a 100644
--- a/src/armnn/test/NetworkTests.cpp
+++ b/src/armnn/test/NetworkTests.cpp
@@ -226,7 +226,7 @@ BOOST_AUTO_TEST_CASE(NetworkModification)
checkOneOutputToOneInputConnection(multiplicationLayer, outputLayer, 2, 0);
}
-BOOST_AUTO_TEST_CASE(NetworkModification_SplitterMerger)
+BOOST_AUTO_TEST_CASE(NetworkModification_SplitterConcat)
{
armnn::Network net;
@@ -255,22 +255,20 @@ BOOST_AUTO_TEST_CASE(NetworkModification_SplitterMerger)
splitterLayer->GetOutputSlot(1).Connect(softmaxLayer2->GetInputSlot(0));
- // Adds a merger layer.
- armnn::OriginsDescriptor mergerDesc(2, 4);
+ // Adds a concat layer.
+ armnn::OriginsDescriptor concatDesc(2, 4);
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- armnn::IConnectableLayer* mergerLayer = net.AddMergerLayer(mergerDesc, "merger layer");
- ARMNN_NO_DEPRECATE_WARN_END
- BOOST_TEST(mergerLayer);
+ armnn::IConnectableLayer* concatLayer = net.AddConcatLayer(concatDesc, "concat layer");
+ BOOST_TEST(concatLayer);
- softmaxLayer1->GetOutputSlot(0).Connect(mergerLayer->GetInputSlot(0));
- softmaxLayer2->GetOutputSlot(0).Connect(mergerLayer->GetInputSlot(1));
+ softmaxLayer1->GetOutputSlot(0).Connect(concatLayer->GetInputSlot(0));
+ softmaxLayer2->GetOutputSlot(0).Connect(concatLayer->GetInputSlot(1));
// Adds an output layer.
armnn::IConnectableLayer* outputLayer = net.AddOutputLayer(0, "output layer");
BOOST_TEST(outputLayer);
- mergerLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
+ concatLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
BOOST_TEST(splitterLayer->GetNumOutputSlots() == 2);
BOOST_TEST(splitterLayer->GetOutputSlot(0).GetConnection(0) == &softmaxLayer1->GetInputSlot(0));
@@ -278,11 +276,11 @@ BOOST_AUTO_TEST_CASE(NetworkModification_SplitterMerger)
BOOST_TEST(splitterLayer->GetOutputSlot(1).GetConnection(0) == &softmaxLayer2->GetInputSlot(0));
BOOST_TEST(&splitterLayer->GetOutputSlot(1) == softmaxLayer2->GetInputSlot(0).GetConnection());
- BOOST_TEST(mergerLayer->GetNumInputSlots() == 2);
- BOOST_TEST(softmaxLayer1->GetOutputSlot(0).GetConnection(0) == &mergerLayer->GetInputSlot(0));
- BOOST_TEST(&softmaxLayer1->GetOutputSlot(0) == mergerLayer->GetInputSlot(0).GetConnection());
- BOOST_TEST(softmaxLayer2->GetOutputSlot(0).GetConnection(0) == &mergerLayer->GetInputSlot(1));
- BOOST_TEST(&softmaxLayer2->GetOutputSlot(0) == mergerLayer->GetInputSlot(1).GetConnection());
+ BOOST_TEST(concatLayer->GetNumInputSlots() == 2);
+ BOOST_TEST(softmaxLayer1->GetOutputSlot(0).GetConnection(0) == &concatLayer->GetInputSlot(0));
+ BOOST_TEST(&softmaxLayer1->GetOutputSlot(0) == concatLayer->GetInputSlot(0).GetConnection());
+ BOOST_TEST(softmaxLayer2->GetOutputSlot(0).GetConnection(0) == &concatLayer->GetInputSlot(1));
+ BOOST_TEST(&softmaxLayer2->GetOutputSlot(0) == concatLayer->GetInputSlot(1).GetConnection());
}
BOOST_AUTO_TEST_CASE(NetworkModification_SplitterAddition)
diff --git a/src/armnn/test/QuantizerTest.cpp b/src/armnn/test/QuantizerTest.cpp
index f2c739d274..337c61585f 100644
--- a/src/armnn/test/QuantizerTest.cpp
+++ b/src/armnn/test/QuantizerTest.cpp
@@ -1238,15 +1238,15 @@ BOOST_AUTO_TEST_CASE(QuantizeConstant)
VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
}
-BOOST_AUTO_TEST_CASE(QuantizeMerger)
+BOOST_AUTO_TEST_CASE(QuantizeConcat)
{
- class TestMergerQuantization : public TestQuantization
+ class TestConcatQuantization : public TestQuantization
{
public:
- TestMergerQuantization(const TensorShape& inputShape, const TensorShape& outputShape)
+ TestConcatQuantization(const TensorShape& inputShape, const TensorShape& outputShape)
: TestQuantization(inputShape, outputShape) {}
- TestMergerQuantization(const QuantizerOptions& options,
+ TestConcatQuantization(const QuantizerOptions& options,
const TensorShape& inputShape,
const TensorShape& outputShape)
: TestQuantization(options, inputShape, outputShape) {}
@@ -1259,8 +1259,8 @@ BOOST_AUTO_TEST_CASE(QuantizeMerger)
LayerBindingId id,
const char* name = nullptr) override
{}
- void VisitMergerLayer(const IConnectableLayer* layer,
- const OriginsDescriptor& mergerDescriptor,
+ void VisitConcatLayer(const IConnectableLayer* layer,
+ const OriginsDescriptor& originsDescriptor,
const char* name = nullptr) override
{
TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
@@ -1277,17 +1277,15 @@ BOOST_AUTO_TEST_CASE(QuantizeMerger)
IConnectableLayer* input2 = network->AddInputLayer(2);
OriginsDescriptor descriptor(3, 1);
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- IConnectableLayer* merger = network->AddMergerLayer(descriptor);
- ARMNN_NO_DEPRECATE_WARN_END
+ IConnectableLayer* concatLayer = network->AddConcatLayer(descriptor);
IConnectableLayer* output0 = network->AddOutputLayer(3);
// Establish connections
- input0->GetOutputSlot(0).Connect(merger->GetInputSlot(0));
- input1->GetOutputSlot(0).Connect(merger->GetInputSlot(1));
- input2->GetOutputSlot(0).Connect(merger->GetInputSlot(2));
- merger->GetOutputSlot(0).Connect(output0->GetInputSlot(0));
+ input0->GetOutputSlot(0).Connect(concatLayer->GetInputSlot(0));
+ input1->GetOutputSlot(0).Connect(concatLayer->GetInputSlot(1));
+ input2->GetOutputSlot(0).Connect(concatLayer->GetInputSlot(2));
+ concatLayer->GetOutputSlot(0).Connect(output0->GetInputSlot(0));
// Set TensorInfo
const TensorShape shape{1U};
@@ -1296,7 +1294,7 @@ BOOST_AUTO_TEST_CASE(QuantizeMerger)
input0->GetOutputSlot(0).SetTensorInfo(info);
input1->GetOutputSlot(0).SetTensorInfo(info);
input2->GetOutputSlot(0).SetTensorInfo(info);
- merger->GetOutputSlot(0).SetTensorInfo(info);
+ concatLayer->GetOutputSlot(0).SetTensorInfo(info);
const QuantizerOptions options(DataType::QuantisedSymm16);
INetworkQuantizerPtr quantizerPtrQAsymm8 = INetworkQuantizer::Create(network.get());
@@ -1314,11 +1312,11 @@ BOOST_AUTO_TEST_CASE(QuantizeMerger)
quantizerPtrQSymm16->OverrideInputRange(2, min, (max - 7.8f));
INetworkPtr quantizedNetworkQAsymm8 = quantizerPtrQAsymm8->ExportNetwork();
- TestMergerQuantization validatorQAsymm8(shape, shape);
+ TestConcatQuantization validatorQAsymm8(shape, shape);
VisitLayersTopologically(quantizedNetworkQAsymm8.get(), validatorQAsymm8);
INetworkPtr quantizedNetworkQSymm16 = quantizerPtrQSymm16->ExportNetwork();
- TestMergerQuantization validatorQSymm16(options, shape, shape);
+ TestConcatQuantization validatorQSymm16(options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
}
diff --git a/src/armnn/test/SubgraphViewTests.cpp b/src/armnn/test/SubgraphViewTests.cpp
index 8369fc4c79..3e762e2de5 100644
--- a/src/armnn/test/SubgraphViewTests.cpp
+++ b/src/armnn/test/SubgraphViewTests.cpp
@@ -223,21 +223,21 @@ BOOST_AUTO_TEST_CASE(MultiInputSingleOutput)
Layer* const convLayer1 = graph.AddLayer<Convolution2dLayer>(convDescriptor, "conv1");
Layer* const convLayer2 = graph.AddLayer<Convolution2dLayer>(convDescriptor, "conv2");
- OriginsDescriptor mergerDescriptor(2);
- Layer* const mergerLayer = graph.AddLayer<MergerLayer>(mergerDescriptor, "merger");
+ OriginsDescriptor concatDescriptor(2);
+ Layer* const concatLayer = graph.AddLayer<ConcatLayer>(concatDescriptor, "concat");
Layer* const outputLayer = graph.AddLayer<OutputLayer>(0, "output");
inputLayer->GetOutputSlot(0).Connect(splitterLayer->GetInputSlot(0));
splitterLayer->GetOutputSlot(0).Connect(convLayer1->GetInputSlot(0));
splitterLayer->GetOutputSlot(1).Connect(convLayer2->GetInputSlot(0));
- convLayer1->GetOutputSlot(0).Connect(mergerLayer->GetInputSlot(0));
- convLayer2->GetOutputSlot(0).Connect(mergerLayer->GetInputSlot(1));
- mergerLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
+ convLayer1->GetOutputSlot(0).Connect(concatLayer->GetInputSlot(0));
+ convLayer2->GetOutputSlot(0).Connect(concatLayer->GetInputSlot(1));
+ concatLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
// Construct sub-graph
SubgraphViewSelector::SubgraphViewPtr subgraph = CreateSubgraphViewFrom(CreateInputsFrom({convLayer1, convLayer2}),
- CreateOutputsFrom({mergerLayer}),
+ CreateOutputsFrom({concatLayer}),
{});
// Save sub-graph connections for comparison after substitution
@@ -270,8 +270,8 @@ BOOST_AUTO_TEST_CASE(SingleInputMultiOutput)
Convolution2dDescriptor convDescriptor;
Layer* const convLayer1 = graph.AddLayer<Convolution2dLayer>(convDescriptor, "conv1");
Layer* const convLayer2 = graph.AddLayer<Convolution2dLayer>(convDescriptor, "conv2");
- OriginsDescriptor mergerDescriptor(2);
- Layer* const mergerLayer = graph.AddLayer<MergerLayer>(mergerDescriptor, "merger");
+ OriginsDescriptor concatDescriptor(2);
+ Layer* const concatLayer = graph.AddLayer<ConcatLayer>(concatDescriptor, "concat");
Layer* const outputLayer = graph.AddLayer<OutputLayer>(0, "output");
ViewsDescriptor splitterDescriptor(2);
@@ -280,9 +280,9 @@ BOOST_AUTO_TEST_CASE(SingleInputMultiOutput)
inputLayer->GetOutputSlot(0).Connect(splitterLayer->GetInputSlot(0));
splitterLayer->GetOutputSlot(0).Connect(convLayer1->GetInputSlot(0));
splitterLayer->GetOutputSlot(1).Connect(convLayer2->GetInputSlot(0));
- convLayer1->GetOutputSlot(0).Connect(mergerLayer->GetInputSlot(0));
- convLayer2->GetOutputSlot(0).Connect(mergerLayer->GetInputSlot(1));
- mergerLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
+ convLayer1->GetOutputSlot(0).Connect(concatLayer->GetInputSlot(0));
+ convLayer2->GetOutputSlot(0).Connect(concatLayer->GetInputSlot(1));
+ concatLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
// Construct sub-graph
SubgraphViewSelector::SubgraphViewPtr subgraph = CreateSubgraphViewFrom(CreateInputsFrom({splitterLayer}),
@@ -323,17 +323,17 @@ BOOST_AUTO_TEST_CASE(MultiInputMultiOutput)
Layer* const convLayer1 = graph.AddLayer<Convolution2dLayer>(convDescriptor, "conv1");
Layer* const convLayer2 = graph.AddLayer<Convolution2dLayer>(convDescriptor, "conv2");
- OriginsDescriptor mergerDescriptor(2);
- Layer* const mergerLayer = graph.AddLayer<MergerLayer>(mergerDescriptor, "merger");
+ OriginsDescriptor concatDescriptor(2);
+ Layer* const concatLayer = graph.AddLayer<ConcatLayer>(concatDescriptor, "concat");
Layer* const outputLayer = graph.AddLayer<OutputLayer>(0, "output");
inputLayer->GetOutputSlot(0).Connect(splitterLayer->GetInputSlot(0));
splitterLayer->GetOutputSlot(0).Connect(convLayer1->GetInputSlot(0));
splitterLayer->GetOutputSlot(1).Connect(convLayer2->GetInputSlot(0));
- convLayer1->GetOutputSlot(0).Connect(mergerLayer->GetInputSlot(0));
- convLayer2->GetOutputSlot(0).Connect(mergerLayer->GetInputSlot(1));
- mergerLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
+ convLayer1->GetOutputSlot(0).Connect(concatLayer->GetInputSlot(0));
+ convLayer2->GetOutputSlot(0).Connect(concatLayer->GetInputSlot(1));
+ concatLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
// Construct sub-graph
SubgraphViewSelector::SubgraphViewPtr subgraph = CreateSubgraphViewFrom(CreateInputsFrom({convLayer1, convLayer2}),
@@ -376,8 +376,8 @@ BOOST_AUTO_TEST_CASE(EraseReplacedLayers)
Layer* const convLayer1 = graph.AddLayer<Convolution2dLayer>(convDescriptor, "conv1");
Layer* const convLayer2 = graph.AddLayer<Convolution2dLayer>(convDescriptor, "conv2");
- OriginsDescriptor mergerDescriptor(2);
- Layer* const mergerLayer = graph.AddLayer<MergerLayer>(mergerDescriptor, "merger");
+ OriginsDescriptor concatDescriptor(2);
+ Layer* const concatLayer = graph.AddLayer<ConcatLayer>(concatDescriptor, "concat");
graph.AddLayer<OutputLayer>(0, "output");
@@ -387,7 +387,7 @@ BOOST_AUTO_TEST_CASE(EraseReplacedLayers)
{splitterLayer,
convLayer1,
convLayer2,
- mergerLayer});
+ concatLayer});
// Construct dummy pre-compiled layer
PreCompiledDescriptor preCompiledDescriptor(0, 0);
@@ -538,8 +538,8 @@ BOOST_AUTO_TEST_CASE(IslandInTheMiddle)
//
Graph graph;
- OriginsDescriptor mergerDescriptor(2);
- auto x2 = graph.AddLayer<MergerLayer>(mergerDescriptor, "x2");
+ OriginsDescriptor concatDescriptor(2);
+ auto x2 = graph.AddLayer<ConcatLayer>(concatDescriptor, "x2");
auto m3 = graph.InsertNewLayer<ActivationLayer>(x2->GetInputSlot(0),
ActivationDescriptor{},
"m3");
@@ -856,14 +856,14 @@ BOOST_AUTO_TEST_CASE(MultiInputMultiOutput)
Graph graph;
ActivationDescriptor activationDefaults;
- OriginsDescriptor mergerDescriptor(2);
+ OriginsDescriptor concatDescriptor(2);
auto x1 = graph.AddLayer<InputLayer>(0, "x1");
auto x2 = graph.AddLayer<InputLayer>(1, "x2");
auto m1 = graph.AddLayer<ActivationLayer>(activationDefaults, "m1");
auto m2 = graph.AddLayer<ActivationLayer>(activationDefaults, "m2");
- auto m3 = graph.AddLayer<MergerLayer>(mergerDescriptor, "m3");
+ auto m3 = graph.AddLayer<ConcatLayer>(concatDescriptor, "m3");
auto m4 = graph.AddLayer<ActivationLayer>(activationDefaults, "m4");
auto m5 = graph.AddLayer<ActivationLayer>(activationDefaults, "m5");
@@ -887,11 +887,11 @@ BOOST_AUTO_TEST_CASE(MultiInputMultiOutput)
SubgraphViewSelector::Subgraphs subgraphs =
SubgraphViewSelector::SelectSubgraphs(
graph,
- // select Activation and Merger Layers M1, M2, M3, M4, M5
+ // select Activation and Concat Layers M1, M2, M3, M4, M5
[](const Layer & l)
{
bool toSelect = (l.GetType() == LayerType::Activation
- || l.GetType() == LayerType::Merger);
+ || l.GetType() == LayerType::Concat);
return toSelect;
});
@@ -994,18 +994,18 @@ BOOST_AUTO_TEST_CASE(MultipleSubgraphs)
Layer* const convLayer1 = graph.AddLayer<Convolution2dLayer>(convDescriptor, "conv1");
Layer* const convLayer2 = graph.AddLayer<Convolution2dLayer>(convDescriptor, "conv2");
- OriginsDescriptor mergerDescriptor(2);
- Layer* const mergerLayer = graph.AddLayer<MergerLayer>(mergerDescriptor, "merger");
- mergerLayer->SetBackendId(Compute::CpuAcc);
+ OriginsDescriptor concatDescriptor(2);
+ Layer* const pConcatLayer = graph.AddLayer<ConcatLayer>(concatDescriptor, "concat");
+ pConcatLayer->SetBackendId(Compute::CpuAcc);
Layer* const outputLayer = graph.AddLayer<OutputLayer>(0, "output");
inputLayer->GetOutputSlot(0).Connect(splitterLayer->GetInputSlot(0));
splitterLayer->GetOutputSlot(0).Connect(convLayer1->GetInputSlot(0));
splitterLayer->GetOutputSlot(1).Connect(convLayer2->GetInputSlot(0));
- convLayer1->GetOutputSlot(0).Connect(mergerLayer->GetInputSlot(0));
- convLayer2->GetOutputSlot(0).Connect(mergerLayer->GetInputSlot(1));
- mergerLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
+ convLayer1->GetOutputSlot(0).Connect(pConcatLayer->GetInputSlot(0));
+ convLayer2->GetOutputSlot(0).Connect(pConcatLayer->GetInputSlot(1));
+ pConcatLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
// CpuAcc sub graph selector
SubgraphViewSelector::Subgraphs subgraphs =
@@ -1096,7 +1096,7 @@ BOOST_AUTO_TEST_CASE(SubgraphCycles)
//
Graph graph;
- OriginsDescriptor mergerDescriptor(2);
+ OriginsDescriptor originsDescriptor(2);
auto x0 = graph.AddLayer<InputLayer>(0, "x0");
auto m0 = graph.AddLayer<ActivationLayer>(ActivationDescriptor{}, "m0");
auto x1 = graph.AddLayer<ActivationLayer>(ActivationDescriptor{}, "x1");
diff --git a/src/armnn/test/TestNameAndDescriptorLayerVisitor.cpp b/src/armnn/test/TestNameAndDescriptorLayerVisitor.cpp
index f94906d10d..478f0293a4 100644
--- a/src/armnn/test/TestNameAndDescriptorLayerVisitor.cpp
+++ b/src/armnn/test/TestNameAndDescriptorLayerVisitor.cpp
@@ -20,7 +20,7 @@ void Set2dDataValues(SplitterDescriptor descriptor, u_int32_t value)
}
}
-void Set2dDataValues(MergerDescriptor descriptor, u_int32_t value)
+void Set2dDataValues(OriginsDescriptor& descriptor, u_int32_t value)
{
for (unsigned int i = 0; i < descriptor.GetNumViews(); ++i)
{
@@ -230,32 +230,28 @@ BOOST_AUTO_TEST_CASE(CheckSplitterLayerVisitorNameNullAndDescriptor)
layer->Accept(visitor);
}
-BOOST_AUTO_TEST_CASE(CheckMergerLayerVisitorNameAndDescriptor)
+BOOST_AUTO_TEST_CASE(CheckConcatLayerVisitorNameAndDescriptor)
{
- const char* layerName = "MergerLayer";
- MergerDescriptor descriptor(2, 2);
+ const char* layerName = "ConcatLayer";
+ OriginsDescriptor descriptor(2, 2);
Set2dDataValues(descriptor, 1);
descriptor.SetConcatAxis(1);
- TestMergerLayerVisitor visitor(descriptor, layerName);
+ TestConcatLayerVisitor visitor(descriptor, layerName);
Network net;
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- IConnectableLayer *const layer = net.AddMergerLayer(descriptor, layerName);
- ARMNN_NO_DEPRECATE_WARN_END
+ IConnectableLayer *const layer = net.AddConcatLayer(descriptor, layerName);
layer->Accept(visitor);
}
-BOOST_AUTO_TEST_CASE(CheckMergerLayerVisitorNameNullAndDescriptor)
+BOOST_AUTO_TEST_CASE(CheckConcatLayerVisitorNameNullAndDescriptor)
{
- MergerDescriptor descriptor(2, 2);
+ OriginsDescriptor descriptor(2, 2);
Set2dDataValues(descriptor, 1);
descriptor.SetConcatAxis(1);
- TestMergerLayerVisitor visitor(descriptor);
+ TestConcatLayerVisitor visitor(descriptor);
Network net;
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- IConnectableLayer *const layer = net.AddMergerLayer(descriptor);
- ARMNN_NO_DEPRECATE_WARN_END
+ IConnectableLayer *const layer = net.AddConcatLayer(descriptor);
layer->Accept(visitor);
}
diff --git a/src/armnn/test/TestNameAndDescriptorLayerVisitor.hpp b/src/armnn/test/TestNameAndDescriptorLayerVisitor.hpp
index bf23332fb8..0db956d36d 100644
--- a/src/armnn/test/TestNameAndDescriptorLayerVisitor.hpp
+++ b/src/armnn/test/TestNameAndDescriptorLayerVisitor.hpp
@@ -317,70 +317,70 @@ public:
};
};
-class TestMergerLayerVisitor : public TestLayerVisitor
+class TestConcatLayerVisitor : public TestLayerVisitor
{
private:
OriginsDescriptor m_VisitorDescriptor;
public:
- explicit TestMergerLayerVisitor(const OriginsDescriptor& mergerDescriptor, const char* name = nullptr)
+ explicit TestConcatLayerVisitor(const OriginsDescriptor& concatDescriptor, const char* name = nullptr)
: TestLayerVisitor(name)
- , m_VisitorDescriptor(mergerDescriptor.GetNumViews(), mergerDescriptor.GetNumDimensions())
+ , m_VisitorDescriptor(concatDescriptor.GetNumViews(), concatDescriptor.GetNumDimensions())
{
- m_VisitorDescriptor.SetConcatAxis(mergerDescriptor.GetConcatAxis());
+ m_VisitorDescriptor.SetConcatAxis(concatDescriptor.GetConcatAxis());
- if (mergerDescriptor.GetNumViews() != m_VisitorDescriptor.GetNumViews())
+ if (concatDescriptor.GetNumViews() != m_VisitorDescriptor.GetNumViews())
{
BOOST_ERROR("Unequal number of views in splitter descriptor.");
}
- else if (mergerDescriptor.GetNumDimensions() != m_VisitorDescriptor.GetNumDimensions())
+ else if (concatDescriptor.GetNumDimensions() != m_VisitorDescriptor.GetNumDimensions())
{
BOOST_ERROR("Unequal number of dimensions in splitter descriptor.");
}
else
{
- for (unsigned int i = 0; i < mergerDescriptor.GetNumViews(); ++i)
+ for (unsigned int i = 0; i < concatDescriptor.GetNumViews(); ++i)
{
- for (unsigned int j = 0; j < mergerDescriptor.GetNumDimensions(); ++j)
+ for (unsigned int j = 0; j < concatDescriptor.GetNumDimensions(); ++j)
{
- m_VisitorDescriptor.SetViewOriginCoord(i, j, mergerDescriptor.GetViewOrigin(i)[j]);
+ m_VisitorDescriptor.SetViewOriginCoord(i, j, concatDescriptor.GetViewOrigin(i)[j]);
}
}
}
};
- void CheckDescriptor(const OriginsDescriptor& mergerDescriptor)
+ void CheckDescriptor(const OriginsDescriptor& concatDescriptor)
{
- BOOST_CHECK_EQUAL(mergerDescriptor.GetNumViews(), m_VisitorDescriptor.GetNumViews());
- BOOST_CHECK_EQUAL(mergerDescriptor.GetNumDimensions(), m_VisitorDescriptor.GetNumDimensions());
- BOOST_CHECK_EQUAL(mergerDescriptor.GetConcatAxis(), m_VisitorDescriptor.GetConcatAxis());
+ BOOST_CHECK_EQUAL(concatDescriptor.GetNumViews(), m_VisitorDescriptor.GetNumViews());
+ BOOST_CHECK_EQUAL(concatDescriptor.GetNumDimensions(), m_VisitorDescriptor.GetNumDimensions());
+ BOOST_CHECK_EQUAL(concatDescriptor.GetConcatAxis(), m_VisitorDescriptor.GetConcatAxis());
- if (mergerDescriptor.GetNumViews() != m_VisitorDescriptor.GetNumViews())
+ if (concatDescriptor.GetNumViews() != m_VisitorDescriptor.GetNumViews())
{
BOOST_ERROR("Unequal number of views in splitter descriptor.");
}
- else if (mergerDescriptor.GetNumDimensions() != m_VisitorDescriptor.GetNumDimensions())
+ else if (concatDescriptor.GetNumDimensions() != m_VisitorDescriptor.GetNumDimensions())
{
BOOST_ERROR("Unequal number of dimensions in splitter descriptor.");
}
else
{
- for (unsigned int i = 0; i < mergerDescriptor.GetNumViews(); ++i)
+ for (unsigned int i = 0; i < concatDescriptor.GetNumViews(); ++i)
{
- for (unsigned int j = 0; j < mergerDescriptor.GetNumDimensions(); ++j)
+ for (unsigned int j = 0; j < concatDescriptor.GetNumDimensions(); ++j)
{
- BOOST_CHECK_EQUAL(mergerDescriptor.GetViewOrigin(i)[j], m_VisitorDescriptor.GetViewOrigin(i)[j]);
+ BOOST_CHECK_EQUAL(concatDescriptor.GetViewOrigin(i)[j], m_VisitorDescriptor.GetViewOrigin(i)[j]);
}
}
}
}
- void VisitMergerLayer(const IConnectableLayer* layer,
- const OriginsDescriptor& mergerDescriptor,
+ void VisitConcatLayer(const IConnectableLayer* layer,
+ const OriginsDescriptor& concatDescriptor,
const char* name = nullptr) override
{
CheckLayerPointer(layer);
- CheckDescriptor(mergerDescriptor);
+ CheckDescriptor(concatDescriptor);
CheckLayerName(name);
};
};