Diffstat (limited to 'src/backends')
-rw-r--r--  src/backends/backendsCommon/LayerSupportBase.cpp | 6
-rw-r--r--  src/backends/backendsCommon/WorkloadData.cpp | 22
-rw-r--r--  src/backends/backendsCommon/WorkloadData.hpp | 8
-rw-r--r--  src/backends/backendsCommon/WorkloadDataFwd.hpp | 2
-rw-r--r--  src/backends/backendsCommon/WorkloadFactory.cpp | 12
-rw-r--r--  src/backends/backendsCommon/WorkloadFactory.hpp | 2
-rw-r--r--  src/backends/backendsCommon/test/CMakeLists.txt | 2
-rw-r--r--  src/backends/backendsCommon/test/ConcatTestImpl.hpp (renamed from src/backends/backendsCommon/test/MergerTestImpl.hpp) | 28
-rw-r--r--  src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp | 14
-rw-r--r--  src/backends/backendsCommon/test/LayerTests.cpp | 26
-rw-r--r--  src/backends/backendsCommon/test/QuantizeHelper.hpp | 1
-rw-r--r--  src/backends/backendsCommon/test/WorkloadDataValidation.cpp | 12
-rw-r--r--  src/backends/cl/ClLayerSupport.cpp | 76
-rw-r--r--  src/backends/cl/ClLayerSupport.hpp | 4
-rw-r--r--  src/backends/cl/ClWorkloadFactory.cpp | 2
-rw-r--r--  src/backends/cl/ClWorkloadFactory.hpp | 2
-rw-r--r--  src/backends/cl/test/ClCreateWorkloadTests.cpp | 56
-rw-r--r--  src/backends/cl/test/ClEndToEndTests.cpp | 26
-rw-r--r--  src/backends/cl/workloads/ClConcatWorkload.cpp | 12
-rw-r--r--  src/backends/cl/workloads/ClConcatWorkload.hpp | 6
-rw-r--r--  src/backends/neon/NeonLayerSupport.cpp | 72
-rw-r--r--  src/backends/neon/NeonLayerSupport.hpp | 4
-rw-r--r--  src/backends/neon/NeonWorkloadFactory.cpp | 2
-rw-r--r--  src/backends/neon/NeonWorkloadFactory.hpp | 2
-rw-r--r--  src/backends/neon/test/NeonCreateWorkloadTests.cpp | 48
-rw-r--r--  src/backends/neon/test/NeonEndToEndTests.cpp | 26
-rw-r--r--  src/backends/neon/workloads/NeonConcatWorkload.cpp | 12
-rw-r--r--  src/backends/neon/workloads/NeonConcatWorkload.hpp | 8
-rw-r--r--  src/backends/reference/RefLayerSupport.cpp | 55
-rw-r--r--  src/backends/reference/RefLayerSupport.hpp | 4
-rw-r--r--  src/backends/reference/RefWorkloadFactory.cpp | 2
-rw-r--r--  src/backends/reference/RefWorkloadFactory.hpp | 2
-rw-r--r--  src/backends/reference/backend.mk | 2
-rw-r--r--  src/backends/reference/test/RefCreateWorkloadTests.cpp | 70
-rw-r--r--  src/backends/reference/test/RefEndToEndTests.cpp | 34
-rw-r--r--  src/backends/reference/workloads/CMakeLists.txt | 4
-rw-r--r--  src/backends/reference/workloads/Concatenate.cpp (renamed from src/backends/reference/workloads/Merger.cpp) | 6
-rw-r--r--  src/backends/reference/workloads/Concatenate.hpp (renamed from src/backends/reference/workloads/Merger.hpp) | 2
-rw-r--r--  src/backends/reference/workloads/RefConcatWorkload.cpp | 4
-rw-r--r--  src/backends/reference/workloads/RefConcatWorkload.hpp | 4
-rw-r--r--  src/backends/reference/workloads/RefWorkloads.hpp | 2
41 files changed, 339 insertions(+), 345 deletions(-)
diff --git a/src/backends/backendsCommon/LayerSupportBase.cpp b/src/backends/backendsCommon/LayerSupportBase.cpp
index 9fcb496ba3..71b17456c1 100644
--- a/src/backends/backendsCommon/LayerSupportBase.cpp
+++ b/src/backends/backendsCommon/LayerSupportBase.cpp
@@ -73,9 +73,7 @@ bool LayerSupportBase::IsConcatSupported(const std::vector<const TensorInfo*> in
const OriginsDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported) const
{
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- return IsMergerSupported(inputs, output, descriptor, reasonIfUnsupported);
- ARMNN_NO_DEPRECATE_WARN_END
+ return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
bool LayerSupportBase::IsConstantSupported(const TensorInfo& output,
@@ -286,7 +284,7 @@ bool LayerSupportBase::IsMergerSupported(const std::vector<const TensorInfo*> in
const OriginsDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported) const
{
- return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
+ return IsConcatSupported(inputs, output, descriptor, reasonIfUnsupported);
}
bool LayerSupportBase::IsMinimumSupported(const TensorInfo& input0,
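This pair of hunks flips the direction of the deprecation shim: IsConcatSupported() no longer forwards to the deprecated IsMergerSupported(); the deprecated name now forwards to the new one, so a backend only needs to override IsConcatSupported(). A minimal sketch of what that enables (the class name and the toy rule are invented; the signature matches the hunk above):

    // Hypothetical backend: overriding only the new name also answers legacy
    // IsMergerSupported() calls, which LayerSupportBase now forwards here.
    class MyLayerSupport : public armnn::LayerSupportBase
    {
    public:
        bool IsConcatSupported(const std::vector<const armnn::TensorInfo*> inputs,
                               const armnn::TensorInfo& output,
                               const armnn::OriginsDescriptor& descriptor,
                               armnn::Optional<std::string&> reasonIfUnsupported) const override
        {
            // Toy rule for illustration only: accept anything with at least one input.
            return !inputs.empty();
        }
    };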
diff --git a/src/backends/backendsCommon/WorkloadData.cpp b/src/backends/backendsCommon/WorkloadData.cpp
index ea84c0b9f2..9bb95f67af 100644
--- a/src/backends/backendsCommon/WorkloadData.cpp
+++ b/src/backends/backendsCommon/WorkloadData.cpp
@@ -378,26 +378,26 @@ void SplitterQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
}
//---------------------------------------------------------------
-void MergerQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
+void ConcatQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
- ValidateNumOutputs(workloadInfo, "MergerQueueDescriptor", 1);
+ ValidateNumOutputs(workloadInfo, "ConcatQueueDescriptor", 1);
if (m_Inputs.size() <= 0)
{
- throw InvalidArgumentException("MergerQueueDescriptor: At least one input needs to be provided.");
+ throw InvalidArgumentException("ConcatQueueDescriptor: At least one input needs to be provided.");
}
if (m_Outputs.size() <= 0)
{
- throw InvalidArgumentException("MergerQueueDescriptor: At least one output needs to be provided.");
+ throw InvalidArgumentException("ConcatQueueDescriptor: At least one output needs to be provided.");
}
if (workloadInfo.m_InputTensorInfos.size() <= 0)
{
- throw InvalidArgumentException("MergerQueueDescriptor: At least one TensorInfo input needs to be provided.");
+ throw InvalidArgumentException("ConcatQueueDescriptor: At least one TensorInfo input needs to be provided.");
}
if (workloadInfo.m_OutputTensorInfos.size() <= 0)
{
- throw InvalidArgumentException("MergerQueueDescriptor: At least one TensorInfo output needs to be provided.");
+ throw InvalidArgumentException("ConcatQueueDescriptor: At least one TensorInfo output needs to be provided.");
}
if(m_Parameters.GetConcatAxis() > workloadInfo.m_InputTensorInfos[0].GetShape().GetNumDimensions())
@@ -413,7 +413,7 @@ void MergerQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
if (workloadInfo.m_InputTensorInfos.size() != m_ViewOrigins.size())
{
throw InvalidArgumentException(
- "MergerQueueDescriptor: Number of split windows "
+ "ConcatQueueDescriptor: Number of split windows "
"has to match number of workloadInfo.m_InputTensorInfos. "
"Number of windows: " +
to_string(m_ViewOrigins.size()) +
@@ -428,7 +428,7 @@ void MergerQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
ViewOrigin const& e = m_ViewOrigins[w];
if (e.m_Origin.size() != outputDims)
{
- throw InvalidArgumentException("MergerQueueDescriptor: Window origin have to "
+ throw InvalidArgumentException("ConcatQueueDescriptor: Window origin have to "
"have the same dimensionality as the output tensor. "
"Window origin (index: " +
to_string(w) + ") has " + to_string(e.m_Origin.size()) +
@@ -442,7 +442,7 @@ void MergerQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
if (e.m_Origin[i] + workloadInfo.m_InputTensorInfos[w].GetShape()[i]
> workloadInfo.m_OutputTensorInfos[0].GetShape()[i])
{
- throw InvalidArgumentException("MergerQueueDescriptor: Window extent coordinates have to "
+ throw InvalidArgumentException("ConcatQueueDescriptor: Window extent coordinates have to "
"be smaller or equal than the size of the output in that coord.");
}
}
@@ -463,11 +463,11 @@ void MergerQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
ValidateDataTypes(workloadInfo.m_InputTensorInfos[i],
supportedTypes,
- "MergerQueueDescriptor");
+ "ConcatQueueDescriptor");
}
ValidateDataTypes(workloadInfo.m_OutputTensorInfos[0],
{workloadInfo.m_InputTensorInfos[0].GetDataType()},
- "MergerQueueDescriptor");
+ "ConcatQueueDescriptor");
}
//---------------------------------------------------------------
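The renamed Validate() keeps all of the original checks. A minimal sketch of a descriptor that trips the window-dimensionality check above, filling the fields by hand (shapes invented; WorkloadDataValidation.cpp later in this patch exercises the same path through test helpers):

    // A 3-coordinate view origin against a 4D output throws InvalidArgumentException.
    armnn::ConcatQueueDescriptor data;
    armnn::WorkloadInfo info;
    info.m_InputTensorInfos.push_back(armnn::TensorInfo({ 1, 3, 16, 21 }, armnn::DataType::Float32));
    info.m_OutputTensorInfos.push_back(armnn::TensorInfo({ 1, 3, 16, 21 }, armnn::DataType::Float32));
    data.m_Inputs.push_back(nullptr);   // handles are not dereferenced by Validate()
    data.m_Outputs.push_back(nullptr);
    std::vector<unsigned int> origin = { 0, 0, 0 };   // 3 coordinates, output is 4D
    data.m_ViewOrigins.push_back(armnn::ConcatQueueDescriptor::ViewOrigin(origin));
    data.Validate(info);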
diff --git a/src/backends/backendsCommon/WorkloadData.hpp b/src/backends/backendsCommon/WorkloadData.hpp
index 689c6d26c6..3e33b946e9 100644
--- a/src/backends/backendsCommon/WorkloadData.hpp
+++ b/src/backends/backendsCommon/WorkloadData.hpp
@@ -9,6 +9,7 @@
#include <InternalTypes.hpp>
+#include <armnn/Deprecated.hpp>
#include <armnn/Descriptors.hpp>
#include <armnn/Exceptions.hpp>
#include <armnn/Types.hpp>
@@ -87,8 +88,8 @@ struct SplitterQueueDescriptor : QueueDescriptorWithParameters<ViewsDescriptor>
void Validate(const WorkloadInfo& workloadInfo) const;
};
-// Merger layer workload data.
-struct MergerQueueDescriptor : QueueDescriptorWithParameters<OriginsDescriptor>
+// Concat layer workload data.
+struct ConcatQueueDescriptor : QueueDescriptorWithParameters<OriginsDescriptor>
{
struct ViewOrigin
{
@@ -106,6 +107,9 @@ struct MergerQueueDescriptor : QueueDescriptorWithParameters<OriginsDescriptor>
void Validate(const WorkloadInfo& workloadInfo) const;
};
+// Deprecated. Use ConcatQueueDescriptor instead.
+using MergerQueueDescriptor = ConcatQueueDescriptor;
+
// Activation layer workload data.
struct ActivationQueueDescriptor : QueueDescriptorWithParameters<ActivationDescriptor>
{
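Because the old name survives as a type alias rather than a separate struct, downstream code that still names MergerQueueDescriptor keeps compiling unchanged. A one-line illustration (not part of the patch):

    #include <type_traits>

    static_assert(std::is_same<armnn::MergerQueueDescriptor, armnn::ConcatQueueDescriptor>::value,
                  "MergerQueueDescriptor is now an alias for ConcatQueueDescriptor");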
diff --git a/src/backends/backendsCommon/WorkloadDataFwd.hpp b/src/backends/backendsCommon/WorkloadDataFwd.hpp
index 9fbd81b326..abee3166f4 100644
--- a/src/backends/backendsCommon/WorkloadDataFwd.hpp
+++ b/src/backends/backendsCommon/WorkloadDataFwd.hpp
@@ -12,7 +12,7 @@ template <typename LayerDescriptor>
struct QueueDescriptorWithParameters;
struct SoftmaxQueueDescriptor;
struct SplitterQueueDescriptor;
-struct MergerQueueDescriptor;
+struct ConcatQueueDescriptor;
struct ActivationQueueDescriptor;
struct FullyConnectedQueueDescriptor;
struct PermuteQueueDescriptor;
diff --git a/src/backends/backendsCommon/WorkloadFactory.cpp b/src/backends/backendsCommon/WorkloadFactory.cpp
index 763107123f..f026e1ecd6 100644
--- a/src/backends/backendsCommon/WorkloadFactory.cpp
+++ b/src/backends/backendsCommon/WorkloadFactory.cpp
@@ -512,9 +512,9 @@ bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId,
reason);
break;
}
- case LayerType::Merger:
+ case LayerType::Concat:
{
- auto cLayer = boost::polymorphic_downcast<const MergerLayer*>(&layer);
+ auto cLayer = boost::polymorphic_downcast<const ConcatLayer*>(&layer);
// Get vector of all inputs.
auto getTensorInfo = [&dataType](const InputSlot& slot)
@@ -535,9 +535,9 @@ bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId,
const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- result = layerSupportObject->IsMergerSupported(inputPtrs, output, cLayer->GetParameters(), reason);
- ARMNN_NO_DEPRECATE_WARN_END
+ result = layerSupportObject->IsConcatSupported(inputPtrs, output, cLayer->GetParameters(), reason);
+
+
break;
}
case LayerType::Multiplication:
@@ -816,7 +816,7 @@ std::unique_ptr<IWorkload> IWorkloadFactory::CreateBatchToSpaceNd(const BatchToS
return std::unique_ptr<IWorkload>();
}
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateConcat(const MergerQueueDescriptor& descriptor,
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateConcat(const ConcatQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
return std::unique_ptr<IWorkload>();
diff --git a/src/backends/backendsCommon/WorkloadFactory.hpp b/src/backends/backendsCommon/WorkloadFactory.hpp
index 0b0ba7ddf1..11c36eb774 100644
--- a/src/backends/backendsCommon/WorkloadFactory.hpp
+++ b/src/backends/backendsCommon/WorkloadFactory.hpp
@@ -61,7 +61,7 @@ public:
virtual std::unique_ptr<IWorkload> CreateBatchToSpaceNd(const BatchToSpaceNdQueueDescriptor& descriptor,
const WorkloadInfo& Info) const;
- virtual std::unique_ptr<IWorkload> CreateConcat(const MergerQueueDescriptor& descriptor,
+ virtual std::unique_ptr<IWorkload> CreateConcat(const ConcatQueueDescriptor& descriptor,
const WorkloadInfo& info) const;
virtual std::unique_ptr<IWorkload> CreateConstant(const ConstantQueueDescriptor& descriptor,
diff --git a/src/backends/backendsCommon/test/CMakeLists.txt b/src/backends/backendsCommon/test/CMakeLists.txt
index 8050a0ab63..508fc77ba6 100644
--- a/src/backends/backendsCommon/test/CMakeLists.txt
+++ b/src/backends/backendsCommon/test/CMakeLists.txt
@@ -30,7 +30,7 @@ list(APPEND armnnBackendsCommonUnitTests_sources
LayerTests.hpp
LstmTestImpl.hpp
NormTestImpl.hpp
- MergerTestImpl.hpp
+ ConcatTestImpl.hpp
MockBackend.cpp
MockBackend.hpp
MockBackendId.hpp
diff --git a/src/backends/backendsCommon/test/MergerTestImpl.hpp b/src/backends/backendsCommon/test/ConcatTestImpl.hpp
index 8483cf02d8..ded3857282 100644
--- a/src/backends/backendsCommon/test/MergerTestImpl.hpp
+++ b/src/backends/backendsCommon/test/ConcatTestImpl.hpp
@@ -18,8 +18,8 @@ namespace
{
template<typename armnn::DataType DataType>
-INetworkPtr CreateMergerNetwork(const std::vector<TensorShape>& inputShapes,
- const TensorShape& outputShape,
+INetworkPtr CreateConcatNetwork(const std::vector<TensorShape>& inputShapes,
+ const TensorShape &outputShape,
unsigned int concatAxis,
const float qScale = 1.0f,
const int32_t qOffset = 0)
@@ -33,26 +33,24 @@ INetworkPtr CreateMergerNetwork(const std::vector<TensorShape>& inputShapes,
descriptor = CreateDescriptorForConcatenation(inputShapes.begin(),
inputShapes.end(),
concatAxis);
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- IConnectableLayer* merger = net->AddMergerLayer(descriptor, "merger");
- ARMNN_NO_DEPRECATE_WARN_END
+ IConnectableLayer* concat = net->AddConcatLayer(descriptor, "concat");
for (unsigned int i = 0; i < inputShapes.size(); ++i)
{
TensorInfo inputTensorInfo(inputShapes[i], DataType, qScale, qOffset);
IConnectableLayer* input = net->AddInputLayer(boost::numeric_cast<LayerBindingId>(i));
- Connect(input, merger, inputTensorInfo, 0, i);
+ Connect(input, concat, inputTensorInfo, 0, i);
}
TensorInfo outputTensorInfo(outputShape, DataType, qScale, qOffset);
IConnectableLayer* output = net->AddOutputLayer(0, "output");
- Connect(merger, output, outputTensorInfo, 0, 0);
+ Connect(concat, output, outputTensorInfo, 0, 0);
return net;
}
template<armnn::DataType ArmnnType>
-void MergerDim0EndToEnd(const std::vector<BackendId>& backends)
+void ConcatDim0EndToEnd(const std::vector<BackendId>& backends)
{
using namespace armnn;
using T = ResolveType<ArmnnType>;
@@ -62,7 +60,7 @@ void MergerDim0EndToEnd(const std::vector<BackendId>& backends)
const TensorShape& outputShape = { 4, 3, 2, 2 };
// Builds up the structure of the network
- INetworkPtr net = CreateMergerNetwork<ArmnnType>(inputShapes, outputShape, concatAxis);
+ INetworkPtr net = CreateConcatNetwork<ArmnnType>(inputShapes, outputShape, concatAxis);
BOOST_TEST_CHECKPOINT("create a network");
@@ -116,7 +114,7 @@ void MergerDim0EndToEnd(const std::vector<BackendId>& backends)
}
template<armnn::DataType ArmnnType>
-void MergerDim1EndToEnd(const std::vector<BackendId>& backends)
+void ConcatDim1EndToEnd(const std::vector<BackendId>& backends)
{
using namespace armnn;
using T = ResolveType<ArmnnType>;
@@ -126,7 +124,7 @@ void MergerDim1EndToEnd(const std::vector<BackendId>& backends)
const TensorShape& outputShape = { 2, 6, 2, 2 };
// Builds up the structure of the network
- INetworkPtr net = CreateMergerNetwork<ArmnnType>(inputShapes, outputShape, concatAxis);
+ INetworkPtr net = CreateConcatNetwork<ArmnnType>(inputShapes, outputShape, concatAxis);
BOOST_TEST_CHECKPOINT("create a network");
@@ -180,7 +178,7 @@ void MergerDim1EndToEnd(const std::vector<BackendId>& backends)
}
template<armnn::DataType ArmnnType>
-void MergerDim2EndToEnd(const std::vector<BackendId>& backends)
+void ConcatDim2EndToEnd(const std::vector<BackendId>& backends)
{
using namespace armnn;
using T = ResolveType<ArmnnType>;
@@ -190,7 +188,7 @@ void MergerDim2EndToEnd(const std::vector<BackendId>& backends)
const TensorShape& outputShape = { 2, 3, 4, 2 };
// Builds up the structure of the network
- INetworkPtr net = CreateMergerNetwork<ArmnnType>(inputShapes, outputShape, concatAxis);
+ INetworkPtr net = CreateConcatNetwork<ArmnnType>(inputShapes, outputShape, concatAxis);
BOOST_TEST_CHECKPOINT("create a network");
@@ -244,7 +242,7 @@ void MergerDim2EndToEnd(const std::vector<BackendId>& backends)
}
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
-void MergerDim3EndToEnd(const std::vector<BackendId>& backends)
+void ConcatDim3EndToEnd(const std::vector<BackendId>& backends)
{
using namespace armnn;
@@ -253,7 +251,7 @@ void MergerDim3EndToEnd(const std::vector<BackendId>& backends)
const TensorShape& outputShape = { 2, 3, 2, 4 };
// Builds up the structure of the network
- INetworkPtr net = CreateMergerNetwork<ArmnnType>(inputShapes, outputShape, concatAxis);
+ INetworkPtr net = CreateConcatNetwork<ArmnnType>(inputShapes, outputShape, concatAxis);
BOOST_TEST_CHECKPOINT("create a network");
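For orientation, a sketch of how the renamed helper is typically driven; two equal { 2, 3, 2, 2 } inputs are an assumption consistent with the { 4, 3, 2, 2 } output shown in the Dim0 hunk above:

    using namespace armnn;
    // Concatenating two { 2, 3, 2, 2 } inputs along axis 0 yields { 4, 3, 2, 2 }.
    const std::vector<TensorShape> inputShapes{ { 2, 3, 2, 2 }, { 2, 3, 2, 2 } };
    const TensorShape outputShape{ 4, 3, 2, 2 };
    INetworkPtr net = CreateConcatNetwork<DataType::Float32>(inputShapes, outputShape, 0);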
diff --git a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
index 3ff7376e91..71614643c3 100644
--- a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
+++ b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
@@ -135,19 +135,19 @@ struct DummyLayer<armnn::InputLayer, armnn::LayerBindingId>
};
template<>
-struct DummyLayer<armnn::MergerLayer>
+struct DummyLayer<armnn::ConcatLayer>
{
DummyLayer()
{
armnn::OriginsDescriptor desc(2);
- m_Layer = dummyGraph.AddLayer<armnn::MergerLayer>(desc, "");
+ m_Layer = dummyGraph.AddLayer<armnn::ConcatLayer>(desc, "");
}
~DummyLayer()
{
dummyGraph.EraseLayer(m_Layer);
}
- armnn::MergerLayer* m_Layer;
+ armnn::ConcatLayer* m_Layer;
};
template<>
@@ -322,6 +322,8 @@ DECLARE_LAYER_POLICY_2_PARAM(BatchNormalization)
DECLARE_LAYER_POLICY_2_PARAM(BatchToSpaceNd)
+DECLARE_LAYER_POLICY_2_PARAM(Concat)
+
DECLARE_LAYER_POLICY_1_PARAM(Constant)
DECLARE_LAYER_POLICY_1_PARAM(ConvertFp16ToFp32)
@@ -364,10 +366,6 @@ DECLARE_LAYER_POLICY_2_PARAM(Mean)
DECLARE_LAYER_POLICY_1_PARAM(Merge)
-ARMNN_NO_DEPRECATE_WARN_BEGIN
-DECLARE_LAYER_POLICY_2_PARAM(Merger)
-ARMNN_NO_DEPRECATE_WARN_END
-
DECLARE_LAYER_POLICY_1_PARAM(Minimum)
DECLARE_LAYER_POLICY_1_PARAM(Multiplication)
@@ -422,7 +420,7 @@ unsigned int GetNumOutputs(const armnn::Layer& layer)
}
template<>
-unsigned int GetNumInputs<armnn::LayerType::Merger>(const armnn::Layer& layer)
+unsigned int GetNumInputs<armnn::LayerType::Concat>(const armnn::Layer& layer)
{
boost::ignore_unused(layer);
return 2;
diff --git a/src/backends/backendsCommon/test/LayerTests.cpp b/src/backends/backendsCommon/test/LayerTests.cpp
index c84a530454..402e86de00 100644
--- a/src/backends/backendsCommon/test/LayerTests.cpp
+++ b/src/backends/backendsCommon/test/LayerTests.cpp
@@ -1362,10 +1362,10 @@ LayerTestResult<float,3> ConcatTest(
);
std::vector<unsigned int> wOrigin1 = {0, 0, 0}; //Extent of the window is defined by size of input[0].
- armnn::MergerQueueDescriptor::ViewOrigin window1(wOrigin1);
+ armnn::ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);
std::vector<unsigned int> wOrigin2 = {2, 0, 0}; //Extent of the window is defined by size of input[1].
- armnn::MergerQueueDescriptor::ViewOrigin window2(wOrigin2);
+ armnn::ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
@@ -1381,7 +1381,7 @@ LayerTestResult<float,3> ConcatTest(
workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
workloadFactory.CreateTensorHandle(inputTensorInfo2);
- armnn::MergerQueueDescriptor data;
+ armnn::ConcatQueueDescriptor data;
armnn::WorkloadInfo info;
AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
@@ -3554,7 +3554,7 @@ void Concatenate(
std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
- armnn::MergerQueueDescriptor queueDescriptor;
+ armnn::ConcatQueueDescriptor queueDescriptor;
armnn::OriginsDescriptor viewsDescriptor = CreateDescriptorForConcatenation(inputTensorInfos, concatDim);
queueDescriptor.m_Parameters = viewsDescriptor;
@@ -6625,10 +6625,10 @@ LayerTestResult<uint8_t, 3> ConcatUint8DifferentQParamsTest(
inputTensorInfo2.SetQuantizationOffset(inputOffset2);
std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0].
- armnn::MergerQueueDescriptor::ViewOrigin window1(wOrigin1);
+ armnn::ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);
std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; //Extent of the window is defined by size of input[1].
- armnn::MergerQueueDescriptor::ViewOrigin window2(wOrigin2);
+ armnn::ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
@@ -6644,7 +6644,7 @@ LayerTestResult<uint8_t, 3> ConcatUint8DifferentQParamsTest(
workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
workloadFactory.CreateTensorHandle(inputTensorInfo2);
- armnn::MergerQueueDescriptor data;
+ armnn::ConcatQueueDescriptor data;
armnn::WorkloadInfo info;
AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
@@ -6759,10 +6759,10 @@ LayerTestResult<uint8_t, 3> ConcatUint8Test(
);
std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0].
- armnn::MergerQueueDescriptor::ViewOrigin window1(wOrigin1);
+ armnn::ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);
std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; //Extent of the window is defined by size of input[1].
- armnn::MergerQueueDescriptor::ViewOrigin window2(wOrigin2);
+ armnn::ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
@@ -6780,7 +6780,7 @@ LayerTestResult<uint8_t, 3> ConcatUint8Test(
workloadFactory.CreateTensorHandle(inputTensorInfo2);
- armnn::MergerQueueDescriptor data;
+ armnn::ConcatQueueDescriptor data;
armnn::WorkloadInfo info;
AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
@@ -6892,10 +6892,10 @@ LayerTestResult<uint16_t, 3> ConcatUint16Test(
}));
std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0].
- armnn::MergerQueueDescriptor::ViewOrigin window1(wOrigin1);
+ armnn::ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);
std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; //Extent of the window is defined by size of input[1].
- armnn::MergerQueueDescriptor::ViewOrigin window2(wOrigin2);
+ armnn::ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
@@ -6913,7 +6913,7 @@ LayerTestResult<uint16_t, 3> ConcatUint16Test(
workloadFactory.CreateTensorHandle(inputTensorInfo2);
- armnn::MergerQueueDescriptor data;
+ armnn::ConcatQueueDescriptor data;
armnn::WorkloadInfo info;
AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
diff --git a/src/backends/backendsCommon/test/QuantizeHelper.hpp b/src/backends/backendsCommon/test/QuantizeHelper.hpp
index b3b0631e18..a0c6553e24 100644
--- a/src/backends/backendsCommon/test/QuantizeHelper.hpp
+++ b/src/backends/backendsCommon/test/QuantizeHelper.hpp
@@ -2,6 +2,7 @@
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
+
#pragma once
#include <armnn/ArmNN.hpp>
diff --git a/src/backends/backendsCommon/test/WorkloadDataValidation.cpp b/src/backends/backendsCommon/test/WorkloadDataValidation.cpp
index 067cca8319..94bef9b50a 100644
--- a/src/backends/backendsCommon/test/WorkloadDataValidation.cpp
+++ b/src/backends/backendsCommon/test/WorkloadDataValidation.cpp
@@ -234,7 +234,7 @@ BOOST_AUTO_TEST_CASE(SplitterQueueDescriptor_Validate_WrongWindow)
}
-BOOST_AUTO_TEST_CASE(MergerQueueDescriptor_Validate_WrongWindow)
+BOOST_AUTO_TEST_CASE(ConcatQueueDescriptor_Validate_WrongWindow)
{
constexpr unsigned int inputNum = 1;
constexpr unsigned int inputChannels = 3;
@@ -256,7 +256,7 @@ BOOST_AUTO_TEST_CASE(MergerQueueDescriptor_Validate_WrongWindow)
inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32);
outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::DataType::Float32);
- MergerQueueDescriptor invalidData;
+ ConcatQueueDescriptor invalidData;
WorkloadInfo invalidInfo;
AddInputToWorkload(invalidData, invalidInfo, inputTensorInfo, nullptr);
@@ -264,7 +264,7 @@ BOOST_AUTO_TEST_CASE(MergerQueueDescriptor_Validate_WrongWindow)
// Invalid, since it has only 3 dimensions while the input tensor is 4d.
std::vector<unsigned int> wOrigin = {0, 0, 0};
- armnn::MergerQueueDescriptor::ViewOrigin window(wOrigin);
+ armnn::ConcatQueueDescriptor::ViewOrigin window(wOrigin);
invalidData.m_ViewOrigins.push_back(window);
BOOST_TEST_INFO("Invalid argument exception is expected, because merge window dimensionality does not "
@@ -273,18 +273,18 @@ BOOST_AUTO_TEST_CASE(MergerQueueDescriptor_Validate_WrongWindow)
// Invalid, since window extends past the boundary of output tensor.
std::vector<unsigned int> wOrigin3 = {0, 0, 15, 0};
- armnn::MergerQueueDescriptor::ViewOrigin window3(wOrigin3);
+ armnn::ConcatQueueDescriptor::ViewOrigin window3(wOrigin3);
invalidData.m_ViewOrigins[0] = window3;
BOOST_TEST_INFO("Invalid argument exception is expected (wOrigin3[2]+ inputHeight > outputHeight");
BOOST_CHECK_THROW(RefConcatWorkload(invalidData, invalidInfo), armnn::InvalidArgumentException);
std::vector<unsigned int> wOrigin4 = {0, 0, 0, 0};
- armnn::MergerQueueDescriptor::ViewOrigin window4(wOrigin4);
+ armnn::ConcatQueueDescriptor::ViewOrigin window4(wOrigin4);
invalidData.m_ViewOrigins[0] = window4;
std::vector<unsigned int> wOrigin5 = {1, 16, 20, 2};
- armnn::MergerQueueDescriptor::ViewOrigin window5(wOrigin4);
+ armnn::ConcatQueueDescriptor::ViewOrigin window5(wOrigin5);
invalidData.m_ViewOrigins.push_back(window5);
BOOST_TEST_INFO("Invalid exception due to number of merge windows not matching number of inputs.");
diff --git a/src/backends/cl/ClLayerSupport.cpp b/src/backends/cl/ClLayerSupport.cpp
index dfac28989c..78ac0e628c 100644
--- a/src/backends/cl/ClLayerSupport.cpp
+++ b/src/backends/cl/ClLayerSupport.cpp
@@ -189,12 +189,43 @@ bool ClLayerSupport::IsBatchToSpaceNdSupported(const TensorInfo& input,
bool ClLayerSupport::IsConcatSupported(const std::vector<const TensorInfo*> inputs,
const TensorInfo& output,
- const OriginsDescriptor& descriptor,
+ const ConcatDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported) const
{
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- return IsMergerSupported(inputs, output, descriptor, reasonIfUnsupported);
- ARMNN_NO_DEPRECATE_WARN_END
+ if (descriptor.GetNumDimensions() <= descriptor.GetConcatAxis())
+ {
+ SetValueChecked(reasonIfUnsupported, "Cl Concat: Concat axis > Number of dimensions.");
+ return false;
+ }
+
+ unsigned int concatInnerAxis = (descriptor.GetNumDimensions() - descriptor.GetConcatAxis()) - 1;
+ if(concatInnerAxis < 3) // Width, height, or channels
+ {
+ FORWARD_WORKLOAD_VALIDATE_FUNC(ClConcatWorkloadValidate,
+ reasonIfUnsupported,
+ inputs,
+ output,
+ descriptor);
+ }
+ else if (concatInnerAxis == 3)
+ {
+ // We rely on the sub-tensor optimization to handle the batch dimension for 4D tensors. If we can't use
+ // sub-tensors for this then we can't support it. Here is where we check that the sub-tensors will work.
+ for (auto& input : inputs)
+ {
+ if (input && !output.IsTypeSpaceMatch(*input)) // Cannot use sub-tensors if the types are not same space
+ {
+ SetValueChecked(reasonIfUnsupported, "Cl Concat: Types and quantization parameters must match.");
+ return false;
+ }
+ }
+ return true; // Sub-tensors support concat along batch
+ }
+ else // > 4 dimensions not supported.
+ {
+ SetValueChecked(reasonIfUnsupported, "Cl Concat: Maximum of 4 dimensions supported.");
+ return false;
+ }
}
bool ClLayerSupport::IsConstantSupported(const TensorInfo& output,
@@ -442,43 +473,10 @@ bool ClLayerSupport::IsMemCopySupported(const TensorInfo &input,
bool ClLayerSupport::IsMergerSupported(const std::vector<const TensorInfo*> inputs,
const TensorInfo& output,
- const OriginsDescriptor& descriptor,
+ const MergerDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported) const
{
- if (descriptor.GetNumDimensions() <= descriptor.GetConcatAxis())
- {
- SetValueChecked(reasonIfUnsupported, "Cl Merger: Concat axis > Number of dimensions.");
- return false;
- }
-
- unsigned int concatInnerAxis = (descriptor.GetNumDimensions() - descriptor.GetConcatAxis()) - 1;
- if(concatInnerAxis < 3) // Width, height, or channels
- {
- FORWARD_WORKLOAD_VALIDATE_FUNC(ClConcatWorkloadValidate,
- reasonIfUnsupported,
- inputs,
- output,
- descriptor);
- }
- else if (concatInnerAxis == 3)
- {
- // We rely on the sub-tensor optimization to handle the batch dimension for 4D tensors. If we can't use
- // sub-tensors for this then we can't support it. Here is where we check that the sub-tensors will work.
- for (auto& input : inputs)
- {
- if (input && !output.IsTypeSpaceMatch(*input)) // Cannot use sub-tensors if the types are not same space
- {
- SetValueChecked(reasonIfUnsupported, "Cl Merger: Types and quantization parameters must match.");
- return false;
- }
- }
- return true; // Sub-tensors support concat along batch
- }
- else // > 4 dimensions not supported.
- {
- SetValueChecked(reasonIfUnsupported, "Cl Merger: Maximum of 4 dimensions supported.");
- return false;
- }
+ return IsConcatSupported(inputs, output, descriptor, reasonIfUnsupported);
}
bool ClLayerSupport::IsMinimumSupported(const TensorInfo& input0,
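The axis arithmetic above converts ArmNN's outermost-first axis index into ACL's innermost-first index. A worked sketch for a 4D NCHW tensor (the helper name is invented; the formula is the one used in the hunk):

    // concatInnerAxis = (numDimensions - concatAxis) - 1; for 4 dimensions:
    //   concatAxis 0 (batch)    -> 3 : sub-tensor path
    //   concatAxis 1 (channels) -> 2 : ClConcatWorkloadValidate
    //   concatAxis 2 (height)   -> 1 : ClConcatWorkloadValidate
    //   concatAxis 3 (width)    -> 0 : ClConcatWorkloadValidate
    unsigned int ConcatInnerAxis(unsigned int numDimensions, unsigned int concatAxis)
    {
        return (numDimensions - concatAxis) - 1;
    }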
diff --git a/src/backends/cl/ClLayerSupport.hpp b/src/backends/cl/ClLayerSupport.hpp
index fca0bfd352..64c1079621 100644
--- a/src/backends/cl/ClLayerSupport.hpp
+++ b/src/backends/cl/ClLayerSupport.hpp
@@ -38,7 +38,7 @@ public:
bool IsConcatSupported(const std::vector<const TensorInfo*> inputs,
const TensorInfo& output,
- const OriginsDescriptor& descriptor,
+ const ConcatDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
bool IsConstantSupported(const TensorInfo& output,
@@ -146,7 +146,7 @@ public:
ARMNN_DEPRECATED_MSG("Use IsConcatSupported instead")
bool IsMergerSupported(const std::vector<const TensorInfo*> inputs,
const TensorInfo& output,
- const OriginsDescriptor& descriptor,
+ const MergerDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
bool IsMinimumSupported(const TensorInfo& input0,
diff --git a/src/backends/cl/ClWorkloadFactory.cpp b/src/backends/cl/ClWorkloadFactory.cpp
index e7cf1917b1..214b88deba 100644
--- a/src/backends/cl/ClWorkloadFactory.cpp
+++ b/src/backends/cl/ClWorkloadFactory.cpp
@@ -265,7 +265,7 @@ std::unique_ptr<IWorkload> ClWorkloadFactory::CreateL2Normalization(const L2Norm
return MakeWorkload<ClL2NormalizationFloatWorkload, NullWorkload>(descriptor, info);
}
-std::unique_ptr<armnn::IWorkload> ClWorkloadFactory::CreateConcat(const MergerQueueDescriptor& descriptor,
+std::unique_ptr<armnn::IWorkload> ClWorkloadFactory::CreateConcat(const ConcatQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
return MakeWorkload<ClConcatWorkload>(descriptor, info);
diff --git a/src/backends/cl/ClWorkloadFactory.hpp b/src/backends/cl/ClWorkloadFactory.hpp
index e00672f853..27221713c1 100644
--- a/src/backends/cl/ClWorkloadFactory.hpp
+++ b/src/backends/cl/ClWorkloadFactory.hpp
@@ -97,7 +97,7 @@ public:
std::unique_ptr<IWorkload> CreateL2Normalization(const L2NormalizationQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
- std::unique_ptr<IWorkload> CreateConcat(const MergerQueueDescriptor& descriptor,
+ std::unique_ptr<IWorkload> CreateConcat(const ConcatQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
std::unique_ptr<IWorkload> CreateConstant(const ConstantQueueDescriptor& descriptor,
diff --git a/src/backends/cl/test/ClCreateWorkloadTests.cpp b/src/backends/cl/test/ClCreateWorkloadTests.cpp
index 7f08b80a14..dc884e01b1 100644
--- a/src/backends/cl/test/ClCreateWorkloadTests.cpp
+++ b/src/backends/cl/test/ClCreateWorkloadTests.cpp
@@ -551,30 +551,30 @@ BOOST_AUTO_TEST_CASE(CreateSplitterFloat16Workload)
}
template <typename armnn::DataType DataType>
-static void ClSplitterMergerTest()
+static void ClSplitterConcatTest()
{
// Tests that it is possible to decide which output of the splitter layer
- // should be lined to which input of the merger layer.
+ // should be linked to which input of the concat layer.
// We test that it is possible to specify 0th output
- // of the splitter to be the 1st input to the merger and the 1st output of the splitter to be 0th input
- // of the merger.
+ // of the splitter to be the 1st input to the concat and the 1st output of the splitter to be 0th input
+ // of the concat.
Graph graph;
ClWorkloadFactory factory =
ClWorkloadFactoryHelper::GetFactory(ClWorkloadFactoryHelper::GetMemoryManager());
auto workloads =
- CreateSplitterMergerWorkloadTest<ClSplitterWorkload, ClConcatWorkload, DataType>
+ CreateSplitterConcatWorkloadTest<ClSplitterWorkload, ClConcatWorkload, DataType>
(factory, graph);
auto wlSplitter = std::move(workloads.first);
- auto wlMerger = std::move(workloads.second);
+ auto wlConcat = std::move(workloads.second);
//Checks that the index of inputs/outputs matches what we declared on InputDescriptor construction.
armnn::ClSubTensorHandle* sOut0 = dynamic_cast<armnn::ClSubTensorHandle*>(wlSplitter->GetData().m_Outputs[0]);
armnn::ClSubTensorHandle* sOut1 = dynamic_cast<armnn::ClSubTensorHandle*>(wlSplitter->GetData().m_Outputs[1]);
- armnn::ClSubTensorHandle* mIn0 = dynamic_cast<armnn::ClSubTensorHandle*>(wlMerger->GetData().m_Inputs[0]);
- armnn::ClSubTensorHandle* mIn1 = dynamic_cast<armnn::ClSubTensorHandle*>(wlMerger->GetData().m_Inputs[1]);
+ armnn::ClSubTensorHandle* mIn0 = dynamic_cast<armnn::ClSubTensorHandle*>(wlConcat->GetData().m_Inputs[0]);
+ armnn::ClSubTensorHandle* mIn1 = dynamic_cast<armnn::ClSubTensorHandle*>(wlConcat->GetData().m_Inputs[1]);
BOOST_TEST(sOut0);
BOOST_TEST(sOut1);
@@ -593,14 +593,14 @@ static void ClSplitterMergerTest()
BOOST_TEST(validSubTensorParents);
}
-BOOST_AUTO_TEST_CASE(CreateSplitterMergerFloatWorkload)
+BOOST_AUTO_TEST_CASE(CreateSplitterConcatFloatWorkload)
{
- ClSplitterMergerTest<armnn::DataType::Float32>();
+ ClSplitterConcatTest<armnn::DataType::Float32>();
}
-BOOST_AUTO_TEST_CASE(CreateSplitterMergerFloat16Workload)
+BOOST_AUTO_TEST_CASE(CreateSplitterConcatFloat16Workload)
{
- ClSplitterMergerTest<armnn::DataType::Float16>();
+ ClSplitterConcatTest<armnn::DataType::Float16>();
}
@@ -801,17 +801,17 @@ BOOST_AUTO_TEST_CASE(CreateMeanUint8Workload)
ClMeanWorkloadTest<ClMeanWorkload, armnn::DataType::QuantisedAsymm8>();
}
-template <typename MergerWorkloadType, armnn::DataType DataType>
-static void ClCreateMergerWorkloadTest(std::initializer_list<unsigned int> outputShape,
+template <typename ConcatWorkloadType, armnn::DataType DataType>
+static void ClCreateConcatWorkloadTest(std::initializer_list<unsigned int> outputShape,
unsigned int concatAxis)
{
Graph graph;
ClWorkloadFactory factory =
ClWorkloadFactoryHelper::GetFactory(ClWorkloadFactoryHelper::GetMemoryManager());
- auto workload = CreateMergerWorkloadTest<MergerWorkloadType, DataType>(factory, graph, outputShape, concatAxis);
+ auto workload = CreateConcatWorkloadTest<ConcatWorkloadType, DataType>(factory, graph, outputShape, concatAxis);
- MergerQueueDescriptor queueDescriptor = workload->GetData();
+ ConcatQueueDescriptor queueDescriptor = workload->GetData();
auto inputHandle0 = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
auto inputHandle1 = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[1]);
auto outputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
@@ -821,34 +821,34 @@ static void ClCreateMergerWorkloadTest(std::initializer_list<unsigned int> outpu
BOOST_TEST(CompareIClTensorHandleShape(outputHandle, outputShape));
}
-BOOST_AUTO_TEST_CASE(CreateMergerDim0Float32Workload)
+BOOST_AUTO_TEST_CASE(CreateConcatDim0Float32Workload)
{
- ClCreateMergerWorkloadTest<ClConcatWorkload, armnn::DataType::Float32>({ 4, 3, 2, 5 }, 0);
+ ClCreateConcatWorkloadTest<ClConcatWorkload, armnn::DataType::Float32>({ 4, 3, 2, 5 }, 0);
}
-BOOST_AUTO_TEST_CASE(CreateMergerDim1Float32Workload)
+BOOST_AUTO_TEST_CASE(CreateConcatDim1Float32Workload)
{
- ClCreateMergerWorkloadTest<ClConcatWorkload, armnn::DataType::Float32>({ 2, 6, 2, 5 }, 1);
+ ClCreateConcatWorkloadTest<ClConcatWorkload, armnn::DataType::Float32>({ 2, 6, 2, 5 }, 1);
}
-BOOST_AUTO_TEST_CASE(CreateMergerDim3Float32Workload)
+BOOST_AUTO_TEST_CASE(CreateConcatDim3Float32Workload)
{
- ClCreateMergerWorkloadTest<ClConcatWorkload, armnn::DataType::Float32>({ 2, 3, 2, 10 }, 3);
+ ClCreateConcatWorkloadTest<ClConcatWorkload, armnn::DataType::Float32>({ 2, 3, 2, 10 }, 3);
}
-BOOST_AUTO_TEST_CASE(CreateMergerDim0Uint8Workload)
+BOOST_AUTO_TEST_CASE(CreateConcatDim0Uint8Workload)
{
- ClCreateMergerWorkloadTest<ClConcatWorkload, armnn::DataType::QuantisedAsymm8>({ 4, 3, 2, 5 }, 0);
+ ClCreateConcatWorkloadTest<ClConcatWorkload, armnn::DataType::QuantisedAsymm8>({ 4, 3, 2, 5 }, 0);
}
-BOOST_AUTO_TEST_CASE(CreateMergerDim1Uint8Workload)
+BOOST_AUTO_TEST_CASE(CreateConcatDim1Uint8Workload)
{
- ClCreateMergerWorkloadTest<ClConcatWorkload, armnn::DataType::QuantisedAsymm8>({ 2, 6, 2, 5 }, 1);
+ ClCreateConcatWorkloadTest<ClConcatWorkload, armnn::DataType::QuantisedAsymm8>({ 2, 6, 2, 5 }, 1);
}
-BOOST_AUTO_TEST_CASE(CreateMergerDim3Uint8Workload)
+BOOST_AUTO_TEST_CASE(CreateConcatDim3Uint8Workload)
{
- ClCreateMergerWorkloadTest<ClConcatWorkload, armnn::DataType::QuantisedAsymm8>({ 2, 3, 2, 10 }, 3);
+ ClCreateConcatWorkloadTest<ClConcatWorkload, armnn::DataType::QuantisedAsymm8>({ 2, 3, 2, 10 }, 3);
}
BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/backends/cl/test/ClEndToEndTests.cpp b/src/backends/cl/test/ClEndToEndTests.cpp
index 9c010fccad..3235f26334 100644
--- a/src/backends/cl/test/ClEndToEndTests.cpp
+++ b/src/backends/cl/test/ClEndToEndTests.cpp
@@ -4,7 +4,7 @@
//
#include <backendsCommon/test/EndToEndTestImpl.hpp>
-#include <backendsCommon/test/MergerTestImpl.hpp>
+#include <backendsCommon/test/ConcatTestImpl.hpp>
#include <backendsCommon/test/ArithmeticTestImpl.hpp>
#include <backendsCommon/test/SplitterEndToEndTestImpl.hpp>
@@ -19,34 +19,34 @@ BOOST_AUTO_TEST_CASE(ConstantUsage_Cl_Float32)
ConstantUsageFloat32Test(defaultBackends);
}
-BOOST_AUTO_TEST_CASE(ClMergerEndToEndDim0Test)
+BOOST_AUTO_TEST_CASE(ClConcatEndToEndDim0Test)
{
- MergerDim0EndToEnd<armnn::DataType::Float32>(defaultBackends);
+ ConcatDim0EndToEnd<armnn::DataType::Float32>(defaultBackends);
}
-BOOST_AUTO_TEST_CASE(ClMergerEndToEndDim0Uint8Test)
+BOOST_AUTO_TEST_CASE(ClConcatEndToEndDim0Uint8Test)
{
- MergerDim0EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ ConcatDim0EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
}
-BOOST_AUTO_TEST_CASE(ClMergerEndToEndDim1Test)
+BOOST_AUTO_TEST_CASE(ClConcatEndToEndDim1Test)
{
- MergerDim1EndToEnd<armnn::DataType::Float32>(defaultBackends);
+ ConcatDim1EndToEnd<armnn::DataType::Float32>(defaultBackends);
}
-BOOST_AUTO_TEST_CASE(ClMergerEndToEndDim1Uint8Test)
+BOOST_AUTO_TEST_CASE(ClConcatEndToEndDim1Uint8Test)
{
- MergerDim1EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ ConcatDim1EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
}
-BOOST_AUTO_TEST_CASE(ClMergerEndToEndDim3Test)
+BOOST_AUTO_TEST_CASE(ClConcatEndToEndDim3Test)
{
- MergerDim3EndToEnd<armnn::DataType::Float32>(defaultBackends);
+ ConcatDim3EndToEnd<armnn::DataType::Float32>(defaultBackends);
}
-BOOST_AUTO_TEST_CASE(ClMergerEndToEndDim3Uint8Test)
+BOOST_AUTO_TEST_CASE(ClConcatEndToEndDim3Uint8Test)
{
- MergerDim3EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ ConcatDim3EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(ClGreaterSimpleEndToEndTest)
diff --git a/src/backends/cl/workloads/ClConcatWorkload.cpp b/src/backends/cl/workloads/ClConcatWorkload.cpp
index ee4ba6b65f..fb28946549 100644
--- a/src/backends/cl/workloads/ClConcatWorkload.cpp
+++ b/src/backends/cl/workloads/ClConcatWorkload.cpp
@@ -19,7 +19,7 @@ using namespace armcomputetensorutils;
namespace
{
-size_t CalcAxis(const MergerDescriptor& desc)
+size_t CalcAxis(const OriginsDescriptor& desc)
{
return (desc.GetNumDimensions() - desc.GetConcatAxis()) - 1;
}
@@ -27,7 +27,7 @@ size_t CalcAxis(const MergerDescriptor& desc)
arm_compute::Status ClConcatWorkloadValidate(const std::vector<const TensorInfo*>& inputs,
const TensorInfo& output,
- const MergerDescriptor& descriptor)
+ const OriginsDescriptor& descriptor)
{
std::vector<arm_compute::TensorInfo> aclInputs;
for (const TensorInfo* input : inputs)
@@ -46,8 +46,8 @@ arm_compute::Status ClConcatWorkloadValidate(const std::vector<const TensorInfo*
return arm_compute::CLConcatenateLayer::validate(aclInputPtrs, &aclOutputInfo, aclAxis);
}
-ClConcatWorkload::ClConcatWorkload(const MergerQueueDescriptor& descriptor, const WorkloadInfo& info)
-: BaseWorkload<MergerQueueDescriptor>(descriptor, info)
+ClConcatWorkload::ClConcatWorkload(const ConcatQueueDescriptor& descriptor, const WorkloadInfo& info)
+: BaseWorkload<ConcatQueueDescriptor>(descriptor, info)
{
bool allInputsAreSubtensors = true;
@@ -56,7 +56,7 @@ ClConcatWorkload::ClConcatWorkload(const MergerQueueDescriptor& descriptor, cons
{
if (!input->GetParent())
{
- // Non sub-tensor input found so we need to execute the merger function
+ // Non sub-tensor input found so we need to execute the concat function
allInputsAreSubtensors = false;
break;
}
@@ -64,7 +64,7 @@ ClConcatWorkload::ClConcatWorkload(const MergerQueueDescriptor& descriptor, cons
if (allInputsAreSubtensors)
{
- // Can skip configuring the merger function since it's not executed
+ // Can skip configuring the concat function since it's not executed
return;
}
diff --git a/src/backends/cl/workloads/ClConcatWorkload.hpp b/src/backends/cl/workloads/ClConcatWorkload.hpp
index 106193d090..c34de9ff9a 100644
--- a/src/backends/cl/workloads/ClConcatWorkload.hpp
+++ b/src/backends/cl/workloads/ClConcatWorkload.hpp
@@ -14,12 +14,12 @@ namespace armnn
arm_compute::Status ClConcatWorkloadValidate(const std::vector<const TensorInfo*>& inputs,
const TensorInfo& output,
- const MergerDescriptor& descriptor);
+ const OriginsDescriptor& descriptor);
-class ClConcatWorkload : public BaseWorkload<MergerQueueDescriptor>
+class ClConcatWorkload : public BaseWorkload<ConcatQueueDescriptor>
{
public:
- ClConcatWorkload(const MergerQueueDescriptor& descriptor, const WorkloadInfo& info);
+ ClConcatWorkload(const ConcatQueueDescriptor& descriptor, const WorkloadInfo& info);
void Execute() const override;
diff --git a/src/backends/neon/NeonLayerSupport.cpp b/src/backends/neon/NeonLayerSupport.cpp
index fd9aac5bc5..e84eb799fc 100644
--- a/src/backends/neon/NeonLayerSupport.cpp
+++ b/src/backends/neon/NeonLayerSupport.cpp
@@ -146,12 +146,41 @@ bool NeonLayerSupport::IsBatchNormalizationSupported(const TensorInfo& input,
bool NeonLayerSupport::IsConcatSupported(const std::vector<const TensorInfo*> inputs,
const TensorInfo& output,
- const OriginsDescriptor& descriptor,
+ const ConcatDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported) const
{
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- return IsMergerSupported(inputs, output, descriptor, reasonIfUnsupported);
- ARMNN_NO_DEPRECATE_WARN_END
+ if (descriptor.GetNumDimensions() <= descriptor.GetConcatAxis())
+ {
+ SetValueChecked(reasonIfUnsupported, "Neon Concat: Concat axis > Number of dimensions.");
+ return false;
+ }
+
+ unsigned int concatInnerAxis = (descriptor.GetNumDimensions() - descriptor.GetConcatAxis()) - 1;
+ if(concatInnerAxis < 3) // Width, height, or channels
+ {
+ FORWARD_WORKLOAD_VALIDATE_FUNC(NeonConcatWorkloadValidate,
+ reasonIfUnsupported,
+ inputs,
+ output,
+ descriptor);
+ }
+ else if (concatInnerAxis == 3)
+ {
+ for (auto& input : inputs)
+ {
+ if (input && !output.IsTypeSpaceMatch(*input)) // Cannot use sub-tensors if the types are not same space
+ {
+ SetValueChecked(reasonIfUnsupported, "Neon Concat: Types and quantization parameters must match.");
+ return false;
+ }
+ }
+ return true; // Sub-tensors support concat along batch
+ }
+ else // > 4 dimensions not supported.
+ {
+ SetValueChecked(reasonIfUnsupported, "Neon Concat: Maximum of 4 dimensions supported.");
+ return false;
+ }
}
bool NeonLayerSupport::IsConstantSupported(const TensorInfo& output,
@@ -326,41 +355,10 @@ bool NeonLayerSupport::IsMemCopySupported(const TensorInfo &input,
bool NeonLayerSupport::IsMergerSupported(const std::vector<const TensorInfo*> inputs,
const TensorInfo& output,
- const OriginsDescriptor& descriptor,
+ const MergerDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported) const
{
- if (descriptor.GetNumDimensions() <= descriptor.GetConcatAxis())
- {
- SetValueChecked(reasonIfUnsupported, "Neon Merger: Concat axis > Number of dimensions.");
- return false;
- }
-
- unsigned int concatInnerAxis = (descriptor.GetNumDimensions() - descriptor.GetConcatAxis()) - 1;
- if(concatInnerAxis < 3) // Width, height, or channels
- {
- FORWARD_WORKLOAD_VALIDATE_FUNC(NeonConcatWorkloadValidate,
- reasonIfUnsupported,
- inputs,
- output,
- descriptor);
- }
- else if (concatInnerAxis == 3)
- {
- for (auto& input : inputs)
- {
- if (input && !output.IsTypeSpaceMatch(*input)) // Cannot use sub-tensors if the types are not same space
- {
- SetValueChecked(reasonIfUnsupported, "Neon Merger: Types and quantization parameters must match.");
- return false;
- }
- }
- return true; // Sub-tensors support concat along batch
- }
- else // > 4 dimensions not supported.
- {
- SetValueChecked(reasonIfUnsupported, "Neon Merger: Maximum of 4 dimensions supported.");
- return false;
- }
+ return IsConcatSupported(inputs, output, descriptor, reasonIfUnsupported);
}
bool NeonLayerSupport::IsMinimumSupported(const TensorInfo& input0,
diff --git a/src/backends/neon/NeonLayerSupport.hpp b/src/backends/neon/NeonLayerSupport.hpp
index 5e8e0bdbed..dd6ed79c9a 100644
--- a/src/backends/neon/NeonLayerSupport.hpp
+++ b/src/backends/neon/NeonLayerSupport.hpp
@@ -33,7 +33,7 @@ public:
bool IsConcatSupported(const std::vector<const TensorInfo*> inputs,
const TensorInfo& output,
- const OriginsDescriptor& descriptor,
+ const ConcatDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
bool IsConstantSupported(const TensorInfo& output,
@@ -109,7 +109,7 @@ public:
ARMNN_DEPRECATED_MSG("Use IsConcatSupported instead")
bool IsMergerSupported(const std::vector<const TensorInfo*> inputs,
const TensorInfo& output,
- const OriginsDescriptor& descriptor,
+ const MergerDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
bool IsMinimumSupported(const TensorInfo& input0,
diff --git a/src/backends/neon/NeonWorkloadFactory.cpp b/src/backends/neon/NeonWorkloadFactory.cpp
index 3005dae94c..4b6225f67b 100644
--- a/src/backends/neon/NeonWorkloadFactory.cpp
+++ b/src/backends/neon/NeonWorkloadFactory.cpp
@@ -233,7 +233,7 @@ std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateL2Normalization(const L2No
m_MemoryManager->GetIntraLayerManager());
}
-std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateConcat(const MergerQueueDescriptor& descriptor,
+std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateConcat(const ConcatQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
return std::make_unique<NeonConcatWorkload>(descriptor, info);
diff --git a/src/backends/neon/NeonWorkloadFactory.hpp b/src/backends/neon/NeonWorkloadFactory.hpp
index 60dbb90b60..6a28d12326 100644
--- a/src/backends/neon/NeonWorkloadFactory.hpp
+++ b/src/backends/neon/NeonWorkloadFactory.hpp
@@ -98,7 +98,7 @@ public:
std::unique_ptr<IWorkload> CreateL2Normalization(const L2NormalizationQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
- std::unique_ptr<IWorkload> CreateConcat(const MergerQueueDescriptor& descriptor,
+ std::unique_ptr<IWorkload> CreateConcat(const ConcatQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
std::unique_ptr<IWorkload> CreateConstant(const ConstantQueueDescriptor& descriptor,
diff --git a/src/backends/neon/test/NeonCreateWorkloadTests.cpp b/src/backends/neon/test/NeonCreateWorkloadTests.cpp
index b41d62f7eb..83823659b0 100644
--- a/src/backends/neon/test/NeonCreateWorkloadTests.cpp
+++ b/src/backends/neon/test/NeonCreateWorkloadTests.cpp
@@ -504,30 +504,30 @@ BOOST_AUTO_TEST_CASE(CreateSplitterWorkload)
BOOST_TEST(TestNeonTensorHandleInfo(outputHandle2, TensorInfo({2, 7, 7}, DataType::Float32)));
}
-BOOST_AUTO_TEST_CASE(CreateSplitterMerger)
+BOOST_AUTO_TEST_CASE(CreateSplitterConcat)
{
// Tests that it is possible to decide which output of the splitter layer
- // should be lined to which input of the merger layer.
+ // should be linked to which input of the concat layer.
// We tested that it is possible to specify 0th output
- // of the splitter to be the 1st input to the merger, and the 1st output of the splitter to be 0th input
- // of the merger.
+ // of the splitter to be the 1st input to the concat, and the 1st output of the splitter to be 0th input
+ // of the concat.
Graph graph;
NeonWorkloadFactory factory =
NeonWorkloadFactoryHelper::GetFactory(NeonWorkloadFactoryHelper::GetMemoryManager());
auto workloads =
- CreateSplitterMergerWorkloadTest<NeonSplitterWorkload, NeonConcatWorkload,
+ CreateSplitterConcatWorkloadTest<NeonSplitterWorkload, NeonConcatWorkload,
DataType::Float32>(factory, graph);
auto wlSplitter = std::move(workloads.first);
- auto wlMerger = std::move(workloads.second);
+ auto wlConcat = std::move(workloads.second);
//Checks that the index of inputs/outputs matches what we declared on InputDescriptor construction.
armnn::INeonTensorHandle* sOut0 = dynamic_cast<armnn::INeonTensorHandle*>(wlSplitter->GetData().m_Outputs[0]);
armnn::INeonTensorHandle* sOut1 = dynamic_cast<armnn::INeonTensorHandle*>(wlSplitter->GetData().m_Outputs[1]);
- armnn::INeonTensorHandle* mIn0 = dynamic_cast<armnn::INeonTensorHandle*>(wlMerger->GetData().m_Inputs[0]);
- armnn::INeonTensorHandle* mIn1 = dynamic_cast<armnn::INeonTensorHandle*>(wlMerger->GetData().m_Inputs[1]);
+ armnn::INeonTensorHandle* mIn0 = dynamic_cast<armnn::INeonTensorHandle*>(wlConcat->GetData().m_Inputs[0]);
+ armnn::INeonTensorHandle* mIn1 = dynamic_cast<armnn::INeonTensorHandle*>(wlConcat->GetData().m_Inputs[1]);
BOOST_TEST(sOut0);
BOOST_TEST(sOut1);
@@ -632,17 +632,17 @@ BOOST_AUTO_TEST_CASE(CreateL2NormalizationNhwcWorkload)
NeonCreateL2NormalizationWorkloadTest<NeonL2NormalizationFloatWorkload, DataType::Float32>(DataLayout::NHWC);
}
-template <typename MergerWorkloadType, armnn::DataType DataType>
-static void NeonCreateMergerWorkloadTest(std::initializer_list<unsigned int> outputShape,
+template <typename ConcatWorkloadType, armnn::DataType DataType>
+static void NeonCreateConcatWorkloadTest(std::initializer_list<unsigned int> outputShape,
unsigned int concatAxis)
{
Graph graph;
NeonWorkloadFactory factory =
NeonWorkloadFactoryHelper::GetFactory(NeonWorkloadFactoryHelper::GetMemoryManager());
- auto workload = CreateMergerWorkloadTest<MergerWorkloadType, DataType>(factory, graph, outputShape, concatAxis);
+ auto workload = CreateConcatWorkloadTest<ConcatWorkloadType, DataType>(factory, graph, outputShape, concatAxis);
- MergerQueueDescriptor queueDescriptor = workload->GetData();
+ ConcatQueueDescriptor queueDescriptor = workload->GetData();
auto inputHandle0 = boost::polymorphic_downcast<INeonTensorHandle*>(queueDescriptor.m_Inputs[0]);
auto inputHandle1 = boost::polymorphic_downcast<INeonTensorHandle*>(queueDescriptor.m_Inputs[1]);
auto outputHandle = boost::polymorphic_downcast<INeonTensorHandle*>(queueDescriptor.m_Outputs[0]);
@@ -652,34 +652,34 @@ static void NeonCreateMergerWorkloadTest(std::initializer_list<unsigned int> out
BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo(outputShape, DataType)));
}
-BOOST_AUTO_TEST_CASE(CreateMergerDim0Float32Workload)
+BOOST_AUTO_TEST_CASE(CreateConcatDim0Float32Workload)
{
- NeonCreateMergerWorkloadTest<NeonConcatWorkload, armnn::DataType::Float32>({ 4, 3, 2, 5 }, 0);
+ NeonCreateConcatWorkloadTest<NeonConcatWorkload, armnn::DataType::Float32>({ 4, 3, 2, 5 }, 0);
}
-BOOST_AUTO_TEST_CASE(CreateMergerDim1Float32Workload)
+BOOST_AUTO_TEST_CASE(CreateConcatDim1Float32Workload)
{
- NeonCreateMergerWorkloadTest<NeonConcatWorkload, armnn::DataType::Float32>({ 2, 6, 2, 5 }, 1);
+ NeonCreateConcatWorkloadTest<NeonConcatWorkload, armnn::DataType::Float32>({ 2, 6, 2, 5 }, 1);
}
-BOOST_AUTO_TEST_CASE(CreateMergerDim3Float32Workload)
+BOOST_AUTO_TEST_CASE(CreateConcatDim3Float32Workload)
{
- NeonCreateMergerWorkloadTest<NeonConcatWorkload, armnn::DataType::Float32>({ 2, 3, 2, 10 }, 3);
+ NeonCreateConcatWorkloadTest<NeonConcatWorkload, armnn::DataType::Float32>({ 2, 3, 2, 10 }, 3);
}
-BOOST_AUTO_TEST_CASE(CreateMergerDim0Uint8Workload)
+BOOST_AUTO_TEST_CASE(CreateConcatDim0Uint8Workload)
{
- NeonCreateMergerWorkloadTest<NeonConcatWorkload, armnn::DataType::QuantisedAsymm8>({ 4, 3, 2, 5 }, 0);
+ NeonCreateConcatWorkloadTest<NeonConcatWorkload, armnn::DataType::QuantisedAsymm8>({ 4, 3, 2, 5 }, 0);
}
-BOOST_AUTO_TEST_CASE(CreateMergerDim1Uint8Workload)
+BOOST_AUTO_TEST_CASE(CreateConcatDim1Uint8Workload)
{
- NeonCreateMergerWorkloadTest<NeonConcatWorkload, armnn::DataType::QuantisedAsymm8>({ 2, 6, 2, 5 }, 1);
+ NeonCreateConcatWorkloadTest<NeonConcatWorkload, armnn::DataType::QuantisedAsymm8>({ 2, 6, 2, 5 }, 1);
}
-BOOST_AUTO_TEST_CASE(CreateMergerDim3Uint8Workload)
+BOOST_AUTO_TEST_CASE(CreateConcatDim3Uint8Workload)
{
- NeonCreateMergerWorkloadTest<NeonConcatWorkload, armnn::DataType::QuantisedAsymm8>({ 2, 3, 2, 10 }, 3);
+ NeonCreateConcatWorkloadTest<NeonConcatWorkload, armnn::DataType::QuantisedAsymm8>({ 2, 3, 2, 10 }, 3);
}
BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/backends/neon/test/NeonEndToEndTests.cpp b/src/backends/neon/test/NeonEndToEndTests.cpp
index 441efed9a9..15f5fc330e 100644
--- a/src/backends/neon/test/NeonEndToEndTests.cpp
+++ b/src/backends/neon/test/NeonEndToEndTests.cpp
@@ -4,7 +4,7 @@
//
#include <backendsCommon/test/EndToEndTestImpl.hpp>
-#include <backendsCommon/test/MergerTestImpl.hpp>
+#include <backendsCommon/test/ConcatTestImpl.hpp>
#include <backendsCommon/test/ArithmeticTestImpl.hpp>
#include <backendsCommon/test/SplitterEndToEndTestImpl.hpp>
@@ -93,34 +93,34 @@ BOOST_AUTO_TEST_CASE(NeonGreaterBroadcastEndToEndUint8Test)
expectedOutput);
}
-BOOST_AUTO_TEST_CASE(NeonMergerEndToEndDim0Test)
+BOOST_AUTO_TEST_CASE(NeonConcatEndToEndDim0Test)
{
- MergerDim0EndToEnd<armnn::DataType::Float32>(defaultBackends);
+ ConcatDim0EndToEnd<armnn::DataType::Float32>(defaultBackends);
}
-BOOST_AUTO_TEST_CASE(NeonMergerEndToEndDim0Uint8Test)
+BOOST_AUTO_TEST_CASE(NeonConcatEndToEndDim0Uint8Test)
{
- MergerDim0EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ ConcatDim0EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
}
-BOOST_AUTO_TEST_CASE(NeonMergerEndToEndDim1Test)
+BOOST_AUTO_TEST_CASE(NeonConcatEndToEndDim1Test)
{
- MergerDim1EndToEnd<armnn::DataType::Float32>(defaultBackends);
+ ConcatDim1EndToEnd<armnn::DataType::Float32>(defaultBackends);
}
-BOOST_AUTO_TEST_CASE(NeonMergerEndToEndDim1Uint8Test)
+BOOST_AUTO_TEST_CASE(NeonConcatEndToEndDim1Uint8Test)
{
- MergerDim1EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ ConcatDim1EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
}
-BOOST_AUTO_TEST_CASE(NeonMergerEndToEndDim3Test)
+BOOST_AUTO_TEST_CASE(NeonConcatEndToEndDim3Test)
{
- MergerDim3EndToEnd<armnn::DataType::Float32>(defaultBackends);
+ ConcatDim3EndToEnd<armnn::DataType::Float32>(defaultBackends);
}
-BOOST_AUTO_TEST_CASE(NeonMergerEndToEndDim3Uint8Test)
+BOOST_AUTO_TEST_CASE(NeonConcatEndToEndDim3Uint8Test)
{
- MergerDim3EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ ConcatDim3EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(NeonSplitDim0EndToEndTest)
diff --git a/src/backends/neon/workloads/NeonConcatWorkload.cpp b/src/backends/neon/workloads/NeonConcatWorkload.cpp
index 91f81090ce..8ea535b40a 100644
--- a/src/backends/neon/workloads/NeonConcatWorkload.cpp
+++ b/src/backends/neon/workloads/NeonConcatWorkload.cpp
@@ -19,7 +19,7 @@ using namespace armcomputetensorutils;
namespace
{
-size_t CalcAxis(const armnn::MergerDescriptor& desc)
+size_t CalcAxis(const armnn::OriginsDescriptor& desc)
{
return (desc.GetNumDimensions() - desc.GetConcatAxis()) - 1;
}
@@ -27,7 +27,7 @@ size_t CalcAxis(const armnn::MergerDescriptor& desc)
arm_compute::Status NeonConcatWorkloadValidate(const std::vector<const TensorInfo*>& inputs,
const TensorInfo& output,
- const MergerDescriptor& descriptor)
+ const OriginsDescriptor& descriptor)
{
std::vector<arm_compute::TensorInfo> aclInputs;
@@ -48,8 +48,8 @@ arm_compute::Status NeonConcatWorkloadValidate(const std::vector<const TensorInf
}
NeonConcatWorkload::NeonConcatWorkload(
-const MergerQueueDescriptor& descriptor, const WorkloadInfo& info)
- : BaseWorkload<MergerQueueDescriptor>(descriptor, info)
+const ConcatQueueDescriptor& descriptor, const WorkloadInfo& info)
+ : BaseWorkload<ConcatQueueDescriptor>(descriptor, info)
{
bool allInputsAreSubtensors = true;
@@ -58,7 +58,7 @@ const MergerQueueDescriptor& descriptor, const WorkloadInfo& info)
{
if (!input->GetParent())
{
- // Non sub-tensor input found so we need to execute the merger function
+ // Non sub-tensor input found, so we need to execute the concat function
allInputsAreSubtensors = false;
break;
}
@@ -66,7 +66,7 @@ const MergerQueueDescriptor& descriptor, const WorkloadInfo& info)
if (allInputsAreSubtensors)
{
- // Can skip configuring the merger function since it's not executed
+ // Can skip configuring the concat function since it's not executed
return;
}
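The early return is the sub-tensor optimisation: when every input handle is a sub-tensor carved out of the concat output, the inputs already alias the correct regions of the output buffer, so there is nothing to copy and no ACL function needs configuring. The check reduces to the sketch below, written against a hypothetical handle type with the same GetParent() contract as the workload's inputs.

    #include <vector>

    // Hypothetical helper mirroring the workload's loop: a null parent
    // means the input owns its own memory, so the concat must execute.
    template <typename Handle>
    bool AllInputsAreSubtensors(const std::vector<Handle*>& inputs)
    {
        for (const Handle* input : inputs)
        {
            if (!input->GetParent())
            {
                return false; // at least one standalone input: run the concat
            }
        }
        return true; // every input aliases the output: concat is a no-op
    }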
diff --git a/src/backends/neon/workloads/NeonConcatWorkload.hpp b/src/backends/neon/workloads/NeonConcatWorkload.hpp
index e5a8d15055..bf0733b431 100644
--- a/src/backends/neon/workloads/NeonConcatWorkload.hpp
+++ b/src/backends/neon/workloads/NeonConcatWorkload.hpp
@@ -17,14 +17,14 @@ namespace armnn
{
arm_compute::Status NeonConcatWorkloadValidate(const std::vector<const TensorInfo*>& inputs,
const TensorInfo& output,
- const MergerDescriptor& descriptor);
+ const OriginsDescriptor& descriptor);
-class NeonConcatWorkload : public BaseWorkload<MergerQueueDescriptor>
+class NeonConcatWorkload : public BaseWorkload<ConcatQueueDescriptor>
{
public:
- NeonConcatWorkload(const MergerQueueDescriptor& descriptor, const WorkloadInfo& info);
+ NeonConcatWorkload(const ConcatQueueDescriptor& descriptor, const WorkloadInfo& info);
- using BaseWorkload<MergerQueueDescriptor>::BaseWorkload;
+ using BaseWorkload<ConcatQueueDescriptor>::BaseWorkload;
void Execute() const override;
private:
diff --git a/src/backends/reference/RefLayerSupport.cpp b/src/backends/reference/RefLayerSupport.cpp
index 2adcb1099d..9a691a6fa7 100644
--- a/src/backends/reference/RefLayerSupport.cpp
+++ b/src/backends/reference/RefLayerSupport.cpp
@@ -316,18 +316,38 @@ bool RefLayerSupport::IsBatchToSpaceNdSupported(const TensorInfo& input,
bool RefLayerSupport::IsConcatSupported(const std::vector<const TensorInfo*> inputs,
const TensorInfo& output,
- const OriginsDescriptor& descriptor,
+ const ConcatDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported) const
{
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- return IsMergerSupported(inputs, output, descriptor, reasonIfUnsupported);
- ARMNN_NO_DEPRECATE_WARN_END
+ ignore_unused(descriptor);
+
+ bool supported = true;
+ std::array<DataType,3> supportedTypes =
+ {
+ DataType::Float32,
+ DataType::QuantisedAsymm8,
+ DataType::QuantisedSymm16
+ };
+
+ supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
+ "Reference concatenation: output type not supported");
+ for (const TensorInfo* input : inputs)
+ {
+ supported &= CheckSupportRule(TypeAnyOf(*input, supportedTypes), reasonIfUnsupported,
+ "Reference concatenation: input type not supported");
+
+ supported &= CheckSupportRule(TypesAreEqual(*input, output), reasonIfUnsupported,
+ "Reference concatenation: input and output types mismatched.");
+ }
+
+ return supported;
}
bool RefLayerSupport::IsConstantSupported(const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported) const
{
- std::array<DataType,4> supportedTypes = {
+ std::array<DataType,4> supportedTypes =
+ {
DataType::Float32,
DataType::Signed32,
DataType::QuantisedAsymm8,
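The rewritten IsConcatSupported at the top of this hunk replaces the old deprecation shim with the rule-based pattern used elsewhere in this file: each failed CheckSupportRule appends its message to reasonIfUnsupported, so a caller collects every violated rule in one pass rather than stopping at the first. A hypothetical caller, using only signatures visible in this patch and assuming the usual armnn headers (the OriginsDescriptor constructor arguments are an assumption):

    std::string reason;
    armnn::RefLayerSupport layerSupport;
    armnn::TensorInfo in0({ 2, 3, 2, 5 }, armnn::DataType::Float32);
    armnn::TensorInfo in1({ 2, 3, 2, 5 }, armnn::DataType::Float32);
    armnn::TensorInfo out({ 4, 3, 2, 5 }, armnn::DataType::Float32);
    std::vector<const armnn::TensorInfo*> inputs = { &in0, &in1 };
    armnn::OriginsDescriptor descriptor(2, 4); // numViews, numDimensions
    if (!layerSupport.IsConcatSupported(inputs, out, descriptor,
                                        armnn::Optional<std::string&>(reason)))
    {
        // reason now holds the accumulated failure messages.
    }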
@@ -815,31 +835,10 @@ bool RefLayerSupport::IsMeanSupported(const TensorInfo& input,
bool RefLayerSupport::IsMergerSupported(const std::vector<const TensorInfo*> inputs,
const TensorInfo& output,
- const OriginsDescriptor& descriptor,
+ const MergerDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported) const
{
- ignore_unused(descriptor);
-
- bool supported = true;
- std::array<DataType,3> supportedTypes =
- {
- DataType::Float32,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
- };
-
- supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
- "Reference concatenation: output type not supported");
- for (const TensorInfo* input : inputs)
- {
- supported &= CheckSupportRule(TypeAnyOf(*input, supportedTypes), reasonIfUnsupported,
- "Reference concatenation: input type not supported");
-
- supported &= CheckSupportRule(TypesAreEqual(*input, output), reasonIfUnsupported,
- "Reference concatenation: input and output types mismatched.");
- }
-
- return supported;
+ return IsConcatSupported(inputs, output, descriptor, reasonIfUnsupported);
}
bool RefLayerSupport::IsMemCopySupported(const TensorInfo &input,
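Note that the forwarding direction has flipped: the deprecated IsMergerSupported is now the one-line wrapper and IsConcatSupported owns the logic. The call compiles without any conversion because both descriptor names are assumed to alias the same type, roughly:

    // Assumed aliases (in the spirit of armnn's Descriptors.hpp):
    using ConcatDescriptor = OriginsDescriptor;
    using MergerDescriptor = OriginsDescriptor; // deprecated spelling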
diff --git a/src/backends/reference/RefLayerSupport.hpp b/src/backends/reference/RefLayerSupport.hpp
index 944061d5a6..8850c6e105 100644
--- a/src/backends/reference/RefLayerSupport.hpp
+++ b/src/backends/reference/RefLayerSupport.hpp
@@ -38,7 +38,7 @@ public:
bool IsConcatSupported(const std::vector<const TensorInfo*> inputs,
const TensorInfo& output,
- const OriginsDescriptor& descriptor,
+ const ConcatDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
bool IsConstantSupported(const TensorInfo& output,
@@ -170,7 +170,7 @@ public:
ARMNN_DEPRECATED_MSG("Use IsConcatSupported instead")
bool IsMergerSupported(const std::vector<const TensorInfo*> inputs,
const TensorInfo& output,
- const OriginsDescriptor& descriptor,
+ const MergerDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
bool IsMemCopySupported(const TensorInfo& input,
diff --git a/src/backends/reference/RefWorkloadFactory.cpp b/src/backends/reference/RefWorkloadFactory.cpp
index 1243328852..a21becdb13 100644
--- a/src/backends/reference/RefWorkloadFactory.cpp
+++ b/src/backends/reference/RefWorkloadFactory.cpp
@@ -245,7 +245,7 @@ std::unique_ptr<IWorkload> RefWorkloadFactory::CreateL2Normalization(const L2Nor
return MakeWorkload<RefL2NormalizationFloat32Workload, NullWorkload>(descriptor, info);
}
-std::unique_ptr<armnn::IWorkload> RefWorkloadFactory::CreateConcat(const MergerQueueDescriptor& descriptor,
+std::unique_ptr<armnn::IWorkload> RefWorkloadFactory::CreateConcat(const ConcatQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
if (IsFloat16(info))
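The hunk ends inside CreateConcat's body. By analogy with the L2Normalization factory method above, the Ref idiom is to branch on the tensor type carried in WorkloadInfo; the following is a hedged reconstruction of the shape of the method, not its literal body.

    #include <memory>

    // Sketch only: assumes Float16 concat is rejected and all other
    // supported types share the single RefConcatWorkload.
    std::unique_ptr<armnn::IWorkload> CreateConcatSketch(
        const armnn::ConcatQueueDescriptor& descriptor,
        const armnn::WorkloadInfo& info,
        bool isFloat16) // stand-in for the factory's IsFloat16(info) check
    {
        if (isFloat16)
        {
            return nullptr; // the real factory returns a NullWorkload here (assumed)
        }
        return std::make_unique<armnn::RefConcatWorkload>(descriptor, info);
    }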
diff --git a/src/backends/reference/RefWorkloadFactory.hpp b/src/backends/reference/RefWorkloadFactory.hpp
index 985b634d77..78f6bab92c 100644
--- a/src/backends/reference/RefWorkloadFactory.hpp
+++ b/src/backends/reference/RefWorkloadFactory.hpp
@@ -115,7 +115,7 @@ public:
std::unique_ptr<IWorkload> CreateL2Normalization(const L2NormalizationQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
- std::unique_ptr<IWorkload> CreateConcat(const MergerQueueDescriptor& descriptor,
+ std::unique_ptr<IWorkload> CreateConcat(const ConcatQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
std::unique_ptr<IWorkload> CreateConstant(const ConstantQueueDescriptor& descriptor,
diff --git a/src/backends/reference/backend.mk b/src/backends/reference/backend.mk
index 1c7f8dc22c..9a4cf146c6 100644
--- a/src/backends/reference/backend.mk
+++ b/src/backends/reference/backend.mk
@@ -21,7 +21,7 @@ BACKEND_SOURCES := \
workloads/FullyConnected.cpp \
workloads/Gather.cpp \
workloads/Mean.cpp \
- workloads/Merger.cpp \
+ workloads/Concatenate.cpp \
workloads/Pad.cpp \
workloads/Pooling2d.cpp \
workloads/RefActivationWorkload.cpp \
diff --git a/src/backends/reference/test/RefCreateWorkloadTests.cpp b/src/backends/reference/test/RefCreateWorkloadTests.cpp
index 3f4cc75fea..a96d656d9b 100644
--- a/src/backends/reference/test/RefCreateWorkloadTests.cpp
+++ b/src/backends/reference/test/RefCreateWorkloadTests.cpp
@@ -473,28 +473,28 @@ BOOST_AUTO_TEST_CASE(CreateSplitterUint8Workload)
RefCreateSplitterWorkloadTest<RefSplitterUint8Workload, armnn::DataType::QuantisedAsymm8>();
}
-template <typename SplitterWorkloadType, typename MergerWorkloadType, armnn::DataType DataType>
-static void RefCreateSplitterMergerWorkloadTest()
+template <typename SplitterWorkloadType, typename ConcatWorkloadType, armnn::DataType DataType>
+static void RefCreateSplitterConcatWorkloadTest()
{
// Tests that it is possible to decide which output of the splitter layer
- // should be lined to which input of the merger layer.
+ // should be linked to which input of the concat layer.
// We tested that it is possible to specify 0th output
- // of the splitter to be the 1st input to the merger and the 1st output of the splitter to be 0th input
- // of the merger.
+ // of the splitter to be the 1st input to the concat and the 1st output of the splitter to be 0th input
+ // of the concat.
Graph graph;
RefWorkloadFactory factory;
- auto workloads = CreateSplitterMergerWorkloadTest<SplitterWorkloadType, MergerWorkloadType, DataType>
- (factory, graph);
+ auto workloads = CreateSplitterConcatWorkloadTest<SplitterWorkloadType, ConcatWorkloadType, DataType>
+ (factory, graph);
auto wlSplitter = std::move(workloads.first);
- auto wlMerger = std::move(workloads.second);
+ auto wlConcat = std::move(workloads.second);
//Checks that the index of inputs/outputs matches what we declared on InputDescriptor construction.
armnn::CpuTensorHandle* sOut0 = dynamic_cast<armnn::CpuTensorHandle*>(wlSplitter->GetData().m_Outputs[0]);
armnn::CpuTensorHandle* sOut1 = dynamic_cast<armnn::CpuTensorHandle*>(wlSplitter->GetData().m_Outputs[1]);
- armnn::CpuTensorHandle* mIn0 = dynamic_cast<armnn::CpuTensorHandle*>(wlMerger->GetData().m_Inputs[0]);
- armnn::CpuTensorHandle* mIn1 = dynamic_cast<armnn::CpuTensorHandle*>(wlMerger->GetData().m_Inputs[1]);
+ armnn::CpuTensorHandle* mIn0 = dynamic_cast<armnn::CpuTensorHandle*>(wlConcat->GetData().m_Inputs[0]);
+ armnn::CpuTensorHandle* mIn1 = dynamic_cast<armnn::CpuTensorHandle*>(wlConcat->GetData().m_Inputs[1]);
BOOST_TEST(sOut0);
BOOST_TEST(sOut1);
@@ -506,14 +506,14 @@ static void RefCreateSplitterMergerWorkloadTest()
BOOST_TEST(validDataPointers);
}
-BOOST_AUTO_TEST_CASE(CreateSplitterMergerFloat32)
+BOOST_AUTO_TEST_CASE(CreateSplitterConcatFloat32)
{
- RefCreateSplitterMergerWorkloadTest<RefSplitterFloat32Workload, RefConcatWorkload, DataType::Float32>();
+ RefCreateSplitterConcatWorkloadTest<RefSplitterFloat32Workload, RefConcatWorkload, DataType::Float32>();
}
-BOOST_AUTO_TEST_CASE(CreateSplitterMergerUint8)
+BOOST_AUTO_TEST_CASE(CreateSplitterConcatUint8)
{
- RefCreateSplitterMergerWorkloadTest<RefSplitterUint8Workload, RefConcatWorkload, DataType::QuantisedAsymm8>();
+ RefCreateSplitterConcatWorkloadTest<RefSplitterUint8Workload, RefConcatWorkload, DataType::QuantisedAsymm8>();
}
template <typename SplitterWorkloadType, typename ActivationWorkloadType, armnn::DataType DataType>
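The BOOST_TEST assertions that follow the dynamic_casts sit outside this hunk, but given the comment at the top of the helper they are assumed to verify the deliberate cross-wiring, i.e. something like:

    // Assumed form of the validDataPointers expression checked above:
    // splitter output 0 must feed concat input 1, and vice versa.
    bool validDataPointers = (sOut0 == mIn1) && (sOut1 == mIn0);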
@@ -671,13 +671,13 @@ BOOST_AUTO_TEST_CASE(CreateReshapeUint8Workload)
RefCreateReshapeWorkloadTest<RefReshapeWorkload, armnn::DataType::QuantisedAsymm8>();
}
-template <typename MergerWorkloadType, armnn::DataType DataType>
-static void RefCreateMergerWorkloadTest(const armnn::TensorShape& outputShape,
+template <typename ConcatWorkloadType, armnn::DataType DataType>
+static void RefCreateConcatWorkloadTest(const armnn::TensorShape& outputShape,
unsigned int concatAxis)
{
Graph graph;
RefWorkloadFactory factory;
- auto workload = CreateMergerWorkloadTest<MergerWorkloadType, DataType>(factory, graph, outputShape, concatAxis);
+ auto workload = CreateConcatWorkloadTest<ConcatWorkloadType, DataType>(factory, graph, outputShape, concatAxis);
CheckInputsOutput(std::move(workload),
TensorInfo({ 2, 3, 2, 5 }, DataType),
@@ -685,49 +685,49 @@ static void RefCreateMergerWorkloadTest(const armnn::TensorShape& outputShape,
TensorInfo(outputShape, DataType));
}
-BOOST_AUTO_TEST_CASE(CreateMergerDim0Float32Workload)
+BOOST_AUTO_TEST_CASE(CreateConcatDim0Float32Workload)
{
- RefCreateMergerWorkloadTest<RefConcatWorkload, armnn::DataType::Float32>({ 4, 3, 2, 5 }, 0);
+ RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::Float32>({ 4, 3, 2, 5 }, 0);
}
-BOOST_AUTO_TEST_CASE(CreateMergerDim0Uint8Workload)
+BOOST_AUTO_TEST_CASE(CreateConcatDim0Uint8Workload)
{
- RefCreateMergerWorkloadTest<RefConcatWorkload, armnn::DataType::QuantisedAsymm8>({ 4, 3, 2, 5 }, 0);
+ RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::QuantisedAsymm8>({ 4, 3, 2, 5 }, 0);
}
-BOOST_AUTO_TEST_CASE(CreateMergerDim0Uint16Workload)
+BOOST_AUTO_TEST_CASE(CreateConcatDim0Uint16Workload)
{
- RefCreateMergerWorkloadTest<RefConcatWorkload, armnn::DataType::QuantisedSymm16>({ 4, 3, 2, 5 }, 0);
+ RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::QuantisedSymm16>({ 4, 3, 2, 5 }, 0);
}
-BOOST_AUTO_TEST_CASE(CreateMergerDim1Float32Workload)
+BOOST_AUTO_TEST_CASE(CreateConcatDim1Float32Workload)
{
- RefCreateMergerWorkloadTest<RefConcatWorkload, armnn::DataType::Float32>({ 2, 6, 2, 5 }, 1);
+ RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::Float32>({ 2, 6, 2, 5 }, 1);
}
-BOOST_AUTO_TEST_CASE(CreateMergerDim1Uint8Workload)
+BOOST_AUTO_TEST_CASE(CreateConcatDim1Uint8Workload)
{
- RefCreateMergerWorkloadTest<RefConcatWorkload, armnn::DataType::QuantisedAsymm8>({ 2, 6, 2, 5 }, 1);
+ RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::QuantisedAsymm8>({ 2, 6, 2, 5 }, 1);
}
-BOOST_AUTO_TEST_CASE(CreateMergerDim2Float32Workload)
+BOOST_AUTO_TEST_CASE(CreateConcatDim2Float32Workload)
{
- RefCreateMergerWorkloadTest<RefConcatWorkload, armnn::DataType::Float32>({ 2, 3, 4, 5 }, 2);
+ RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::Float32>({ 2, 3, 4, 5 }, 2);
}
-BOOST_AUTO_TEST_CASE(CreateMergerDim2Uint8Workload)
+BOOST_AUTO_TEST_CASE(CreateConcatDim2Uint8Workload)
{
- RefCreateMergerWorkloadTest<RefConcatWorkload, armnn::DataType::QuantisedAsymm8>({ 2, 3, 4, 5 }, 2);
+ RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::QuantisedAsymm8>({ 2, 3, 4, 5 }, 2);
}
-BOOST_AUTO_TEST_CASE(CreateMergerDim3Float32Workload)
+BOOST_AUTO_TEST_CASE(CreateConcatDim3Float32Workload)
{
- RefCreateMergerWorkloadTest<RefConcatWorkload, armnn::DataType::Float32>({ 2, 3, 2, 10 }, 3);
+ RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::Float32>({ 2, 3, 2, 10 }, 3);
}
-BOOST_AUTO_TEST_CASE(CreateMergerDim3Uint8Workload)
+BOOST_AUTO_TEST_CASE(CreateConcatDim3Uint8Workload)
{
- RefCreateMergerWorkloadTest<RefConcatWorkload, armnn::DataType::QuantisedAsymm8>({ 2, 3, 2, 10 }, 3);
+ RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::QuantisedAsymm8>({ 2, 3, 2, 10 }, 3);
}
template <typename ConstantWorkloadType, armnn::DataType DataType>
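Every case above concatenates two { 2, 3, 2, 5 } inputs (see the TensorInfo in the helper), so the expected output simply doubles the chosen axis: { 4, 3, 2, 5 }, { 2, 6, 2, 5 }, { 2, 3, 4, 5 } and { 2, 3, 2, 10 } for axes 0 through 3. A throwaway helper expressing that rule (hypothetical, not part of the patch):

    #include <vector>

    armnn::TensorShape ExpectedConcatShape(const armnn::TensorShape& input,
                                           unsigned int concatAxis)
    {
        std::vector<unsigned int> dims;
        for (unsigned int i = 0; i < input.GetNumDimensions(); ++i)
        {
            // The concatenated axis doubles for two identical inputs.
            dims.push_back(i == concatAxis ? 2 * input[i] : input[i]);
        }
        return armnn::TensorShape(input.GetNumDimensions(), dims.data());
    }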
diff --git a/src/backends/reference/test/RefEndToEndTests.cpp b/src/backends/reference/test/RefEndToEndTests.cpp
index 6dacfab4d1..2b7fb774b5 100644
--- a/src/backends/reference/test/RefEndToEndTests.cpp
+++ b/src/backends/reference/test/RefEndToEndTests.cpp
@@ -7,7 +7,7 @@
#include <backendsCommon/test/DetectionPostProcessTestImpl.hpp>
#include <backendsCommon/test/GatherEndToEndTestImpl.hpp>
-#include <backendsCommon/test/MergerTestImpl.hpp>
+#include <backendsCommon/test/ConcatTestImpl.hpp>
#include <backendsCommon/test/ArithmeticTestImpl.hpp>
#include <backendsCommon/test/SplitterEndToEndTestImpl.hpp>
@@ -396,44 +396,44 @@ BOOST_AUTO_TEST_CASE(RefGreaterBroadcastEndToEndUint8Test)
expectedOutput);
}
-BOOST_AUTO_TEST_CASE(RefMergerEndToEndDim0Test)
+BOOST_AUTO_TEST_CASE(RefConcatEndToEndDim0Test)
{
- MergerDim0EndToEnd<armnn::DataType::Float32>(defaultBackends);
+ ConcatDim0EndToEnd<armnn::DataType::Float32>(defaultBackends);
}
-BOOST_AUTO_TEST_CASE(RefMergerEndToEndDim0Uint8Test)
+BOOST_AUTO_TEST_CASE(RefConcatEndToEndDim0Uint8Test)
{
- MergerDim0EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ ConcatDim0EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
}
-BOOST_AUTO_TEST_CASE(RefMergerEndToEndDim1Test)
+BOOST_AUTO_TEST_CASE(RefConcatEndToEndDim1Test)
{
- MergerDim1EndToEnd<armnn::DataType::Float32>(defaultBackends);
+ ConcatDim1EndToEnd<armnn::DataType::Float32>(defaultBackends);
}
-BOOST_AUTO_TEST_CASE(RefMergerEndToEndDim1Uint8Test)
+BOOST_AUTO_TEST_CASE(RefConcatEndToEndDim1Uint8Test)
{
- MergerDim1EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ ConcatDim1EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
}
-BOOST_AUTO_TEST_CASE(RefMergerEndToEndDim2Test)
+BOOST_AUTO_TEST_CASE(RefConcatEndToEndDim2Test)
{
- MergerDim2EndToEnd<armnn::DataType::Float32>(defaultBackends);
+ ConcatDim2EndToEnd<armnn::DataType::Float32>(defaultBackends);
}
-BOOST_AUTO_TEST_CASE(RefMergerEndToEndDim2Uint8Test)
+BOOST_AUTO_TEST_CASE(RefConcatEndToEndDim2Uint8Test)
{
- MergerDim2EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ ConcatDim2EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
}
-BOOST_AUTO_TEST_CASE(RefMergerEndToEndDim3Test)
+BOOST_AUTO_TEST_CASE(RefConcatEndToEndDim3Test)
{
- MergerDim3EndToEnd<armnn::DataType::Float32>(defaultBackends);
+ ConcatDim3EndToEnd<armnn::DataType::Float32>(defaultBackends);
}
-BOOST_AUTO_TEST_CASE(RefMergerEndToEndDim3Uint8Test)
+BOOST_AUTO_TEST_CASE(RefConcatEndToEndDim3Uint8Test)
{
- MergerDim3EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ ConcatDim3EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(RefGatherFloatTest)
diff --git a/src/backends/reference/workloads/CMakeLists.txt b/src/backends/reference/workloads/CMakeLists.txt
index 508dfdc293..3db0314346 100644
--- a/src/backends/reference/workloads/CMakeLists.txt
+++ b/src/backends/reference/workloads/CMakeLists.txt
@@ -28,8 +28,8 @@ list(APPEND armnnRefBackendWorkloads_sources
Gather.hpp
LstmUtils.hpp
Maximum.hpp
- Merger.hpp
- Merger.cpp
+ Concatenate.hpp
+ Concatenate.cpp
Minimum.hpp
Pad.cpp
Pad.hpp
diff --git a/src/backends/reference/workloads/Merger.cpp b/src/backends/reference/workloads/Concatenate.cpp
index e0b70ee5cb..bb55424c0c 100644
--- a/src/backends/reference/workloads/Merger.cpp
+++ b/src/backends/reference/workloads/Concatenate.cpp
@@ -3,7 +3,7 @@
// SPDX-License-Identifier: MIT
//
-#include "Merger.hpp"
+#include "Concatenate.hpp"
#include "RefWorkloadUtils.hpp"
#include "Decoders.hpp"
#include "Encoders.hpp"
@@ -11,7 +11,7 @@
namespace armnn
{
-void Merger(const MergerQueueDescriptor& data)
+void Concatenate(const ConcatQueueDescriptor& data)
{
const TensorInfo& outputInfo0 = GetTensorInfo(data.m_Outputs[0]);
@@ -34,7 +34,7 @@ void Merger(const MergerQueueDescriptor& data)
for (unsigned int viewIdx = 0; viewIdx < data.m_ViewOrigins.size(); ++viewIdx)
{
- MergerQueueDescriptor::ViewOrigin const& view = data.m_ViewOrigins[viewIdx];
+ ConcatQueueDescriptor::ViewOrigin const& view = data.m_ViewOrigins[viewIdx];
//Split view extents are defined by the size of (the corresponding) input tensor.
const TensorInfo& inputInfo = GetTensorInfo(data.m_Inputs[viewIdx]);
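Each ViewOrigin records where an input view begins inside the output tensor, and the view's extent is simply the corresponding input's shape, as the comment notes. For a concrete picture:

    // ConcatDim0 of two { 2, 3 } inputs into a { 4, 3 } output:
    //   view 0 origin { 0, 0 }  -> rows 0-1 of the output
    //   view 1 origin { 2, 0 }  -> rows 2-3 of the output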
diff --git a/src/backends/reference/workloads/Merger.hpp b/src/backends/reference/workloads/Concatenate.hpp
index eaa154d25a..ac82a87af3 100644
--- a/src/backends/reference/workloads/Merger.hpp
+++ b/src/backends/reference/workloads/Concatenate.hpp
@@ -10,5 +10,5 @@
namespace armnn
{
-void Merger(const MergerQueueDescriptor& data);
+void Concatenate(const ConcatQueueDescriptor& data);
} //namespace armnn
diff --git a/src/backends/reference/workloads/RefConcatWorkload.cpp b/src/backends/reference/workloads/RefConcatWorkload.cpp
index 9abddc0ff8..152eae93b3 100644
--- a/src/backends/reference/workloads/RefConcatWorkload.cpp
+++ b/src/backends/reference/workloads/RefConcatWorkload.cpp
@@ -5,7 +5,7 @@
#include "RefConcatWorkload.hpp"
-#include "Merger.hpp"
+#include "Concatenate.hpp"
#include "Profiling.hpp"
@@ -15,7 +15,7 @@ namespace armnn
void RefConcatWorkload::Execute() const
{
ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefConcatWorkload_Execute");
- Merger(m_Data);
+ Concatenate(m_Data);
}
} //namespace armnn
diff --git a/src/backends/reference/workloads/RefConcatWorkload.hpp b/src/backends/reference/workloads/RefConcatWorkload.hpp
index 9fc9c7ef7e..7d0b6b7cd1 100644
--- a/src/backends/reference/workloads/RefConcatWorkload.hpp
+++ b/src/backends/reference/workloads/RefConcatWorkload.hpp
@@ -11,10 +11,10 @@
namespace armnn
{
-class RefConcatWorkload : public BaseWorkload<MergerQueueDescriptor>
+class RefConcatWorkload : public BaseWorkload<ConcatQueueDescriptor>
{
public:
- using BaseWorkload<MergerQueueDescriptor>::BaseWorkload;
+ using BaseWorkload<ConcatQueueDescriptor>::BaseWorkload;
virtual void Execute() const override;
};
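Unlike the Neon workload, the Ref class declares no constructor of its own; the using-declaration inherits BaseWorkload's, so the rename only has to touch the descriptor type. A minimal, self-contained illustration of the inherited-constructor idiom (sketch types, not armnn's):

    template <typename QueueDescriptor>
    struct BaseWorkloadSketch
    {
        explicit BaseWorkloadSketch(const QueueDescriptor& data) : m_Data(data) {}
        QueueDescriptor m_Data;
    };

    struct ConcatQueueDescriptorSketch {};

    struct RefConcatWorkloadSketch : BaseWorkloadSketch<ConcatQueueDescriptorSketch>
    {
        // Inherit the base constructor instead of restating it.
        using BaseWorkloadSketch<ConcatQueueDescriptorSketch>::BaseWorkloadSketch;
    };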
diff --git a/src/backends/reference/workloads/RefWorkloads.hpp b/src/backends/reference/workloads/RefWorkloads.hpp
index 20649d93ce..6ffec2bd06 100644
--- a/src/backends/reference/workloads/RefWorkloads.hpp
+++ b/src/backends/reference/workloads/RefWorkloads.hpp
@@ -38,7 +38,7 @@
#include "RefPooling2dUint8Workload.hpp"
#include "BatchNormImpl.hpp"
#include "Activation.hpp"
-#include "Merger.hpp"
+#include "Concatenate.hpp"
#include "RefSpaceToBatchNdWorkload.hpp"
#include "RefSplitterFloat32Workload.hpp"
#include "RefStridedSliceWorkload.hpp"