author     Jim Flynn <jim.flynn@arm.com>    2019-05-22 14:24:13 +0100
committer  Jim Flynn <jim.flynn@arm.com>    2019-05-28 17:50:33 +0100
commit     e242f2dc646f41e9162aaaf74e057ce39fcb92df (patch)
tree       d6c49b559c34d1d306b1e901501dded1c18f71c5 /src/backends/neon
parent     2f2778f36e59537bbd47fb8b21e73c6c5a949584 (diff)
download   armnn-e242f2dc646f41e9162aaaf74e057ce39fcb92df.tar.gz
IVGCVSW-3119 Rename MergerLayer to ConcatLayer
!android-nn-driver:1210

Change-Id: I940b3b9e421c92bfd55ae996f7bc54ac077f2604
Signed-off-by: Jim Flynn <jim.flynn@arm.com>
Diffstat (limited to 'src/backends/neon')
-rw-r--r--  src/backends/neon/NeonLayerSupport.cpp              72
-rw-r--r--  src/backends/neon/NeonLayerSupport.hpp                4
-rw-r--r--  src/backends/neon/NeonWorkloadFactory.cpp             2
-rw-r--r--  src/backends/neon/NeonWorkloadFactory.hpp             2
-rw-r--r--  src/backends/neon/test/NeonCreateWorkloadTests.cpp   48
-rw-r--r--  src/backends/neon/test/NeonEndToEndTests.cpp         26
-rw-r--r--  src/backends/neon/workloads/NeonConcatWorkload.cpp   12
-rw-r--r--  src/backends/neon/workloads/NeonConcatWorkload.hpp    8
8 files changed, 86 insertions, 88 deletions
diff --git a/src/backends/neon/NeonLayerSupport.cpp b/src/backends/neon/NeonLayerSupport.cpp
index fd9aac5bc5..e84eb799fc 100644
--- a/src/backends/neon/NeonLayerSupport.cpp
+++ b/src/backends/neon/NeonLayerSupport.cpp
@@ -146,12 +146,41 @@ bool NeonLayerSupport::IsBatchNormalizationSupported(const TensorInfo& input,
bool NeonLayerSupport::IsConcatSupported(const std::vector<const TensorInfo*> inputs,
const TensorInfo& output,
- const OriginsDescriptor& descriptor,
+ const ConcatDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported) const
{
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- return IsMergerSupported(inputs, output, descriptor, reasonIfUnsupported);
- ARMNN_NO_DEPRECATE_WARN_END
+ if (descriptor.GetNumDimensions() <= descriptor.GetConcatAxis())
+ {
+ SetValueChecked(reasonIfUnsupported, "Neon Concat: Concat axis > Number of dimensions.");
+ return false;
+ }
+
+ unsigned int concatInnerAxis = (descriptor.GetNumDimensions() - descriptor.GetConcatAxis()) - 1;
+ if(concatInnerAxis < 3) // Width, height, or channels
+ {
+ FORWARD_WORKLOAD_VALIDATE_FUNC(NeonConcatWorkloadValidate,
+ reasonIfUnsupported,
+ inputs,
+ output,
+ descriptor);
+ }
+ else if (concatInnerAxis == 3)
+ {
+ for (auto& input : inputs)
+ {
+ if (input && !output.IsTypeSpaceMatch(*input)) // Cannot use sub-tensors if the types are not same space
+ {
+ SetValueChecked(reasonIfUnsupported, "Neon Concat: Types and quantization parameters must match.");
+ return false;
+ }
+ }
+ return true; // Sub-tensors support concat along batch
+ }
+ else // > 4 dimensions not supported.
+ {
+ SetValueChecked(reasonIfUnsupported, "Neon Concat: Maximum of 4 dimensions supported.");
+ return false;
+ }
}
bool NeonLayerSupport::IsConstantSupported(const TensorInfo& output,
@@ -326,41 +355,10 @@ bool NeonLayerSupport::IsMemCopySupported(const TensorInfo &input,
bool NeonLayerSupport::IsMergerSupported(const std::vector<const TensorInfo*> inputs,
const TensorInfo& output,
- const OriginsDescriptor& descriptor,
+ const MergerDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported) const
{
- if (descriptor.GetNumDimensions() <= descriptor.GetConcatAxis())
- {
- SetValueChecked(reasonIfUnsupported, "Neon Merger: Concat axis > Number of dimensions.");
- return false;
- }
-
- unsigned int concatInnerAxis = (descriptor.GetNumDimensions() - descriptor.GetConcatAxis()) - 1;
- if(concatInnerAxis < 3) // Width, height, or channels
- {
- FORWARD_WORKLOAD_VALIDATE_FUNC(NeonConcatWorkloadValidate,
- reasonIfUnsupported,
- inputs,
- output,
- descriptor);
- }
- else if (concatInnerAxis == 3)
- {
- for (auto& input : inputs)
- {
- if (input && !output.IsTypeSpaceMatch(*input)) // Cannot use sub-tensors if the types are not same space
- {
- SetValueChecked(reasonIfUnsupported, "Neon Merger: Types and quantization parameters must match.");
- return false;
- }
- }
- return true; // Sub-tensors support concat along batch
- }
- else // > 4 dimensions not supported.
- {
- SetValueChecked(reasonIfUnsupported, "Neon Merger: Maximum of 4 dimensions supported.");
- return false;
- }
+ return IsConcatSupported(inputs, output, descriptor, reasonIfUnsupported);
}
bool NeonLayerSupport::IsMinimumSupported(const TensorInfo& input0,
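[Editor's note] The new IsConcatSupported body above reduces to one piece of arithmetic: the concat axis is mirrored into Arm Compute Library's innermost-first dimension ordering, and the mirrored value picks one of three paths. The standalone sketch below restates that branch structure for illustration only; it is not Arm NN code, and CheckConcatAxis with its plain integer parameters is a hypothetical stand-in for the descriptor calls used in the patch.

    #include <cstdio>

    // Illustrative restatement of the branch structure in IsConcatSupported.
    // concatInnerAxis mirrors (GetNumDimensions() - GetConcatAxis()) - 1.
    bool CheckConcatAxis(unsigned int numDimensions, unsigned int concatAxis)
    {
        if (numDimensions <= concatAxis)
        {
            std::puts("unsupported: concat axis >= number of dimensions");
            return false;
        }

        unsigned int concatInnerAxis = (numDimensions - concatAxis) - 1;
        if (concatInnerAxis < 3)
        {
            // Width, height or channels: the patch forwards to
            // NeonConcatWorkloadValidate at this point.
            return true;
        }
        if (concatInnerAxis == 3)
        {
            // Batch axis: supported through sub-tensors, provided every input
            // matches the output's type and quantization space.
            return true;
        }

        std::puts("unsupported: more than 4 dimensions");
        return false;
    }

    int main()
    {
        // 4D tensor, concatenating along channels (axis 1): inner axis 2, supported.
        std::printf("%d\n", CheckConcatAxis(4, 1));
        // Concat axis out of range for a 4D tensor: rejected.
        std::printf("%d\n", CheckConcatAxis(4, 4));
        return 0;
    }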
diff --git a/src/backends/neon/NeonLayerSupport.hpp b/src/backends/neon/NeonLayerSupport.hpp
index 5e8e0bdbed..dd6ed79c9a 100644
--- a/src/backends/neon/NeonLayerSupport.hpp
+++ b/src/backends/neon/NeonLayerSupport.hpp
@@ -33,7 +33,7 @@ public:
bool IsConcatSupported(const std::vector<const TensorInfo*> inputs,
const TensorInfo& output,
- const OriginsDescriptor& descriptor,
+ const ConcatDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
bool IsConstantSupported(const TensorInfo& output,
@@ -109,7 +109,7 @@ public:
ARMNN_DEPRECATED_MSG("Use IsConcatSupported instead")
bool IsMergerSupported(const std::vector<const TensorInfo*> inputs,
const TensorInfo& output,
- const OriginsDescriptor& descriptor,
+ const MergerDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
bool IsMinimumSupported(const TensorInfo& input0,
diff --git a/src/backends/neon/NeonWorkloadFactory.cpp b/src/backends/neon/NeonWorkloadFactory.cpp
index 3005dae94c..4b6225f67b 100644
--- a/src/backends/neon/NeonWorkloadFactory.cpp
+++ b/src/backends/neon/NeonWorkloadFactory.cpp
@@ -233,7 +233,7 @@ std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateL2Normalization(const L2No
m_MemoryManager->GetIntraLayerManager());
}
-std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateConcat(const MergerQueueDescriptor& descriptor,
+std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateConcat(const ConcatQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
return std::make_unique<NeonConcatWorkload>(descriptor, info);
diff --git a/src/backends/neon/NeonWorkloadFactory.hpp b/src/backends/neon/NeonWorkloadFactory.hpp
index 60dbb90b60..6a28d12326 100644
--- a/src/backends/neon/NeonWorkloadFactory.hpp
+++ b/src/backends/neon/NeonWorkloadFactory.hpp
@@ -98,7 +98,7 @@ public:
std::unique_ptr<IWorkload> CreateL2Normalization(const L2NormalizationQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
- std::unique_ptr<IWorkload> CreateConcat(const MergerQueueDescriptor& descriptor,
+ std::unique_ptr<IWorkload> CreateConcat(const ConcatQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
std::unique_ptr<IWorkload> CreateConstant(const ConstantQueueDescriptor& descriptor,
diff --git a/src/backends/neon/test/NeonCreateWorkloadTests.cpp b/src/backends/neon/test/NeonCreateWorkloadTests.cpp
index b41d62f7eb..83823659b0 100644
--- a/src/backends/neon/test/NeonCreateWorkloadTests.cpp
+++ b/src/backends/neon/test/NeonCreateWorkloadTests.cpp
@@ -504,30 +504,30 @@ BOOST_AUTO_TEST_CASE(CreateSplitterWorkload)
BOOST_TEST(TestNeonTensorHandleInfo(outputHandle2, TensorInfo({2, 7, 7}, DataType::Float32)));
}
-BOOST_AUTO_TEST_CASE(CreateSplitterMerger)
+BOOST_AUTO_TEST_CASE(CreateSplitterConcat)
{
// Tests that it is possible to decide which output of the splitter layer
- // should be lined to which input of the merger layer.
+ // should be lined to which input of the concat layer.
// We tested that is is possible to specify 0th output
- // of the splitter to be the 1st input to the merger, and the 1st output of the splitter to be 0th input
- // of the merger.
+ // of the splitter to be the 1st input to the concat, and the 1st output of the splitter to be 0th input
+ // of the concat.
Graph graph;
NeonWorkloadFactory factory =
NeonWorkloadFactoryHelper::GetFactory(NeonWorkloadFactoryHelper::GetMemoryManager());
auto workloads =
- CreateSplitterMergerWorkloadTest<NeonSplitterWorkload, NeonConcatWorkload,
+ CreateSplitterConcatWorkloadTest<NeonSplitterWorkload, NeonConcatWorkload,
DataType::Float32>(factory, graph);
auto wlSplitter = std::move(workloads.first);
- auto wlMerger = std::move(workloads.second);
+ auto wlConcat = std::move(workloads.second);
//Checks that the index of inputs/outputs matches what we declared on InputDescriptor construction.
armnn::INeonTensorHandle* sOut0 = dynamic_cast<armnn::INeonTensorHandle*>(wlSplitter->GetData().m_Outputs[0]);
armnn::INeonTensorHandle* sOut1 = dynamic_cast<armnn::INeonTensorHandle*>(wlSplitter->GetData().m_Outputs[1]);
- armnn::INeonTensorHandle* mIn0 = dynamic_cast<armnn::INeonTensorHandle*>(wlMerger->GetData().m_Inputs[0]);
- armnn::INeonTensorHandle* mIn1 = dynamic_cast<armnn::INeonTensorHandle*>(wlMerger->GetData().m_Inputs[1]);
+ armnn::INeonTensorHandle* mIn0 = dynamic_cast<armnn::INeonTensorHandle*>(wlConcat->GetData().m_Inputs[0]);
+ armnn::INeonTensorHandle* mIn1 = dynamic_cast<armnn::INeonTensorHandle*>(wlConcat->GetData().m_Inputs[1]);
BOOST_TEST(sOut0);
BOOST_TEST(sOut1);
@@ -632,17 +632,17 @@ BOOST_AUTO_TEST_CASE(CreateL2NormalizationNhwcWorkload)
NeonCreateL2NormalizationWorkloadTest<NeonL2NormalizationFloatWorkload, DataType::Float32>(DataLayout::NHWC);
}
-template <typename MergerWorkloadType, armnn::DataType DataType>
-static void NeonCreateMergerWorkloadTest(std::initializer_list<unsigned int> outputShape,
+template <typename ConcatWorkloadType, armnn::DataType DataType>
+static void NeonCreateConcatWorkloadTest(std::initializer_list<unsigned int> outputShape,
unsigned int concatAxis)
{
Graph graph;
NeonWorkloadFactory factory =
NeonWorkloadFactoryHelper::GetFactory(NeonWorkloadFactoryHelper::GetMemoryManager());
- auto workload = CreateMergerWorkloadTest<MergerWorkloadType, DataType>(factory, graph, outputShape, concatAxis);
+ auto workload = CreateConcatWorkloadTest<ConcatWorkloadType, DataType>(factory, graph, outputShape, concatAxis);
- MergerQueueDescriptor queueDescriptor = workload->GetData();
+ ConcatQueueDescriptor queueDescriptor = workload->GetData();
auto inputHandle0 = boost::polymorphic_downcast<INeonTensorHandle*>(queueDescriptor.m_Inputs[0]);
auto inputHandle1 = boost::polymorphic_downcast<INeonTensorHandle*>(queueDescriptor.m_Inputs[1]);
auto outputHandle = boost::polymorphic_downcast<INeonTensorHandle*>(queueDescriptor.m_Outputs[0]);
@@ -652,34 +652,34 @@ static void NeonCreateMergerWorkloadTest(std::initializer_list<unsigned int> out
BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo(outputShape, DataType)));
}
-BOOST_AUTO_TEST_CASE(CreateMergerDim0Float32Workload)
+BOOST_AUTO_TEST_CASE(CreateConcatDim0Float32Workload)
{
- NeonCreateMergerWorkloadTest<NeonConcatWorkload, armnn::DataType::Float32>({ 4, 3, 2, 5 }, 0);
+ NeonCreateConcatWorkloadTest<NeonConcatWorkload, armnn::DataType::Float32>({ 4, 3, 2, 5 }, 0);
}
-BOOST_AUTO_TEST_CASE(CreateMergerDim1Float32Workload)
+BOOST_AUTO_TEST_CASE(CreateConcatDim1Float32Workload)
{
- NeonCreateMergerWorkloadTest<NeonConcatWorkload, armnn::DataType::Float32>({ 2, 6, 2, 5 }, 1);
+ NeonCreateConcatWorkloadTest<NeonConcatWorkload, armnn::DataType::Float32>({ 2, 6, 2, 5 }, 1);
}
-BOOST_AUTO_TEST_CASE(CreateMergerDim3Float32Workload)
+BOOST_AUTO_TEST_CASE(CreateConcatDim3Float32Workload)
{
- NeonCreateMergerWorkloadTest<NeonConcatWorkload, armnn::DataType::Float32>({ 2, 3, 2, 10 }, 3);
+ NeonCreateConcatWorkloadTest<NeonConcatWorkload, armnn::DataType::Float32>({ 2, 3, 2, 10 }, 3);
}
-BOOST_AUTO_TEST_CASE(CreateMergerDim0Uint8Workload)
+BOOST_AUTO_TEST_CASE(CreateConcatDim0Uint8Workload)
{
- NeonCreateMergerWorkloadTest<NeonConcatWorkload, armnn::DataType::QuantisedAsymm8>({ 4, 3, 2, 5 }, 0);
+ NeonCreateConcatWorkloadTest<NeonConcatWorkload, armnn::DataType::QuantisedAsymm8>({ 4, 3, 2, 5 }, 0);
}
-BOOST_AUTO_TEST_CASE(CreateMergerDim1Uint8Workload)
+BOOST_AUTO_TEST_CASE(CreateConcatDim1Uint8Workload)
{
- NeonCreateMergerWorkloadTest<NeonConcatWorkload, armnn::DataType::QuantisedAsymm8>({ 2, 6, 2, 5 }, 1);
+ NeonCreateConcatWorkloadTest<NeonConcatWorkload, armnn::DataType::QuantisedAsymm8>({ 2, 6, 2, 5 }, 1);
}
-BOOST_AUTO_TEST_CASE(CreateMergerDim3Uint8Workload)
+BOOST_AUTO_TEST_CASE(CreateConcatDim3Uint8Workload)
{
- NeonCreateMergerWorkloadTest<NeonConcatWorkload, armnn::DataType::QuantisedAsymm8>({ 2, 3, 2, 10 }, 3);
+ NeonCreateConcatWorkloadTest<NeonConcatWorkload, armnn::DataType::QuantisedAsymm8>({ 2, 3, 2, 10 }, 3);
}
BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/backends/neon/test/NeonEndToEndTests.cpp b/src/backends/neon/test/NeonEndToEndTests.cpp
index 441efed9a9..15f5fc330e 100644
--- a/src/backends/neon/test/NeonEndToEndTests.cpp
+++ b/src/backends/neon/test/NeonEndToEndTests.cpp
@@ -4,7 +4,7 @@
//
#include <backendsCommon/test/EndToEndTestImpl.hpp>
-#include <backendsCommon/test/MergerTestImpl.hpp>
+#include <backendsCommon/test/ConcatTestImpl.hpp>
#include <backendsCommon/test/ArithmeticTestImpl.hpp>
#include <backendsCommon/test/SplitterEndToEndTestImpl.hpp>
@@ -93,34 +93,34 @@ BOOST_AUTO_TEST_CASE(NeonGreaterBroadcastEndToEndUint8Test)
expectedOutput);
}
-BOOST_AUTO_TEST_CASE(NeonMergerEndToEndDim0Test)
+BOOST_AUTO_TEST_CASE(NeonConcatEndToEndDim0Test)
{
- MergerDim0EndToEnd<armnn::DataType::Float32>(defaultBackends);
+ ConcatDim0EndToEnd<armnn::DataType::Float32>(defaultBackends);
}
-BOOST_AUTO_TEST_CASE(NeonMergerEndToEndDim0Uint8Test)
+BOOST_AUTO_TEST_CASE(NeonConcatEndToEndDim0Uint8Test)
{
- MergerDim0EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ ConcatDim0EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
}
-BOOST_AUTO_TEST_CASE(NeonMergerEndToEndDim1Test)
+BOOST_AUTO_TEST_CASE(NeonConcatEndToEndDim1Test)
{
- MergerDim1EndToEnd<armnn::DataType::Float32>(defaultBackends);
+ ConcatDim1EndToEnd<armnn::DataType::Float32>(defaultBackends);
}
-BOOST_AUTO_TEST_CASE(NeonMergerEndToEndDim1Uint8Test)
+BOOST_AUTO_TEST_CASE(NeonConcatEndToEndDim1Uint8Test)
{
- MergerDim1EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ ConcatDim1EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
}
-BOOST_AUTO_TEST_CASE(NeonMergerEndToEndDim3Test)
+BOOST_AUTO_TEST_CASE(NeonConcatEndToEndDim3Test)
{
- MergerDim3EndToEnd<armnn::DataType::Float32>(defaultBackends);
+ ConcatDim3EndToEnd<armnn::DataType::Float32>(defaultBackends);
}
-BOOST_AUTO_TEST_CASE(NeonMergerEndToEndDim3Uint8Test)
+BOOST_AUTO_TEST_CASE(NeonConcatEndToEndDim3Uint8Test)
{
- MergerDim3EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ ConcatDim3EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(NeonSplitDim0EndToEndTest)
diff --git a/src/backends/neon/workloads/NeonConcatWorkload.cpp b/src/backends/neon/workloads/NeonConcatWorkload.cpp
index 91f81090ce..8ea535b40a 100644
--- a/src/backends/neon/workloads/NeonConcatWorkload.cpp
+++ b/src/backends/neon/workloads/NeonConcatWorkload.cpp
@@ -19,7 +19,7 @@ using namespace armcomputetensorutils;
namespace
{
-size_t CalcAxis(const armnn::MergerDescriptor& desc)
+size_t CalcAxis(const armnn::OriginsDescriptor& desc)
{
return (desc.GetNumDimensions() - desc.GetConcatAxis()) - 1;
}
@@ -27,7 +27,7 @@ size_t CalcAxis(const armnn::MergerDescriptor& desc)
arm_compute::Status NeonConcatWorkloadValidate(const std::vector<const TensorInfo*>& inputs,
const TensorInfo& output,
- const MergerDescriptor& descriptor)
+ const OriginsDescriptor& descriptor)
{
std::vector<arm_compute::TensorInfo> aclInputs;
@@ -48,8 +48,8 @@ arm_compute::Status NeonConcatWorkloadValidate(const std::vector<const TensorInf
}
NeonConcatWorkload::NeonConcatWorkload(
-const MergerQueueDescriptor& descriptor, const WorkloadInfo& info)
- : BaseWorkload<MergerQueueDescriptor>(descriptor, info)
+const ConcatQueueDescriptor& descriptor, const WorkloadInfo& info)
+ : BaseWorkload<ConcatQueueDescriptor>(descriptor, info)
{
bool allInputsAreSubtensors = true;
@@ -58,7 +58,7 @@ const MergerQueueDescriptor& descriptor, const WorkloadInfo& info)
{
if (!input->GetParent())
{
- // Non sub-tensor input found so we need to execute the merger function
+ // Non sub-tensor input found so we need to execute the concat function
allInputsAreSubtensors = false;
break;
}
@@ -66,7 +66,7 @@ const MergerQueueDescriptor& descriptor, const WorkloadInfo& info)
if (allInputsAreSubtensors)
{
- // Can skip configuring the merger function since it's not executed
+ // Can skip configuring the concat function since it's not executed
return;
}
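[Editor's note] The CalcAxis helper kept in this file performs the same mirroring for the workload itself: Arm NN counts dimensions from the outermost one, while the ACL tensor shape indexes them from the innermost. A minimal worked example of that arithmetic, with illustrative names and values only (no armnn or ACL headers involved):

    #include <cstdio>

    // Same arithmetic as CalcAxis above; the name here is illustrative.
    size_t ToInnermostFirstAxis(size_t numDimensions, size_t concatAxis)
    {
        return (numDimensions - concatAxis) - 1;
    }

    int main()
    {
        // For a 4D NCHW tensor: armnn axis 3 (W) maps to 0, axis 1 (C) to 2,
        // and axis 0 (N) to 3, which is the sub-tensor/batch case above.
        std::printf("%zu %zu %zu\n",
                    ToInnermostFirstAxis(4, 3),
                    ToInnermostFirstAxis(4, 1),
                    ToInnermostFirstAxis(4, 0));
        return 0;
    }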
diff --git a/src/backends/neon/workloads/NeonConcatWorkload.hpp b/src/backends/neon/workloads/NeonConcatWorkload.hpp
index e5a8d15055..bf0733b431 100644
--- a/src/backends/neon/workloads/NeonConcatWorkload.hpp
+++ b/src/backends/neon/workloads/NeonConcatWorkload.hpp
@@ -17,14 +17,14 @@ namespace armnn
{
arm_compute::Status NeonConcatWorkloadValidate(const std::vector<const TensorInfo*>& inputs,
const TensorInfo& output,
- const MergerDescriptor& descriptor);
+ const OriginsDescriptor& descriptor);
-class NeonConcatWorkload : public BaseWorkload<MergerQueueDescriptor>
+class NeonConcatWorkload : public BaseWorkload<ConcatQueueDescriptor>
{
public:
- NeonConcatWorkload(const MergerQueueDescriptor& descriptor, const WorkloadInfo& info);
+ NeonConcatWorkload(const ConcatQueueDescriptor& descriptor, const WorkloadInfo& info);
- using BaseWorkload<MergerQueueDescriptor>::BaseWorkload;
+ using BaseWorkload<ConcatQueueDescriptor>::BaseWorkload;
void Execute() const override;
private: