From e242f2dc646f41e9162aaaf74e057ce39fcb92df Mon Sep 17 00:00:00 2001
From: Jim Flynn
Date: Wed, 22 May 2019 14:24:13 +0100
Subject: IVGCVSW-3119 Rename MergerLayer to ConcatLayer

!android-nn-driver:1210

Change-Id: I940b3b9e421c92bfd55ae996f7bc54ac077f2604
Signed-off-by: Jim Flynn
---
 src/backends/neon/NeonLayerSupport.cpp             | 72 +++++++++++-----------
 src/backends/neon/NeonLayerSupport.hpp             |  4 +-
 src/backends/neon/NeonWorkloadFactory.cpp           |  2 +-
 src/backends/neon/NeonWorkloadFactory.hpp           |  2 +-
 src/backends/neon/test/NeonCreateWorkloadTests.cpp | 48 +++++++--------
 src/backends/neon/test/NeonEndToEndTests.cpp       | 26 ++++----
 src/backends/neon/workloads/NeonConcatWorkload.cpp | 12 ++--
 src/backends/neon/workloads/NeonConcatWorkload.hpp |  8 +--
 8 files changed, 86 insertions(+), 88 deletions(-)

(limited to 'src/backends/neon')

diff --git a/src/backends/neon/NeonLayerSupport.cpp b/src/backends/neon/NeonLayerSupport.cpp
index fd9aac5bc5..e84eb799fc 100644
--- a/src/backends/neon/NeonLayerSupport.cpp
+++ b/src/backends/neon/NeonLayerSupport.cpp
@@ -146,12 +146,41 @@ bool NeonLayerSupport::IsBatchNormalizationSupported(const TensorInfo& input,
 
 bool NeonLayerSupport::IsConcatSupported(const std::vector<const TensorInfo*> inputs,
                                          const TensorInfo& output,
-                                         const OriginsDescriptor& descriptor,
+                                         const ConcatDescriptor& descriptor,
                                          Optional<std::string&> reasonIfUnsupported) const
 {
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
-    return IsMergerSupported(inputs, output, descriptor, reasonIfUnsupported);
-    ARMNN_NO_DEPRECATE_WARN_END
+    if (descriptor.GetNumDimensions() <= descriptor.GetConcatAxis())
+    {
+        SetValueChecked(reasonIfUnsupported, "Neon Concat: Concat axis > Number of dimensions.");
+        return false;
+    }
+
+    unsigned int concatInnerAxis = (descriptor.GetNumDimensions() - descriptor.GetConcatAxis()) - 1;
+    if(concatInnerAxis < 3) // Width, height, or channels
+    {
+        FORWARD_WORKLOAD_VALIDATE_FUNC(NeonConcatWorkloadValidate,
+                                       reasonIfUnsupported,
+                                       inputs,
+                                       output,
+                                       descriptor);
+    }
+    else if (concatInnerAxis == 3)
+    {
+        for (auto& input : inputs)
+        {
+            if (input && !output.IsTypeSpaceMatch(*input)) // Cannot use sub-tensors if the types are not same space
+            {
+                SetValueChecked(reasonIfUnsupported, "Neon Concat: Types and quantization parameters must match.");
+                return false;
+            }
+        }
+        return true; // Sub-tensors support concat along batch
+    }
+    else // > 4 dimensions not supported.
+    {
+        SetValueChecked(reasonIfUnsupported, "Neon Concat: Maximum of 4 dimensions supported.");
+        return false;
+    }
 }
 
 bool NeonLayerSupport::IsConstantSupported(const TensorInfo& output,
@@ -326,41 +355,10 @@ bool NeonLayerSupport::IsMemCopySupported(const TensorInfo &input,
 
 bool NeonLayerSupport::IsMergerSupported(const std::vector<const TensorInfo*> inputs,
                                          const TensorInfo& output,
-                                         const OriginsDescriptor& descriptor,
+                                         const MergerDescriptor& descriptor,
                                          Optional<std::string&> reasonIfUnsupported) const
 {
-    if (descriptor.GetNumDimensions() <= descriptor.GetConcatAxis())
-    {
-        SetValueChecked(reasonIfUnsupported, "Neon Merger: Concat axis > Number of dimensions.");
-        return false;
-    }
-
-    unsigned int concatInnerAxis = (descriptor.GetNumDimensions() - descriptor.GetConcatAxis()) - 1;
-    if(concatInnerAxis < 3) // Width, height, or channels
-    {
-        FORWARD_WORKLOAD_VALIDATE_FUNC(NeonConcatWorkloadValidate,
-                                       reasonIfUnsupported,
-                                       inputs,
-                                       output,
-                                       descriptor);
-    }
-    else if (concatInnerAxis == 3)
-    {
-        for (auto& input : inputs)
-        {
-            if (input && !output.IsTypeSpaceMatch(*input)) // Cannot use sub-tensors if the types are not same space
-            {
-                SetValueChecked(reasonIfUnsupported, "Neon Merger: Types and quantization parameters must match.");
-                return false;
-            }
-        }
-        return true; // Sub-tensors support concat along batch
-    }
-    else // > 4 dimensions not supported.
-    {
-        SetValueChecked(reasonIfUnsupported, "Neon Merger: Maximum of 4 dimensions supported.");
-        return false;
-    }
+    return IsConcatSupported(inputs, output, descriptor, reasonIfUnsupported);
 }
 
 bool NeonLayerSupport::IsMinimumSupported(const TensorInfo& input0,
diff --git a/src/backends/neon/NeonLayerSupport.hpp b/src/backends/neon/NeonLayerSupport.hpp
index 5e8e0bdbed..dd6ed79c9a 100644
--- a/src/backends/neon/NeonLayerSupport.hpp
+++ b/src/backends/neon/NeonLayerSupport.hpp
@@ -33,7 +33,7 @@ public:
 
     bool IsConcatSupported(const std::vector<const TensorInfo*> inputs,
                            const TensorInfo& output,
-                           const OriginsDescriptor& descriptor,
+                           const ConcatDescriptor& descriptor,
                            Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
     bool IsConstantSupported(const TensorInfo& output,
@@ -109,7 +109,7 @@ public:
     ARMNN_DEPRECATED_MSG("Use IsConcatSupported instead")
     bool IsMergerSupported(const std::vector<const TensorInfo*> inputs,
                            const TensorInfo& output,
-                           const OriginsDescriptor& descriptor,
+                           const MergerDescriptor& descriptor,
                            Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
     bool IsMinimumSupported(const TensorInfo& input0,
diff --git a/src/backends/neon/NeonWorkloadFactory.cpp b/src/backends/neon/NeonWorkloadFactory.cpp
index 3005dae94c..4b6225f67b 100644
--- a/src/backends/neon/NeonWorkloadFactory.cpp
+++ b/src/backends/neon/NeonWorkloadFactory.cpp
@@ -233,7 +233,7 @@ std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateL2Normalization(const L2No
                                                        m_MemoryManager->GetIntraLayerManager());
 }
 
-std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateConcat(const MergerQueueDescriptor& descriptor,
+std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateConcat(const ConcatQueueDescriptor& descriptor,
                                                                     const WorkloadInfo& info) const
 {
     return std::make_unique<NeonConcatWorkload>(descriptor, info);
diff --git a/src/backends/neon/NeonWorkloadFactory.hpp b/src/backends/neon/NeonWorkloadFactory.hpp
index 60dbb90b60..6a28d12326 100644
--- a/src/backends/neon/NeonWorkloadFactory.hpp
+++ b/src/backends/neon/NeonWorkloadFactory.hpp
@@ -98,7 +98,7 @@ public:
     std::unique_ptr<IWorkload> CreateL2Normalization(const L2NormalizationQueueDescriptor& descriptor,
                                                      const WorkloadInfo& info) const override;
 
-    std::unique_ptr<IWorkload> CreateConcat(const MergerQueueDescriptor& descriptor,
+    std::unique_ptr<IWorkload> CreateConcat(const ConcatQueueDescriptor& descriptor,
                                             const WorkloadInfo& info) const override;
 
     std::unique_ptr<IWorkload> CreateConstant(const ConstantQueueDescriptor& descriptor,
diff --git a/src/backends/neon/test/NeonCreateWorkloadTests.cpp b/src/backends/neon/test/NeonCreateWorkloadTests.cpp
index b41d62f7eb..83823659b0 100644
--- a/src/backends/neon/test/NeonCreateWorkloadTests.cpp
+++ b/src/backends/neon/test/NeonCreateWorkloadTests.cpp
@@ -504,30 +504,30 @@ BOOST_AUTO_TEST_CASE(CreateSplitterWorkload)
     BOOST_TEST(TestNeonTensorHandleInfo(outputHandle2, TensorInfo({2, 7, 7}, DataType::Float32)));
 }
 
-BOOST_AUTO_TEST_CASE(CreateSplitterMerger)
+BOOST_AUTO_TEST_CASE(CreateSplitterConcat)
 {
     // Tests that it is possible to decide which output of the splitter layer
-    // should be lined to which input of the merger layer.
+    // should be lined to which input of the concat layer.
     // We tested that is is possible to specify 0th output
-    // of the splitter to be the 1st input to the merger, and the 1st output of the splitter to be 0th input
-    // of the merger.
+    // of the splitter to be the 1st input to the concat, and the 1st output of the splitter to be 0th input
+    // of the concat.
 
     Graph graph;
     NeonWorkloadFactory factory =
         NeonWorkloadFactoryHelper::GetFactory(NeonWorkloadFactoryHelper::GetMemoryManager());
 
     auto workloads =
        CreateSplitterMergerWorkloadTest(factory, graph);
 
     auto wlSplitter = std::move(workloads.first);
-    auto wlMerger = std::move(workloads.second);
+    auto wlConcat = std::move(workloads.second);
 
     //Checks that the index of inputs/outputs matches what we declared on InputDescriptor construction.
     armnn::INeonTensorHandle* sOut0 = dynamic_cast<armnn::INeonTensorHandle*>(wlSplitter->GetData().m_Outputs[0]);
     armnn::INeonTensorHandle* sOut1 = dynamic_cast<armnn::INeonTensorHandle*>(wlSplitter->GetData().m_Outputs[1]);
-    armnn::INeonTensorHandle* mIn0 = dynamic_cast<armnn::INeonTensorHandle*>(wlMerger->GetData().m_Inputs[0]);
-    armnn::INeonTensorHandle* mIn1 = dynamic_cast<armnn::INeonTensorHandle*>(wlMerger->GetData().m_Inputs[1]);
+    armnn::INeonTensorHandle* mIn0 = dynamic_cast<armnn::INeonTensorHandle*>(wlConcat->GetData().m_Inputs[0]);
+    armnn::INeonTensorHandle* mIn1 = dynamic_cast<armnn::INeonTensorHandle*>(wlConcat->GetData().m_Inputs[1]);
 
     BOOST_TEST(sOut0);
     BOOST_TEST(sOut1);
@@ -632,17 +632,17 @@ BOOST_AUTO_TEST_CASE(CreateL2NormalizationNhwcWorkload)
     NeonCreateL2NormalizationWorkloadTest(DataLayout::NHWC);
 }
 
-template 
-static void NeonCreateMergerWorkloadTest(std::initializer_list<unsigned int> outputShape,
+template 
+static void NeonCreateConcatWorkloadTest(std::initializer_list<unsigned int> outputShape,
                                          unsigned int concatAxis)
 {
     Graph graph;
     NeonWorkloadFactory factory =
        NeonWorkloadFactoryHelper::GetFactory(NeonWorkloadFactoryHelper::GetMemoryManager());
 
-    auto workload = CreateMergerWorkloadTest(factory, graph, outputShape, concatAxis);
+    auto workload = CreateConcatWorkloadTest(factory, graph, outputShape, concatAxis);
 
-    MergerQueueDescriptor queueDescriptor = workload->GetData();
+    ConcatQueueDescriptor queueDescriptor = workload->GetData();
     auto inputHandle0 = boost::polymorphic_downcast(queueDescriptor.m_Inputs[0]);
     auto inputHandle1 = boost::polymorphic_downcast(queueDescriptor.m_Inputs[1]);
     auto outputHandle = boost::polymorphic_downcast(queueDescriptor.m_Outputs[0]);
@@ -652,34 +652,34 @@ static void NeonCreateMergerWorkloadTest(std::initializer_list out
     BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo(outputShape, DataType)));
 }
 
-BOOST_AUTO_TEST_CASE(CreateMergerDim0Float32Workload)
+BOOST_AUTO_TEST_CASE(CreateConcatDim0Float32Workload)
 {
-    NeonCreateMergerWorkloadTest({ 4, 3, 2, 5 }, 0);
+    NeonCreateConcatWorkloadTest({ 4, 3, 2, 5 }, 0);
 }
 
-BOOST_AUTO_TEST_CASE(CreateMergerDim1Float32Workload)
+BOOST_AUTO_TEST_CASE(CreateConcatDim1Float32Workload)
 {
-    NeonCreateMergerWorkloadTest({ 2, 6, 2, 5 }, 1);
+    NeonCreateConcatWorkloadTest({ 2, 6, 2, 5 }, 1);
 }
 
-BOOST_AUTO_TEST_CASE(CreateMergerDim3Float32Workload)
+BOOST_AUTO_TEST_CASE(CreateConcatDim3Float32Workload)
 {
-    NeonCreateMergerWorkloadTest({ 2, 3, 2, 10 }, 3);
+    NeonCreateConcatWorkloadTest({ 2, 3, 2, 10 }, 3);
 }
 
-BOOST_AUTO_TEST_CASE(CreateMergerDim0Uint8Workload)
+BOOST_AUTO_TEST_CASE(CreateConcatDim0Uint8Workload)
 {
-    NeonCreateMergerWorkloadTest({ 4, 3, 2, 5 }, 0);
+    NeonCreateConcatWorkloadTest({ 4, 3, 2, 5 }, 0);
 }
 
-BOOST_AUTO_TEST_CASE(CreateMergerDim1Uint8Workload)
+BOOST_AUTO_TEST_CASE(CreateConcatDim1Uint8Workload)
 {
-    NeonCreateMergerWorkloadTest({ 2, 6, 2, 5 }, 1);
+    NeonCreateConcatWorkloadTest({ 2, 6, 2, 5 }, 1);
 }
 
-BOOST_AUTO_TEST_CASE(CreateMergerDim3Uint8Workload)
+BOOST_AUTO_TEST_CASE(CreateConcatDim3Uint8Workload)
 {
-    NeonCreateMergerWorkloadTest({ 2, 3, 2, 10 }, 3);
+    NeonCreateConcatWorkloadTest({ 2, 3, 2, 10 }, 3);
 }
 
 BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/backends/neon/test/NeonEndToEndTests.cpp b/src/backends/neon/test/NeonEndToEndTests.cpp
index 441efed9a9..15f5fc330e 100644
--- a/src/backends/neon/test/NeonEndToEndTests.cpp
+++ b/src/backends/neon/test/NeonEndToEndTests.cpp
@@ -4,7 +4,7 @@
 //
 #include 
-#include 
+#include 
 #include 
 #include 
@@ -93,34 +93,34 @@ BOOST_AUTO_TEST_CASE(NeonGreaterBroadcastEndToEndUint8Test)
                                expectedOutput);
 }
 
-BOOST_AUTO_TEST_CASE(NeonMergerEndToEndDim0Test)
+BOOST_AUTO_TEST_CASE(NeonConcatEndToEndDim0Test)
 {
-    MergerDim0EndToEnd(defaultBackends);
+    ConcatDim0EndToEnd(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonMergerEndToEndDim0Uint8Test)
+BOOST_AUTO_TEST_CASE(NeonConcatEndToEndDim0Uint8Test)
 {
-    MergerDim0EndToEnd(defaultBackends);
+    ConcatDim0EndToEnd(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonMergerEndToEndDim1Test)
+BOOST_AUTO_TEST_CASE(NeonConcatEndToEndDim1Test)
 {
-    MergerDim1EndToEnd(defaultBackends);
+    ConcatDim1EndToEnd(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonMergerEndToEndDim1Uint8Test)
+BOOST_AUTO_TEST_CASE(NeonConcatEndToEndDim1Uint8Test)
 {
-    MergerDim1EndToEnd(defaultBackends);
+    ConcatDim1EndToEnd(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonMergerEndToEndDim3Test)
+BOOST_AUTO_TEST_CASE(NeonConcatEndToEndDim3Test)
 {
-    MergerDim3EndToEnd(defaultBackends);
+    ConcatDim3EndToEnd(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonMergerEndToEndDim3Uint8Test)
+BOOST_AUTO_TEST_CASE(NeonConcatEndToEndDim3Uint8Test)
 {
-    MergerDim3EndToEnd(defaultBackends);
+    ConcatDim3EndToEnd(defaultBackends);
 }
 
 BOOST_AUTO_TEST_CASE(NeonSplitDim0EndToEndTest)
diff --git a/src/backends/neon/workloads/NeonConcatWorkload.cpp b/src/backends/neon/workloads/NeonConcatWorkload.cpp
index 91f81090ce..8ea535b40a 100644
--- a/src/backends/neon/workloads/NeonConcatWorkload.cpp
+++ b/src/backends/neon/workloads/NeonConcatWorkload.cpp
@@ -19,7 +19,7 @@ using namespace armcomputetensorutils;
 namespace
 {
-size_t CalcAxis(const armnn::MergerDescriptor& desc)
+size_t CalcAxis(const armnn::OriginsDescriptor& desc)
 {
     return (desc.GetNumDimensions() - desc.GetConcatAxis()) - 1;
 }
@@ -27,7 +27,7 @@ size_t CalcAxis(const armnn::MergerDescriptor& desc)
 
 arm_compute::Status NeonConcatWorkloadValidate(const std::vector<const TensorInfo*>& inputs,
                                                const TensorInfo& output,
-                                               const MergerDescriptor& descriptor)
+                                               const OriginsDescriptor& descriptor)
 {
     std::vector aclInputs;
@@ -48,8 +48,8 @@ arm_compute::Status NeonConcatWorkloadValidate(const std::vector<const TensorIn
 }
 
 NeonConcatWorkload::NeonConcatWorkload(
-const MergerQueueDescriptor& descriptor, const WorkloadInfo& info)
-        : BaseWorkload<MergerQueueDescriptor>(descriptor, info)
+const ConcatQueueDescriptor& descriptor, const WorkloadInfo& info)
+        : BaseWorkload<ConcatQueueDescriptor>(descriptor, info)
 {
     bool allInputsAreSubtensors = true;
@@ -58,7 +58,7 @@ const MergerQueueDescriptor& descriptor, const WorkloadInfo& info)
     {
         if (!input->GetParent())
         {
-            // Non sub-tensor input found so we need to execute the merger function
+            // Non sub-tensor input found so we need to execute the concat function
             allInputsAreSubtensors = false;
             break;
         }
@@ -66,7 +66,7 @@ const MergerQueueDescriptor& descriptor, const WorkloadInfo& info)
 
     if (allInputsAreSubtensors)
     {
-        // Can skip configuring the merger function since it's not executed
+        // Can skip configuring the concat function since it's not executed
         return;
     }
diff --git a/src/backends/neon/workloads/NeonConcatWorkload.hpp b/src/backends/neon/workloads/NeonConcatWorkload.hpp
index e5a8d15055..bf0733b431 100644
--- a/src/backends/neon/workloads/NeonConcatWorkload.hpp
+++ b/src/backends/neon/workloads/NeonConcatWorkload.hpp
@@ -17,14 +17,14 @@ namespace armnn
 {
 
 arm_compute::Status NeonConcatWorkloadValidate(const std::vector<const TensorInfo*>& inputs,
                                                const TensorInfo& output,
-                                               const MergerDescriptor& descriptor);
+                                               const OriginsDescriptor& descriptor);
 
-class NeonConcatWorkload : public BaseWorkload<MergerQueueDescriptor>
+class NeonConcatWorkload : public BaseWorkload<ConcatQueueDescriptor>
 {
 public:
-    NeonConcatWorkload(const MergerQueueDescriptor& descriptor, const WorkloadInfo& info);
+    NeonConcatWorkload(const ConcatQueueDescriptor& descriptor, const WorkloadInfo& info);
 
-    using BaseWorkload<MergerQueueDescriptor>::BaseWorkload;
+    using BaseWorkload<ConcatQueueDescriptor>::BaseWorkload;
     void Execute() const override;
 
 private:
-- 
cgit v1.2.1
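
Editor's note on the change, for readers skimming the diff: the substance of the NeonLayerSupport.cpp hunks is that the real support check now lives in IsConcatSupported, the deprecated IsMergerSupported entry point simply forwards to it, and the check maps the user-facing concat axis onto an "inner" axis counted from the innermost dimension. The sketch below is a minimal, self-contained illustration of those two patterns only; it is not the ArmNN implementation. The ExampleDescriptor type, the error strings, and the values in main() are invented for this example; only the forwarding relationship and the (numDimensions - concatAxis) - 1 mapping are taken from the patch above.

    // Illustration only: names below are invented for this sketch and are not part of ArmNN.
    #include <cstdio>
    #include <string>

    struct ExampleDescriptor          // hypothetical stand-in for a concat/origins descriptor
    {
        unsigned int numDimensions;
        unsigned int concatAxis;
    };

    // Axis 0 is the outermost (batch) axis, so the axis counted from the innermost
    // dimension is (numDimensions - concatAxis) - 1, as in the patch above.
    bool IsConcatSupported(const ExampleDescriptor& desc, std::string& reason)
    {
        if (desc.numDimensions <= desc.concatAxis)
        {
            reason = "Concat axis > Number of dimensions.";
            return false;
        }
        if (desc.numDimensions > 4)
        {
            reason = "Maximum of 4 dimensions supported.";
            return false;
        }

        const unsigned int concatInnerAxis = (desc.numDimensions - desc.concatAxis) - 1;
        if (concatInnerAxis < 3)
        {
            return true; // width, height or channels: a real backend would validate a kernel here
        }
        return true;     // inner axis 3 is the batch axis: handled via sub-tensors in the patch
    }

    // The deprecated entry point keeps its old name but simply forwards, which is
    // the shape of the IsMergerSupported -> IsConcatSupported change.
    [[deprecated("Use IsConcatSupported instead")]]
    bool IsMergerSupported(const ExampleDescriptor& desc, std::string& reason)
    {
        return IsConcatSupported(desc, reason);
    }

    int main()
    {
        std::string reason;
        const ExampleDescriptor concatOnInnermostAxis{4, 3}; // 4D tensor, concat on axis 3
        std::printf("supported: %s\n", IsConcatSupported(concatOnInnermostAxis, reason) ? "yes" : "no");
        return 0;
    }

Keeping the old name as a thin forwarder lets existing callers keep compiling while a deprecation message steers them toward the new name, which matches the intent of the ARMNN_DEPRECATED_MSG annotation in the NeonLayerSupport.hpp hunk.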