diff options
author | Nikhil Raj <nikhil.raj@arm.com> | 2018-11-19 14:51:07 +0000 |
---|---|---|
committer | Nikhil Raj <nikhil.raj@arm.com> | 2018-11-19 14:51:07 +0000 |
commit | 8599a415c159aa867db12853b3195f0f0a51ee6b (patch) | |
tree | f85987c71dc745d7da7c672466723c26e39290b6 /src/backends/neon | |
parent | 1d67a6905daed13354e66f00549e12fea62170ed (diff) | |
download | armnn-8599a415c159aa867db12853b3195f0f0a51ee6b.tar.gz |
IVGCVSW-2043 - Merger using ACL for innermost concat axis
* Add ClMergerWorkload and NeonMergerWorkload to call ACL for innermost concat axis
* Modify layer support to call ClMergerWorkloadValidate and NeonMergerWorkloadValidate when concat axis is innermost
* Add m_ConcatAxis to MergerDescriptor
* Modify MergerQueueDescriptor::Validate to check sub-tensors only when using sub-tensors
!android-nn-driver:166
Change-Id: I56676b43964c8d6d726387b41b3cc34a512c0f0a
Diffstat (limited to 'src/backends/neon')
-rw-r--r-- | src/backends/neon/NeonLayerSupport.cpp | 22 | ||||
-rw-r--r-- | src/backends/neon/NeonLayerSupport.hpp | 1 | ||||
-rw-r--r-- | src/backends/neon/backend.mk | 1 | ||||
-rw-r--r-- | src/backends/neon/workloads/CMakeLists.txt | 1 | ||||
-rw-r--r-- | src/backends/neon/workloads/NeonMergerWorkload.cpp | 84 | ||||
-rw-r--r-- | src/backends/neon/workloads/NeonMergerWorkload.hpp | 16 |
6 files changed, 116 insertions, 9 deletions
diff --git a/src/backends/neon/NeonLayerSupport.cpp b/src/backends/neon/NeonLayerSupport.cpp index 28c4b75f2a..fd279e5d55 100644 --- a/src/backends/neon/NeonLayerSupport.cpp +++ b/src/backends/neon/NeonLayerSupport.cpp @@ -23,6 +23,7 @@ #include "workloads/NeonConvolution2dWorkload.hpp" #include "workloads/NeonDepthwiseConvolutionWorkload.hpp" #include "workloads/NeonL2NormalizationFloatWorkload.hpp" +#include "workloads/NeonMergerWorkload.hpp" #include "workloads/NeonMultiplicationFloatWorkload.hpp" #include "workloads/NeonNormalizationFloatWorkload.hpp" #include "workloads/NeonFullyConnectedWorkload.hpp" @@ -334,14 +335,25 @@ bool NeonLayerSupport::IsMeanSupported(const TensorInfo& input, } bool NeonLayerSupport::IsMergerSupported(const std::vector<const TensorInfo*> inputs, + const TensorInfo& output, const OriginsDescriptor& descriptor, Optional<std::string&> reasonIfUnsupported) const { - ignore_unused(descriptor); - return IsSupportedForDataTypeNeon(reasonIfUnsupported, - inputs[0]->GetDataType(), - &TrueFunc<>, - &TrueFunc<>); + if(descriptor.GetNumDimensions() - descriptor.GetConcatAxis() == 1) + { + FORWARD_WORKLOAD_VALIDATE_FUNC(NeonMergerWorkloadValidate, + reasonIfUnsupported, + inputs, + output, + descriptor); + } + else + { + return IsSupportedForDataTypeNeon(reasonIfUnsupported, + inputs[0]->GetDataType(), + &TrueFunc<>, + &TrueFunc<>); + } } bool NeonLayerSupport::IsMultiplicationSupported(const TensorInfo& input0, diff --git a/src/backends/neon/NeonLayerSupport.hpp b/src/backends/neon/NeonLayerSupport.hpp index fb1567c12d..e5cd3cc062 100644 --- a/src/backends/neon/NeonLayerSupport.hpp +++ b/src/backends/neon/NeonLayerSupport.hpp @@ -117,6 +117,7 @@ public: Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override; bool IsMergerSupported(const std::vector<const TensorInfo*> inputs, + const TensorInfo& output, const OriginsDescriptor& descriptor, Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override; diff 
--git a/src/backends/neon/backend.mk b/src/backends/neon/backend.mk index 8f7e72b17c..2f74ecd1ce 100644 --- a/src/backends/neon/backend.mk +++ b/src/backends/neon/backend.mk @@ -25,6 +25,7 @@ BACKEND_SOURCES := \ workloads/NeonFullyConnectedWorkload.cpp \ workloads/NeonL2NormalizationFloatWorkload.cpp \ workloads/NeonLstmFloatWorkload.cpp \ + workloads/NeonMergerWorkload.cpp \ workloads/NeonMultiplicationFloatWorkload.cpp \ workloads/NeonNormalizationFloatWorkload.cpp \ workloads/NeonPermuteWorkload.cpp \ diff --git a/src/backends/neon/workloads/CMakeLists.txt b/src/backends/neon/workloads/CMakeLists.txt index f6e8d1c04a..e383b04f25 100644 --- a/src/backends/neon/workloads/CMakeLists.txt +++ b/src/backends/neon/workloads/CMakeLists.txt @@ -28,6 +28,7 @@ list(APPEND armnnNeonBackendWorkloads_sources NeonL2NormalizationFloatWorkload.hpp NeonLstmFloatWorkload.cpp NeonLstmFloatWorkload.hpp + NeonMergerWorkload.cpp NeonMergerWorkload.hpp NeonMultiplicationFloatWorkload.cpp NeonMultiplicationFloatWorkload.hpp diff --git a/src/backends/neon/workloads/NeonMergerWorkload.cpp b/src/backends/neon/workloads/NeonMergerWorkload.cpp new file mode 100644 index 0000000000..f82e24453a --- /dev/null +++ b/src/backends/neon/workloads/NeonMergerWorkload.cpp @@ -0,0 +1,84 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. 
+// SPDX-License-Identifier: MIT +// + +#include "NeonMergerWorkload.hpp" +#include <armnn/ArmNN.hpp> +#include <aclCommon/ArmComputeTensorUtils.hpp> +#include <backendsCommon/CpuTensorHandle.hpp> +#include <neon/NeonTensorHandle.hpp> + + +namespace armnn +{ +using namespace armcomputetensorutils; + +arm_compute::Status NeonMergerWorkloadValidate(const std::vector<const TensorInfo*>& inputs, + const TensorInfo& output, + const MergerDescriptor& descriptor) + +{ + std::vector<arm_compute::TensorInfo> aclInputs; + for (const TensorInfo* input : inputs) + { + arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(*input, armnn::DataLayout::NCHW); + aclInputs.emplace_back(aclInputInfo); + } + const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output); + arm_compute::DataLayoutDimension aclAxis = arm_compute::DataLayoutDimension::WIDTH; + + std::vector<arm_compute::ITensorInfo*> aclInputPtrs; + for (arm_compute::ITensorInfo& input : aclInputs) + { + aclInputPtrs.emplace_back(&input); + } + + return arm_compute::NEConcatenateLayer::validate(aclInputPtrs, &aclOutputInfo, aclAxis); + +} + +NeonMergerWorkload::NeonMergerWorkload( +const MergerQueueDescriptor& descriptor, const WorkloadInfo& info) + : BaseWorkload<MergerQueueDescriptor>(descriptor, info) +{ + m_Execute = true; + + unsigned int innerAxisOrder = descriptor.m_Parameters.GetNumDimensions() - descriptor.m_Parameters.GetConcatAxis(); + + if (innerAxisOrder != 1) + { + m_Execute = false; + return; + } + + std::vector<arm_compute::ITensor *> aclInputs; + arm_compute::DataLayout aclDataLayout = ConvertDataLayout(armnn::DataLayout::NCHW); + for (auto input : m_Data.m_Inputs) + { + arm_compute::ITensor& aclInput = boost::polymorphic_pointer_downcast<INeonTensorHandle>(input)->GetTensor(); + aclInput.info()->set_data_layout(aclDataLayout); + aclInputs.emplace_back(&aclInput); + } + arm_compute::ITensor& output = boost::polymorphic_pointer_downcast<INeonTensorHandle>( + 
m_Data.m_Outputs[0])->GetTensor(); + output.info()->set_data_layout(aclDataLayout); + + arm_compute::DataLayoutDimension aclAxis = arm_compute::DataLayoutDimension::WIDTH; + + m_Layer.configure(aclInputs, &output, aclAxis); + + m_Layer.prepare(); +} + +void NeonMergerWorkload::Execute() const +{ + if (m_Execute) + { + ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonMergerWorkload_Execute"); + m_Layer.run(); + } +} + +} //namespace armnn + diff --git a/src/backends/neon/workloads/NeonMergerWorkload.hpp b/src/backends/neon/workloads/NeonMergerWorkload.hpp index 7103d8a469..a4f36d18bc 100644 --- a/src/backends/neon/workloads/NeonMergerWorkload.hpp +++ b/src/backends/neon/workloads/NeonMergerWorkload.hpp @@ -6,18 +6,26 @@ #pragma once #include <backendsCommon/Workload.hpp> +#include <neon/workloads/NeonWorkloadUtils.hpp> namespace armnn { +arm_compute::Status NeonMergerWorkloadValidate(const std::vector<const TensorInfo*>& inputs, + const TensorInfo& output, + const MergerDescriptor& descriptor); + class NeonMergerWorkload : public BaseWorkload<MergerQueueDescriptor> { public: + NeonMergerWorkload(const MergerQueueDescriptor& descriptor, const WorkloadInfo& info); + using BaseWorkload<MergerQueueDescriptor>::BaseWorkload; + void Execute() const override; + +private: + mutable arm_compute::NEConcatenateLayer m_Layer; + bool m_Execute; - virtual void Execute() const override - { - // With subtensors, merger is a no-op. - } }; } //namespace armnn |