From 0790dcea1056298d63f97dec904c8ade5d21f439 Mon Sep 17 00:00:00 2001
From: Derek Lamberti
Date: Mon, 15 Apr 2019 18:37:35 +0100
Subject: IVGCVSW-2957 MergerLayer subtensor optimization now backend agnostic

+ Update clframework pin
+ Cl and Neon Merger workloads updated to use MemoryLayout agnostic API
+ Workloads only use sub-tensor optimization if ALL input tensors are sub-tensors
+ Refactor LayerSupportCommon code to be a bit more succinct

Change-Id: Ib61ad4ccbd767e924dff07e61022e0cda4069828
Signed-off-by: Derek Lamberti
---
 src/backends/neon/NeonLayerSupport.cpp             | 38 +++++++++++-----
 src/backends/neon/NeonWorkloadFactory.cpp          |  6 +++
 src/backends/neon/workloads/NeonMergerWorkload.cpp | 52 ++++++++++++++--------
 src/backends/neon/workloads/NeonMergerWorkload.hpp |  7 ++-
 4 files changed, 68 insertions(+), 35 deletions(-)

(limited to 'src/backends/neon')

diff --git a/src/backends/neon/NeonLayerSupport.cpp b/src/backends/neon/NeonLayerSupport.cpp
index 46a7e6f79f..898660cc91 100644
--- a/src/backends/neon/NeonLayerSupport.cpp
+++ b/src/backends/neon/NeonLayerSupport.cpp
@@ -52,10 +52,7 @@ bool IsNeonBackendSupported(Optional<std::string&> reasonIfUnsupported)
 #if defined(ARMCOMPUTENEON_ENABLED)
     return true;
 #else
-    if (reasonIfUnsupported)
-    {
-        reasonIfUnsupported.value() = "The armnn library has been built without NEON support";
-    }
+    SetValueChecked(reasonIfUnsupported, "The armnn library has been built without NEON support");
     return false;
 #endif
 }
@@ -304,7 +301,14 @@ bool NeonLayerSupport::IsMergerSupported(const std::vector<const TensorInfo*> in
                                          const OriginsDescriptor& descriptor,
                                          Optional<std::string&> reasonIfUnsupported) const
 {
-    if(descriptor.GetNumDimensions() - descriptor.GetConcatAxis() == 1)
+    if (descriptor.GetNumDimensions() <= descriptor.GetConcatAxis())
+    {
+        SetValueChecked(reasonIfUnsupported, "Neon Merger: Concat axis > Number of dimensions.");
+        return false;
+    }
+
+    unsigned int concatInnerAxis = (descriptor.GetNumDimensions() - descriptor.GetConcatAxis()) - 1;
+    if(concatInnerAxis < 3) // Width, height, or channels
     {
         FORWARD_WORKLOAD_VALIDATE_FUNC(NeonMergerWorkloadValidate,
                                        reasonIfUnsupported,
@@ -312,13 +316,23 @@ bool NeonLayerSupport::IsMergerSupported(const std::vector<const TensorInfo*> in
                                        output,
                                        descriptor);
     }
-    else
-    {
-        return IsSupportedForDataTypeNeon(reasonIfUnsupported,
-                                          inputs[0]->GetDataType(),
-                                          &TrueFunc<>,
-                                          &TrueFunc<>);
-    }
+    else if (concatInnerAxis == 3)
+    {
+        for (auto& input : inputs)
+        {
+            if (input && !output.IsTypeSpaceMatch(*input)) // Cannot use sub-tensors if the types are not same space
+            {
+                SetValueChecked(reasonIfUnsupported, "Neon Merger: Types and quantization parameters must match.");
+                return false;
+            }
+        }
+        return true; // Sub-tensors support concat along batch
+    }
+    else // > 4 dimensions not supported.
+    {
+        SetValueChecked(reasonIfUnsupported, "Neon Merger: Maximum of 4 dimensions supported.");
+        return false;
+    }
 }
 
 bool NeonLayerSupport::IsMinimumSupported(const TensorInfo& input0,
diff --git a/src/backends/neon/NeonWorkloadFactory.cpp b/src/backends/neon/NeonWorkloadFactory.cpp
index 101e59d0c4..8db5f9ad84 100644
--- a/src/backends/neon/NeonWorkloadFactory.cpp
+++ b/src/backends/neon/NeonWorkloadFactory.cpp
@@ -61,6 +61,12 @@ std::unique_ptr<ITensorHandle> NeonWorkloadFactory::CreateSubTensorHandle(ITenso
         coords.set(i, boost::numeric_cast<int>(subTensorOrigin[revertedIndex]));
     }
 
+    const arm_compute::TensorShape parentShape = armcomputetensorutils::BuildArmComputeTensorShape(parent.GetShape());
+    if (!::arm_compute::error_on_invalid_subtensor(__func__, __FILE__, __LINE__, parentShape, coords, shape))
+    {
+        return nullptr;
+    }
+
     return std::make_unique<NeonSubTensorHandle>(
         boost::polymorphic_downcast<INeonTensorHandle*>(&parent), shape, coords);
 }
diff --git a/src/backends/neon/workloads/NeonMergerWorkload.cpp b/src/backends/neon/workloads/NeonMergerWorkload.cpp
index be096b4b25..64d4d93d97 100644
--- a/src/backends/neon/workloads/NeonMergerWorkload.cpp
+++ b/src/backends/neon/workloads/NeonMergerWorkload.cpp
@@ -11,12 +11,20 @@
 #include <backendsCommon/CpuTensorHandle.hpp>
 #include <neon/NeonTensorHandle.hpp>
 
-#include <arm_compute/runtime/NEON/functions/NEConcatenateLayer.h>
+
 
 namespace armnn
 {
 using namespace armcomputetensorutils;
 
+namespace
+{
+size_t CalcAxis(const armnn::MergerDescriptor& desc)
+{
+    return (desc.GetNumDimensions() - desc.GetConcatAxis()) - 1;
+}
+} //namespace
+
 arm_compute::Status NeonMergerWorkloadValidate(const std::vector<const TensorInfo*>& inputs,
                                                const TensorInfo& output,
                                                const MergerDescriptor& descriptor)
@@ -25,60 +33,66 @@ arm_compute::Status NeonMergerWorkloadValidate(const std::vector<const TensorIn
     std::vector<arm_compute::TensorInfo> aclInputs;
     for (const TensorInfo* input : inputs)
     {
-       arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(*input, armnn::DataLayout::NCHW);
-       aclInputs.emplace_back(aclInputInfo);
+        arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(*input, armnn::DataLayout::NCHW);
+        aclInputs.emplace_back(aclInputInfo);
     }
     const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output);
 
-    arm_compute::DataLayoutDimension aclAxis = arm_compute::DataLayoutDimension::WIDTH;
-
     std::vector<const arm_compute::ITensorInfo*> aclInputPtrs;
     for (arm_compute::ITensorInfo& input : aclInputs)
     {
         aclInputPtrs.emplace_back(&input);
     }
 
+    size_t aclAxis = CalcAxis(descriptor);
     return arm_compute::NEConcatenateLayer::validate(aclInputPtrs, &aclOutputInfo, aclAxis);
-
 }
 
 NeonMergerWorkload::NeonMergerWorkload(
 const MergerQueueDescriptor& descriptor, const WorkloadInfo& info)
         : BaseWorkload<MergerQueueDescriptor>(descriptor, info)
 {
-    m_Execute = true;
+    bool allInputsAreSubtensors = true;
 
-    unsigned int innerAxisOrder = descriptor.m_Parameters.GetNumDimensions() - descriptor.m_Parameters.GetConcatAxis();
+    // Check that all inputs are sub-tensors
+    for (auto input : descriptor.m_Inputs)
+    {
+        if (!input->GetParent())
+        {
+            // Non sub-tensor input found so we need to execute the merger function
+            allInputsAreSubtensors = false;
+            break;
+        }
+    }
 
-    if (innerAxisOrder != 1)
+    if (allInputsAreSubtensors)
    {
-        m_Execute = false;
+        // Can skip configuring the merger function since it's not executed
         return;
     }
 
     std::vector<arm_compute::ITensor*> aclInputs;
-    arm_compute::DataLayout aclDataLayout = ConvertDataLayout(armnn::DataLayout::NCHW);
     for (auto input : m_Data.m_Inputs)
     {
         arm_compute::ITensor& aclInput  = boost::polymorphic_pointer_downcast<INeonTensorHandle>(input)->GetTensor();
-        aclInput.info()->set_data_layout(aclDataLayout);
         aclInputs.emplace_back(&aclInput);
     }
     arm_compute::ITensor& output = boost::polymorphic_pointer_downcast<INeonTensorHandle>(
-                                                                       m_Data.m_Outputs[0])->GetTensor();
+        m_Data.m_Outputs[0])->GetTensor();
 
-    arm_compute::DataLayoutDimension aclAxis = arm_compute::DataLayoutDimension::WIDTH;
+    // Create the layer function
+    m_Layer.reset(new arm_compute::NEConcatenateLayer());
 
-    auto layer = std::make_unique<arm_compute::NEConcatenateLayer>();
-    layer->configure(aclInputs, &output, aclAxis);
-    m_Layer.reset(layer.release());
+    // Configure input and output tensors
+    size_t aclAxis = CalcAxis(descriptor.m_Parameters);
+    m_Layer->configure(aclInputs, &output, aclAxis);
 
+    // Prepare
     m_Layer->prepare();
 }
 
 void NeonMergerWorkload::Execute() const
 {
-    if (m_Execute)
+    if (m_Layer)
     {
         ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonMergerWorkload_Execute");
         m_Layer->run();
diff --git a/src/backends/neon/workloads/NeonMergerWorkload.hpp b/src/backends/neon/workloads/NeonMergerWorkload.hpp
index 3432c626cb..1dd9309aff 100644
--- a/src/backends/neon/workloads/NeonMergerWorkload.hpp
+++ b/src/backends/neon/workloads/NeonMergerWorkload.hpp
@@ -9,7 +9,8 @@
 #include <backendsCommon/Workload.hpp>
 #include <backendsCommon/WorkloadData.hpp>
-#
+#include <arm_compute/runtime/NEON/functions/NEConcatenateLayer.h>
+
 #include <memory>
 
 namespace armnn
 {
@@ -27,9 +28,7 @@ public:
     void Execute() const override;
 
 private:
-    std::unique_ptr<arm_compute::IFunction> m_Layer;
-    bool m_Execute;
-
+    std::unique_ptr<arm_compute::NEConcatenateLayer> m_Layer;
 };
 
 } //namespace armnn
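
A note on the axis arithmetic above: ArmNN's OriginsDescriptor counts the concatenation axis from the outermost dimension (axis 0 is the batch in NCHW), while arm_compute's NEConcatenateLayer takes the axis counted from the innermost dimension, which is why CalcAxis() computes (GetNumDimensions() - GetConcatAxis()) - 1. Below is a minimal standalone sketch of that mapping, outside the ArmNN sources (CalcInnerAxis is an illustrative name, not ArmNN API):

    #include <cstddef>

    // Same rule as the patch's CalcAxis(): convert an axis counted from the
    // outermost dimension (ArmNN) to one counted from the innermost (ACL).
    constexpr std::size_t CalcInnerAxis(std::size_t numDimensions, std::size_t concatAxis)
    {
        return (numDimensions - concatAxis) - 1;
    }

    // For a 4D NCHW tensor the ArmNN axes are N=0, C=1, H=2, W=3.
    static_assert(CalcInnerAxis(4, 3) == 0, "W maps to ACL axis 0");
    static_assert(CalcInnerAxis(4, 2) == 1, "H maps to ACL axis 1");
    static_assert(CalcInnerAxis(4, 1) == 2, "C maps to ACL axis 2");
    static_assert(CalcInnerAxis(4, 0) == 3, "N (batch) maps to ACL axis 3");

    int main() { return 0; }

This is also why IsMergerSupported() distinguishes concatInnerAxis < 3 (width, height or channels, validated against NEConcatenateLayer) from concatInnerAxis == 3 (batch, served by the sub-tensor path).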
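The constructor change encodes the commit-message rule that the sub-tensor optimization is only taken when ALL inputs are sub-tensors; if even one input owns its own storage, the concatenate function must actually copy data. A rough sketch of the same gate, where TensorHandle is a hypothetical stand-in for ArmNN's ITensorHandle and only the GetParent() contract is assumed (a sub-tensor reports its parent, a stand-alone tensor reports nullptr):

    #include <algorithm>
    #include <vector>

    // Hypothetical stand-in for ArmNN's ITensorHandle.
    struct TensorHandle
    {
        TensorHandle* parent = nullptr;
        TensorHandle* GetParent() const { return parent; }
    };

    // True when the NEON concatenate function must be configured and run;
    // false when every input already aliases its slice of the output and
    // the workload's Execute() can be a no-op.
    bool NeedsConcatFunction(const std::vector<TensorHandle*>& inputs)
    {
        return !std::all_of(inputs.begin(), inputs.end(),
                            [](const TensorHandle* h) { return h->GetParent() != nullptr; });
    }

    int main()
    {
        TensorHandle parent;
        TensorHandle sub1{&parent};
        TensorHandle sub2{&parent};
        TensorHandle standalone;

        std::vector<TensorHandle*> allSubs{&sub1, &sub2};     // skip: nothing to copy
        std::vector<TensorHandle*> mixed{&sub1, &standalone}; // must run the concat
        return (!NeedsConcatFunction(allSubs) && NeedsConcatFunction(mixed)) ? 0 : 1;
    }

Leaving m_Layer unset in the skip case is what lets Execute() test "if (m_Layer)" in place of the old m_Execute flag.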
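Finally, the batch-axis branch of IsMergerSupported() rejects inputs for which output.IsTypeSpaceMatch(*input) fails, since a sub-tensor reinterprets a slice of the parent's raw buffer in place. A loose model of that condition, assuming "type space match" means same element type and same quantization scale/offset (the names below are illustrative, not ArmNN's):

    #include <cstdint>

    enum class DataType { Float32, QAsymmU8 };

    struct TypeSpace
    {
        DataType type;
        float    quantScale;
        int32_t  quantOffset;
    };

    // A sub-tensor view is only legal if parent and child agree on how the
    // underlying bytes are interpreted.
    bool TypeSpaceMatch(const TypeSpace& a, const TypeSpace& b)
    {
        return a.type == b.type
            && a.quantScale == b.quantScale
            && a.quantOffset == b.quantOffset;
    }

    int main()
    {
        TypeSpace out{DataType::QAsymmU8, 0.5f, 10};
        TypeSpace ok{DataType::QAsymmU8, 0.5f, 10};
        TypeSpace bad{DataType::QAsymmU8, 0.25f, 10}; // different scale: no sub-tensor
        return (TypeSpaceMatch(out, ok) && !TypeSpaceMatch(out, bad)) ? 0 : 1;
    }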