Diffstat (limited to 'src/backends')
-rw-r--r--  src/backends/cl/ClLayerSupport.cpp                 | 31
-rw-r--r--  src/backends/cl/ClWorkloadFactory.cpp              |  6
-rw-r--r--  src/backends/cl/workloads/ClMergerWorkload.cpp     | 51
-rw-r--r--  src/backends/cl/workloads/ClMergerWorkload.hpp     |  3
-rw-r--r--  src/backends/neon/NeonLayerSupport.cpp             | 38
-rw-r--r--  src/backends/neon/NeonWorkloadFactory.cpp          |  6
-rw-r--r--  src/backends/neon/workloads/NeonMergerWorkload.cpp | 52
-rw-r--r--  src/backends/neon/workloads/NeonMergerWorkload.hpp |  7
8 files changed, 133 insertions, 61 deletions
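A note on the axis arithmetic that recurs throughout this diff: armnn's OriginsDescriptor counts the concatenation axis from the outermost dimension, while the Compute Library's concatenate functions count from the innermost, so both backends now translate with (numDimensions - concatAxis) - 1. Below is a minimal worked sketch of that mapping, mirroring the CalcAxis helper added in the workloads; the standalone signature and sample values here are illustrative only.

    #include <cassert>
    #include <cstddef>

    // Same arithmetic as the CalcAxis helper added to the merger workloads:
    // armnn counts axes from the outermost dimension, ACL from the innermost.
    size_t CalcAxis(unsigned int numDimensions, unsigned int concatAxis)
    {
        return (numDimensions - concatAxis) - 1;
    }

    int main()
    {
        // 4D NCHW tensor: armnn axes are N=0, C=1, H=2, W=3.
        assert(CalcAxis(4, 3) == 0); // concat along W -> ACL innermost axis
        assert(CalcAxis(4, 1) == 2); // concat along C
        assert(CalcAxis(4, 0) == 3); // concat along N, handled via sub-tensors instead
        return 0;
    }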
diff --git a/src/backends/cl/ClLayerSupport.cpp b/src/backends/cl/ClLayerSupport.cpp
index cfc0f11d25..a5c5f2bd9d 100644
--- a/src/backends/cl/ClLayerSupport.cpp
+++ b/src/backends/cl/ClLayerSupport.cpp
@@ -416,7 +416,14 @@ bool ClLayerSupport::IsMergerSupported(const std::vector<const TensorInfo*> inpu
                                        const OriginsDescriptor& descriptor,
                                        Optional<std::string&> reasonIfUnsupported) const
 {
-    if(descriptor.GetNumDimensions() - descriptor.GetConcatAxis() == 1)
+    if (descriptor.GetNumDimensions() <= descriptor.GetConcatAxis())
+    {
+        SetValueChecked(reasonIfUnsupported, "Cl Merger: Concat axis > Number of dimensions.");
+        return false;
+    }
+
+    unsigned int concatInnerAxis = (descriptor.GetNumDimensions() - descriptor.GetConcatAxis()) - 1;
+    if(concatInnerAxis < 3) // Width, height, or channels
     {
         FORWARD_WORKLOAD_VALIDATE_FUNC(ClMergerWorkloadValidate,
                                        reasonIfUnsupported,
@@ -424,12 +431,24 @@ bool ClLayerSupport::IsMergerSupported(const std::vector<const TensorInfo*> inpu
                                        output,
                                        descriptor);
     }
-    else
+    else if (concatInnerAxis == 3)
+    {
+        // We rely on the sub-tensor optimization to handle the batch dimension for 4D tensors. If we can't use
+        // sub-tensors for this then we can't support it. Here is where we check that the sub-tensors will work.
+        for (auto& input : inputs)
+        {
+            if (input && !output.IsTypeSpaceMatch(*input)) // Cannot use sub-tensors if the types are not same space
+            {
+                SetValueChecked(reasonIfUnsupported, "Cl Merger: Types and quantization parameters must match.");
+                return false;
+            }
+        }
+        return true; // Sub-tensors support concat along batch
+    }
+    else // > 4 dimensions not supported.
     {
-        return IsSupportedForDataTypeCl(reasonIfUnsupported,
-                                        inputs[0]->GetDataType(),
-                                        &TrueFunc<>,
-                                        &TrueFunc<>);
+        SetValueChecked(reasonIfUnsupported, "Cl Merger: Maximum of 4 dimensions supported.");
+        return false;
     }
 }
diff --git a/src/backends/cl/ClWorkloadFactory.cpp b/src/backends/cl/ClWorkloadFactory.cpp
index d41a7e5997..e4097a1b50 100644
--- a/src/backends/cl/ClWorkloadFactory.cpp
+++ b/src/backends/cl/ClWorkloadFactory.cpp
@@ -113,6 +113,12 @@ std::unique_ptr<ITensorHandle> ClWorkloadFactory::CreateSubTensorHandle(ITensorH
         coords.set(i, boost::numeric_cast<int>(subTensorOrigin[revertedIndex]));
     }
 
+    const arm_compute::TensorShape parentShape = armcomputetensorutils::BuildArmComputeTensorShape(parent.GetShape());
+    if (!::arm_compute::error_on_invalid_subtensor(__func__, __FILE__, __LINE__, parentShape, coords, shape))
+    {
+        return nullptr;
+    }
+
     return std::make_unique<ClSubTensorHandle>(
         boost::polymorphic_downcast<IClTensorHandle*>(&parent), shape, coords);
 }
diff --git a/src/backends/cl/workloads/ClMergerWorkload.cpp b/src/backends/cl/workloads/ClMergerWorkload.cpp
index e06d8c51f5..610acb91fb 100644
--- a/src/backends/cl/workloads/ClMergerWorkload.cpp
+++ b/src/backends/cl/workloads/ClMergerWorkload.cpp
@@ -9,16 +9,25 @@
 #include <cl/ClTensorHandle.hpp>
 #include <cl/ClLayerSupport.hpp>
 
+#include <arm_compute/core/Types.h>
+
 #include <boost/polymorphic_pointer_cast.hpp>
 
 namespace armnn
 {
 using namespace armcomputetensorutils;
 
+namespace
+{
+size_t CalcAxis(const MergerDescriptor& desc)
+{
+    return (desc.GetNumDimensions() - desc.GetConcatAxis()) - 1;
+}
+} //namespace
+
 arm_compute::Status ClMergerWorkloadValidate(const std::vector<const TensorInfo*>& inputs,
                                              const TensorInfo& output,
                                              const MergerDescriptor& descriptor)
-
 {
     std::vector<arm_compute::TensorInfo> aclInputs;
     for (const TensorInfo* input : inputs)
@@ -27,59 +36,65 @@ arm_compute::Status ClMergerWorkloadValidate(const std::vector<const TensorInfo*
         aclInputs.emplace_back(aclInputInfo);
     }
     const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output);
-    arm_compute::DataLayoutDimension aclAxis = arm_compute::DataLayoutDimension::WIDTH;
-
     std::vector<arm_compute::ITensorInfo*> aclInputPtrs;
     for (arm_compute::ITensorInfo& input : aclInputs)
     {
         aclInputPtrs.emplace_back(&input);
     }
 
+    size_t aclAxis = CalcAxis(descriptor);
     return arm_compute::CLConcatenateLayer::validate(aclInputPtrs, &aclOutputInfo, aclAxis);
-
 }
 
 ClMergerWorkload::ClMergerWorkload(const MergerQueueDescriptor& descriptor, const WorkloadInfo& info)
 : BaseWorkload<MergerQueueDescriptor>(descriptor, info)
 {
-    m_Execute = true;
+    bool allInputsAreSubtensors = true;
 
-    unsigned int innerAxisOrder = descriptor.m_Parameters.GetNumDimensions() - descriptor.m_Parameters.GetConcatAxis();
+    // Check that all inputs are sub-tensors
+    for (auto input : descriptor.m_Inputs)
+    {
+        if (!input->GetParent())
+        {
+            // Non sub-tensor input found so we need to execute the merger function
+            allInputsAreSubtensors = false;
+            break;
+        }
+    }
 
-    if (innerAxisOrder != 1)
+    if (allInputsAreSubtensors)
     {
-        m_Execute = false;
+        // Can skip configuring the merger function since it's not executed
         return;
     }
 
     std::vector<arm_compute::ICLTensor *> aclInputs;
-    arm_compute::DataLayout aclDataLayout = ConvertDataLayout(armnn::DataLayout::NCHW);
     for (auto input : m_Data.m_Inputs)
     {
         arm_compute::ICLTensor& aclInput  = boost::polymorphic_pointer_downcast<IClTensorHandle>(input)->GetTensor();
-        aclInput.info()->set_data_layout(aclDataLayout);
         aclInputs.emplace_back(&aclInput);
     }
     arm_compute::ICLTensor& output = boost::polymorphic_pointer_downcast<IClTensorHandle>(
                                                                          m_Data.m_Outputs[0])->GetTensor();
-    output.info()->set_data_layout(aclDataLayout);
-
-    arm_compute::DataLayoutDimension aclAxis = arm_compute::DataLayoutDimension::WIDTH;
 
-    m_Layer.configure(aclInputs, &output, aclAxis);
+    // Create the layer function
+    m_Layer.reset(new arm_compute::CLConcatenateLayer());
 
-    m_Layer.prepare();
+    // Configure input and output tensors
+    size_t aclAxis = CalcAxis(descriptor.m_Parameters);
+    m_Layer->configure(aclInputs, &output, aclAxis);
 
+    // Prepare
+    m_Layer->prepare();
 }
 
 void ClMergerWorkload::Execute() const
 {
-    if (m_Execute)
+    if (m_Layer)
     {
         ARMNN_SCOPED_PROFILING_EVENT_CL("ClMergerWorkload_Execute");
-        m_Layer.run();
+        m_Layer->run();
     }
-
 }
 
 } //namespace armnn
\ No newline at end of file
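An aside on the pattern used in the workload above: replacing the m_Execute flag with a null-checkable std::unique_ptr lets the pointer itself record whether the function was ever configured. A standalone sketch of the idea, assuming a hypothetical ConcatFunction in place of arm_compute::CLConcatenateLayer:

    #include <memory>

    // Hypothetical stand-in for the arm_compute function object.
    struct ConcatFunction
    {
        void run() {}
    };

    class MergerWorkloadSketch
    {
    public:
        explicit MergerWorkloadSketch(bool allInputsAreSubtensors)
        {
            if (allInputsAreSubtensors)
            {
                // Leave m_Layer null: the concat is already realized by the
                // inputs writing directly into sub-tensors of the output.
                return;
            }
            m_Layer = std::make_unique<ConcatFunction>();
        }

        void Execute() const
        {
            if (m_Layer) // null means there is nothing to run
            {
                m_Layer->run();
            }
        }

    private:
        std::unique_ptr<ConcatFunction> m_Layer;
    };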
diff --git a/src/backends/cl/workloads/ClMergerWorkload.hpp b/src/backends/cl/workloads/ClMergerWorkload.hpp
index 8189a1b24a..1c2f823b7a 100644
--- a/src/backends/cl/workloads/ClMergerWorkload.hpp
+++ b/src/backends/cl/workloads/ClMergerWorkload.hpp
@@ -24,8 +24,7 @@ public:
     void Execute() const override;
 
 private:
-    mutable arm_compute::CLConcatenateLayer m_Layer;
-    bool m_Execute;
+    mutable std::unique_ptr<arm_compute::CLConcatenateLayer> m_Layer;
 };
 
 } //namespace armnn
diff --git a/src/backends/neon/NeonLayerSupport.cpp b/src/backends/neon/NeonLayerSupport.cpp
index 46a7e6f79f..898660cc91 100644
--- a/src/backends/neon/NeonLayerSupport.cpp
+++ b/src/backends/neon/NeonLayerSupport.cpp
@@ -52,10 +52,7 @@ bool IsNeonBackendSupported(Optional<std::string&> reasonIfUnsupported)
 #if defined(ARMCOMPUTENEON_ENABLED)
     return true;
 #else
-    if (reasonIfUnsupported)
-    {
-        reasonIfUnsupported.value() = "The armnn library has been built without NEON support";
-    }
+    SetValueChecked(reasonIfUnsupported, "The armnn library has been built without NEON support");
     return false;
 #endif
 }
@@ -304,7 +301,14 @@ bool NeonLayerSupport::IsMergerSupported(const std::vector<const TensorInfo*> in
                                          const OriginsDescriptor& descriptor,
                                          Optional<std::string&> reasonIfUnsupported) const
 {
-    if(descriptor.GetNumDimensions() - descriptor.GetConcatAxis() == 1)
+    if (descriptor.GetNumDimensions() <= descriptor.GetConcatAxis())
+    {
+        SetValueChecked(reasonIfUnsupported, "Neon Merger: Concat axis > Number of dimensions.");
+        return false;
+    }
+
+    unsigned int concatInnerAxis = (descriptor.GetNumDimensions() - descriptor.GetConcatAxis()) - 1;
+    if(concatInnerAxis < 3) // Width, height, or channels
     {
         FORWARD_WORKLOAD_VALIDATE_FUNC(NeonMergerWorkloadValidate,
                                        reasonIfUnsupported,
@@ -312,13 +316,23 @@ bool NeonLayerSupport::IsMergerSupported(const std::vector<const TensorInfo*> in
                                        output,
                                        descriptor);
     }
-    else
-    {
-        return IsSupportedForDataTypeNeon(reasonIfUnsupported,
-                                          inputs[0]->GetDataType(),
-                                          &TrueFunc<>,
-                                          &TrueFunc<>);
-    }
+    else if (concatInnerAxis == 3)
+    {
+        for (auto& input : inputs)
+        {
+            if (input && !output.IsTypeSpaceMatch(*input)) // Cannot use sub-tensors if the types are not same space
+            {
+                SetValueChecked(reasonIfUnsupported, "Neon Merger: Types and quantization parameters must match.");
+                return false;
+            }
+        }
+        return true; // Sub-tensors support concat along batch
+    }
+    else // > 4 dimensions not supported.
+    {
+        SetValueChecked(reasonIfUnsupported, "Neon Merger: Maximum of 4 dimensions supported.");
+        return false;
+    }
 }
 
 bool NeonLayerSupport::IsMinimumSupported(const TensorInfo& input0,
diff --git a/src/backends/neon/NeonWorkloadFactory.cpp b/src/backends/neon/NeonWorkloadFactory.cpp
index 101e59d0c4..8db5f9ad84 100644
--- a/src/backends/neon/NeonWorkloadFactory.cpp
+++ b/src/backends/neon/NeonWorkloadFactory.cpp
@@ -61,6 +61,12 @@ std::unique_ptr<ITensorHandle> NeonWorkloadFactory::CreateSubTensorHandle(ITenso
         coords.set(i, boost::numeric_cast<int>(subTensorOrigin[revertedIndex]));
     }
 
+    const arm_compute::TensorShape parentShape = armcomputetensorutils::BuildArmComputeTensorShape(parent.GetShape());
+    if (!::arm_compute::error_on_invalid_subtensor(__func__, __FILE__, __LINE__, parentShape, coords, shape))
+    {
+        return nullptr;
+    }
+
     return std::make_unique<NeonSubTensorHandle>(
         boost::polymorphic_downcast<INeonTensorHandle*>(&parent), shape, coords);
 }
diff --git a/src/backends/neon/workloads/NeonMergerWorkload.cpp b/src/backends/neon/workloads/NeonMergerWorkload.cpp
index be096b4b25..64d4d93d97 100644
--- a/src/backends/neon/workloads/NeonMergerWorkload.cpp
+++ b/src/backends/neon/workloads/NeonMergerWorkload.cpp
@@ -11,12 +11,20 @@
 #include <backendsCommon/CpuTensorHandle.hpp>
 #include <neon/NeonTensorHandle.hpp>
 
-#include <arm_compute/runtime/NEON/functions/NEConcatenateLayer.h>
+
 
 namespace armnn
 {
 using namespace armcomputetensorutils;
 
+namespace
+{
+size_t CalcAxis(const armnn::MergerDescriptor& desc)
+{
+    return (desc.GetNumDimensions() - desc.GetConcatAxis()) - 1;
+}
+} //namespace
+
 arm_compute::Status NeonMergerWorkloadValidate(const std::vector<const TensorInfo*>& inputs,
                                                const TensorInfo& output,
                                                const MergerDescriptor& descriptor)
@@ -25,60 +33,66 @@ arm_compute::Status NeonMergerWorkloadValidate(const std::vector<const TensorInf
     std::vector<arm_compute::TensorInfo> aclInputs;
     for (const TensorInfo* input : inputs)
     {
-       arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(*input, armnn::DataLayout::NCHW);
-       aclInputs.emplace_back(aclInputInfo);
+        arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(*input, armnn::DataLayout::NCHW);
+        aclInputs.emplace_back(aclInputInfo);
     }
     const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output);
-    arm_compute::DataLayoutDimension aclAxis = arm_compute::DataLayoutDimension::WIDTH;
-
     std::vector<arm_compute::ITensorInfo*> aclInputPtrs;
     for (arm_compute::ITensorInfo& input : aclInputs)
     {
         aclInputPtrs.emplace_back(&input);
     }
 
+    size_t aclAxis = CalcAxis(descriptor);
     return arm_compute::NEConcatenateLayer::validate(aclInputPtrs, &aclOutputInfo, aclAxis);
-
 }
 
 NeonMergerWorkload::NeonMergerWorkload( const MergerQueueDescriptor& descriptor, const WorkloadInfo& info)
         : BaseWorkload<MergerQueueDescriptor>(descriptor, info)
 {
-    m_Execute = true;
+    bool allInputsAreSubtensors = true;
 
-    unsigned int innerAxisOrder = descriptor.m_Parameters.GetNumDimensions() - descriptor.m_Parameters.GetConcatAxis();
+    // Check that all inputs are sub-tensors
+    for (auto input : descriptor.m_Inputs)
+    {
+        if (!input->GetParent())
+        {
+            // Non sub-tensor input found so we need to execute the merger function
+            allInputsAreSubtensors = false;
+            break;
+        }
+    }
 
-    if (innerAxisOrder != 1)
+    if (allInputsAreSubtensors)
    {
-        m_Execute = false;
+        // Can skip configuring the merger function since it's not executed
        return;
    }
 
     std::vector<arm_compute::ITensor *> aclInputs;
-    arm_compute::DataLayout aclDataLayout = ConvertDataLayout(armnn::DataLayout::NCHW);
     for (auto input : m_Data.m_Inputs)
     {
         arm_compute::ITensor& aclInput  = boost::polymorphic_pointer_downcast<INeonTensorHandle>(input)->GetTensor();
-        aclInput.info()->set_data_layout(aclDataLayout);
         aclInputs.emplace_back(&aclInput);
     }
     arm_compute::ITensor& output = boost::polymorphic_pointer_downcast<INeonTensorHandle>(
-        m_Data.m_Outputs[0])->GetTensor();
-    output.info()->set_data_layout(aclDataLayout);
+                                                                       m_Data.m_Outputs[0])->GetTensor();
 
-    arm_compute::DataLayoutDimension aclAxis = arm_compute::DataLayoutDimension::WIDTH;
+    // Create the layer function
+    m_Layer.reset(new arm_compute::NEConcatenateLayer());
 
-    auto layer = std::make_unique<arm_compute::NEConcatenateLayer>();
-    layer->configure(aclInputs, &output, aclAxis);
-    m_Layer.reset(layer.release());
+    // Configure input and output tensors
+    size_t aclAxis = CalcAxis(descriptor.m_Parameters);
+    m_Layer->configure(aclInputs, &output, aclAxis);
 
+    // Prepare
     m_Layer->prepare();
 }
 
 void NeonMergerWorkload::Execute() const
 {
-    if (m_Execute)
+    if (m_Layer)
     {
         ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonMergerWorkload_Execute");
         m_Layer->run();
diff --git a/src/backends/neon/workloads/NeonMergerWorkload.hpp b/src/backends/neon/workloads/NeonMergerWorkload.hpp
index 3432c626cb..1dd9309aff 100644
--- a/src/backends/neon/workloads/NeonMergerWorkload.hpp
+++ b/src/backends/neon/workloads/NeonMergerWorkload.hpp
@@ -9,7 +9,8 @@
 
 #include <arm_compute/core/Error.h>
 #include <arm_compute/runtime/IFunction.h>
-#
+#include <arm_compute/runtime/NEON/functions/NEConcatenateLayer.h>
+
 #include <memory>
 
 namespace armnn
@@ -27,9 +28,7 @@ public:
     void Execute() const override;
 
 private:
-    std::unique_ptr<arm_compute::IFunction> m_Layer;
-    bool m_Execute;
-
+    std::unique_ptr<arm_compute::NEConcatenateLayer> m_Layer;
 };
 
 } //namespace armnn
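Taken together, both backends' IsMergerSupported now partition requests by the inner concat axis. A condensed sketch of the shared decision logic follows; ClassifyMerger and the Support enum are illustrative names, and the real code additionally validates tensor infos through ACL for the function path and checks IsTypeSpaceMatch on every input for the sub-tensor path:

    #include <string>

    enum class Support { AclFunction, SubTensors, Unsupported };

    // Condensed version of the branching added to Cl/Neon IsMergerSupported.
    Support ClassifyMerger(unsigned int numDimensions, unsigned int concatAxis, std::string& reason)
    {
        if (numDimensions <= concatAxis)
        {
            reason = "Concat axis > Number of dimensions.";
            return Support::Unsupported;
        }
        unsigned int concatInnerAxis = (numDimensions - concatAxis) - 1;
        if (concatInnerAxis < 3)  // width, height or channels: ACL concatenate function
        {
            return Support::AclFunction;
        }
        if (concatInnerAxis == 3) // batch: realized purely with sub-tensors
        {
            return Support::SubTensors;
        }
        reason = "Maximum of 4 dimensions supported."; // > 4 dimensions
        return Support::Unsupported;
    }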