From a023c40a7644f0b32f8f593b9a3614c92b5f933d Mon Sep 17 00:00:00 2001
From: Kevin May
Date: Thu, 12 Dec 2019 17:28:05 +0000
Subject: IVGCVSW-4262 Use ACL Permute and Reshape Validate function in Neon
 and CL

!android-nn-driver:2642

Signed-off-by: Kevin May
Change-Id: Ibabb73c0ae0df2e530a68398f75c76e6b80c0701
---
 include/armnn/ILayerSupport.hpp                     |  1 +
 src/armnn/LayerSupport.cpp                          |  3 ++-
 src/backends/backendsCommon/LayerSupportBase.cpp    |  1 +
 src/backends/backendsCommon/LayerSupportBase.hpp    |  1 +
 src/backends/backendsCommon/WorkloadFactory.cpp     |  2 ++
 src/backends/cl/ClLayerSupport.cpp                  |  6 +++---
 src/backends/cl/ClLayerSupport.hpp                  |  1 +
 src/backends/cl/workloads/ClPermuteWorkload.cpp     |  2 +-
 src/backends/cl/workloads/ClReshapeWorkload.cpp     |  9 +++++++++
 src/backends/cl/workloads/ClReshapeWorkload.hpp     |  3 +++
 src/backends/neon/NeonLayerSupport.cpp              | 10 ++++++----
 src/backends/neon/NeonLayerSupport.hpp              |  1 +
 src/backends/neon/workloads/NeonReshapeWorkload.cpp |  9 +++++++++
 src/backends/neon/workloads/NeonReshapeWorkload.hpp |  5 +++++
 src/backends/reference/RefLayerSupport.cpp          |  2 ++
 src/backends/reference/RefLayerSupport.hpp          |  1 +
 16 files changed, 48 insertions(+), 9 deletions(-)

diff --git a/include/armnn/ILayerSupport.hpp b/include/armnn/ILayerSupport.hpp
index 1615d3e24e..d1bbf99d5e 100644
--- a/include/armnn/ILayerSupport.hpp
+++ b/include/armnn/ILayerSupport.hpp
@@ -285,6 +285,7 @@ public:
                                     Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
 
     virtual bool IsReshapeSupported(const TensorInfo& input,
+                                    const TensorInfo& output,
                                     const ReshapeDescriptor& descriptor,
                                     Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
 
diff --git a/src/armnn/LayerSupport.cpp b/src/armnn/LayerSupport.cpp
index 08d91fc20b..3c244b0454 100644
--- a/src/armnn/LayerSupport.cpp
+++ b/src/armnn/LayerSupport.cpp
@@ -530,11 +530,12 @@ bool IsPreluSupported(const BackendId& backend,
 
 bool IsReshapeSupported(const BackendId& backend,
                         const TensorInfo& input,
+                        const TensorInfo& output,
                         const ReshapeDescriptor& descriptor,
                         char* reasonIfUnsupported,
                         size_t reasonIfUnsupportedMaxLength)
 {
-    FORWARD_LAYER_SUPPORT_FUNC(backend, IsReshapeSupported, input, descriptor);
+    FORWARD_LAYER_SUPPORT_FUNC(backend, IsReshapeSupported, input, output, descriptor);
 }
 
 bool IsResizeSupported(const BackendId& backend,
diff --git a/src/backends/backendsCommon/LayerSupportBase.cpp b/src/backends/backendsCommon/LayerSupportBase.cpp
index b19356f955..449b8098d6 100644
--- a/src/backends/backendsCommon/LayerSupportBase.cpp
+++ b/src/backends/backendsCommon/LayerSupportBase.cpp
@@ -444,6 +444,7 @@ bool LayerSupportBase::IsQuantizedLstmSupported(const TensorInfo& /*input*/,
 }
 
 bool LayerSupportBase::IsReshapeSupported(const TensorInfo& /*input*/,
+                                          const TensorInfo& /*output*/,
                                           const ReshapeDescriptor& /*descriptor*/,
                                           Optional<std::string&> reasonIfUnsupported) const
 {
diff --git a/src/backends/backendsCommon/LayerSupportBase.hpp b/src/backends/backendsCommon/LayerSupportBase.hpp
index 7a65eb55ed..459ac03b6f 100644
--- a/src/backends/backendsCommon/LayerSupportBase.hpp
+++ b/src/backends/backendsCommon/LayerSupportBase.hpp
@@ -271,6 +271,7 @@ public:
                             Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
     bool IsReshapeSupported(const TensorInfo& input,
+                            const TensorInfo& output,
                             const ReshapeDescriptor& descriptor,
                             Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
diff --git a/src/backends/backendsCommon/WorkloadFactory.cpp b/src/backends/backendsCommon/WorkloadFactory.cpp
index acb73b589d..5671761723 100644
--- a/src/backends/backendsCommon/WorkloadFactory.cpp
+++ b/src/backends/backendsCommon/WorkloadFactory.cpp
@@ -795,7 +795,9 @@ bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId,
         {
             auto cLayer = boost::polymorphic_downcast<const ReshapeLayer*>(&layer);
             const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
+            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
             result = layerSupportObject->IsReshapeSupported(OverrideDataType(input, dataType),
+                                                            OverrideDataType(output, dataType),
                                                             cLayer->GetParameters(),
                                                             reason);
             break;
diff --git a/src/backends/cl/ClLayerSupport.cpp b/src/backends/cl/ClLayerSupport.cpp
index f8cc5074b3..ffe68a33d0 100644
--- a/src/backends/cl/ClLayerSupport.cpp
+++ b/src/backends/cl/ClLayerSupport.cpp
@@ -46,6 +46,7 @@
 #include "workloads/ClPermuteWorkload.hpp"
 #include "workloads/ClPooling2dWorkload.hpp"
 #include "workloads/ClPreluWorkload.hpp"
+#include "workloads/ClReshapeWorkload.hpp"
 #include "workloads/ClResizeWorkload.hpp"
 #include "workloads/ClRsqrtWorkload.hpp"
 #include "workloads/ClQuantizedLstmWorkload.hpp"
@@ -670,13 +671,12 @@ bool ClLayerSupport::IsQuantizeSupported(const TensorInfo& input,
 }
 
 bool ClLayerSupport::IsReshapeSupported(const TensorInfo& input,
+                                        const TensorInfo& output,
                                         const ReshapeDescriptor& descriptor,
                                         Optional<std::string&> reasonIfUnsupported) const
 {
-    ignore_unused(input);
     ignore_unused(descriptor);
-    ignore_unused(reasonIfUnsupported);
-    return true;
+    FORWARD_WORKLOAD_VALIDATE_FUNC(ClReshapeWorkloadValidate, reasonIfUnsupported, input, output);
 }
 
 bool ClLayerSupport::IsResizeSupported(const TensorInfo& input,
diff --git a/src/backends/cl/ClLayerSupport.hpp b/src/backends/cl/ClLayerSupport.hpp
index 9371717013..819d086cb4 100644
--- a/src/backends/cl/ClLayerSupport.hpp
+++ b/src/backends/cl/ClLayerSupport.hpp
@@ -216,6 +216,7 @@ public:
                             Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
     bool IsReshapeSupported(const TensorInfo& input,
+                            const TensorInfo& output,
                             const ReshapeDescriptor& descriptor,
                             Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
diff --git a/src/backends/cl/workloads/ClPermuteWorkload.cpp b/src/backends/cl/workloads/ClPermuteWorkload.cpp
index dd495c8288..41bce1d4fa 100644
--- a/src/backends/cl/workloads/ClPermuteWorkload.cpp
+++ b/src/backends/cl/workloads/ClPermuteWorkload.cpp
@@ -23,7 +23,7 @@ arm_compute::Status ClPermuteWorkloadValidate(const TensorInfo& input,
     const armnn::PermutationVector& mappings = descriptor.m_DimMappings;
 
     return arm_compute::CLPermute::validate(&aclInputInfo, &aclOutputInfo,
-        armcomputetensorutils::BuildArmComputePermutationVector(mappings));
+                                            armcomputetensorutils::BuildArmComputePermutationVector(mappings));
 }
 
 ClPermuteWorkload::ClPermuteWorkload(const PermuteQueueDescriptor& descriptor,
diff --git a/src/backends/cl/workloads/ClReshapeWorkload.cpp b/src/backends/cl/workloads/ClReshapeWorkload.cpp
index db1702a74f..d752290444 100644
--- a/src/backends/cl/workloads/ClReshapeWorkload.cpp
+++ b/src/backends/cl/workloads/ClReshapeWorkload.cpp
@@ -12,6 +12,15 @@
 namespace armnn
 {
 
+arm_compute::Status ClReshapeWorkloadValidate(const TensorInfo& input,
+                                              const TensorInfo& output)
+{
+    const arm_compute::TensorInfo aclInputInfo = armcomputetensorutils::BuildArmComputeTensorInfo(input);
+    const arm_compute::TensorInfo aclOutputInfo = armcomputetensorutils::BuildArmComputeTensorInfo(output);
+
+    return arm_compute::CLReshapeLayer::validate(&aclInputInfo, &aclOutputInfo);
+}
+
 ClReshapeWorkload::ClReshapeWorkload(const ReshapeQueueDescriptor& descriptor, const WorkloadInfo& info)
     : BaseWorkload<ReshapeQueueDescriptor>(descriptor, info)
 {
diff --git a/src/backends/cl/workloads/ClReshapeWorkload.hpp b/src/backends/cl/workloads/ClReshapeWorkload.hpp
index a7b464e719..62f5fccec8 100644
--- a/src/backends/cl/workloads/ClReshapeWorkload.hpp
+++ b/src/backends/cl/workloads/ClReshapeWorkload.hpp
@@ -12,6 +12,9 @@
 namespace armnn
 {
 
+arm_compute::Status ClReshapeWorkloadValidate(const TensorInfo& input,
+                                              const TensorInfo& output);
+
 class ClReshapeWorkload : public BaseWorkload<ReshapeQueueDescriptor>
 {
 public:
diff --git a/src/backends/neon/NeonLayerSupport.cpp b/src/backends/neon/NeonLayerSupport.cpp
index 6ca69f4841..b8725be005 100644
--- a/src/backends/neon/NeonLayerSupport.cpp
+++ b/src/backends/neon/NeonLayerSupport.cpp
@@ -47,6 +47,7 @@
 #include "workloads/NeonPreluWorkload.hpp"
 #include "workloads/NeonQuantizeWorkload.hpp"
 #include "workloads/NeonQuantizedLstmWorkload.hpp"
+#include "workloads/NeonReshapeWorkload.hpp"
 #include "workloads/NeonResizeWorkload.hpp"
 #include "workloads/NeonRsqrtWorkload.hpp"
 #include "workloads/NeonSliceWorkload.hpp"
@@ -650,14 +651,15 @@ bool NeonLayerSupport::IsQuantizedLstmSupported(const TensorInfo& input,
 }
 
 bool NeonLayerSupport::IsReshapeSupported(const TensorInfo& input,
+                                          const TensorInfo& output,
                                           const ReshapeDescriptor& descriptor,
                                           Optional<std::string&> reasonIfUnsupported) const
 {
     ignore_unused(descriptor);
-    return IsSupportedForDataTypeNeon(reasonIfUnsupported,
-                                      input.GetDataType(),
-                                      &TrueFunc<>,
-                                      &TrueFunc<>);
+    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonReshapeWorkloadValidate,
+                                   reasonIfUnsupported,
+                                   input,
+                                   output);
 }
 
 bool NeonLayerSupport::IsResizeSupported(const TensorInfo& input,
diff --git a/src/backends/neon/NeonLayerSupport.hpp b/src/backends/neon/NeonLayerSupport.hpp
index f1d87f65f3..56a70c4886 100644
--- a/src/backends/neon/NeonLayerSupport.hpp
+++ b/src/backends/neon/NeonLayerSupport.hpp
@@ -222,6 +222,7 @@ public:
                             Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
     bool IsReshapeSupported(const TensorInfo& input,
+                            const TensorInfo& output,
                             const ReshapeDescriptor& descriptor,
                             Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
diff --git a/src/backends/neon/workloads/NeonReshapeWorkload.cpp b/src/backends/neon/workloads/NeonReshapeWorkload.cpp
index 7f2056c8e2..659bb94723 100644
--- a/src/backends/neon/workloads/NeonReshapeWorkload.cpp
+++ b/src/backends/neon/workloads/NeonReshapeWorkload.cpp
@@ -14,6 +14,15 @@
 namespace armnn
 {
 
+arm_compute::Status NeonReshapeWorkloadValidate(const TensorInfo& input,
+                                                const TensorInfo& output)
+{
+    const arm_compute::TensorInfo aclInputInfo = armcomputetensorutils::BuildArmComputeTensorInfo(input);
+    const arm_compute::TensorInfo aclOutputInfo = armcomputetensorutils::BuildArmComputeTensorInfo(output);
+
+    return arm_compute::NEReshapeLayer::validate(&aclInputInfo, &aclOutputInfo);
+}
+
 NeonReshapeWorkload::NeonReshapeWorkload(const ReshapeQueueDescriptor& descriptor,
                                          const WorkloadInfo& info)
     : BaseWorkload<ReshapeQueueDescriptor>(descriptor, info)
diff --git a/src/backends/neon/workloads/NeonReshapeWorkload.hpp b/src/backends/neon/workloads/NeonReshapeWorkload.hpp
index 2202463928..186a02ba26 100644
--- a/src/backends/neon/workloads/NeonReshapeWorkload.hpp
+++ b/src/backends/neon/workloads/NeonReshapeWorkload.hpp
@@ -6,7 +6,10 @@
 #pragma once
 
 #include
+#include
+#include
+#include
 
 #include
 #include
@@ -14,6 +17,8 @@
 namespace armnn
 {
 
+arm_compute::Status NeonReshapeWorkloadValidate(const TensorInfo& input, const TensorInfo& output);
+
 class NeonReshapeWorkload : public BaseWorkload<ReshapeQueueDescriptor>
 {
 public:
diff --git a/src/backends/reference/RefLayerSupport.cpp b/src/backends/reference/RefLayerSupport.cpp
index ee6462dfa3..b801f70724 100644
--- a/src/backends/reference/RefLayerSupport.cpp
+++ b/src/backends/reference/RefLayerSupport.cpp
@@ -1451,9 +1451,11 @@ bool RefLayerSupport::IsQuantizeSupported(const TensorInfo& input,
 }
 
 bool RefLayerSupport::IsReshapeSupported(const TensorInfo& input,
+                                         const TensorInfo& output,
                                          const ReshapeDescriptor& descriptor,
                                          Optional<std::string&> reasonIfUnsupported) const
 {
+    ignore_unused(output);
     ignore_unused(descriptor);
     // Define supported output types.
     std::array supportedOutputTypes =
diff --git a/src/backends/reference/RefLayerSupport.hpp b/src/backends/reference/RefLayerSupport.hpp
index 123c2643df..1551a55694 100644
--- a/src/backends/reference/RefLayerSupport.hpp
+++ b/src/backends/reference/RefLayerSupport.hpp
@@ -242,6 +242,7 @@ public:
                           Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
     bool IsReshapeSupported(const TensorInfo& input,
+                            const TensorInfo& output,
                             const ReshapeDescriptor& descriptor,
                             Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
-- 
cgit v1.2.1