diff options
Diffstat (limited to 'src/backends/neon')
4 files changed, 10 insertions, 5 deletions
diff --git a/src/backends/neon/NeonLayerSupport.cpp b/src/backends/neon/NeonLayerSupport.cpp
index dfaea5c81c..a79f4c0365 100644
--- a/src/backends/neon/NeonLayerSupport.cpp
+++ b/src/backends/neon/NeonLayerSupport.cpp
@@ -279,9 +279,10 @@ bool IsInputSupportedNeon(const TensorInfo& input,

 bool IsL2NormalizationSupportedNeon(const TensorInfo& input,
                                     const TensorInfo& output,
+                                    const L2NormalizationDescriptor& descriptor,
                                     std::string* reasonIfUnsupported)
 {
-    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonL2NormalizationWorkloadValidate, reasonIfUnsupported, input, output);
+    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonL2NormalizationWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
 }

 bool IsMergerSupportedNeon(const std::vector<const TensorInfo*> inputs,
diff --git a/src/backends/neon/NeonLayerSupport.hpp b/src/backends/neon/NeonLayerSupport.hpp
index 95b14b3ba6..419c226b3c 100644
--- a/src/backends/neon/NeonLayerSupport.hpp
+++ b/src/backends/neon/NeonLayerSupport.hpp
@@ -81,6 +81,7 @@ bool IsInputSupportedNeon(const TensorInfo& input,

 bool IsL2NormalizationSupportedNeon(const TensorInfo& input,
                                     const TensorInfo& output,
+                                    const L2NormalizationDescriptor& descriptor,
                                     std::string* reasonIfUnsupported = nullptr);

 bool IsMergerSupportedNeon(const std::vector<const TensorInfo*> inputs,
diff --git a/src/backends/neon/workloads/NeonL2NormalizationFloatWorkload.cpp b/src/backends/neon/workloads/NeonL2NormalizationFloatWorkload.cpp
index dee789af85..4bddd9a24c 100644
--- a/src/backends/neon/workloads/NeonL2NormalizationFloatWorkload.cpp
+++ b/src/backends/neon/workloads/NeonL2NormalizationFloatWorkload.cpp
@@ -8,12 +8,14 @@
 namespace armnn
 {
+using namespace armcomputetensorutils;

 arm_compute::Status NeonL2NormalizationWorkloadValidate(const TensorInfo& input,
-                                                        const TensorInfo& output)
+                                                        const TensorInfo& output,
+                                                        const L2NormalizationDescriptor& descriptor)
 {
-    const arm_compute::TensorInfo aclInput = armcomputetensorutils::BuildArmComputeTensorInfo(input);
-    const arm_compute::TensorInfo aclOutput = armcomputetensorutils::BuildArmComputeTensorInfo(output);
+    const arm_compute::TensorInfo aclInput = BuildArmComputeTensorInfo(input, descriptor.m_DataLayout);
+    const arm_compute::TensorInfo aclOutput = BuildArmComputeTensorInfo(output, descriptor.m_DataLayout);

     arm_compute::NormalizationLayerInfo normalizationInfo = CreateAclNormalizationLayerInfoForL2Normalization(input);
diff --git a/src/backends/neon/workloads/NeonL2NormalizationFloatWorkload.hpp b/src/backends/neon/workloads/NeonL2NormalizationFloatWorkload.hpp
index c1221fb98c..70ab385493 100644
--- a/src/backends/neon/workloads/NeonL2NormalizationFloatWorkload.hpp
+++ b/src/backends/neon/workloads/NeonL2NormalizationFloatWorkload.hpp
@@ -14,7 +14,8 @@
 namespace armnn
 {
 arm_compute::Status NeonL2NormalizationWorkloadValidate(const TensorInfo& input,
-                                                        const TensorInfo& output);
+                                                        const TensorInfo& output,
+                                                        const L2NormalizationDescriptor& descriptor);

 class NeonL2NormalizationFloatWorkload : public FloatWorkload<L2NormalizationQueueDescriptor>
 {