From 21fc28bb68d7a6232eea3a0e8cfd800d95346ea4 Mon Sep 17 00:00:00 2001
From: Aron Virginas-Tar <Aron.Virginas-Tar@arm.com>
Date: Tue, 26 Nov 2019 14:04:54 +0000
Subject: IVGCVSW-3866 Add support for per-channel QSymm8 Convolution2d to Neon
 backend

Signed-off-by: Aron Virginas-Tar <Aron.Virginas-Tar@arm.com>
Change-Id: If7d366b2894050c53a651016165f34be1a2ce7a6
---
 src/armnn/CompatibleTypes.hpp                     | 6 ++++++
 src/backends/neon/NeonLayerSupport.cpp            | 5 -----
 src/backends/neon/test/NeonLayerTests.cpp         | 3 +++
 src/backends/neon/workloads/NeonWorkloadUtils.hpp | 3 +++
 4 files changed, 12 insertions(+), 5 deletions(-)

diff --git a/src/armnn/CompatibleTypes.hpp b/src/armnn/CompatibleTypes.hpp
index 4e31aed2aa..06aa064f7b 100644
--- a/src/armnn/CompatibleTypes.hpp
+++ b/src/armnn/CompatibleTypes.hpp
@@ -35,6 +35,12 @@ inline bool CompatibleTypes<uint8_t>(DataType dataType)
     return dataType == DataType::Boolean || dataType == DataType::QuantisedAsymm8;
 }
 
+template<>
+inline bool CompatibleTypes<int8_t>(DataType dataType)
+{
+    return dataType == DataType::QuantisedSymm8 || dataType == DataType::QuantizedSymm8PerAxis;
+}
+
 template<>
 inline bool CompatibleTypes<int16_t>(DataType dataType)
 {
diff --git a/src/backends/neon/NeonLayerSupport.cpp b/src/backends/neon/NeonLayerSupport.cpp
index ed0f41a888..4474b12d37 100644
--- a/src/backends/neon/NeonLayerSupport.cpp
+++ b/src/backends/neon/NeonLayerSupport.cpp
@@ -274,11 +274,6 @@ bool NeonLayerSupport::IsConvolution2dSupported(const TensorInfo& input,
                                                 const Optional<TensorInfo>& biases,
                                                 Optional<std::string&> reasonIfUnsupported) const
 {
-    if (weights.HasPerAxisQuantization())
-    {
-        return false;
-    }
-
     // Multiplier > 1.0f currently not supported in ACL
     if ((input.GetQuantizationScale() * weights.GetQuantizationScale()) / output.GetQuantizationScale() > 1.0f)
     {
diff --git a/src/backends/neon/test/NeonLayerTests.cpp b/src/backends/neon/test/NeonLayerTests.cpp
index 26c55365cf..d74a4c6ebe 100644
--- a/src/backends/neon/test/NeonLayerTests.cpp
+++ b/src/backends/neon/test/NeonLayerTests.cpp
@@ -110,6 +110,9 @@
 ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2dMult2,
                      DepthwiseConvolution2dMult2Test<armnn::DataType::Float32, armnn::DataType::Float32>,
                      false,
                      armnn::DataLayout::NCHW)
 
+ARMNN_AUTO_TEST_CASE(Convolution2dPerAxisQuantTestNchw, Convolution2dPerAxisQuantTest, DataLayout::NCHW);
+ARMNN_AUTO_TEST_CASE(Convolution2dPerAxisQuantTestNhwc, Convolution2dPerAxisQuantTest, DataLayout::NHWC);
+
 // DepthToSpace
 ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwFloat32_1, DepthToSpaceTest1<DataType::Float32>, DataLayout::NCHW);
 ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwFloat32_2, DepthToSpaceTest2<DataType::Float32>, DataLayout::NCHW);
diff --git a/src/backends/neon/workloads/NeonWorkloadUtils.hpp b/src/backends/neon/workloads/NeonWorkloadUtils.hpp
index f63946ec07..e9edc8901e 100644
--- a/src/backends/neon/workloads/NeonWorkloadUtils.hpp
+++ b/src/backends/neon/workloads/NeonWorkloadUtils.hpp
@@ -46,6 +46,9 @@ inline void InitializeArmComputeTensorData(arm_compute::Tensor& tensor,
         case DataType::QuantisedAsymm8:
             CopyArmComputeTensorData(tensor, handle->GetConstTensor<uint8_t>());
             break;
+        case DataType::QuantizedSymm8PerAxis:
+            CopyArmComputeTensorData(tensor, handle->GetConstTensor<int8_t>());
+            break;
         case DataType::Signed32:
             CopyArmComputeTensorData(tensor, handle->GetConstTensor<int32_t>());
             break;
-- 
cgit v1.2.1