From 9add1200f5840e263115b48e17a6397ce3ae2d74 Mon Sep 17 00:00:00 2001
From: Ryan OShea
Date: Fri, 7 Feb 2020 10:06:33 +0000
Subject: IVGCVSW-4386 Add ArmNN reference support for QAsymmS8

 * Added Quantization Scheme for QAsymmS8
 * Added Unit Tests for QAsymmS8
 * Renamed QAsymm8 calls to QAsymmU8

Signed-off-by: Ryan OShea
Change-Id: I897b4e018ba1d808cc3f8c113f2be2dbad49c8db
---
 src/backends/aclCommon/ArmComputeTensorUtils.cpp   |  2 +
 src/backends/backendsCommon/WorkloadData.cpp       |  4 +-
 .../test/layerTests/DequantizeTestImpl.cpp         | 14 +++++++
 .../test/layerTests/DequantizeTestImpl.hpp         |  8 ++++
 .../test/layerTests/QuantizeTestImpl.cpp           |  7 ++++
 .../test/layerTests/QuantizeTestImpl.hpp           |  4 ++
 src/backends/reference/RefLayerSupport.cpp         |  9 +++--
 src/backends/reference/test/RefLayerTests.cpp      |  3 ++
 src/backends/reference/workloads/BaseIterator.hpp  | 43 ++++++++++++++++++++++
 src/backends/reference/workloads/Decoders.hpp      |  7 ++++
 src/backends/reference/workloads/Encoders.hpp      |  7 ++++
 11 files changed, 104 insertions(+), 4 deletions(-)

(limited to 'src/backends')

diff --git a/src/backends/aclCommon/ArmComputeTensorUtils.cpp b/src/backends/aclCommon/ArmComputeTensorUtils.cpp
index d2bb6df625..49fef5bf17 100644
--- a/src/backends/aclCommon/ArmComputeTensorUtils.cpp
+++ b/src/backends/aclCommon/ArmComputeTensorUtils.cpp
@@ -23,6 +23,8 @@ arm_compute::DataType GetArmComputeDataType(armnn::DataType dataType, bool multi
             return arm_compute::DataType::F16;
         case armnn::DataType::Float32:
             return arm_compute::DataType::F32;
+        case armnn::DataType::QAsymmS8:
+            return arm_compute::DataType::QASYMM8_SIGNED;
         case armnn::DataType::QAsymmU8:
             return arm_compute::DataType::QASYMM8;
         case armnn::DataType::QSymmS16:
diff --git a/src/backends/backendsCommon/WorkloadData.cpp b/src/backends/backendsCommon/WorkloadData.cpp
index 8bf2b0f988..ebaf961fe8 100644
--- a/src/backends/backendsCommon/WorkloadData.cpp
+++ b/src/backends/backendsCommon/WorkloadData.cpp
@@ -2201,13 +2201,15 @@ void QuantizeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
         DataType::Float32,
         DataType::Float16,
         DataType::QSymmS8,
+        DataType::QAsymmS8,
         DataType::QAsymmU8,
         DataType::QSymmS16
     };

     ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);

-    if (outputTensorInfo.GetDataType() != DataType::QAsymmU8 &&
+    if (outputTensorInfo.GetDataType() != DataType::QAsymmS8 &&
+        outputTensorInfo.GetDataType() != DataType::QAsymmU8 &&
         outputTensorInfo.GetDataType() != DataType::QSymmS8 &&
         outputTensorInfo.GetDataType() != DataType::QSymmS16)
     {
diff --git a/src/backends/backendsCommon/test/layerTests/DequantizeTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/DequantizeTestImpl.cpp
index 993915dc0b..91d56bb492 100644
--- a/src/backends/backendsCommon/test/layerTests/DequantizeTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/DequantizeTestImpl.cpp
@@ -143,6 +143,20 @@ LayerTestResult<float, 4> DequantizeOffsetUint8Test(
     return DequantizeOffsetTest<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
 }

+LayerTestResult<float, 4> DequantizeSimpleAsymmInt8Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    return DequantizeSimpleTest<armnn::DataType::QAsymmS8>(workloadFactory, memoryManager);
+}
+
+LayerTestResult<float, 4> DequantizeOffsetAsymmInt8Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    return DequantizeOffsetTest<armnn::DataType::QAsymmS8>(workloadFactory, memoryManager);
+}
+
 LayerTestResult<float, 4> DequantizeSimpleInt8Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
diff --git a/src/backends/backendsCommon/test/layerTests/DequantizeTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/DequantizeTestImpl.hpp
index c70f03e8f3..1e079a75bf 100644
--- a/src/backends/backendsCommon/test/layerTests/DequantizeTestImpl.hpp
+++ b/src/backends/backendsCommon/test/layerTests/DequantizeTestImpl.hpp
@@ -20,6 +20,14 @@ LayerTestResult<float, 4> DequantizeOffsetUint8Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);

+LayerTestResult<float, 4> DequantizeSimpleAsymmInt8Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<float, 4> DequantizeOffsetAsymmInt8Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
 LayerTestResult<float, 4> DequantizeSimpleInt8Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
diff --git a/src/backends/backendsCommon/test/layerTests/QuantizeTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/QuantizeTestImpl.cpp
index 9c2bc84a05..e8996d4a51 100644
--- a/src/backends/backendsCommon/test/layerTests/QuantizeTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/QuantizeTestImpl.cpp
@@ -139,6 +139,13 @@ LayerTestResult<uint8_t, 4> QuantizeClampUint8Test(
     return QuantizeClampTest<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
 }

+LayerTestResult<int8_t, 4> QuantizeClampAsymmInt8Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    return QuantizeClampTest<armnn::DataType::QAsymmS8>(workloadFactory, memoryManager);
+}
+
 LayerTestResult<int8_t, 4> QuantizeClampInt8Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
diff --git a/src/backends/backendsCommon/test/layerTests/QuantizeTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/QuantizeTestImpl.hpp
index ece75fd43b..e16466c445 100644
--- a/src/backends/backendsCommon/test/layerTests/QuantizeTestImpl.hpp
+++ b/src/backends/backendsCommon/test/layerTests/QuantizeTestImpl.hpp
@@ -18,6 +18,10 @@ LayerTestResult<uint8_t, 4> QuantizeClampUint8Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);

+LayerTestResult<int8_t, 4> QuantizeClampAsymmInt8Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
 LayerTestResult<int8_t, 4> QuantizeClampInt8Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
diff --git a/src/backends/reference/RefLayerSupport.cpp b/src/backends/reference/RefLayerSupport.cpp
index 0ca19bbb06..c60348e529 100644
--- a/src/backends/reference/RefLayerSupport.cpp
+++ b/src/backends/reference/RefLayerSupport.cpp
@@ -610,7 +610,8 @@ bool RefLayerSupport::IsDequantizeSupported(const TensorInfo& input,
 {
     bool supported = true;

-    std::array<DataType,3> supportedInputTypes = {
+    std::array<DataType,4> supportedInputTypes = {
+        DataType::QAsymmS8,
         DataType::QAsymmU8,
         DataType::QSymmS8,
         DataType::QSymmS16
@@ -1439,9 +1440,10 @@ bool RefLayerSupport::IsQuantizeSupported(const TensorInfo& input,
     bool supported = true;

     // Define supported input types.
-    std::array<DataType,5> supportedInputTypes = {
+    std::array<DataType,6> supportedInputTypes = {
         DataType::Float32,
         DataType::Float16,
+        DataType::QAsymmS8,
         DataType::QAsymmU8,
         DataType::QSymmS8,
         DataType::QSymmS16
@@ -1451,8 +1453,9 @@ bool RefLayerSupport::IsQuantizeSupported(const TensorInfo& input,
                                       "Reference quantize: input type not supported.");

     // Define supported output types.
-    std::array<DataType,3> supportedOutputTypes = {
+    std::array<DataType,4> supportedOutputTypes = {
         DataType::QAsymmU8,
+        DataType::QAsymmS8,
         DataType::QSymmS8,
         DataType::QSymmS16
     };
diff --git a/src/backends/reference/test/RefLayerTests.cpp b/src/backends/reference/test/RefLayerTests.cpp
index b0d8db802e..99468e0006 100644
--- a/src/backends/reference/test/RefLayerTests.cpp
+++ b/src/backends/reference/test/RefLayerTests.cpp
@@ -1419,6 +1419,8 @@ BOOST_AUTO_TEST_CASE(DetectionPostProcessFastNmsInt16)
 // Dequantize
 ARMNN_AUTO_TEST_CASE(DequantizeSimpleUint8, DequantizeSimpleUint8Test)
 ARMNN_AUTO_TEST_CASE(DequantizeOffsetUint8, DequantizeOffsetUint8Test)
+ARMNN_AUTO_TEST_CASE(DequantizeSimpleAsymmInt8, DequantizeSimpleAsymmInt8Test)
+ARMNN_AUTO_TEST_CASE(DequantizeOffsetAsymmInt8, DequantizeOffsetAsymmInt8Test)
 ARMNN_AUTO_TEST_CASE(DequantizeSimpleInt8, DequantizeSimpleInt8Test)
 ARMNN_AUTO_TEST_CASE(DequantizeSimpleInt16, DequantizeSimpleInt16Test)
 ARMNN_AUTO_TEST_CASE(DequantizeSimpleUint8ToFp16, DequantizeSimpleUint8ToFp16Test)
@@ -1428,6 +1430,7 @@ ARMNN_AUTO_TEST_CASE(DequantizeSimpleInt16ToFp16, DequantizeSimpleInt16ToFp16Tes

 // Quantize
 ARMNN_AUTO_TEST_CASE(QuantizeSimpleUint8, QuantizeSimpleUint8Test)
 ARMNN_AUTO_TEST_CASE(QuantizeClampUint8, QuantizeClampUint8Test)
+ARMNN_AUTO_TEST_CASE(QuantizeClampAsymmInt8, QuantizeClampAsymmInt8Test)
 ARMNN_AUTO_TEST_CASE(QuantizeClampInt8, QuantizeClampInt8Test)
 ARMNN_AUTO_TEST_CASE(QuantizeClampInt16, QuantizeClampInt16Test)
diff --git a/src/backends/reference/workloads/BaseIterator.hpp b/src/backends/reference/workloads/BaseIterator.hpp
index 581aabfcd2..c48201837b 100644
--- a/src/backends/reference/workloads/BaseIterator.hpp
+++ b/src/backends/reference/workloads/BaseIterator.hpp
@@ -137,6 +137,25 @@ private:
     const int32_t m_Offset;
 };

+class QASymmS8Decoder : public TypedIterator<const int8_t, Decoder<float>>
+{
+public:
+    QASymmS8Decoder(const int8_t* data, const float scale, const int32_t offset)
+        : TypedIterator(data), m_Scale(scale), m_Offset(offset) {}
+
+    QASymmS8Decoder(const float scale, const int32_t offset)
+        : QASymmS8Decoder(nullptr, scale, offset) {}
+
+    float Get() const override
+    {
+        return armnn::Dequantize(*m_Iterator, m_Scale, m_Offset);
+    }
+
+private:
+    const float m_Scale;
+    const int32_t m_Offset;
+};
+
 class QSymmS8Decoder : public TypedIterator<const int8_t, Decoder<float>>
 {
 public:
@@ -264,6 +283,30 @@ private:
     const int32_t m_Offset;
 };

+class QASymmS8Encoder : public TypedIterator<int8_t, Encoder<float>>
+{
+public:
+    QASymmS8Encoder(int8_t* data, const float scale, const int32_t offset)
+        : TypedIterator(data), m_Scale(scale), m_Offset(offset) {}
+
+    QASymmS8Encoder(const float scale, const int32_t offset)
+        : QASymmS8Encoder(nullptr, scale, offset) {}
+
+    void Set(float right) override
+    {
+        *m_Iterator = armnn::Quantize<int8_t>(right, m_Scale, m_Offset);
+    }
+
+    float Get() const override
+    {
+        return armnn::Dequantize(*m_Iterator, m_Scale, m_Offset);
+    }
+
+private:
+    const float m_Scale;
+    const int32_t m_Offset;
+};
+
 class QSymmS8Encoder : public TypedIterator<int8_t, Encoder<float>>
 {
 public:
diff --git a/src/backends/reference/workloads/Decoders.hpp b/src/backends/reference/workloads/Decoders.hpp
index 6f309787bd..6a8c756048 100644
--- a/src/backends/reference/workloads/Decoders.hpp
+++ b/src/backends/reference/workloads/Decoders.hpp
@@ -81,6 +81,13 @@ inline std::unique_ptr<Decoder<float>> MakeDecoder(const TensorInfo& info, const
                 params.first);
         }
         ARMNN_NO_DEPRECATE_WARN_END
+        case DataType::QAsymmS8:
+        {
+            return std::make_unique<QASymmS8Decoder>(
+                static_cast<const int8_t*>(data),
+                info.GetQuantizationScale(),
+                info.GetQuantizationOffset());
+        }
         case DataType::QAsymmU8:
         {
             return std::make_unique(
diff --git a/src/backends/reference/workloads/Encoders.hpp b/src/backends/reference/workloads/Encoders.hpp
index 8ddd559448..f52297602f 100644
--- a/src/backends/reference/workloads/Encoders.hpp
+++ b/src/backends/reference/workloads/Encoders.hpp
@@ -32,6 +32,13 @@ inline std::unique_ptr<Encoder<float>> MakeEncoder(const TensorInfo& info, void*
                 params.first);
         }
         ARMNN_NO_DEPRECATE_WARN_END
+        case armnn::DataType::QAsymmS8:
+        {
+            return std::make_unique<QASymmS8Encoder>(
+                static_cast<int8_t*>(data),
+                info.GetQuantizationScale(),
+                info.GetQuantizationOffset());
+        }
         case armnn::DataType::QAsymmU8:
        {
             return std::make_unique(
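For reference, the QAsymmS8 scheme handled by the new QASymmS8Decoder and QASymmS8Encoder is plain asymmetric 8-bit affine quantization over the signed range [-128, 127]; the patch relies on armnn::Quantize and armnn::Dequantize to do this arithmetic. The sketch below is not part of the commit and does not use ArmNN headers; the function names QuantizeQAsymmS8 and DequantizeQAsymmS8 are made up here purely to illustrate the mapping.

// Standalone sketch of asymmetric signed 8-bit (QAsymmS8) quantization.
#include <algorithm>
#include <cmath>
#include <cstdint>

// Real value -> quantized int8: q = round(value / scale) + offset, clamped to [-128, 127].
int8_t QuantizeQAsymmS8(float value, float scale, int32_t offset)
{
    int32_t q = static_cast<int32_t>(std::round(value / scale)) + offset;
    q = std::min<int32_t>(127, std::max<int32_t>(-128, q));
    return static_cast<int8_t>(q);
}

// Quantized int8 -> real value: value = scale * (q - offset).
float DequantizeQAsymmS8(int8_t quantized, float scale, int32_t offset)
{
    return scale * static_cast<float>(static_cast<int32_t>(quantized) - offset);
}

Compared with the existing QAsymmU8 path, only the storage type and clamping range change (int8_t over [-128, 127] instead of uint8_t over [0, 255]); the scale and offset arithmetic is identical, which is why the new decoder and encoder in BaseIterator.hpp mirror the unsigned variants line for line.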