From b41793a9f9afc43fb04a991ca819818fca8faab8 Mon Sep 17 00:00:00 2001
From: John Mcloughlin
Date: Mon, 16 Oct 2023 10:28:40 +0100
Subject: IVGCVSW-7752 DTS: Fix QuantizePerChannel tests

 * Added validation for scale on all Quantized types
 * Added Encoder for Per Axis UINT16 Symmetrical Quantized type
 * Added error for Per Axis Asymmetrical Quantized type not supported

Signed-off-by: John Mcloughlin
Change-Id: I433519ccacd71219a92bde2b81955d6abf9219c5
---
 src/armnn/Network.cpp                             | 65 ++++++++++++++---------
 src/backends/reference/workloads/BaseIterator.hpp | 29 +++++++++-
 src/backends/reference/workloads/Encoders.hpp     | 24 +++++++--
 3 files changed, 87 insertions(+), 31 deletions(-)

diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index d2b14cd045..f18c6bfb48 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -799,31 +799,46 @@ bool CheckScaleSetOnQuantizedType(Layer* layer, Optional<std::vector<std::string>&> errMessages)
         OutputSlot& outputSlot = layer->GetOutputSlot(i);
         TensorInfo info = outputSlot.GetTensorInfo();
-        if (DataType::QAsymmU8 == info.GetDataType())
-        {
-            if (0.f == info.GetQuantizationScale())
-            {
-                noErrors = false;
-                std::stringstream ss;
-                ss << "output " << i << " of layer " << GetLayerTypeAsCString(layer->GetType())
-                   << " (" << layer->GetNameStr() << ") is of type"
-                   << " Quantized 8 bit but its scale parameter has not been set";
-                ReportError(ss.str(), errMessages);
-            }
-            // Softmax under QuantisedAsymm8 must always be scale (1.0f/256.0f) and offset 0
-            if ((info.GetQuantizationScale() != (1.0f / 256.0f) ||
-                 info.GetQuantizationOffset() != 0) &&
-                 layer->GetType() == armnn::LayerType::Softmax)
-            {
-                std::stringstream ss;
-                ss << "Quantization parameters for Softmax layer (Scale: " <<
-                   info.GetQuantizationScale() << " and Offset: " << info.GetQuantizationOffset() <<
-                   ") are incorrect and have been updated to Scale: 0.00390625 and Offset: 0";
-                ARMNN_LOG(warning) << ss.str();
-                info.SetQuantizationScale((1.0f /256.0f));
-                info.SetQuantizationOffset(0);
-                outputSlot.SetTensorInfo(info);
-            }
-        }
+        auto quantizationDataType = info.GetDataType();
+        auto quantizationScales = info.GetQuantizationScales();
+        // For any Quantized Tensor ensure scale(s) are set
+        switch(quantizationDataType) {
+            case DataType::QAsymmU8:
+            case DataType::QSymmS16:
+            case DataType::QSymmS8:
+            case DataType::QAsymmS8:
+                if ((quantizationDataType == DataType::QAsymmU8 || quantizationDataType == DataType::QAsymmS8)
+                     && info.HasPerAxisQuantization()) {
+                    throw InvalidArgumentException("Per Axis Quantization is not supported in "
+                                                   "Asymmetric Quantization Datatype.");
+                }
+                if ((!info.HasPerAxisQuantization() && info.GetQuantizationScale() == 0.f)
+                     || (info.HasPerAxisQuantization() && (quantizationScales.end() !=
+                     std::find(quantizationScales.begin(), quantizationScales.end(), 0.f)))) {
+                    noErrors = false;
+                    std::stringstream ss;
+                    ss << "output " << i << " of layer " << GetLayerTypeAsCString(layer->GetType())
+                       << " (" << layer->GetNameStr() << ") is of type"
+                       << " Quantized value but the scale parameter has not been set";
+                    ReportError(ss.str(), errMessages);
+                }
+                // Softmax under QuantisedAsymm8 must always be scale (1.0f/256.0f) and offset 0
+                if (!info.HasPerAxisQuantization() && quantizationDataType == DataType::QAsymmU8 &&
+                    (info.GetQuantizationScale() != (1.0f / 256.0f) ||
+                    info.GetQuantizationOffset() != 0) &&
+                    layer->GetType() == armnn::LayerType::Softmax) {
+                    std::stringstream ss;
+                    ss << "Quantization parameters for Softmax layer (Scale: " <<
+                       info.GetQuantizationScale() << " and Offset: " << info.GetQuantizationOffset() <<
+                       ") are incorrect and have been updated to Scale: 0.00390625 and Offset: 0";
+                    ARMNN_LOG(warning) << ss.str();
+                    info.SetQuantizationScale((1.0f / 256.0f));
+                    info.SetQuantizationOffset(0);
+                    outputSlot.SetTensorInfo(info);
+                }
+                break;
+            default:
+                break;
+        }
     }
     return noErrors;
 }
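
Note: the per-channel branch above rejects a quantized tensor if any entry of its
scale vector was left at 0.0f. A minimal standalone sketch of that check (plain
C++, not the ArmNN API; AllScalesSet is an illustrative name):

#include <algorithm>
#include <iostream>
#include <vector>

// True when every per-channel scale has been set (no 0.0f left), mirroring
// the std::find(..., 0.f) test in CheckScaleSetOnQuantizedType above.
bool AllScalesSet(const std::vector<float>& scales)
{
    return std::find(scales.begin(), scales.end(), 0.f) == scales.end();
}

int main()
{
    std::vector<float> good = {0.05f, 0.10f, 0.20f};
    std::vector<float> bad  = {0.05f, 0.00f, 0.20f};  // channel 1 never set
    std::cout << AllScalesSet(good) << " " << AllScalesSet(bad) << "\n";  // 1 0
}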

diff --git a/src/backends/reference/workloads/BaseIterator.hpp b/src/backends/reference/workloads/BaseIterator.hpp
index 2d27951b73..1665c1ff46 100644
--- a/src/backends/reference/workloads/BaseIterator.hpp
+++ b/src/backends/reference/workloads/BaseIterator.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
@@ -896,4 +896,31 @@ private:
     std::vector<float> m_Scales;
 };
 
+class QSymm16PerAxisEncoder : public PerAxisIterator<int16_t, Encoder<float>>
+{
+public:
+    QSymm16PerAxisEncoder(int16_t* data, const std::vector<float>& scale,
+                          unsigned int axisFactor, unsigned int axisDimensionality)
+        : PerAxisIterator(data, axisFactor, axisDimensionality), m_Scale(scale) {}
+
+    void Set(float right)
+    {
+        *m_Iterator = armnn::Quantize<int16_t>(right, m_Scale[m_AxisIndex], 0);
+    }
+
+    float Get() const
+    {
+        return armnn::Dequantize(*m_Iterator, m_Scale[m_AxisIndex], 0);
+    }
+
+    // Get scale of the current value
+    float GetScale() const
+    {
+        return m_Scale[m_AxisIndex];
+    }
+
+private:
+    std::vector<float> m_Scale;
+};
+
 } // namespace armnn

diff --git a/src/backends/reference/workloads/Encoders.hpp b/src/backends/reference/workloads/Encoders.hpp
index d6d611494d..8a702377b2 100644
--- a/src/backends/reference/workloads/Encoders.hpp
+++ b/src/backends/reference/workloads/Encoders.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
@@ -56,10 +56,24 @@ inline std::unique_ptr<Encoder<float>> MakeEncoder(const TensorInfo& info, void* data)
         }
         case armnn::DataType::QSymmS16:
         {
-            return std::make_unique<QSymm16Encoder>(
-                static_cast<int16_t*>(data),
-                info.GetQuantizationScale(),
-                info.GetQuantizationOffset());
+            if (info.HasPerAxisQuantization())
+            {
+                unsigned int axis = info.GetQuantizationDim().value();
+                auto axisDimensionality = info.GetShape()[axis];
+                std::pair<unsigned int, std::vector<float>> params = armnnUtils::GetPerAxisParams(info);
+                return std::make_unique<QSymm16PerAxisEncoder>(
+                        static_cast<int16_t*>(data),
+                        params.second,
+                        params.first,
+                        axisDimensionality);
+            }
+            else
+            {
+                return std::make_unique<QSymm16Encoder>(
+                        static_cast<int16_t*>(data),
+                        info.GetQuantizationScale(),
+                        info.GetQuantizationOffset());
+            }
         }
         case armnn::DataType::Signed32:
         {
-- 
cgit v1.2.1
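
Note: the new QSymm16PerAxisEncoder performs symmetric 16-bit quantization with a
per-channel scale and a fixed zero offset. A hedged standalone sketch of that
arithmetic (not the ArmNN implementation; QuantizeSymmS16 is an illustrative
helper):

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <iostream>
#include <vector>

// Symmetric quantization: round(value / scale) with offset 0, clamped to int16.
int16_t QuantizeSymmS16(float value, float scale)
{
    float q = std::round(value / scale);
    return static_cast<int16_t>(std::max(-32768.0f, std::min(32767.0f, q)));
}

int main()
{
    // One scale per slice along the quantization axis (e.g. per output channel).
    std::vector<float> perChannelScales = {0.5f, 0.25f};
    std::cout << QuantizeSymmS16(10.0f, perChannelScales[0]) << " "   // 20
              << QuantizeSymmS16(10.0f, perChannelScales[1]) << "\n"; // 40
}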