From b41793a9f9afc43fb04a991ca819818fca8faab8 Mon Sep 17 00:00:00 2001
From: John Mcloughlin
Date: Mon, 16 Oct 2023 10:28:40 +0100
Subject: IVGCVSW-7752 DTS: Fix QuantizePerChannel tests

 * Added validation for scale on all Quantized types
 * Added Encoder for Per Axis UINT16 Symmetrical Quantized type
 * Added error for Per Axis Asymmetrical Quantized type not supported

Signed-off-by: John Mcloughlin
Change-Id: I433519ccacd71219a92bde2b81955d6abf9219c5
---
 src/armnn/Network.cpp | 65 +++++++++++++++++++++++++++++++--------------------
 1 file changed, 40 insertions(+), 25 deletions(-)

(limited to 'src/armnn')

diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index d2b14cd045..f18c6bfb48 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -799,31 +799,46 @@ bool CheckScaleSetOnQuantizedType(Layer* layer, Optional<std::vec
     for (unsigned int i = 0; i < numOutputs; i++) {
         OutputSlot& outputSlot = layer->GetOutputSlot(i);
         TensorInfo info = outputSlot.GetTensorInfo();
-        if (DataType::QAsymmU8 == info.GetDataType())
-        {
-            if (0.f == info.GetQuantizationScale())
-            {
-                noErrors = false;
-                std::stringstream ss;
-                ss << "output " << i << " of layer " << GetLayerTypeAsCString(layer->GetType())
-                   << " (" << layer->GetNameStr() << ") is of type"
-                   << " Quantized 8 bit but its scale parameter has not been set";
-                ReportError(ss.str(), errMessages);
-            }
-            // Softmax under QuantisedAsymm8 must always be scale (1.0f/256.0f) and offset 0
-            if ((info.GetQuantizationScale() != (1.0f / 256.0f) ||
-                 info.GetQuantizationOffset() != 0) &&
-                 layer->GetType() == armnn::LayerType::Softmax)
-            {
-                std::stringstream ss;
-                ss << "Quantization parameters for Softmax layer (Scale: " <<
-                   info.GetQuantizationScale() << " and Offset: " << info.GetQuantizationOffset() <<
-                   ") are incorrect and have been updated to Scale: 0.00390625 and Offset: 0";
-                ARMNN_LOG(warning) << ss.str();
-                info.SetQuantizationScale((1.0f /256.0f));
-                info.SetQuantizationOffset(0);
-                outputSlot.SetTensorInfo(info);
-            }
+        auto quantizationDataType = info.GetDataType();
+        auto quantizationScales = info.GetQuantizationScales();
+        // For any Quantized Tensor ensure scale(s) are set
+        switch(quantizationDataType) {
+            case DataType::QAsymmU8:
+            case DataType::QSymmS16:
+            case DataType::QSymmS8:
+            case DataType::QAsymmS8:
+                if ((quantizationDataType == DataType::QAsymmU8 || quantizationDataType == DataType::QAsymmS8)
+                     && info.HasPerAxisQuantization()) {
+                    throw InvalidArgumentException("Per Axis Quantization is not supported in "
+                                                   "Asymmetric Quantization Datatype.");
+                }
+                if ((!info.HasPerAxisQuantization() && info.GetQuantizationScale() == 0.f)
+                    || (info.HasPerAxisQuantization() && (quantizationScales.end() !=
+                        std::find(quantizationScales.begin(), quantizationScales.end(), 0.f)))) {
+                    noErrors = false;
+                    std::stringstream ss;
+                    ss << "output " << i << " of layer " << GetLayerTypeAsCString(layer->GetType())
+                       << " (" << layer->GetNameStr() << ") is of type"
+                       << " Quantized value but the scale parameter has not been set";
+                    ReportError(ss.str(), errMessages);
+                }
+                // Softmax under QuantisedAsymm8 must always be scale (1.0f/256.0f) and offset 0
+                if (!info.HasPerAxisQuantization() && quantizationDataType == DataType::QAsymmU8 &&
+                    (info.GetQuantizationScale() != (1.0f / 256.0f) ||
+                     info.GetQuantizationOffset() != 0) &&
+                    layer->GetType() == armnn::LayerType::Softmax) {
+                    std::stringstream ss;
+                    ss << "Quantization parameters for Softmax layer (Scale: " <<
+                       info.GetQuantizationScale() << " and Offset: " << info.GetQuantizationOffset() <<
+                       ") are incorrect and have been updated to Scale: 0.00390625 and Offset: 0";
+                    ARMNN_LOG(warning) << ss.str();
+                    info.SetQuantizationScale((1.0f / 256.0f));
+                    info.SetQuantizationOffset(0);
+                    outputSlot.SetTensorInfo(info);
+                }
+                break;
+            default:
+                break;
         }
     }
     return noErrors;
--
cgit v1.2.1
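
The hunk above reduces to two rules: asymmetric quantized types must not carry per-axis quantization, and every scale, whether per-tensor or per-channel, must be non-zero. The sketch below restates those rules outside Arm NN so they can be compiled and exercised in isolation; QuantType, QuantInfo and ValidateQuantizationScales are invented names for illustration, not Arm NN APIs.

    // Minimal, self-contained sketch of the validation added by the patch.
    // All names here are hypothetical stand-ins, not Arm NN types.
    #include <algorithm>
    #include <cstdio>
    #include <stdexcept>
    #include <string>
    #include <vector>

    enum class QuantType { QAsymmU8, QAsymmS8, QSymmS8, QSymmS16 };

    struct QuantInfo
    {
        QuantType type;
        std::vector<float> scales; // one entry per-tensor, several per-axis
        bool perAxis = false;
    };

    // Returns an error message, or an empty string when the info is valid.
    std::string ValidateQuantizationScales(const QuantInfo& info)
    {
        const bool asymmetric =
            info.type == QuantType::QAsymmU8 || info.type == QuantType::QAsymmS8;

        // Rule 1: per-axis quantization is rejected for asymmetric types,
        // mirroring the new InvalidArgumentException in the patch.
        if (asymmetric && info.perAxis)
        {
            throw std::invalid_argument("Per Axis Quantization is not supported in "
                                        "Asymmetric Quantization Datatype.");
        }

        // Rule 2: no scale may be left unset (zero), mirroring the std::find
        // over the per-channel scales in the patch.
        if (std::find(info.scales.begin(), info.scales.end(), 0.f) != info.scales.end())
        {
            return "quantized tensor has an unset (zero) scale parameter";
        }
        return {};
    }

    int main()
    {
        // A symmetric per-axis tensor with one zero scale is reported (rule 2)...
        QuantInfo symm{QuantType::QSymmS8, {0.1f, 0.f, 0.2f}, true};
        std::puts(ValidateQuantizationScales(symm).c_str());
        // ...while an asymmetric per-axis tensor would throw (rule 1).
    }

Under the same assumptions, swapping QuantType::QSymmS8 for QuantType::QAsymmS8 in main makes the call throw rather than report, which is the asymmetric-versus-symmetric split the switch statement in the patch enforces.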