From 4d85adf436092d01ca0957967156e36060e8be68 Mon Sep 17 00:00:00 2001
From: Teresa Charlin
Date: Thu, 27 Oct 2022 11:37:29 +0100
Subject: IVGCVSW-7296 REDUCE_PROD tests fail when using Tf 2.10

* In TF, what ArmNN calls quantized data types can be non-quantized as well.
* This patch creates 2 models:
  * ArmNN: a model where int8 and uint8 are always quantized, but the scale
    can be 1 and the offset 0
  * TFLite: a model where int8 and uint8 can be quantized or non-quantized

Signed-off-by: Teresa Charlin
Change-Id: Id960f2f30988f2bbec88cb4e0c52c189ac957bae
---
 src/armnn/Network.cpp | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

(limited to 'src/armnn')

diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index d3ce7ab62c..9d00a69518 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -574,8 +574,10 @@ bool CheckScaleSetOnQuantizedType(Layer* layer, Optional<std::vector<std::string>&> errMessages)
         OutputSlot& outputSlot = layer->GetOutputSlot(i);
         TensorInfo info = outputSlot.GetTensorInfo();
-        if (DataType::QAsymmU8 == info.GetDataType()) {
-            if (0.f == info.GetQuantizationScale()) {
+        if (DataType::QAsymmU8 == info.GetDataType())
+        {
+            if (0.f == info.GetQuantizationScale())
+            {
                 noErrors = false;
                 std::stringstream ss;
                 ss << "output " << i << " of layer " << GetLayerTypeAsCString(layer->GetType())
--
cgit v1.2.1
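
For context on the convention the commit message describes, here is a minimal, self-contained C++ sketch. It is not part of the patch; it only assumes the public armnn::TensorInfo constructor that takes a quantization scale and offset. It shows how a tensor that TFLite may describe as plain, non-quantized uint8 is represented on the ArmNN side: the data type stays DataType::QAsymmU8, and the "non-quantized" case is expressed with the identity parameters scale = 1 and offset = 0, so the CheckScaleSetOnQuantizedType() validation touched in the hunk above (which reports a scale of 0 as an error) still passes. The tensor shape is arbitrary and purely illustrative.

// Illustrative sketch, not part of this patch.
#include <armnn/Tensor.hpp>
#include <armnn/Types.hpp>
#include <cassert>

int main()
{
    // TFLite can carry a uint8 tensor with no quantization parameters at all.
    // ArmNN always treats DataType::QAsymmU8 as quantized, so the equivalent
    // "non-quantized" tensor is described with identity parameters rather
    // than being left with the unset scale of 0.0f.
    armnn::TensorInfo info(armnn::TensorShape({ 1, 2, 2, 1 }),
                           armnn::DataType::QAsymmU8,
                           /*quantizationScale=*/1.0f,
                           /*quantizationOffset=*/0);

    // A scale of 0.0f is exactly what CheckScaleSetOnQuantizedType flags;
    // the identity parameters pass the check.
    assert(info.GetQuantizationScale() != 0.0f);
    return 0;
}

This identity-parameter convention is why the commit creates two test models: the ArmNN model always carries quantization parameters for int8/uint8 tensors, while the TFLite model may omit them for the same data.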