From 98c0f66c9f5fa5055d87c5c4b71949f7b5d1a678 Mon Sep 17 00:00:00 2001
From: Sadik Armagan <sadik.armagan@arm.com>
Date: Thu, 21 Nov 2019 15:54:36 +0000
Subject: IVGCVSW-4151 HAL 1_2 Dequantize FP32 Per Channel Tests on CpuAcc
 Failing

* Quantization dimension other than 0 is not supported.

Signed-off-by: Sadik Armagan <sadik.armagan@arm.com>
Change-Id: Ied3914600a754b799e98d5660ad6196c8c4fa23d
---
 ConversionUtils.hpp | 11 +++++++++--
 1 file changed, 9 insertions(+), 2 deletions(-)

diff --git a/ConversionUtils.hpp b/ConversionUtils.hpp
index a284a50a..6f1f100d 100644
--- a/ConversionUtils.hpp
+++ b/ConversionUtils.hpp
@@ -2220,6 +2220,13 @@ bool ConvertDequantize(const Operation& operation, const Model& model, Conversio
         return Fail("%s: Operation has invalid input", __func__);
     }
 
+    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
+    const armnn::Optional<unsigned int>& quantizationDim = inputInfo.GetQuantizationDim();
+    if (quantizationDim.has_value() && quantizationDim.value() != 0)
+    {
+        return Fail("%s: Operation has quantization dimension different than 0", __func__);
+    }
+
     const Operand* const outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
     if (!outputOperand)
     {
@@ -2237,8 +2244,8 @@ bool ConvertDequantize(const Operation& operation, const Model& model, Conversio
                                IsDequantizeSupported,
                                data.m_Backends,
                                isSupported,
-                               input.GetTensorInfo(),
-                               GetTensorInfoForOperand(*outputOperand));
+                               inputInfo,
+                               outputInfo);
     if (!isSupported)
     {
         return false;
-- 
cgit v1.2.1