From ddb1d06dbcb5dc4a89a237ac1176279669817f46 Mon Sep 17 00:00:00 2001
From: Francis Murtagh
Date: Tue, 10 Mar 2020 13:51:45 +0000
Subject: MLCE-159 Add QAsymmS8 to ArmnnQuantizer

 * Allow per layer quantization from Fp32 to Int8 (QAsymmS8) like TfLite

Signed-off-by: Francis Murtagh
Change-Id: I5bbf770aa29d81af3568c15b47d2b2c18e55bb28
---
 src/backends/backendsCommon/test/WorkloadTestUtils.hpp | 2 ++
 1 file changed, 2 insertions(+)

(limited to 'src/backends/backendsCommon/test')

diff --git a/src/backends/backendsCommon/test/WorkloadTestUtils.hpp b/src/backends/backendsCommon/test/WorkloadTestUtils.hpp
index 0b0f265db4..51683335e1 100644
--- a/src/backends/backendsCommon/test/WorkloadTestUtils.hpp
+++ b/src/backends/backendsCommon/test/WorkloadTestUtils.hpp
@@ -98,6 +98,8 @@ inline armnn::Optional<armnn::DataType> GetBiasTypeFromWeightsType(armnn::Option
         case armnn::DataType::Float16:
         case armnn::DataType::Float32:
             return weightsType;
+        case armnn::DataType::QAsymmS8:
+            return armnn::DataType::Signed32;
         case armnn::DataType::QAsymmU8:
             return armnn::DataType::Signed32;
         case armnn::DataType::QSymmS16:
-- 
cgit v1.2.1

NOTE(review): reconstructed from a whitespace-collapsed extraction — line breaks and hunk
indentation follow the standard git format-patch layout and ArmNN's 4-space indent style;
the `<armnn::DataType>` template arguments stripped by HTML extraction have been restored
(truncated again at the hunk-header width limit, as git does). Author/sign-off email
addresses were also stripped and cannot be recovered from this source. Verify against the
upstream ArmNN commit ddb1d06 before applying with `git am`.