From ddb1d06dbcb5dc4a89a237ac1176279669817f46 Mon Sep 17 00:00:00 2001
From: Francis Murtagh
Date: Tue, 10 Mar 2020 13:51:45 +0000
Subject: MLCE-159 Add QAsymmS8 to ArmnnQuantizer

 * Allow per-layer quantization from Fp32 to Int8 (QAsymmS8), as TfLite does

Signed-off-by: Francis Murtagh
Change-Id: I5bbf770aa29d81af3568c15b47d2b2c18e55bb28
---
 src/backends/backendsCommon/WorkloadData.cpp | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

(limited to 'src/backends/backendsCommon/WorkloadData.cpp')

diff --git a/src/backends/backendsCommon/WorkloadData.cpp b/src/backends/backendsCommon/WorkloadData.cpp
index dbd1158380..bb0c21ffba 100644
--- a/src/backends/backendsCommon/WorkloadData.cpp
+++ b/src/backends/backendsCommon/WorkloadData.cpp
@@ -994,6 +994,7 @@ void FullyConnectedQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) c
     {
         DataType::Float32,
         DataType::Float16,
+        DataType::QAsymmS8,
         DataType::QAsymmU8,
         DataType::QSymmS16
     };
@@ -1183,8 +1184,8 @@ void Convolution2dQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) co
     std::vector<DataType> supportedTypes =
     {
         DataType::Float32,
-        DataType::QAsymmU8,
         DataType::QAsymmS8,
+        DataType::QAsymmU8,
         DataType::QSymmS16,
         DataType::QSymmS8,
         DataType::Float16
--
cgit v1.2.1
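
For readers less familiar with this file: each workload descriptor's Validate() builds a supportedTypes vector like the ones touched above and passes it to a whitelist check that rejects tensors of any other data type, so admitting QAsymmS8 amounts to listing it. Below is a minimal, self-contained C++ sketch of that pattern under stated assumptions: the DataType enum, TensorInfo struct, and ValidateDataTypes function here are simplified stand-ins for illustration, not the real ArmNN definitions.

#include <algorithm>
#include <stdexcept>
#include <string>
#include <vector>

// Hypothetical stand-ins, reduced to what the sketch needs.
enum class DataType { Float16, Float32, QAsymmS8, QAsymmU8, QSymmS8, QSymmS16 };

struct TensorInfo
{
    DataType m_DataType;
    DataType GetDataType() const { return m_DataType; }
};

// Whitelist-style check: throw if the tensor's data type is not in the
// list of types this workload supports.
void ValidateDataTypes(const TensorInfo& info,
                       const std::vector<DataType>& supportedTypes,
                       const std::string& descName)
{
    const bool supported = std::find(supportedTypes.begin(),
                                     supportedTypes.end(),
                                     info.GetDataType()) != supportedTypes.end();
    if (!supported)
    {
        throw std::invalid_argument(descName + ": unsupported tensor data type");
    }
}

int main()
{
    // Mirrors the FullyConnected list after the patch.
    const std::vector<DataType> supportedTypes =
    {
        DataType::Float32,
        DataType::Float16,
        DataType::QAsymmS8, // newly permitted signed asymmetric 8-bit type
        DataType::QAsymmU8,
        DataType::QSymmS16
    };

    TensorInfo input{DataType::QAsymmS8};
    ValidateDataTypes(input, supportedTypes, "FullyConnectedQueueDescriptor");
    // Reaching this point means QAsymmS8 passed validation.
    return 0;
}

Note that the reordering in the second hunk (QAsymmS8 before QAsymmU8) is purely cosmetic: the check is membership-based, so the order of entries does not affect validation.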