From 5236e1d6bcff6ebec7ec10d7d416cc6ead5482dd Mon Sep 17 00:00:00 2001
From: Keith Davis
Date: Mon, 4 Nov 2019 08:58:33 +0000
Subject: IVGCVSW-3835 Create Encoder and Decoder for QSymm8PerAxis
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* Add QuantizedSymm8PerAxis to armnn DataType (types.hpp) and
* Add Quantize and Dequantize template for int8 in TypeUtils to be able to
  compute QSymm8 of the weight
* Create PerAxisIterator for per-axis quantization
* Create QSymm8PerAxisDecoder
* Create QSymm8PerAxisEncoder

Signed-off-by: Keith Davis
Change-Id: Ibcfe0288a197b7ee50b543bdbd77b7edb8a547c2
---
 src/armnn/TypesUtils.cpp | 8 ++++++++
 1 file changed, 8 insertions(+)

(limited to 'src/armnn/TypesUtils.cpp')

diff --git a/src/armnn/TypesUtils.cpp b/src/armnn/TypesUtils.cpp
index cdc30da8ca..83c56c491c 100644
--- a/src/armnn/TypesUtils.cpp
+++ b/src/armnn/TypesUtils.cpp
@@ -33,6 +33,10 @@ float armnn::Dequantize(QuantizedType value, float scale, int32_t offset)
     return dequantized;
 }
 
+/// Explicit specialization of Quantize for int8_t
+template
+int8_t armnn::Quantize<int8_t>(float value, float scale, int32_t offset);
+
 /// Explicit specialization of Quantize for uint8_t
 template
 uint8_t armnn::Quantize<uint8_t>(float value, float scale, int32_t offset);
@@ -45,6 +49,10 @@ int16_t armnn::Quantize<int16_t>(float value, float scale, int32_t offset);
 template
 int32_t armnn::Quantize<int32_t>(float value, float scale, int32_t offset);
 
+/// Explicit specialization of Dequantize for int8_t
+template
+float armnn::Dequantize<int8_t>(int8_t value, float scale, int32_t offset);
+
 /// Explicit specialization of Dequantize for uint8_t
 template
 float armnn::Dequantize<uint8_t>(uint8_t value, float scale, int32_t offset);
-- 
cgit v1.2.1
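
Editor's note: for readers unfamiliar with the scheme this commit enables, the sketch below illustrates what symmetric per-axis int8 (QSymm8PerAxis) quantization means in practice: each slice along the quantization axis (typically the output-channel axis of a weight tensor) carries its own scale, and the zero point is always 0. This is a minimal, self-contained illustration under those assumptions only; the function and variable names are hypothetical, are not ArmNN APIs, and do not reproduce the PerAxisIterator, QSymm8PerAxisDecoder, or QSymm8PerAxisEncoder added by the full change.

// quantize_per_axis_sketch.cpp
// Minimal sketch of symmetric per-axis int8 quantization (illustrative only).
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <iostream>
#include <vector>

// Symmetric quantization of one float value with a per-axis scale (zero point is 0).
int8_t QuantizeSymm8(float value, float scale)
{
    // Round to nearest, then clamp to the int8 range.
    int rounded = static_cast<int>(std::round(value / scale));
    rounded = std::max(-128, std::min(127, rounded));
    return static_cast<int8_t>(rounded);
}

// Dequantization is just multiplication by the same per-axis scale.
float DequantizeSymm8(int8_t value, float scale)
{
    return static_cast<float>(value) * scale;
}

int main()
{
    // Toy weight tensor with shape [2][4]: axis 0 is the quantization axis,
    // so each of the two rows gets its own scale.
    std::vector<std::vector<float>> weights = {
        { 0.10f, -0.20f, 0.30f, -0.40f },
        { 1.00f, -2.00f, 3.00f, -4.00f }
    };
    // One scale per slice along axis 0, chosen so the largest magnitude maps to 127.
    std::vector<float> scales = { 0.40f / 127.0f, 4.00f / 127.0f };

    for (size_t axisIndex = 0; axisIndex < weights.size(); ++axisIndex)
    {
        for (float w : weights[axisIndex])
        {
            int8_t q = QuantizeSymm8(w, scales[axisIndex]);
            std::cout << w << " -> " << static_cast<int>(q)
                      << " -> " << DequantizeSymm8(q, scales[axisIndex]) << '\n';
        }
    }
    return 0;
}

Per-axis scales matter in this toy example because the two rows differ by an order of magnitude: a single per-tensor scale large enough for the second row would leave the first row with only a handful of usable quantization levels.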