From 245d64c60d0ea30f5080ff53225b5169927e24d6 Mon Sep 17 00:00:00 2001 From: Matthew Bentham Date: Mon, 2 Dec 2019 12:59:43 +0000 Subject: Work in progress of python bindings for Arm NN Not built or tested in any way Signed-off-by: Matthew Bentham Change-Id: Ie7f92b529aa5087130f0c5cc8c17db1581373236 --- .../_quantization/quantize_and_dequantize.py | 70 ++++++++++++++++++++++ 1 file changed, 70 insertions(+) create mode 100644 python/pyarmnn/src/pyarmnn/_quantization/quantize_and_dequantize.py (limited to 'python/pyarmnn/src/pyarmnn/_quantization/quantize_and_dequantize.py') diff --git a/python/pyarmnn/src/pyarmnn/_quantization/quantize_and_dequantize.py b/python/pyarmnn/src/pyarmnn/_quantization/quantize_and_dequantize.py new file mode 100644 index 0000000000..7f06b43bc8 --- /dev/null +++ b/python/pyarmnn/src/pyarmnn/_quantization/quantize_and_dequantize.py @@ -0,0 +1,70 @@ +# Copyright © 2019 Arm Ltd. All rights reserved. +# SPDX-License-Identifier: MIT + +from .._generated.pyarmnn import Quantize_uint8_t, Quantize_int16_t, Quantize_int32_t, \ + Dequantize_uint8_t, Dequantize_int16_t, Dequantize_int32_t + +__dtype_to_quantize_function = { + 'uint8': Quantize_uint8_t, + 'int16': Quantize_int16_t, + 'int32': Quantize_int32_t + } + +__dtype_to_dequantize_function = { + 'uint8': ((0, 255), Dequantize_uint8_t), + 'int16': ((-32768, 32767), Dequantize_int16_t), + 'int32': ((-2147483648, 2147483647), Dequantize_int32_t) + } + + +def quantize(value: float, scale: float, offset: int, target_dtype: str) -> int: + """Quantize given value to the given target datatype using Arm NN. + + This function can be used to convert a 32-bit floating point value into 16/32-bit + integer or 8-bit unsigned integer values. + + Args: + value (float): The value to be quantized. + scale (float): A numeric constant that the value is multiplied by. + offset (int): A 'zero-point' used to 'shift' the integer range. + target_dtype (str): The target data type. 
Supported values: 'uint8', 'int16', 'int32'. + + Returns: + int: A quantized 8-bit unsigned integer value or 16/32-bit integer value. + """ + + if target_dtype not in __dtype_to_quantize_function: + raise ValueError("""Unexpected target datatype {} given. + Armnn currently supports quantization to {} values.""".format(target_dtype, list(__dtype_to_quantize_function.keys()))) + + return __dtype_to_quantize_function[target_dtype](float(value), scale, offset) + + +def dequantize(value: int, scale: float, offset: float, from_dtype: str) -> float: + """Dequantize given value from the given datatype using Armnn. + + This function can be used to convert an 8-bit unsigned integer value or 16/32-bit + integer value into a 32-bit floating point value. Typically used when decoding an + output value from an output tensor on a quantized model. + + Args: + value (int): The value to be dequantized. Value could be numpy numeric data type. + scale (float): A numeric constant that the value is multiplied by. + offset (float): A 'zero-point' used to 'shift' the integer range. + from_dtype (str): The data type 'value' represents. Supported values: 'uint8', 'int16', 'int32'. + + Returns: + float: A dequantized 32-bit floating-point value. + """ + + # specifies which function to use with given datatype and the value range for that data type. + if from_dtype not in __dtype_to_dequantize_function: + raise ValueError("""Unexpected value datatype {} given. + Armnn currently supports dequantization from {} values.""".format(from_dtype, list(__dtype_to_dequantize_function.keys()))) + + input_range = __dtype_to_dequantize_function[from_dtype][0] + + if not input_range[0] <= value <= input_range[1]: + raise ValueError('Value is not within range of the given datatype {}'.format(from_dtype)) + + return __dtype_to_dequantize_function[from_dtype][1](int(value), scale, offset) -- cgit v1.2.1