From ed66d14ec0840282f74241dda46e02194278c3cc Mon Sep 17 00:00:00 2001
From: Finn Williams
Date: Fri, 6 Dec 2019 09:55:55 +0000
Subject: IVGCVSW-4191 Add DEQUANTIZE to the TfLite Parser

!armnn:2421

Change-Id: Icdb02b7248ed408c3c8ad2e3e38df5b7cda1c545
Signed-off-by: Finn Williams
---
 src/armnnTfLiteParser/test/Dequantize.cpp | 121 ++++++++++++++++++++++++++++++
 1 file changed, 121 insertions(+)
 create mode 100644 src/armnnTfLiteParser/test/Dequantize.cpp

diff --git a/src/armnnTfLiteParser/test/Dequantize.cpp b/src/armnnTfLiteParser/test/Dequantize.cpp
new file mode 100644
index 0000000000..2f98c07a66
--- /dev/null
+++ b/src/armnnTfLiteParser/test/Dequantize.cpp
@@ -0,0 +1,121 @@
+//
+// Copyright © 2019 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include <boost/test/unit_test.hpp>
+#include "ParserFlatbuffersFixture.hpp"
+#include "../TfLiteParser.hpp"
+
+#include <string>
+#include <iostream>
+
+BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
+
+    struct DequantizeFixture : public ParserFlatbuffersFixture
+    {
+        explicit DequantizeFixture(const std::string & inputShape,
+                                   const std::string & outputShape,
+                                   const std::string & dataType)
+        {
+            m_JsonString = R"(
+                {
+                    "version": 3,
+                    "operator_codes": [ { "builtin_code": "DEQUANTIZE" } ],
+                    "subgraphs": [ {
+                        "tensors": [
+                            {
+                                "shape": )" + inputShape + R"(,
+                                "type": )" + dataType + R"(,
+                                "buffer": 0,
+                                "name": "inputTensor",
+                                "quantization": {
+                                    "min": [ 0.0 ],
+                                    "max": [ 255.0 ],
+                                    "scale": [ 1.5 ],
+                                    "zero_point": [ 0 ],
+                                }
+                            },
+                            {
+                                "shape": )" + outputShape + R"( ,
+                                "type": "FLOAT32",
+                                "buffer": 1,
+                                "name": "outputTensor",
+                                "quantization": {
+                                    "min": [ 0.0 ],
+                                    "max": [ 255.0 ],
+                                    "scale": [ 1.0 ],
+                                    "zero_point": [ 0 ],
+                                }
+                            }
+                        ],
+                        "inputs": [ 0 ],
+                        "outputs": [ 1 ],
+                        "operators": [
+                            {
+                                "opcode_index": 0,
+                                "inputs": [ 0 ],
+                                "outputs": [ 1 ],
+                                "builtin_options_type": "DequantizeOptions",
+                                "builtin_options": {
+                                },
+                                "custom_options_format": "FLEXBUFFERS"
+                            }
+                        ],
+                    } ],
+                    "buffers" : [
+                        { },
+                        { },
+                    ]
+                }
+            )";
+            SetupSingleInputSingleOutput("inputTensor", "outputTensor");
+        }
+    };
+
+    struct SimpleDequantizeFixtureQAsymm8 : DequantizeFixture
+    {
+        SimpleDequantizeFixtureQAsymm8() : DequantizeFixture("[ 1, 6 ]",
+                                                             "[ 1, 6 ]",
+                                                             "UINT8") {}
+    };
+
+    BOOST_FIXTURE_TEST_CASE(SimpleDequantizeQAsymm8, SimpleDequantizeFixtureQAsymm8)
+    {
+        RunTest<2, armnn::DataType::QuantisedAsymm8 , armnn::DataType::Float32>(
+                0,
+                {{"inputTensor",  { 0u,   1u,   5u,   100u,   200u,   255u }}},
+                {{"outputTensor", { 0.0f, 1.5f, 7.5f, 150.0f, 300.0f, 382.5f }}});
+    }
+
+    struct SimpleDequantizeFixtureQSymm16 : DequantizeFixture
+    {
+        SimpleDequantizeFixtureQSymm16() : DequantizeFixture("[ 1, 6 ]",
+                                                             "[ 1, 6 ]",
+                                                             "INT16") {}
+    };
+
+    BOOST_FIXTURE_TEST_CASE(SimpleDequantizeQsymm16, SimpleDequantizeFixtureQSymm16)
+    {
+        RunTest<2, armnn::DataType::QuantisedSymm16 , armnn::DataType::Float32>(
+                0,
+                {{"inputTensor",  { 0,    1,    5,    32767,    -1,   -32768 }}},
+                {{"outputTensor", { 0.0f, 1.5f, 7.5f, 49150.5f, -1.5f,-49152.0f }}});
+    }
+
+    struct SimpleDequantizeFixtureQSymmS8 : DequantizeFixture
+    {
+        SimpleDequantizeFixtureQSymmS8() : DequantizeFixture("[ 1, 6 ]",
+                                                             "[ 1, 6 ]",
+                                                             "INT8") {}
+    };
+
+    BOOST_FIXTURE_TEST_CASE(SimpleDequantizeQSymmS8, SimpleDequantizeFixtureQSymmS8)
+    {
+        RunTest<2, armnn::DataType::QSymmS8 , armnn::DataType::Float32>(
+                0,
+                {{"inputTensor",  { 0,    1,    5,    127,    -128,    -1 }}},
+                {{"outputTensor", { 0.0f, 1.5f, 7.5f, 190.5f, -192.0f, -1.5f }}});
+    }
+
+BOOST_AUTO_TEST_SUITE_END()
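
Note (not part of the patch): the expected output vectors in the test cases above follow the usual affine dequantization formula, real = scale * (quantized - zero_point). With the fixture's scale of 1.5 and zero_point of 0, every expected value is simply 1.5 times the quantized input (e.g. 255 -> 382.5, -32768 -> -49152). The standalone sketch below reproduces the QAsymm8 vector; the Dequantize helper and main() are illustrative only, not part of Arm NN or of this change.

// Illustrative sketch: checks the SimpleDequantizeQAsymm8 expected values
// using real = scale * (quantized - zero_point).
#include <cstdint>
#include <iostream>
#include <vector>

static float Dequantize(uint8_t quantized, float scale, int32_t zeroPoint)
{
    return scale * static_cast<float>(static_cast<int32_t>(quantized) - zeroPoint);
}

int main()
{
    const float   scale     = 1.5f; // "scale": [ 1.5 ] in the inputTensor quantization block
    const int32_t zeroPoint = 0;    // "zero_point": [ 0 ]
    const std::vector<uint8_t> input = { 0u, 1u, 5u, 100u, 200u, 255u };

    for (uint8_t q : input)
    {
        std::cout << Dequantize(q, scale, zeroPoint) << ' ';
    }
    std::cout << '\n'; // prints: 0 1.5 7.5 150 300 382.5
    return 0;
}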