aboutsummaryrefslogtreecommitdiff
path: root/src/armnnTfLiteParser/test/Quantize.cpp
diff options
context:
space:
mode:
authorFinn Williams <Finn.Williams@arm.com>2021-06-29 15:50:08 +0100
committerfinn.williams <finn.williams@arm.com>2021-07-21 18:44:56 +0000
commitb49ed18ac76cbab23201598f08972cfed19cce4c (patch)
treeb100d19df7fdbfe009b2b8bf8048494bdab82a1a /src/armnnTfLiteParser/test/Quantize.cpp
parent9542f90d82a42096166d72109683abc165072297 (diff)
downloadarmnn-b49ed18ac76cbab23201598f08972cfed19cce4c.tar.gz
IVGCVSW-6176 Add support for shape_signature in the tflite parser
* tflite shape_signatures will now be the preferred way to detect dynamic tensors
* add test utility to the parser that converts a model's tensors to dynamic
* by default tests will run a dynamic version of the model in addition to the original
* fix dynamic shape inference of unpack operator
* reactivate and fix quantize test
* add shape inference to expand dims

Signed-off-by: Finn Williams <Finn.Williams@arm.com>
Change-Id: If11ba19d813cd3590707583dff1e4eb0e6412a1d
Diffstat (limited to 'src/armnnTfLiteParser/test/Quantize.cpp')
-rw-r--r--src/armnnTfLiteParser/test/Quantize.cpp25
1 file changed, 12 insertions(+), 13 deletions(-)
diff --git a/src/armnnTfLiteParser/test/Quantize.cpp b/src/armnnTfLiteParser/test/Quantize.cpp
index c7c936e745..800edbdf46 100644
--- a/src/armnnTfLiteParser/test/Quantize.cpp
+++ b/src/armnnTfLiteParser/test/Quantize.cpp
@@ -1,21 +1,19 @@
//
-// Copyright © 2019 Arm Ltd. All rights reserved.
+// Copyright © 2019 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "ParserFlatbuffersFixture.hpp"
-#include "../TfLiteParser.hpp"
-#include <string>
-#include <iostream>
TEST_SUITE("TensorflowLiteParser_Quantize")
{
struct QuantizeFixture : public ParserFlatbuffersFixture
{
- explicit QuantizeFixture(const std::string & inputShape,
- const std::string & outputShape,
- const std::string & dataType)
+ explicit QuantizeFixture(const std::string& inputShape,
+ const std::string& outputShape,
+ const std::string& dataType,
+ const std::string& zeroPoint = "[ 0 ]")
{
m_JsonString = R"(
{
@@ -32,7 +30,7 @@ TEST_SUITE("TensorflowLiteParser_Quantize")
"min": [ 0.0 ],
"max": [ 255.0 ],
"scale": [ 1.0 ],
- "zero_point": [ 0 ],
+ "zero_point": )" + zeroPoint + R"(,
}
},
{
@@ -44,7 +42,7 @@ TEST_SUITE("TensorflowLiteParser_Quantize")
"min": [ 0.0 ],
"max": [ 255.0 ],
"scale": [ 1.5 ],
- "zero_point": [ 0 ],
+ "zero_point": )" + zeroPoint + R"(,
}
}
],
@@ -79,9 +77,9 @@ TEST_SUITE("TensorflowLiteParser_Quantize")
"UINT8") {}
};
- TEST_CASE_FIXTURE(SimpleQuantizeQAsymm8, SimpleQuantizeFixtureQAsymm8)
+ TEST_CASE_FIXTURE(SimpleQuantizeFixtureQAsymm8, "SimpleQuantizeFixtureQAsymm8")
{
- RunTest<2, armnn::DataType::Float32, armnn::DataType::QuantisedAsymm8>(
+ RunTest<2, armnn::DataType::Float32, armnn::DataType::QAsymmU8>(
0,
{{"inputTensor", { 0.0f, 1.5f, 7.5f, 150.0f, 300.0f, 382.5f }}},
{{"outputTensor", { 0u, 1u, 5u, 100u, 200u, 255u }}});
@@ -96,7 +94,7 @@ TEST_SUITE("TensorflowLiteParser_Quantize")
TEST_CASE_FIXTURE(SimpleQuantizeFixtureQSymm16, "SimpleQuantizeQsymm16")
{
- RunTest<2, armnn::DataType::Float32, armnn::DataType::QuantisedSymm16>(
+ RunTest<2, armnn::DataType::Float32, armnn::DataType::QSymmS16>(
0,
{{"inputTensor", { 0.0f, 1.5f, 7.5f, 49150.5f, -1.5f,-49152.0f }}},
{{"outputTensor", { 0, 1, 5, 32767, -1, -32768 }}});
@@ -106,7 +104,8 @@ TEST_SUITE("TensorflowLiteParser_Quantize")
{
SimpleQuantizeFixtureQSymmS8() : QuantizeFixture("[ 1, 6 ]",
"[ 1, 6 ]",
- "INT8") {}
+ "INT8",
+ "[]") {}
};
TEST_CASE_FIXTURE(SimpleQuantizeFixtureQSymmS8, "SimpleQuantizeQSymmS8")