Diffstat (limited to 'src/armnnTfLiteParser')
-rw-r--r--  src/armnnTfLiteParser/TfLiteParser.cpp      8
-rw-r--r--  src/armnnTfLiteParser/test/Dequantize.cpp   8
2 files changed, 10 insertions, 6 deletions
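
The TfLiteParser.cpp hunks below widen the INT8 handling in ToTensorInfo: a tensor whose quantization block carries a single zero_point entry is now treated as per-tensor QAsymmS8 even when that zero point happens to be 0, while multi-element zero_points keep falling through to the per-channel path; CreateConstTensor then gains a matching QAsymmS8 case. A minimal standalone sketch of that selection rule, using a plain enum and std::vector in place of the parser's armnn::DataType and flatbuffer types (SelectInt8Type is an illustrative name, not part of the parser):

    #include <cstdint>
    #include <vector>

    // Stand-in for armnn::DataType, just for this sketch.
    enum class Int8Type { QAsymmS8, QSymmS8 };

    // Illustrative only: mirrors the post-patch rule for tflite INT8 tensors.
    // One zero_point entry  => per-tensor asymmetric (QAsymmS8), even if it is 0;
    // several entries       => per-channel symmetric (QSymmS8).
    Int8Type SelectInt8Type(const std::vector<int64_t>& zeroPoints)
    {
        return zeroPoints.size() == 1 ? Int8Type::QAsymmS8 : Int8Type::QSymmS8;
    }
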
diff --git a/src/armnnTfLiteParser/TfLiteParser.cpp b/src/armnnTfLiteParser/TfLiteParser.cpp
index 593f3eb02d..f5c01f249a 100644
--- a/src/armnnTfLiteParser/TfLiteParser.cpp
+++ b/src/armnnTfLiteParser/TfLiteParser.cpp
@@ -316,7 +316,7 @@ armnn::TensorInfo ToTensorInfo(TfLiteParser::TensorRawPtr tensorPtr, const std::
type = armnn::DataType::Float32;
break;
case tflite::TensorType_INT8:
- if (tensorPtr->quantization->zero_point.size() == 1 && tensorPtr->quantization->zero_point[0] != 0)
+ if (tensorPtr->quantization->zero_point.size() == 1)
{
// Per-tensor
type = armnn::DataType::QAsymmS8;
@@ -398,7 +398,6 @@ armnn::TensorInfo ToTensorInfo(TfLiteParser::TensorRawPtr tensorPtr, const std::
quantizationScales,
dimensionMappings[boost::numeric_cast<unsigned int>(
tensorPtr->quantization->quantized_dimension)]);
-
return result;
}
}
@@ -2900,6 +2899,11 @@ TfLiteParser::CreateConstTensor(TensorRawPtr tensorPtr,
tensorPtr,
tensorInfo,
permutationVector);
+ case armnn::DataType::QAsymmS8:
+ return CreateConstTensorAndStoreData<int8_t>(bufferPtr,
+ tensorPtr,
+ tensorInfo,
+ permutationVector);
case armnn::DataType::Signed32:
return CreateConstTensorAndStoreData<int32_t>(bufferPtr,
tensorPtr,
diff --git a/src/armnnTfLiteParser/test/Dequantize.cpp b/src/armnnTfLiteParser/test/Dequantize.cpp
index 79dfe2e26a..663f2ca823 100644
--- a/src/armnnTfLiteParser/test/Dequantize.cpp
+++ b/src/armnnTfLiteParser/test/Dequantize.cpp
@@ -103,16 +103,16 @@ BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
{{"outputTensor", { 0.0f, 1.5f, 7.5f, 49150.5f, -1.5f,-49152.0f }}});
}
- struct SimpleDequantizeFixtureQSymmS8 : DequantizeFixture
+ struct SimpleDequantizeFixtureQAsymmS8 : DequantizeFixture
{
- SimpleDequantizeFixtureQSymmS8() : DequantizeFixture("[ 1, 6 ]",
+ SimpleDequantizeFixtureQAsymmS8() : DequantizeFixture("[ 1, 6 ]",
"[ 1, 6 ]",
"INT8") {}
};
- BOOST_FIXTURE_TEST_CASE(SimpleDequantizeQSymmS8, SimpleDequantizeFixtureQSymmS8)
+ BOOST_FIXTURE_TEST_CASE(SimpleDequantizeQAsymmS8, SimpleDequantizeFixtureQAsymmS8)
{
- RunTest<2, armnn::DataType::QSymmS8 , armnn::DataType::Float32>(
+ RunTest<2, armnn::DataType::QAsymmS8 , armnn::DataType::Float32>(
0,
{{"inputTensor", { 0, 1, 5, 127, -128, -1 }}},
{{"outputTensor", { 0.0f, 1.5f, 7.5f, 190.5f, -192.0f, -1.5f }}});