path: root/src/armnnTfLiteParser
author    Keith Davis <keith.davis@arm.com>  2020-02-19 10:08:33 +0000
committer Keith Davis <keith.davis@arm.com>  2020-02-19 10:08:33 +0000
commit    67e6c54b4c4ff1e46a9ed14014d279794a666969 (patch)
tree      80f272650985f9cd7ae684791dc54d6f68bfd1ca /src/armnnTfLiteParser
parent    48d709356269f4f4131d319f6638ab4f7b083931 (diff)
download  armnn-67e6c54b4c4ff1e46a9ed14014d279794a666969.tar.gz
IVGCVSW-4472 Yolo v3 ExecuteNetwork failing
* Add Debug workload for QAsymmS8/U8
* Change Dequantize tests to test AsymmS8 instead of SymmS8
* Fix incorrect supportedness within RefLayerSupport

Signed-off-by: Keith Davis <keith.davis@arm.com>
Change-Id: Ie51f1e33c564d46c86bf0150b1addda3fc093d13
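For context, the QAsymm data types use an affine (scale plus zero point) mapping, whereas QSymm fixes the zero point at 0. A minimal sketch of the dequantization involved, with illustrative names that are not taken from this patch:

    #include <cstdint>

    // Asymmetric 8-bit dequantization: real = scale * (quantized - zero_point).
    // QSymmS8 is the special case where the zero point is always 0.
    float DequantizeQAsymmS8(int8_t quantized, float scale, int32_t zeroPoint)
    {
        return scale * static_cast<float>(static_cast<int32_t>(quantized) - zeroPoint);
    }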
Diffstat (limited to 'src/armnnTfLiteParser')
-rw-r--r--  src/armnnTfLiteParser/TfLiteParser.cpp     | 8
-rw-r--r--  src/armnnTfLiteParser/test/Dequantize.cpp  | 8
2 files changed, 10 insertions(+), 6 deletions(-)
diff --git a/src/armnnTfLiteParser/TfLiteParser.cpp b/src/armnnTfLiteParser/TfLiteParser.cpp
index 593f3eb02d..f5c01f249a 100644
--- a/src/armnnTfLiteParser/TfLiteParser.cpp
+++ b/src/armnnTfLiteParser/TfLiteParser.cpp
@@ -316,7 +316,7 @@ armnn::TensorInfo ToTensorInfo(TfLiteParser::TensorRawPtr tensorPtr, const std::
type = armnn::DataType::Float32;
break;
case tflite::TensorType_INT8:
- if (tensorPtr->quantization->zero_point.size() == 1 && tensorPtr->quantization->zero_point[0] != 0)
+ if (tensorPtr->quantization->zero_point.size() == 1)
{
// Per-tensor
type = armnn::DataType::QAsymmS8;
@@ -398,7 +398,6 @@ armnn::TensorInfo ToTensorInfo(TfLiteParser::TensorRawPtr tensorPtr, const std::
quantizationScales,
dimensionMappings[boost::numeric_cast<unsigned int>(
tensorPtr->quantization->quantized_dimension)]);
-
return result;
}
}
@@ -2900,6 +2899,11 @@ TfLiteParser::CreateConstTensor(TensorRawPtr tensorPtr,
tensorPtr,
tensorInfo,
permutationVector);
+ case armnn::DataType::QAsymmS8:
+ return CreateConstTensorAndStoreData<int8_t>(bufferPtr,
+ tensorPtr,
+ tensorInfo,
+ permutationVector);
case armnn::DataType::Signed32:
return CreateConstTensorAndStoreData<int32_t>(bufferPtr,
tensorPtr,
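Note on the TfLiteParser.cpp hunks above: dropping the zero_point[0] != 0 condition means any per-tensor quantized INT8 tensor is now reported as QAsymmS8 (a zero point of 0 is simply a valid asymmetric case), and CreateConstTensor gains a matching QAsymmS8 branch so int8 constant tensors are stored. Roughly, the per-tensor selection after the patch behaves like the sketch below (a paraphrase, not a verbatim excerpt; the per-channel branch is assumed to remain QSymmS8):

    // Per-tensor quantization (single zero point) -> asymmetric signed 8-bit,
    // whether or not that zero point happens to be 0.
    if (tensorPtr->quantization->zero_point.size() == 1)
    {
        type = armnn::DataType::QAsymmS8;
    }
    else
    {
        // Per-channel quantization stays symmetric (zero point fixed at 0).
        type = armnn::DataType::QSymmS8;
    }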
diff --git a/src/armnnTfLiteParser/test/Dequantize.cpp b/src/armnnTfLiteParser/test/Dequantize.cpp
index 79dfe2e26a..663f2ca823 100644
--- a/src/armnnTfLiteParser/test/Dequantize.cpp
+++ b/src/armnnTfLiteParser/test/Dequantize.cpp
@@ -103,16 +103,16 @@ BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
{{"outputTensor", { 0.0f, 1.5f, 7.5f, 49150.5f, -1.5f,-49152.0f }}});
}
- struct SimpleDequantizeFixtureQSymmS8 : DequantizeFixture
+ struct SimpleDequantizeFixtureQAsymmS8 : DequantizeFixture
{
- SimpleDequantizeFixtureQSymmS8() : DequantizeFixture("[ 1, 6 ]",
+ SimpleDequantizeFixtureQAsymmS8() : DequantizeFixture("[ 1, 6 ]",
"[ 1, 6 ]",
"INT8") {}
};
- BOOST_FIXTURE_TEST_CASE(SimpleDequantizeQSymmS8, SimpleDequantizeFixtureQSymmS8)
+ BOOST_FIXTURE_TEST_CASE(SimpleDequantizeQAsymmS8, SimpleDequantizeFixtureQAsymmS8)
{
- RunTest<2, armnn::DataType::QSymmS8 , armnn::DataType::Float32>(
+ RunTest<2, armnn::DataType::QAsymmS8 , armnn::DataType::Float32>(
0,
{{"inputTensor", { 0, 1, 5, 127, -128, -1 }}},
{{"outputTensor", { 0.0f, 1.5f, 7.5f, 190.5f, -192.0f, -1.5f }}});