author    Finn Williams <Finn.Williams@arm.com>    2019-12-06 09:55:55 +0000
committer Finn Williams <Finn.Williams@arm.com>    2019-12-10 12:55:18 +0000
commit    ed66d14ec0840282f74241dda46e02194278c3cc (patch)
tree      b960d3a61e5251bf6a515be8bae4974e1a84bced
parent    836b27bd73d62795e82d0ce666d728c94c216067 (diff)
download  armnn-ed66d14ec0840282f74241dda46e02194278c3cc.tar.gz
IVGCVSW-4191 Add DEQUANTIZE to the TfLite Parser
!armnn:2421
Change-Id: Icdb02b7248ed408c3c8ad2e3e38df5b7cda1c545
Signed-off-by: Finn Williams <Finn.Williams@arm.com>
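For context, the sketch below shows roughly how a model containing a DEQUANTIZE operator would be loaded through the parser once this change is in place. It is a minimal sketch, assuming the public ITfLiteParser API of this release; the file name "model.tflite" is hypothetical and error handling is omitted.

    // Minimal usage sketch (assumption: public ITfLiteParser API of this release).
    // "model.tflite" is a hypothetical model file containing a DEQUANTIZE node.
    #include <armnnTfLiteParser/ITfLiteParser.hpp>
    #include <armnn/INetwork.hpp>

    int main()
    {
        using namespace armnnTfLiteParser;

        ITfLiteParserPtr parser = ITfLiteParser::Create();

        // With this patch, a DEQUANTIZE operator is mapped to an armnn
        // Dequantize layer instead of being rejected as unsupported.
        armnn::INetworkPtr network = parser->CreateNetworkFromBinaryFile("model.tflite");

        return network ? 0 : 1;
    }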
-rw-r--r--  CMakeLists.txt                              1
-rw-r--r--  src/armnnTfLiteParser/TfLiteParser.cpp     32
-rw-r--r--  src/armnnTfLiteParser/TfLiteParser.hpp      1
-rw-r--r--  src/armnnTfLiteParser/test/Dequantize.cpp 121
4 files changed, 155 insertions, 0 deletions
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 8f6d794c34..7af6a94760 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -706,6 +706,7 @@ if(BUILD_UNIT_TESTS)
         src/armnnTfLiteParser/test/Constant.cpp
         src/armnnTfLiteParser/test/Conv2D.cpp
         src/armnnTfLiteParser/test/DepthwiseConvolution2D.cpp
+        src/armnnTfLiteParser/test/Dequantize.cpp
         src/armnnTfLiteParser/test/DetectionPostProcess.cpp
         src/armnnTfLiteParser/test/FullyConnected.cpp
         src/armnnTfLiteParser/test/L2Normalization.cpp
diff --git a/src/armnnTfLiteParser/TfLiteParser.cpp b/src/armnnTfLiteParser/TfLiteParser.cpp
index f06e244223..6853512c8f 100644
--- a/src/armnnTfLiteParser/TfLiteParser.cpp
+++ b/src/armnnTfLiteParser/TfLiteParser.cpp
@@ -314,6 +314,12 @@ armnn::TensorInfo ToTensorInfo(TfLiteParser::TensorRawPtr tensorPtr, const std::
         case tflite::TensorType_FLOAT32:
             type = armnn::DataType::Float32;
             break;
+        case tflite::TensorType_INT8:
+            type = armnn::DataType::QSymmS8;
+            break;
+        case tflite::TensorType_INT16:
+            type = armnn::DataType::QuantisedSymm16;
+            break;
         case tflite::TensorType_INT32:
             type = armnn::DataType::Signed32;
             break;
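The two new cases map TfLite INT8 and INT16 tensor types onto armnn's signed symmetric quantized data types (QuantisedSymm16 is the older spelling that later releases rename to QSymmS16). A symmetric type has an implicit zero point of 0, so dequantization reduces to a multiply by the scale. Below is a minimal sketch of the arithmetic, assuming the standard affine scheme real = scale * (quantized - zero_point), using the scale of 1.5 from the test fixture added further down:

    // Reference arithmetic for dequantization (assumption: standard affine
    // scheme). Symmetric types (INT8/INT16 here) use zero_point == 0.
    #include <cassert>
    #include <cstdint>

    float Dequantize(int32_t quantized, float scale, int32_t zeroPoint)
    {
        return scale * static_cast<float>(quantized - zeroPoint);
    }

    int main()
    {
        // scale 1.5, zero_point 0, matching the test fixture below
        assert(Dequantize(255,    1.5f, 0) ==    382.5f); // UINT8 max
        assert(Dequantize(127,    1.5f, 0) ==    190.5f); // INT8 max
        assert(Dequantize(-32768, 1.5f, 0) == -49152.0f); // INT16 min
        return 0;
    }

These products are exactly the expected outputs asserted by the unit tests added in this patch.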
@@ -440,6 +446,7 @@ TfLiteParser::TfLiteParser(const Optional<ITfLiteParser::TfLiteParserOptions>& options)
     m_ParserFunctions[tflite::BuiltinOperator_CONCATENATION]      = &TfLiteParser::ParseConcatenation;
     m_ParserFunctions[tflite::BuiltinOperator_CONV_2D]            = &TfLiteParser::ParseConv2D;
     m_ParserFunctions[tflite::BuiltinOperator_DEPTHWISE_CONV_2D]  = &TfLiteParser::ParseDepthwiseConv2D;
+    m_ParserFunctions[tflite::BuiltinOperator_DEQUANTIZE]         = &TfLiteParser::ParseDequantize;
     m_ParserFunctions[tflite::BuiltinOperator_CUSTOM]             = &TfLiteParser::ParseCustomOperator;
     m_ParserFunctions[tflite::BuiltinOperator_FULLY_CONNECTED]    = &TfLiteParser::ParseFullyConnected;
     m_ParserFunctions[tflite::BuiltinOperator_LOGISTIC]           = &TfLiteParser::ParseLogistic;
@@ -923,6 +930,31 @@ void TfLiteParser::ParseDepthwiseConv2D(size_t subgraphIndex, size_t operatorIndex)
     RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
 }
 
+void TfLiteParser::ParseDequantize(size_t subgraphIndex, size_t operatorIndex)
+{
+    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
+
+    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
+    CHECK_VALID_SIZE(inputs.size(), 1);
+
+    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
+    CHECK_VALID_SIZE(outputs.size(), 1);
+
+    auto layerName = boost::str(boost::format("Dequantize:%1%:%2%") % subgraphIndex % operatorIndex);
+
+    IConnectableLayer* layer = m_Network->AddDequantizeLayer(layerName.c_str());
+    BOOST_ASSERT(layer != nullptr);
+
+    TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
+    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
+
+    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
+    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
+
+    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
+    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
+}
+
 void TfLiteParser::ParseTranspose(size_t subgraphIndex, size_t operatorIndex)
 {
     CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
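The one-line registration in the constructor is all the dispatch machinery this new operator needs: TfLiteParser keeps a table of member-function pointers keyed by the TfLite builtin operator code, and parsing an operator is an indexed call through that table. The sketch below is a simplified, hypothetical reproduction of that mechanism (MiniParser is not the real class, and the real table is TfLiteParser::m_ParserFunctions):

    // Simplified sketch of the parser's dispatch pattern (hypothetical class).
    #include <cstddef>
    #include <map>

    class MiniParser
    {
    public:
        using ParseFn = void (MiniParser::*)(size_t subgraphIndex, size_t operatorIndex);

        MiniParser()
        {
            // Mirrors: m_ParserFunctions[tflite::BuiltinOperator_DEQUANTIZE]
            //              = &TfLiteParser::ParseDequantize;
            m_ParserFunctions[6 /* DEQUANTIZE in the TfLite schema */] = &MiniParser::ParseDequantize;
        }

        void ParseOperator(int builtinCode, size_t subgraph, size_t op)
        {
            auto it = m_ParserFunctions.find(builtinCode);
            if (it != m_ParserFunctions.end())
            {
                (this->*(it->second))(subgraph, op); // dispatch to the registered handler
            }
            // the real parser reports an unsupported operator here
        }

    private:
        void ParseDequantize(size_t, size_t) { /* validate, create layer, wire slots */ }

        std::map<int, ParseFn> m_ParserFunctions;
    };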
diff --git a/src/armnnTfLiteParser/TfLiteParser.hpp b/src/armnnTfLiteParser/TfLiteParser.hpp
index 7f97b6de71..a8241f6650 100644
--- a/src/armnnTfLiteParser/TfLiteParser.hpp
+++ b/src/armnnTfLiteParser/TfLiteParser.hpp
@@ -100,6 +100,7 @@ private:
     void ParseConcatenation(size_t subgraphIndex, size_t operatorIndex);
     void ParseConv2D(size_t subgraphIndex, size_t operatorIndex);
     void ParseDepthwiseConv2D(size_t subgraphIndex, size_t operatorIndex);
+    void ParseDequantize(size_t subgraphIndex, size_t operatorIndex);
     void ParseDetectionPostProcess(size_t subgraphIndex, size_t operatorIndex);
     void ParseFullyConnected(size_t subgraphIndex, size_t operatorIndex);
     void ParseLogistic(size_t subgraphIndex, size_t operatorIndex);
diff --git a/src/armnnTfLiteParser/test/Dequantize.cpp b/src/armnnTfLiteParser/test/Dequantize.cpp
new file mode 100644
index 0000000000..2f98c07a66
--- /dev/null
+++ b/src/armnnTfLiteParser/test/Dequantize.cpp
@@ -0,0 +1,121 @@
+//
+// Copyright © 2019 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include <boost/test/unit_test.hpp>
+#include "ParserFlatbuffersFixture.hpp"
+#include "../TfLiteParser.hpp"
+
+#include <string>
+#include <iostream>
+
+BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
+
+    struct DequantizeFixture : public ParserFlatbuffersFixture
+    {
+        explicit DequantizeFixture(const std::string & inputShape,
+                                   const std::string & outputShape,
+                                   const std::string & dataType)
+        {
+            m_JsonString = R"(
+                {
+                    "version": 3,
+                    "operator_codes": [ { "builtin_code": "DEQUANTIZE" } ],
+                    "subgraphs": [ {
+                        "tensors": [
+                            {
+                                "shape": )" + inputShape + R"(,
+                                "type": )" + dataType + R"(,
+                                "buffer": 0,
+                                "name": "inputTensor",
+                                "quantization": {
+                                    "min": [ 0.0 ],
+                                    "max": [ 255.0 ],
+                                    "scale": [ 1.5 ],
+                                    "zero_point": [ 0 ],
+                                }
+                            },
+                            {
+                                "shape": )" + outputShape + R"(,
+                                "type": "FLOAT32",
+                                "buffer": 1,
+                                "name": "outputTensor",
+                                "quantization": {
+                                    "min": [ 0.0 ],
+                                    "max": [ 255.0 ],
+                                    "scale": [ 1.0 ],
+                                    "zero_point": [ 0 ],
+                                }
+                            }
+                        ],
+                        "inputs": [ 0 ],
+                        "outputs": [ 1 ],
+                        "operators": [
+                            {
+                                "opcode_index": 0,
+                                "inputs": [ 0 ],
+                                "outputs": [ 1 ],
+                                "builtin_options_type": "DequantizeOptions",
+                                "builtin_options": {
+                                },
+                                "custom_options_format": "FLEXBUFFERS"
+                            }
+                        ],
+                    } ],
+                    "buffers" : [
+                        { },
+                        { },
+                    ]
+                }
+            )";
+            SetupSingleInputSingleOutput("inputTensor", "outputTensor");
+        }
+    };
+
+    struct SimpleDequantizeFixtureQAsymm8 : DequantizeFixture
+    {
+        SimpleDequantizeFixtureQAsymm8() : DequantizeFixture("[ 1, 6 ]",
+                                                             "[ 1, 6 ]",
+                                                             "UINT8") {}
+    };
+
+    BOOST_FIXTURE_TEST_CASE(SimpleDequantizeQAsymm8, SimpleDequantizeFixtureQAsymm8)
+    {
+        RunTest<2, armnn::DataType::QuantisedAsymm8, armnn::DataType::Float32>(
+            0,
+            {{"inputTensor",  { 0u, 1u, 5u, 100u, 200u, 255u }}},
+            {{"outputTensor", { 0.0f, 1.5f, 7.5f, 150.0f, 300.0f, 382.5f }}});
+    }
+
+    struct SimpleDequantizeFixtureQSymm16 : DequantizeFixture
+    {
+        SimpleDequantizeFixtureQSymm16() : DequantizeFixture("[ 1, 6 ]",
+                                                             "[ 1, 6 ]",
+                                                             "INT16") {}
+    };
+
+    BOOST_FIXTURE_TEST_CASE(SimpleDequantizeQSymm16, SimpleDequantizeFixtureQSymm16)
+    {
+        RunTest<2, armnn::DataType::QuantisedSymm16, armnn::DataType::Float32>(
+            0,
+            {{"inputTensor",  { 0, 1, 5, 32767, -1, -32768 }}},
+            {{"outputTensor", { 0.0f, 1.5f, 7.5f, 49150.5f, -1.5f, -49152.0f }}});
+    }
+
+    struct SimpleDequantizeFixtureQSymmS8 : DequantizeFixture
+    {
+        SimpleDequantizeFixtureQSymmS8() : DequantizeFixture("[ 1, 6 ]",
+                                                             "[ 1, 6 ]",
+                                                             "INT8") {}
+    };
+
+    BOOST_FIXTURE_TEST_CASE(SimpleDequantizeQSymmS8, SimpleDequantizeFixtureQSymmS8)
+    {
+        RunTest<2, armnn::DataType::QSymmS8, armnn::DataType::Float32>(
+            0,
+            {{"inputTensor",  { 0, 1, 5, 127, -128, -1 }}},
+            {{"outputTensor", { 0.0f, 1.5f, 7.5f, 190.5f, -192.0f, -1.5f }}});
+    }
+
+BOOST_AUTO_TEST_SUITE_END()