about summary refs log tree commit diff
diff options
context:
space:
mode:
authorMatthew Jackson <matthew.jackson@arm.com>2019-08-16 16:51:42 +0100
committerMatthew Jackson <matthew.jackson@arm.com>2019-08-20 12:19:16 +0000
commit74bf7da7e0448791efee8ada1f73f1887098e367 (patch)
treeb0274892775e78e9b68f1226a18d64953a6a889c
parent27bd9239bbf107bba1d68296676ea3e36453f6b0 (diff)
downloadarmnn-74bf7da7e0448791efee8ada1f73f1887098e367.tar.gz
IVGCVSW-3650 Add TfLite Parser support for Transpose Convolution layer
* Added ParseTransposeConv to TfLite Parser
* New TransposeConv test file
* Updated documentation for supported Transpose Convolution

Change-Id: Id7356d8525556805c164af693ae2b16f6a8492fa
Signed-off-by: Matthew Jackson <matthew.jackson@arm.com>
-rw-r--r--CMakeLists.txt1
-rw-r--r--src/armnnTfLiteParser/TensorFlowLiteSupport.md2
-rw-r--r--src/armnnTfLiteParser/TfLiteParser.cpp92
-rw-r--r--src/armnnTfLiteParser/TfLiteParser.hpp1
-rw-r--r--src/armnnTfLiteParser/test/TransposeConv.cpp177
5 files changed, 270 insertions, 3 deletions
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 05dd0ec16d..9c186e3e6e 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -603,6 +603,7 @@ if(BUILD_UNIT_TESTS)
src/armnnTfLiteParser/test/Squeeze.cpp
src/armnnTfLiteParser/test/StridedSlice.cpp
src/armnnTfLiteParser/test/Sub.cpp
+ src/armnnTfLiteParser/test/TransposeConv.cpp
src/armnnTfLiteParser/test/Unpack.cpp
src/armnnTfLiteParser/test/LoadModel.cpp
src/armnnTfLiteParser/test/GetBuffer.cpp
diff --git a/src/armnnTfLiteParser/TensorFlowLiteSupport.md b/src/armnnTfLiteParser/TensorFlowLiteSupport.md
index 7acbf28397..a4c48ca6a0 100644
--- a/src/armnnTfLiteParser/TensorFlowLiteSupport.md
+++ b/src/armnnTfLiteParser/TensorFlowLiteSupport.md
@@ -60,6 +60,8 @@ The Arm NN SDK TensorFlow Lite parser currently supports the following operators
* TANH
+* TRANSPOSE_CONV
+
* UNPACK
## Custom Operator
diff --git a/src/armnnTfLiteParser/TfLiteParser.cpp b/src/armnnTfLiteParser/TfLiteParser.cpp
index f345d4a6e1..0e11a5c3e1 100644
--- a/src/armnnTfLiteParser/TfLiteParser.cpp
+++ b/src/armnnTfLiteParser/TfLiteParser.cpp
@@ -460,6 +460,7 @@ TfLiteParser::TfLiteParser()
m_ParserFunctions[tflite::BuiltinOperator_PAD] = &TfLiteParser::ParsePad;
m_ParserFunctions[tflite::BuiltinOperator_SPLIT] = &TfLiteParser::ParseSplit;
m_ParserFunctions[tflite::BuiltinOperator_TANH] = &TfLiteParser::ParseTanH;
+ m_ParserFunctions[tflite::BuiltinOperator_TRANSPOSE_CONV] = &TfLiteParser::ParseTransposeConv;
m_ParserFunctions[tflite::BuiltinOperator_UNPACK] = &TfLiteParser::ParseUnpack;
}
@@ -737,7 +738,7 @@ void TfLiteParser::ParseConv2D(size_t subgraphIndex, size_t operatorIndex)
auto filterTensorAndData = CreateConstTensor(inputs[1],
filterTensorInfo,
armnn::Optional<armnn::PermutationVector&>());
- armnn::IConnectableLayer* layer;
+ armnn::IConnectableLayer* layer = nullptr;
auto layerName = boost::str(boost::format("Conv2D:%1%:%2%") % subgraphIndex % operatorIndex);
@@ -826,7 +827,7 @@ void TfLiteParser::ParseDepthwiseConv2D(size_t subgraphIndex, size_t operatorInd
desc.m_DilationX, desc.m_PadLeft, desc.m_PadRight, options->padding);
auto filterTensorAndData = CreateConstTensor(inputs[1], filterTensorInfo, permutationVector);
- armnn::IConnectableLayer* layer;
+ armnn::IConnectableLayer* layer = nullptr;
auto layerName = boost::str(boost::format("DepthwiseConv2D:%1%:%2%") % subgraphIndex % operatorIndex);
if (inputs.size() == 3)
@@ -864,6 +865,91 @@ void TfLiteParser::ParseDepthwiseConv2D(size_t subgraphIndex, size_t operatorInd
RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
+void TfLiteParser::ParseTransposeConv(size_t subgraphIndex, size_t operatorIndex)
+{
+ CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
+
+ const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
+ const auto * options = operatorPtr->builtin_options.AsTransposeConvOptions();
+
+ TransposeConvolution2dDescriptor desc;
+ desc.m_BiasEnabled = false;
+ desc.m_StrideX = CHECKED_NON_NEGATIVE(options->stride_w);
+ desc.m_StrideY = CHECKED_NON_NEGATIVE(options->stride_h);
+ desc.m_DataLayout = armnn::DataLayout::NHWC;
+
+ auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
+ CHECK_VALID_SIZE(inputs.size(), 2, 3);
+
+ auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
+ CHECK_VALID_SIZE(outputs.size(), 1);
+
+ armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
+ armnn::TensorInfo filterTensorInfo = ToTensorInfo(inputs[1]);
+
+ // TfLite uses NHWC tensors
+ const unsigned int inputHeight = inputTensorInfo.GetShape()[1];
+ const unsigned int inputWidth = inputTensorInfo.GetShape()[2];
+
+ const unsigned int filterHeight = filterTensorInfo.GetShape()[1];
+ const unsigned int filterWidth = filterTensorInfo.GetShape()[2];
+
+ CalcPadding(inputHeight,
+ filterHeight,
+ desc.m_StrideY,
+ 1, // DilationY
+ desc.m_PadTop,
+ desc.m_PadBottom,
+ options->padding);
+
+ CalcPadding(inputWidth,
+ filterWidth,
+ desc.m_StrideX,
+ 1, // DilationX
+ desc.m_PadLeft,
+ desc.m_PadRight,
+ options->padding);
+
+ auto filterTensorAndData = CreateConstTensor(inputs[1],
+ filterTensorInfo,
+ armnn::Optional<armnn::PermutationVector&>());
+
+ armnn::IConnectableLayer* layer = nullptr;
+ auto layerName = boost::str(boost::format("TransposeConv:%1%:%2%") % subgraphIndex % operatorIndex);
+
+ if (inputs.size() == 3)
+ {
+ desc.m_BiasEnabled = true;
+ armnn::TensorInfo biasTensorInfo = ToTensorInfo(inputs[2]);
+ auto biasTensorAndData = CreateConstTensor(inputs[2],
+ biasTensorInfo,
+ armnn::Optional<armnn::PermutationVector&>());
+ layer = m_Network->AddTransposeConvolution2dLayer(desc,
+ filterTensorAndData.first,
+ Optional<ConstTensor>(biasTensorAndData.first),
+ layerName.c_str());
+ }
+ else
+ {
+ layer = m_Network->AddTransposeConvolution2dLayer(desc,
+ filterTensorAndData.first,
+ EmptyOptional(),
+ layerName.c_str());
+ }
+
+ BOOST_ASSERT(layer != nullptr);
+
+ armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
+ layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
+
+ // only the tensors for the inputs are relevant, exclude the const (filter) tensor
+ auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
+ RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
+
+ auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
+ RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
+}
+
void TfLiteParser::ParseAveragePool2D(size_t subgraphIndex, size_t operatorIndex)
{
ParsePool(subgraphIndex, operatorIndex, PoolingAlgorithm::Average);
@@ -1776,7 +1862,7 @@ void TfLiteParser::ParseFullyConnected(size_t subgraphIndex, size_t operatorInde
auto filterTensorAndData = CreateConstTensor(inputs[1],
filterTensorInfo,
armnn::Optional<armnn::PermutationVector&>());
- armnn::IConnectableLayer* layer;
+ armnn::IConnectableLayer* layer = nullptr;
auto layerName = boost::str(boost::format("FullyConnected:%1%:%2%") % subgraphIndex % operatorIndex);
if (inputs.size() == 3)
diff --git a/src/armnnTfLiteParser/TfLiteParser.hpp b/src/armnnTfLiteParser/TfLiteParser.hpp
index 90b800d56d..dc0d6344f8 100644
--- a/src/armnnTfLiteParser/TfLiteParser.hpp
+++ b/src/armnnTfLiteParser/TfLiteParser.hpp
@@ -120,6 +120,7 @@ private:
void ParseStridedSlice(size_t subgraphIndex, size_t operatorIndex);
void ParseSub(size_t subgraphIndex, size_t operatorIndex);
void ParseTanH(size_t subgraphIndex, size_t operatorIndex);
+ void ParseTransposeConv(size_t subgraphIndex, size_t operatorIndex);
void ParseUnpack(size_t subgraphIndex, size_t operatorIndex);
void RegisterProducerOfTensor(size_t subgraphIndex, size_t tensorIndex, armnn::IOutputSlot* slot);
diff --git a/src/armnnTfLiteParser/test/TransposeConv.cpp b/src/armnnTfLiteParser/test/TransposeConv.cpp
new file mode 100644
index 0000000000..05b48ecf0b
--- /dev/null
+++ b/src/armnnTfLiteParser/test/TransposeConv.cpp
@@ -0,0 +1,177 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include <boost/test/unit_test.hpp>
+#include "ParserFlatbuffersFixture.hpp"
+#include "../TfLiteParser.hpp"
+
+BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
+
+struct TransposeConvFixture : public ParserFlatbuffersFixture
+{
+ explicit TransposeConvFixture(const std::string& inputShape,
+ const std::string& outputShape,
+ const std::string& filterShape,
+ const std::string& filterData,
+ const bool biasEnabled,
+ const std::string& biasShape,
+ const std::string& biasData,
+ const std::string& strideX,
+ const std::string& strideY,
+ const std::string& dataType)
+ {
+ std::string biasString;
+ if (biasEnabled)
+ {
+ biasString = R"(
+ {
+ "shape": )" + biasShape + R"(,
+ "type": "INT32",
+ "buffer": 2,
+ "name": "biasTensor",
+ "quantization": {
+ "min": [ 0.0 ],
+ "max": [ 255.0 ],
+ "scale": [ 1.0 ],
+ "zero_point": [ 0 ],
+ }
+ },)";
+
+ }
+ m_JsonString = R"(
+ {
+ "version": 3,
+ "operator_codes": [ { "builtin_code": "TRANSPOSE_CONV" } ],
+ "subgraphs": [ {
+ "tensors": [
+ {
+ "shape": )" + inputShape + R"(,
+ "type": ")" + dataType + R"(",
+ "buffer": 0,
+ "name": "inputTensor",
+ "quantization": {
+ "min": [ 0.0 ],
+ "max": [ 255.0 ],
+ "scale": [ 1.0 ],
+ "zero_point": [ 0 ],
+ }
+ },
+ {
+ "shape": )" + filterShape + R"(,
+ "type": ")" + dataType + R"(",
+ "buffer": 1,
+ "name": "filterTensor",
+ "quantization": {
+ "min": [ 0.0 ],
+ "max": [ 255.0 ],
+ "scale": [ 1.0 ],
+ "zero_point": [ 0 ],
+ }
+ },)" + biasString + R"(
+ {
+ "shape": )" + outputShape + R"(,
+ "type": ")" + dataType + R"(",
+ "buffer": )" + (biasEnabled ? "3" : "2") + R"(,
+ "name": "outputTensor",
+ "quantization": {
+ "min": [ 0.0 ],
+ "max": [ 255.0 ],
+ "scale": [ 1.0 ],
+ "zero_point": [ 0 ],
+ }
+ }
+ ],
+ "inputs": [ 0 ],
+ "outputs": [ )" + (biasEnabled ? "3" : "2") + R"( ],
+ "operators": [
+ {
+ "opcode_index": 0,
+ "inputs": [ 0, 1)" + (biasEnabled ? ", 2" : "") + R"( ],
+ "outputs": [ )" + (biasEnabled ? "3" : "2") + R"( ],
+ "builtin_options_type": "TransposeConvOptions",
+ "builtin_options": {
+ "padding": "SAME",
+ "stride_w": )" + strideX + R"(,
+ "stride_h": )" + strideY + R"(
+ },
+ "custom_options_format": "FLEXBUFFERS"
+ }
+ ],
+ } ],
+ "buffers" : [
+ { },
+ { "data": )" + filterData + R"( },
+ { )" + (biasEnabled ? (R"("data": )" + biasData) : "") + R"( },
+ { }
+ ]
+ }
+ )";
+ SetupSingleInputSingleOutput("inputTensor", "outputTensor");
+ }
+};
+
+struct SimpleTransposeConvFixture : TransposeConvFixture
+{
+ SimpleTransposeConvFixture()
+ : TransposeConvFixture("[ 1, 2, 2, 1 ]", // inputShape
+ "[ 1, 3, 3, 1 ]", // outputShape
+ "[ 1, 2, 2, 1 ]", // filterShape
+ "[ 0, 1, 2, 4 ]", // filterData
+ false, // biasEnabled
+ "", // biasShape
+ "", // biasData
+ "1", // strideX
+ "1", // strideY
+ "UINT8") // dataType
+ {}
+};
+
+BOOST_FIXTURE_TEST_CASE( ParseSimpleTransposeConv, SimpleTransposeConvFixture )
+{
+ RunTest<4, armnn::DataType::QuantisedAsymm8>(
+ 0,
+ {
+ 1, 2,
+ 3, 4
+ },
+ {
+ 0, 1, 2,
+ 2, 11, 12,
+ 6, 20, 16
+ });
+}
+
+struct TransposeConvWithBiasFixture : TransposeConvFixture
+{
+ TransposeConvWithBiasFixture()
+ : TransposeConvFixture("[ 1, 2, 2, 1 ]", // inputShape
+ "[ 1, 3, 3, 1 ]", // outputShape
+ "[ 1, 2, 2, 1 ]", // filterShape
+ "[ 0, 1, 2, 4 ]", // filterData
+ true, // biasEnabled
+ "[ 1 ]", // biasShape
+ "[ 2, 0, 0, 0 ]", // biasData
+ "1", // strideX
+ "1", // strideY
+ "UINT8") // dataType
+ {}
+};
+
+BOOST_FIXTURE_TEST_CASE( ParseTransposeConvWithBias, TransposeConvWithBiasFixture )
+{
+ RunTest<4, armnn::DataType::QuantisedAsymm8>(
+ 0,
+ {
+ 1, 2,
+ 3, 4
+ },
+ {
+ 2, 3, 5,
+ 4, 13, 14,
+ 8, 22, 18
+ });
+}
+
+BOOST_AUTO_TEST_SUITE_END()