aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorNina Drozd <nina.drozd@arm.com>2019-04-15 09:47:39 +0100
committerNina Drozd <nina.drozd@arm.com>2019-04-15 14:37:58 +0100
commit200e38039cf2cef21ae9ba6f86fab6fd524e5077 (patch)
treeb1bf4335036c1e8047130b2e82a1d2067be5e8fe
parentb1390fcea347e8e0476a961c128f2334c4d73217 (diff)
downloadarmnn-200e38039cf2cef21ae9ba6f86fab6fd524e5077.tar.gz
IVGCVSW-2848 - Add TfLite Parser support for Unpack layer
* Added ParseUnpack in TfLiteParser * New Unpack test file with test reproducing unpack in DeepSpeechV1 model * Added documentation for supported Unpack to TensorFlowLiteSupport.md Signed-off-by: Nina Drozd <nina.drozd@arm.com> Change-Id: Ie920d46254ff4b4ab544407ace4c1d489af83157
-rw-r--r--CMakeLists.txt1
-rw-r--r--src/armnnTfLiteParser/TensorFlowLiteSupport.md2
-rw-r--r--src/armnnTfLiteParser/TfLiteParser.cpp84
-rw-r--r--src/armnnTfLiteParser/TfLiteParser.hpp13
-rw-r--r--src/armnnTfLiteParser/test/Unpack.cpp119
5 files changed, 213 insertions, 6 deletions
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 3febe7b6fb..e9172f2789 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -521,6 +521,7 @@ if(BUILD_UNIT_TESTS)
src/armnnTfLiteParser/test/Squeeze.cpp
src/armnnTfLiteParser/test/StridedSlice.cpp
src/armnnTfLiteParser/test/Sub.cpp
+ src/armnnTfLiteParser/test/Unpack.cpp
src/armnnTfLiteParser/test/LoadModel.cpp
src/armnnTfLiteParser/test/GetBuffer.cpp
src/armnnTfLiteParser/test/OutputShapeOfSqueeze.cpp
diff --git a/src/armnnTfLiteParser/TensorFlowLiteSupport.md b/src/armnnTfLiteParser/TensorFlowLiteSupport.md
index 821aecc9ec..dc163340aa 100644
--- a/src/armnnTfLiteParser/TensorFlowLiteSupport.md
+++ b/src/armnnTfLiteParser/TensorFlowLiteSupport.md
@@ -58,6 +58,8 @@ The Arm NN SDK TensorFlow Lite parser currently supports the following operators
* TANH
+* UNPACK
+
## Custom Operator
* TFLite_Detection_PostProcess
diff --git a/src/armnnTfLiteParser/TfLiteParser.cpp b/src/armnnTfLiteParser/TfLiteParser.cpp
index f689deedf6..86688add9d 100644
--- a/src/armnnTfLiteParser/TfLiteParser.cpp
+++ b/src/armnnTfLiteParser/TfLiteParser.cpp
@@ -468,6 +468,7 @@ TfLiteParser::TfLiteParser()
m_ParserFunctions[tflite::BuiltinOperator_PAD] = &TfLiteParser::ParsePad;
m_ParserFunctions[tflite::BuiltinOperator_SPLIT] = &TfLiteParser::ParseSplit;
m_ParserFunctions[tflite::BuiltinOperator_TANH] = &TfLiteParser::ParseTanH;
+ m_ParserFunctions[tflite::BuiltinOperator_UNPACK] = &TfLiteParser::ParseUnpack;
}
void TfLiteParser::ResetParser()
@@ -1867,6 +1868,83 @@ void TfLiteParser::ParseDetectionPostProcess(size_t subgraphIndex, size_t operat
outputTensorIndexes[3]});
}
+void TfLiteParser::ParseUnpack(size_t subgraphIndex, size_t operatorIndex)
+{
+ CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
+
+ const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
+ const auto * options = operatorPtr->builtin_options.AsUnpackOptions();
+
+ // unpackAxis is the dimension along which the input tensor will be unpacked.
+ const unsigned int unpackAxis = CHECKED_NON_NEGATIVE(options->axis);
+
+ auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
+ CHECK_VALID_SIZE(inputs.size(), 1);
+
+ armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
+ unsigned int unpackNum = CHECKED_NON_NEGATIVE(options->num);
+ // If num is not defined, automatically infer it from the size of the dimension at unpackAxis.
+ if(unpackNum == 0)
+ {
+ unpackNum = inputTensorInfo.GetShape()[unpackAxis];
+ }
+
+ // If unpack number cannot be inferred and is still zero, throw ParseException.
+ if(unpackNum == 0)
+ {
+ throw ParseException("Number to unpack must greater than zero.");
+ }
+
+ auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
+ CHECK_VALID_SIZE(outputs.size(), unpackNum);
+
+ auto inputDimSize = inputTensorInfo.GetNumDimensions();
+ std::vector<unsigned int> unpackDimSizes(inputDimSize);
+
+ // Add current input shape to unpackDimSizes
+ for (unsigned int i = 0; i < inputDimSize; ++i)
+ {
+ unpackDimSizes[i] = inputTensorInfo.GetShape()[i];
+ }
+
+ if (unpackDimSizes[unpackAxis] != unpackNum)
+ {
+ throw ParseException("Number to unpack must be the same as length of the dimension to "
+ "unpack along.");
+ }
+
+ unpackDimSizes[unpackAxis] /= unpackNum;
+
+ SplitterDescriptor splitDesc(unpackNum, static_cast<unsigned int>(unpackDimSizes.size()));
+ for (unsigned int j = 0; j < unpackNum; ++j)
+ {
+ // Set the size of the views.
+ for (unsigned int dimIdx = 0; dimIdx < unpackDimSizes.size(); ++dimIdx)
+ {
+ splitDesc.SetViewSize(j, dimIdx, unpackDimSizes[dimIdx]);
+ }
+ splitDesc.SetViewOriginCoord(j, unpackAxis, unpackDimSizes[unpackAxis] * j);
+ }
+
+ auto layerName = boost::str(boost::format("Unpack:%1%:%2%") % subgraphIndex % operatorIndex);
+ IConnectableLayer* layer = m_Network->AddSplitterLayer(splitDesc, layerName.c_str());
+
+ auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
+ RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
+
+ TensorShape outShape = TensorShape(static_cast<unsigned int>(unpackDimSizes.size()),
+ unpackDimSizes.data());
+
+ for (unsigned int k = 0; k < layer->GetNumOutputSlots(); ++k)
+ {
+ layer->GetOutputSlot(k).SetTensorInfo(armnn::TensorInfo(outShape,
+ inputTensorInfo.GetDataType()));
+ }
+
+ auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
+ RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
+}
+
void TfLiteParser::ParseSplit(size_t subgraphIndex, size_t operatorIndex)
{
CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
@@ -1876,6 +1954,12 @@ void TfLiteParser::ParseSplit(size_t subgraphIndex, size_t operatorIndex)
const unsigned int numSplits = CHECKED_NON_NEGATIVE(options->num_splits);
+ // num_splits is read directly from the operator options and must be positive; throw ParseException if it is zero.
+ if(numSplits == 0)
+ {
+ throw ParseException("Number to splits must greater than zero.");
+ }
+
auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
CHECK_VALID_SIZE(inputs.size(), 2);
auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
diff --git a/src/armnnTfLiteParser/TfLiteParser.hpp b/src/armnnTfLiteParser/TfLiteParser.hpp
index a3ef22fa7e..929af1f0db 100644
--- a/src/armnnTfLiteParser/TfLiteParser.hpp
+++ b/src/armnnTfLiteParser/TfLiteParser.hpp
@@ -91,6 +91,7 @@ private:
void ParseUnsupportedOperator(size_t subgraphIndex, size_t operatorIndex);
void ParseActivation(size_t subgraphIndex, size_t operatorIndex, armnn::ActivationFunction activationType);
+ void ParseAdd(size_t subgraphIndex, size_t operatorIndex);
void ParseAveragePool2D(size_t subgraphIndex, size_t operatorIndex);
void ParseBatchToSpaceND(size_t subgraphIndex, size_t operatorIndex);
void ParseConcatenation(size_t subgraphIndex, size_t operatorIndex);
@@ -101,23 +102,23 @@ private:
void ParseLogistic(size_t subgraphIndex, size_t operatorIndex);
void ParseMaxPool2D(size_t subgraphIndex, size_t operatorIndex);
void ParseMaximum(size_t subgraphIndex, size_t operatorIndex);
+ void ParseMean(size_t subgraphIndex, size_t operatorIndex);
void ParseMinimum(size_t subgraphIndex, size_t operatorIndex);
+ void ParseMul(size_t subgraphIndex, size_t operatorIndex);
+ void ParsePad(size_t subgraphIndex, size_t operatorIndex);
+ void ParsePool(size_t subgraphIndex, size_t operatorIndex, armnn::PoolingAlgorithm algorithm);
void ParseRelu(size_t subgraphIndex, size_t operatorIndex);
void ParseRelu6(size_t subgraphIndex, size_t operatorIndex);
void ParseReshape(size_t subgraphIndex, size_t operatorIndex);
void ParseResizeBilinear(size_t subgraphIndex, size_t operatorIndex);
void ParseSoftmax(size_t subgraphIndex, size_t operatorIndex);
void ParseSpaceToBatchND(size_t subgraphIndex, size_t operatorIndex);
+ void ParseSplit(size_t subgraphIndex, size_t operatorIndex);
void ParseSqueeze(size_t subgraphIndex, size_t operatorIndex);
void ParseStridedSlice(size_t subgraphIndex, size_t operatorIndex);
void ParseSub(size_t subgraphIndex, size_t operatorIndex);
- void ParseAdd(size_t subgraphIndex, size_t operatorIndex);
- void ParseMul(size_t subgraphIndex, size_t operatorIndex);
- void ParseMean(size_t subgraphIndex, size_t operatorIndex);
- void ParsePad(size_t subgraphIndex, size_t operatorIndex);
- void ParsePool(size_t subgraphIndex, size_t operatorIndex, armnn::PoolingAlgorithm algorithm);
- void ParseSplit(size_t subgraphIndex, size_t operatorIndex);
void ParseTanH(size_t subgraphIndex, size_t operatorIndex);
+ void ParseUnpack(size_t subgraphIndex, size_t operatorIndex);
void RegisterProducerOfTensor(size_t subgraphIndex, size_t tensorIndex, armnn::IOutputSlot* slot);
void RegisterConsumerOfTensor(size_t subgraphIndex, size_t tensorIndex, armnn::IInputSlot* slot);
diff --git a/src/armnnTfLiteParser/test/Unpack.cpp b/src/armnnTfLiteParser/test/Unpack.cpp
new file mode 100644
index 0000000000..10e682e36a
--- /dev/null
+++ b/src/armnnTfLiteParser/test/Unpack.cpp
@@ -0,0 +1,119 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include <boost/test/unit_test.hpp>
+#include "ParserFlatbuffersFixture.hpp"
+#include "../TfLiteParser.hpp"
+
+#include <string>
+#include <iostream>
+
+BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
+
+struct UnpackFixture : public ParserFlatbuffersFixture
+{
+ explicit UnpackFixture(const std::string & inputShape,
+ const unsigned int numberOfOutputs,
+ const std::string & outputShape,
+ const std::string & axis,
+ const std::string & num)
+ {
+ // As input index is 0, output indexes start at 1
+ std::string outputIndexes = "1";
+ for(unsigned int i = 1; i < numberOfOutputs; i++)
+ {
+ outputIndexes += ", " + std::to_string(i+1);
+ }
+ m_JsonString = R"(
+ {
+ "version": 3,
+ "operator_codes": [ { "builtin_code": "UNPACK" } ],
+ "subgraphs": [ {
+ "tensors": [
+ {
+ "shape": )" + inputShape + R"(,
+ "type": "FLOAT32",
+ "buffer": 0,
+ "name": "inputTensor",
+ "quantization": {
+ "min": [ 0.0 ],
+ "max": [ 255.0 ],
+ "scale": [ 1.0 ],
+ "zero_point": [ 0 ],
+ }
+ },)";
+ // Append the required number of outputs for this UnpackFixture.
+ // As input index is 0, output indexes start at 1.
+ for(unsigned int i = 0; i < numberOfOutputs; i++)
+ {
+ m_JsonString += R"(
+ {
+ "shape": )" + outputShape + R"( ,
+ "type": "FLOAT32",
+ "buffer": )" + std::to_string(i + 1) + R"(,
+ "name": "outputTensor)" + std::to_string(i + 1) + R"(",
+ "quantization": {
+ "min": [ 0.0 ],
+ "max": [ 255.0 ],
+ "scale": [ 1.0 ],
+ "zero_point": [ 0 ],
+ }
+ },)";
+ }
+ m_JsonString += R"(
+ ],
+ "inputs": [ 0 ],
+ "outputs": [ )" + outputIndexes + R"( ],
+ "operators": [
+ {
+ "opcode_index": 0,
+ "inputs": [ 0 ],
+ "outputs": [ )" + outputIndexes + R"( ],
+ "builtin_options_type": "UnpackOptions",
+ "builtin_options": {
+ "axis": )" + axis;
+
+ if(!num.empty())
+ {
+ m_JsonString += R"(,
+ "num" : )" + num;
+ }
+
+ m_JsonString += R"(
+ },
+ "custom_options_format": "FLEXBUFFERS"
+ }
+ ],
+ } ],
+ "buffers" : [
+ { },
+ { }
+ ]
+ }
+ )";
+ Setup();
+ }
+};
+
+struct DefaultUnpackAxisZeroFixture : UnpackFixture
+{
+ DefaultUnpackAxisZeroFixture() : UnpackFixture("[ 4, 1, 6 ]", 4, "[ 1, 6 ]", "0", "") {}
+};
+
+BOOST_FIXTURE_TEST_CASE(UnpackAxisZeroNumIsDefaultNotSpecified, DefaultUnpackAxisZeroFixture)
+{
+ RunTest<2, armnn::DataType::Float32>(
+ 0,
+ { {"inputTensor", { 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f,
+ 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f,
+ 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f,
+ 19.0f, 20.0f, 21.0f, 22.0f, 23.0f, 24.0f } } },
+ { {"outputTensor1", { 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f }},
+ {"outputTensor2", { 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f }},
+ {"outputTensor3", { 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f }},
+ {"outputTensor4", { 19.0f, 20.0f, 21.0f, 22.0f, 23.0f, 24.0f }} });
+}
+
+BOOST_AUTO_TEST_SUITE_END()