about | summary | refs | log | tree | commit | diff
diff options
context:
space:
mode:
author      Nina Drozd <nindro01@e112888-lin.galway.arm.com>  2019-04-08 10:52:10 +0100
committer   Nina Drozd <nina.drozd@arm.com>                   2019-04-08 15:28:19 +0100
commit      0324f48e64edb99a5c8d819394545d97e0c2ae97 (patch)
tree        becfb745cc292c583703bea59536fdf49ea182c9
parent      1b63d6c138f9d7c772c7dc325467018b557ff9ed (diff)
download    armnn-0324f48e64edb99a5c8d819394545d97e0c2ae97.tar.gz
IVGCVSW-2844: Add TfLite Parser support for Split layer
* Added ParseSplit method
* New Unit test Split.cpp
* Updated TensorflowLiteSupport.md with new supported operator

Change-Id: Iec80ba9ad7b48db8e86589ebae77bd7d8ed38fb2
Signed-off-by: Nina Drozd <nina.drozd@arm.com>
-rw-r--r--  CMakeLists.txt                                    |   3
-rw-r--r--  src/armnnTfLiteParser/TensorFlowLiteSupport.md    |   2
-rw-r--r--  src/armnnTfLiteParser/TfLiteParser.cpp            |  89
-rw-r--r--  src/armnnTfLiteParser/TfLiteParser.hpp            |   2
-rw-r--r--  src/armnnTfLiteParser/test/Split.cpp              | 137
5 files changed, 231 insertions, 2 deletions
diff --git a/CMakeLists.txt b/CMakeLists.txt
index b297423904..162b509223 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -516,9 +516,10 @@ if(BUILD_UNIT_TESTS)
src/armnnTfLiteParser/test/ResizeBilinear.cpp
src/armnnTfLiteParser/test/Softmax.cpp
src/armnnTfLiteParser/test/SpaceToBatchND.cpp
- src/armnnTfLiteParser/test/Sub.cpp
+ src/armnnTfLiteParser/test/Split.cpp
src/armnnTfLiteParser/test/Squeeze.cpp
src/armnnTfLiteParser/test/StridedSlice.cpp
+ src/armnnTfLiteParser/test/Sub.cpp
src/armnnTfLiteParser/test/LoadModel.cpp
src/armnnTfLiteParser/test/GetBuffer.cpp
src/armnnTfLiteParser/test/OutputShapeOfSqueeze.cpp
diff --git a/src/armnnTfLiteParser/TensorFlowLiteSupport.md b/src/armnnTfLiteParser/TensorFlowLiteSupport.md
index 84734c51b6..afbe2cec06 100644
--- a/src/armnnTfLiteParser/TensorFlowLiteSupport.md
+++ b/src/armnnTfLiteParser/TensorFlowLiteSupport.md
@@ -48,6 +48,8 @@ The Arm NN SDK TensorFlow Lite parser currently supports the following operators
* SPACE_TO_BATCH
+* SPLIT
+
* SQUEEZE
* STRIDED_SLICE
diff --git a/src/armnnTfLiteParser/TfLiteParser.cpp b/src/armnnTfLiteParser/TfLiteParser.cpp
index b9a3522736..c00c2188a9 100644
--- a/src/armnnTfLiteParser/TfLiteParser.cpp
+++ b/src/armnnTfLiteParser/TfLiteParser.cpp
@@ -464,6 +464,7 @@ TfLiteParser::TfLiteParser()
m_ParserFunctions[tflite::BuiltinOperator_MUL] = &TfLiteParser::ParseMul;
m_ParserFunctions[tflite::BuiltinOperator_MEAN] = &TfLiteParser::ParseMean;
m_ParserFunctions[tflite::BuiltinOperator_PAD] = &TfLiteParser::ParsePad;
+ m_ParserFunctions[tflite::BuiltinOperator_SPLIT] = &TfLiteParser::ParseSplit;
}
void TfLiteParser::ResetParser()
@@ -1851,6 +1852,94 @@ void TfLiteParser::ParseDetectionPostProcess(size_t subgraphIndex, size_t operat
outputTensorIndexes[3]});
}
+void TfLiteParser::ParseSplit(size_t subgraphIndex, size_t operatorIndex)
+{
+ CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
+
+ const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
+ const auto * options = operatorPtr->builtin_options.AsSplitOptions();
+
+ const unsigned int numSplits = CHECKED_NON_NEGATIVE(options->num_splits);
+
+ auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
+ CHECK_VALID_SIZE(inputs.size(), 2);
+ auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
+ CHECK_VALID_SIZE(outputs.size(), numSplits);
+
+ armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
+ armnn::TensorInfo axisTensorInfo = ToTensorInfo(inputs[1]);
+
+ // This splitDim indicates the data format: 3 is the NHWC, 1 is the NCHW.
+ const unsigned int splitDim = static_cast<unsigned int>(axisTensorInfo.GetShape()[0]);
+
+ // Armnn supports split along the channel dimension for data formats NHWC and NCHW.
+ if (splitDim == 0 || splitDim == 2)
+ {
+ throw ParseException(
+ boost::str(
+ boost::format(
+ "Dimension %1% for split is not supported by Armnn. %2%")
+ % splitDim
+ % CHECK_LOCATION().AsString()));
+ }
+
+ auto inputDimSize = inputTensorInfo.GetNumDimensions();
+ if (inputDimSize != MaxNumOfTensorDimensions)
+ {
+ throw ParseException(
+ boost::str(
+ boost::format(
+ "The number of dimensions: %1% for input tensors of the "
+ "split op should be %2% %3%")
+ % inputTensorInfo.GetNumDimensions()
+ % MaxNumOfTensorDimensions
+ % CHECK_LOCATION().AsString()));
+ }
+
+ std::vector<unsigned int> splitterDimSizes(inputDimSize);
+
+ // Add current input shape to splitterDimSizes
+ for (unsigned int i = 0; i < inputDimSize; ++i)
+ {
+ splitterDimSizes[i] = inputTensorInfo.GetShape()[i];
+ }
+
+ if (splitterDimSizes[splitDim] % numSplits != 0)
+ {
+ throw ParseException("Number of splits must evenly divide the dimension");
+ }
+ splitterDimSizes[splitDim] /= numSplits;
+
+ SplitterDescriptor splitDesc(numSplits);
+ for (unsigned int j = 0; j < numSplits; ++j)
+ {
+ // Set the size of the views.
+ for (unsigned int dimIdx = 0; dimIdx < splitterDimSizes.size(); ++dimIdx)
+ {
+ splitDesc.SetViewSize(j, dimIdx, splitterDimSizes[dimIdx]);
+ }
+ splitDesc.SetViewOriginCoord(j, splitDim, splitterDimSizes[splitDim] * j);
+ }
+
+ auto layerName = boost::str(boost::format("Split:%1%:%2%") % subgraphIndex % operatorIndex);
+ IConnectableLayer* layer = m_Network->AddSplitterLayer(splitDesc, layerName.c_str());
+
+ auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
+ RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
+
+ TensorShape outShape = TensorShape(static_cast<unsigned int>(splitterDimSizes.size()),
+ splitterDimSizes.data());
+
+ for (unsigned int k = 0; k < layer->GetNumOutputSlots(); ++k)
+ {
+ layer->GetOutputSlot(k).SetTensorInfo(armnn::TensorInfo(outShape,
+ inputTensorInfo.GetDataType()));
+ }
+
+ auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
+ RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
+}
+
armnn::IConnectableLayer* TfLiteParser::AddFusedActivationLayer(armnn::IConnectableLayer* prevLayer,
unsigned int outputSlot,
tflite::ActivationFunctionType activationType)
diff --git a/src/armnnTfLiteParser/TfLiteParser.hpp b/src/armnnTfLiteParser/TfLiteParser.hpp
index 2895487214..e166dd5d8c 100644
--- a/src/armnnTfLiteParser/TfLiteParser.hpp
+++ b/src/armnnTfLiteParser/TfLiteParser.hpp
@@ -115,8 +115,8 @@ private:
void ParseMul(size_t subgraphIndex, size_t operatorIndex);
void ParseMean(size_t subgraphIndex, size_t operatorIndex);
void ParsePad(size_t subgraphIndex, size_t operatorIndex);
-
void ParsePool(size_t subgraphIndex, size_t operatorIndex, armnn::PoolingAlgorithm algorithm);
+ void ParseSplit(size_t subgraphIndex, size_t operatorIndex);
void RegisterProducerOfTensor(size_t subgraphIndex, size_t tensorIndex, armnn::IOutputSlot* slot);
void RegisterConsumerOfTensor(size_t subgraphIndex, size_t tensorIndex, armnn::IInputSlot* slot);
diff --git a/src/armnnTfLiteParser/test/Split.cpp b/src/armnnTfLiteParser/test/Split.cpp
new file mode 100644
index 0000000000..774a416750
--- /dev/null
+++ b/src/armnnTfLiteParser/test/Split.cpp
@@ -0,0 +1,137 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include <boost/test/unit_test.hpp>
+#include "ParserFlatbuffersFixture.hpp"
+#include "../TfLiteParser.hpp"
+
+#include <string>
+#include <iostream>
+
+BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
+
+struct SplitFixture : public ParserFlatbuffersFixture
+{
+ explicit SplitFixture(const std::string & inputShape,
+ const std::string & axisShape,
+ const std::string & numSplits,
+ const std::string & outputShape1,
+ const std::string & outputShape2)
+ {
+ m_JsonString = R"(
+ {
+ "version": 3,
+ "operator_codes": [ { "builtin_code": "SPLIT" } ],
+ "subgraphs": [ {
+ "tensors": [
+ {
+ "shape": )" + inputShape + R"(,
+ "type": "FLOAT32",
+ "buffer": 0,
+ "name": "inputTensor",
+ "quantization": {
+ "min": [ 0.0 ],
+ "max": [ 255.0 ],
+ "scale": [ 1.0 ],
+ "zero_point": [ 0 ],
+ }
+ },
+ {
+ "shape": )" + axisShape + R"(,
+ "type": "INT32",
+ "buffer": 1,
+ "name": "axis",
+ "quantization": {
+ "min": [ 0.0 ],
+ "max": [ 255.0 ],
+ "scale": [ 1.0 ],
+ "zero_point": [ 0 ],
+ }
+ },
+ {
+ "shape": )" + outputShape1 + R"( ,
+ "type": "FLOAT32",
+ "buffer": 2,
+ "name": "outputTensor1",
+ "quantization": {
+ "min": [ 0.0 ],
+ "max": [ 255.0 ],
+ "scale": [ 1.0 ],
+ "zero_point": [ 0 ],
+ }
+ },
+ {
+ "shape": )" + outputShape2 + R"( ,
+ "type": "FLOAT32",
+ "buffer": 3,
+ "name": "outputTensor2",
+ "quantization": {
+ "min": [ 0.0 ],
+ "max": [ 255.0 ],
+ "scale": [ 1.0 ],
+ "zero_point": [ 0 ],
+ }
+ }
+ ],
+ "inputs": [ 0, 1 ],
+ "outputs": [ 2, 3 ],
+ "operators": [
+ {
+ "opcode_index": 0,
+ "inputs": [ 0, 1 ],
+ "outputs": [ 2, 3 ],
+ "builtin_options_type": "SplitOptions",
+ "builtin_options": {
+ "num_splits": )" + numSplits + R"(
+ },
+ "custom_options_format": "FLEXBUFFERS"
+ }
+ ],
+ } ],
+ "buffers" : [ {}, {} ]
+ }
+ )";
+
+ Setup();
+ }
+};
+
+
+struct SimpleSplitFixture : SplitFixture
+{
+ SimpleSplitFixture() : SplitFixture( "[ 2, 2, 2, 2 ]", "[ 1 ]", "2",
+ "[ 2, 1, 2, 2 ]", "[ 2, 1, 2, 2 ]")
+ {}
+};
+
+BOOST_FIXTURE_TEST_CASE(ParseAxisOneSplitTwo, SimpleSplitFixture)
+{
+
+ RunTest<4, armnn::DataType::Float32>(
+ 0,
+ { {"inputTensor", { 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f,
+ 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f } } },
+ { {"outputTensor1", { 1.0f, 2.0f, 3.0f, 4.0f, 9.0f, 10.0f, 11.0f, 12.0f }},
+ {"outputTensor2", { 5.0f, 6.0f, 7.0f, 8.0f, 13.0f, 14.0f, 15.0f, 16.0f }}});
+}
+
+struct SimpleSplitAxisThreeFixture : SplitFixture
+{
+ SimpleSplitAxisThreeFixture() : SplitFixture( "[ 2, 2, 2, 2 ]", "[ 3 ]", "2",
+ "[ 2, 2, 2, 1 ]", "[ 2, 2, 2, 1 ]")
+ {}
+};
+
+BOOST_FIXTURE_TEST_CASE(ParseAxisThreeSplitTwo, SimpleSplitAxisThreeFixture)
+{
+ RunTest<4, armnn::DataType::Float32>(
+ 0,
+ { {"inputTensor", { 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f,
+ 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f } } },
+ { {"outputTensor1", { 1.0f, 3.0f, 5.0f, 7.0f, 9.0f, 11.0f, 13.0f, 15.0f }},
+ {"outputTensor2", { 2.0f, 4.0f, 6.0f, 8.0f, 10.0f, 12.0f, 14.0f, 16.0f } } } );
+}
+
+BOOST_AUTO_TEST_SUITE_END() \ No newline at end of file