author     Bruno Goncalves <bruno.slackware@gmail.com>   2019-02-08 19:02:48 -0200
committer  derek.lamberti <derek.lamberti@arm.com>       2019-02-22 15:29:36 +0000
commit     baded14fc01be6c529c4755cda0cb39a278f0b39
tree       19765671c7da05bf976404b6c9869a17906ff8ad
parent     db947e26cf40b7dd274be3741f4f5f9231c10cef
download   armnn-baded14fc01be6c529c4755cda0cb39a278f0b39.tar.gz
Add space-to-batch-nd parser to tf-lite

Change-Id: I3bf86d44f811380559ec35eed0bc43b3bd97da80
Signed-off-by: Bruno Goncalves <bruno.slackware@gmail.com>
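
Note (illustration, not part of the patch): the handler follows the TensorFlow Lite definition of SPACE_TO_BATCH_ND. The spatial dimensions are first zero-padded according to the pad list, then divided by the block shape, and the resulting blocks are moved into the batch dimension, so the output batch size is the input batch size multiplied by the product of the block shape. As a minimal sketch, this is the descriptor the new ParseSpaceToBatchND builds for the simple unit test below (block shape [2, 2], zero padding), assuming armnn/Descriptors.hpp declares SpaceToBatchNdDescriptor as in mainline Arm NN:

    #include <armnn/Descriptors.hpp>

    // Illustration only: descriptor for an input of shape [ 1, 4, 4, 1 ]
    // with block shape [ 2, 2 ] and no padding.
    armnn::SpaceToBatchNdDescriptor desc;
    desc.m_BlockShape = { 2, 2 };                  // split H and W by 2
    desc.m_PadList    = { { 0, 0 }, { 0, 0 } };    // no zero padding
    desc.m_DataLayout = armnn::DataLayout::NHWC;   // hard-coded by the parser
    // Expected output shape: [ 1 * 2 * 2, 4 / 2, 4 / 2, 1 ] = [ 4, 2, 2, 1 ]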
-rw-r--r--  CMakeLists.txt                                  |   1
-rw-r--r--  src/armnnTfLiteParser/TfLiteParser.cpp          |  49
-rw-r--r--  src/armnnTfLiteParser/TfLiteParser.hpp          |   1
-rw-r--r--  src/armnnTfLiteParser/test/SpaceToBatchND.cpp   | 178
4 files changed, 229 insertions, 0 deletions
diff --git a/CMakeLists.txt b/CMakeLists.txt
index c465fde2ed..91a0a0a552 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -556,6 +556,7 @@ if(BUILD_UNIT_TESTS)
src/armnnTfLiteParser/test/Reshape.cpp
src/armnnTfLiteParser/test/ResizeBilinear.cpp
src/armnnTfLiteParser/test/Softmax.cpp
+ src/armnnTfLiteParser/test/SpaceToBatchND.cpp
src/armnnTfLiteParser/test/Sub.cpp
src/armnnTfLiteParser/test/Squeeze.cpp
src/armnnTfLiteParser/test/LoadModel.cpp
diff --git a/src/armnnTfLiteParser/TfLiteParser.cpp b/src/armnnTfLiteParser/TfLiteParser.cpp
index 31aab029ab..e19edc3821 100644
--- a/src/armnnTfLiteParser/TfLiteParser.cpp
+++ b/src/armnnTfLiteParser/TfLiteParser.cpp
@@ -432,6 +432,7 @@ TfLiteParser::TfLiteParser()
m_ParserFunctions[tflite::BuiltinOperator_RESHAPE] = &TfLiteParser::ParseReshape;
m_ParserFunctions[tflite::BuiltinOperator_RESIZE_BILINEAR] = &TfLiteParser::ParseResizeBilinear;
m_ParserFunctions[tflite::BuiltinOperator_SOFTMAX] = &TfLiteParser::ParseSoftmax;
+ m_ParserFunctions[tflite::BuiltinOperator_SPACE_TO_BATCH_ND] = &TfLiteParser::ParseSpaceToBatchND;
m_ParserFunctions[tflite::BuiltinOperator_SQUEEZE] = &TfLiteParser::ParseSqueeze;
m_ParserFunctions[tflite::BuiltinOperator_SUB] = &TfLiteParser::ParseSub;
m_ParserFunctions[tflite::BuiltinOperator_ADD] = &TfLiteParser::ParseAdd;
@@ -990,6 +991,54 @@ void TfLiteParser::ParseSoftmax(size_t subgraphIndex, size_t operatorIndex)
RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
+void TfLiteParser::ParseSpaceToBatchND(size_t subgraphIndex, size_t operatorIndex)
+{
+ CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
+
+ auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
+ CHECK_VALID_SIZE(inputs.size(), 3);
+
+ auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
+ CHECK_VALID_SIZE(outputs.size(), 1);
+
+ armnn::TensorInfo blockShapeTensorInfo = ToTensorInfo(inputs[1]);
+ BufferRawPtr blockShapeBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
+
+ armnn::TensorInfo padListTensorInfo = ToTensorInfo(inputs[2]);
+ BufferRawPtr padListBufferPtr = GetBuffer(m_Model, inputs[2]->buffer);
+
+ std::vector<unsigned int> blockShape(blockShapeTensorInfo.GetNumElements());
+ ::memcpy(blockShape.data(), blockShapeBufferPtr->data.data(), blockShapeTensorInfo.GetNumBytes());
+
+ std::vector<unsigned int> padListVector(padListTensorInfo.GetNumElements());
+ ::memcpy(padListVector.data(), padListBufferPtr->data.data(), padListTensorInfo.GetNumBytes());
+
+ size_t step = 2;
+ std::vector<std::pair<unsigned int, unsigned int>> padList;
+ for (unsigned int i = 0; i < padListTensorInfo.GetNumElements() / step; ++i)
+ {
+ padList.emplace_back(padListVector[i * step], padListVector[i * step + 1]);
+ }
+
+ armnn::SpaceToBatchNdDescriptor desc;
+ desc.m_BlockShape = blockShape;
+ desc.m_PadList = padList;
+ desc.m_DataLayout = armnn::DataLayout::NHWC;
+
+ armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
+
+ auto layerName = boost::str(boost::format("SpaceToBatchND:%1%:%2%") % subgraphIndex % operatorIndex);
+ IConnectableLayer* layer = m_Network->AddSpaceToBatchNdLayer(desc, layerName.c_str());
+
+ layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
+
+ auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
+ RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
+
+ auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
+ RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
+}
+
armnn::TensorInfo TfLiteParser::OutputShapeOfSqueeze(const std::vector<uint32_t> & squeezeDimsIn,
const armnn::TensorInfo & inputTensorInfo)
{
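
For context (illustration, not part of the diff): with the dispatch entry and handler above, a TfLite model containing SPACE_TO_BATCH_ND can now be loaded through the public parser API. A minimal sketch, assuming a placeholder model file "model.tflite" and the tensor names used by the unit tests:

    #include <armnnTfLiteParser/ITfLiteParser.hpp>
    #include <armnn/INetwork.hpp>

    int main()
    {
        using namespace armnnTfLiteParser;

        // Placeholder path; any .tflite graph with a SPACE_TO_BATCH_ND
        // operator exercises the new ParseSpaceToBatchND path.
        ITfLiteParserPtr parser = ITfLiteParser::Create();
        armnn::INetworkPtr network = parser->CreateNetworkFromBinaryFile("model.tflite");

        // Binding info for subgraph 0; names match the test fixture below.
        BindingPointInfo inputInfo  = parser->GetNetworkInputBindingInfo(0, "inputTensor");
        BindingPointInfo outputInfo = parser->GetNetworkOutputBindingInfo(0, "outputTensor");
        return 0;
    }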
diff --git a/src/armnnTfLiteParser/TfLiteParser.hpp b/src/armnnTfLiteParser/TfLiteParser.hpp
index ceca9e2c44..e074b765f1 100644
--- a/src/armnnTfLiteParser/TfLiteParser.hpp
+++ b/src/armnnTfLiteParser/TfLiteParser.hpp
@@ -105,6 +105,7 @@ private:
void ParseReshape(size_t subgraphIndex, size_t operatorIndex);
void ParseResizeBilinear(size_t subgraphIndex, size_t operatorIndex);
void ParseSoftmax(size_t subgraphIndex, size_t operatorIndex);
+ void ParseSpaceToBatchND(size_t subgraphIndex, size_t operatorIndex);
void ParseSqueeze(size_t subgraphIndex, size_t operatorIndex);
void ParseSub(size_t subgraphIndex, size_t operatorIndex);
void ParseAdd(size_t subgraphIndex, size_t operatorIndex);
diff --git a/src/armnnTfLiteParser/test/SpaceToBatchND.cpp b/src/armnnTfLiteParser/test/SpaceToBatchND.cpp
new file mode 100644
index 0000000000..6ff4f53bfc
--- /dev/null
+++ b/src/armnnTfLiteParser/test/SpaceToBatchND.cpp
@@ -0,0 +1,178 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include <boost/test/unit_test.hpp>
+#include "ParserFlatbuffersFixture.hpp"
+#include "../TfLiteParser.hpp"
+
+#include <string>
+#include <iostream>
+
+BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
+
+struct SpaceToBatchNDFixture : public ParserFlatbuffersFixture
+{
+ explicit SpaceToBatchNDFixture(const std::string & inputShape,
+ const std::string & outputShape,
+ const std::string & blockShapeData,
+ const std::string & padListData)
+ {
+ m_JsonString = R"(
+ {
+ "version": 3,
+ "operator_codes": [ { "builtin_code": "SPACE_TO_BATCH_ND" } ],
+ "subgraphs": [ {
+ "tensors": [
+ {
+ "shape": )" + inputShape + R"(,
+ "type": "FLOAT32",
+ "buffer": 0,
+ "name": "inputTensor",
+ "quantization": {
+ "min": [ 0.0 ],
+ "max": [ 255.0 ],
+ "scale": [ 1.0 ],
+ "zero_point": [ 0 ],
+ }
+ },
+ {
+ "shape": )" + outputShape + R"(,
+ "type": "FLOAT32",
+ "buffer": 1,
+ "name": "outputTensor",
+ "quantization": {
+ "min": [ 0.0 ],
+ "max": [ 255.0 ],
+ "scale": [ 1.0 ],
+ "zero_point": [ 0 ],
+ }
+ },
+ {
+ "shape": [ 2 ],
+ "type": "INT32",
+ "buffer": 2,
+ "name": "blockShapeTensor",
+ "quantization": {
+ "min": [ 0.0 ],
+ "max": [ 255.0 ],
+ "scale": [ 1.0 ],
+ "zero_point": [ 0 ],
+ }
+ },
+ {
+ "shape": [ 2, 2 ],
+ "type": "INT32",
+ "buffer": 3,
+ "name": "padListTensor",
+ "quantization": {
+ "min": [ 0.0 ],
+ "max": [ 255.0 ],
+ "scale": [ 1.0 ],
+ "zero_point": [ 0 ],
+ }
+ }
+ ],
+ "inputs": [ 0 ],
+ "outputs": [ 1 ],
+ "operators": [
+ {
+ "opcode_index": 0,
+ "inputs": [ 0, 2, 3 ],
+ "outputs": [ 1 ],
+ "custom_options_format": "FLEXBUFFERS"
+ }
+ ],
+ } ],
+ "buffers" : [
+ { },
+ { },
+ { "data": )" + blockShapeData + R"(, },
+ { "data": )" + padListData + R"(, },
+ ]
+ }
+ )";
+ Setup();
+ }
+};
+
+struct SpaceToBatchNDFixtureSimpleTest : public SpaceToBatchNDFixture
+{
+ SpaceToBatchNDFixtureSimpleTest() : SpaceToBatchNDFixture("[ 1, 4, 4, 1 ]",
+ "[ 4, 2, 2, 1 ]",
+ "[ 2,0,0,0, 2,0,0,0 ]",
+ "[ 0,0,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0 ]") {}
+};
+
+BOOST_FIXTURE_TEST_CASE(SpaceToBatchNdSimpleTest, SpaceToBatchNDFixtureSimpleTest)
+{
+ RunTest<4, armnn::DataType::Float32>
+ (0,
+ {{ "inputTensor", { 1.0f, 2.0f, 3.0f, 4.0f,
+ 5.0f, 6.0f, 7.0f, 8.0f,
+ 9.0f, 10.0f, 11.0f, 12.0f,
+ 13.0f, 14.0f, 15.0f, 16.0f }}},
+ {{ "outputTensor", { 1.0f, 3.0f, 9.0f, 11.0f,
+ 2.0f, 4.0f, 10.0f, 12.0f,
+ 5.0f, 7.0f, 13.0f, 15.0f,
+ 6.0f, 8.0f, 14.0f, 16.0f }}});
+}
+
+
+struct SpaceToBatchNDFixtureMultipleInputBatchesTest : public SpaceToBatchNDFixture
+{
+ SpaceToBatchNDFixtureMultipleInputBatchesTest() : SpaceToBatchNDFixture("[ 2, 2, 4, 1 ]",
+ "[ 8, 1, 2, 1 ]",
+ "[ 2,0,0,0, 2,0,0,0 ]",
+ "[ 0,0,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0 ]") {}
+};
+
+BOOST_FIXTURE_TEST_CASE(SpaceToBatchNdMultipleInputBatchesTest, SpaceToBatchNDFixtureMultipleInputBatchesTest)
+{
+ RunTest<4, armnn::DataType::Float32>
+ (0,
+ {{ "inputTensor", { 1.0f, 2.0f, 3.0f, 4.0f,
+ 5.0f, 6.0f, 7.0f, 8.0f,
+ 9.0f, 10.0f, 11.0f, 12.0f,
+ 13.0f, 14.0f, 15.0f, 16.0f }}},
+ {{ "outputTensor", { 1.0f, 3.0f, 9.0f, 11.0f,
+ 2.0f, 4.0f, 10.0f, 12.0f,
+ 5.0f, 7.0f, 13.0f, 15.0f,
+ 6.0f, 8.0f, 14.0f, 16.0f }}});
+}
+
+struct SpaceToBatchNDFixturePaddingTest : public SpaceToBatchNDFixture
+{
+ SpaceToBatchNDFixturePaddingTest() : SpaceToBatchNDFixture("[ 1, 5, 2, 1 ]",
+ "[ 6, 2, 2, 1 ]",
+ "[ 3,0,0,0, 2,0,0,0 ]",
+ "[ 1,0,0,0, 0,0,0,0, 2,0,0,0, 0,0,0,0 ]") {}
+};
+
+BOOST_FIXTURE_TEST_CASE(SpaceToBatchNdPaddingTest, SpaceToBatchNDFixturePaddingTest)
+{
+ RunTest<4, armnn::DataType::Float32>
+ (0,
+ {{ "inputTensor", { 1.0f, 2.0f, 3.0f, 4.0f, 5.0f,
+ 6.0f, 7.0f, 8.0f, 9.0f, 10.0f }}},
+ {{ "outputTensor", { 0.0f, 0.0f,
+ 0.0f, 5.0f,
+
+ 0.0f, 0.0f,
+ 0.0f, 6.0f,
+
+ 0.0f, 1.0f,
+ 0.0f, 7.0f,
+
+ 0.0f, 2.0f,
+ 0.0f, 8.0f,
+
+ 0.0f, 3.0f,
+ 0.0f, 9.0f,
+
+ 0.0f, 4.0f,
+ 0.0f, 10.0f, }}});
+}
+
+BOOST_AUTO_TEST_SUITE_END()
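
For reference, the expected output of the padding test follows directly from the operator definition (block shape [3, 2], pad list [[1, 0], [2, 0]] on an input of shape [1, 5, 2, 1]):

    output batch    = 1 * 3 * 2        = 6
    output height   = (5 + 1 + 0) / 3  = 2
    output width    = (2 + 2 + 0) / 2  = 2
    output channels =                    1   ->  [ 6, 2, 2, 1 ]

The zeros in the expected tensor are the padded row and columns redistributed across the six output batches.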