author      Bruno Goncalves <bruno.slackware@gmail.com>    2018-12-19 12:52:01 -0200
committer   Bruno Goncalves <bruno.slackware@gmail.com>    2019-01-21 18:36:18 -0200
commit      6c2355b1f7dd722eb908dd505826df8df6756471 (patch)
tree        520f8bacda2223dc4ff272405d1390aeefff89bd
parent      2235ceea7b94fd8ae3933ff75ba6428fa697c6b9 (diff)
download    armnn-6c2355b1f7dd722eb908dd505826df8df6756471.tar.gz

Added ParsePad method to TfLiteParser

Change-Id: I2e671f66cf1b0a24b4ca9e96b554dc7db3af9655
-rw-r--r--  CMakeLists.txt                              1
-rw-r--r--  src/armnnTfLiteParser/TfLiteParser.cpp     36
-rw-r--r--  src/armnnTfLiteParser/TfLiteParser.hpp      1
-rw-r--r--  src/armnnTfLiteParser/test/Pad.cpp        104
4 files changed, 142 insertions, 0 deletions
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 134fd103cc..87d91b96b4 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -454,6 +454,7 @@ if(BUILD_UNIT_TESTS)
src/armnnTfLiteParser/test/MaxPool2D.cpp
src/armnnTfLiteParser/test/Mean.cpp
src/armnnTfLiteParser/test/Multiplication.cpp
+ src/armnnTfLiteParser/test/Pad.cpp
src/armnnTfLiteParser/test/Reshape.cpp
src/armnnTfLiteParser/test/Softmax.cpp
src/armnnTfLiteParser/test/Squeeze.cpp
diff --git a/src/armnnTfLiteParser/TfLiteParser.cpp b/src/armnnTfLiteParser/TfLiteParser.cpp
index e3306b317f..0e8d3c5b68 100644
--- a/src/armnnTfLiteParser/TfLiteParser.cpp
+++ b/src/armnnTfLiteParser/TfLiteParser.cpp
@@ -453,6 +453,7 @@ TfLiteParser::TfLiteParser()
m_ParserFunctions[tflite::BuiltinOperator_ADD] = &TfLiteParser::ParseAdd;
m_ParserFunctions[tflite::BuiltinOperator_MUL] = &TfLiteParser::ParseMul;
m_ParserFunctions[tflite::BuiltinOperator_MEAN] = &TfLiteParser::ParseMean;
+ m_ParserFunctions[tflite::BuiltinOperator_PAD] = &TfLiteParser::ParsePad;
}
void TfLiteParser::ResetParser()
@@ -1086,6 +1087,41 @@ void TfLiteParser::ParseMean(size_t subgraphIndex, size_t operatorIndex)
RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
+void TfLiteParser::ParsePad(size_t subgraphIndex, size_t operatorIndex)
+{
+ CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
+
+ TfLiteParser::TensorRawPtrVector inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
+
+ TfLiteParser::TensorRawPtrVector outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
+ CHECK_VALID_SIZE(outputs.size(), 1);
+
+ armnn::TensorInfo padTensorInfo = ToTensorInfo(inputs[1]);
+ BufferRawPtr bufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
+
+ std::vector<unsigned int> padBuffer(padTensorInfo.GetNumElements());
+ ::memcpy(padBuffer.data(), bufferPtr->data.data(), padTensorInfo.GetNumBytes());
+
+ size_t step = 2;
+ armnn::PadDescriptor desc;
+ for (unsigned int i = 0; i < padTensorInfo.GetNumElements() / step; ++i)
+ {
+ desc.m_PadList.emplace_back(padBuffer[i * step], padBuffer[i * step + 1]);
+ }
+
+ auto layerName = boost::str(boost::format("Pad:%1%:%2%") % subgraphIndex % operatorIndex);
+ IConnectableLayer* layer = m_Network->AddPadLayer(desc, layerName.c_str());
+
+ TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
+ layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
+
+ auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
+ RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
+
+ auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
+ RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
+}
+
void TfLiteParser::ParseRelu(size_t subgraphIndex, size_t operatorIndex)
{
CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
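
For reference, this is how ParsePad interprets the PAD operator's second input: TfLite stores the paddings as a flat INT32 tensor of shape [rank, 2], and the loop above turns every consecutive pair of values into one (before, after) entry of the armnn::PadDescriptor. A minimal standalone sketch of that decoding, using std::pair in place of the armnn types and a hard-coded buffer that matches the unit test further down (illustrative only, not the parser's actual code path):

#include <cstdint>
#include <iostream>
#include <utility>
#include <vector>

int main()
{
    // Flat paddings buffer as TfLite stores it: { before_0, after_0, before_1, after_1, ... }.
    std::vector<int32_t> padBuffer = { 1, 1, 2, 2 };

    // Walk the buffer in steps of two, building one (before, after) pair per dimension,
    // mirroring the loop in TfLiteParser::ParsePad.
    const size_t step = 2;
    std::vector<std::pair<unsigned int, unsigned int>> padList;
    for (size_t i = 0; i < padBuffer.size() / step; ++i)
    {
        padList.emplace_back(static_cast<unsigned int>(padBuffer[i * step]),
                             static_cast<unsigned int>(padBuffer[i * step + 1]));
    }

    // For a [2, 3] input this yields {(1, 1), (2, 2)}: one element of padding before and
    // after dimension 0, two before and after dimension 1, giving a [4, 7] output.
    for (const auto& p : padList)
    {
        std::cout << "(" << p.first << ", " << p.second << ")" << std::endl;
    }
    return 0;
}
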
diff --git a/src/armnnTfLiteParser/TfLiteParser.hpp b/src/armnnTfLiteParser/TfLiteParser.hpp
index 581e0121fe..6c264372ba 100644
--- a/src/armnnTfLiteParser/TfLiteParser.hpp
+++ b/src/armnnTfLiteParser/TfLiteParser.hpp
@@ -104,6 +104,7 @@ private:
void ParseAdd(size_t subgraphIndex, size_t operatorIndex);
void ParseMul(size_t subgraphIndex, size_t operatorIndex);
void ParseMean(size_t subgraphIndex, size_t operatorIndex);
+ void ParsePad(size_t subgraphIndex, size_t operatorIndex);
void ParsePool(size_t subgraphIndex, size_t operatorIndex, armnn::PoolingAlgorithm algorithm);
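
With ParsePad declared here and registered against tflite::BuiltinOperator_PAD in the constructor above, a .tflite model containing a PAD operator can be loaded through the public parser interface instead of being rejected as unsupported. A minimal sketch, assuming the ITfLiteParser::Create()/CreateNetworkFromBinaryFile() entry points and a purely illustrative model file name:

#include "armnnTfLiteParser/ITfLiteParser.hpp"
#include <armnn/INetwork.hpp>

int main()
{
    using namespace armnnTfLiteParser;

    // Create the parser and load a flatbuffer model from disk; after this patch
    // any PAD operators in the graph are translated into armnn Pad layers.
    ITfLiteParserPtr parser = ITfLiteParser::Create();
    armnn::INetworkPtr network =
        parser->CreateNetworkFromBinaryFile("padded_model.tflite");  // hypothetical file name

    return network ? 0 : 1;
}
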
diff --git a/src/armnnTfLiteParser/test/Pad.cpp b/src/armnnTfLiteParser/test/Pad.cpp
new file mode 100644
index 0000000000..09b744a7ce
--- /dev/null
+++ b/src/armnnTfLiteParser/test/Pad.cpp
@@ -0,0 +1,104 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include <boost/test/unit_test.hpp>
+#include "ParserFlatbuffersFixture.hpp"
+#include "../TfLiteParser.hpp"
+
+#include <string>
+#include <iostream>
+
+BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
+
+struct PadFixture : public ParserFlatbuffersFixture
+{
+ explicit PadFixture(const std::string & inputShape,
+ const std::string & outputShape,
+ const std::string & padListShape,
+ const std::string & padListData)
+ {
+ m_JsonString = R"(
+ {
+ "version": 3,
+ "operator_codes": [ { "builtin_code": "PAD" } ],
+ "subgraphs": [ {
+ "tensors": [
+ {
+ "shape": )" + inputShape + R"(,
+ "type": "FLOAT32",
+ "buffer": 0,
+ "name": "inputTensor",
+ "quantization": {
+ "min": [ 0.0 ],
+ "max": [ 255.0 ],
+ "scale": [ 1.0 ],
+ "zero_point": [ 0 ],
+ }
+ },
+ {
+ "shape": )" + outputShape + R"(,
+ "type": "FLOAT32",
+ "buffer": 1,
+ "name": "outputTensor",
+ "quantization": {
+ "min": [ 0.0 ],
+ "max": [ 255.0 ],
+ "scale": [ 1.0 ],
+ "zero_point": [ 0 ],
+ }
+ },
+ {
+ "shape": )" + padListShape + R"( ,
+ "type": "INT32",
+ "buffer": 2,
+ "name": "padList",
+ "quantization": {
+ "min": [ 0.0 ],
+ "max": [ 255.0 ],
+ "scale": [ 1.0 ],
+ "zero_point": [ 0 ],
+ }
+ }
+ ],
+ "inputs": [ 0 ],
+ "outputs": [ 1 ],
+ "operators": [
+ {
+ "opcode_index": 0,
+ "inputs": [ 0, 2 ],
+ "outputs": [ 1 ],
+ "custom_options_format": "FLEXBUFFERS"
+ }
+ ],
+ } ],
+ "buffers" : [
+ { },
+ { },
+ { "data": )" + padListData + R"(, },
+ ]
+ }
+ )";
+ SetupSingleInputSingleOutput("inputTensor", "outputTensor");
+ }
+};
+
+struct SimplePadFixture : public PadFixture
+{
+ SimplePadFixture() : PadFixture("[ 2, 3 ]", "[ 4, 7 ]", "[ 2, 2 ]",
+ "[ 1,0,0,0, 1,0,0,0, 2,0,0,0, 2,0,0,0 ]") {}
+};
+
+BOOST_FIXTURE_TEST_CASE(ParsePad, SimplePadFixture)
+{
+ RunTest<2, float>(0,
+ {{ "inputTensor", { 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f }}},
+ {{ "outputTensor", { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
+ 0.0f, 0.0f, 1.0f, 2.0f, 3.0f, 0.0f, 0.0f,
+ 0.0f, 0.0f, 4.0f, 5.0f, 6.0f, 0.0f, 0.0f,
+ 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f }}});
+}
+
+BOOST_AUTO_TEST_SUITE_END()
+
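
A note on the fixture data above: the padList buffer is specified as raw bytes, so "[ 1,0,0,0, 1,0,0,0, 2,0,0,0, 2,0,0,0 ]" is the little-endian byte encoding of the four INT32 values { 1, 1, 2, 2 }. A small sketch of that decoding (illustrative only; assumes a little-endian host, matching what the parser's memcpy relies on):

#include <cstdint>
#include <cstring>
#include <iostream>
#include <vector>

int main()
{
    // The 16 bytes from the fixture's padListData, as stored in the flatbuffer.
    std::vector<uint8_t> bytes = { 1,0,0,0, 1,0,0,0, 2,0,0,0, 2,0,0,0 };

    // Reinterpret them as INT32 values, the same way ParsePad's memcpy does.
    std::vector<int32_t> values(bytes.size() / sizeof(int32_t));
    std::memcpy(values.data(), bytes.data(), bytes.size());

    // Prints "1 1 2 2": pad dimension 0 by (1, 1) and dimension 1 by (2, 2),
    // growing the [2, 3] input to the [4, 7] output the test checks.
    for (int32_t v : values)
    {
        std::cout << v << " ";
    }
    std::cout << std::endl;
    return 0;
}
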