From 6c2355b1f7dd722eb908dd505826df8df6756471 Mon Sep 17 00:00:00 2001
From: Bruno Goncalves
Date: Wed, 19 Dec 2018 12:52:01 -0200
Subject: Added ParsePad method to TfLiteParser

Change-Id: I2e671f66cf1b0a24b4ca9e96b554dc7db3af9655
---
 src/armnnTfLiteParser/TfLiteParser.cpp |  36 ++++++++++++
 src/armnnTfLiteParser/TfLiteParser.hpp |   1 +
 src/armnnTfLiteParser/test/Pad.cpp     | 104 +++++++++++++++++++++++++++++++++
 3 files changed, 141 insertions(+)
 create mode 100644 src/armnnTfLiteParser/test/Pad.cpp

diff --git a/src/armnnTfLiteParser/TfLiteParser.cpp b/src/armnnTfLiteParser/TfLiteParser.cpp
index e3306b317f..0e8d3c5b68 100644
--- a/src/armnnTfLiteParser/TfLiteParser.cpp
+++ b/src/armnnTfLiteParser/TfLiteParser.cpp
@@ -453,6 +453,7 @@ TfLiteParser::TfLiteParser()
     m_ParserFunctions[tflite::BuiltinOperator_ADD] = &TfLiteParser::ParseAdd;
     m_ParserFunctions[tflite::BuiltinOperator_MUL] = &TfLiteParser::ParseMul;
     m_ParserFunctions[tflite::BuiltinOperator_MEAN] = &TfLiteParser::ParseMean;
+    m_ParserFunctions[tflite::BuiltinOperator_PAD] = &TfLiteParser::ParsePad;
 }
 
 void TfLiteParser::ResetParser()
@@ -1086,6 +1087,41 @@ void TfLiteParser::ParseMean(size_t subgraphIndex, size_t operatorIndex)
     RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
 }
 
+void TfLiteParser::ParsePad(size_t subgraphIndex, size_t operatorIndex)
+{
+    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
+
+    TfLiteParser::TensorRawPtrVector inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
+
+    TfLiteParser::TensorRawPtrVector outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
+    CHECK_VALID_SIZE(outputs.size(), 1);
+
+    armnn::TensorInfo padTensorInfo = ToTensorInfo(inputs[1]);
+    BufferRawPtr bufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
+
+    std::vector<unsigned int> padBuffer(padTensorInfo.GetNumElements());
+    ::memcpy(padBuffer.data(), bufferPtr->data.data(), padTensorInfo.GetNumBytes());
+
+    size_t step = 2;
+    armnn::PadDescriptor desc;
+    for (unsigned int i = 0; i < padTensorInfo.GetNumElements() / step; ++i)
+    {
+        desc.m_PadList.emplace_back(padBuffer[i * step], padBuffer[i * step + 1]);
+    }
+
+    auto layerName = boost::str(boost::format("Pad:%1%:%2%") % subgraphIndex % operatorIndex);
+    IConnectableLayer* layer = m_Network->AddPadLayer(desc, layerName.c_str());
+
+    TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
+    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
+
+    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
+    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
+
+    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
+    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
+}
+
 void TfLiteParser::ParseRelu(size_t subgraphIndex, size_t operatorIndex)
 {
     CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
diff --git a/src/armnnTfLiteParser/TfLiteParser.hpp b/src/armnnTfLiteParser/TfLiteParser.hpp
index 581e0121fe..6c264372ba 100644
--- a/src/armnnTfLiteParser/TfLiteParser.hpp
+++ b/src/armnnTfLiteParser/TfLiteParser.hpp
@@ -104,6 +104,7 @@ private:
     void ParseAdd(size_t subgraphIndex, size_t operatorIndex);
     void ParseMul(size_t subgraphIndex, size_t operatorIndex);
     void ParseMean(size_t subgraphIndex, size_t operatorIndex);
+    void ParsePad(size_t subgraphIndex, size_t operatorIndex);
 
     void ParsePool(size_t subgraphIndex, size_t operatorIndex, armnn::PoolingAlgorithm algorithm);
 
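Supplementary note, not part of the patch: the core of ParsePad is turning the PAD operator's second input, a [numDims, 2] INT32 tensor of before/after padding amounts read out of a flatbuffer buffer, into armnn's PadDescriptor::m_PadList. The sketch below mirrors that decoding step using plain std types in place of BufferRawPtr and PadDescriptor; rawBuffer and padList are illustrative names only.

    // Minimal sketch of the pad-list decoding performed by ParsePad.
    // The buffer holds numDims * 2 int32 values laid out as
    // [before_0, after_0, before_1, after_1, ...]; armnn keeps them as
    // one (before, after) pair per dimension.
    #include <cstddef>
    #include <cstdint>
    #include <cstring>
    #include <iostream>
    #include <utility>
    #include <vector>

    int main()
    {
        // Raw bytes as they appear in the test model below (little-endian int32
        // values 1, 1, 2, 2): pad rows by (1, 1) and columns by (2, 2).
        const std::vector<uint8_t> rawBuffer = { 1,0,0,0, 1,0,0,0, 2,0,0,0, 2,0,0,0 };

        std::vector<uint32_t> padBuffer(rawBuffer.size() / sizeof(uint32_t));
        std::memcpy(padBuffer.data(), rawBuffer.data(), rawBuffer.size());

        // Stand-in for armnn::PadDescriptor::m_PadList.
        std::vector<std::pair<uint32_t, uint32_t>> padList;
        const std::size_t step = 2;
        for (std::size_t i = 0; i < padBuffer.size() / step; ++i)
        {
            padList.emplace_back(padBuffer[i * step], padBuffer[i * step + 1]);
        }

        for (const auto& p : padList)
        {
            std::cout << "pad before: " << p.first << ", pad after: " << p.second << "\n";
        }
        return 0;
    }

On a little-endian host this prints the pairs (1, 1) and (2, 2), which is exactly what the test fixture added below encodes in padListData.
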
diff --git a/src/armnnTfLiteParser/test/Pad.cpp b/src/armnnTfLiteParser/test/Pad.cpp
new file mode 100644
index 0000000000..09b744a7ce
--- /dev/null
+++ b/src/armnnTfLiteParser/test/Pad.cpp
@@ -0,0 +1,104 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include <boost/test/unit_test.hpp>
+#include "ParserFlatbuffersFixture.hpp"
+#include "../TfLiteParser.hpp"
+
+#include <string>
+#include <iostream>
+
+BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
+
+struct PadFixture : public ParserFlatbuffersFixture
+{
+    explicit PadFixture(const std::string & inputShape,
+                        const std::string & outputShape,
+                        const std::string & padListShape,
+                        const std::string & padListData)
+    {
+        m_JsonString = R"(
+            {
+                "version": 3,
+                "operator_codes": [ { "builtin_code": "PAD" } ],
+                "subgraphs": [ {
+                    "tensors": [
+                        {
+                            "shape": )" + inputShape + R"(,
+                            "type": "FLOAT32",
+                            "buffer": 0,
+                            "name": "inputTensor",
+                            "quantization": {
+                                "min": [ 0.0 ],
+                                "max": [ 255.0 ],
+                                "scale": [ 1.0 ],
+                                "zero_point": [ 0 ],
+                            }
+                        },
+                        {
+                            "shape": )" + outputShape + R"(,
+                            "type": "FLOAT32",
+                            "buffer": 1,
+                            "name": "outputTensor",
+                            "quantization": {
+                                "min": [ 0.0 ],
+                                "max": [ 255.0 ],
+                                "scale": [ 1.0 ],
+                                "zero_point": [ 0 ],
+                            }
+                        },
+                        {
+                            "shape": )" + padListShape + R"( ,
+                            "type": "INT32",
+                            "buffer": 2,
+                            "name": "padList",
+                            "quantization": {
+                                "min": [ 0.0 ],
+                                "max": [ 255.0 ],
+                                "scale": [ 1.0 ],
+                                "zero_point": [ 0 ],
+                            }
+                        }
+                    ],
+                    "inputs": [ 0 ],
+                    "outputs": [ 1 ],
+                    "operators": [
+                        {
+                            "opcode_index": 0,
+                            "inputs": [ 0, 2 ],
+                            "outputs": [ 1 ],
+                            "custom_options_format": "FLEXBUFFERS"
+                        }
+                    ],
+                } ],
+                "buffers" : [
+                    { },
+                    { },
+                    { "data": )" + padListData + R"(, },
+                ]
+            }
+        )";
+        SetupSingleInputSingleOutput("inputTensor", "outputTensor");
+    }
+};
+
+struct SimplePadFixture : public PadFixture
+{
+    SimplePadFixture() : PadFixture("[ 2, 3 ]", "[ 4, 7 ]", "[ 2, 2 ]",
+                                    "[ 1,0,0,0, 1,0,0,0, 2,0,0,0, 2,0,0,0 ]") {}
+};
+
+BOOST_FIXTURE_TEST_CASE(ParsePad, SimplePadFixture)
+{
+    RunTest<2, float>(0,
+                      {{ "inputTensor",  { 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f }}},
+                      {{ "outputTensor", { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
+                                           0.0f, 0.0f, 1.0f, 2.0f, 3.0f, 0.0f, 0.0f,
+                                           0.0f, 0.0f, 4.0f, 5.0f, 6.0f, 0.0f, 0.0f,
+                                           0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f }}});
+}
+
+BOOST_AUTO_TEST_SUITE_END()
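For reference, the expected values in SimplePadFixture follow directly from the pad list: a 2x3 input padded with one row of zeros before and after, and two columns of zeros before and after, yields the 4x7 output listed in the test case, with the six input values in the interior. The standalone snippet below, using illustrative names only (padTop, padLeft and so on, none of which appear in the patch), reproduces those expected values.

    // Reproduces the expected output of the SimplePadFixture test case above:
    // a 2x3 input zero-padded by (1, 1) rows and (2, 2) columns gives a 4x7 output.
    #include <cstddef>
    #include <iostream>
    #include <vector>

    int main()
    {
        const std::size_t inRows = 2, inCols = 3;
        const std::vector<float> input = { 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f };

        // Padding amounts taken from the fixture's padListData: (before, after) per dimension.
        const std::size_t padTop = 1, padBottom = 1, padLeft = 2, padRight = 2;

        const std::size_t outRows = inRows + padTop + padBottom;   // 4
        const std::size_t outCols = inCols + padLeft + padRight;   // 7
        std::vector<float> output(outRows * outCols, 0.0f);        // zero-fill, then copy the interior

        for (std::size_t r = 0; r < inRows; ++r)
        {
            for (std::size_t c = 0; c < inCols; ++c)
            {
                output[(r + padTop) * outCols + (c + padLeft)] = input[r * inCols + c];
            }
        }

        // Print the padded tensor row by row; it matches the "outputTensor" values above.
        for (std::size_t r = 0; r < outRows; ++r)
        {
            for (std::size_t c = 0; c < outCols; ++c)
            {
                std::cout << output[r * outCols + c] << (c + 1 == outCols ? "\n" : " ");
            }
        }
        return 0;
    }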