From bcca1f4a7ba8364f7b5e58e8e8866ccd7d530f92 Mon Sep 17 00:00:00 2001
From: Matthew Jackson
Date: Tue, 16 Jul 2019 11:39:21 +0100
Subject: IVGCVSW-3423 Add TfLite parser support for Stack (Pack) layer

* Added ParsePack method
* New unit test Pack.cpp
* Updated TensorFlowLiteSupport.md with new supported operator

Signed-off-by: Matthew Jackson
Change-Id: I2310b33ee26959b036bb4452a25c90cc1d4cbf20
---
 CMakeLists.txt                                 |   1 +
 src/armnn/InternalTypes.cpp                    |   1 +
 src/armnnTfLiteParser/TensorFlowLiteSupport.md |   2 +
 src/armnnTfLiteParser/TfLiteParser.cpp         |  41 +++++++
 src/armnnTfLiteParser/TfLiteParser.hpp         |   1 +
 src/armnnTfLiteParser/test/Pack.cpp            | 153 +++++++++++++++++++++++++
 6 files changed, 199 insertions(+)
 create mode 100644 src/armnnTfLiteParser/test/Pack.cpp

diff --git a/CMakeLists.txt b/CMakeLists.txt
index dbb212ff73..9bc4201a5e 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -570,6 +570,7 @@ if(BUILD_UNIT_TESTS)
         src/armnnTfLiteParser/test/Mean.cpp
         src/armnnTfLiteParser/test/Minimum.cpp
         src/armnnTfLiteParser/test/Multiplication.cpp
+        src/armnnTfLiteParser/test/Pack.cpp
         src/armnnTfLiteParser/test/Pad.cpp
         src/armnnTfLiteParser/test/Reshape.cpp
         src/armnnTfLiteParser/test/ResizeBilinear.cpp
diff --git a/src/armnn/InternalTypes.cpp b/src/armnn/InternalTypes.cpp
index 417581f010..896e4161ba 100644
--- a/src/armnn/InternalTypes.cpp
+++ b/src/armnn/InternalTypes.cpp
@@ -55,6 +55,7 @@ char const* GetLayerTypeAsCString(LayerType type)
         case LayerType::Softmax: return "Softmax";
         case LayerType::SpaceToBatchNd: return "SpaceToBatchNd";
         case LayerType::Splitter: return "Splitter";
+        case LayerType::Stack: return "Stack";
         case LayerType::StridedSlice: return "StridedSlice";
         case LayerType::Subtraction: return "Subtraction";
         case LayerType::Switch: return "Switch";
diff --git a/src/armnnTfLiteParser/TensorFlowLiteSupport.md b/src/armnnTfLiteParser/TensorFlowLiteSupport.md
index 9dd68a3372..8a8b7bed6d 100644
--- a/src/armnnTfLiteParser/TensorFlowLiteSupport.md
+++ b/src/armnnTfLiteParser/TensorFlowLiteSupport.md
@@ -32,6 +32,8 @@ The Arm NN SDK TensorFlow Lite parser currently supports the following operators
 
 * MUL
 
+* PACK
+
 * PAD
 
 * RELU
diff --git a/src/armnnTfLiteParser/TfLiteParser.cpp b/src/armnnTfLiteParser/TfLiteParser.cpp
index 9ee327933a..21c17155b5 100644
--- a/src/armnnTfLiteParser/TfLiteParser.cpp
+++ b/src/armnnTfLiteParser/TfLiteParser.cpp
@@ -455,6 +455,7 @@ TfLiteParser::TfLiteParser()
     m_ParserFunctions[tflite::BuiltinOperator_ADD] = &TfLiteParser::ParseAdd;
     m_ParserFunctions[tflite::BuiltinOperator_MUL] = &TfLiteParser::ParseMul;
     m_ParserFunctions[tflite::BuiltinOperator_MEAN] = &TfLiteParser::ParseMean;
+    m_ParserFunctions[tflite::BuiltinOperator_PACK] = &TfLiteParser::ParsePack;
     m_ParserFunctions[tflite::BuiltinOperator_PAD] = &TfLiteParser::ParsePad;
     m_ParserFunctions[tflite::BuiltinOperator_SPLIT] = &TfLiteParser::ParseSplit;
     m_ParserFunctions[tflite::BuiltinOperator_TANH] = &TfLiteParser::ParseTanH;
@@ -1906,6 +1907,46 @@ void TfLiteParser::ParseDetectionPostProcess(size_t subgraphIndex, size_t operat
                                   outputTensorIndexes[3]});
 }
 
+/// The TfLite Pack operator is equivalent to the ArmNN Stack operator
+void TfLiteParser::ParsePack(size_t subgraphIndex, size_t operatorIndex)
+{
+    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
+
+    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
+    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
+    CHECK_VALID_SIZE(outputs.size(), 1);
+
+    if (inputs.size() < 1)
+    {
+        throw ParseException("Pack must have at least one input.");
+    }
+
+    const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
+    const auto* options = operatorPtr->builtin_options.AsPackOptions();
+
+    StackDescriptor desc;
+    desc.m_Axis = static_cast<uint32_t>(options->axis);
+    desc.m_NumInputs = static_cast<uint32_t>(inputs.size());
+
+    // Use the tensor shape of the first input as the "correct" input shape in the descriptor
+    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
+    desc.m_InputShape = inputTensorInfo.GetShape();
+
+    auto layerName = boost::str(boost::format("Pack:%1%:%2%") % subgraphIndex % operatorIndex);
+    IConnectableLayer* layer = m_Network->AddStackLayer(desc, layerName.c_str());
+
+    BOOST_ASSERT(layer != nullptr);
+
+    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
+    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
+
+    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
+    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes});
+
+    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
+    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
+}
+
 void TfLiteParser::ParseUnpack(size_t subgraphIndex, size_t operatorIndex)
 {
     CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
diff --git a/src/armnnTfLiteParser/TfLiteParser.hpp b/src/armnnTfLiteParser/TfLiteParser.hpp
index 07a0f26590..437e459732 100644
--- a/src/armnnTfLiteParser/TfLiteParser.hpp
+++ b/src/armnnTfLiteParser/TfLiteParser.hpp
@@ -105,6 +105,7 @@ private:
     void ParseMean(size_t subgraphIndex, size_t operatorIndex);
     void ParseMinimum(size_t subgraphIndex, size_t operatorIndex);
     void ParseMul(size_t subgraphIndex, size_t operatorIndex);
+    void ParsePack(size_t subgraphIndex, size_t operatorIndex);
     void ParsePad(size_t subgraphIndex, size_t operatorIndex);
     void ParsePool(size_t subgraphIndex, size_t operatorIndex, armnn::PoolingAlgorithm algorithm);
     void ParseRelu(size_t subgraphIndex, size_t operatorIndex);
diff --git a/src/armnnTfLiteParser/test/Pack.cpp b/src/armnnTfLiteParser/test/Pack.cpp
new file mode 100644
index 0000000000..011312f7c9
--- /dev/null
+++ b/src/armnnTfLiteParser/test/Pack.cpp
@@ -0,0 +1,153 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include <boost/test/unit_test.hpp>
+#include "ParserFlatbuffersFixture.hpp"
+#include "../TfLiteParser.hpp"
+
+#include <string>
+#include <iostream>
+
+BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
+
+struct PackFixture : public ParserFlatbuffersFixture
+{
+    explicit PackFixture(const std::string & inputShape,
+                         const unsigned int numInputs,
+                         const std::string & outputShape,
+                         const std::string & axis)
+    {
+        m_JsonString = R"(
+            {
+                "version": 3,
+                "operator_codes": [ { "builtin_code": "PACK" } ],
+                "subgraphs": [ {
+                    "tensors": [)";
+
+        for (unsigned int i = 0; i < numInputs; ++i)
+        {
+            m_JsonString += R"(
+                    {
+                        "shape": )" + inputShape + R"(,
+                        "type": "FLOAT32",
+                        "buffer": )" + std::to_string(i) + R"(,
+                        "name": "inputTensor)" + std::to_string(i + 1) + R"(",
+                        "quantization": {
+                            "min": [ 0.0 ],
+                            "max": [ 255.0 ],
+                            "scale": [ 1.0 ],
+                            "zero_point": [ 0 ],
+                        }
+                    },)";
+        }
+
+        std::string inputIndexes;
+        for (unsigned int i = 0; i < numInputs-1; ++i)
+        {
+            inputIndexes += std::to_string(i) + R"(, )";
+        }
+        inputIndexes += std::to_string(numInputs-1);
+
+        m_JsonString += R"(
+                    {
+                        "shape": )" + outputShape + R"( ,
+                        "type": "FLOAT32",
+                        "buffer": )" + std::to_string(numInputs) + R"(,
+                        "name": "outputTensor",
+                        "quantization": {
+                            "min": [ 0.0 ],
+                            "max": [ 255.0 ],
+                            "scale": [ 1.0 ],
+                            "zero_point": [ 0 ],
+                        }
+                    }
+                    ],
+                    "inputs": [ )" + inputIndexes + R"( ],
+                    "outputs": [ 2 ],
+                    "operators": [
+                        {
+                            "opcode_index": 0,
+                            "inputs": [ )" + inputIndexes + R"( ],
+                            "outputs": [ 2 ],
+                            "builtin_options_type": "PackOptions",
+                            "builtin_options": {
+                                "axis": )" + axis + R"(,
+                                "values_count": )" + std::to_string(numInputs) + R"(
+                            },
+                            "custom_options_format": "FLEXBUFFERS"
+                        }
+                    ],
+                } ],
+                "buffers" : [)";
+
+        for (unsigned int i = 0; i < numInputs-1; ++i)
+        {
+            m_JsonString += R"(
+                { },)";
+        }
+        m_JsonString += R"(
+                { }
+            ]
+        })";
+        Setup();
+    }
+};
+
+struct SimplePackFixture : PackFixture
+{
+    SimplePackFixture() : PackFixture("[ 3, 2, 3 ]",
+                                      2,
+                                      "[ 3, 2, 3, 2 ]",
+                                      "3") {}
+};
+
+BOOST_FIXTURE_TEST_CASE(ParsePack, SimplePackFixture)
+{
+    RunTest<4, armnn::DataType::Float32>(
+    0,
+    { {"inputTensor1", { 1, 2, 3,
+                         4, 5, 6,
+
+                         7, 8, 9,
+                         10, 11, 12,
+
+                         13, 14, 15,
+                         16, 17, 18 } },
+      {"inputTensor2", { 19, 20, 21,
+                         22, 23, 24,
+
+                         25, 26, 27,
+                         28, 29, 30,
+
+                         31, 32, 33,
+                         34, 35, 36 } } },
+    { {"outputTensor", { 1, 19,
+                         2, 20,
+                         3, 21,
+
+                         4, 22,
+                         5, 23,
+                         6, 24,
+
+
+                         7, 25,
+                         8, 26,
+                         9, 27,
+
+                         10, 28,
+                         11, 29,
+                         12, 30,
+
+
+                         13, 31,
+                         14, 32,
+                         15, 33,
+
+                         16, 34,
+                         17, 35,
+                         18, 36 } } });
+}
+
+BOOST_AUTO_TEST_SUITE_END()
--
cgit v1.2.1
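
For illustration only, and not part of the patch above: the network that ParsePack builds for the SimplePackFixture (two [3, 2, 3] inputs packed along a new axis 3 into a [3, 2, 3, 2] output) can be sketched directly against the public ArmNN graph API, using the same StackDescriptor fields and AddStackLayer call the parser uses. This is a minimal sketch; the header path armnn/ArmNN.hpp, the layer names, and the binding ids are assumptions chosen for the example, not taken from the patch.

    #include <armnn/ArmNN.hpp>

    int main()
    {
        using namespace armnn;

        // Two [3, 2, 3] inputs joined along a new axis 3 -> [3, 2, 3, 2],
        // mirroring the SimplePackFixture shapes.
        StackDescriptor desc;
        desc.m_Axis       = 3;
        desc.m_NumInputs  = 2;
        desc.m_InputShape = TensorShape({ 3, 2, 3 });

        INetworkPtr network = INetwork::Create();

        IConnectableLayer* input0 = network->AddInputLayer(0, "inputTensor1");
        IConnectableLayer* input1 = network->AddInputLayer(1, "inputTensor2");
        IConnectableLayer* stack  = network->AddStackLayer(desc, "Pack");
        IConnectableLayer* output = network->AddOutputLayer(0, "outputTensor");

        TensorInfo inputInfo (TensorShape({ 3, 2, 3 }),    DataType::Float32);
        TensorInfo outputInfo(TensorShape({ 3, 2, 3, 2 }), DataType::Float32);

        // Tensor infos on the producing output slots, as the parser does for the Stack layer
        input0->GetOutputSlot(0).SetTensorInfo(inputInfo);
        input1->GetOutputSlot(0).SetTensorInfo(inputInfo);
        stack->GetOutputSlot(0).SetTensorInfo(outputInfo);

        // Wire inputs -> stack -> output
        input0->GetOutputSlot(0).Connect(stack->GetInputSlot(0));
        input1->GetOutputSlot(0).Connect(stack->GetInputSlot(1));
        stack->GetOutputSlot(0).Connect(output->GetInputSlot(0));

        return 0;
    }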