From ba424d2aff70a13e1a16c4f9018a0bea4e5c11b3 Mon Sep 17 00:00:00 2001
From: josh minor
Date: Wed, 13 Nov 2019 10:55:17 -0600
Subject: IVGCVSW-1530 Add TfLite slice parser and fix transpose perm vector creation

* TfLite slice parser and relevant tests added
* TfLite transpose parser logic added to translate Tf/np permutation vector definitions to Armnn definitions
* TfLite transpose parser no permute data test modified to include data for the default permutation vector when none is specified

Signed-off-by: josh minor
Change-Id: Iebd30971bd180593dc6b8f0d5be1d1bc61a3a5bf
---
 CMakeLists.txt                                 |   1 +
 src/armnnTfLiteParser/TensorFlowLiteSupport.md |   2 +
 src/armnnTfLiteParser/TfLiteParser.cpp         |  63 ++++++++-
 src/armnnTfLiteParser/TfLiteParser.hpp         |   1 +
 src/armnnTfLiteParser/test/Slice.cpp           | 176 +++++++++++++++++++++++++
 src/armnnTfLiteParser/test/Transpose.cpp       |  55 ++++----
 6 files changed, 264 insertions(+), 34 deletions(-)
 create mode 100644 src/armnnTfLiteParser/test/Slice.cpp

diff --git a/CMakeLists.txt b/CMakeLists.txt
index f088a21bd5..21d1336578 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -718,6 +718,7 @@ if(BUILD_UNIT_TESTS)
         src/armnnTfLiteParser/test/ResizeBilinear.cpp
         src/armnnTfLiteParser/test/Softmax.cpp
         src/armnnTfLiteParser/test/SpaceToBatchND.cpp
+        src/armnnTfLiteParser/test/Slice.cpp
         src/armnnTfLiteParser/test/Split.cpp
         src/armnnTfLiteParser/test/Squeeze.cpp
         src/armnnTfLiteParser/test/StridedSlice.cpp
diff --git a/src/armnnTfLiteParser/TensorFlowLiteSupport.md b/src/armnnTfLiteParser/TensorFlowLiteSupport.md
index 7fa299ebf8..145ca9f747 100644
--- a/src/armnnTfLiteParser/TensorFlowLiteSupport.md
+++ b/src/armnnTfLiteParser/TensorFlowLiteSupport.md
@@ -46,6 +46,8 @@ The Arm NN SDK TensorFlow Lite parser currently supports the following operators
 
 * RESIZE_BILINEAR
 
+* SLICE
+
 * SOFTMAX
 
 * SPACE_TO_BATCH
diff --git a/src/armnnTfLiteParser/TfLiteParser.cpp b/src/armnnTfLiteParser/TfLiteParser.cpp
index 937131ccd7..9a20740914 100644
--- a/src/armnnTfLiteParser/TfLiteParser.cpp
+++ b/src/armnnTfLiteParser/TfLiteParser.cpp
@@ -459,6 +459,7 @@ TfLiteParser::TfLiteParser(const Optional<ITfLiteParser::TfLiteParserOptions>& o
     m_ParserFunctions[tflite::BuiltinOperator_MEAN] = &TfLiteParser::ParseMean;
     m_ParserFunctions[tflite::BuiltinOperator_PACK] = &TfLiteParser::ParsePack;
     m_ParserFunctions[tflite::BuiltinOperator_PAD] = &TfLiteParser::ParsePad;
+    m_ParserFunctions[tflite::BuiltinOperator_SLICE] = &TfLiteParser::ParseSlice;
     m_ParserFunctions[tflite::BuiltinOperator_SPLIT] = &TfLiteParser::ParseSplit;
     m_ParserFunctions[tflite::BuiltinOperator_TANH] = &TfLiteParser::ParseTanH;
     m_ParserFunctions[tflite::BuiltinOperator_TRANSPOSE] = &TfLiteParser::ParseTranspose;
@@ -934,17 +935,27 @@ void TfLiteParser::ParseTranspose(size_t subgraphIndex, size_t operatorIndex)
 
     PermuteDescriptor desc;
 
-    if(inputs.size() == 2)
+    if (inputs.size() == 2)
     {
         armnn::TensorInfo permuteTensorInfo = ToTensorInfo(inputs[1]);
         BufferRawPtr permuteBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
-
-        std::vector<unsigned int> permuteShape(permuteTensorInfo.GetNumElements());
+        auto numPermVecElements = permuteTensorInfo.GetNumElements();
+        std::vector<unsigned int> permuteShape(numPermVecElements);
         ::memcpy(permuteShape.data(), permuteBufferPtr->data.data(), permuteTensorInfo.GetNumBytes());
 
-        PermutationVector permutationVector(permuteShape.data(), permuteTensorInfo.GetNumElements());
+        // permuteShape assumes Tf/Np permute vectors, so we must translate to the armnn expected form.
+        // To do so we find the perm vector which would invert what a Tf perm vector would do (e.g. 3,0,1,2 -> 1,2,3,0).
+        std::vector<unsigned int> armnnPermuteShape(numPermVecElements);
+        std::vector<unsigned int>::iterator it;
+        for (unsigned int i = 0u; i < numPermVecElements; ++i)
+        {
+            it = std::find(permuteShape.begin(), permuteShape.end(), i);
+            armnnPermuteShape[i] = static_cast<unsigned int>(std::distance(permuteShape.begin(), it));
+        }
 
-        desc = PermuteDescriptor(permutationVector);
+        PermutationVector permutationVector(armnnPermuteShape.data(), permuteTensorInfo.GetNumElements());
+
+        desc = PermuteDescriptor(permutationVector);
     }
 
     layer = m_Network->AddPermuteLayer(desc, layerName.c_str());
@@ -1254,6 +1265,48 @@ void TfLiteParser::ParsePool(size_t subgraphIndex,
     RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
 }
 
+void TfLiteParser::ParseSlice(size_t subgraphIndex, size_t operatorIndex)
+{
+    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
+
+    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
+    CHECK_VALID_SIZE(inputs.size(), 3);
+    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
+    CHECK_VALID_SIZE(outputs.size(), 1);
+
+    SliceDescriptor desc;
+
+    // set begin tensor info for slice descriptor
+    armnn::TensorInfo beginTensorInfo = ToTensorInfo(inputs[1]);
+    BufferRawPtr beginBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
+
+    std::vector<unsigned int> begin(beginTensorInfo.GetNumElements());
+    ::memcpy(begin.data(), beginBufferPtr->data.data(), beginTensorInfo.GetNumBytes());
+
+    // set size tensor info for slice descriptor
+    armnn::TensorInfo sizeTensorInfo = ToTensorInfo(inputs[2]);
+    BufferRawPtr sizeBufferPtr = GetBuffer(m_Model, inputs[2]->buffer);
+
+    std::vector<unsigned int> size(sizeTensorInfo.GetNumElements());
+    ::memcpy(size.data(), sizeBufferPtr->data.data(), sizeTensorInfo.GetNumBytes());
+    desc = SliceDescriptor(begin, size);
+
+    auto layerName = boost::str(boost::format("Slice:%1%:%2%") % subgraphIndex % operatorIndex);
+    IConnectableLayer* const layer = m_Network->AddSliceLayer(desc, layerName.c_str());
+
+    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
+    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
+
+    // register the input connection slots for the layer, connections are made after all layers have been created
+    // only the tensors for the inputs are relevant, exclude the const tensors
+    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
+    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
+
+    // register the output connection slots for the layer, connections are made after all layers have been created
+    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
+    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
+}
+
 void TfLiteParser::ParseSoftmax(size_t subgraphIndex, size_t operatorIndex)
 {
     CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
diff --git a/src/armnnTfLiteParser/TfLiteParser.hpp b/src/armnnTfLiteParser/TfLiteParser.hpp
index fb01fe8ba2..5ac6a892ad 100644
--- a/src/armnnTfLiteParser/TfLiteParser.hpp
+++ b/src/armnnTfLiteParser/TfLiteParser.hpp
@@ -116,6 +116,7 @@
     void ParseRelu6(size_t subgraphIndex, size_t operatorIndex);
     void ParseReshape(size_t subgraphIndex, size_t operatorIndex);
     void ParseResizeBilinear(size_t subgraphIndex, size_t operatorIndex);
+    void ParseSlice(size_t subgraphIndex, size_t operatorIndex);
     void ParseSoftmax(size_t subgraphIndex, size_t operatorIndex);
     void ParseSpaceToBatchND(size_t subgraphIndex, size_t operatorIndex);
     void ParseSplit(size_t subgraphIndex, size_t operatorIndex);
diff --git a/src/armnnTfLiteParser/test/Slice.cpp b/src/armnnTfLiteParser/test/Slice.cpp
new file mode 100644
index 0000000000..17d1b1a68c
--- /dev/null
+++ b/src/armnnTfLiteParser/test/Slice.cpp
@@ -0,0 +1,176 @@
+//
+// Copyright © 2019 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include <boost/test/unit_test.hpp>
+#include "ParserFlatbuffersFixture.hpp"
+#include "../TfLiteParser.hpp"
+
+BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
+
+struct SliceFixture : public ParserFlatbuffersFixture
+{
+    explicit SliceFixture(const std::string & inputShape,
+                          const std::string & outputShape,
+                          const std::string & beginData,
+                          const std::string & sizeData)
+    {
+        m_JsonString = R"(
+            {
+                "version": 3,
+                "operator_codes": [
+                    {
+                        "builtin_code": "SLICE",
+                        "version": 1
+                    }
+                ],
+                "subgraphs": [
+                    {
+                        "tensors": [
+                            {
+                                "shape": )" + inputShape + R"(,
+                                "type": "FLOAT32",
+                                "buffer": 0,
+                                "name": "inputTensor",
+                                "quantization": {
+                                    "min": [
+                                        0.0
+                                    ],
+                                    "max": [
+                                        255.0
+                                    ],
+                                    "details_type": 0,
+                                    "quantized_dimension": 0
+                                },
+                                "is_variable": false
+                            },
+                            {
+                                "shape": )" + outputShape + R"(,
+                                "type": "FLOAT32",
+                                "buffer": 1,
+                                "name": "outputTensor",
+                                "quantization": {
+                                    "details_type": 0,
+                                    "quantized_dimension": 0
+                                },
+                                "is_variable": false
+                            })";
+        m_JsonString += R"(,
+                            {
+                                "shape": [
+                                    3
+                                ],
+                                "type": "INT32",
+                                "buffer": 2,
+                                "name": "beginTensor",
+                                "quantization": {
+                                }
+                            })";
+        m_JsonString += R"(,
+                            {
+                                "shape": [
+                                    3
+                                ],
+                                "type": "INT32",
+                                "buffer": 3,
+                                "name": "sizeTensor",
+                                "quantization": {
+                                }
+                            })";
+        m_JsonString += R"(],
+                        "inputs": [
+                            0
+                        ],
+                        "outputs": [
+                            1
+                        ],
+                        "operators": [
+                            {
+                                "opcode_index": 0,
+                                "inputs": [
+                                    0,
+                                    2,
+                                    3)";
+        m_JsonString += R"(],
+                                "outputs": [
+                                    1
+                                ],
+                                mutating_variable_inputs: [
+                                ]
+                            }
+                        ]
+                    }
+                ],
+                "description": "TOCO Converted.",
+                "buffers": [
+                    { },
+                    { })";
+        m_JsonString += R"(,{"data": )" + beginData + R"( })";
+        m_JsonString += R"(,{"data": )" + sizeData + R"( })";
+        m_JsonString += R"(
+            ]
+        }
+    )";
+        SetupSingleInputSingleOutput("inputTensor", "outputTensor");
+    }
+};
+
+struct SliceFixtureSingleDim : SliceFixture
+{
+    SliceFixtureSingleDim() : SliceFixture("[ 3, 2, 3 ]",
+                                           "[ 1, 1, 3 ]",
+                                           "[ 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]",
+                                           "[ 1, 0, 0, 0, 1, 0, 0, 0, 3, 0, 0, 0 ]") {}
+};
+
+BOOST_FIXTURE_TEST_CASE(SliceSingleDim, SliceFixtureSingleDim)
+{
+    RunTest<3, armnn::DataType::Float32>(
+        0,
+        {{"inputTensor", { 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6 }}},
+        {{"outputTensor", { 3, 3, 3 }}});
+
+    BOOST_TEST((m_Parser->GetNetworkOutputBindingInfo(0, "outputTensor").second.GetShape()
+                == armnn::TensorShape({1,1,3})));
+}
+
+struct SliceFixtureD123 : SliceFixture
+{
+    SliceFixtureD123() : SliceFixture("[ 3, 2, 3 ]",
+                                      "[ 1, 2, 3 ]",
+                                      "[ 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]",
+                                      "[ 1, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 0 ]") {}
+};
+
+BOOST_FIXTURE_TEST_CASE(SliceD123, SliceFixtureD123)
+{
+    RunTest<3, armnn::DataType::Float32>(
+        0,
+        {{"inputTensor", { 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6 }}},
+        {{"outputTensor", { 3, 3, 3, 4, 4, 4 }}});
+
+    BOOST_TEST((m_Parser->GetNetworkOutputBindingInfo(0, "outputTensor").second.GetShape()
+                == armnn::TensorShape({1,2,3})));
+}
+
+struct SliceFixtureD213 : SliceFixture
+{
+    SliceFixtureD213() : SliceFixture("[ 3, 2, 3 ]",
+                                      "[ 2, 1, 3 ]",
+                                      "[ 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]",
+                                      "[ 2, 0, 0, 0, 1, 0, 0, 0, 3, 0, 0, 0 ]") {}
+};
+
+BOOST_FIXTURE_TEST_CASE(SliceD213, SliceFixtureD213)
+{
+    RunTest<3, armnn::DataType::Float32>(
+        0,
+        {{"inputTensor", { 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6 }}},
+        {{"outputTensor", { 3, 3, 3, 5, 5, 5 }}});
+
+    BOOST_TEST((m_Parser->GetNetworkOutputBindingInfo(0, "outputTensor").second.GetShape()
+                == armnn::TensorShape({2,1,3})));
+}
+
+BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
diff --git a/src/armnnTfLiteParser/test/Transpose.cpp b/src/armnnTfLiteParser/test/Transpose.cpp
index 2e3190b62e..b2f953e75d 100644
--- a/src/armnnTfLiteParser/test/Transpose.cpp
+++ b/src/armnnTfLiteParser/test/Transpose.cpp
@@ -55,24 +55,20 @@ struct TransposeFixture : public ParserFlatbuffersFixture
                 },
                 "is_variable": false
             })";
-        if (!permuteData.empty())
-        {
-            m_JsonString += R"(,
-            {
-                "shape": [
-                    3
-                ],
-                "type": "INT32",
-                "buffer": 2,
-                "name": "permuteTensor",
-                "quantization": {
-                    "details_type": 0,
-                    "quantized_dimension": 0
-                },
-                "is_variable": false
-            })";
-        }
-
+        m_JsonString += R"(,
+            {
+                "shape": [
+                    3
+                ],
+                "type": "INT32",
+                "buffer": 2,
+                "name": "permuteTensor",
+                "quantization": {
+                    "details_type": 0,
+                    "quantized_dimension": 0
+                },
+                "is_variable": false
+            })";
         m_JsonString += R"(],
             "inputs": [
                 0
@@ -85,10 +81,7 @@ struct TransposeFixture : public ParserFlatbuffersFixture
                     "opcode_index": 0,
                     "inputs": [
                         0)";
-        if (!permuteData.empty())
-        {
-            m_JsonString += R"(,2)";
-        }
+        m_JsonString += R"(,2)";
         m_JsonString += R"(],
                     "outputs": [
                         1
@@ -117,6 +110,7 @@ struct TransposeFixture : public ParserFlatbuffersFixture
     }
 };
 
+// Note that this assumes the Tensorflow permutation vector implementation as opposed to the armnn implementation.
 struct TransposeFixtureWithPermuteData : TransposeFixture
 {
     TransposeFixtureWithPermuteData() : TransposeFixture("[ 2, 2, 3 ]",
@@ -128,29 +122,32 @@ BOOST_FIXTURE_TEST_CASE(TransposeWithPermuteData, TransposeFixtureWithPermuteDat
 {
     RunTest<3, armnn::DataType::Float32>(
         0,
-            {{"inputTensor", { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12 }}},
-            {{"outputTensor", { 1, 4, 2, 5, 3, 6, 7, 10, 8, 11, 9, 12 }}});
+        {{"inputTensor", { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12 }}},
+        {{"outputTensor", { 1, 4, 2, 5, 3, 6, 7, 10, 8, 11, 9, 12 }}});
 
     BOOST_TEST((m_Parser->GetNetworkOutputBindingInfo(0, "outputTensor").second.GetShape()
                 == armnn::TensorShape({2,3,2})));
 }
 
+// Tensorflow's default permutation behavior when no permute argument is given is to create the permute vector [n-1...0],
+// where n is the number of dimensions of the input tensor.
+// In this case we should get output shape 3,2,2 given the default permutation vector 2,1,0.
 struct TransposeFixtureWithoutPermuteData : TransposeFixture
 {
     TransposeFixtureWithoutPermuteData() : TransposeFixture("[ 2, 2, 3 ]",
-                                                            "",
-                                                            "[ 2, 3, 2 ]") {}
+                                                            "[ 2, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0 ]",
+                                                            "[ 3, 2, 2 ]") {}
 };
 
 BOOST_FIXTURE_TEST_CASE(TransposeWithoutPermuteDims, TransposeFixtureWithoutPermuteData)
 {
     RunTest<3, armnn::DataType::Float32>(
         0,
-            {{"inputTensor", { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12 }}},
-            {{"outputTensor", { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12 }}});
+        {{"inputTensor", { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12 }}},
+        {{"outputTensor", { 1, 7, 4, 10, 2, 8, 5, 11, 3, 9, 6, 12 }}});
 
     BOOST_TEST((m_Parser->GetNetworkOutputBindingInfo(0, "outputTensor").second.GetShape()
-                == armnn::TensorShape({2,3,2})));
+                == armnn::TensorShape({3,2,2})));
 }
 
 BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
--
cgit v1.2.1
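For readers following the transpose change above: a Tf/Np permutation vector lists, for each output dimension, the input dimension it is drawn from, while an ArmNN PermutationVector maps each input dimension to its destination, so the two conventions are inverse permutations of each other. Below is a minimal standalone sketch (not part of the patch; the file name, variable names and example permutation are illustrative only) of the same inversion the new ParseTranspose loop performs.

// invert_perm_example.cpp - illustrative only, mirrors the loop added to ParseTranspose.
#include <algorithm>
#include <cstdio>
#include <vector>

int main()
{
    // Tf/Np style perm vector: output dim i takes input dim tfPerm[i].
    const std::vector<unsigned int> tfPerm = { 3, 0, 1, 2 };

    // Invert it: armnnPerm[i] is the position at which the value i appears in tfPerm.
    std::vector<unsigned int> armnnPerm(tfPerm.size());
    for (unsigned int i = 0u; i < tfPerm.size(); ++i)
    {
        auto it = std::find(tfPerm.begin(), tfPerm.end(), i);
        armnnPerm[i] = static_cast<unsigned int>(std::distance(tfPerm.begin(), it));
    }

    // Prints "1 2 3 0", matching the example in the patch comment (3,0,1,2 -> 1,2,3,0).
    for (unsigned int v : armnnPerm)
    {
        std::printf("%u ", v);
    }
    std::printf("\n");
    return 0;
}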