From bbf71a6478d62a8fdc9d153787e26db766efbc16 Mon Sep 17 00:00:00 2001
From: Narumol Prangnawarat <narumol.prangnawarat@arm.com>
Date: Mon, 7 Sep 2020 14:05:22 +0100
Subject: IVGCVSW-5244 Load-scope dynamic tensor TfLite tests

* Infer tensor shapes at the beginning of Optimize function
* Unit tests

Signed-off-by: Narumol Prangnawarat <narumol.prangnawarat@arm.com>
Change-Id: I511f1228a12ebcad570e42a0c46d461ab9ccdc2c
---
 src/armnn/Network.cpp                              |   6 +-
 src/armnnTfLiteParser/TfLiteParser.cpp             |  20 ---
 .../test/LoadScopeDynamicTensor.cpp                | 174 +++++++++++++++++++++
 3 files changed, 177 insertions(+), 23 deletions(-)
 create mode 100644 src/armnnTfLiteParser/test/LoadScopeDynamicTensor.cpp

(limited to 'src')

diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index dec9468d7b..84997a61e7 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -1037,6 +1037,9 @@ IOptimizedNetworkPtr Optimize(const INetwork& inNetwork,
     // Get the optimized graph
     Graph& optGraph = optNetObjPtr->GetGraph();
 
+    // Infer the tensor infos for all output slots. Throws an exception on failure
+    optGraph.InferTensorInfos();
+
     // Perform optimisation passes
     using namespace optimizations;
     Optimizer::Pass(optGraph, MakeOptimizations(SquashEqualPermuteSiblings(),
@@ -1053,9 +1056,6 @@ IOptimizedNetworkPtr Optimize(const INetwork& inNetwork,
                                                 PermuteAndBatchToSpaceAsDepthToSpace(),
                                                 TransposeAndBatchToSpaceAsDepthToSpace()));
 
-    // Infer the tensor infos for all output slots. Throws an exception on failure
-    optGraph.InferTensorInfos();
-
     // If Fp32 to Fp16 optimization is set convert Fp32 network to Fp16
     if (options.m_ReduceFp32ToFp16)
     {
diff --git a/src/armnnTfLiteParser/TfLiteParser.cpp b/src/armnnTfLiteParser/TfLiteParser.cpp
index 7b496173cc..8bc475347c 100644
--- a/src/armnnTfLiteParser/TfLiteParser.cpp
+++ b/src/armnnTfLiteParser/TfLiteParser.cpp
@@ -777,26 +777,6 @@ INetworkPtr TfLiteParser::CreateNetworkFromModel()
         }
     }
 
-    // if InferAndValidate set make sure all the TensorInfo set and all the dynamic output tensors are inferred
-    if (m_Options && m_Options.value().m_InferAndValidate)
-    {
-        for (subgraphIndex = 0;
-             subgraphIndex < m_SubgraphConnections.size();
-             ++subgraphIndex)
-        {
-            if (m_SubgraphConnections[subgraphIndex].size() > 0)
-            {
-                // get the last output slot on the layer
-                auto outputSlot =
-                    m_SubgraphConnections[subgraphIndex][m_SubgraphConnections[subgraphIndex].size() - 1].outputSlot;
-                if (outputSlot != nullptr)
-                {
-                    outputSlot->IsTensorInfoSet();
-                }
-            }
-        }
-    }
-
     return std::move(m_Network);
 }
 
diff --git a/src/armnnTfLiteParser/test/LoadScopeDynamicTensor.cpp b/src/armnnTfLiteParser/test/LoadScopeDynamicTensor.cpp
new file mode 100644
index 0000000000..c4f0db7f49
--- /dev/null
+++ b/src/armnnTfLiteParser/test/LoadScopeDynamicTensor.cpp
@@ -0,0 +1,174 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "armnnTfLiteParser/ITfLiteParser.hpp"
+#include "ParserFlatbuffersFixture.hpp"
+
+#include <string>
+
+#include <boost/test/unit_test.hpp>
+
+BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
+
+struct LoadScopeDynamicTensorFixture : public ParserFlatbuffersFixture
+{
+    explicit LoadScopeDynamicTensorFixture(const std::string& shape0,
+                                           const std::string& shape1,
+                                           const std::string& shape2)
+    {
+        m_JsonString = R"(
+        {
+            "version": 3,
+            "operator_codes": [
+                {
+                    "builtin_code": "AVERAGE_POOL_2D",
+                    "version": 1
+                },
+                {
+                    "builtin_code": "SOFTMAX",
+                    "version": 1
+                }
+            ],
+            "subgraphs": [
+                {
+                    "tensors": [
+                        {
+                            "shape": )" + shape0 + R"(,
+                            "type": "FLOAT32",
+                            "buffer": 1,
+                            "name": "input0",
+                            "quantization": {
+                                "details_type": 0,
+                                "quantized_dimension": 0
+                            },
+                            "is_variable": false
+                        },
+                        {
+                            "shape": )" + shape1 + R"(,
+                            "type": "FLOAT32",
+                            "buffer": 3,
+                            "name": "output",
+                            "quantization": {
+                                "details_type": 0,
+                                "quantized_dimension": 0
+                            },
+                            "is_variable": false
+                        },
+                        {
+                            "shape": )" + shape2 + R"(,
+                            "type": "FLOAT32",
+                            "buffer": 2,
+                            "name": "model/average_pooling2d/AvgPool",
+                            "quantization": {
+                                "details_type": 0,
+                                "quantized_dimension": 0
+                            },
+                            "is_variable": false
+                        }
+                    ],
+                    "inputs": [
+                        0
+                    ],
+                    "outputs": [
+                        1
+                    ],
+                    "operators": [
+                        {
+                            "opcode_index": 1,
+                            "inputs": [
+                                2
+                            ],
+                            "outputs": [
+                                1
+                            ],
+                            "builtin_options_type": "SoftmaxOptions",
+                            "builtin_options": {
+                                "beta": 1.0
+                            },
+                            "custom_options_format": "FLEXBUFFERS"
+                        },
+                        {
+                            "opcode_index": 0,
+                            "inputs": [
+                                0
+                            ],
+                            "outputs": [
+                                2
+                            ],
+                            "builtin_options_type": "Pool2DOptions",
+                            "builtin_options": {
+                                "padding": "VALID",
+                                "stride_w": 2,
+                                "stride_h": 2,
+                                "filter_width": 2,
+                                "filter_height": 2,
+                                "fused_activation_function": "NONE"
+                            },
+                            "custom_options_format": "FLEXBUFFERS"
+                        }
+                    ],
+                    "name": "main"
+                }
+            ],
+            "description": "MLIR Converted.",
+            "buffers": [
+                {
+                },
+                {
+                },
+                {
+                },
+                {
+                }
+            ]
+        }
+        )";
+        Setup();
+    }
+};
+
+struct LoadScopeDynamicTensor0Fixture : LoadScopeDynamicTensorFixture
+{
+    LoadScopeDynamicTensor0Fixture() : LoadScopeDynamicTensorFixture("[ 1, 2, 3, 2 ]", "[]", "[]") {}
+};
+
+struct LoadScopeDynamicTensor1Fixture : LoadScopeDynamicTensorFixture
+{
+    LoadScopeDynamicTensor1Fixture() : LoadScopeDynamicTensorFixture("[ 1, 2, 4, 1 ]", "[ 1, 1, 2, 1 ]", "[]") {}
+};
+
+struct LoadScopeDynamicTensor2Fixture : LoadScopeDynamicTensorFixture
+{
+    LoadScopeDynamicTensor2Fixture() : LoadScopeDynamicTensorFixture("[ 1, 3, 3, 2 ]", "[ ]", "[ 1, 1, 1, 2 ]") {}
+};
+
+BOOST_FIXTURE_TEST_CASE(LoadScopeDynamicTensor0, LoadScopeDynamicTensor0Fixture)
+{
+    RunTest<4, armnn::DataType::Float32, armnn::DataType::Float32>(
+        0,
+        { {"input0", { 0.f, 1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f, 8.f, 9.f, 10.f, 11.f }} },
+        { {"output", { 0.26894143f, 0.7310586f }} },
+        true);
+}
+
+BOOST_FIXTURE_TEST_CASE(LoadScopeDynamicTensor1, LoadScopeDynamicTensor1Fixture)
+{
+    RunTest<4, armnn::DataType::Float32, armnn::DataType::Float32>(
+        0,
+        { {"input0", { 0.f, 1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f }} },
+        { {"output", { 1.f, 1.f }} },
+        true);
+}
+
+BOOST_FIXTURE_TEST_CASE(LoadScopeDynamicTensor2, LoadScopeDynamicTensor2Fixture)
+{
+    RunTest<4, armnn::DataType::Float32, armnn::DataType::Float32>(
+        0,
+        { {"input0", { 0.f, 1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f, 8.f, 0.f, 1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f, 8.f }} },
+        { {"output", { 0.7772999f, 0.22270015f }} },
+        true);
+}
+
+BOOST_AUTO_TEST_SUITE_END()
-- 
cgit v1.2.1