author     narpra01 <narumol.prangnawarat@arm.com>    2018-12-21 18:30:00 +0000
committer  Les Bell <les.bell@arm.com>                2019-01-02 09:25:42 +0000
commit     6f37f83a27160948fee366b9f195c52f78cb88f0 (patch)
tree       d09a8d5769c3ac2c8f45660d305e9a6124716310 /src/armnnTfParser
parent     c48ac8c8cea1748ebfef15144f070799d4a129c3 (diff)
download   armnn-6f37f83a27160948fee366b9f195c52f78cb88f0.tar.gz
IVGCVSW-2353 Ignore control inputs in TensorFlow parser
* Allow control inputs from the TensorFlow graph but ignore them in the ArmNN graph.
* Add a utility function to test ArmNN graph structure.
* Add ArmNN graph structure tests to the TensorFlow parser to ensure that control inputs are ignored in the ArmNN graph, as are their inputs when they are not used anywhere else.

Change-Id: Ib0ea0d2df85e3fc79b748fa4c9d20e0649352bc1
Diffstat (limited to 'src/armnnTfParser')
-rw-r--r--  src/armnnTfParser/TfParser.cpp      22
-rw-r--r--  src/armnnTfParser/TfParser.hpp       3
-rw-r--r--  src/armnnTfParser/test/Assert.cpp   299
3 files changed, 315 insertions, 9 deletions
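
The change itself is small: an input whose name starts with '^' (TensorFlow's marker for a control input) is now skipped when a node's inputs are resolved, and a node whose op appears in a short list of control-input-only operations (currently just "Assert") is skipped entirely when the graph is loaded. The snippet below is a minimal standalone sketch of that filtering logic; the helper names IsControlInput and ShouldSkipControlInputNode are illustrative only, the real checks live in TfParser::GetTfInputNodes() and TfParser::LoadNodeDef() in the diff below.

    // Standalone sketch only; not part of the parser source.
    #include <algorithm>
    #include <cassert>
    #include <list>
    #include <string>

    // TensorFlow marks a control input by prefixing the producing node's name with '^'.
    bool IsControlInput(const std::string& inputName)
    {
        return !inputName.empty() && inputName[0] == '^';
    }

    // Operations that only ever act as control inputs and can be dropped from the ArmNN graph.
    static const std::list<std::string> controlInputOps = { "Assert" };

    bool ShouldSkipControlInputNode(const std::string& op)
    {
        return std::find(controlInputOps.begin(), controlInputOps.end(), op) != controlInputOps.end();
    }

    int main()
    {
        assert(IsControlInput("^Assert"));        // control edge, ignored when wiring inputs
        assert(!IsControlInput("Placeholder"));   // ordinary data input
        assert(ShouldSkipControlInputNode("Assert"));
        assert(!ShouldSkipControlInputNode("Add"));
        return 0;
    }

A std::list mirrors the new TfParser::m_ControlInputs member used in the real code; a std::set would serve equally well for the lookup.
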
diff --git a/src/armnnTfParser/TfParser.cpp b/src/armnnTfParser/TfParser.cpp
index 1a0047fce6..7f04757b75 100644
--- a/src/armnnTfParser/TfParser.cpp
+++ b/src/armnnTfParser/TfParser.cpp
@@ -357,7 +357,11 @@ const std::map<std::string, TfParser::OperationParsingFunction> TfParser::ms_Ope
{ "Minimum", &TfParser::ParseMinimum },
{ "Equal", &TfParser::ParseEqual },
{ "Pad", &TfParser::ParsePad },
- { "Sub", &TfParser::ParseSub },
+ { "Sub", &TfParser::ParseSub }
+};
+
+const std::list<std::string> TfParser::m_ControlInputs = {
+ "Assert"
};
ITfParser* ITfParser::CreateRaw()
@@ -544,14 +548,8 @@ TfParser::GetTfInputNodes(const tensorflow::NodeDef& nodeDef) const
if (nodeDef.input(j)[0] == '^') // I couldn't find a better test for control inputs.
{
- throw ParseException(
- boost::str(
- boost::format(
- "Node '%1%' has Control Input '%2%' for input #%3% which is unsupported. %4%")
- % nodeDef.name()
- % nodeDef.input(j)
- % j
- % CHECK_LOCATION().AsString()));
+ // We currently allow control inputs from the TensorFlow graph but ignore them in the ArmNN graph.
+ continue;
}
auto inputIt = m_NodesByName.find(outputId.m_IndexedValue);
@@ -2941,6 +2939,12 @@ void TfParser::LoadNodeDef(const tensorflow::NodeDef& nodeDef, const tensorflow:
}
const std::string& operation = nodeDef.op();
+ auto itControlInput = std::find(m_ControlInputs.begin(), m_ControlInputs.end(), operation);
+ if (itControlInput != m_ControlInputs.end())
+ {
+ // We currently allow control inputs from the TensorFlow graph but ignore them in the ArmNN graph.
+ return;
+ }
auto it = ms_OperationNameToParsingFunctions.find(operation);
if (it != ms_OperationNameToParsingFunctions.end())
{
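
From the caller's side, the observable effect is that a frozen graph wiring an Assert node in through a '^' control input no longer throws a ParseException out of GetTfInputNodes(); the Assert node is simply dropped and never appears in the resulting ArmNN network. A rough usage sketch follows, assuming the public ITfParser API of this ArmNN version; the prototxt file name and tensor names are placeholders.

    #include "armnnTfParser/ITfParser.hpp"
    #include "armnn/INetwork.hpp"
    #include <map>
    #include <string>
    #include <vector>

    int main()
    {
        armnnTfParser::ITfParserPtr parser = armnnTfParser::ITfParser::Create();

        // "graph_with_assert.prototxt" is a placeholder for a frozen TF graph whose
        // "Add" node lists "^Assert" among its inputs.
        std::map<std::string, armnn::TensorShape> inputShapes = { { "Placeholder", { 1, 1, 1, 4 } } };
        std::vector<std::string> requestedOutputs = { "Add" };

        // Previously the '^Assert' control input made this call throw a ParseException;
        // with this change the Assert node is silently ignored in the ArmNN graph.
        armnn::INetworkPtr network =
            parser->CreateNetworkFromTextFile("graph_with_assert.prototxt", inputShapes, requestedOutputs);

        return network ? 0 : 1;
    }
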
diff --git a/src/armnnTfParser/TfParser.hpp b/src/armnnTfParser/TfParser.hpp
index 0d1e497e29..20c5233c8e 100644
--- a/src/armnnTfParser/TfParser.hpp
+++ b/src/armnnTfParser/TfParser.hpp
@@ -10,6 +10,7 @@
#include "armnn/Tensor.hpp"
#include "armnn/INetwork.hpp"
+#include <list>
#include <map>
#include <memory>
#include <unordered_map>
@@ -241,6 +242,8 @@ private:
/// Map of TensorFlow operation names to parsing member functions.
static const std::map<std::string, OperationParsingFunction> ms_OperationNameToParsingFunctions;
+ static const std::list<std::string> m_ControlInputs;
+
std::map<std::string, armnn::TensorShape> m_InputShapes;
std::vector<std::string> m_RequestedOutputs;
diff --git a/src/armnnTfParser/test/Assert.cpp b/src/armnnTfParser/test/Assert.cpp
new file mode 100644
index 0000000000..111f158641
--- /dev/null
+++ b/src/armnnTfParser/test/Assert.cpp
@@ -0,0 +1,299 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "armnnTfParser/ITfParser.hpp"
+#include "ParserPrototxtFixture.hpp"
+#include "test/GraphUtils.hpp"
+
+#include <boost/test/unit_test.hpp>
+
+BOOST_AUTO_TEST_SUITE(TensorflowParser)
+
+struct AssertSimpleFixture : public armnnUtils::ParserPrototxtFixture<armnnTfParser::ITfParser>
+{
+ AssertSimpleFixture()
+ {
+ //  Placeholder   AssertInput
+ //    |      \       /
+ //   Add ------- Assert
+
+ m_Prototext = R"(
+ node {
+ name: "Placeholder"
+ op: "Placeholder"
+ attr {
+ key: "dtype"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "shape"
+ value {
+ shape {
+ unknown_rank: true
+ }
+ }
+ }
+ }
+ node {
+ name: "AssertInput"
+ op: "Const"
+ attr {
+ key: "dtype"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_FLOAT
+ tensor_shape {
+ dim {
+ size: 1
+ }
+ }
+ float_val: 17.0
+ }
+ }
+ }
+ }
+ node {
+ name: "Assert"
+ op: "Assert"
+ input: "Placeholder"
+ input: "AssertInput"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ }
+ node {
+ name: "Add"
+ op: "Add"
+ input: "Placeholder"
+ input: "Placeholder"
+ input: "^Assert"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ })";
+ }
+};
+
+BOOST_FIXTURE_TEST_CASE(AssertSimpleTest, AssertSimpleFixture)
+{
+ SetupSingleInputSingleOutput({ 1, 1, 1, 4 }, "Placeholder", "Add");
+ RunTest<4>({ 1.0f, 2.0f, 3.0f, 4.0f }, { 2.0f, 4.0f, 6.0f, 8.0f });
+}
+
+BOOST_FIXTURE_TEST_CASE(AssertSimpleGraphStructureTest, AssertSimpleFixture)
+{
+ auto optimized = SetupOptimizedNetwork({ { "Placeholder", { 1, 1, 1, 4 } } }, { "Add" });
+
+ auto optimizedNetwork = boost::polymorphic_downcast<armnn::OptimizedNetwork*>(optimized.get());
+ auto graph = optimizedNetwork->GetGraph();
+
+ BOOST_TEST((graph.GetNumInputs() == 1));
+ BOOST_TEST((graph.GetNumOutputs() == 1));
+ BOOST_TEST((graph.GetNumLayers() == 3));
+
+ armnn::Layer* inputLayer = GetFirstLayerWithName(graph, "Placeholder");
+ BOOST_TEST((inputLayer->GetType() == armnn::LayerType::Input));
+ BOOST_TEST(CheckNumberOfInputSlot(inputLayer, 0));
+ BOOST_TEST(CheckNumberOfOutputSlot(inputLayer, 1));
+
+ armnn::Layer* addLayer = GetFirstLayerWithName(graph, "Add");
+ BOOST_TEST((addLayer->GetType() == armnn::LayerType::Addition));
+ BOOST_TEST(CheckNumberOfInputSlot(addLayer, 2));
+ BOOST_TEST(CheckNumberOfOutputSlot(addLayer, 1));
+
+ armnn::TensorInfo tensorInfo(armnn::TensorShape({1, 1, 1, 4}), armnn::DataType::Float32);
+ BOOST_TEST(IsConnected(inputLayer, addLayer, 0, 0, tensorInfo));
+ BOOST_TEST(IsConnected(inputLayer, addLayer, 0, 1, tensorInfo));
+
+ for (auto&& outputLayer : graph.GetOutputLayers())
+ {
+ BOOST_TEST(IsConnected(addLayer, const_cast<armnn::OutputLayer*>(outputLayer), 0, 0, tensorInfo));
+ }
+}
+
+struct AssertFixture : public armnnUtils::ParserPrototxtFixture<armnnTfParser::ITfParser>
+{
+ AssertFixture()
+ {
+ //  Input0    Input1    Input2
+ //    |   \      /         |
+ //    |     Sub ------- Assert
+ //     \    /            /
+ //     Output ----------
+
+ m_Prototext = R"(
+ node {
+ name: "Input0"
+ op: "Placeholder"
+ attr {
+ key: "dtype"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "shape"
+ value {
+ shape {
+ unknown_rank: true
+ }
+ }
+ }
+ }
+ node {
+ name: "Input1"
+ op: "Placeholder"
+ attr {
+ key: "dtype"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "shape"
+ value {
+ shape {
+ unknown_rank: true
+ }
+ }
+ }
+ }
+ node {
+ name: "Sub"
+ op: "Sub"
+ input: "Input0"
+ input: "Input1"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ }
+ node {
+ name: "Input2"
+ op: "Placeholder"
+ attr {
+ key: "dtype"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "shape"
+ value {
+ shape {
+ unknown_rank: true
+ }
+ }
+ }
+ }
+ node {
+ name: "Assert"
+ op: "Assert"
+ input: "Input2"
+ input: "Sub"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ }
+ node {
+ name: "Output"
+ op: "Add"
+ input: "Input0"
+ input: "Sub"
+ input: "^Assert"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ })";
+
+
+ }
+};
+
+BOOST_FIXTURE_TEST_CASE(AssertTest, AssertFixture)
+{
+ Setup({ { "Input0", { 1, 1, 2, 2 } },
+ { "Input1", { 1, 1, 2, 2 } } },
+ { "Output" });
+
+ RunTest<4>({ { "Input0", { 4.0f, 3.0f,
+ 2.0f, 1.0f } },
+
+ { "Input1", { 1.0f, 2.0f,
+ 3.0f, 4.0f } } },
+
+ { { "Output", { 7.0f, 4.0f,
+ 1.0f, -2.0f } } });
+}
+
+BOOST_FIXTURE_TEST_CASE(AssertGraphStructureTest, AssertFixture)
+{
+ auto optimized = SetupOptimizedNetwork({ { "Input0", { 1, 1, 2, 2 } },
+ { "Input1", { 1, 1, 2, 2 } } },
+ { "Output" });
+
+ auto optimizedNetwork = boost::polymorphic_downcast<armnn::OptimizedNetwork*>(optimized.get());
+ auto graph = optimizedNetwork->GetGraph();
+
+ BOOST_TEST((graph.GetNumInputs() == 2));
+ BOOST_TEST((graph.GetNumOutputs() == 1));
+ BOOST_TEST((graph.GetNumLayers() == 5));
+
+ armnn::Layer* inputLayer0 = GetFirstLayerWithName(graph, "Input0");
+ BOOST_TEST((inputLayer0->GetType() == armnn::LayerType::Input));
+ BOOST_TEST(CheckNumberOfInputSlot(inputLayer0, 0));
+ BOOST_TEST(CheckNumberOfOutputSlot(inputLayer0, 1));
+
+ armnn::Layer* inputLayer1 = GetFirstLayerWithName(graph, "Input1");
+ BOOST_TEST((inputLayer1->GetType() == armnn::LayerType::Input));
+ BOOST_TEST(CheckNumberOfInputSlot(inputLayer1, 0));
+ BOOST_TEST(CheckNumberOfOutputSlot(inputLayer1, 1));
+
+ armnn::Layer* subLayer = GetFirstLayerWithName(graph, "Sub");
+ BOOST_TEST((subLayer->GetType() == armnn::LayerType::Subtraction));
+ BOOST_TEST(CheckNumberOfInputSlot(subLayer, 2));
+ BOOST_TEST(CheckNumberOfOutputSlot(subLayer, 1));
+
+ armnn::Layer* addLayer = GetFirstLayerWithName(graph, "Output");
+ BOOST_TEST((addLayer->GetType() == armnn::LayerType::Addition));
+ BOOST_TEST(CheckNumberOfInputSlot(addLayer, 2));
+ BOOST_TEST(CheckNumberOfOutputSlot(addLayer, 1));
+
+ armnn::TensorInfo tensorInfo(armnn::TensorShape({1, 1, 2, 2}), armnn::DataType::Float32);
+ BOOST_TEST(IsConnected(inputLayer0, subLayer, 0, 0, tensorInfo));
+ BOOST_TEST(IsConnected(inputLayer1, subLayer, 0, 1, tensorInfo));
+ BOOST_TEST(IsConnected(inputLayer0, addLayer, 0, 0, tensorInfo));
+ BOOST_TEST(IsConnected(subLayer, addLayer, 0, 1, tensorInfo));
+
+ for (auto&& outputLayer : graph.GetOutputLayers())
+ {
+ BOOST_TEST(IsConnected(addLayer, const_cast<armnn::OutputLayer*>(outputLayer), 0, 0, tensorInfo));
+ }
+}
+
+
+BOOST_AUTO_TEST_SUITE_END()