-rw-r--r--  Android.mk                                |   1
-rw-r--r--  CMakeLists.txt                            |   2
-rw-r--r--  src/armnn/test/GraphTests.cpp             |  12
-rw-r--r--  src/armnn/test/GraphUtils.cpp             |  63
-rw-r--r--  src/armnn/test/GraphUtils.hpp             |  28
-rw-r--r--  src/armnnTfParser/TfParser.cpp            |  22
-rw-r--r--  src/armnnTfParser/TfParser.hpp            |   3
-rw-r--r--  src/armnnTfParser/test/Assert.cpp         | 299
-rw-r--r--  src/armnnUtils/ParserPrototxtFixture.hpp  |  16
9 files changed, 410 insertions(+), 36 deletions(-)
diff --git a/Android.mk b/Android.mk
index ad8026ada0..4c8c492766 100644
--- a/Android.mk
+++ b/Android.mk
@@ -215,6 +215,7 @@ LOCAL_SRC_FILES := \
src/armnn/test/EndToEndTest.cpp \
src/armnn/test/UtilsTests.cpp \
src/armnn/test/GraphTests.cpp \
+ src/armnn/test/GraphUtils.cpp \
src/armnn/test/RuntimeTests.cpp \
src/armnn/test/SubGraphTests.cpp \
src/armnn/test/TensorTest.cpp \
diff --git a/CMakeLists.txt b/CMakeLists.txt
index be68798281..22725aeef4 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -367,6 +367,7 @@ if(BUILD_UNIT_TESTS)
src/armnn/test/EndToEndTest.cpp
src/armnn/test/FloatingPointConverterTest.cpp
src/armnn/test/GraphTests.cpp
+ src/armnn/test/GraphUtils.cpp
src/armnn/test/GraphUtils.hpp
src/armnn/test/InstrumentTests.cpp
src/armnn/test/LayerValidateOutputTest.cpp
@@ -390,6 +391,7 @@ if(BUILD_UNIT_TESTS)
src/armnnTfParser/test/Activations.cpp
src/armnnTfParser/test/Addition.cpp
src/armnnTfParser/test/AddN.cpp
+ src/armnnTfParser/test/Assert.cpp
src/armnnTfParser/test/BiasAdd.cpp
src/armnnTfParser/test/BroadcastForAdd.cpp
src/armnnTfParser/test/Convolution2d.cpp
diff --git a/src/armnn/test/GraphTests.cpp b/src/armnn/test/GraphTests.cpp
index 0c0ba8b000..cca4653509 100644
--- a/src/armnn/test/GraphTests.cpp
+++ b/src/armnn/test/GraphTests.cpp
@@ -29,18 +29,6 @@ bool CheckOrder(const armnn::Graph& graph, const armnn::Layer* first, const armn
return (secondPos != order.end());
}
-static armnn::Layer* GetFirstLayerWithName(armnn::Graph& graph, const std::string& name)
-{
- for (auto&& layer : graph)
- {
- if (layer->GetNameStr() == name)
- {
- return layer;
- }
- }
- return nullptr;
-}
-
BOOST_AUTO_TEST_SUITE(Graph)
BOOST_AUTO_TEST_CASE(ClassGraph)
diff --git a/src/armnn/test/GraphUtils.cpp b/src/armnn/test/GraphUtils.cpp
new file mode 100644
index 0000000000..1f9bb44d3d
--- /dev/null
+++ b/src/armnn/test/GraphUtils.cpp
@@ -0,0 +1,63 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "GraphUtils.hpp"
+
+bool GraphHasNamedLayer(const armnn::Graph& graph, const std::string& name)
+{
+ for (auto&& layer : graph)
+ {
+ if (layer->GetName() == name)
+ {
+ return true;
+ }
+ }
+ return false;
+}
+
+armnn::Layer* GetFirstLayerWithName(armnn::Graph& graph, const std::string& name)
+{
+ for (auto&& layer : graph)
+ {
+ if (layer->GetNameStr() == name)
+ {
+ return layer;
+ }
+ }
+ return nullptr;
+}
+
+bool CheckNumberOfInputSlot(armnn::Layer* layer, unsigned int num)
+{
+ return layer->GetNumInputSlots() == num;
+}
+
+bool CheckNumberOfOutputSlot(armnn::Layer* layer, unsigned int num)
+{
+ return layer->GetNumOutputSlots() == num;
+}
+
+bool IsConnected(armnn::Layer* srcLayer, armnn::Layer* destLayer,
+ unsigned int srcSlot, unsigned int destSlot,
+ const armnn::TensorInfo& expectedTensorInfo)
+{
+ const armnn::IOutputSlot& outputSlot = srcLayer->GetOutputSlot(srcSlot);
+ const armnn::TensorInfo& tensorInfo = outputSlot.GetTensorInfo();
+ if (expectedTensorInfo != tensorInfo)
+ {
+ return false;
+ }
+ const unsigned int numConnections = outputSlot.GetNumConnections();
+ for (unsigned int c = 0; c < numConnections; ++c)
+ {
+ auto inputSlot = boost::polymorphic_downcast<const armnn::InputSlot*>(outputSlot.GetConnection(c));
+ if (inputSlot->GetOwningLayer().GetNameStr() == destLayer->GetNameStr() &&
+ inputSlot->GetSlotIndex() == destSlot)
+ {
+ return true;
+ }
+ }
+ return false;
+}
diff --git a/src/armnn/test/GraphUtils.hpp b/src/armnn/test/GraphUtils.hpp
index 04f9727dc0..b51e4d179e 100644
--- a/src/armnn/test/GraphUtils.hpp
+++ b/src/armnn/test/GraphUtils.hpp
@@ -8,18 +8,16 @@
#include <string>
-namespace
-{
-
-bool GraphHasNamedLayer(const armnn::Graph& graph, const std::string& name)
-{
- for (auto&& layer : graph)
- {
- if (layer->GetName() == name)
- {
- return true;
- }
- }
- return false;
-}
-} \ No newline at end of file
+
+bool GraphHasNamedLayer(const armnn::Graph& graph, const std::string& name);
+
+armnn::Layer* GetFirstLayerWithName(armnn::Graph& graph, const std::string& name);
+
+bool CheckNumberOfInputSlot(armnn::Layer* layer, unsigned int num);
+
+bool CheckNumberOfOutputSlot(armnn::Layer* layer, unsigned int num);
+
+bool IsConnected(armnn::Layer* srcLayer, armnn::Layer* destLayer,
+ unsigned int srcSlot, unsigned int destSlot,
+ const armnn::TensorInfo& expectedTensorInfo);
+
diff --git a/src/armnnTfParser/TfParser.cpp b/src/armnnTfParser/TfParser.cpp
index 1a0047fce6..7f04757b75 100644
--- a/src/armnnTfParser/TfParser.cpp
+++ b/src/armnnTfParser/TfParser.cpp
@@ -357,7 +357,11 @@ const std::map<std::string, TfParser::OperationParsingFunction> TfParser::ms_Ope
{ "Minimum", &TfParser::ParseMinimum },
{ "Equal", &TfParser::ParseEqual },
{ "Pad", &TfParser::ParsePad },
- { "Sub", &TfParser::ParseSub },
+ { "Sub", &TfParser::ParseSub }
+};
+
+const std::list<std::string> TfParser::m_ControlInputs = {
+ "Assert"
};
ITfParser* ITfParser::CreateRaw()
@@ -544,14 +548,8 @@ TfParser::GetTfInputNodes(const tensorflow::NodeDef& nodeDef) const
if (nodeDef.input(j)[0] == '^') // I couldn't find a better test for control inputs.
{
- throw ParseException(
- boost::str(
- boost::format(
- "Node '%1%' has Control Input '%2%' for input #%3% which is unsupported. %4%")
- % nodeDef.name()
- % nodeDef.input(j)
- % j
- % CHECK_LOCATION().AsString()));
+            // We currently allow control inputs in the TensorFlow graph but ignore them in the ArmNN graph.
+            continue;
}
auto inputIt = m_NodesByName.find(outputId.m_IndexedValue);
@@ -2941,6 +2939,12 @@ void TfParser::LoadNodeDef(const tensorflow::NodeDef& nodeDef, const tensorflow:
}
const std::string& operation = nodeDef.op();
+ auto itControlInput = std::find(m_ControlInputs.begin(), m_ControlInputs.end(), operation);
+ if (itControlInput != m_ControlInputs.end())
+ {
+        // We currently allow control inputs in the TensorFlow graph but ignore them in the ArmNN graph.
+        return;
+ }
auto it = ms_OperationNameToParsingFunctions.find(operation);
if (it != ms_OperationNameToParsingFunctions.end())
{
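
Taken together, the TfParser.cpp hunks above mean that whole nodes whose op appears in m_ControlInputs (currently only "Assert") are skipped in LoadNodeDef, and any input name starting with '^' (TensorFlow's marker for a control dependency, e.g. "^Assert") is now skipped in GetTfInputNodes instead of raising a ParseException. A minimal standalone sketch of that skip logic, using illustrative names rather than the actual TfParser members:

// Standalone sketch (illustrative names only, not the real TfParser code):
// ops listed in a control-input set are dropped entirely, and inputs whose
// names start with '^' (TensorFlow control dependencies) are ignored.
#include <set>
#include <string>
#include <vector>

static const std::set<std::string> controlInputOps = { "Assert" };

bool ShouldSkipNode(const std::string& op)
{
    // The whole node is skipped, so no ArmNN layer is created for it.
    return controlInputOps.count(op) != 0;
}

std::vector<std::string> FilterDataInputs(const std::vector<std::string>& inputNames)
{
    std::vector<std::string> dataInputs;
    for (const std::string& name : inputNames)
    {
        if (!name.empty() && name[0] == '^') // control dependency, e.g. "^Assert"
        {
            continue;
        }
        dataInputs.push_back(name);
    }
    return dataInputs;
}
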
diff --git a/src/armnnTfParser/TfParser.hpp b/src/armnnTfParser/TfParser.hpp
index 0d1e497e29..20c5233c8e 100644
--- a/src/armnnTfParser/TfParser.hpp
+++ b/src/armnnTfParser/TfParser.hpp
@@ -10,6 +10,7 @@
#include "armnn/Tensor.hpp"
#include "armnn/INetwork.hpp"
+#include <list>
#include <map>
#include <memory>
#include <unordered_map>
@@ -241,6 +242,8 @@ private:
/// Map of TensorFlow operation names to parsing member functions.
static const std::map<std::string, OperationParsingFunction> ms_OperationNameToParsingFunctions;
+ static const std::list<std::string> m_ControlInputs;
+
std::map<std::string, armnn::TensorShape> m_InputShapes;
std::vector<std::string> m_RequestedOutputs;
diff --git a/src/armnnTfParser/test/Assert.cpp b/src/armnnTfParser/test/Assert.cpp
new file mode 100644
index 0000000000..111f158641
--- /dev/null
+++ b/src/armnnTfParser/test/Assert.cpp
@@ -0,0 +1,299 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "armnnTfParser/ITfParser.hpp"
+#include "ParserPrototxtFixture.hpp"
+#include "test/GraphUtils.hpp"
+
+#include <boost/test/unit_test.hpp>
+
+BOOST_AUTO_TEST_SUITE(TensorflowParser)
+
+struct AssertSimpleFixture : public armnnUtils::ParserPrototxtFixture<armnnTfParser::ITfParser>
+{
+ AssertSimpleFixture()
+ {
+ // Placeholder AssertInput
+ // | \ /
+ // Add ------ Assert
+
+ m_Prototext = R"(
+ node {
+ name: "Placeholder"
+ op: "Placeholder"
+ attr {
+ key: "dtype"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "shape"
+ value {
+ shape {
+ unknown_rank: true
+ }
+ }
+ }
+ }
+ node {
+ name: "AssertInput"
+ op: "Const"
+ attr {
+ key: "dtype"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_FLOAT
+ tensor_shape {
+ dim {
+ size: 1
+ }
+ }
+ float_val: 17.0
+ }
+ }
+ }
+ }
+ node {
+ name: "Assert"
+ op: "Assert"
+ input: "Placeholder"
+ input: "AssertInput"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ }
+ node {
+ name: "Add"
+ op: "Add"
+ input: "Placeholder"
+ input: "Placeholder"
+ input: "^Assert"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ })";
+ }
+};
+
+BOOST_FIXTURE_TEST_CASE(AssertSimpleTest, AssertSimpleFixture)
+{
+ SetupSingleInputSingleOutput({ 1, 1, 1, 4 }, "Placeholder", "Add");
+ RunTest<4>({ 1.0f, 2.0f, 3.0f, 4.0f }, { 2.0f, 4.0f, 6.0f, 8.0f });
+}
+
+BOOST_FIXTURE_TEST_CASE(AssertSimpleGraphStructureTest, AssertSimpleFixture)
+{
+ auto optimized = SetupOptimizedNetwork({ { "Placeholder", { 1, 1, 1, 4 } } }, { "Add" });
+
+ auto optimizedNetwork = boost::polymorphic_downcast<armnn::OptimizedNetwork*>(optimized.get());
+ auto graph = optimizedNetwork->GetGraph();
+
+ BOOST_TEST((graph.GetNumInputs() == 1));
+ BOOST_TEST((graph.GetNumOutputs() == 1));
+ BOOST_TEST((graph.GetNumLayers() == 3));
+
+ armnn::Layer* inputLayer = GetFirstLayerWithName(graph, "Placeholder");
+ BOOST_TEST((inputLayer->GetType() == armnn::LayerType::Input));
+ BOOST_TEST(CheckNumberOfInputSlot(inputLayer, 0));
+ BOOST_TEST(CheckNumberOfOutputSlot(inputLayer, 1));
+
+ armnn::Layer* addLayer = GetFirstLayerWithName(graph, "Add");
+ BOOST_TEST((addLayer->GetType() == armnn::LayerType::Addition));
+ BOOST_TEST(CheckNumberOfInputSlot(addLayer, 2));
+ BOOST_TEST(CheckNumberOfOutputSlot(addLayer, 1));
+
+ armnn::TensorInfo tensorInfo(armnn::TensorShape({1, 1, 1, 4}), armnn::DataType::Float32);
+ BOOST_TEST(IsConnected(inputLayer, addLayer, 0, 0, tensorInfo));
+ BOOST_TEST(IsConnected(inputLayer, addLayer, 0, 1, tensorInfo));
+
+ for (auto&& outputLayer : graph.GetOutputLayers())
+ {
+ BOOST_TEST(IsConnected(addLayer, const_cast<armnn::OutputLayer*>(outputLayer), 0, 0, tensorInfo));
+ }
+}
+
+struct AssertFixture : public armnnUtils::ParserPrototxtFixture<armnnTfParser::ITfParser>
+{
+ AssertFixture()
+ {
+ // Input0 Input1 Input2
+ // | \ / |
+ // | Sub ------ Assert
+ // \ / /
+ // Output -------
+
+ m_Prototext = R"(
+ node {
+ name: "Input0"
+ op: "Placeholder"
+ attr {
+ key: "dtype"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "shape"
+ value {
+ shape {
+ unknown_rank: true
+ }
+ }
+ }
+ }
+ node {
+ name: "Input1"
+ op: "Placeholder"
+ attr {
+ key: "dtype"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "shape"
+ value {
+ shape {
+ unknown_rank: true
+ }
+ }
+ }
+ }
+ node {
+ name: "Sub"
+ op: "Sub"
+ input: "Input0"
+ input: "Input1"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ }
+ node {
+ name: "Input2"
+ op: "Placeholder"
+ attr {
+ key: "dtype"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "shape"
+ value {
+ shape {
+ unknown_rank: true
+ }
+ }
+ }
+ }
+ node {
+ name: "Assert"
+ op: "Assert"
+ input: "Input2"
+ input: "Sub"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ }
+ node {
+ name: "Output"
+ op: "Add"
+ input: "Input0"
+ input: "Sub"
+ input: "^Assert"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ })";
+
+
+ }
+};
+
+BOOST_FIXTURE_TEST_CASE(AssertTest, AssertFixture)
+{
+ Setup({ { "Input0", { 1, 1, 2, 2 } },
+ { "Input1", { 1, 1, 2, 2 } } },
+ { "Output" });
+
+ RunTest<4>({ { "Input0", { 4.0f, 3.0f,
+ 2.0f, 1.0f } },
+
+ { "Input1", { 1.0f, 2.0f,
+ 3.0f, 4.0f } } },
+
+ { { "Output", { 7.0f, 4.0f,
+ 1.0f, -2.0f } } });
+}
+
+BOOST_FIXTURE_TEST_CASE(AssertGraphStructureTest, AssertFixture)
+{
+ auto optimized = SetupOptimizedNetwork({ { "Input0", { 1, 1, 2, 2 } },
+ { "Input1", { 1, 1, 2, 2 } } },
+ { "Output" });
+
+ auto optimizedNetwork = boost::polymorphic_downcast<armnn::OptimizedNetwork*>(optimized.get());
+ auto graph = optimizedNetwork->GetGraph();
+
+ BOOST_TEST((graph.GetNumInputs() == 2));
+ BOOST_TEST((graph.GetNumOutputs() == 1));
+ BOOST_TEST((graph.GetNumLayers() == 5));
+
+ armnn::Layer* inputLayer0 = GetFirstLayerWithName(graph, "Input0");
+ BOOST_TEST((inputLayer0->GetType() == armnn::LayerType::Input));
+ BOOST_TEST(CheckNumberOfInputSlot(inputLayer0, 0));
+ BOOST_TEST(CheckNumberOfOutputSlot(inputLayer0, 1));
+
+ armnn::Layer* inputLayer1 = GetFirstLayerWithName(graph, "Input1");
+ BOOST_TEST((inputLayer1->GetType() == armnn::LayerType::Input));
+ BOOST_TEST(CheckNumberOfInputSlot(inputLayer1, 0));
+ BOOST_TEST(CheckNumberOfOutputSlot(inputLayer1, 1));
+
+ armnn::Layer* subLayer = GetFirstLayerWithName(graph, "Sub");
+ BOOST_TEST((subLayer->GetType() == armnn::LayerType::Subtraction));
+ BOOST_TEST(CheckNumberOfInputSlot(subLayer, 2));
+ BOOST_TEST(CheckNumberOfOutputSlot(subLayer, 1));
+
+ armnn::Layer* addLayer = GetFirstLayerWithName(graph, "Output");
+ BOOST_TEST((addLayer->GetType() == armnn::LayerType::Addition));
+ BOOST_TEST(CheckNumberOfInputSlot(addLayer, 2));
+ BOOST_TEST(CheckNumberOfOutputSlot(addLayer, 1));
+
+ armnn::TensorInfo tensorInfo(armnn::TensorShape({1, 1, 2, 2}), armnn::DataType::Float32);
+ BOOST_TEST(IsConnected(inputLayer0, subLayer, 0, 0, tensorInfo));
+ BOOST_TEST(IsConnected(inputLayer1, subLayer, 0, 1, tensorInfo));
+ BOOST_TEST(IsConnected(inputLayer0, addLayer, 0, 0, tensorInfo));
+ BOOST_TEST(IsConnected(subLayer, addLayer, 0, 1, tensorInfo));
+
+ for (auto&& outputLayer : graph.GetOutputLayers())
+ {
+ BOOST_TEST(IsConnected(addLayer, const_cast<armnn::OutputLayer*>(outputLayer), 0, 0, tensorInfo));
+ }
+}
+
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/armnnUtils/ParserPrototxtFixture.hpp b/src/armnnUtils/ParserPrototxtFixture.hpp
index 669b1fd0ca..fa21aba479 100644
--- a/src/armnnUtils/ParserPrototxtFixture.hpp
+++ b/src/armnnUtils/ParserPrototxtFixture.hpp
@@ -6,10 +6,12 @@
#pragma once
#include <armnn/IRuntime.hpp>
+
#include <test/TensorHelpers.hpp>
#include <armnnOnnxParser/IOnnxParser.hpp>
+#include <Network.hpp>
#include <VerificationHelpers.hpp>
#include <backendsCommon/BackendRegistry.hpp>
@@ -40,6 +42,9 @@ struct ParserPrototxtFixture
void Setup(const std::map<std::string, armnn::TensorShape>& inputShapes,
const std::vector<std::string>& requestedOutputs);
void Setup();
+ armnn::IOptimizedNetworkPtr SetupOptimizedNetwork(
+ const std::map<std::string,armnn::TensorShape>& inputShapes,
+ const std::vector<std::string>& requestedOutputs);
/// @}
/// Executes the network with the given input tensor and checks the result against the given output tensor.
@@ -125,6 +130,17 @@ void ParserPrototxtFixture<TParser>::Setup()
}
template<typename TParser>
+armnn::IOptimizedNetworkPtr ParserPrototxtFixture<TParser>::SetupOptimizedNetwork(
+ const std::map<std::string,armnn::TensorShape>& inputShapes,
+ const std::vector<std::string>& requestedOutputs)
+{
+ armnn::INetworkPtr network =
+ m_Parser->CreateNetworkFromString(m_Prototext.c_str(), inputShapes, requestedOutputs);
+ auto optimized = Optimize(*network, { armnn::Compute::CpuRef }, m_Runtime->GetDeviceSpec());
+ return optimized;
+}
+
+template<typename TParser>
template <std::size_t NumOutputDimensions>
void ParserPrototxtFixture<TParser>::RunTest(const std::vector<float>& inputData,
const std::vector<float>& expectedOutputData)