Diffstat (limited to 'src/armnnTfParser/test')
-rw-r--r--  src/armnnTfParser/test/Activations.cpp | 113
-rw-r--r--  src/armnnTfParser/test/Addition.cpp | 78
-rw-r--r--  src/armnnTfParser/test/BiasAdd.cpp | 104
-rw-r--r--  src/armnnTfParser/test/BroadcastForAdd.cpp | 149
-rw-r--r--  src/armnnTfParser/test/Concat.cpp | 183
-rw-r--r--  src/armnnTfParser/test/ConcatOfConcats.cpp | 316
-rw-r--r--  src/armnnTfParser/test/Constant.cpp | 321
-rw-r--r--  src/armnnTfParser/test/Convolution2d.cpp | 322
-rw-r--r--  src/armnnTfParser/test/DepthwiseConvolution2d.cpp | 166
-rw-r--r--  src/armnnTfParser/test/FullyConnected.cpp | 579
-rw-r--r--  src/armnnTfParser/test/FusedBatchNorm.cpp | 175
-rw-r--r--  src/armnnTfParser/test/Identity.cpp | 161
-rw-r--r--  src/armnnTfParser/test/LocalResponseNormalization.cpp | 121
-rw-r--r--  src/armnnTfParser/test/MultiOutput.cpp | 144
-rw-r--r--  src/armnnTfParser/test/Multiplication.cpp | 172
-rw-r--r--  src/armnnTfParser/test/PassThru.cpp | 52
-rw-r--r--  src/armnnTfParser/test/Pooling.cpp | 112
-rw-r--r--  src/armnnTfParser/test/Reshape.cpp | 86
-rw-r--r--  src/armnnTfParser/test/ResizeBilinear.cpp | 114
-rw-r--r--  src/armnnTfParser/test/Shape.cpp | 94
-rw-r--r--  src/armnnTfParser/test/Softmax.cpp | 55
-rw-r--r--  src/armnnTfParser/test/Squeeze.cpp | 108
-rw-r--r--  src/armnnTfParser/test/TestDependencies.cpp | 296
-rw-r--r--  src/armnnTfParser/test/TestMultiInputsOutputs.cpp | 92
24 files changed, 4113 insertions, 0 deletions
diff --git a/src/armnnTfParser/test/Activations.cpp b/src/armnnTfParser/test/Activations.cpp
new file mode 100644
index 0000000000..72ed64d653
--- /dev/null
+++ b/src/armnnTfParser/test/Activations.cpp
@@ -0,0 +1,113 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+
+#include <boost/test/unit_test.hpp>
+#include "armnnTfParser/ITfParser.hpp"
+#include "ParserPrototxtFixture.hpp"
+
+BOOST_AUTO_TEST_SUITE(TensorflowParser)
+
+
+struct ActivationFixture : public ParserPrototxtFixture<armnnTfParser::ITfParser>
+{
+ explicit ActivationFixture(const char* activationFunction)
+ {
+ m_Prototext = "node {\n"
+ " name: \"Placeholder\"\n"
+ " op: \"Placeholder\"\n"
+ " attr {\n"
+ " key: \"dtype\"\n"
+ " value {\n"
+ " type: DT_FLOAT\n"
+ " }\n"
+ " }\n"
+ " attr {\n"
+ " key: \"shape\"\n"
+ " value {\n"
+ " shape {\n"
+ " unknown_rank: true\n"
+ " }\n"
+ " }\n"
+ " }\n"
+ "}\n"
+ "node {\n"
+ " name: \"";
+ m_Prototext.append(activationFunction);
+ m_Prototext.append("\"\n"
+ " op: \"");
+ m_Prototext.append(activationFunction);
+ m_Prototext.append("\"\n"
+ " input: \"Placeholder\"\n"
+ " attr {\n"
+ " key: \"T\"\n"
+ " value {\n"
+ " type: DT_FLOAT\n"
+ " }\n"
+ " }\n"
+ "}\n");
+
+ SetupSingleInputSingleOutput({ 1, 7 }, "Placeholder", activationFunction);
+ }
+};
+
+
+struct ReLuFixture : ActivationFixture
+{
+ ReLuFixture() : ActivationFixture("Relu") {}
+};
+BOOST_FIXTURE_TEST_CASE(ParseReLu, ReLuFixture)
+{
+ RunTest<2>({ -1.0f, -0.5f, 1.25f, -3.0f, 0.0f, 0.5f, -0.75f },
+ { 0.0f, 0.0f, 1.25f, 0.0f, 0.0f, 0.5f, 0.0f });
+}
+
+
+struct ReLu6Fixture : ActivationFixture
+{
+ ReLu6Fixture() : ActivationFixture("Relu6") {}
+};
+BOOST_FIXTURE_TEST_CASE(ParseReLu6, ReLu6Fixture)
+{
+ RunTest<2>({ -1.0f, -0.5f, 7.25f, -3.0f, 0.0f, 0.5f, -0.75f },
+ { 0.0f, 0.0f, 6.0f, 0.0f, 0.0f, 0.5f, 0.0f });
+}
+
+
+struct SigmoidFixture : ActivationFixture
+{
+ SigmoidFixture() : ActivationFixture("Sigmoid") {}
+};
+BOOST_FIXTURE_TEST_CASE(ParseSigmoid, SigmoidFixture)
+{
+ RunTest<2>({ -0.1f, -0.2f, -0.3f, -0.4f, 0.1f, 0.2f, 0.3f },
+ { 0.4750208f, 0.45016602f, 0.42555749f, 0.40131235f, 0.52497917f, 0.54983395f, 0.57444251f });
+}
+
+
+struct SoftplusFixture : ActivationFixture
+{
+ SoftplusFixture() : ActivationFixture("Softplus") {}
+};
+BOOST_FIXTURE_TEST_CASE(ParseSoftplus, SoftplusFixture)
+{
+ RunTest<2>({ -0.1f, -0.2f, -0.3f, -0.4f, 0.1f, 0.2f, 0.3f },
+ { 0.64439666f, 0.59813893f, 0.55435526f, 0.51301527f, 0.74439669f, 0.7981388f, 0.85435522f });
+}
+
+
+struct TanhFixture : ActivationFixture
+{
+ TanhFixture() : ActivationFixture("Tanh") {}
+};
+BOOST_FIXTURE_TEST_CASE(ParseTanh, TanhFixture)
+{
+ RunTest<2>({ -0.1f, -0.2f, -0.3f, -0.4f, 0.1f, 0.2f, 0.3f },
+ { -0.09966799f, -0.19737528f, -0.29131261f, -0.379949f, 0.09966799f, 0.19737528f, 0.29131261f });
+}
+
+
+
+
+BOOST_AUTO_TEST_SUITE_END()
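
The Sigmoid and Softplus expectations are element-wise evaluations of the standard formulas; the reference values can be reproduced with a short Python check (illustrative only, not part of the committed tests):

    import math
    inputs = [-0.1, -0.2, -0.3, -0.4, 0.1, 0.2, 0.3]
    sigmoid  = [1.0 / (1.0 + math.exp(-x)) for x in inputs]   # 0.4750208, 0.45016602, ...
    softplus = [math.log(1.0 + math.exp(x)) for x in inputs]  # 0.64439666, 0.59813893, ...
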
diff --git a/src/armnnTfParser/test/Addition.cpp b/src/armnnTfParser/test/Addition.cpp
new file mode 100644
index 0000000000..c9e69268c6
--- /dev/null
+++ b/src/armnnTfParser/test/Addition.cpp
@@ -0,0 +1,78 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+
+#include <boost/test/unit_test.hpp>
+#include "armnnTfParser/ITfParser.hpp"
+#include "ParserPrototxtFixture.hpp"
+
+BOOST_AUTO_TEST_SUITE(TensorflowParser)
+
+struct AdditionFixture : public ParserPrototxtFixture<armnnTfParser::ITfParser>
+{
+ AdditionFixture()
+ {
+ m_Prototext = "node { \n"
+ " name: \"graphInput\" \n"
+ " op: \"Placeholder\" \n"
+ " attr { \n"
+ " key: \"dtype\" \n"
+ " value { \n"
+ " type: DT_FLOAT \n"
+ " } \n"
+ " } \n"
+ " attr { \n"
+ " key: \"shape\" \n"
+ " value { \n"
+ " shape { \n"
+ " } \n"
+ " } \n"
+ " } \n"
+ " } \n"
+ " node { \n"
+ " name: \"softmax1\" \n"
+ " op: \"Softmax\" \n"
+ " input: \"graphInput\" \n"
+ " attr { \n"
+ " key: \"T\" \n"
+ " value { \n"
+ " type: DT_FLOAT \n"
+ " } \n"
+ " } \n"
+ " }\n"
+ " node {\n"
+ " name: \"softmax2\"\n"
+ " op : \"Softmax\"\n"
+ " input: \"graphInput\"\n"
+ " attr { \n"
+ " key: \"T\" \n"
+ " value { \n"
+ " type: DT_FLOAT \n"
+ " } \n"
+ " } \n"
+ " }\n"
+ " node {\n"
+ " name: \"addition\"\n"
+ " op : \"Add\"\n"
+ " input: \"softmax1\"\n"
+ " input: \"softmax2\"\n"
+ " attr { \n"
+ " key: \"T\" \n"
+ " value { \n"
+ " type: DT_FLOAT \n"
+ " } \n"
+ " } \n"
+ " }\n";
+
+ SetupSingleInputSingleOutput({ 1, 7 }, "graphInput", "addition");
+ }
+};
+
+BOOST_FIXTURE_TEST_CASE(ParseAddition, AdditionFixture)
+{
+ RunTest<2>({ 0, 0, 10000, 0, 0, 0, 0 }, { 0, 0, 2, 0, 0, 0, 0 });
+}
+
+
+BOOST_AUTO_TEST_SUITE_END()
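
The expected output { 0, 0, 2, 0, 0, 0, 0 } follows because a softmax of a strongly peaked input saturates to a one-hot vector, and the Add node sums the two identical softmax results. A rough sketch of that reasoning in Python (illustrative only):

    import math
    x = [0, 0, 10000, 0, 0, 0, 0]
    e = [math.exp(v - max(x)) for v in x]      # shift by max to avoid overflow
    softmax = [v / sum(e) for v in e]          # ~[0, 0, 1, 0, 0, 0, 0]
    added = [2.0 * v for v in softmax]         # ~[0, 0, 2, 0, 0, 0, 0]
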
diff --git a/src/armnnTfParser/test/BiasAdd.cpp b/src/armnnTfParser/test/BiasAdd.cpp
new file mode 100644
index 0000000000..e29aeb1057
--- /dev/null
+++ b/src/armnnTfParser/test/BiasAdd.cpp
@@ -0,0 +1,104 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+
+#include <boost/test/unit_test.hpp>
+#include "armnnTfParser/ITfParser.hpp"
+#include "ParserPrototxtFixture.hpp"
+
+BOOST_AUTO_TEST_SUITE(TensorflowParser)
+
+struct BiasAddFixture : public ParserPrototxtFixture<armnnTfParser::ITfParser>
+{
+ explicit BiasAddFixture(const std::string& dataFormat)
+ {
+ m_Prototext = R"(
+node {
+ name: "graphInput"
+ op: "Placeholder"
+ attr {
+ key: "dtype"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "shape"
+ value {
+ shape {
+ }
+ }
+ }
+}
+node {
+ name: "bias"
+ op: "Const"
+ attr {
+ key: "dtype"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_FLOAT
+ tensor_shape {
+ dim {
+ size: 3
+ }
+ }
+ float_val: 1
+ float_val: 2
+ float_val: 3
+ }
+ }
+ }
+}
+node {
+ name: "biasAdd"
+ op : "BiasAdd"
+ input: "graphInput"
+ input: "bias"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "data_format"
+ value {
+ s: ")" + dataFormat + R"("
+ }
+ }
+}
+)";
+
+ SetupSingleInputSingleOutput({ 1, 3, 1, 3 }, "graphInput", "biasAdd");
+ }
+};
+
+struct BiasAddFixtureNCHW : BiasAddFixture
+{
+ BiasAddFixtureNCHW() : BiasAddFixture("NCHW") {}
+};
+
+struct BiasAddFixtureNHWC : BiasAddFixture
+{
+ BiasAddFixtureNHWC() : BiasAddFixture("NHWC") {}
+};
+
+BOOST_FIXTURE_TEST_CASE(ParseBiasAddNCHW, BiasAddFixtureNCHW)
+{
+ RunTest<4>(std::vector<float>(9), { 1, 1, 1, 2, 2, 2, 3, 3, 3 });
+}
+
+BOOST_FIXTURE_TEST_CASE(ParseBiasAddNHWC, BiasAddFixtureNHWC)
+{
+ RunTest<4>(std::vector<float>(9), { 1, 2, 3, 1, 2, 3, 1, 2, 3 });
+}
+
+BOOST_AUTO_TEST_SUITE_END()
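
The two expected outputs differ only in the axis along which the three-element bias is broadcast: the channel axis for NCHW, the innermost axis for NHWC. An illustrative NumPy check of the same broadcast (not part of the test suite):

    import numpy as np
    bias = np.array([1.0, 2.0, 3.0], dtype=np.float32)
    x = np.zeros((1, 3, 1, 3), dtype=np.float32)              # all-zero input, as in RunTest
    nchw = (x + bias.reshape(1, 3, 1, 1)).flatten()           # 1 1 1 2 2 2 3 3 3
    nhwc = (x + bias.reshape(1, 1, 1, 3)).flatten()           # 1 2 3 1 2 3 1 2 3
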
diff --git a/src/armnnTfParser/test/BroadcastForAdd.cpp b/src/armnnTfParser/test/BroadcastForAdd.cpp
new file mode 100644
index 0000000000..4c9731d7fc
--- /dev/null
+++ b/src/armnnTfParser/test/BroadcastForAdd.cpp
@@ -0,0 +1,149 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+
+#include <boost/test/unit_test.hpp>
+#include "armnnTfParser/ITfParser.hpp"
+#include "ParserPrototxtFixture.hpp"
+// This is a special case for add, which supports broadcasting
+BOOST_AUTO_TEST_SUITE(TensorflowParser)
+
+struct BroadcastForAddFixtureSlot1 : public ParserPrototxtFixture<armnnTfParser::ITfParser>
+{
+ BroadcastForAddFixtureSlot1()
+ {
+ m_Prototext = R"(
+ node {
+ name: "graphInput"
+ op: "Placeholder"
+ attr {
+ key: "dtype"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "shape"
+ value {
+ shape {
+ }
+ }
+ }
+ }
+ node {
+ name: "Const_1"
+ op: "Const"
+ attr {
+ key: "dtype"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_FLOAT
+ tensor_shape {
+ }
+ float_val: 4.0
+ float_val: 5.0
+ }
+ }
+ }
+ }
+ node {
+ name: "Add"
+ op: "Add"
+ input: "graphInput"
+ input: "Const_1"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ }
+ )";
+
+ SetupSingleInputSingleOutput({ 1, 2, 2, 2 }, "graphInput", "Add");
+ }
+};
+
+struct BroadcastForAddFixtureSlot0 : public ParserPrototxtFixture<armnnTfParser::ITfParser>
+{
+ BroadcastForAddFixtureSlot0()
+ {
+ m_Prototext = R"(
+ node {
+ name: "graphInput"
+ op: "Placeholder"
+ attr {
+ key: "dtype"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "shape"
+ value {
+ shape {
+ }
+ }
+ }
+ }
+ node {
+ name: "Const_1"
+ op: "Const"
+ attr {
+ key: "dtype"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_FLOAT
+ tensor_shape {
+ }
+ float_val: 4.0
+ float_val: 5.0
+ }
+ }
+ }
+ }
+ node {
+ name: "Add"
+ op: "Add"
+ input: "Const_1"
+ input: "graphInput"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ }
+ )";
+
+ SetupSingleInputSingleOutput({ 1, 2, 2, 2 }, "graphInput", "Add");
+ }
+};
+
+
+BOOST_FIXTURE_TEST_CASE(ParseBroadcastForAddition1, BroadcastForAddFixtureSlot1)
+{
+ RunTest<4>({ 1.0, 1.0, 2.0, 2.0, 3.0, 3.0, 4.0, 4.0 }, { 5.0, 6.0, 6.0, 7.0, 7.0, 8.0, 8.0, 9.0 });
+}
+
+BOOST_FIXTURE_TEST_CASE(ParseBroadcastForAddition0, BroadcastForAddFixtureSlot0)
+{
+ RunTest<4>({ 1.0, 1.0, 2.0, 2.0, 3.0, 3.0, 4.0, 4.0 }, { 5.0, 6.0, 6.0, 7.0, 7.0, 8.0, 8.0, 9.0 });
+}
+
+
+
+BOOST_AUTO_TEST_SUITE_END()
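
Both fixtures exercise the same arithmetic: the two-element constant is broadcast across the innermost dimension of the { 1, 2, 2, 2 } input, whichever input slot it arrives on. An illustrative NumPy reproduction of the expected values (not repo code):

    import numpy as np
    data = np.array([1, 1, 2, 2, 3, 3, 4, 4], dtype=np.float32).reshape(1, 2, 2, 2)
    const = np.array([4.0, 5.0], dtype=np.float32)
    print((data + const).flatten())    # [5. 6. 6. 7. 7. 8. 8. 9.]
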
diff --git a/src/armnnTfParser/test/Concat.cpp b/src/armnnTfParser/test/Concat.cpp
new file mode 100644
index 0000000000..a7d5ea03af
--- /dev/null
+++ b/src/armnnTfParser/test/Concat.cpp
@@ -0,0 +1,183 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+
+#include <boost/test/unit_test.hpp>
+#include "armnnTfParser/ITfParser.hpp"
+#include "ParserPrototxtFixture.hpp"
+
+BOOST_AUTO_TEST_SUITE(TensorflowParser)
+
+struct ConcatFixture : public ParserPrototxtFixture<armnnTfParser::ITfParser>
+{
+ explicit ConcatFixture(const armnn::TensorShape& inputShape0, const armnn::TensorShape& inputShape1,
+ unsigned int concatDim)
+ {
+ m_Prototext = R"(
+ node {
+ name: "graphInput0"
+ op: "Placeholder"
+ attr {
+ key: "dtype"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "shape"
+ value {
+ shape {
+ }
+ }
+ }
+ }
+ node {
+ name: "graphInput1"
+ op: "Placeholder"
+ attr {
+ key: "dtype"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "shape"
+ value {
+ shape {
+ }
+ }
+ }
+ }
+ node {
+ name: "concat/axis"
+ op: "Const"
+ attr {
+ key: "dtype"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_INT32
+ tensor_shape {
+ }
+ int_val: )";
+
+ m_Prototext += std::to_string(concatDim);
+
+ m_Prototext += R"(
+ }
+ }
+ }
+ }
+ node {
+ name: "concat"
+ op: "ConcatV2"
+ input: "graphInput0"
+ input: "graphInput1"
+ input: "concat/axis"
+ attr {
+ key: "N"
+ value {
+ i: 2
+ }
+ }
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "Tidx"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ }
+ )";
+
+ Setup({{"graphInput0", inputShape0 },
+ {"graphInput1", inputShape1 }}, {"concat"});
+ }
+};
+
+struct ConcatFixtureNCHW : ConcatFixture
+{
+ ConcatFixtureNCHW() : ConcatFixture({ 1, 1, 2, 2 }, { 1, 1, 2, 2 }, 1 ) {}
+};
+
+struct ConcatFixtureNHWC : ConcatFixture
+{
+ ConcatFixtureNHWC() : ConcatFixture({ 1, 1, 2, 2 }, { 1, 1, 2, 2 }, 3 ) {}
+};
+
+BOOST_FIXTURE_TEST_CASE(ParseConcatNCHW, ConcatFixtureNCHW)
+{
+ RunTest<4>({{"graphInput0", {0.0, 1.0, 2.0, 3.0}},
+ {"graphInput1", {4.0, 5.0, 6.0, 7.0}}},
+ {{"concat", { 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0 }}});
+}
+
+BOOST_FIXTURE_TEST_CASE(ParseConcatNHWC, ConcatFixtureNHWC)
+{
+ RunTest<4>({{"graphInput0", {0.0, 1.0, 2.0, 3.0}},
+ {"graphInput1", {4.0, 5.0, 6.0, 7.0}}},
+ {{"concat", { 0.0, 1.0, 4.0, 5.0, 2.0, 3.0, 6.0, 7.0 }}});
+}
+
+struct ConcatFixtureDim1 : ConcatFixture
+{
+ ConcatFixtureDim1() : ConcatFixture({ 1, 2, 3, 4 }, { 1, 2, 3, 4 }, 1) {}
+};
+
+struct ConcatFixtureDim3 : ConcatFixture
+{
+ ConcatFixtureDim3() : ConcatFixture({ 1, 2, 3, 4 }, { 1, 2, 3, 4 }, 3) {}
+};
+
+BOOST_FIXTURE_TEST_CASE(ParseConcatDim1, ConcatFixtureDim1)
+{
+ RunTest<4>({ { "graphInput0", { 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0,
+ 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0 } },
+ { "graphInput1", { 50.0, 51.0, 52.0, 53.0, 54.0, 55.0, 56.0, 57.0, 58.0, 59.0, 60.0, 61.0,
+ 62.0, 63.0, 64.0, 65.0, 66.0, 67.0, 68.0, 69.0, 70.0, 71.0, 72.0, 73.0 } } },
+ { { "concat", { 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0,
+ 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0,
+ 50.0, 51.0, 52.0, 53.0, 54.0, 55.0, 56.0, 57.0, 58.0, 59.0, 60.0, 61.0,
+ 62.0, 63.0, 64.0, 65.0, 66.0, 67.0, 68.0, 69.0, 70.0, 71.0, 72.0, 73.0 } } });
+}
+
+BOOST_FIXTURE_TEST_CASE(ParseConcatDim3, ConcatFixtureDim3)
+{
+ RunTest<4>({ { "graphInput0", { 0.0, 1.0, 2.0, 3.0,
+ 4.0, 5.0, 6.0, 7.0,
+ 8.0, 9.0, 10.0, 11.0,
+ 12.0, 13.0, 14.0, 15.0,
+ 16.0, 17.0, 18.0, 19.0,
+ 20.0, 21.0, 22.0, 23.0 } },
+ { "graphInput1", { 50.0, 51.0, 52.0, 53.0,
+ 54.0, 55.0, 56.0, 57.0,
+ 58.0, 59.0, 60.0, 61.0,
+ 62.0, 63.0, 64.0, 65.0,
+ 66.0, 67.0, 68.0, 69.0,
+ 70.0, 71.0, 72.0, 73.0 } } },
+ { { "concat", { 0.0, 1.0, 2.0, 3.0,
+ 50.0, 51.0, 52.0, 53.0,
+ 4.0, 5.0, 6.0, 7.0,
+ 54.0, 55.0, 56.0, 57.0,
+ 8.0, 9.0, 10.0, 11.0,
+ 58.0, 59.0, 60.0, 61.0,
+ 12.0, 13.0, 14.0, 15.0,
+ 62.0, 63.0, 64.0, 65.0,
+ 16.0, 17.0, 18.0, 19.0,
+ 66.0, 67.0, 68.0, 69.0,
+ 20.0, 21.0, 22.0, 23.0,
+ 70.0, 71.0, 72.0, 73.0 } } });
+}
+
+BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
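
The NCHW and NHWC cases differ only in the concatenation axis (1 versus 3); the interleaving seen in ParseConcatNHWC is simply concatenation along the innermost dimension. An illustrative NumPy check (not part of the committed tests):

    import numpy as np
    a = np.arange(0, 4, dtype=np.float32).reshape(1, 1, 2, 2)   # graphInput0
    b = np.arange(4, 8, dtype=np.float32).reshape(1, 1, 2, 2)   # graphInput1
    print(np.concatenate([a, b], axis=1).flatten())  # 0 1 2 3 4 5 6 7
    print(np.concatenate([a, b], axis=3).flatten())  # 0 1 4 5 2 3 6 7
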
diff --git a/src/armnnTfParser/test/ConcatOfConcats.cpp b/src/armnnTfParser/test/ConcatOfConcats.cpp
new file mode 100644
index 0000000000..7316b9f1ac
--- /dev/null
+++ b/src/armnnTfParser/test/ConcatOfConcats.cpp
@@ -0,0 +1,316 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+
+#include <boost/test/unit_test.hpp>
+#include "armnnTfParser/ITfParser.hpp"
+#include "ParserPrototxtFixture.hpp"
+
+BOOST_AUTO_TEST_SUITE(TensorflowParser)
+
+struct ConcatOfConcatsFixture : public ParserPrototxtFixture<armnnTfParser::ITfParser>
+{
+ explicit ConcatOfConcatsFixture(const armnn::TensorShape& inputShape0, const armnn::TensorShape& inputShape1,
+ const armnn::TensorShape& inputShape2, const armnn::TensorShape& inputShape3,
+ unsigned int concatDim)
+ {
+ m_Prototext = R"(
+ node {
+ name: "graphInput0"
+ op: "Placeholder"
+ attr {
+ key: "dtype"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "shape"
+ value {
+ shape {
+ }
+ }
+ }
+ }
+ node {
+ name: "graphInput1"
+ op: "Placeholder"
+ attr {
+ key: "dtype"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "shape"
+ value {
+ shape {
+ }
+ }
+ }
+ }
+ node {
+ name: "graphInput2"
+ op: "Placeholder"
+ attr {
+ key: "dtype"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "shape"
+ value {
+ shape {
+ }
+ }
+ }
+ }
+ node {
+ name: "graphInput3"
+ op: "Placeholder"
+ attr {
+ key: "dtype"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "shape"
+ value {
+ shape {
+ }
+ }
+ }
+ }
+ node {
+ name: "Relu"
+ op: "Relu"
+ input: "graphInput0"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ }
+ node {
+ name: "Relu_1"
+ op: "Relu"
+ input: "graphInput1"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ }
+ node {
+ name: "Relu_2"
+ op: "Relu"
+ input: "graphInput2"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ }
+ node {
+ name: "Relu_3"
+ op: "Relu"
+ input: "graphInput3"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ }
+ node {
+ name: "concat/axis"
+ op: "Const"
+ attr {
+ key: "dtype"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_INT32
+ tensor_shape {
+ }
+ int_val: )";
+ m_Prototext += std::to_string(concatDim);
+ m_Prototext += R"(
+ }
+ }
+ }
+ }
+ node {
+ name: "concat"
+ op: "ConcatV2"
+ input: "Relu"
+ input: "Relu_1"
+ input: "concat/axis"
+ attr {
+ key: "N"
+ value {
+ i: 2
+ }
+ }
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "Tidx"
+ value {
+ type: DT_INT32
+ }
+ }
+ }
+ node {
+ name: "concat_1/axis"
+ op: "Const"
+ attr {
+ key: "dtype"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_INT32
+ tensor_shape {
+ }
+ int_val: )";
+ m_Prototext += std::to_string(concatDim);
+ m_Prototext += R"(
+ }
+ }
+ }
+ }
+ node {
+ name: "concat_1"
+ op: "ConcatV2"
+ input: "Relu_2"
+ input: "Relu_3"
+ input: "concat_1/axis"
+ attr {
+ key: "N"
+ value {
+ i: 2
+ }
+ }
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "Tidx"
+ value {
+ type: DT_INT32
+ }
+ }
+ }
+ node {
+ name: "concat_2/axis"
+ op: "Const"
+ attr {
+ key: "dtype"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_INT32
+ tensor_shape {
+ }
+ int_val: )";
+ m_Prototext += std::to_string(concatDim);
+ m_Prototext += R"(
+ }
+ }
+ }
+ }
+ node {
+ name: "concat_2"
+ op: "ConcatV2"
+ input: "concat"
+ input: "concat_1"
+ input: "concat_2/axis"
+ attr {
+ key: "N"
+ value {
+ i: 2
+ }
+ }
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "Tidx"
+ value {
+ type: DT_INT32
+ }
+ }
+ }
+ )";
+
+ Setup({{ "graphInput0", inputShape0 },
+ { "graphInput1", inputShape1 },
+ { "graphInput2", inputShape2 },
+ { "graphInput3", inputShape3}}, {"concat_2"});
+ }
+};
+
+struct ConcatOfConcatsFixtureNCHW : ConcatOfConcatsFixture
+{
+ ConcatOfConcatsFixtureNCHW() : ConcatOfConcatsFixture({ 1, 1, 2, 2 }, { 1, 1, 2, 2 }, { 1, 1, 2, 2 },
+ { 1, 1, 2, 2 }, 1 ) {}
+};
+
+struct ConcatOfConcatsFixtureNHWC : ConcatOfConcatsFixture
+{
+ ConcatOfConcatsFixtureNHWC() : ConcatOfConcatsFixture({ 1, 1, 2, 2 }, { 1, 1, 2, 2 }, { 1, 1, 2, 2 },
+ { 1, 1, 2, 2 }, 3 ) {}
+};
+
+BOOST_FIXTURE_TEST_CASE(ParseConcatOfConcatsNCHW, ConcatOfConcatsFixtureNCHW)
+{
+ RunTest<4>({{"graphInput0", {0.0, 1.0, 2.0, 3.0}},
+ {"graphInput1", {4.0, 5.0, 6.0, 7.0}},
+ {"graphInput2", {8.0, 9.0, 10.0, 11.0}},
+ {"graphInput3", {12.0, 13.0, 14.0, 15.0}}},
+ {{"concat_2", { 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0,
+ 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0 }}});
+}
+
+BOOST_FIXTURE_TEST_CASE(ParseConcatOfConcatsNHWC, ConcatOfConcatsFixtureNHWC)
+{
+ RunTest<4>({{"graphInput0", {0.0, 1.0, 2.0, 3.0}},
+ {"graphInput1", {4.0, 5.0, 6.0, 7.0}},
+ {"graphInput2", {8.0, 9.0, 10.0, 11.0}},
+ {"graphInput3", {12.0, 13.0, 14.0, 15.0}}},
+ {{"concat_2", { 0.0, 1.0, 4.0, 5.0, 8.0, 9.0, 12.0, 13.0,
+ 2.0, 3.0, 6.0, 7.0, 10.0, 11.0, 14.0, 15.0 }}});
+}
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/armnnTfParser/test/Constant.cpp b/src/armnnTfParser/test/Constant.cpp
new file mode 100644
index 0000000000..09587fc3d5
--- /dev/null
+++ b/src/armnnTfParser/test/Constant.cpp
@@ -0,0 +1,321 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+
+#include <boost/test/unit_test.hpp>
+
+#include "armnnTfParser/ITfParser.hpp"
+
+#include "ParserPrototxtFixture.hpp"
+
+BOOST_AUTO_TEST_SUITE(TensorflowParser)
+
+// Tests that a Const node in Tensorflow can be converted to a ConstLayer in armnn (as opposed to most
+// Const nodes which are used as weight inputs for convolutions etc. and are therefore not converted to
+// armnn ConstLayers).
+struct ConstantFixture : public ParserPrototxtFixture<armnnTfParser::ITfParser>
+{
+ ConstantFixture()
+ {
+ // input = tf.placeholder(tf.float32, name = "input")
+ // const = tf.constant([17], tf.float32, [1])
+ // output = tf.add(input, const, name = "output")
+ m_Prototext =
+ R"(
+node {
+ name: "input"
+ op: "Placeholder"
+ attr {
+ key: "dtype"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "shape"
+ value {
+ shape {
+ unknown_rank: true
+ }
+ }
+ }
+}
+node {
+ name: "Const"
+ op: "Const"
+ attr {
+ key: "dtype"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_FLOAT
+ tensor_shape {
+ dim {
+ size: 1
+ }
+ }
+ float_val: 17.0
+ }
+ }
+ }
+}
+node {
+ name: "output"
+ op: "Add"
+ input: "input"
+ input: "Const"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+}
+ )";
+ SetupSingleInputSingleOutput({ 1 }, "input", "output");
+ }
+};
+
+BOOST_FIXTURE_TEST_CASE(Constant, ConstantFixture)
+{
+ RunTest<1>({1}, {18});
+}
+
+
+// Tests that a single Const node in Tensorflow can be used twice by a dependent node. This should result in only
+// a single armnn ConstLayer being created.
+struct ConstantReusedFixture : public ParserPrototxtFixture<armnnTfParser::ITfParser>
+{
+ ConstantReusedFixture()
+ {
+ // const = tf.constant([17], tf.float32, [1])
+ // output = tf.add(const, const, name = "output")
+ m_Prototext =
+ R"(
+node {
+ name: "Const"
+ op: "Const"
+ attr {
+ key: "dtype"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_FLOAT
+ tensor_shape {
+ dim {
+ size: 1
+ }
+ }
+ float_val: 17.0
+ }
+ }
+ }
+}
+node {
+ name: "output"
+ op: "Add"
+ input: "Const"
+ input: "Const"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+}
+ )";
+ Setup({}, { "output" });
+ }
+};
+
+BOOST_FIXTURE_TEST_CASE(ConstantReused, ConstantReusedFixture)
+{
+ RunTest<1>({}, { { "output", { 34 } } });
+}
+
+template <int ListSize>
+struct ConstantValueListFixture : public ParserPrototxtFixture<armnnTfParser::ITfParser>
+{
+ ConstantValueListFixture()
+ {
+ m_Prototext =
+ R"(
+node {
+ name: "output"
+ op: "Const"
+ attr {
+ key: "dtype"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_FLOAT
+ tensor_shape {
+ dim {
+ size: 2
+ }
+ dim {
+ size: 3
+ }
+ })";
+
+ double value = 0.75;
+ for (int i = 0; i < ListSize; i++, value += 0.25)
+ {
+ m_Prototext += std::string("float_val : ") + std::to_string(value) + "\n";
+ }
+
+ m_Prototext +=
+ R"(
+ }
+ }
+ }
+}
+ )";
+ Setup({}, { "output" });
+ }
+};
+
+using ConstantSingleValueListFixture = ConstantValueListFixture<1>;
+using ConstantMultipleValueListFixture = ConstantValueListFixture<4>;
+using ConstantMaxValueListFixture = ConstantValueListFixture<6>;
+
+BOOST_FIXTURE_TEST_CASE(ConstantSingleValueList, ConstantSingleValueListFixture)
+{
+ RunTest<2>({}, { { "output", { 0.75f, 0.75f, 0.75f, 0.75f, 0.75f, 0.75f } } });
+}
+BOOST_FIXTURE_TEST_CASE(ConstantMultipleValueList, ConstantMultipleValueListFixture)
+{
+ RunTest<2>({}, { { "output", { 0.75f, 1.f, 1.25f, 1.5f, 1.5f, 1.5f } } });
+}
+BOOST_FIXTURE_TEST_CASE(ConstantMaxValueList, ConstantMaxValueListFixture)
+{
+ RunTest<2>({}, { { "output", { 0.75f, 1.f, 1.25f, 1.50f, 1.75f, 2.f } } });
+}
+
+template <bool WithShape, bool WithContent, bool WithValueList>
+struct ConstantCreateFixture : public ParserPrototxtFixture<armnnTfParser::ITfParser>
+{
+ ConstantCreateFixture()
+ {
+ m_Prototext =
+ R"(
+node {
+ name: "output"
+ op: "Const"
+ attr {
+ key: "dtype"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_FLOAT
+ )";
+
+ if (WithShape)
+ {
+ m_Prototext +=
+ R"(
+tensor_shape {
+ dim {
+ size: 2
+ }
+ dim {
+ size: 2
+ }
+}
+ )";
+ }
+ else
+ {
+ m_Prototext +=
+ R"(
+tensor_shape {
+}
+ )";
+ }
+
+ if (WithContent)
+ {
+ m_Prototext +=
+ R"(
+tensor_content: "\000\000\200?\000\000\200?\000\000\200?\000\000\200?\000\000\200?"
+ )";
+ }
+
+ if (WithValueList)
+ {
+ m_Prototext +=
+ R"(
+float_val: 1.0
+float_val: 1.0
+float_val: 1.0
+float_val: 1.0
+float_val: 1.0
+ )";
+ }
+
+ m_Prototext +=
+ R"(
+ }
+ }
+ }
+}
+ )";
+ }
+};
+
+using ConstantCreateNoValueListFixture = ConstantCreateFixture<true, false, true>;
+using ConstantCreateNoValueList2Fixture = ConstantCreateFixture<true, false, false>;
+using ConstantCreateNoContentFixture = ConstantCreateFixture<true, true, false>;
+using ConstantCreateNoContent2Fixture = ConstantCreateFixture<true, false, false>;
+using ConstantCreateNoShapeFixture = ConstantCreateFixture<false, false, false>;
+using ConstantCreateNoShape2Fixture = ConstantCreateFixture<false, true, false>;
+using ConstantCreateNoShape3Fixture = ConstantCreateFixture<false, false, true>;
+
+BOOST_FIXTURE_TEST_CASE(ConstantCreateInvalidValueList, ConstantCreateNoValueListFixture)
+{
+ BOOST_REQUIRE_THROW(Setup({}, { "output" }), armnn::ParseException);
+}
+BOOST_FIXTURE_TEST_CASE(ConstantCreateInvalidValueList2, ConstantCreateNoValueList2Fixture)
+{
+ BOOST_REQUIRE_THROW(Setup({}, { "output" }), armnn::ParseException);
+}
+BOOST_FIXTURE_TEST_CASE(ConstantCreateInvalidContent, ConstantCreateNoContentFixture)
+{
+ BOOST_REQUIRE_THROW(Setup({}, { "output" }), armnn::ParseException);
+}
+BOOST_FIXTURE_TEST_CASE(ConstantCreateInvalidShape, ConstantCreateNoShapeFixture)
+{
+ BOOST_REQUIRE_THROW(Setup({}, { "output" }), armnn::ParseException);
+}
+BOOST_FIXTURE_TEST_CASE(ConstantCreateNoShape2, ConstantCreateNoShape2Fixture)
+{
+ BOOST_REQUIRE_THROW(Setup({}, { "output" }), armnn::ParseException);
+}
+BOOST_FIXTURE_TEST_CASE(ConstantCreateNoShape3, ConstantCreateNoShape3Fixture)
+{
+ Setup({}, { "output" });
+ RunTest<1>({}, { { "output", { 1.f, 1.f, 1.f, 1.f, 1.f } } });
+}
+
+BOOST_AUTO_TEST_SUITE_END()
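
The three value-list cases above rely on a short float_val list being expanded to fill the 2x3 tensor by repeating its last element (one value fills everything; four values leave the last entry repeated). A minimal sketch of that expansion rule, as the expected outputs imply it (assumed helper, not repo code):

    def expand_value_list(values, num_elements):
        # Pad a short float_val list by repeating its last element.
        return values + [values[-1]] * (num_elements - len(values))

    print(expand_value_list([0.75], 6))                   # six copies of 0.75
    print(expand_value_list([0.75, 1.0, 1.25, 1.5], 6))   # ..., 1.5, 1.5
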
diff --git a/src/armnnTfParser/test/Convolution2d.cpp b/src/armnnTfParser/test/Convolution2d.cpp
new file mode 100644
index 0000000000..a7c7648b81
--- /dev/null
+++ b/src/armnnTfParser/test/Convolution2d.cpp
@@ -0,0 +1,322 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+
+#include <boost/test/unit_test.hpp>
+#include "armnnTfParser/ITfParser.hpp"
+#include "ParserPrototxtFixture.hpp"
+#include <string>
+#include <iostream>
+
+BOOST_AUTO_TEST_SUITE(TensorflowParser)
+
+struct Convolution2dFixture : public ParserPrototxtFixture<armnnTfParser::ITfParser>
+{
+ explicit Convolution2dFixture(const char* paddingType)
+ : Convolution2dFixture(paddingType, 1)
+ {}
+
+ // dilation: 0 - dilations attribute is not included;
+ // dilation: >0 - dilations attribute set to [1,v,v,1], where v is the value of the dilation arg
+ explicit Convolution2dFixture(const char* paddingType, int stride, int dilation = 0)
+ {
+ std::string strideString = std::to_string(stride);
+ std::string dilationString = std::to_string(dilation);
+ m_Prototext = "node { \n"
+ " name: \"graphInput\" \n"
+ " op: \"Placeholder\" \n"
+ " attr { \n"
+ " key: \"dtype\" \n"
+ " value { \n"
+ " type: DT_FLOAT \n"
+ " } \n"
+ " } \n"
+ " attr { \n"
+ " key: \"shape\" \n"
+ " value { \n"
+ " shape { \n"
+ " } \n"
+ " } \n"
+ " } \n"
+ " } \n"
+ " node { \n"
+ " name: \"Const_1\" \n"
+ " op: \"Const\" \n"
+ " attr { \n"
+ " key: \"dtype\" \n"
+ " value { \n"
+ " type: DT_FLOAT \n"
+ " } \n"
+ " } \n"
+ " attr { \n"
+ " key: \"value\" \n"
+ " value { \n"
+ " tensor { \n"
+ " dtype: DT_FLOAT \n"
+ " tensor_shape { \n"
+ " dim { \n"
+ " size: 1 \n"
+ " } \n"
+ " dim { \n"
+ " size: 3 \n"
+ " } \n"
+ " dim { \n"
+ " size: 1 \n"
+ " } \n"
+ " dim { \n"
+ " size: 1 \n"
+ " } \n"
+ " } \n"
+ " tensor_content: \"\\000\\000\\000?\\000\\000\\200?\\000\\000\\000?\" \n"
+ " } \n"
+ " } \n"
+ " } \n"
+ "} \n"
+ "node { \n"
+ " name: \"potato\" \n"
+ " op: \"Conv2D\" \n"
+ " input: \"graphInput\" \n"
+ " input: \"Const_1\" \n"
+ " attr { \n"
+ " key: \"T\" \n"
+ " value { \n"
+ " type: DT_FLOAT \n"
+ " } \n"
+ " } \n"
+ " attr { \n"
+ " key: \"data_format\" \n"
+ " value { \n"
+ " s: \"NHWC\" \n"
+ " } \n"
+ " } \n"
+ " attr { \n"
+ " key: \"padding\" \n"
+ " value { \n"
+ " s: \"";
+ m_Prototext.append(paddingType);
+ m_Prototext.append("\"\n"
+ " } \n"
+ " } \n"
+ " attr { \n"
+ " key: \"strides\" \n"
+ " value { \n"
+ " list { \n"
+ " i: 1 \n"
+ " i: 1 \n"
+ " i: ");
+ m_Prototext.append(strideString);
+ m_Prototext.append(" \n"
+ " i: 1 \n"
+ " } \n"
+ " } \n"
+ " } \n");
+
+ if (dilation > 0)
+ {
+ m_Prototext.append(" attr { \n"
+ " key: \"dilations\" \n"
+ " value { \n"
+ " list { \n"
+ " i: 1 \n"
+ " i: ");
+ m_Prototext.append(dilationString);
+ m_Prototext.append(" \n"
+ " i: ");
+ m_Prototext.append(dilationString);
+ m_Prototext.append(" \n"
+ " i: 1 \n"
+ " } \n"
+ " } \n"
+ " } \n");
+ }
+ m_Prototext.append(" attr { \n"
+ " key: \"use_cudnn_on_gpu\" \n"
+ " value { \n"
+ " b: false \n"
+ " } \n"
+ " } \n"
+ "} \n");
+
+ // Manual height computation based on stride parameter.
+ BOOST_ASSERT_MSG(stride == 1 || stride==2, "Add support for strides other than 1 or 2.");
+ unsigned int dims[] = {1,2,3,1};
+ if (stride == 2)
+ {
+ dims[1]=3;
+ }
+
+ SetupSingleInputSingleOutput(armnn::TensorShape(4, dims), "graphInput", "potato");
+ }
+};
+
+
+struct Convolution2dSameFixture : Convolution2dFixture
+{
+ Convolution2dSameFixture() : Convolution2dFixture("SAME", 1){}
+};
+BOOST_FIXTURE_TEST_CASE(ParseConv2DSame, Convolution2dSameFixture)
+{
+ RunTest<4>({1, 2, 3, 4, 5, 6}, {2, 4, 4, 6.5f, 10 , 8.5f});
+}
+
+struct Convolution2dValidFixture : Convolution2dFixture
+{
+ Convolution2dValidFixture() : Convolution2dFixture("VALID", 1){}
+};
+BOOST_FIXTURE_TEST_CASE(ParseConv2DValid, Convolution2dValidFixture)
+{
+ RunTest<4>({1, 2, 3, 4, 5, 6}, {4, 10});
+}
+
+
+struct Convolution2dStride2SameFixture : Convolution2dFixture
+{
+ Convolution2dStride2SameFixture() : Convolution2dFixture("SAME", 2){}
+};
+BOOST_FIXTURE_TEST_CASE(ParseConv2DStride2Same, Convolution2dStride2SameFixture)
+{
+ RunTest<4>({1, 2, 3, 4, 5, 6, 7, 8, 9}, {2, 4, 6.5, 8.5, 11, 13});
+}
+
+
+struct Convolution2dStride2ValidFixture : Convolution2dFixture
+{
+ Convolution2dStride2ValidFixture() : Convolution2dFixture("VALID", 2){}
+};
+BOOST_FIXTURE_TEST_CASE(ParseConv2DStride2Valid, Convolution2dStride2ValidFixture)
+{
+ RunTest<4>({1, 2, 3, 4, 5, 6, 7, 8, 9}, {4, 10, 16});
+}
+
+
+struct Convolution2dDilation1Fixture : Convolution2dFixture
+{
+ Convolution2dDilation1Fixture() : Convolution2dFixture("SAME", 1, 1){}
+};
+BOOST_FIXTURE_TEST_CASE(ParseConv2DDilation1, Convolution2dDilation1Fixture)
+{
+ RunTest<4>({1, 2, 3, 4, 5, 6}, {2, 4, 4, 6.5f, 10 , 8.5f});
+}
+
+BOOST_AUTO_TEST_CASE(ParseConv2DDilation2)
+{
+ const char* prototext = ""
+ "node {\n"
+ " name: \"graphInput\"\n"
+ " op: \"Placeholder\"\n"
+ " attr {\n"
+ " key: \"dtype\"\n"
+ " value {\n"
+ " type: DT_FLOAT\n"
+ " }\n"
+ " }\n"
+ " attr {\n"
+ " key: \"shape\"\n"
+ " value {\n"
+ " shape {\n"
+ " }\n"
+ " }\n"
+ " }\n"
+ "}\n"
+ "node {\n"
+ " name: \"Const_1\"\n"
+ " op: \"Const\"\n"
+ " attr {\n"
+ " key: \"dtype\"\n"
+ " value {\n"
+ " type: DT_FLOAT\n"
+ " }\n"
+ " }\n"
+ " attr {\n"
+ " key: \"value\"\n"
+ " value {\n"
+ " tensor {\n"
+ " dtype: DT_FLOAT\n"
+ " tensor_shape {\n"
+ " dim {\n"
+ " size: 1\n"
+ " }\n"
+ " dim {\n"
+ " size: 3\n"
+ " }\n"
+ " dim {\n"
+ " size: 1\n"
+ " }\n"
+ " dim {\n"
+ " size: 1\n"
+ " }\n"
+ " }\n"
+ " tensor_content: \"\\000\\000\\000?\\000\\000\\200?\\000\\000\\000?\"\n"
+ " }\n"
+ " }\n"
+ " }\n"
+ "}\n"
+ "node {\n"
+ " name: \"potato\"\n"
+ " op: \"Conv2D\"\n"
+ " input: \"graphInput\"\n"
+ " input: \"Const_1\"\n"
+ " attr {\n"
+ " key: \"T\"\n"
+ " value {\n"
+ " type: DT_FLOAT\n"
+ " }\n"
+ " }\n"
+ " attr {\n"
+ " key: \"data_format\"\n"
+ " value {\n"
+ " s: \"NHWC\"\n"
+ " }\n"
+ " }\n"
+ " attr {\n"
+ " key: \"padding\"\n"
+ " value {\n"
+ " s: \"SAME\"\n"
+ " }\n"
+ " }\n"
+ " attr {\n"
+ " key: \"strides\"\n"
+ " value {\n"
+ " list {\n"
+ " i: 1\n"
+ " i: 1\n"
+ " i: 1\n"
+ " i: 1\n"
+ " }\n"
+ " }\n"
+ " }\n"
+ " attr {\n"
+ " key: \"dilations\"\n"
+ " value {\n"
+ " list {\n"
+ " i: 1\n"
+ " i: 2\n"
+ " i: 2\n"
+ " i: 1\n"
+ " }\n"
+ " }\n"
+ " }\n"
+ " attr {\n"
+ " key: \"use_cudnn_on_gpu\"\n"
+ " value {\n"
+ " b: false\n"
+ " }\n"
+ " }\n"
+ "}\n";
+
+ std::map<std::string, armnn::TensorShape> inputShapes;
+ armnn::TensorShape tensorShape = { 1, 3, 3, 1 };
+ inputShapes["graphInput"] = tensorShape;
+ armnnTfParser::ITfParserPtr parser = armnnTfParser::ITfParser::Create();
+ BOOST_CHECK_EXCEPTION(parser->CreateNetworkFromString(prototext, inputShapes, { "potato" }),
+ armnn::ParseException,
+ [] (armnn::ParseException const& ex)->bool
+ {
+ return strcmp(ex.what(),
+ "ArmNN only supports Convolution layers with dilations [1,1,1,1]") == 0;
+ });
+}
+
+
+BOOST_AUTO_TEST_SUITE_END()
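
The tensor_content in Const_1 decodes to the 1x3 kernel [0.5, 1.0, 0.5], so the SAME-padding expectations are a plain correlation along the width axis of the 2x3 input. An illustrative NumPy reproduction (the kernel is symmetric, so convolve and correlate agree; not part of the committed tests):

    import numpy as np
    kernel = np.array([0.5, 1.0, 0.5], dtype=np.float32)        # decoded from tensor_content
    rows = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32)   # the 1x2x3x1 NHWC input
    same = [np.convolve(r, kernel, mode='same') for r in rows]
    print(np.concatenate(same))   # [ 2.   4.   4.   6.5 10.   8.5]
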
diff --git a/src/armnnTfParser/test/DepthwiseConvolution2d.cpp b/src/armnnTfParser/test/DepthwiseConvolution2d.cpp
new file mode 100644
index 0000000000..84e7a7e7a9
--- /dev/null
+++ b/src/armnnTfParser/test/DepthwiseConvolution2d.cpp
@@ -0,0 +1,166 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+
+#include <boost/test/unit_test.hpp>
+#include "armnnTfParser/ITfParser.hpp"
+#include "ParserPrototxtFixture.hpp"
+#include <string>
+#include <iostream>
+
+BOOST_AUTO_TEST_SUITE(TensorflowParser)
+
+struct DepthwiseConvolution2dFixture : public ParserPrototxtFixture<armnnTfParser::ITfParser>
+{
+ explicit DepthwiseConvolution2dFixture(const char* paddingType)
+ {
+ m_Prototext = "node { \n"
+ " name: \"graphInput\" \n"
+ " op: \"Placeholder\" \n"
+ " attr { \n"
+ " key: \"dtype\" \n"
+ " value { \n"
+ " type: DT_FLOAT \n"
+ " } \n"
+ " } \n"
+ " attr { \n"
+ " key: \"value\" \n"
+ " value { \n"
+ " tensor { \n"
+ " dtype: DT_FLOAT \n"
+ " tensor_shape { \n"
+ " dim { \n"
+ " size: 1 \n"
+ " } \n"
+ " dim { \n"
+ " size: 1 \n"
+ " } \n"
+ " dim { \n"
+ " size: 3 \n"
+ " } \n"
+ " dim { \n"
+ " size: 3 \n"
+ " } \n"
+ " } \n"
+ " tensor_content: \"\\000\\000\\200?\\000\\000\\000@\\000\\000@@\\000\\000\\200@"
+ "\\000\\000\\240@\\000\\000\\300@\\000\\000\\340@\\000\\000\\000A\\000\\000\\020A\" \n"
+ " } \n"
+ " } \n"
+ " } \n"
+ " } \n"
+ " node { \n"
+ " name: \"Const_1\" \n"
+ " op: \"Const\" \n"
+ " attr { \n"
+ " key: \"dtype\" \n"
+ " value { \n"
+ " type: DT_FLOAT \n"
+ " } \n"
+ " } \n"
+ " attr { \n"
+ " key: \"value\" \n"
+ " value { \n"
+ " tensor { \n"
+ " dtype: DT_FLOAT \n"
+ " tensor_shape { \n"
+ " dim { \n"
+ " size: 1 \n"
+ " } \n"
+ " dim { \n"
+ " size: 3 \n"
+ " } \n"
+ " dim { \n"
+ " size: 3 \n"
+ " } \n"
+ " dim { \n"
+ " size: 3 \n"
+ " } \n"
+ " } \n"
+ " tensor_content: \"\\000\\000\\000?\\000\\000\\200?\\000\\000\\000?"
+ "\\000\\000\\000?\\000\\000\\200?\\000\\000\\000?"
+ "\\000\\000\\000?\\000\\000\\200?\\000\\000\\000?"
+ "\\000\\000\\000?\\000\\000\\200?\\000\\000\\000?"
+ "\\000\\000\\000?\\000\\000\\200?\\000\\000\\000?"
+ "\\000\\000\\000?\\000\\000\\200?\\000\\000\\000?"
+ "\\000\\000\\000?\\000\\000\\200?\\000\\000\\000?"
+ "\\000\\000\\000?\\000\\000\\200?\\000\\000\\000?"
+ "\\000\\000\\000?\\000\\000\\200?\\000\\000\\000?\" \n"
+ " } \n"
+ " } \n"
+ " } \n"
+ "} \n"
+ "node { \n"
+ " name: \"potato\" \n"
+ " op: \"DepthwiseConv2dNative\" \n"
+ " input: \"graphInput\" \n"
+ " input: \"Const_1\" \n"
+ " attr { \n"
+ " key: \"T\" \n"
+ " value { \n"
+ " type: DT_FLOAT \n"
+ " } \n"
+ " } \n"
+ " attr { \n"
+ " key: \"data_format\" \n"
+ " value { \n"
+ " s: \"NHWC\" \n"
+ " } \n"
+ " } \n"
+ " attr { \n"
+ " key: \"padding\" \n"
+ " value { \n"
+ " s: \"";
+ m_Prototext.append(paddingType);
+ m_Prototext.append("\"\n"
+ " } \n"
+ " } \n"
+ " attr { \n"
+ " key: \"strides\" \n"
+ " value { \n"
+ " list { \n"
+ " i: 1 \n"
+ " i: 1 \n"
+ " i: 1 \n"
+ " i: 1 \n"
+ " } \n"
+ " } \n"
+ " } \n"
+ " attr { \n"
+ " key: \"use_cudnn_on_gpu\" \n"
+ " value { \n"
+ " b: false \n"
+ " } \n"
+ " } \n"
+ "} \n");
+
+ SetupSingleInputSingleOutput({ 1, 1, 3, 3 }, "graphInput", "potato");
+ }
+};
+
+struct DepthwiseConvolution2dSameFixture : DepthwiseConvolution2dFixture
+{
+ DepthwiseConvolution2dSameFixture() : DepthwiseConvolution2dFixture("SAME") { }
+};
+
+BOOST_FIXTURE_TEST_CASE(ParseDepthwiseConv2DSame, DepthwiseConvolution2dSameFixture)
+{
+ RunTest<4>({ 1, 2, 3, 4, 5, 6, 7, 8, 9 },
+ { 2.5f, 5.f, 2.5f, 3.5f, 7.f, 3.5f, 4.5f, 9.f, 4.5f,
+ 6.f, 12.f, 6.f, 7.5f, 15.f, 7.5f, 9.f, 18.f, 9.f,
+ 5.5f, 11.f, 5.5f, 6.5f, 13.f, 6.5f, 7.5f, 15.f, 7.5f});
+}
+
+struct DepthwiseConvolution2dValidFixture : DepthwiseConvolution2dFixture
+{
+ DepthwiseConvolution2dValidFixture() : DepthwiseConvolution2dFixture("VALID") { }
+};
+
+BOOST_FIXTURE_TEST_CASE(ParseDepthwiseConv2DValid, DepthwiseConvolution2dValidFixture)
+{
+ RunTest<4>({ 1, 2, 3, 4, 5, 6, 7, 8, 9 }, // input data
+ { 6.f, 12.f, 6.f, 7.5f, 15.f, 7.5f, 9.f, 18.f, 9.f }); // output expected data
+}
+
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/armnnTfParser/test/FullyConnected.cpp b/src/armnnTfParser/test/FullyConnected.cpp
new file mode 100644
index 0000000000..2a7b4951b7
--- /dev/null
+++ b/src/armnnTfParser/test/FullyConnected.cpp
@@ -0,0 +1,579 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+
+#include <boost/test/unit_test.hpp>
+#include "armnnTfParser/ITfParser.hpp"
+#include "ParserPrototxtFixture.hpp"
+#include "Runtime.hpp"
+#include "Network.hpp"
+#include "Graph.hpp"
+
+BOOST_AUTO_TEST_SUITE(TensorflowParser)
+
+// In Tensorflow fully connected layers are expressed as a MatMul followed by an Add.
+// The TfParser must detect this case and convert them to a FullyConnected layer.
+struct FullyConnectedFixture : public ParserPrototxtFixture<armnnTfParser::ITfParser>
+{
+ FullyConnectedFixture()
+ {
+ // input = tf.placeholder(tf.float32, [1, 1], "input")
+ // weights = tf.constant([2], tf.float32, [1, 1])
+ // matmul = tf.matmul(input, weights)
+ // bias = tf.constant([1], tf.float32)
+ // output = tf.add(matmul, bias, name="output")
+ m_Prototext = R"(
+node {
+ name: "input"
+ op: "Placeholder"
+ attr {
+ key: "dtype"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "shape"
+ value {
+ shape {
+ dim {
+ size: 1
+ }
+ dim {
+ size: 1
+ }
+ }
+ }
+ }
+}
+node {
+ name: "Const"
+ op: "Const"
+ attr {
+ key: "dtype"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_FLOAT
+ tensor_shape {
+ dim {
+ size: 1
+ }
+ dim {
+ size: 1
+ }
+ }
+ float_val: 2.0
+ }
+ }
+ }
+}
+node {
+ name: "MatMul"
+ op: "MatMul"
+ input: "input"
+ input: "Const"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "transpose_a"
+ value {
+ b: false
+ }
+ }
+ attr {
+ key: "transpose_b"
+ value {
+ b: false
+ }
+ }
+}
+node {
+ name: "Const_1"
+ op: "Const"
+ attr {
+ key: "dtype"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_FLOAT
+ tensor_shape {
+ dim {
+ size: 1
+ }
+ }
+ float_val: 1.0
+ }
+ }
+ }
+}
+node {
+ name: "output"
+ op: "Add"
+ input: "MatMul"
+ input: "Const_1"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+}
+ )";
+ SetupSingleInputSingleOutput({ 1, 1 }, "input", "output");
+ }
+};
+
+BOOST_FIXTURE_TEST_CASE(FullyConnected, FullyConnectedFixture)
+{
+ RunTest<1>({ 3 }, { 7 });
+}
+
+// Similar to FullyConnectedFixture, but this time the MatMul's output is used by two Adds. This should result
+// in two FullyConnected layers being created.
+// I
+// |
+// M -- C
+// / \'
+// C-- A A -- C
+// \ /
+// A
+struct MatMulUsedInTwoFcFixture : public ParserPrototxtFixture<armnnTfParser::ITfParser>
+{
+ MatMulUsedInTwoFcFixture()
+ {
+ m_Prototext = R"(
+node {
+ name: "input"
+ op: "Placeholder"
+ attr {
+ key: "dtype"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "shape"
+ value {
+ shape {
+ dim {
+ size: 1
+ }
+ dim {
+ size: 1
+ }
+ }
+ }
+ }
+}
+node {
+ name: "Const"
+ op: "Const"
+ attr {
+ key: "dtype"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_FLOAT
+ tensor_shape {
+ dim {
+ size: 1
+ }
+ dim {
+ size: 1
+ }
+ }
+ float_val: 2.0
+ }
+ }
+ }
+}
+node {
+ name: "MatMul"
+ op: "MatMul"
+ input: "input"
+ input: "Const"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "transpose_a"
+ value {
+ b: false
+ }
+ }
+ attr {
+ key: "transpose_b"
+ value {
+ b: false
+ }
+ }
+}
+node {
+ name: "Const_1"
+ op: "Const"
+ attr {
+ key: "dtype"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_FLOAT
+ tensor_shape {
+ dim {
+ size: 1
+ }
+ }
+ float_val: 5.0
+ }
+ }
+ }
+}
+node {
+ name: "Const_2"
+ op: "Const"
+ attr {
+ key: "dtype"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_FLOAT
+ tensor_shape {
+ dim {
+ size: 1
+ }
+ }
+ float_val: 15.0
+ }
+ }
+ }
+}
+node {
+ name: "Add"
+ op: "Add"
+ input: "MatMul"
+ input: "Const_1"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+}
+node {
+ name: "Add_1"
+ op: "Add"
+ input: "MatMul"
+ input: "Const_2"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+}
+node {
+ name: "output"
+ op: "Add"
+ input: "Add"
+ input: "Add_1"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+}
+ )";
+ SetupSingleInputSingleOutput({ 1, 1 }, "input", "output");
+ }
+};
+
+BOOST_FIXTURE_TEST_CASE(MatMulUsedInTwoFc, MatMulUsedInTwoFcFixture)
+{
+ RunTest<1>({ 3 }, { 32 });
+ // Ideally we would check here that the armnn network has 5 layers:
+ // Input, 2 x FullyConnected (biased), Add and Output.
+ // This would make sure the parser hasn't incorrectly added some unconnected layers corresponding to the MatMul
+}
+
+// Similar to MatMulUsedInTwoFc, but this time the Adds are 'staggered' (see diagram), which means that only one
+// FullyConnected layer can be created (the other should just be an Add).
+// I
+// |
+// M -- C1
+// / \'
+// C2 -- A |
+// \ /
+// A
+struct MatMulUsedInTwoFcStaggeredFixture : public ParserPrototxtFixture<armnnTfParser::ITfParser>
+{
+ MatMulUsedInTwoFcStaggeredFixture()
+ {
+ // input = tf.placeholder(tf.float32, shape=[1,1], name = "input")
+ // const1 = tf.constant([17], tf.float32, [1,1])
+ // mul = tf.matmul(input, const1)
+ // const2 = tf.constant([7], tf.float32, [1])
+ // fc = tf.add(mul, const2)
+ // output = tf.add(mul, fc, name="output")
+ m_Prototext = R"(
+node {
+ name: "input"
+ op: "Placeholder"
+ attr {
+ key: "dtype"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "shape"
+ value {
+ shape {
+ dim {
+ size: 1
+ }
+ dim {
+ size: 1
+ }
+ }
+ }
+ }
+}
+node {
+ name: "Const"
+ op: "Const"
+ attr {
+ key: "dtype"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_FLOAT
+ tensor_shape {
+ dim {
+ size: 1
+ }
+ dim {
+ size: 1
+ }
+ }
+ float_val: 17.0
+ }
+ }
+ }
+}
+node {
+ name: "MatMul"
+ op: "MatMul"
+ input: "input"
+ input: "Const"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "transpose_a"
+ value {
+ b: false
+ }
+ }
+ attr {
+ key: "transpose_b"
+ value {
+ b: false
+ }
+ }
+}
+node {
+ name: "Const_1"
+ op: "Const"
+ attr {
+ key: "dtype"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_FLOAT
+ tensor_shape {
+ dim {
+ size: 1
+ }
+ }
+ float_val: 7.0
+ }
+ }
+ }
+}
+node {
+ name: "Add"
+ op: "Add"
+ input: "MatMul"
+ input: "Const_1"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+}
+node {
+ name: "output"
+ op: "Add"
+ input: "MatMul"
+ input: "Add"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+}
+ )";
+ SetupSingleInputSingleOutput({ 1, 1 }, "input", "output");
+ }
+};
+
+BOOST_FIXTURE_TEST_CASE(MatMulUsedInTwoFcStaggered, MatMulUsedInTwoFcStaggeredFixture)
+{
+ RunTest<1>({ 2 }, { 75 });
+ // Ideally we would check here that the armnn network has 5 layers:
+ // Input, FullyConnected (biased), FullyConnected (non biased), Add and Output.
+}
+
+// A MatMul in isolation, not connected to an add. Should result in a non-biased FullyConnected layer.
+struct MatMulFixture : public ParserPrototxtFixture<armnnTfParser::ITfParser>
+{
+ MatMulFixture()
+ {
+ // input = tf.placeholder(tf.float32, shape = [1, 1], name = "input")
+ // const = tf.constant([17], tf.float32, [1, 1])
+ // output = tf.matmul(input, const, name = "output")
+ m_Prototext = R"(
+node {
+ name: "input"
+ op: "Placeholder"
+ attr {
+ key: "dtype"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "shape"
+ value {
+ shape {
+ dim {
+ size: 1
+ }
+ dim {
+ size: 1
+ }
+ }
+ }
+ }
+}
+node {
+ name: "Const"
+ op: "Const"
+ attr {
+ key: "dtype"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_FLOAT
+ tensor_shape {
+ dim {
+ size: 1
+ }
+ dim {
+ size: 1
+ }
+ }
+ float_val: 17.0
+ }
+ }
+ }
+}
+node {
+ name: "output"
+ op: "MatMul"
+ input: "input"
+ input: "Const"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "transpose_a"
+ value {
+ b: false
+ }
+ }
+ attr {
+ key: "transpose_b"
+ value {
+ b: false
+ }
+ }
+}
+ )";
+ SetupSingleInputSingleOutput({ 1, 1 }, "input", "output");
+ }
+};
+
+BOOST_FIXTURE_TEST_CASE(MatMul, MatMulFixture)
+{
+ RunTest<1>({ 2 }, { 34 });
+}
+
+BOOST_AUTO_TEST_SUITE_END()
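
For reference, the expected scalars follow directly from the constants in the prototexts: the simple case is 3 * 2 + 1 = 7, and the shared-MatMul case sums two biased copies of the same product. A trivial check (illustrative only):

    x = 3.0
    matmul = x * 2.0        # shared MatMul output
    fc1 = matmul + 5.0      # Add with Const_1 -> 11
    fc2 = matmul + 15.0     # Add with Const_2 -> 21
    print(fc1 + fc2)        # 32, matching MatMulUsedInTwoFc
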
diff --git a/src/armnnTfParser/test/FusedBatchNorm.cpp b/src/armnnTfParser/test/FusedBatchNorm.cpp
new file mode 100644
index 0000000000..632d5f01f9
--- /dev/null
+++ b/src/armnnTfParser/test/FusedBatchNorm.cpp
@@ -0,0 +1,175 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+
+#include <boost/test/unit_test.hpp>
+#include "armnnTfParser/ITfParser.hpp"
+#include "ParserPrototxtFixture.hpp"
+
+BOOST_AUTO_TEST_SUITE(TensorflowParser)
+
+struct FusedBatchNormFixture : public ParserPrototxtFixture<armnnTfParser::ITfParser>
+{
+ FusedBatchNormFixture()
+ {
+ m_Prototext = "node { \n"
+ " name: \"graphInput\" \n"
+ " op: \"Placeholder\" \n"
+ " attr { \n"
+ " key: \"dtype\" \n"
+ " value { \n"
+ " type: DT_FLOAT \n"
+ " } \n"
+ " } \n"
+ " attr { \n"
+ " key: \"shape\" \n"
+ " value { \n"
+ " shape { \n"
+ " } \n"
+ " } \n"
+ " } \n"
+ "} \n"
+ "node { \n"
+ " name: \"Const_1\" \n"
+ " op: \"Const\" \n"
+ " attr { \n"
+ " key: \"dtype\" \n"
+ " value { \n"
+ " type: DT_FLOAT \n"
+ " } \n"
+ " } \n"
+ " attr { \n"
+ " key: \"value\" \n"
+ " value { \n"
+ " tensor { \n"
+ " dtype: DT_FLOAT \n"
+ " tensor_shape { \n"
+ " dim { \n"
+ " size: 1 \n"
+ " } \n"
+ " } \n"
+ " float_val: 1.0 \n"
+ " } \n"
+ " } \n"
+ " } \n"
+ "} \n"
+ "node { \n"
+ " name: \"Const_2\" \n"
+ " op: \"Const\" \n"
+ " attr { \n"
+ " key: \"dtype\" \n"
+ " value { \n"
+ " type: DT_FLOAT \n"
+ " } \n"
+ " } \n"
+ " attr { \n"
+ " key: \"value\" \n"
+ " value { \n"
+ " tensor { \n"
+ " dtype: DT_FLOAT \n"
+ " tensor_shape { \n"
+ " dim { \n"
+ " size: 1 \n"
+ " } \n"
+ " } \n"
+ " float_val: 0.0 \n"
+ " } \n"
+ " } \n"
+ " } \n"
+ "} \n"
+ "node { \n"
+ " name: \"FusedBatchNormLayer/mean\" \n"
+ " op: \"Const\" \n"
+ " attr { \n"
+ " key: \"dtype\" \n"
+ " value { \n"
+ " type: DT_FLOAT \n"
+ " } \n"
+ " } \n"
+ " attr { \n"
+ " key: \"value\" \n"
+ " value { \n"
+ " tensor { \n"
+ " dtype: DT_FLOAT \n"
+ " tensor_shape { \n"
+ " dim { \n"
+ " size: 1 \n"
+ " } \n"
+ " } \n"
+ " float_val: 5.0 \n"
+ " } \n"
+ " } \n"
+ " } \n"
+ "} \n"
+ "node { \n"
+ " name: \"FusedBatchNormLayer/variance\" \n"
+ " op: \"Const\" \n"
+ " attr { \n"
+ " key: \"dtype\" \n"
+ " value { \n"
+ " type: DT_FLOAT \n"
+ " } \n"
+ " } \n"
+ " attr { \n"
+ " key: \"value\" \n"
+ " value { \n"
+ " tensor { \n"
+ " dtype: DT_FLOAT \n"
+ " tensor_shape { \n"
+ " dim { \n"
+ " size: 1 \n"
+ " } \n"
+ " } \n"
+ " float_val: 2.0 \n"
+ " } \n"
+ " } \n"
+ " } \n"
+ "} \n"
+ "node { \n"
+ " name: \"output\" \n"
+ " op: \"FusedBatchNorm\" \n"
+ " input: \"graphInput\" \n"
+ " input: \"Const_1\" \n"
+ " input: \"Const_2\" \n"
+ " input: \"FusedBatchNormLayer/mean\" \n"
+ " input: \"FusedBatchNormLayer/variance\" \n"
+ " attr { \n"
+ " key: \"T\" \n"
+ " value { \n"
+ " type: DT_FLOAT \n"
+ " } \n"
+ " } \n"
+ " attr { \n"
+ " key: \"data_format\" \n"
+ " value { \n"
+ " s: \"NHWC\" \n"
+ " } \n"
+ " } \n"
+ " attr { \n"
+ " key: \"epsilon\" \n"
+ " value { \n"
+ " f: 0.0010000000475 \n"
+ " } \n"
+ " } \n"
+ " attr { \n"
+ " key: \"is_training\" \n"
+ " value { \n"
+ " b: false \n"
+ " } \n"
+ " } \n"
+ "} \n";
+
+ SetupSingleInputSingleOutput({1, 3, 3, 1}, "graphInput", "output");
+ }
+};
+
+BOOST_FIXTURE_TEST_CASE(ParseFusedBatchNorm, FusedBatchNormFixture)
+{
+ RunTest<4>({1, 2, 3, 4, 5, 6, 7, 8, 9}, // input data
+ {-2.8277204f, -2.12079024f, -1.4138602f,
+ -0.7069301f, 0.0f, 0.7069301f,
+ 1.4138602f, 2.12079024f, 2.8277204f}); // expected output data
+}
+
+BOOST_AUTO_TEST_SUITE_END()
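
The expected outputs are the inference-mode batch normalisation formula applied element-wise with gamma = 1, beta = 0, mean = 5, variance = 2 and the epsilon from the node. An illustrative Python check (not part of the committed test):

    import math
    gamma, beta, mean, var, eps = 1.0, 0.0, 5.0, 2.0, 0.0010000000475
    out = [gamma * (x - mean) / math.sqrt(var + eps) + beta for x in range(1, 10)]
    # [-2.8277204, -2.1207902, -1.4138602, -0.7069301, 0.0, 0.7069301, 1.4138602, ...]
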
diff --git a/src/armnnTfParser/test/Identity.cpp b/src/armnnTfParser/test/Identity.cpp
new file mode 100644
index 0000000000..ca20de5760
--- /dev/null
+++ b/src/armnnTfParser/test/Identity.cpp
@@ -0,0 +1,161 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+
+#include <boost/test/unit_test.hpp>
+#include "armnnTfParser/ITfParser.hpp"
+#include "ParserPrototxtFixture.hpp"
+
+BOOST_AUTO_TEST_SUITE(TensorflowParser)
+
+struct IdentitySimpleFixture : public ParserPrototxtFixture<armnnTfParser::ITfParser>
+{
+ IdentitySimpleFixture()
+ {
+ m_Prototext = "node{ "
+ " name: \"Placeholder\""
+ " op: \"Placeholder\""
+ " attr {"
+ " key: \"dtype\""
+ " value {"
+ " type: DT_FLOAT"
+ " }"
+ " }"
+ " attr {"
+ " key: \"shape\""
+ " value {"
+ " shape {"
+ " unknown_rank: true"
+ " }"
+ " }"
+ " }"
+ "}"
+ "node {"
+ " name: \"Identity\""
+ " op: \"Identity\""
+ " input: \"Placeholder\""
+ " attr {"
+ " key: \"T\""
+ " value {"
+ " type: DT_FLOAT"
+ " }"
+ " }"
+ "}";
+ SetupSingleInputSingleOutput({ 4 }, "Placeholder", "Identity");
+ }
+};
+
+BOOST_FIXTURE_TEST_CASE(IdentitySimple, IdentitySimpleFixture)
+{
+ RunTest<1>({ 1.0f, 2.0f, 3.0f, 4.0f }, { 1.0f, 2.0f, 3.0f, 4.0f });
+}
+
+struct IdentityFixture : public ParserPrototxtFixture<armnnTfParser::ITfParser>
+{
+ IdentityFixture()
+ {
+ m_Prototext = "node{ "
+ " name: \"Placeholder\""
+ " op: \"Placeholder\""
+ " attr {"
+ " key: \"dtype\""
+ " value {"
+ " type: DT_FLOAT"
+ " }"
+ " }"
+ " attr {"
+ " key: \"shape\""
+ " value {"
+ " shape {"
+ " unknown_rank: true"
+ " }"
+ " }"
+ " }"
+ "}"
+ "node {"
+ " name: \"Identity\""
+ " op: \"Identity\""
+ " input: \"Placeholder\""
+ " attr {"
+ " key: \"T\""
+ " value {"
+ " type: DT_FLOAT"
+ " }"
+ " }"
+ "}"
+ "node {"
+ " name: \"Add\""
+ " op: \"Add\""
+ " input: \"Identity\""
+ " input: \"Identity\""
+ " attr {"
+ " key: \"T\""
+ " value {"
+ " type: DT_FLOAT"
+ " }"
+ " }"
+ "}";
+ SetupSingleInputSingleOutput({ 4 }, "Placeholder", "Add");
+ }
+};
+
+BOOST_FIXTURE_TEST_CASE(ParseIdentity, IdentityFixture)
+{
+ RunTest<1>({ 1.0f, 2.0f, 3.0f, 4.0f }, { 2.0f, 4.0f, 6.0f, 8.0f });
+}
+
+struct IdentityChainFixture : public ParserPrototxtFixture<armnnTfParser::ITfParser>
+{
+ IdentityChainFixture()
+ {
+ m_Prototext = "node{ "
+ " name: \"Placeholder\""
+ " op: \"Placeholder\""
+ " attr {"
+ " key: \"dtype\""
+ " value {"
+ " type: DT_FLOAT"
+ " }"
+ " }"
+ " attr {"
+ " key: \"shape\""
+ " value {"
+ " shape {"
+ " unknown_rank: true"
+ " }"
+ " }"
+ " }"
+ "}"
+ "node {"
+ " name: \"Identity\""
+ " op: \"Identity\""
+ " input: \"Placeholder\""
+ " attr {"
+ " key: \"T\""
+ " value {"
+ " type: DT_FLOAT"
+ " }"
+ " }"
+ "}"
+ "node {"
+ " name: \"Identity2\""
+ " op: \"Identity\""
+ " input: \"Identity\""
+ " attr {"
+ " key: \"T\""
+ " value {"
+ " type: DT_FLOAT"
+ " }"
+ " }"
+ "}";
+ SetupSingleInputSingleOutput({ 4 }, "Placeholder", "Identity2");
+ }
+};
+
+BOOST_FIXTURE_TEST_CASE(IdentityChain, IdentityChainFixture)
+{
+ RunTest<1>({ 1.0f, 2.0f, 3.0f, 4.0f }, { 1.0f, 2.0f, 3.0f, 4.0f });
+}
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/armnnTfParser/test/LocalResponseNormalization.cpp b/src/armnnTfParser/test/LocalResponseNormalization.cpp
new file mode 100644
index 0000000000..a7c2bfe3e1
--- /dev/null
+++ b/src/armnnTfParser/test/LocalResponseNormalization.cpp
@@ -0,0 +1,121 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+
+#include <boost/test/unit_test.hpp>
+#include "armnnTfParser/ITfParser.hpp"
+#include "ParserPrototxtFixture.hpp"
+
+BOOST_AUTO_TEST_SUITE(TensorflowParser)
+
+
+struct LocalResponseNormalizationBaseFixture : public ParserPrototxtFixture<armnnTfParser::ITfParser>
+{
+ explicit LocalResponseNormalizationBaseFixture(float alpha, float beta, float bias)
+ {
+ std::string alphaString = std::to_string(alpha);
+ std::string betaString = std::to_string(beta);
+ std::string biasString = std::to_string(bias);
+
+ m_Prototext = "node {"
+ " name: \"Placeholder\""
+ " op: \"Placeholder\""
+ " attr {"
+ " key: \"dtype\""
+ " value {"
+ " type: DT_FLOAT"
+ " }"
+ " }"
+ " attr {"
+ " key: \"shape\""
+ " value {"
+ " shape {"
+ " unknown_rank: true"
+ " }"
+ " }"
+ " }"
+ "}"
+ "node {"
+ " name: \"LRN\""
+ " op: \"LRN\""
+ " input: \"Placeholder\""
+ " attr {"
+ " key: \"T\""
+ " value {"
+ " type: DT_FLOAT"
+ " }"
+ " }"
+ " attr {"
+ " key: \"alpha\""
+ " value {"
+ " f: ";
+ m_Prototext.append(alphaString);
+ m_Prototext.append("\n"
+ " }"
+ " }"
+ " attr {"
+ " key: \"beta\""
+ " value {"
+ " f: ");
+ m_Prototext.append(betaString);
+ m_Prototext.append("\n"
+ " }"
+ " }"
+ " attr {"
+ " key: \"bias\""
+ " value {"
+ " f: ");
+ m_Prototext.append(biasString);
+ m_Prototext.append("\n"
+ " }"
+ " }"
+ " attr {"
+ " key: \"depth_radius\""
+ " value {"
+ " i: 1"
+ " }"
+ " }"
+ "}");
+ }
+};
+
+
+struct LocalResponseNormalizationFixtureSimple : public LocalResponseNormalizationBaseFixture
+{
+ explicit LocalResponseNormalizationFixtureSimple()
+ : LocalResponseNormalizationBaseFixture(1.0f, 1.0f, 1.0f)
+ {
+ SetupSingleInputSingleOutput({ 2, 2, 2, 1 }, "Placeholder", "LRN");
+ }
+};
+BOOST_FIXTURE_TEST_CASE(ParseSimpleLocalResponseNormalization, LocalResponseNormalizationFixtureSimple)
+{
+ RunTest<4>({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f },
+ { 0.5f, 0.4f, 0.3f, 0.23529412f, 0.1923077f, 0.16216217f, 0.14f, 0.12307692f });
+}
+
+
+struct LocalResponseNormalizationFixture : public LocalResponseNormalizationBaseFixture
+{
+ explicit LocalResponseNormalizationFixture()
+ : LocalResponseNormalizationBaseFixture(0.5f, 1.0f, 0.5f)
+ {
+ SetupSingleInputSingleOutput({1, 3, 3, 2}, "Placeholder", "LRN");
+ }
+};
+BOOST_FIXTURE_TEST_CASE(ParseLocalResponseNormalization, LocalResponseNormalizationFixture)
+{
+ RunTest<4>({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f,
+ 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f,
+ 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f},
+
+ {0.333333340f, 0.66666670f, 0.230769250f, 0.307692320f, 0.161290320f, 0.19354838f,
+ 0.122807020f, 0.14035088f, 0.098901100f, 0.109890110f, 0.082706770f, 0.09022556f,
+ 0.071038246f, 0.07650273f, 0.062240668f, 0.066390045f, 0.055374593f, 0.05863192f});
+}
+
+
+
+
+BOOST_AUTO_TEST_SUITE_END()
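
With depth_radius 1 and a single channel, the LRN window reduces to the element itself, so each expected value in the simple fixture is x / (bias + alpha * x^2)^beta. An illustrative check (alpha = beta = bias = 1 for that fixture; not repo code):

    vals = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0]
    out = [x / (1.0 + 1.0 * x * x) ** 1.0 for x in vals]
    # [0.5, 0.4, 0.3, 0.23529412, 0.1923077, 0.16216217, 0.14, 0.12307692]
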
diff --git a/src/armnnTfParser/test/MultiOutput.cpp b/src/armnnTfParser/test/MultiOutput.cpp
new file mode 100644
index 0000000000..56be33dab7
--- /dev/null
+++ b/src/armnnTfParser/test/MultiOutput.cpp
@@ -0,0 +1,144 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+
+#include <boost/test/unit_test.hpp>
+#include "armnnTfParser/ITfParser.hpp"
+#include "ParserPrototxtFixture.hpp"
+
+BOOST_AUTO_TEST_SUITE(TensorflowParser)
+
+struct MultiOutMatchFixture : public ParserPrototxtFixture<armnnTfParser::ITfParser>
+{
+ MultiOutMatchFixture()
+ {
+ m_Prototext = R"(
+node {
+ name: "input"
+ op: "Placeholder"
+ attr {
+ key: "dtype"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "shape"
+ value {
+ shape {
+ }
+ }
+ }
+}
+node {
+ name: "softmax1"
+ op: "Softmax"
+ input: "input:0"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+}
+ )";
+ SetupSingleInputSingleOutput({ 1, 7 }, "input", "softmax1");
+ }
+};
+
+BOOST_FIXTURE_TEST_CASE(MultiOutMatch, MultiOutMatchFixture)
+{
+ // The point of this test is to verify that the graph was parsed correctly.
+ // Here we make sure the softmax was connected to output slot 0 of the input layer.
+ RunTest<2>({ 0, 0, 10000, 0, 0, 0, 0 }, { 0, 0, 1, 0, 0, 0, 0 });
+}
+
+struct MultiOutFailFixture : public ParserPrototxtFixture<armnnTfParser::ITfParser>
+{
+ MultiOutFailFixture()
+ {
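+ // The softmax requests "input:1", i.e. the second output of the Placeholder,
+ // which only provides a single output, so parsing is expected to fail.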
+ m_Prototext = R"(
+node {
+ name: "input"
+ op: "Placeholder"
+ attr {
+ key: "dtype"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "shape"
+ value {
+ shape {
+ }
+ }
+ }
+}
+node {
+ name: "softmax1"
+ op: "Softmax"
+ input: "input:1"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+}
+ )";
+ BOOST_CHECK_THROW(SetupSingleInputSingleOutput({ 1, 7 }, "input", "softmax1"), armnn::ParseException);
+ }
+};
+
+BOOST_FIXTURE_TEST_CASE(MultiOutFail, MultiOutFailFixture)
+{
+ // Not running the graph because this is expected to throw an exception during parsing.
+}
+
+struct MultiOutInvalidFixture : public ParserPrototxtFixture<armnnTfParser::ITfParser>
+{
+ MultiOutInvalidFixture()
+ {
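+ // "input:-1" is not a valid output index, so parsing is expected to fail.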
+ m_Prototext = R"(
+node {
+ name: "input"
+ op: "Placeholder"
+ attr {
+ key: "dtype"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "shape"
+ value {
+ shape {
+ }
+ }
+ }
+}
+node {
+ name: "softmax1"
+ op: "Softmax"
+ input: "input:-1"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+}
+ )";
+ BOOST_CHECK_THROW(SetupSingleInputSingleOutput({ 1, 7 }, "input", "softmax1"), armnn::ParseException);
+ }
+};
+
+BOOST_FIXTURE_TEST_CASE(MultiOutInvalid, MultiOutInvalidFixture)
+{
+ // Not running the graph because this is expected to throw an exception during parsing.
+}
+
+
+BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
diff --git a/src/armnnTfParser/test/Multiplication.cpp b/src/armnnTfParser/test/Multiplication.cpp
new file mode 100644
index 0000000000..3a20fd1141
--- /dev/null
+++ b/src/armnnTfParser/test/Multiplication.cpp
@@ -0,0 +1,172 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+
+#include <boost/test/unit_test.hpp>
+#include "armnnTfParser/ITfParser.hpp"
+#include "ParserPrototxtFixture.hpp"
+
+BOOST_AUTO_TEST_SUITE(TensorflowParser)
+
+struct MultiplicationFixture : public ParserPrototxtFixture<armnnTfParser::ITfParser>
+{
+ MultiplicationFixture()
+ {
+ m_Prototext = "node { \n"
+ " name: \"graphInput\" \n"
+ " op: \"Placeholder\" \n"
+ " attr { \n"
+ " key: \"dtype\" \n"
+ " value { \n"
+ " type: DT_FLOAT \n"
+ " } \n"
+ " } \n"
+ " attr { \n"
+ " key: \"shape\" \n"
+ " value { \n"
+ " shape { \n"
+ " } \n"
+ " } \n"
+ " } \n"
+ " } \n"
+ " node { \n"
+ " name: \"softmax1\" \n"
+ " op: \"Softmax\" \n"
+ " input: \"graphInput\" \n"
+ " attr { \n"
+ " key: \"T\" \n"
+ " value { \n"
+ " type: DT_FLOAT \n"
+ " } \n"
+ " } \n"
+ " }\n"
+ " node {\n"
+ " name: \"softmax2\"\n"
+ " op : \"Softmax\"\n"
+ " input: \"graphInput\"\n"
+ " attr { \n"
+ " key: \"T\" \n"
+ " value { \n"
+ " type: DT_FLOAT \n"
+ " } \n"
+ " } \n"
+ " }\n"
+ " node {\n"
+ " name: \"multiplication\"\n"
+ " op : \"Mul\"\n"
+ " input: \"softmax1\"\n"
+ " input: \"softmax2\"\n"
+ " attr { \n"
+ " key: \"T\" \n"
+ " value { \n"
+ " type: DT_FLOAT \n"
+ " } \n"
+ " } \n"
+ " }\n";
+
+ SetupSingleInputSingleOutput({ 1, 7 }, "graphInput", "multiplication");
+ }
+};
+
+BOOST_FIXTURE_TEST_CASE(ParseMultiplication, MultiplicationFixture)
+{
+ RunTest<2>({ 0, 0, 10000, 0, 0, 0, 0 }, { 0, 0, 1, 0, 0, 0, 0 });
+}
+
+struct MultiplicationBroadcastFixture : public ParserPrototxtFixture<armnnTfParser::ITfParser>
+{
+ MultiplicationBroadcastFixture(const armnn::TensorShape& inputShape0, const armnn::TensorShape& inputShape1)
+ {
+ m_Prototext = R"(
+node {
+ name: "input0"
+ op: "Placeholder"
+ attr {
+ key: "dtype"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "shape"
+ value {
+ shape {
+ }
+ }
+ }
+}
+node {
+ name: "input1"
+ op: "Placeholder"
+ attr {
+ key: "dtype"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "shape"
+ value {
+ shape {
+ }
+ }
+ }
+}
+node {
+ name: "output"
+ op: "Mul"
+ input: "input0"
+ input: "input1"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+}
+ )";
+
+ Setup({ { "input0", inputShape0 },
+ { "input1", inputShape1 } },
+ { "output" });
+ }
+};
+
+struct MultiplicationBroadcastFixture4D1D : public MultiplicationBroadcastFixture
+{
+ MultiplicationBroadcastFixture4D1D() : MultiplicationBroadcastFixture({ 1, 2, 2, 3 }, { 1 }) {}
+};
+
+BOOST_FIXTURE_TEST_CASE(ParseMultiplicationBroadcast4D1D, MultiplicationBroadcastFixture4D1D)
+{
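+ // The single-element input1 is broadcast across the 1x2x2x3 input0,
+ // so every element is simply multiplied by 5.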
+ RunTest<4>({ { "input0", { 0.0f, 1.0f, 2.0f,
+ 3.0f, 4.0f, 5.0f,
+ 6.0f, 7.0f, 8.0f,
+ 9.0f, 10.0f, 11.0f } },
+ { "input1", { 5.0f } } },
+ { { "output", { 0.0f, 5.0f, 10.0f,
+ 15.0f, 20.0f, 25.0f,
+ 30.0f, 35.0f, 40.0f,
+ 45.0f, 50.0f, 55.0f } } });
+}
+
+struct MultiplicationBroadcastFixture1D4D : public MultiplicationBroadcastFixture
+{
+ MultiplicationBroadcastFixture1D4D() : MultiplicationBroadcastFixture({ 1 }, { 1, 2, 2, 3 }) {}
+};
+
+BOOST_FIXTURE_TEST_CASE(ParseMultiplicationBroadcast1D4D, MultiplicationBroadcastFixture1D4D)
+{
+ RunTest<4>({ { "input0", { 3.0f } },
+ { "input1", { 0.0f, 1.0f, 2.0f,
+ 3.0f, 4.0f, 5.0f,
+ 6.0f, 7.0f, 8.0f,
+ 9.0f, 10.0f, 11.0f } } },
+ { { "output", { 0.0f, 3.0f, 6.0f,
+ 9.0f, 12.0f, 15.0f,
+ 18.0f, 21.0f, 24.0f,
+ 27.0f, 30.0f, 33.0f } } });
+}
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/armnnTfParser/test/PassThru.cpp b/src/armnnTfParser/test/PassThru.cpp
new file mode 100644
index 0000000000..8462ec27cc
--- /dev/null
+++ b/src/armnnTfParser/test/PassThru.cpp
@@ -0,0 +1,52 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+#include <boost/test/unit_test.hpp>
+#include "armnnTfParser/ITfParser.hpp"
+#include "ParserPrototxtFixture.hpp"
+
+BOOST_AUTO_TEST_SUITE(TensorflowParser)
+
+struct PassThruFixture : public ParserPrototxtFixture<armnnTfParser::ITfParser>
+{
+ PassThruFixture()
+ {
+ m_Prototext = "node {\n"
+ " name: \"Placeholder\"\n"
+ " op: \"Placeholder\"\n"
+ " attr {\n"
+ " key: \"dtype\"\n"
+ " value {\n"
+ " type: DT_FLOAT\n"
+ " }\n"
+ " }\n"
+ " attr {\n"
+ " key: \"shape\"\n"
+ " value {\n"
+ " shape {\n"
+ " }\n"
+ " }\n"
+ " }\n"
+ "}\n";
+ SetupSingleInputSingleOutput({ 1, 7 }, "Placeholder", "Placeholder");
+ }
+};
+
+BOOST_FIXTURE_TEST_CASE(ValidateOutput, PassThruFixture)
+{
+ BOOST_TEST(m_Parser->GetNetworkOutputBindingInfo("Placeholder").second.GetNumDimensions() == 2);
+ BOOST_TEST(m_Parser->GetNetworkOutputBindingInfo("Placeholder").second.GetShape()[0] == 1);
+ BOOST_TEST(m_Parser->GetNetworkOutputBindingInfo("Placeholder").second.GetShape()[1] == 7);
+}
+
+BOOST_FIXTURE_TEST_CASE(RunGraph, PassThruFixture)
+{
+ armnn::TensorInfo inputTensorInfo = m_Parser->GetNetworkInputBindingInfo("Placeholder").second;
+ auto input = MakeRandomTensor<float, 2>(inputTensorInfo, 378346);
+ std::vector<float> inputVec;
+ inputVec.assign(input.data(), input.data() + input.num_elements());
+ RunTest<2>(inputVec, inputVec); // The passthru network should output the same as the input
+}
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/armnnTfParser/test/Pooling.cpp b/src/armnnTfParser/test/Pooling.cpp
new file mode 100644
index 0000000000..36ffa47def
--- /dev/null
+++ b/src/armnnTfParser/test/Pooling.cpp
@@ -0,0 +1,112 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+
+#include <boost/test/unit_test.hpp>
+#include "armnnTfParser/ITfParser.hpp"
+#include "ParserPrototxtFixture.hpp"
+
+BOOST_AUTO_TEST_SUITE(TensorflowParser)
+
+
+struct Pooling2dFixture : public ParserPrototxtFixture<armnnTfParser::ITfParser>
+{
+ explicit Pooling2dFixture(const char* poolingtype)
+ {
+ m_Prototext = "node {\n"
+ " name: \"Placeholder\"\n"
+ " op: \"Placeholder\"\n"
+ " attr {\n"
+ " key: \"dtype\"\n"
+ " value {\n"
+ " type: DT_FLOAT\n"
+ " }\n"
+ " }\n"
+ " attr {\n"
+ " key: \"value\"\n"
+ " value {\n"
+ " tensor {\n"
+ " dtype: DT_FLOAT\n"
+ " tensor_shape {\n"
+ " }\n"
+ " }\n"
+ " }\n"
+ " }\n"
+ " }\n"
+ "node {\n"
+ " name: \"";
+ m_Prototext.append(poolingtype);
+ m_Prototext.append("\"\n"
+ " op: \"");
+ m_Prototext.append(poolingtype);
+ m_Prototext.append("\"\n"
+ " input: \"Placeholder\"\n"
+ " attr {\n"
+ " key: \"T\"\n"
+ " value {\n"
+ " type: DT_FLOAT\n"
+ " }\n"
+ " }\n"
+ " attr {\n"
+ " key: \"data_format\"\n"
+ " value {\n"
+ " s: \"NHWC\"\n"
+ " }\n"
+ " }\n"
+ " attr {\n"
+ " key: \"ksize\"\n"
+ " value {\n"
+ " list {\n"
+ " i: 1\n"
+ " i: 2\n"
+ " i: 2\n"
+ " i: 1\n"
+ " }\n"
+ " }\n"
+ " }\n"
+ " attr {\n"
+ " key: \"padding\"\n"
+ " value {\n"
+ " s: \"VALID\"\n"
+ " }\n"
+ " }\n"
+ " attr {\n"
+ " key: \"strides\"\n"
+ " value {\n"
+ " list {\n"
+ " i: 1\n"
+ " i: 1\n"
+ " i: 1\n"
+ " i: 1\n"
+ " }\n"
+ " }\n"
+ " }\n"
+ "}\n");
+
+ SetupSingleInputSingleOutput({ 1, 2, 2, 1 }, "Placeholder", poolingtype);
+ }
+};
+
+
+struct MaxPoolFixture : Pooling2dFixture
+{
+ MaxPoolFixture() : Pooling2dFixture("MaxPool") {}
+};
+BOOST_FIXTURE_TEST_CASE(ParseMaxPool, MaxPoolFixture)
+{
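+ // The 2x2 kernel covers the whole 2x2 input, so the single output is max(1, 2, 3, -4) = 3.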
+ RunTest<4>({1.0f, 2.0f, 3.0f, -4.0f}, {3.0f});
+}
+
+
+struct AvgPoolFixture : Pooling2dFixture
+{
+ AvgPoolFixture() : Pooling2dFixture("AvgPool") {}
+};
+BOOST_FIXTURE_TEST_CASE(ParseAvgPool, AvgPoolFixture)
+{
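+ // The 2x2 kernel covers the whole 2x2 input, so the single output is the mean (1+2+3+4)/4 = 2.5.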
+ RunTest<4>({1.0f, 2.0f, 3.0f, 4.0f}, {2.5f});
+}
+
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/armnnTfParser/test/Reshape.cpp b/src/armnnTfParser/test/Reshape.cpp
new file mode 100644
index 0000000000..4eb6b12467
--- /dev/null
+++ b/src/armnnTfParser/test/Reshape.cpp
@@ -0,0 +1,86 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+
+#include <boost/test/unit_test.hpp>
+#include "armnnTfParser/ITfParser.hpp"
+#include "ParserPrototxtFixture.hpp"
+
+BOOST_AUTO_TEST_SUITE(TensorflowParser)
+
+
+struct ReshapeFixture : public ParserPrototxtFixture<armnnTfParser::ITfParser>
+{
+ ReshapeFixture()
+ {
+ m_Prototext = "node { \n"
+ " name: \"graphInput\" \n"
+ " op: \"Placeholder\" \n"
+ " attr { \n"
+ " key: \"dtype\" \n"
+ " value { \n"
+ " type: DT_FLOAT \n"
+ " } \n"
+ " } \n"
+ " attr { \n"
+ " key: \"shape\" \n"
+ " value { \n"
+ " shape { \n"
+ " } \n"
+ " } \n"
+ " } \n"
+ " } \n"
+ "node { \n"
+ " name: \"Reshape/shape\" \n"
+ " op: \"Const\" \n"
+ " attr { \n"
+ " key: \"dtype\" \n"
+ " value { \n"
+ " type: DT_INT32 \n"
+ " } \n"
+ " } \n"
+ " attr { \n"
+ " key: \"value\" \n"
+ " value { \n"
+ " tensor { \n"
+ " dtype: DT_INT32 \n"
+ " tensor_shape { \n"
+ " dim { \n"
+ " size: 2 \n"
+ " } \n"
+ " } \n"
+ " tensor_content: \"\\002\\000\\000\\000\\002\\000\\000\\000\" \n"
+ " } \n"
+ " } \n"
+ " } \n"
+ "} \n"
+ "node { \n"
+ " name: \"Reshape\" \n"
+ " op: \"Reshape\" \n"
+ " input: \"graphInput\" \n"
+ " input: \"Reshape/shape\" \n"
+ " attr { \n"
+ " key: \"T\" \n"
+ " value { \n"
+ " type: DT_FLOAT \n"
+ " } \n"
+ " } \n"
+ " attr { \n"
+ " key: \"Tshape\" \n"
+ " value { \n"
+ " type: DT_INT32 \n"
+ " } \n"
+ " } \n"
+ "} \n";
+
+ SetupSingleInputSingleOutput({1, 4}, "graphInput", "Reshape");
+ }
+};
+
+BOOST_FIXTURE_TEST_CASE(ParseReshape, ReshapeFixture)
+{
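+ // The Reshape/shape const carries tensor_content "\002\000\000\000\002\000\000\000",
+ // i.e. two little-endian int32s encoding the target shape { 2, 2 }; reshaping the
+ // 1x4 input to 2x2 leaves the element values untouched.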
+ RunTest<2>({ 0.0f, 1.0f, 2.0f, 3.0f }, { 0.0f, 1.0f, 2.0f, 3.0f });
+}
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/armnnTfParser/test/ResizeBilinear.cpp b/src/armnnTfParser/test/ResizeBilinear.cpp
new file mode 100644
index 0000000000..30d898f5bb
--- /dev/null
+++ b/src/armnnTfParser/test/ResizeBilinear.cpp
@@ -0,0 +1,114 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+
+#include <boost/test/unit_test.hpp>
+#include "armnnTfParser/ITfParser.hpp"
+#include "ParserPrototxtFixture.hpp"
+
+BOOST_AUTO_TEST_SUITE(TensorflowParser)
+
+struct ResizeBilinearFixture : public ParserPrototxtFixture<armnnTfParser::ITfParser>
+{
+ ResizeBilinearFixture()
+ {
+ m_Prototext = R"(
+node {
+ name: "graphInput"
+ op: "Placeholder"
+ attr {
+ key: "dtype"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_FLOAT
+ tensor_shape {
+ dim {
+ size: 1
+ }
+ dim {
+ size: 3
+ }
+ dim {
+ size: 3
+ }
+ dim {
+ size: 1
+ }
+ }
+ tensor_content:
+"\000\000\000\000\000\000\200?\000\000\000@\000\000@@\000\000\200@\000\000\240@\000\000\300@\000\000\340@\000\000\000A"
+ }
+ }
+ }
+}
+node {
+ name: "resizeBilinearLayer/size"
+ op: "Const"
+ attr {
+ key: "dtype"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_INT32
+ tensor_shape {
+ dim {
+ size: 2
+ }
+ }
+ tensor_content: "\005\000\000\000\005\000\000\000"
+ }
+ }
+ }
+}
+node {
+ name: "resizeBilinearLayer"
+ op: "ResizeBilinear"
+ input: "graphInput"
+ input: "resizeBilinearLayer/size"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "align_corners"
+ value {
+ b: false
+ }
+ }
+}
+ )";
+
+ SetupSingleInputSingleOutput({ 1, 3, 3, 1 }, "graphInput", "resizeBilinearLayer");
+ }
+};
+
+BOOST_FIXTURE_TEST_CASE(ParseResizeBilinear, ResizeBilinearFixture)
+{
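+ // The expected values are a 3x3 -> 5x5 bilinear upscale with align_corners = false:
+ // each output coordinate maps back to input space with a scale of 3/5 and is clamped
+ // at the border, e.g. out[0][1] samples x = 0.6 giving 0.6f.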
+ RunTest<4>(// input data
+ { 0.0f, 1.0f, 2.0f,
+ 3.0f, 4.0f, 5.0f,
+ 6.0f, 7.0f, 8.0f },
+ // expected output data
+ { 0.0f, 0.6f, 1.2f, 1.8f, 2.0f,
+ 1.8f, 2.4f, 3.0f, 3.6f, 3.8f,
+ 3.6f, 4.2f, 4.8f, 5.4f, 5.6f,
+ 5.4f, 6.0f, 6.6f, 7.2f, 7.4f,
+ 6.0f, 6.6f, 7.2f, 7.8f, 8.0f });
+
+}
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/armnnTfParser/test/Shape.cpp b/src/armnnTfParser/test/Shape.cpp
new file mode 100644
index 0000000000..7b414ecfac
--- /dev/null
+++ b/src/armnnTfParser/test/Shape.cpp
@@ -0,0 +1,94 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+
+#include <boost/test/unit_test.hpp>
+#include "armnnTfParser/ITfParser.hpp"
+#include "ParserPrototxtFixture.hpp"
+
+BOOST_AUTO_TEST_SUITE(TensorflowParser)
+
+struct ShapeFixture : public ParserPrototxtFixture<armnnTfParser::ITfParser>
+{
+ ShapeFixture()
+ {
+ m_Prototext =
+ "node { \n"
+ " name: \"Placeholder\" \n"
+ " op: \"Placeholder\" \n"
+ " attr { \n"
+ " key: \"dtype\" \n"
+ " value { \n"
+ " type: DT_FLOAT \n"
+ " } \n"
+ " } \n"
+ " attr { \n"
+ " key: \"shape\" \n"
+ " value { \n"
+ " shape { \n"
+ " dim { \n"
+ " size: 1 \n"
+ " } \n"
+ " dim { \n"
+ " size: 1 \n"
+ " } \n"
+ " dim { \n"
+ " size: 1 \n"
+ " } \n"
+ " dim { \n"
+ " size: 4 \n"
+ " } \n"
+ " } \n"
+ " } \n"
+ " } \n"
+ "} \n"
+ "node { \n"
+ " name: \"shapeTest\" \n"
+ " op: \"Shape\" \n"
+ " input: \"Placeholder\" \n"
+ " attr { \n"
+ " key: \"T\" \n"
+ " value { \n"
+ " type: DT_FLOAT \n"
+ " } \n"
+ " } \n"
+ " attr { \n"
+ " key: \"out_type\" \n"
+ " value { \n"
+ " type: DT_INT32 \n"
+ " } \n"
+ " } \n"
+ "} \n"
+ "node { \n"
+ " name: \"Reshape\" \n"
+ " op: \"Reshape\" \n"
+ " input: \"Placeholder\" \n"
+ " input: \"shapeTest\" \n"
+ " attr { \n"
+ " key: \"T\" \n"
+ " value { \n"
+ " type: DT_FLOAT \n"
+ " } \n"
+ " } \n"
+ " attr { \n"
+ " key: \"Tshape\" \n"
+ " value { \n"
+ " type: DT_INT32 \n"
+ " } \n"
+ " } \n"
+ "} \n";
+
+ SetupSingleInputSingleOutput({1, 4}, "Placeholder", "Reshape");
+ }
+};
+
+BOOST_FIXTURE_TEST_CASE(ParseShape, ShapeFixture)
+{
+ // Note: the test's output cannot be the int32 Shape tensor itself, because Arm NN
+ // only supports u8 and float layers. For that reason a Reshape layer is added which
+ // reshapes the input back to its original dimensions, leaving the data unchanged.
+ RunTest<2>({ 0.0f, 1.0f, 2.0f, 3.0f }, { 0.0f, 1.0f, 2.0f, 3.0f });
+}
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/armnnTfParser/test/Softmax.cpp b/src/armnnTfParser/test/Softmax.cpp
new file mode 100644
index 0000000000..1ab28ea3aa
--- /dev/null
+++ b/src/armnnTfParser/test/Softmax.cpp
@@ -0,0 +1,55 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+
+#include <boost/test/unit_test.hpp>
+#include "armnnTfParser/ITfParser.hpp"
+#include "ParserPrototxtFixture.hpp"
+
+BOOST_AUTO_TEST_SUITE(TensorflowParser)
+
+struct SoftmaxFixture : public ParserPrototxtFixture<armnnTfParser::ITfParser>
+{
+ SoftmaxFixture()
+ {
+ m_Prototext = "node {\n"
+ " name: \"blah\"\n"
+ " op: \"Placeholder\"\n"
+ " attr {\n"
+ " key: \"dtype\"\n"
+ " value {\n"
+ " type: DT_FLOAT\n"
+ " }\n"
+ " }\n"
+ " attr {\n"
+ " key: \"shape\"\n"
+ " value {\n"
+ " shape {\n"
+ " }\n"
+ " }\n"
+ " }\n"
+ "}\n"
+ "node {\n"
+ " name: \"blah2\"\n"
+ " op: \"Softmax\"\n"
+ " input: \"blah\"\n"
+ " attr {\n"
+ " key: \"T\"\n"
+ " value {\n"
+ " type: DT_FLOAT\n"
+ " }\n"
+ " }\n"
+ "}\n";
+
+ SetupSingleInputSingleOutput({ 1, 7 }, "blah", "blah2");
+ }
+};
+
+BOOST_FIXTURE_TEST_CASE(ParseSoftmax, SoftmaxFixture)
+{
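+ // A very large logit at index 2 saturates the softmax, so the expected output is
+ // (numerically) 1 at that position and 0 elsewhere.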
+ RunTest<2>({ 0, 0, 10000, 0, 0, 0, 0 }, { 0, 0, 1, 0, 0, 0, 0 });
+}
+
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/armnnTfParser/test/Squeeze.cpp b/src/armnnTfParser/test/Squeeze.cpp
new file mode 100644
index 0000000000..d2d7d49494
--- /dev/null
+++ b/src/armnnTfParser/test/Squeeze.cpp
@@ -0,0 +1,108 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+
+#include <boost/test/unit_test.hpp>
+#include "armnnTfParser/ITfParser.hpp"
+#include "ParserPrototxtFixture.hpp"
+
+BOOST_AUTO_TEST_SUITE(TensorflowParser)
+
+
+template <bool withDimZero, bool withDimOne>
+struct SqueezeFixture : public ParserPrototxtFixture<armnnTfParser::ITfParser>
+{
+ SqueezeFixture()
+ {
+ m_Prototext =
+ "node { \n"
+ " name: \"graphInput\" \n"
+ " op: \"Placeholder\" \n"
+ " attr { \n"
+ " key: \"dtype\" \n"
+ " value { \n"
+ " type: DT_FLOAT \n"
+ " } \n"
+ " } \n"
+ " attr { \n"
+ " key: \"shape\" \n"
+ " value { \n"
+ " shape { \n"
+ " } \n"
+ " } \n"
+ " } \n"
+ " } \n"
+ "node { \n"
+ " name: \"Squeeze\" \n"
+ " op: \"Squeeze\" \n"
+ " input: \"graphInput\" \n"
+ " attr { \n"
+ " key: \"T\" \n"
+ " value { \n"
+ " type: DT_FLOAT \n"
+ " } \n"
+ " } \n"
+ " attr { \n"
+ " key: \"squeeze_dims\" \n"
+ " value { \n"
+ " list {\n";
+
+ if (withDimZero)
+ {
+ m_Prototext += "i:0\n";
+ }
+
+ if (withDimOne)
+ {
+ m_Prototext += "i:1\n";
+ }
+
+ m_Prototext +=
+ " } \n"
+ " } \n"
+ " } \n"
+ "} \n";
+
+ SetupSingleInputSingleOutput({ 1, 1, 2, 2 }, "graphInput", "Squeeze");
+ }
+};
+
+typedef SqueezeFixture<false, false> ImpliedDimensionsSqueezeFixture;
+typedef SqueezeFixture<true, false> ExplicitDimensionZeroSqueezeFixture;
+typedef SqueezeFixture<false, true> ExplicitDimensionOneSqueezeFixture;
+typedef SqueezeFixture<true, true> ExplicitDimensionsSqueezeFixture;
+
+BOOST_FIXTURE_TEST_CASE(ParseImplicitSqueeze, ImpliedDimensionsSqueezeFixture)
+{
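+ // With an empty squeeze_dims list, every dimension of size 1 is removed,
+ // so the 1x1x2x2 input becomes 2x2.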
+ BOOST_TEST((m_Parser->GetNetworkOutputBindingInfo("Squeeze").second.GetShape() ==
+ armnn::TensorShape({2,2})));
+ RunTest<2>({ 1.0f, 2.0f, 3.0f, 4.0f },
+ { 1.0f, 2.0f, 3.0f, 4.0f });
+}
+
+BOOST_FIXTURE_TEST_CASE(ParseDimensionZeroSqueeze, ExplicitDimensionZeroSqueezeFixture)
+{
+ BOOST_TEST((m_Parser->GetNetworkOutputBindingInfo("Squeeze").second.GetShape() ==
+ armnn::TensorShape({1,2,2})));
+ RunTest<3>({ 1.0f, 2.0f, 3.0f, 4.0f },
+ { 1.0f, 2.0f, 3.0f, 4.0f });
+}
+
+BOOST_FIXTURE_TEST_CASE(ParseDimensionOneSqueeze, ExplicitDimensionOneSqueezeFixture)
+{
+ BOOST_TEST((m_Parser->GetNetworkOutputBindingInfo("Squeeze").second.GetShape() ==
+ armnn::TensorShape({1,2,2})));
+ RunTest<3>({ 1.0f, 2.0f, 3.0f, 4.0f },
+ { 1.0f, 2.0f, 3.0f, 4.0f });
+}
+
+BOOST_FIXTURE_TEST_CASE(ParseExplicitDimensionsSqueeze, ExplicitDimensionsSqueezeFixture)
+{
+ BOOST_TEST((m_Parser->GetNetworkOutputBindingInfo("Squeeze").second.GetShape() ==
+ armnn::TensorShape({2,2})));
+ RunTest<2>({ 1.0f, 2.0f, 3.0f, 4.0f },
+ { 1.0f, 2.0f, 3.0f, 4.0f });
+}
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/armnnTfParser/test/TestDependencies.cpp b/src/armnnTfParser/test/TestDependencies.cpp
new file mode 100644
index 0000000000..13ab17c5b6
--- /dev/null
+++ b/src/armnnTfParser/test/TestDependencies.cpp
@@ -0,0 +1,296 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+
+#include <boost/test/unit_test.hpp>
+#include "armnnTfParser/ITfParser.hpp"
+#include "ParserPrototxtFixture.hpp"
+
+BOOST_AUTO_TEST_SUITE(TensorflowParser)
+
+// Graph which tests that nodes are re-ordered in the queue when they are encountered a second time.
+// In this case R0 will be encountered first via R1 and then via R2. At that time
+// we need to make sure that R0 (and the I on which it is dependent) is moved to the front again
+// so that it is before both R1 and R2.
+// I
+// |
+// R0
+// / \'
+// R1 R2
+// \ |
+// \ R3
+// \|
+// O
+struct RediscoveredDependenciesFixture : public ParserPrototxtFixture<armnnTfParser::ITfParser>
+{
+ RediscoveredDependenciesFixture()
+ {
+ // input = tf.placeholder(tf.float32, 1, "input")
+ // relu0 = tf.nn.relu(input, "relu0")
+ // relu1 = tf.nn.relu(relu0, "relu1")
+ // relu2 = tf.nn.relu(relu0, "relu2")
+ // relu3 = tf.nn.relu(relu2, "relu3")
+ // output = tf.add(relu1, relu3, "output")
+ m_Prototext = R"(
+ node {
+ name: "input"
+ op: "Placeholder"
+ attr {
+ key: "dtype"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "shape"
+ value {
+ shape {
+ dim {
+ size: 1
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "relu0"
+ op: "Relu"
+ input: "input"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ }
+ node {
+ name: "relu1"
+ op: "Relu"
+ input: "relu0"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ }
+ node {
+ name: "relu2"
+ op: "Relu"
+ input: "relu0"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ }
+ node {
+ name: "relu3"
+ op: "Relu"
+ input: "relu2"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ }
+ node {
+ name: "output"
+ op: "Add"
+ input: "relu1"
+ input: "relu3"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ }
+ )";
+ SetupSingleInputSingleOutput({ 1 }, "input", "output");
+ }
+};
+
+BOOST_FIXTURE_TEST_CASE(RediscoveredDependencies, RediscoveredDependenciesFixture)
+{
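+ // All the Relu layers pass the positive input through unchanged, so the final
+ // Add produces relu1 + relu3 = 1 + 1 = 2.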
+ RunTest<1>({1}, {2});
+}
+
+// Tests that a simple cycle in the TensorFlow graph is detected and an exception thrown, rather than the TfParser
+// getting stuck in an infinite loop.
+BOOST_AUTO_TEST_CASE(SimpleCycle)
+{
+ const char* prototext = R"(
+node {
+ name: "r1"
+ op: "Relu"
+ input: "r2"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+}
+node {
+ name: "r2"
+ op: "Relu"
+ input: "r1"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+}
+ )";
+ armnnTfParser::ITfParserPtr parser = armnnTfParser::ITfParser::Create();
+ BOOST_CHECK_THROW(parser->CreateNetworkFromString(prototext, {}, { "r2" }), armnn::ParseException);
+}
+
+// Similar to the above SimpleCycle test, but has a single node which connects to itself.
+BOOST_AUTO_TEST_CASE(SingleNodeCycle)
+{
+ const char* prototext = R"(
+node {
+ name: "r1"
+ op: "Relu"
+ input: "r1"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+}
+ )";
+ armnnTfParser::ITfParserPtr parser = armnnTfParser::ITfParser::Create();
+ BOOST_CHECK_THROW(parser->CreateNetworkFromString(prototext, {}, { "r1" }), armnn::ParseException);
+}
+
+// Similar to the above SimpleCycle test, but with a more complicated graph.
+// I
+// |
+// A2---<---<-
+// / \' |
+// R1 R2 |
+// \ | |
+// \ R3 |
+// \| |
+// A1-->--->|
+//
+BOOST_AUTO_TEST_CASE(ComplexCycle)
+{
+ // input = tf.placeholder(tf.float32, 1, "input")
+ // add2 = tf.add(input, add1, "add2") // This line won't actually run in TF, because add1 is not yet defined
+ // relu1 = tf.nn.relu(add2, "relu1")
+ // relu2 = tf.nn.relu(add2, "relu2")
+ // relu3 = tf.nn.relu(relu2, "relu3")
+ // add1 = tf.add(relu1, relu3, "add1")
+ const char* prototext = R"(
+ node {
+ name: "input"
+ op: "Placeholder"
+ attr {
+ key: "dtype"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "shape"
+ value {
+ shape {
+ dim {
+ size: 1
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "add2"
+ op: "Add"
+ input: "input"
+ input: "add1"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ }
+ node {
+ name: "relu1"
+ op: "Relu"
+ input: "add2"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ }
+ node {
+ name: "relu2"
+ op: "Relu"
+ input: "add2"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ }
+ node {
+ name: "relu3"
+ op: "Relu"
+ input: "relu2"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ }
+ node {
+ name: "add1"
+ op: "Add"
+ input: "relu1"
+ input: "relu3"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ }
+ )";
+ armnnTfParser::ITfParserPtr parser = armnnTfParser::ITfParser::Create();
+ BOOST_CHECK_THROW(parser->CreateNetworkFromString(prototext, {}, { "add1" }), armnn::ParseException);
+}
+
+// Tests that a graph with an input that is not present throws a ParseException.
+BOOST_AUTO_TEST_CASE(InvalidInput)
+{
+ const char* prototext = R"(
+node {
+ name: "r1"
+ op: "Relu"
+ input: "a-node-that-does-not-exist"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+}
+ )";
+ armnnTfParser::ITfParserPtr parser = armnnTfParser::ITfParser::Create();
+ BOOST_CHECK_THROW(parser->CreateNetworkFromString(prototext, {}, { "r1" }), armnn::ParseException);
+}
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/armnnTfParser/test/TestMultiInputsOutputs.cpp b/src/armnnTfParser/test/TestMultiInputsOutputs.cpp
new file mode 100644
index 0000000000..5eea616ec8
--- /dev/null
+++ b/src/armnnTfParser/test/TestMultiInputsOutputs.cpp
@@ -0,0 +1,92 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+
+#include <boost/test/unit_test.hpp>
+#include "armnnTfParser/ITfParser.hpp"
+#include "ParserPrototxtFixture.hpp"
+
+BOOST_AUTO_TEST_SUITE(TensorflowParser)
+
+struct MultiInputsOutputsFixture : public ParserPrototxtFixture<armnnTfParser::ITfParser>
+{
+ MultiInputsOutputsFixture()
+ {
+ // input1 = tf.placeholder(tf.float32, shape=[], name = "input1")
+ // input2 = tf.placeholder(tf.float32, shape = [], name = "input2")
+ // add1 = tf.add(input1, input2, name = "add1")
+ // add2 = tf.add(input1, input2, name = "add2")
+ m_Prototext = R"(
+node {
+ name: "input1"
+ op: "Placeholder"
+ attr {
+ key: "dtype"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "shape"
+ value {
+ shape {
+ }
+ }
+ }
+}
+node {
+ name: "input2"
+ op: "Placeholder"
+ attr {
+ key: "dtype"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "shape"
+ value {
+ shape {
+ }
+ }
+ }
+}
+node {
+ name: "add1"
+ op: "Add"
+ input: "input1"
+ input: "input2"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+}
+node {
+ name: "add2"
+ op: "Add"
+ input: "input1"
+ input: "input2"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+}
+ )";
+ Setup({ { "input1", { 1 } },
+ { "input2", { 1 } } },
+ { "add1", "add2" });
+ }
+};
+
+BOOST_FIXTURE_TEST_CASE(MultiInputsOutputs, MultiInputsOutputsFixture)
+{
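+ // Both Add nodes consume both inputs, so each output should be 12 + 13 = 25.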
+ RunTest<1>({ { "input1", {12.0f} }, { "input2", { 13.0f } } },
+ { { "add1", { 25.0f } }, { "add2", { 25.0f } } });
+}
+
+BOOST_AUTO_TEST_SUITE_END()