From c577f2c6a3b4ddb6ba87a882723c53a248afbeba Mon Sep 17 00:00:00 2001 From: telsoa01 Date: Fri, 31 Aug 2018 09:22:23 +0100 Subject: Release 18.08 --- src/armnnTfParser/test/Activations.cpp | 6 +- src/armnnTfParser/test/Addition.cpp | 2 +- src/armnnTfParser/test/BiasAdd.cpp | 2 +- src/armnnTfParser/test/BroadcastForAdd.cpp | 6 +- src/armnnTfParser/test/Concat.cpp | 2 +- src/armnnTfParser/test/ConcatOfConcats.cpp | 2 +- src/armnnTfParser/test/Constant.cpp | 20 +-- src/armnnTfParser/test/Convolution2d.cpp | 15 +- src/armnnTfParser/test/DepthwiseConvolution2d.cpp | 2 +- src/armnnTfParser/test/FullyConnected.cpp | 38 ++--- src/armnnTfParser/test/FusedBatchNorm.cpp | 6 +- src/armnnTfParser/test/Identity.cpp | 6 +- .../test/LocalResponseNormalization.cpp | 3 +- src/armnnTfParser/test/MaximumForLeakyRelu.cpp | 169 +++++++++++++++++++++ src/armnnTfParser/test/MultiOutput.cpp | 6 +- src/armnnTfParser/test/Multiplication.cpp | 4 +- src/armnnTfParser/test/PassThru.cpp | 4 +- src/armnnTfParser/test/Pooling.cpp | 3 +- src/armnnTfParser/test/Reshape.cpp | 3 +- src/armnnTfParser/test/ResizeBilinear.cpp | 6 +- src/armnnTfParser/test/Shape.cpp | 7 +- src/armnnTfParser/test/Softmax.cpp | 2 +- src/armnnTfParser/test/Squeeze.cpp | 3 +- src/armnnTfParser/test/TestDependencies.cpp | 26 ++-- src/armnnTfParser/test/TestMultiInputsOutputs.cpp | 10 +- 25 files changed, 254 insertions(+), 99 deletions(-) create mode 100644 src/armnnTfParser/test/MaximumForLeakyRelu.cpp (limited to 'src/armnnTfParser/test') diff --git a/src/armnnTfParser/test/Activations.cpp b/src/armnnTfParser/test/Activations.cpp index 72ed64d653..595fce768e 100644 --- a/src/armnnTfParser/test/Activations.cpp +++ b/src/armnnTfParser/test/Activations.cpp @@ -9,8 +9,7 @@ BOOST_AUTO_TEST_SUITE(TensorflowParser) - -struct ActivationFixture : public ParserPrototxtFixture +struct ActivationFixture : public armnnUtils::ParserPrototxtFixture { explicit ActivationFixture(const char* activationFunction) { @@ -107,7 +106,4 @@ 
BOOST_FIXTURE_TEST_CASE(ParseTanh, TanhFixture) { -0.09966799f, -0.19737528f, -0.29131261f, -0.379949f, 0.09966799f, 0.19737528f, 0.29131261f }); } - - - BOOST_AUTO_TEST_SUITE_END() diff --git a/src/armnnTfParser/test/Addition.cpp b/src/armnnTfParser/test/Addition.cpp index c9e69268c6..c642b5a45a 100644 --- a/src/armnnTfParser/test/Addition.cpp +++ b/src/armnnTfParser/test/Addition.cpp @@ -9,7 +9,7 @@ BOOST_AUTO_TEST_SUITE(TensorflowParser) -struct AdditionFixture : public ParserPrototxtFixture +struct AdditionFixture : public armnnUtils::ParserPrototxtFixture { AdditionFixture() { diff --git a/src/armnnTfParser/test/BiasAdd.cpp b/src/armnnTfParser/test/BiasAdd.cpp index e29aeb1057..1e9911d717 100644 --- a/src/armnnTfParser/test/BiasAdd.cpp +++ b/src/armnnTfParser/test/BiasAdd.cpp @@ -9,7 +9,7 @@ BOOST_AUTO_TEST_SUITE(TensorflowParser) -struct BiasAddFixture : public ParserPrototxtFixture +struct BiasAddFixture : public armnnUtils::ParserPrototxtFixture { explicit BiasAddFixture(const std::string& dataFormat) { diff --git a/src/armnnTfParser/test/BroadcastForAdd.cpp b/src/armnnTfParser/test/BroadcastForAdd.cpp index 4c9731d7fc..aab6dbfd79 100644 --- a/src/armnnTfParser/test/BroadcastForAdd.cpp +++ b/src/armnnTfParser/test/BroadcastForAdd.cpp @@ -6,10 +6,10 @@ #include #include "armnnTfParser/ITfParser.hpp" #include "ParserPrototxtFixture.hpp" -// This is a special case for add, which supports broadcasting +// This is a special case for add, which supports broadcasting. 
BOOST_AUTO_TEST_SUITE(TensorflowParser) -struct BroadcastForAddFixtureSlot1 : public ParserPrototxtFixture +struct BroadcastForAddFixtureSlot1 : public armnnUtils::ParserPrototxtFixture { BroadcastForAddFixtureSlot1() { @@ -71,7 +71,7 @@ struct BroadcastForAddFixtureSlot1 : public ParserPrototxtFixture +struct BroadcastForAddFixtureSlot0 : public armnnUtils::ParserPrototxtFixture { BroadcastForAddFixtureSlot0() { diff --git a/src/armnnTfParser/test/Concat.cpp b/src/armnnTfParser/test/Concat.cpp index a7d5ea03af..3e39bef2e7 100644 --- a/src/armnnTfParser/test/Concat.cpp +++ b/src/armnnTfParser/test/Concat.cpp @@ -9,7 +9,7 @@ BOOST_AUTO_TEST_SUITE(TensorflowParser) -struct ConcatFixture : public ParserPrototxtFixture +struct ConcatFixture : public armnnUtils::ParserPrototxtFixture { explicit ConcatFixture(const armnn::TensorShape& inputShape0, const armnn::TensorShape& inputShape1, unsigned int concatDim) diff --git a/src/armnnTfParser/test/ConcatOfConcats.cpp b/src/armnnTfParser/test/ConcatOfConcats.cpp index 7316b9f1ac..2832159acc 100644 --- a/src/armnnTfParser/test/ConcatOfConcats.cpp +++ b/src/armnnTfParser/test/ConcatOfConcats.cpp @@ -9,7 +9,7 @@ BOOST_AUTO_TEST_SUITE(TensorflowParser) -struct ConcatOfConcatsFixture : public ParserPrototxtFixture +struct ConcatOfConcatsFixture : public armnnUtils::ParserPrototxtFixture { explicit ConcatOfConcatsFixture(const armnn::TensorShape& inputShape0, const armnn::TensorShape& inputShape1, const armnn::TensorShape& inputShape2, const armnn::TensorShape& inputShape3, diff --git a/src/armnnTfParser/test/Constant.cpp b/src/armnnTfParser/test/Constant.cpp index 09587fc3d5..bc8b36d61b 100644 --- a/src/armnnTfParser/test/Constant.cpp +++ b/src/armnnTfParser/test/Constant.cpp @@ -14,13 +14,13 @@ BOOST_AUTO_TEST_SUITE(TensorflowParser) // Tests that a Const node in Tensorflow can be converted to a ConstLayer in armnn (as opposed to most // Const nodes which are used as weight inputs for convolutions etc. 
and are therefore not converted to // armnn ConstLayers). -struct ConstantFixture : public ParserPrototxtFixture +struct ConstantFixture : public armnnUtils::ParserPrototxtFixture { ConstantFixture() { - // input = tf.placeholder(tf.float32, name = "input") - // const = tf.constant([17], tf.float32, [1]) - // output = tf.add(input, const, name = "output") + // Input = tf.placeholder(tf.float32, name = "input") + // Const = tf.constant([17], tf.float32, [1]) + // Output = tf.add(input, const, name = "output") m_Prototext = R"( node { @@ -90,12 +90,12 @@ BOOST_FIXTURE_TEST_CASE(Constant, ConstantFixture) // Tests that a single Const node in Tensorflow can be used twice by a dependant node. This should result in only // a single armnn ConstLayer being created. -struct ConstantReusedFixture : public ParserPrototxtFixture +struct ConstantReusedFixture : public armnnUtils::ParserPrototxtFixture { ConstantReusedFixture() { - // const = tf.constant([17], tf.float32, [1]) - // output = tf.add(const, const, name = "output") + // Const = tf.constant([17], tf.float32, [1]) + // Output = tf.add(const, const, name = "output") m_Prototext = R"( node { @@ -145,7 +145,7 @@ BOOST_FIXTURE_TEST_CASE(ConstantReused, ConstantReusedFixture) } template -struct ConstantValueListFixture : public ParserPrototxtFixture +struct ConstantValueListFixture : public armnnUtils::ParserPrototxtFixture { ConstantValueListFixture() { @@ -180,7 +180,7 @@ node { m_Prototext += std::string("float_val : ") + std::to_string(value) + "\n"; } - m_Prototext += + m_Prototext += R"( } } @@ -209,7 +209,7 @@ BOOST_FIXTURE_TEST_CASE(ConstantMaxValueList, ConstantMaxValueListFixture) } template -struct ConstantCreateFixture : public ParserPrototxtFixture +struct ConstantCreateFixture : public armnnUtils::ParserPrototxtFixture { ConstantCreateFixture() { diff --git a/src/armnnTfParser/test/Convolution2d.cpp b/src/armnnTfParser/test/Convolution2d.cpp index a7c7648b81..8ad1036ef1 100644 --- 
a/src/armnnTfParser/test/Convolution2d.cpp +++ b/src/armnnTfParser/test/Convolution2d.cpp @@ -11,14 +11,14 @@ BOOST_AUTO_TEST_SUITE(TensorflowParser) -struct Convolution2dFixture : public ParserPrototxtFixture +struct Convolution2dFixture : public armnnUtils::ParserPrototxtFixture { explicit Convolution2dFixture(const char* paddingType) : Convolution2dFixture(paddingType, 1) {} - // dilation: 0 - dilations attribute is not included; - // dilation: >0 - dilations attribute set to [1,v,v,1], where v is the value of the dilation arg + // Dilation: 0 - dilations attribute is not included; + // Dilation: >0 - dilations attribute set to [1,v,v,1], where v is the value of the dilation arg explicit Convolution2dFixture(const char* paddingType, int stride, int dilation = 0) { std::string strideString = std::to_string(stride); @@ -309,13 +309,8 @@ BOOST_AUTO_TEST_CASE(ParseConv2DDilation2) armnn::TensorShape tensorShape = { 1, 3, 3, 1 }; inputShapes["graphInput"] = tensorShape; armnnTfParser::ITfParserPtr parser = armnnTfParser::ITfParser::Create(); - BOOST_CHECK_EXCEPTION(parser->CreateNetworkFromString(prototext, inputShapes, { "potato" }), - armnn::ParseException, - [] (armnn::ParseException const& ex)->bool - { - return strcmp(ex.what(), - "ArmNN only supports Convolution layers with dilations [1,1,1,1]") == 0; - }); + BOOST_CHECK_THROW(parser->CreateNetworkFromString(prototext, inputShapes, { "potato" }), + armnn::ParseException); } diff --git a/src/armnnTfParser/test/DepthwiseConvolution2d.cpp b/src/armnnTfParser/test/DepthwiseConvolution2d.cpp index 84e7a7e7a9..a44f94957b 100644 --- a/src/armnnTfParser/test/DepthwiseConvolution2d.cpp +++ b/src/armnnTfParser/test/DepthwiseConvolution2d.cpp @@ -11,7 +11,7 @@ BOOST_AUTO_TEST_SUITE(TensorflowParser) -struct DepthwiseConvolution2dFixture : public ParserPrototxtFixture +struct DepthwiseConvolution2dFixture : public armnnUtils::ParserPrototxtFixture { explicit DepthwiseConvolution2dFixture(const char* paddingType) { diff 
--git a/src/armnnTfParser/test/FullyConnected.cpp b/src/armnnTfParser/test/FullyConnected.cpp index 2a7b4951b7..e7f040e784 100644 --- a/src/armnnTfParser/test/FullyConnected.cpp +++ b/src/armnnTfParser/test/FullyConnected.cpp @@ -14,15 +14,15 @@ BOOST_AUTO_TEST_SUITE(TensorflowParser) // In Tensorflow fully connected layers are expressed as a MatMul followed by an Add. // The TfParser must detect this case and convert them to a FullyConnected layer. -struct FullyConnectedFixture : public ParserPrototxtFixture +struct FullyConnectedFixture : public armnnUtils::ParserPrototxtFixture { FullyConnectedFixture() { - // input = tf.placeholder(tf.float32, [1, 1], "input") - // weights = tf.constant([2], tf.float32, [1, 1]) - // matmul = tf.matmul(input, weights) - // bias = tf.constant([1], tf.float32) - // output = tf.add(matmul, bias, name="output") + // Input = tf.placeholder(tf.float32, [1, 1], "input") + // Weights = tf.constant([2], tf.float32, [1, 1]) + // Matmul = tf.matmul(input, weights) + // Bias = tf.constant([1], tf.float32) + // Output = tf.add(matmul, bias, name="output") m_Prototext = R"( node { name: "input" @@ -153,7 +153,7 @@ BOOST_FIXTURE_TEST_CASE(FullyConnected, FullyConnectedFixture) // C-- A A -- C // \ / // A -struct MatMulUsedInTwoFcFixture : public ParserPrototxtFixture +struct MatMulUsedInTwoFcFixture : public armnnUtils::ParserPrototxtFixture { MatMulUsedInTwoFcFixture() { @@ -326,7 +326,7 @@ BOOST_FIXTURE_TEST_CASE(MatMulUsedInTwoFc, MatMulUsedInTwoFcFixture) RunTest<1>({ 3 }, { 32 }); // Ideally we would check here that the armnn network has 5 layers: // Input, 2 x FullyConnected (biased), Add and Output. - // This would make sure the parser hasn't incorrectly added some unconnected layers corresponding to the MatMul + // This would make sure the parser hasn't incorrectly added some unconnected layers corresponding to the MatMul. 
} // Similar to MatMulUsedInTwoFc, but this time the Adds are 'staggered' (see diagram), which means that only one @@ -338,16 +338,16 @@ BOOST_FIXTURE_TEST_CASE(MatMulUsedInTwoFc, MatMulUsedInTwoFcFixture) // C2 -- A | // \ / // A -struct MatMulUsedInTwoFcStaggeredFixture : public ParserPrototxtFixture +struct MatMulUsedInTwoFcStaggeredFixture : public armnnUtils::ParserPrototxtFixture { MatMulUsedInTwoFcStaggeredFixture() { - // input = tf.placeholder(tf.float32, shape=[1,1], name = "input") - // const1 = tf.constant([17], tf.float32, [1,1]) - // mul = tf.matmul(input, const1) - // const2 = tf.constant([7], tf.float32, [1]) - // fc = tf.add(mul, const2) - // output = tf.add(mul, fc, name="output") + // Input = tf.placeholder(tf.float32, shape=[1,1], name = "input") + // Const1 = tf.constant([17], tf.float32, [1,1]) + // Mul = tf.matmul(input, const1) + // Const2 = tf.constant([7], tf.float32, [1]) + // Fc = tf.add(mul, const2) + // Output = tf.add(mul, fc, name="output") m_Prototext = R"( node { name: "input" @@ -484,13 +484,13 @@ BOOST_FIXTURE_TEST_CASE(MatMulUsedInTwoFcStaggered, MatMulUsedInTwoFcStaggeredFi } // A MatMul in isolation, not connected to an add. Should result in a non-biased FullyConnected layer.
-struct MatMulFixture : public ParserPrototxtFixture +struct MatMulFixture : public armnnUtils::ParserPrototxtFixture { MatMulFixture() { - // input = tf.placeholder(tf.float32, shape = [1, 1], name = "input") - // const = tf.constant([17], tf.float32, [1, 1]) - // output = tf.matmul(input, const, name = "output") + // Input = tf.placeholder(tf.float32, shape = [1, 1], name = "input") + // Const = tf.constant([17], tf.float32, [1, 1]) + // Output = tf.matmul(input, const, name = "output") m_Prototext = R"( node { name: "input" diff --git a/src/armnnTfParser/test/FusedBatchNorm.cpp b/src/armnnTfParser/test/FusedBatchNorm.cpp index 632d5f01f9..69f018f194 100644 --- a/src/armnnTfParser/test/FusedBatchNorm.cpp +++ b/src/armnnTfParser/test/FusedBatchNorm.cpp @@ -9,7 +9,7 @@ BOOST_AUTO_TEST_SUITE(TensorflowParser) -struct FusedBatchNormFixture : public ParserPrototxtFixture +struct FusedBatchNormFixture : public armnnUtils::ParserPrototxtFixture { FusedBatchNormFixture() { @@ -166,10 +166,10 @@ struct FusedBatchNormFixture : public ParserPrototxtFixture({1, 2, 3, 4, 5, 6, 7, 8, 9}, // input data + RunTest<4>({1, 2, 3, 4, 5, 6, 7, 8, 9}, // Input data. {-2.8277204f, -2.12079024f, -1.4138602f, -0.7069301f, 0.0f, 0.7069301f, - 1.4138602f, 2.12079024f, 2.8277204f}); // expected output data + 1.4138602f, 2.12079024f, 2.8277204f}); // Expected output data. 
} BOOST_AUTO_TEST_SUITE_END() diff --git a/src/armnnTfParser/test/Identity.cpp b/src/armnnTfParser/test/Identity.cpp index ca20de5760..9baa8988f3 100644 --- a/src/armnnTfParser/test/Identity.cpp +++ b/src/armnnTfParser/test/Identity.cpp @@ -9,7 +9,7 @@ BOOST_AUTO_TEST_SUITE(TensorflowParser) -struct IdentitySimpleFixture : public ParserPrototxtFixture +struct IdentitySimpleFixture : public armnnUtils::ParserPrototxtFixture { IdentitySimpleFixture() { @@ -51,7 +51,7 @@ BOOST_FIXTURE_TEST_CASE(IdentitySimple, IdentitySimpleFixture) RunTest<1>({ 1.0f, 2.0f, 3.0f, 4.0f }, { 1.0f, 2.0f, 3.0f, 4.0f }); } -struct IdentityFixture : public ParserPrototxtFixture +struct IdentityFixture : public armnnUtils::ParserPrototxtFixture { IdentityFixture() { @@ -105,7 +105,7 @@ BOOST_FIXTURE_TEST_CASE(ParseIdentity, IdentityFixture) RunTest<1>({ 1.0f, 2.0f, 3.0f, 4.0f }, { 2.0f, 4.0f, 6.0f, 8.0f }); } -struct IdentityChainFixture : public ParserPrototxtFixture +struct IdentityChainFixture : public armnnUtils::ParserPrototxtFixture { IdentityChainFixture() { diff --git a/src/armnnTfParser/test/LocalResponseNormalization.cpp b/src/armnnTfParser/test/LocalResponseNormalization.cpp index a7c2bfe3e1..dcfbbb6918 100644 --- a/src/armnnTfParser/test/LocalResponseNormalization.cpp +++ b/src/armnnTfParser/test/LocalResponseNormalization.cpp @@ -9,8 +9,7 @@ BOOST_AUTO_TEST_SUITE(TensorflowParser) - -struct LocalResponseNormalizationBaseFixture : public ParserPrototxtFixture +struct LocalResponseNormalizationBaseFixture : public armnnUtils::ParserPrototxtFixture { explicit LocalResponseNormalizationBaseFixture(float alpha, float beta, float bias) { diff --git a/src/armnnTfParser/test/MaximumForLeakyRelu.cpp b/src/armnnTfParser/test/MaximumForLeakyRelu.cpp new file mode 100644 index 0000000000..a2566fced5 --- /dev/null +++ b/src/armnnTfParser/test/MaximumForLeakyRelu.cpp @@ -0,0 +1,169 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. 
+// See LICENSE file in the project root for full license information. +// + +#include +#include "armnnTfParser/ITfParser.hpp" +#include "ParserPrototxtFixture.hpp" + +BOOST_AUTO_TEST_SUITE(TensorflowParser) + +struct UnsupportedMaximumFixture + : public armnnUtils::ParserPrototxtFixture +{ + UnsupportedMaximumFixture() + { + m_Prototext = R"( + node { + name: "graphInput" + op: "Placeholder" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "shape" + value { + shape { + } + } + } + } + node { + name: "Maximum" + op: "Maximum" + input: "graphInput" + input: "graphInput" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + } + )"; + } +}; + +BOOST_FIXTURE_TEST_CASE(UnsupportedMaximum, UnsupportedMaximumFixture) +{ + BOOST_CHECK_THROW( + SetupSingleInputSingleOutput({ 1, 1 }, "graphInput", "Maximum"), + armnn::ParseException); +} + +struct SupportedMaximumFixture + : public armnnUtils::ParserPrototxtFixture +{ + SupportedMaximumFixture(const std::string & maxInput0, + const std::string & maxInput1, + const std::string & mulInput0, + const std::string & mulInput1) + { + m_Prototext = R"( + node { + name: "graphInput" + op: "Placeholder" + attr { + key: "dtype" + value { type: DT_FLOAT } + } + attr { + key: "shape" + value { shape { } } + } + } + node { + name: "Alpha" + op: "Const" + attr { + key: "dtype" + value { type: DT_FLOAT } + } + attr { + key: "value" + value { + tensor { + dtype: DT_FLOAT + tensor_shape { + dim { size: 1 } + } + float_val: 0.1 + } + } + } + } + node { + name: "Mul" + op: "Mul" + input: ")" + mulInput0 + R"(" + input: ")" + mulInput1 + R"(" + attr { + key: "T" + value { type: DT_FLOAT } + } + } + node { + name: "Maximum" + op: "Maximum" + input: ")" + maxInput0 + R"(" + input: ")" + maxInput1 + R"(" + attr { + key: "T" + value { type: DT_FLOAT } + } + } + )"; + SetupSingleInputSingleOutput({ 1, 2 }, "graphInput", "Maximum"); + } +}; + +struct LeakyRelu_Max_MulAT_T_Fixture : public SupportedMaximumFixture +{ + 
LeakyRelu_Max_MulAT_T_Fixture() + : SupportedMaximumFixture("Mul","graphInput","Alpha","graphInput") {} +}; + +BOOST_FIXTURE_TEST_CASE(LeakyRelu_Max_MulAT_T, LeakyRelu_Max_MulAT_T_Fixture) +{ + RunTest<2>(std::vector({-5.0, 3.0}), {-0.5, 3.0}); +} + +struct LeakyRelu_Max_T_MulAT_Fixture : public SupportedMaximumFixture +{ + LeakyRelu_Max_T_MulAT_Fixture() + : SupportedMaximumFixture("graphInput","Mul","Alpha","graphInput") {} +}; + + +BOOST_FIXTURE_TEST_CASE(LeakyRelu_Max_T_MulAT, LeakyRelu_Max_T_MulAT_Fixture) +{ + RunTest<2>(std::vector({-10.0, 3.0}), {-1.0, 3.0}); +} + +struct LeakyRelu_Max_MulTA_T_Fixture : public SupportedMaximumFixture +{ + LeakyRelu_Max_MulTA_T_Fixture() + : SupportedMaximumFixture("Mul", "graphInput","graphInput","Alpha") {} +}; + +BOOST_FIXTURE_TEST_CASE(LeakyRelu_Max_MulTA_T, LeakyRelu_Max_MulTA_T_Fixture) +{ + RunTest<2>(std::vector({-5.0, 3.0}), {-0.5, 3.0}); +} + +struct LeakyRelu_Max_T_MulTA_Fixture : public SupportedMaximumFixture +{ + LeakyRelu_Max_T_MulTA_Fixture() + : SupportedMaximumFixture("graphInput", "Mul", "graphInput", "Alpha") {} +}; + +BOOST_FIXTURE_TEST_CASE(LeakyRelu_Max_T_MulTA, LeakyRelu_Max_T_MulTA_Fixture) +{ + RunTest<2>(std::vector({-10.0, 13.0}), {-1.0, 13.0}); +} + +BOOST_AUTO_TEST_SUITE_END() diff --git a/src/armnnTfParser/test/MultiOutput.cpp b/src/armnnTfParser/test/MultiOutput.cpp index 56be33dab7..7a163ef582 100644 --- a/src/armnnTfParser/test/MultiOutput.cpp +++ b/src/armnnTfParser/test/MultiOutput.cpp @@ -9,7 +9,7 @@ BOOST_AUTO_TEST_SUITE(TensorflowParser) -struct MultiOutMatchFixture : public ParserPrototxtFixture +struct MultiOutMatchFixture : public armnnUtils::ParserPrototxtFixture { MultiOutMatchFixture() { @@ -54,7 +54,7 @@ BOOST_FIXTURE_TEST_CASE(MultiOutMatch, MultiOutMatchFixture) RunTest<2>({ 0, 0, 10000, 0, 0, 0, 0 }, { 0, 0, 1, 0, 0, 0, 0 }); } -struct MultiOutFailFixture : public ParserPrototxtFixture +struct MultiOutFailFixture : public armnnUtils::ParserPrototxtFixture { 
MultiOutFailFixture() { @@ -97,7 +97,7 @@ BOOST_FIXTURE_TEST_CASE(MultiOutFail, MultiOutFailFixture) // Not running the graph because this is expected to throw an exception during parsing. } -struct MultiOutInvalidFixture : public ParserPrototxtFixture +struct MultiOutInvalidFixture : public armnnUtils::ParserPrototxtFixture { MultiOutInvalidFixture() { diff --git a/src/armnnTfParser/test/Multiplication.cpp b/src/armnnTfParser/test/Multiplication.cpp index 3a20fd1141..ca9c416ca5 100644 --- a/src/armnnTfParser/test/Multiplication.cpp +++ b/src/armnnTfParser/test/Multiplication.cpp @@ -9,7 +9,7 @@ BOOST_AUTO_TEST_SUITE(TensorflowParser) -struct MultiplicationFixture : public ParserPrototxtFixture +struct MultiplicationFixture : public armnnUtils::ParserPrototxtFixture { MultiplicationFixture() { @@ -74,7 +74,7 @@ BOOST_FIXTURE_TEST_CASE(ParseMultiplication, MultiplicationFixture) RunTest<2>({ 0, 0, 10000, 0, 0, 0, 0 }, { 0, 0, 1, 0, 0, 0, 0 }); } -struct MultiplicationBroadcastFixture : public ParserPrototxtFixture +struct MultiplicationBroadcastFixture : public armnnUtils::ParserPrototxtFixture { MultiplicationBroadcastFixture(const armnn::TensorShape& inputShape0, const armnn::TensorShape& inputShape1) { diff --git a/src/armnnTfParser/test/PassThru.cpp b/src/armnnTfParser/test/PassThru.cpp index 8462ec27cc..bba9ea579b 100644 --- a/src/armnnTfParser/test/PassThru.cpp +++ b/src/armnnTfParser/test/PassThru.cpp @@ -8,7 +8,7 @@ BOOST_AUTO_TEST_SUITE(TensorflowParser) -struct PassThruFixture : public ParserPrototxtFixture +struct PassThruFixture : public armnnUtils::ParserPrototxtFixture { PassThruFixture() { @@ -46,7 +46,7 @@ BOOST_FIXTURE_TEST_CASE(RunGraph, PassThruFixture) auto input = MakeRandomTensor(inputTensorInfo, 378346); std::vector inputVec; inputVec.assign(input.data(), input.data() + input.num_elements()); - RunTest<2>(inputVec, inputVec); // The passthru network should output the same as the input + RunTest<2>(inputVec, inputVec); // The passthru network 
should output the same as the input. } BOOST_AUTO_TEST_SUITE_END() diff --git a/src/armnnTfParser/test/Pooling.cpp b/src/armnnTfParser/test/Pooling.cpp index 36ffa47def..f603b22afd 100644 --- a/src/armnnTfParser/test/Pooling.cpp +++ b/src/armnnTfParser/test/Pooling.cpp @@ -9,8 +9,7 @@ BOOST_AUTO_TEST_SUITE(TensorflowParser) - -struct Pooling2dFixture : public ParserPrototxtFixture +struct Pooling2dFixture : public armnnUtils::ParserPrototxtFixture { explicit Pooling2dFixture(const char* poolingtype) { diff --git a/src/armnnTfParser/test/Reshape.cpp b/src/armnnTfParser/test/Reshape.cpp index 4eb6b12467..2fe84359fa 100644 --- a/src/armnnTfParser/test/Reshape.cpp +++ b/src/armnnTfParser/test/Reshape.cpp @@ -9,8 +9,7 @@ BOOST_AUTO_TEST_SUITE(TensorflowParser) - -struct ReshapeFixture : public ParserPrototxtFixture +struct ReshapeFixture : public armnnUtils::ParserPrototxtFixture { ReshapeFixture() { diff --git a/src/armnnTfParser/test/ResizeBilinear.cpp b/src/armnnTfParser/test/ResizeBilinear.cpp index 30d898f5bb..2aad0a651d 100644 --- a/src/armnnTfParser/test/ResizeBilinear.cpp +++ b/src/armnnTfParser/test/ResizeBilinear.cpp @@ -9,7 +9,7 @@ BOOST_AUTO_TEST_SUITE(TensorflowParser) -struct ResizeBilinearFixture : public ParserPrototxtFixture +struct ResizeBilinearFixture : public armnnUtils::ParserPrototxtFixture { ResizeBilinearFixture() { @@ -98,11 +98,11 @@ node { BOOST_FIXTURE_TEST_CASE(ParseResizeBilinear, ResizeBilinearFixture) { - RunTest<4>(// input data + RunTest<4>(// Input data. { 0.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f }, - // expected output data + // Expected output data. 
{ 0.0f, 0.6f, 1.2f, 1.8f, 2.0f, 1.8f, 2.4f, 3.0f, 3.6f, 3.8f, 3.6f, 4.2f, 4.8f, 5.4f, 5.6f, diff --git a/src/armnnTfParser/test/Shape.cpp b/src/armnnTfParser/test/Shape.cpp index 7b414ecfac..959d69bb73 100644 --- a/src/armnnTfParser/test/Shape.cpp +++ b/src/armnnTfParser/test/Shape.cpp @@ -9,7 +9,7 @@ BOOST_AUTO_TEST_SUITE(TensorflowParser) -struct ShapeFixture : public ParserPrototxtFixture +struct ShapeFixture : public armnnUtils::ParserPrototxtFixture { ShapeFixture() { @@ -85,9 +85,8 @@ struct ShapeFixture : public ParserPrototxtFixture BOOST_FIXTURE_TEST_CASE(ParseShape, ShapeFixture) { - // Note: the test's output cannot be an int32 const layer, because that cannot exist in the - // as ARMNN only supports u8 and float layers. For that reason I added a reshape layer - // which reshapes the input to its original dimensions, which is not changing it. + // Note: the test's output cannot be an int32 const layer, because ARMNN only supports u8 and float layers. + // For that reason I added a reshape layer which reshapes the input to its original dimensions. 
RunTest<2>({ 0.0f, 1.0f, 2.0f, 3.0f }, { 0.0f, 1.0f, 2.0f, 3.0f }); } diff --git a/src/armnnTfParser/test/Softmax.cpp b/src/armnnTfParser/test/Softmax.cpp index 1ab28ea3aa..0b55816982 100644 --- a/src/armnnTfParser/test/Softmax.cpp +++ b/src/armnnTfParser/test/Softmax.cpp @@ -9,7 +9,7 @@ BOOST_AUTO_TEST_SUITE(TensorflowParser) -struct SoftmaxFixture : public ParserPrototxtFixture +struct SoftmaxFixture : public armnnUtils::ParserPrototxtFixture { SoftmaxFixture() { diff --git a/src/armnnTfParser/test/Squeeze.cpp b/src/armnnTfParser/test/Squeeze.cpp index d2d7d49494..1722b630ac 100644 --- a/src/armnnTfParser/test/Squeeze.cpp +++ b/src/armnnTfParser/test/Squeeze.cpp @@ -9,9 +9,8 @@ BOOST_AUTO_TEST_SUITE(TensorflowParser) - template -struct SqueezeFixture : public ParserPrototxtFixture +struct SqueezeFixture : public armnnUtils::ParserPrototxtFixture { SqueezeFixture() { diff --git a/src/armnnTfParser/test/TestDependencies.cpp b/src/armnnTfParser/test/TestDependencies.cpp index 13ab17c5b6..fa26a1c0e0 100644 --- a/src/armnnTfParser/test/TestDependencies.cpp +++ b/src/armnnTfParser/test/TestDependencies.cpp @@ -22,16 +22,16 @@ BOOST_AUTO_TEST_SUITE(TensorflowParser) // \ R3 // \| // O -struct RediscoveredDependenciesFixture : public ParserPrototxtFixture +struct RediscoveredDependenciesFixture : public armnnUtils::ParserPrototxtFixture { RediscoveredDependenciesFixture() { - // input = tf.placeholder(tf.float32, 1, "input") - // relu0 = tf.nn.relu(input, "relu0") - // relu1 = tf.nn.relu(relu0, "relu1") - // relu2 = tf.nn.relu(relu0, "relu2") - // relu3 = tf.nn.relu(relu2, "relu3") - // output = tf.add(relu1, relu3, "output") + // Input = tf.placeholder(tf.float32, 1, "input") + // Relu0 = tf.nn.relu(input, "relu0") + // Relu1 = tf.nn.relu(relu0, "relu1") + // Relu2 = tf.nn.relu(relu0, "relu2") + // Relu3 = tf.nn.relu(relu2, "relu3") + // Output = tf.add(relu1, relu3, "output") m_Prototext = R"( node { name: "input" @@ -184,12 +184,12 @@ node { // 
BOOST_AUTO_TEST_CASE(ComplexCycle) { - // input = tf.placeholder(tf.float32, 1, "input") - // add2 = tf.nn.relu(input, add1, "add2") // This line won't actually run in TF, because add1 is not yet defined - // relu1 = tf.nn.relu(relu0, "relu1") - // relu2 = tf.nn.relu(relu0, "relu2") - // relu3 = tf.nn.relu(relu2, "relu3") - // add1 = tf.add(relu1, relu3, "add1") + // Input = tf.placeholder(tf.float32, 1, "input") + // Add2 = tf.nn.relu(input, add1, "add2") // This line won't actually run in TF, because add1 is not yet defined + // Relu1 = tf.nn.relu(relu0, "relu1") + // Relu2 = tf.nn.relu(relu0, "relu2") + // Relu3 = tf.nn.relu(relu2, "relu3") + // Add1 = tf.add(relu1, relu3, "add1") const char* prototext = R"( node { name: "input" diff --git a/src/armnnTfParser/test/TestMultiInputsOutputs.cpp b/src/armnnTfParser/test/TestMultiInputsOutputs.cpp index 5eea616ec8..c7889f3966 100644 --- a/src/armnnTfParser/test/TestMultiInputsOutputs.cpp +++ b/src/armnnTfParser/test/TestMultiInputsOutputs.cpp @@ -9,14 +9,14 @@ BOOST_AUTO_TEST_SUITE(TensorflowParser) -struct MultiInputsOutputsFixture : public ParserPrototxtFixture +struct MultiInputsOutputsFixture : public armnnUtils::ParserPrototxtFixture { MultiInputsOutputsFixture() { - // input1 = tf.placeholder(tf.float32, shape=[], name = "input1") - // input2 = tf.placeholder(tf.float32, shape = [], name = "input2") - // add1 = tf.add(input1, input2, name = "add1") - // add2 = tf.add(input1, input2, name = "add2") + // Input1 = tf.placeholder(tf.float32, shape=[], name = "input1") + // Input2 = tf.placeholder(tf.float32, shape = [], name = "input2") + // Add1 = tf.add(input1, input2, name = "add1") + // Add2 = tf.add(input1, input2, name = "add2") m_Prototext = R"( node { name: "input1" -- cgit v1.2.1