Diffstat (limited to 'src/armnnTfLiteParser/test')
-rw-r--r--  src/armnnTfLiteParser/test/Activations.cpp | 20
-rw-r--r--  src/armnnTfLiteParser/test/Addition.cpp | 11
-rw-r--r--  src/armnnTfLiteParser/test/ArgMinMax.cpp | 15
-rw-r--r--  src/armnnTfLiteParser/test/AvgPool2D.cpp | 17
-rw-r--r--  src/armnnTfLiteParser/test/BatchToSpaceND.cpp | 13
-rw-r--r--  src/armnnTfLiteParser/test/Cast.cpp | 10
-rw-r--r--  src/armnnTfLiteParser/test/Concatenation.cpp | 23
-rw-r--r--  src/armnnTfLiteParser/test/Constant.cpp | 9
-rw-r--r--  src/armnnTfLiteParser/test/Conv2D.cpp | 21
-rw-r--r--  src/armnnTfLiteParser/test/DepthToSpace.cpp | 9
-rw-r--r--  src/armnnTfLiteParser/test/DepthwiseConvolution2D.cpp | 63
-rw-r--r--  src/armnnTfLiteParser/test/Dequantize.cpp | 13
-rw-r--r--  src/armnnTfLiteParser/test/DetectionPostProcess.cpp | 72
-rw-r--r--  src/armnnTfLiteParser/test/Div.cpp | 11
-rw-r--r--  src/armnnTfLiteParser/test/ElementWiseUnary.cpp | 18
-rw-r--r--  src/armnnTfLiteParser/test/FullyConnected.cpp | 22
-rw-r--r--  src/armnnTfLiteParser/test/Gather.cpp | 11
-rw-r--r--  src/armnnTfLiteParser/test/GetBuffer.cpp | 25
-rw-r--r--  src/armnnTfLiteParser/test/GetInputsOutputs.cpp | 56
-rw-r--r--  src/armnnTfLiteParser/test/GetSubgraphInputsOutputs.cpp | 56
-rw-r--r--  src/armnnTfLiteParser/test/GetTensorIds.cpp | 56
-rw-r--r--  src/armnnTfLiteParser/test/InputOutputTensorNames.cpp | 51
-rw-r--r--  src/armnnTfLiteParser/test/L2Normalization.cpp | 11
-rw-r--r--  src/armnnTfLiteParser/test/LeakyRelu.cpp | 9
-rw-r--r--  src/armnnTfLiteParser/test/LoadModel.cpp | 76
-rw-r--r--  src/armnnTfLiteParser/test/LoadScopeDynamicTensor.cpp | 20
-rw-r--r--  src/armnnTfLiteParser/test/MaxPool2D.cpp | 18
-rw-r--r--  src/armnnTfLiteParser/test/Maximum.cpp | 15
-rw-r--r--  src/armnnTfLiteParser/test/Mean.cpp | 9
-rw-r--r--  src/armnnTfLiteParser/test/Minimum.cpp | 17
-rw-r--r--  src/armnnTfLiteParser/test/Multiplication.cpp | 13
-rw-r--r--  src/armnnTfLiteParser/test/OutputShapeOfSqueeze.cpp | 35
-rw-r--r--  src/armnnTfLiteParser/test/Pack.cpp | 9
-rw-r--r--  src/armnnTfLiteParser/test/Pad.cpp | 13
-rw-r--r--  src/armnnTfLiteParser/test/ParserFlatbuffersFixture.hpp | 41
-rw-r--r--  src/armnnTfLiteParser/test/Prelu.cpp | 18
-rw-r--r--  src/armnnTfLiteParser/test/Quantize.cpp | 13
-rw-r--r--  src/armnnTfLiteParser/test/Reduce.cpp | 11
-rw-r--r--  src/armnnTfLiteParser/test/Reshape.cpp | 25
-rw-r--r--  src/armnnTfLiteParser/test/ResizeBilinear.cpp | 9
-rw-r--r--  src/armnnTfLiteParser/test/ResizeNearestNeighbor.cpp | 9
-rw-r--r--  src/armnnTfLiteParser/test/Slice.cpp | 21
-rw-r--r--  src/armnnTfLiteParser/test/Softmax.cpp | 9
-rw-r--r--  src/armnnTfLiteParser/test/SpaceToBatchND.cpp | 13
-rw-r--r--  src/armnnTfLiteParser/test/Split.cpp | 23
-rw-r--r--  src/armnnTfLiteParser/test/SplitV.cpp | 15
-rw-r--r--  src/armnnTfLiteParser/test/Squeeze.cpp | 27
-rw-r--r--  src/armnnTfLiteParser/test/StridedSlice.cpp | 15
-rw-r--r--  src/armnnTfLiteParser/test/Sub.cpp | 11
-rw-r--r--  src/armnnTfLiteParser/test/Sum.cpp | 9
-rw-r--r--  src/armnnTfLiteParser/test/TfLiteParser.cpp | 16
-rw-r--r--  src/armnnTfLiteParser/test/Transpose.cpp | 15
-rw-r--r--  src/armnnTfLiteParser/test/TransposeConv.cpp | 13
-rw-r--r--  src/armnnTfLiteParser/test/Unpack.cpp | 15
-rw-r--r--  src/armnnTfLiteParser/test/Unsupported.cpp | 26
55 files changed, 578 insertions(+), 623 deletions(-)
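
The patch below migrates every listed test file from Boost.Test to doctest: each BOOST_AUTO_TEST_SUITE(...) / BOOST_AUTO_TEST_SUITE_END() pair becomes a braced TEST_SUITE("...") block, and each BOOST_FIXTURE_TEST_CASE(Name, Fixture) becomes TEST_CASE_FIXTURE(Fixture, "Name") with the arguments swapped. A minimal standalone sketch of the converted pattern follows; ExampleFixture is a hypothetical stand-in for ParserFlatbuffersFixture, and it assumes the doctest runner's main() is provided elsewhere in the test binary, as it is for the Arm NN unit tests.

// Minimal sketch of the doctest pattern this patch converts each file to.
// ExampleFixture is a stand-in, not the real ParserFlatbuffersFixture.
#include <doctest/doctest.h>

struct ExampleFixture
{
    int m_Value = 42;
};

// BOOST_AUTO_TEST_SUITE(Name) ... BOOST_AUTO_TEST_SUITE_END()
// becomes a braced TEST_SUITE("Name") { ... } block.
TEST_SUITE("TensorflowLiteParser_Example")
{

// BOOST_FIXTURE_TEST_CASE(CaseName, Fixture) becomes
// TEST_CASE_FIXTURE(Fixture, "CaseName"); the body can use the fixture's
// members directly because doctest derives the test case from the fixture.
TEST_CASE_FIXTURE(ExampleFixture, "FixtureCase")
{
    CHECK(m_Value == 42);
}

// BOOST_AUTO_TEST_CASE(CaseName) becomes a plain TEST_CASE("CaseName").
TEST_CASE("FreeCase")
{
    CHECK(1 + 1 == 2);
}

}
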
diff --git a/src/armnnTfLiteParser/test/Activations.cpp b/src/armnnTfLiteParser/test/Activations.cpp
index f74c22d107..980edc4c5d 100644
--- a/src/armnnTfLiteParser/test/Activations.cpp
+++ b/src/armnnTfLiteParser/test/Activations.cpp
@@ -3,12 +3,13 @@
// SPDX-License-Identifier: MIT
//
-#include <boost/test/unit_test.hpp>
#include "ParserFlatbuffersFixture.hpp"
#include "../TfLiteParser.hpp"
-BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
+#include <doctest/doctest.h>
+TEST_SUITE("TensorflowLiteParser_Activations")
+{
struct ActivationFixture : ParserFlatbuffersFixture
{
@@ -68,7 +69,7 @@ struct ReLuFixture : ActivationFixture
{
ReLuFixture() : ActivationFixture("RELU", "FLOAT32") {}
};
-BOOST_FIXTURE_TEST_CASE(ParseReLu, ReLuFixture)
+TEST_CASE_FIXTURE(ReLuFixture, "ParseReLu")
{
RunTest<2, armnn::DataType::Float32>(0, { -1.0f, -0.5f, 1.25f, -3.0f, 0.0f, 0.5f, -0.75f },
{ 0.0f, 0.0f, 1.25f, 0.0f, 0.0f, 0.5f, 0.0f });
@@ -78,7 +79,7 @@ struct ReLu6Fixture : ActivationFixture
{
ReLu6Fixture() : ActivationFixture("RELU6", "FLOAT32") {}
};
-BOOST_FIXTURE_TEST_CASE(ParseReLu6, ReLu6Fixture)
+TEST_CASE_FIXTURE(ReLu6Fixture, "ParseReLu6")
{
RunTest<2, armnn::DataType::Float32>(0, { -1.0f, -0.5f, 7.25f, -3.0f, 0.0f, 0.5f, -0.75f },
{ 0.0f, 0.0f, 6.0f, 0.0f, 0.0f, 0.5f, 0.0f });
@@ -88,7 +89,7 @@ struct SigmoidFixture : ActivationFixture
{
SigmoidFixture() : ActivationFixture("LOGISTIC", "FLOAT32") {}
};
-BOOST_FIXTURE_TEST_CASE(ParseLogistic, SigmoidFixture)
+TEST_CASE_FIXTURE(SigmoidFixture, "ParseLogistic")
{
RunTest<2, armnn::DataType::Float32>(0, { -1.0f, -0.5f, 4.0f, -4.0f, 0.0f, 0.5f, -0.75f },
{0.268941f, 0.377541f, 0.982013f, 0.0179862f, 0.5f, 0.622459f, 0.320821f });
@@ -99,7 +100,7 @@ struct TanHFixture : ActivationFixture
TanHFixture() : ActivationFixture("TANH", "FLOAT32") {}
};
-BOOST_FIXTURE_TEST_CASE(ParseTanH, TanHFixture)
+TEST_CASE_FIXTURE(TanHFixture, "ParseTanH")
{
RunTest<2, armnn::DataType::Float32>(0,
{ -0.1f, -0.2f, -0.3f, -0.4f, 0.1f, 0.2f, 0.3f },
@@ -111,7 +112,7 @@ struct EluFixture : ActivationFixture
EluFixture() : ActivationFixture("ELU", "FLOAT32") {}
};
-BOOST_FIXTURE_TEST_CASE(ParseElu, EluFixture)
+TEST_CASE_FIXTURE(EluFixture, "ParseElu")
{
RunTest<2, armnn::DataType::Float32>(0,
{ -2.0f, -1.0f, -0.0f, 0.0f, 1.0f, 2.0f, 3.0f },
@@ -123,10 +124,11 @@ struct HardSwishFixture : ActivationFixture
HardSwishFixture() : ActivationFixture("HARD_SWISH", "FLOAT32") {}
};
-BOOST_FIXTURE_TEST_CASE(ParseHardSwish, HardSwishFixture)
+TEST_CASE_FIXTURE(HardSwishFixture, "ParseHardSwish")
{
RunTest<2, armnn::DataType::Float32>(0,
{ -4.0f, -3.0f, -2.9f, 1.2f, 2.2f, 3.0f, 4.0f },
{ -0.0f, -0.0f, -0.04833334f, 0.84f, 1.90666667f, 3.0f, 4.0f });
}
-BOOST_AUTO_TEST_SUITE_END()
+
+}
diff --git a/src/armnnTfLiteParser/test/Addition.cpp b/src/armnnTfLiteParser/test/Addition.cpp
index deeb707a2f..d7c207f783 100644
--- a/src/armnnTfLiteParser/test/Addition.cpp
+++ b/src/armnnTfLiteParser/test/Addition.cpp
@@ -3,15 +3,16 @@
// SPDX-License-Identifier: MIT
//
-#include <boost/test/unit_test.hpp>
#include "ParserFlatbuffersFixture.hpp"
#include "../TfLiteParser.hpp"
+#include <doctest/doctest.h>
+
#include <string>
#include <iostream>
-BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
-
+TEST_SUITE("TensorflowLiteParser_Addition")
+{
struct AddFixture : public ParserFlatbuffersFixture
{
explicit AddFixture(const std::string & inputShape1,
@@ -95,7 +96,7 @@ struct SimpleAddFixture : AddFixture
"[ 2, 2 ]") {}
};
-BOOST_FIXTURE_TEST_CASE(SimpleAdd, SimpleAddFixture)
+TEST_CASE_FIXTURE(SimpleAddFixture, "SimpleAdd")
{
RunTest<2, armnn::DataType::QAsymmU8>(
0,
@@ -104,4 +105,4 @@ BOOST_FIXTURE_TEST_CASE(SimpleAdd, SimpleAddFixture)
{{"outputTensor", { 4, 6, 8, 10 }}});
}
-BOOST_AUTO_TEST_SUITE_END()
+} \ No newline at end of file
diff --git a/src/armnnTfLiteParser/test/ArgMinMax.cpp b/src/armnnTfLiteParser/test/ArgMinMax.cpp
index ad99b48281..77574b12dc 100644
--- a/src/armnnTfLiteParser/test/ArgMinMax.cpp
+++ b/src/armnnTfLiteParser/test/ArgMinMax.cpp
@@ -3,15 +3,14 @@
// SPDX-License-Identifier: MIT
//
-#include <boost/test/unit_test.hpp>
#include "ParserFlatbuffersFixture.hpp"
#include "../TfLiteParser.hpp"
#include <iostream>
#include <string>
-BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
-
+TEST_SUITE("TensorflowLiteParser_ArgMinMax")
+{
struct ArgMinMaxFixture : public ParserFlatbuffersFixture
{
explicit ArgMinMaxFixture(const std::string& operatorCode,
@@ -93,7 +92,7 @@ struct SimpleArgMaxFixture : public ArgMinMaxFixture
"[ 3, 0, 0, 0 ]") {}
};
-BOOST_FIXTURE_TEST_CASE(ParseSimpleArgMax, SimpleArgMaxFixture)
+TEST_CASE_FIXTURE(SimpleArgMaxFixture, "ParseSimpleArgMax")
{
RunTest<3, armnn::DataType::Float32, armnn::DataType::Signed32>(
0,
@@ -109,7 +108,7 @@ struct ArgMaxFixture : public ArgMinMaxFixture
"[ 0, 0, 0, 0 ]") {}
};
-BOOST_FIXTURE_TEST_CASE(ParseArgMax, ArgMaxFixture)
+TEST_CASE_FIXTURE(ArgMaxFixture, "ParseArgMax")
{
RunTest<3, armnn::DataType::Float32, armnn::DataType::Signed32>(
0,
@@ -131,7 +130,7 @@ struct SimpleArgMinFixture : public ArgMinMaxFixture
"[ 3, 0, 0, 0 ]") {}
};
-BOOST_FIXTURE_TEST_CASE(ParseSimpleArgMin, SimpleArgMinFixture)
+TEST_CASE_FIXTURE(SimpleArgMinFixture, "ParseSimpleArgMin")
{
RunTest<3, armnn::DataType::Float32, armnn::DataType::Signed32>(
0,
@@ -147,7 +146,7 @@ struct ArgMinFixture : public ArgMinMaxFixture
"[ 0, 0, 0, 0 ]") {}
};
-BOOST_FIXTURE_TEST_CASE(ParseArgMin, ArgMinFixture)
+TEST_CASE_FIXTURE(ArgMinFixture, "ParseArgMin")
{
RunTest<3, armnn::DataType::Float32, armnn::DataType::Signed32>(
0,
@@ -161,4 +160,4 @@ BOOST_FIXTURE_TEST_CASE(ParseArgMin, ArgMinFixture)
0, 0, 0, 0 } } });
}
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnnTfLiteParser/test/AvgPool2D.cpp b/src/armnnTfLiteParser/test/AvgPool2D.cpp
index a56e7e7362..fdab4da296 100644
--- a/src/armnnTfLiteParser/test/AvgPool2D.cpp
+++ b/src/armnnTfLiteParser/test/AvgPool2D.cpp
@@ -2,12 +2,11 @@
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
-#include <boost/test/unit_test.hpp>
#include "armnnTfLiteParser/ITfLiteParser.hpp"
#include "ParserFlatbuffersFixture.hpp"
-BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
-
+TEST_SUITE("TensorflowLiteParser_AvgPool2D")
+{
struct AvgPool2DFixture : public ParserFlatbuffersFixture
{
explicit AvgPool2DFixture(std::string inputdim, std::string outputdim, std::string dataType)
@@ -96,25 +95,25 @@ struct AvgPoolLiteFixture2DOutput : AvgPool2DFixture
AvgPoolLiteFixture2DOutput() : AvgPool2DFixture("[ 1, 4, 4, 1 ]", "[ 1, 2, 2, 1 ]", "UINT8") {}
};
-BOOST_FIXTURE_TEST_CASE(AvgPoolLite1DOutput, AvgPoolLiteFixtureUint1DOutput)
+TEST_CASE_FIXTURE(AvgPoolLiteFixtureUint1DOutput, "AvgPoolLite1DOutput")
{
RunTest<4, armnn::DataType::QAsymmU8>(0, {2, 3, 5, 2 }, { 3 });
}
-BOOST_FIXTURE_TEST_CASE(AvgPoolLiteFloat1DOutput, AvgPoolLiteFixtureFloat1DOutput)
+TEST_CASE_FIXTURE(AvgPoolLiteFixtureFloat1DOutput, "AvgPoolLiteFloat1DOutput")
{
RunTest<4, armnn::DataType::Float32>(0, { 2.0f, 3.0f, 5.0f, 2.0f }, { 3.0f });
}
-BOOST_FIXTURE_TEST_CASE(AvgPoolLite2DOutput, AvgPoolLiteFixture2DOutput)
+TEST_CASE_FIXTURE(AvgPoolLiteFixture2DOutput, "AvgPoolLite2DOutput")
{
RunTest<4, armnn::DataType::QAsymmU8>(
0, { 1, 2, 2, 3, 5, 6, 7, 8, 3, 2, 1, 0, 1, 2, 3, 4 }, { 4, 5, 2, 2 });
}
-BOOST_FIXTURE_TEST_CASE(IncorrectDataTypeError, AvgPoolLiteFixtureFloat1DOutput)
+TEST_CASE_FIXTURE(AvgPoolLiteFixtureFloat1DOutput, "IncorrectDataTypeError")
{
- BOOST_CHECK_THROW((RunTest<4, armnn::DataType::QAsymmU8>(0, {2, 3, 5, 2 }, { 3 })), armnn::Exception);
+ CHECK_THROWS_AS((RunTest<4, armnn::DataType::QAsymmU8>(0, {2, 3, 5, 2 }, { 3 })), armnn::Exception);
}
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnnTfLiteParser/test/BatchToSpaceND.cpp b/src/armnnTfLiteParser/test/BatchToSpaceND.cpp
index 97f1828335..f5285f80f8 100644
--- a/src/armnnTfLiteParser/test/BatchToSpaceND.cpp
+++ b/src/armnnTfLiteParser/test/BatchToSpaceND.cpp
@@ -3,15 +3,14 @@
// SPDX-License-Identifier: MIT
//
-#include <boost/test/unit_test.hpp>
#include "ParserFlatbuffersFixture.hpp"
#include "../TfLiteParser.hpp"
#include <string>
#include <iostream>
-BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
-
+TEST_SUITE("TensorflowLiteParser_BatchToSpaceND")
+{
struct BatchToSpaceNDFixture : public ParserFlatbuffersFixture
{
explicit BatchToSpaceNDFixture(const std::string & inputShape,
@@ -105,7 +104,7 @@ struct BatchToSpaceNDFixtureTest1 : public BatchToSpaceNDFixture
"[ 0,0,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0 ]") {}
};
-BOOST_FIXTURE_TEST_CASE(BatchToSpaceNDTest1, BatchToSpaceNDFixtureTest1)
+TEST_CASE_FIXTURE(BatchToSpaceNDFixtureTest1, "BatchToSpaceNDTest1")
{
RunTest<4, armnn::DataType::Float32>
(0,
@@ -142,7 +141,7 @@ struct BatchToSpaceNDFixtureTest2 : public BatchToSpaceNDFixture
"[ 0,0,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0 ]") {}
};
-BOOST_FIXTURE_TEST_CASE(ParseBatchToSpaceNDTest2, BatchToSpaceNDFixtureTest2)
+TEST_CASE_FIXTURE(BatchToSpaceNDFixtureTest2, "ParseBatchToSpaceNDTest2")
{
RunTest<4, armnn::DataType::Float32>
(0,
@@ -159,7 +158,7 @@ struct BatchToSpaceNDFixtureTest3 : public BatchToSpaceNDFixture
"[ 0,0,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0 ]") {}
};
-BOOST_FIXTURE_TEST_CASE(ParseBatchToSpaceNDTest3, BatchToSpaceNDFixtureTest3)
+TEST_CASE_FIXTURE(BatchToSpaceNDFixtureTest3, "ParseBatchToSpaceNDTest3")
{
RunTest<4, armnn::DataType::Float32>
(0,
@@ -167,4 +166,4 @@ BOOST_FIXTURE_TEST_CASE(ParseBatchToSpaceNDTest3, BatchToSpaceNDFixtureTest3)
{{ "outputTensor", { 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f }}});
}
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnnTfLiteParser/test/Cast.cpp b/src/armnnTfLiteParser/test/Cast.cpp
index e0f9c63e72..9971ee867b 100644
--- a/src/armnnTfLiteParser/test/Cast.cpp
+++ b/src/armnnTfLiteParser/test/Cast.cpp
@@ -3,15 +3,14 @@
// SPDX-License-Identifier: MIT
//
-#include <boost/test/unit_test.hpp>
#include "ParserFlatbuffersFixture.hpp"
#include "../TfLiteParser.hpp"
#include <string>
#include <iostream>
-BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
-
+TEST_SUITE("TensorflowLiteParser_Cast")
+{
struct CastFixture : public ParserFlatbuffersFixture
{
explicit CastFixture(const std::string& inputShape,
@@ -76,7 +75,7 @@ struct SimpleCastFixture : CastFixture
"FLOAT32") {}
};
-BOOST_FIXTURE_TEST_CASE(SimpleCast, SimpleCastFixture)
+TEST_CASE_FIXTURE(SimpleCastFixture, "SimpleCast")
{
RunTest<2, armnn::DataType::Signed32 , armnn::DataType::Float32>(
0,
@@ -84,5 +83,4 @@ RunTest<2, armnn::DataType::Signed32 , armnn::DataType::Float32>(
{{"outputTensor", { 0.0f, -1.0f, 5.0f, -100.0f, 200.0f, -255.0f }}});
}
-
-BOOST_AUTO_TEST_SUITE_END() \ No newline at end of file
+} \ No newline at end of file
diff --git a/src/armnnTfLiteParser/test/Concatenation.cpp b/src/armnnTfLiteParser/test/Concatenation.cpp
index 8e31a3edb0..2407794f0d 100644
--- a/src/armnnTfLiteParser/test/Concatenation.cpp
+++ b/src/armnnTfLiteParser/test/Concatenation.cpp
@@ -3,15 +3,14 @@
// SPDX-License-Identifier: MIT
//
-#include <boost/test/unit_test.hpp>
#include "ParserFlatbuffersFixture.hpp"
#include "../TfLiteParser.hpp"
#include <string>
#include <iostream>
-BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
-
+TEST_SUITE("TensorflowLiteParser_Concatenation")
+{
struct ConcatenationFixture : public ParserFlatbuffersFixture
{
explicit ConcatenationFixture(const std::string & inputShape1,
@@ -98,7 +97,7 @@ struct ConcatenationFixtureNegativeDim : ConcatenationFixture
"-3" ) {}
};
-BOOST_FIXTURE_TEST_CASE(ParseConcatenationNegativeDim, ConcatenationFixtureNegativeDim)
+TEST_CASE_FIXTURE(ConcatenationFixtureNegativeDim, "ParseConcatenationNegativeDim")
{
RunTest<4, armnn::DataType::QAsymmU8>(
0,
@@ -112,7 +111,7 @@ struct ConcatenationFixtureNCHW : ConcatenationFixture
ConcatenationFixtureNCHW() : ConcatenationFixture("[ 1, 1, 2, 2 ]", "[ 1, 1, 2, 2 ]", "[ 1, 2, 2, 2 ]", "1" ) {}
};
-BOOST_FIXTURE_TEST_CASE(ParseConcatenationNCHW, ConcatenationFixtureNCHW)
+TEST_CASE_FIXTURE(ConcatenationFixtureNCHW, "ParseConcatenationNCHW")
{
RunTest<4, armnn::DataType::QAsymmU8>(
0,
@@ -126,7 +125,7 @@ struct ConcatenationFixtureNHWC : ConcatenationFixture
ConcatenationFixtureNHWC() : ConcatenationFixture("[ 1, 1, 2, 2 ]", "[ 1, 1, 2, 2 ]", "[ 1, 1, 2, 4 ]", "3" ) {}
};
-BOOST_FIXTURE_TEST_CASE(ParseConcatenationNHWC, ConcatenationFixtureNHWC)
+TEST_CASE_FIXTURE(ConcatenationFixtureNHWC, "ParseConcatenationNHWC")
{
RunTest<4, armnn::DataType::QAsymmU8>(
0,
@@ -140,7 +139,7 @@ struct ConcatenationFixtureDim1 : ConcatenationFixture
ConcatenationFixtureDim1() : ConcatenationFixture("[ 1, 2, 3, 4 ]", "[ 1, 2, 3, 4 ]", "[ 1, 4, 3, 4 ]", "1" ) {}
};
-BOOST_FIXTURE_TEST_CASE(ParseConcatenationDim1, ConcatenationFixtureDim1)
+TEST_CASE_FIXTURE(ConcatenationFixtureDim1, "ParseConcatenationDim1")
{
RunTest<4, armnn::DataType::QAsymmU8>(
0,
@@ -159,7 +158,7 @@ struct ConcatenationFixtureDim3 : ConcatenationFixture
ConcatenationFixtureDim3() : ConcatenationFixture("[ 1, 2, 3, 4 ]", "[ 1, 2, 3, 4 ]", "[ 1, 2, 3, 8 ]", "3" ) {}
};
-BOOST_FIXTURE_TEST_CASE(ParseConcatenationDim3, ConcatenationFixtureDim3)
+TEST_CASE_FIXTURE(ConcatenationFixtureDim3, "ParseConcatenationDim3")
{
RunTest<4, armnn::DataType::QAsymmU8>(
0,
@@ -194,7 +193,7 @@ struct ConcatenationFixture3DDim0 : ConcatenationFixture
ConcatenationFixture3DDim0() : ConcatenationFixture("[ 1, 2, 3]", "[ 2, 2, 3]", "[ 3, 2, 3]", "0" ) {}
};
-BOOST_FIXTURE_TEST_CASE(ParseConcatenation3DDim0, ConcatenationFixture3DDim0)
+TEST_CASE_FIXTURE(ConcatenationFixture3DDim0, "ParseConcatenation3DDim0")
{
RunTest<3, armnn::DataType::QAsymmU8>(
0,
@@ -211,7 +210,7 @@ struct ConcatenationFixture3DDim1 : ConcatenationFixture
ConcatenationFixture3DDim1() : ConcatenationFixture("[ 1, 2, 3]", "[ 1, 4, 3]", "[ 1, 6, 3]", "1" ) {}
};
-BOOST_FIXTURE_TEST_CASE(ParseConcatenation3DDim1, ConcatenationFixture3DDim1)
+TEST_CASE_FIXTURE(ConcatenationFixture3DDim1, "ParseConcatenation3DDim1")
{
RunTest<3, armnn::DataType::QAsymmU8>(
0,
@@ -228,7 +227,7 @@ struct ConcatenationFixture3DDim2 : ConcatenationFixture
ConcatenationFixture3DDim2() : ConcatenationFixture("[ 1, 2, 3]", "[ 1, 2, 6]", "[ 1, 2, 9]", "2" ) {}
};
-BOOST_FIXTURE_TEST_CASE(ParseConcatenation3DDim2, ConcatenationFixture3DDim2)
+TEST_CASE_FIXTURE(ConcatenationFixture3DDim2, "ParseConcatenation3DDim2")
{
RunTest<3, armnn::DataType::QAsymmU8>(
0,
@@ -240,4 +239,4 @@ BOOST_FIXTURE_TEST_CASE(ParseConcatenation3DDim2, ConcatenationFixture3DDim2)
3, 4, 5, 12, 13, 14, 15, 16, 17 } } });
}
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnnTfLiteParser/test/Constant.cpp b/src/armnnTfLiteParser/test/Constant.cpp
index bfb76a9791..641fd7ba56 100644
--- a/src/armnnTfLiteParser/test/Constant.cpp
+++ b/src/armnnTfLiteParser/test/Constant.cpp
@@ -3,7 +3,6 @@
// SPDX-License-Identifier: MIT
//
-#include <boost/test/unit_test.hpp>
#include "ParserFlatbuffersFixture.hpp"
#include "../TfLiteParser.hpp"
@@ -12,8 +11,8 @@
using armnnTfLiteParser::TfLiteParserImpl;
-BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
-
+TEST_SUITE("TensorflowLiteParser_Constant")
+{
struct ConstantAddFixture : public ParserFlatbuffersFixture
{
explicit ConstantAddFixture(const std::string & inputShape,
@@ -101,7 +100,7 @@ struct SimpleConstantAddFixture : ConstantAddFixture
{}
};
-BOOST_FIXTURE_TEST_CASE(SimpleConstantAdd, SimpleConstantAddFixture)
+TEST_CASE_FIXTURE(SimpleConstantAddFixture, "SimpleConstantAdd")
{
RunTest<2, armnn::DataType::QAsymmU8>(
0,
@@ -110,4 +109,4 @@ BOOST_FIXTURE_TEST_CASE(SimpleConstantAdd, SimpleConstantAddFixture)
);
}
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnnTfLiteParser/test/Conv2D.cpp b/src/armnnTfLiteParser/test/Conv2D.cpp
index a480a4ec3d..dc5e6974ad 100644
--- a/src/armnnTfLiteParser/test/Conv2D.cpp
+++ b/src/armnnTfLiteParser/test/Conv2D.cpp
@@ -3,13 +3,12 @@
// SPDX-License-Identifier: MIT
//
-#include <boost/test/unit_test.hpp>
#include "ParserFlatbuffersFixture.hpp"
#include "../TfLiteParser.hpp"
#include <sstream>
-BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
-
+TEST_SUITE("TensorflowLiteParser_Conv2D")
+{
struct SimpleConv2DFixture : public ParserFlatbuffersFixture
{
explicit SimpleConv2DFixture()
@@ -87,7 +86,7 @@ struct SimpleConv2DFixture : public ParserFlatbuffersFixture
}
};
-BOOST_FIXTURE_TEST_CASE( ParseSimpleConv2D, SimpleConv2DFixture )
+TEST_CASE_FIXTURE(SimpleConv2DFixture, "ParseSimpleConv2D")
{
RunTest<4, armnn::DataType::QAsymmU8>(
0,
@@ -217,7 +216,7 @@ struct SimpleConv2DWithBiasesFixture : Conv2DWithBiasesFixture
{}
};
-BOOST_FIXTURE_TEST_CASE( ParseConv2DWithBias, SimpleConv2DWithBiasesFixture )
+TEST_CASE_FIXTURE(SimpleConv2DWithBiasesFixture, "ParseConv2DWithBias")
{
RunTest<4, armnn::DataType::QAsymmU8>(
0,
@@ -247,7 +246,7 @@ struct DynamicConv2DWithBiasesFixture : Conv2DWithBiasesFixture
{}
};
-BOOST_FIXTURE_TEST_CASE( ParseDynamicConv2DWithBias, DynamicConv2DWithBiasesFixture )
+TEST_CASE_FIXTURE(DynamicConv2DWithBiasesFixture, "ParseDynamicConv2DWithBias")
{
RunTest<4,
armnn::DataType::QAsymmU8,
@@ -288,7 +287,7 @@ struct Conv2DShapeTestFixture : Conv2DWithBiasesFixture
{}
};
-BOOST_FIXTURE_TEST_CASE( ParseConv2D_112x112_out, Conv2DShapeTestFixture )
+TEST_CASE_FIXTURE(Conv2DShapeTestFixture, "ParseConv2D_112x112_out")
{
}
@@ -310,7 +309,7 @@ struct ReluConv2DWithBiasesFixture : Conv2DWithBiasesFixture
{}
};
-BOOST_FIXTURE_TEST_CASE( ParseConv2DAndReluWithBias, ReluConv2DWithBiasesFixture )
+TEST_CASE_FIXTURE(ReluConv2DWithBiasesFixture, "ParseConv2DAndReluWithBias")
{
uint8_t bias = 16;
uint8_t outZero = 20;
@@ -353,7 +352,7 @@ struct Relu6Conv2DWithBiasesFixture : Conv2DWithBiasesFixture
{}
};
-BOOST_FIXTURE_TEST_CASE( ParseConv2DAndRelu6WithBias, Relu6Conv2DWithBiasesFixture )
+TEST_CASE_FIXTURE(Relu6Conv2DWithBiasesFixture, "ParseConv2DAndRelu6WithBias")
{
uint8_t relu6Min = 6 / 2; // divide by output scale
@@ -642,7 +641,7 @@ struct PerChannelConv2DFixture : public ParserFlatbuffersFixture
}
};
-BOOST_FIXTURE_TEST_CASE( ParsePerChannelConv2D, PerChannelConv2DFixture )
+TEST_CASE_FIXTURE(PerChannelConv2DFixture, "ParsePerChannelConv2D")
{
RunTest<4, armnn::DataType::QAsymmS8>(
0,
@@ -664,4 +663,4 @@ BOOST_FIXTURE_TEST_CASE( ParsePerChannelConv2D, PerChannelConv2DFixture )
});
}
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnnTfLiteParser/test/DepthToSpace.cpp b/src/armnnTfLiteParser/test/DepthToSpace.cpp
index efd1207297..6b7e9c52ba 100644
--- a/src/armnnTfLiteParser/test/DepthToSpace.cpp
+++ b/src/armnnTfLiteParser/test/DepthToSpace.cpp
@@ -3,15 +3,14 @@
// SPDX-License-Identifier: MIT
//
-#include <boost/test/unit_test.hpp>
#include "ParserFlatbuffersFixture.hpp"
#include "../TfLiteParser.hpp"
#include <string>
#include <iostream>
-BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
-
+TEST_SUITE("TensorflowLiteParser_DepthToSpace")
+{
struct DepthToSpaceFixture : public ParserFlatbuffersFixture
{
explicit DepthToSpaceFixture(const std::string& inputShape,
@@ -81,7 +80,7 @@ struct SimpleDepthToSpaceFixture : public DepthToSpaceFixture
SimpleDepthToSpaceFixture() : DepthToSpaceFixture("[ 1, 2, 2, 4 ]", "[ 1, 4, 4, 1 ]") {}
};
-BOOST_FIXTURE_TEST_CASE(ParseDepthToSpace, SimpleDepthToSpaceFixture)
+TEST_CASE_FIXTURE(SimpleDepthToSpaceFixture, "ParseDepthToSpace")
{
RunTest<4, armnn::DataType::Float32>
(0,
@@ -95,4 +94,4 @@ BOOST_FIXTURE_TEST_CASE(ParseDepthToSpace, SimpleDepthToSpaceFixture)
11.f, 12.f, 15.f, 16.f }}});
}
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnnTfLiteParser/test/DepthwiseConvolution2D.cpp b/src/armnnTfLiteParser/test/DepthwiseConvolution2D.cpp
index 95ad2d5ee9..757b23e08f 100644
--- a/src/armnnTfLiteParser/test/DepthwiseConvolution2D.cpp
+++ b/src/armnnTfLiteParser/test/DepthwiseConvolution2D.cpp
@@ -3,15 +3,14 @@
// SPDX-License-Identifier: MIT
//
-#include <boost/test/unit_test.hpp>
#include "ParserFlatbuffersFixture.hpp"
#include "../TfLiteParser.hpp"
#include <string>
#include <iostream>
-BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
-
+TEST_SUITE("TensorflowLiteParser_DepthwiseConvolution2D")
+{
struct DepthwiseConvolution2dFixture : public ParserFlatbuffersFixture
{
explicit DepthwiseConvolution2dFixture(const std::string& inputShape,
@@ -131,7 +130,7 @@ struct DepthwiseConvolution2dSameFixture : DepthwiseConvolution2dFixture
{}
};
-BOOST_FIXTURE_TEST_CASE(ParseDepthwiseConv2DSame, DepthwiseConvolution2dSameFixture)
+TEST_CASE_FIXTURE(DepthwiseConvolution2dSameFixture, "ParseDepthwiseConv2DSame")
{
RunTest<4, armnn::DataType::QAsymmU8>(
0,
@@ -158,7 +157,7 @@ struct DepthwiseConvolution2dValidFixture : DepthwiseConvolution2dFixture
{}
};
-BOOST_FIXTURE_TEST_CASE(ParseDepthwiseConv2DValid, DepthwiseConvolution2dValidFixture)
+TEST_CASE_FIXTURE(DepthwiseConvolution2dValidFixture, "ParseDepthwiseConv2DValid")
{
RunTest<4, armnn::DataType::QAsymmU8>(
0,
@@ -183,7 +182,7 @@ struct DepthwiseConvolution2dSameBiasFixture : DepthwiseConvolution2dFixture
{}
};
-BOOST_FIXTURE_TEST_CASE(ParseDepthwiseConv2DSameBias, DepthwiseConvolution2dSameBiasFixture)
+TEST_CASE_FIXTURE(DepthwiseConvolution2dSameBiasFixture, "ParseDepthwiseConv2DSameBias")
{
RunTest<4, armnn::DataType::QAsymmU8>(
0,
@@ -210,7 +209,7 @@ struct DynamicDepthwiseConvolution2dSameBiasFixture : DepthwiseConvolution2dFixt
{}
};
-BOOST_FIXTURE_TEST_CASE(ParseDynamicDepthwiseConv2DSameBias, DynamicDepthwiseConvolution2dSameBiasFixture)
+TEST_CASE_FIXTURE(DynamicDepthwiseConvolution2dSameBiasFixture, "ParseDynamicDepthwiseConv2DSameBias")
{
RunTest<4, armnn::DataType::QAsymmU8, armnn::DataType::QAsymmU8>(0,
{ { "inputTensor", { 0, 1, 2,
@@ -365,7 +364,7 @@ struct DepthwiseConvolution2dNoQuantFixture : DepthwiseConvolution2dFixture2
};
// No quantization meaning scale=1.0 and offset=0.0 and tensor quantization
-BOOST_FIXTURE_TEST_CASE(ParseDepthwiseConv2DNoQuant, DepthwiseConvolution2dNoQuantFixture)
+TEST_CASE_FIXTURE(DepthwiseConvolution2dNoQuantFixture, "ParseDepthwiseConv2DNoQuant")
{
RunTest<4, armnn::DataType::QAsymmS8>(
0,
@@ -397,7 +396,7 @@ struct DepthwiseConvolution2dNoChannelQuantFixture : DepthwiseConvolution2dFixtu
};
// Uses per channel quantization on weights but with scales = 1.0 and offsets = 0.0
-BOOST_FIXTURE_TEST_CASE(ParseDepthwiseConv2DFilterNoChannelQuant, DepthwiseConvolution2dNoChannelQuantFixture)
+TEST_CASE_FIXTURE(DepthwiseConvolution2dNoChannelQuantFixture, "ParseDepthwiseConv2DFilterNoChannelQuant")
{
RunTest<4, armnn::DataType::QAsymmS8>(
0,
@@ -432,8 +431,8 @@ struct DepthwiseConvolution2dWeightsPerChannelQuantFixture : DepthwiseConvolutio
};
// Weights are per channel quantized but all scales are set to the same value
-BOOST_FIXTURE_TEST_CASE(ParseDepthwiseConv2DFilterWeightsPerChannelQuant,
- DepthwiseConvolution2dWeightsPerChannelQuantFixture)
+TEST_CASE_FIXTURE(DepthwiseConvolution2dWeightsPerChannelQuantFixture,
+ "ParseDepthwiseConv2DFilterWeightsPerChannelQuant")
{
RunTest<4, armnn::DataType::QAsymmS8>(
0,
@@ -468,8 +467,8 @@ struct DepthwiseConvolution2dWeightsPerChannelQuant1Fixture : DepthwiseConvoluti
};
// Uses per channel quantization on weights all scales are different in this test
-BOOST_FIXTURE_TEST_CASE(ParseDepthwiseConv2DFilterWeightsPerChannelQuant1,
- DepthwiseConvolution2dWeightsPerChannelQuant1Fixture)
+TEST_CASE_FIXTURE(DepthwiseConvolution2dWeightsPerChannelQuant1Fixture,
+ "ParseDepthwiseConv2DFilterWeightsPerChannelQuant1")
{
RunTest<4, armnn::DataType::QAsymmS8>(
0,
@@ -506,8 +505,8 @@ struct DepthwiseConvolution2dWeightsPerChannelQuant2Fixture : DepthwiseConvoluti
// Uses per channel quantization on weights all scales are different in this test
// Uses different shape for weights and input compared to the other tests above
-BOOST_FIXTURE_TEST_CASE(ParseDepthwiseConv2DFilterWeightsPerChannelQuant2,
- DepthwiseConvolution2dWeightsPerChannelQuant2Fixture)
+TEST_CASE_FIXTURE(DepthwiseConvolution2dWeightsPerChannelQuant2Fixture,
+ "ParseDepthwiseConv2DFilterWeightsPerChannelQuant2")
{
RunTest<4, armnn::DataType::QAsymmS8>(
0,
@@ -555,8 +554,8 @@ struct DepthwiseConvolution2dWeightsPerChannelQuant4Fixture : DepthwiseConvoluti
};
// Test for depthwise_multiplier different to one (M > 1)
-BOOST_FIXTURE_TEST_CASE(ParseDepthwiseConv2DFilterWeightsPerChannelQuant4,
- DepthwiseConvolution2dWeightsPerChannelQuant4Fixture)
+TEST_CASE_FIXTURE(DepthwiseConvolution2dWeightsPerChannelQuant4Fixture,
+ "ParseDepthwiseConv2DFilterWeightsPerChannelQuant4")
{
RunTest<4, armnn::DataType::QAsymmS8>(
0,
@@ -616,8 +615,8 @@ struct DepthwiseConvolution2dWeightsPerChannelQuant6Fixture : DepthwiseConvoluti
};
-BOOST_FIXTURE_TEST_CASE(ParseDepthwiseConv2DFilterWeightsPerChannelQuant6,
- DepthwiseConvolution2dWeightsPerChannelQuant6Fixture)
+TEST_CASE_FIXTURE(DepthwiseConvolution2dWeightsPerChannelQuant6Fixture,
+ "ParseDepthwiseConv2DFilterWeightsPerChannelQuant6")
{
RunTest<4, armnn::DataType::QAsymmS8>(
0,
@@ -672,8 +671,8 @@ struct DepthwiseConvolution2dWeightsPerChannelQuant1_1Fixture : DepthwiseConvolu
};
-BOOST_FIXTURE_TEST_CASE(ParseDepthwiseConv2DFilterWeightsPerChannelQuant1_1,
- DepthwiseConvolution2dWeightsPerChannelQuant1_1Fixture)
+TEST_CASE_FIXTURE(DepthwiseConvolution2dWeightsPerChannelQuant1_1Fixture,
+ "ParseDepthwiseConv2DFilterWeightsPerChannelQuant1_1")
{
RunTest<4, armnn::DataType::QAsymmS8>(
0,
@@ -712,8 +711,8 @@ struct DepthwiseConvolution2dWeightsPerChannelQuant1_2Fixture : DepthwiseConvolu
};
-BOOST_FIXTURE_TEST_CASE(ParseDepthwiseConv2DFilterWeightsPerChannelQuant1_2,
- DepthwiseConvolution2dWeightsPerChannelQuant1_2Fixture)
+TEST_CASE_FIXTURE(DepthwiseConvolution2dWeightsPerChannelQuant1_2Fixture,
+ "ParseDepthwiseConv2DFilterWeightsPerChannelQuant1_2")
{
RunTest<4, armnn::DataType::QAsymmS8>(
0,
@@ -759,8 +758,8 @@ struct DepthwiseConvolution2dWeightsPerChannelQuant4_1Fixture : DepthwiseConvolu
};
-BOOST_FIXTURE_TEST_CASE(ParseDepthwiseConv2DFilterWeightsPerChannelQuant4_1,
- DepthwiseConvolution2dWeightsPerChannelQuant4_1Fixture)
+TEST_CASE_FIXTURE(DepthwiseConvolution2dWeightsPerChannelQuant4_1Fixture,
+ "ParseDepthwiseConv2DFilterWeightsPerChannelQuant4_1")
{
RunTest<4, armnn::DataType::QAsymmS8>(
0,
@@ -821,8 +820,8 @@ struct DepthwiseConvolution2dWeightsPerChannelQuant4_2Fixture : DepthwiseConvolu
};
-BOOST_FIXTURE_TEST_CASE(ParseDepthwiseConv2DFilterWeightsPerChannelQuant4_2,
- DepthwiseConvolution2dWeightsPerChannelQuant4_2Fixture)
+TEST_CASE_FIXTURE(DepthwiseConvolution2dWeightsPerChannelQuant4_2Fixture,
+ "ParseDepthwiseConv2DFilterWeightsPerChannelQuant4_2")
{
RunTest<4, armnn::DataType::QAsymmS8>(
0,
@@ -887,8 +886,8 @@ struct DepthwiseConvolution2dWeightsPerChannelQuant4_5Fixture : DepthwiseConvolu
};
// Test for depthwise_multiplier different to one (M > 1)
-BOOST_FIXTURE_TEST_CASE(ParseDepthwiseConv2DFilterWeightsPerChannelQuant4_5,
- DepthwiseConvolution2dWeightsPerChannelQuant4_5Fixture)
+TEST_CASE_FIXTURE(DepthwiseConvolution2dWeightsPerChannelQuant4_5Fixture,
+ "ParseDepthwiseConv2DFilterWeightsPerChannelQuant4_5")
{
RunTest<4, armnn::DataType::QAsymmS8>(
0,
@@ -947,8 +946,8 @@ struct DepthwiseConvolution2dWeightsPerChannelQuant4_3_1Fixture : DepthwiseConvo
};
// Test for depthwise_multiplier different to one (M > 1)
-BOOST_FIXTURE_TEST_CASE(ParseDepthwiseConv2DFilterWeightsPerChannelQuant4_3_1,
- DepthwiseConvolution2dWeightsPerChannelQuant4_3_1Fixture)
+TEST_CASE_FIXTURE(DepthwiseConvolution2dWeightsPerChannelQuant4_3_1Fixture,
+ "ParseDepthwiseConv2DFilterWeightsPerChannelQuant4_3_1")
{
RunTest<4, armnn::DataType::QAsymmS8>(
0,
@@ -974,4 +973,4 @@ BOOST_FIXTURE_TEST_CASE(ParseDepthwiseConv2DFilterWeightsPerChannelQuant4_3_1,
3, 4, 1, 1, 1, 3, 3, 2, 0, 0, 0, 0, 2, 4, 4, 8});
}
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnnTfLiteParser/test/Dequantize.cpp b/src/armnnTfLiteParser/test/Dequantize.cpp
index 663f2ca823..6de6fe543b 100644
--- a/src/armnnTfLiteParser/test/Dequantize.cpp
+++ b/src/armnnTfLiteParser/test/Dequantize.cpp
@@ -3,15 +3,14 @@
// SPDX-License-Identifier: MIT
//
-#include <boost/test/unit_test.hpp>
#include "ParserFlatbuffersFixture.hpp"
#include "../TfLiteParser.hpp"
#include <string>
#include <iostream>
-BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
-
+TEST_SUITE("TensorflowLiteParser_Dequantize")
+{
struct DequantizeFixture : public ParserFlatbuffersFixture
{
explicit DequantizeFixture(const std::string & inputShape,
@@ -80,7 +79,7 @@ BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
"UINT8") {}
};
- BOOST_FIXTURE_TEST_CASE(SimpleDequantizeQAsymm8, SimpleDequantizeFixtureQAsymm8)
+ TEST_CASE_FIXTURE(SimpleDequantizeFixtureQAsymm8, "SimpleDequantizeQAsymm8")
{
RunTest<2, armnn::DataType::QAsymmU8 , armnn::DataType::Float32>(
0,
@@ -95,7 +94,7 @@ BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
"INT16") {}
};
- BOOST_FIXTURE_TEST_CASE(SimpleDequantizeQsymm16, SimpleDequantizeFixtureQSymm16)
+ TEST_CASE_FIXTURE(SimpleDequantizeFixtureQSymm16, "SimpleDequantizeQsymm16")
{
RunTest<2, armnn::DataType::QSymmS16 , armnn::DataType::Float32>(
0,
@@ -110,7 +109,7 @@ BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
"INT8") {}
};
- BOOST_FIXTURE_TEST_CASE(SimpleDequantizeQAsymmS8, SimpleDequantizeFixtureQAsymmS8)
+ TEST_CASE_FIXTURE(SimpleDequantizeFixtureQAsymmS8, "SimpleDequantizeQAsymmS8")
{
RunTest<2, armnn::DataType::QAsymmS8 , armnn::DataType::Float32>(
0,
@@ -118,4 +117,4 @@ BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
{{"outputTensor", { 0.0f, 1.5f, 7.5f, 190.5f, -192.0f, -1.5f }}});
}
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnnTfLiteParser/test/DetectionPostProcess.cpp b/src/armnnTfLiteParser/test/DetectionPostProcess.cpp
index 304520c24f..e7ef7402f2 100644
--- a/src/armnnTfLiteParser/test/DetectionPostProcess.cpp
+++ b/src/armnnTfLiteParser/test/DetectionPostProcess.cpp
@@ -12,10 +12,8 @@
#include <armnn/utility/PolymorphicDowncast.hpp>
#include <QuantizeHelper.hpp>
-#include <boost/test/unit_test.hpp>
-
-BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
-
+TEST_SUITE("TensorflowLiteParser_DetectionPostProcess")
+{
struct DetectionPostProcessFixture : ParserFlatbuffersFixture
{
explicit DetectionPostProcessFixture(const std::string& custom_options)
@@ -161,7 +159,7 @@ public:
{}
};
-BOOST_FIXTURE_TEST_CASE( ParseDetectionPostProcess, ParseDetectionPostProcessCustomOptions )
+TEST_CASE_FIXTURE(ParseDetectionPostProcessCustomOptions, "ParseDetectionPostProcess")
{
Setup();
@@ -223,7 +221,7 @@ BOOST_FIXTURE_TEST_CASE( ParseDetectionPostProcess, ParseDetectionPostProcessCus
RunTest<armnn::DataType::QAsymmU8, armnn::DataType::Float32>(0, input, output);
}
-BOOST_FIXTURE_TEST_CASE(DetectionPostProcessGraphStructureTest, ParseDetectionPostProcessCustomOptions)
+TEST_CASE_FIXTURE(ParseDetectionPostProcessCustomOptions, "DetectionPostProcessGraphStructureTest")
{
/*
Inputs: box_encodings scores
@@ -244,47 +242,47 @@ BOOST_FIXTURE_TEST_CASE(DetectionPostProcessGraphStructureTest, ParseDetectionPo
armnn::Graph& graph = GetGraphForTesting(optimized.get());
// Check the number of layers in the graph
- BOOST_TEST((graph.GetNumInputs() == 2));
- BOOST_TEST((graph.GetNumOutputs() == 4));
- BOOST_TEST((graph.GetNumLayers() == 7));
+ CHECK((graph.GetNumInputs() == 2));
+ CHECK((graph.GetNumOutputs() == 4));
+ CHECK((graph.GetNumLayers() == 7));
// Input layers
armnn::Layer* boxEncodingLayer = GetFirstLayerWithName(graph, "box_encodings");
- BOOST_TEST((boxEncodingLayer->GetType() == armnn::LayerType::Input));
- BOOST_TEST(CheckNumberOfInputSlot(boxEncodingLayer, 0));
- BOOST_TEST(CheckNumberOfOutputSlot(boxEncodingLayer, 1));
+ CHECK((boxEncodingLayer->GetType() == armnn::LayerType::Input));
+ CHECK(CheckNumberOfInputSlot(boxEncodingLayer, 0));
+ CHECK(CheckNumberOfOutputSlot(boxEncodingLayer, 1));
armnn::Layer* scoresLayer = GetFirstLayerWithName(graph, "scores");
- BOOST_TEST((scoresLayer->GetType() == armnn::LayerType::Input));
- BOOST_TEST(CheckNumberOfInputSlot(scoresLayer, 0));
- BOOST_TEST(CheckNumberOfOutputSlot(scoresLayer, 1));
+ CHECK((scoresLayer->GetType() == armnn::LayerType::Input));
+ CHECK(CheckNumberOfInputSlot(scoresLayer, 0));
+ CHECK(CheckNumberOfOutputSlot(scoresLayer, 1));
// DetectionPostProcess layer
armnn::Layer* detectionPostProcessLayer = GetFirstLayerWithName(graph, "DetectionPostProcess:0:0");
- BOOST_TEST((detectionPostProcessLayer->GetType() == armnn::LayerType::DetectionPostProcess));
- BOOST_TEST(CheckNumberOfInputSlot(detectionPostProcessLayer, 2));
- BOOST_TEST(CheckNumberOfOutputSlot(detectionPostProcessLayer, 4));
+ CHECK((detectionPostProcessLayer->GetType() == armnn::LayerType::DetectionPostProcess));
+ CHECK(CheckNumberOfInputSlot(detectionPostProcessLayer, 2));
+ CHECK(CheckNumberOfOutputSlot(detectionPostProcessLayer, 4));
// Output layers
armnn::Layer* detectionBoxesLayer = GetFirstLayerWithName(graph, "detection_boxes");
- BOOST_TEST((detectionBoxesLayer->GetType() == armnn::LayerType::Output));
- BOOST_TEST(CheckNumberOfInputSlot(detectionBoxesLayer, 1));
- BOOST_TEST(CheckNumberOfOutputSlot(detectionBoxesLayer, 0));
+ CHECK((detectionBoxesLayer->GetType() == armnn::LayerType::Output));
+ CHECK(CheckNumberOfInputSlot(detectionBoxesLayer, 1));
+ CHECK(CheckNumberOfOutputSlot(detectionBoxesLayer, 0));
armnn::Layer* detectionClassesLayer = GetFirstLayerWithName(graph, "detection_classes");
- BOOST_TEST((detectionClassesLayer->GetType() == armnn::LayerType::Output));
- BOOST_TEST(CheckNumberOfInputSlot(detectionClassesLayer, 1));
- BOOST_TEST(CheckNumberOfOutputSlot(detectionClassesLayer, 0));
+ CHECK((detectionClassesLayer->GetType() == armnn::LayerType::Output));
+ CHECK(CheckNumberOfInputSlot(detectionClassesLayer, 1));
+ CHECK(CheckNumberOfOutputSlot(detectionClassesLayer, 0));
armnn::Layer* detectionScoresLayer = GetFirstLayerWithName(graph, "detection_scores");
- BOOST_TEST((detectionScoresLayer->GetType() == armnn::LayerType::Output));
- BOOST_TEST(CheckNumberOfInputSlot(detectionScoresLayer, 1));
- BOOST_TEST(CheckNumberOfOutputSlot(detectionScoresLayer, 0));
+ CHECK((detectionScoresLayer->GetType() == armnn::LayerType::Output));
+ CHECK(CheckNumberOfInputSlot(detectionScoresLayer, 1));
+ CHECK(CheckNumberOfOutputSlot(detectionScoresLayer, 0));
armnn::Layer* numDetectionsLayer = GetFirstLayerWithName(graph, "num_detections");
- BOOST_TEST((numDetectionsLayer->GetType() == armnn::LayerType::Output));
- BOOST_TEST(CheckNumberOfInputSlot(numDetectionsLayer, 1));
- BOOST_TEST(CheckNumberOfOutputSlot(numDetectionsLayer, 0));
+ CHECK((numDetectionsLayer->GetType() == armnn::LayerType::Output));
+ CHECK(CheckNumberOfInputSlot(numDetectionsLayer, 1));
+ CHECK(CheckNumberOfOutputSlot(numDetectionsLayer, 0));
// Check the connections
armnn::TensorInfo boxEncodingTensor(armnn::TensorShape({ 1, 6, 4 }), armnn::DataType::QAsymmU8, 1, 1);
@@ -296,12 +294,12 @@ BOOST_FIXTURE_TEST_CASE(DetectionPostProcessGraphStructureTest, ParseDetectionPo
armnn::TensorInfo detectionScoresTensor(armnn::TensorShape({ 1, 3 }), armnn::DataType::Float32, 0, 0);
armnn::TensorInfo numDetectionsTensor(armnn::TensorShape({ 1} ), armnn::DataType::Float32, 0, 0);
- BOOST_TEST(IsConnected(boxEncodingLayer, detectionPostProcessLayer, 0, 0, boxEncodingTensor));
- BOOST_TEST(IsConnected(scoresLayer, detectionPostProcessLayer, 0, 1, scoresTensor));
- BOOST_TEST(IsConnected(detectionPostProcessLayer, detectionBoxesLayer, 0, 0, detectionBoxesTensor));
- BOOST_TEST(IsConnected(detectionPostProcessLayer, detectionClassesLayer, 1, 0, detectionClassesTensor));
- BOOST_TEST(IsConnected(detectionPostProcessLayer, detectionScoresLayer, 2, 0, detectionScoresTensor));
- BOOST_TEST(IsConnected(detectionPostProcessLayer, numDetectionsLayer, 3, 0, numDetectionsTensor));
+ CHECK(IsConnected(boxEncodingLayer, detectionPostProcessLayer, 0, 0, boxEncodingTensor));
+ CHECK(IsConnected(scoresLayer, detectionPostProcessLayer, 0, 1, scoresTensor));
+ CHECK(IsConnected(detectionPostProcessLayer, detectionBoxesLayer, 0, 0, detectionBoxesTensor));
+ CHECK(IsConnected(detectionPostProcessLayer, detectionClassesLayer, 1, 0, detectionClassesTensor));
+ CHECK(IsConnected(detectionPostProcessLayer, detectionScoresLayer, 2, 0, detectionScoresTensor));
+ CHECK(IsConnected(detectionPostProcessLayer, numDetectionsLayer, 3, 0, numDetectionsTensor));
}
-BOOST_AUTO_TEST_SUITE_END()
+}
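
The DetectionPostProcess hunks above also swap the assertion macros, a pattern repeated in the remaining files (GetBuffer, GetInputsOutputs, and so on). A minimal sketch of the assertion-level mapping, using a hypothetical container and values rather than anything from the Arm NN tests:

// Sketch of the assertion-level mapping applied throughout this patch:
//   BOOST_TEST(expr) / BOOST_CHECK(expr) -> CHECK(expr)
//   BOOST_CHECK_EQUAL(a, b)              -> CHECK_EQ(a, b)
//   BOOST_CHECK_THROW(expr, Exception)   -> CHECK_THROWS_AS(expr, Exception)
#include <doctest/doctest.h>
#include <stdexcept>
#include <vector>

TEST_CASE("AssertionMappingSketch")
{
    std::vector<int> values{ 1, 2, 3 };

    CHECK(!values.empty());                            // was BOOST_TEST / BOOST_CHECK
    CHECK_EQ(values.size(), 3u);                       // was BOOST_CHECK_EQUAL
    CHECK_THROWS_AS(values.at(10), std::out_of_range); // was BOOST_CHECK_THROW
}
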
diff --git a/src/armnnTfLiteParser/test/Div.cpp b/src/armnnTfLiteParser/test/Div.cpp
index 10be29d755..736e821a09 100644
--- a/src/armnnTfLiteParser/test/Div.cpp
+++ b/src/armnnTfLiteParser/test/Div.cpp
@@ -3,15 +3,14 @@
// SPDX-License-Identifier: MIT
//
-#include <boost/test/unit_test.hpp>
#include "ParserFlatbuffersFixture.hpp"
#include "../TfLiteParser.hpp"
#include <string>
#include <iostream>
-BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
-
+TEST_SUITE("TensorflowLiteParser_Div")
+{
struct DivFixture : public ParserFlatbuffersFixture
{
explicit DivFixture(const std::string & inputShape1,
@@ -92,7 +91,7 @@ struct SimpleDivFixture : public DivFixture
SimpleDivFixture() : DivFixture("[ 1, 2, 2, 3 ]", "[ 1, 2, 2, 3 ]", "[ 1, 2, 2, 3 ]") {}
};
-BOOST_FIXTURE_TEST_CASE(ParseDiv, SimpleDivFixture)
+TEST_CASE_FIXTURE(SimpleDivFixture, "ParseDiv")
{
using armnn::DataType;
float Inf = std::numeric_limits<float>::infinity();
@@ -118,7 +117,7 @@ struct DynamicDivFixture : public DivFixture
DynamicDivFixture() : DivFixture("[ 1, 2, 2, 3 ]", "[ 1, 2, 2, 3 ]", "[ ]") {}
};
-BOOST_FIXTURE_TEST_CASE(ParseDynamicDiv, DynamicDivFixture)
+TEST_CASE_FIXTURE(DynamicDivFixture, "ParseDynamicDiv")
{
using armnn::DataType;
float Inf = std::numeric_limits<float>::infinity();
@@ -138,4 +137,4 @@ BOOST_FIXTURE_TEST_CASE(ParseDynamicDiv, DynamicDivFixture)
1.0f, 1.0f, -1.0f } } }, true);
}
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnnTfLiteParser/test/ElementWiseUnary.cpp b/src/armnnTfLiteParser/test/ElementWiseUnary.cpp
index dc236d2637..21718d8d16 100644
--- a/src/armnnTfLiteParser/test/ElementWiseUnary.cpp
+++ b/src/armnnTfLiteParser/test/ElementWiseUnary.cpp
@@ -3,14 +3,13 @@
// SPDX-License-Identifier: MIT
//
-#include <boost/test/unit_test.hpp>
#include "ParserFlatbuffersFixture.hpp"
#include "../TfLiteParser.hpp"
#include <string>
-BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
-
+TEST_SUITE("TensorflowLiteParser_ElementwiseUnary")
+{
struct ElementWiseUnaryFixture : public ParserFlatbuffersFixture
{
explicit ElementWiseUnaryFixture(const std::string& operatorCode,
@@ -75,7 +74,7 @@ struct SimpleAbsFixture : public ElementWiseUnaryFixture
SimpleAbsFixture() : ElementWiseUnaryFixture("ABS", "FLOAT32", "[ 2, 2 ]", "[ 2, 2 ]") {}
};
-BOOST_FIXTURE_TEST_CASE(ParseAbs, SimpleAbsFixture)
+TEST_CASE_FIXTURE(SimpleAbsFixture, "ParseAbs")
{
std::vector<float> inputValues
{
@@ -99,7 +98,7 @@ struct SimpleExpFixture : public ElementWiseUnaryFixture
SimpleExpFixture() : ElementWiseUnaryFixture("EXP", "FLOAT32", "[ 1, 2, 3, 1 ]", "[ 1, 2, 3, 1 ]") {}
};
-BOOST_FIXTURE_TEST_CASE(ParseExp, SimpleExpFixture)
+TEST_CASE_FIXTURE(SimpleExpFixture, "ParseExp")
{
RunTest<4, armnn::DataType::Float32>(0, {{ "inputTensor", { 0.0f, 1.0f, 2.0f,
3.0f, 4.0f, 5.0f} }},
@@ -112,7 +111,7 @@ struct SimpleLogicalNotFixture : public ElementWiseUnaryFixture
SimpleLogicalNotFixture() : ElementWiseUnaryFixture("LOGICAL_NOT", "BOOL", "[ 1, 1, 1, 4 ]", "[ 1, 1, 1, 4 ]") {}
};
-BOOST_FIXTURE_TEST_CASE(ParseLogicalNot, SimpleLogicalNotFixture)
+TEST_CASE_FIXTURE(SimpleLogicalNotFixture, "ParseLogicalNot")
{
RunTest<4, armnn::DataType::Boolean>(0, {{ "inputTensor", { 0, 1, 0, 1 } }},
{{ "outputTensor",{ 1, 0, 1, 0 } } });
@@ -123,7 +122,7 @@ struct SimpleNegFixture : public ElementWiseUnaryFixture
SimpleNegFixture() : ElementWiseUnaryFixture("NEG", "FLOAT32", "[ 1, 2, 3, 1 ]", "[ 1, 2, 3, 1 ]") {}
};
-BOOST_FIXTURE_TEST_CASE(ParseNeg, SimpleNegFixture)
+TEST_CASE_FIXTURE(SimpleNegFixture, "ParseNeg")
{
RunTest<4, armnn::DataType::Float32>(0, {{ "inputTensor", { 0.0f, 1.0f, -2.0f,
20.0855185f, -54.5980834f, 5.0f} }},
@@ -136,7 +135,7 @@ struct SimpleRsqrtFixture : public ElementWiseUnaryFixture
SimpleRsqrtFixture() : ElementWiseUnaryFixture("RSQRT", "FLOAT32", "[ 1, 2, 3, 1 ]", "[ 1, 2, 3, 1 ]") {}
};
-BOOST_FIXTURE_TEST_CASE(ParseRsqrt, SimpleRsqrtFixture)
+TEST_CASE_FIXTURE(SimpleRsqrtFixture, "ParseRsqrt")
{
RunTest<4, armnn::DataType::Float32>(0, {{ "inputTensor", { 1.0f, 4.0f, 16.0f,
25.0f, 64.0f, 100.0f } }},
@@ -144,5 +143,4 @@ BOOST_FIXTURE_TEST_CASE(ParseRsqrt, SimpleRsqrtFixture)
0.2f, 0.125f, 0.1f} }});
}
-
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnnTfLiteParser/test/FullyConnected.cpp b/src/armnnTfLiteParser/test/FullyConnected.cpp
index 1ce1b2f74f..521ab34b7a 100644
--- a/src/armnnTfLiteParser/test/FullyConnected.cpp
+++ b/src/armnnTfLiteParser/test/FullyConnected.cpp
@@ -3,14 +3,13 @@
// SPDX-License-Identifier: MIT
//
-#include <boost/test/unit_test.hpp>
#include "ParserFlatbuffersFixture.hpp"
#include "../TfLiteParser.hpp"
#include <string>
-BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
-
+TEST_SUITE("TensorflowLiteParser_FullyConnected")
+{
struct FullyConnectedFixture : public ParserFlatbuffersFixture
{
explicit FullyConnectedFixture(const std::string& inputShape,
@@ -122,7 +121,7 @@ struct FullyConnectedWithNoBiasFixture : FullyConnectedFixture
{}
};
-BOOST_FIXTURE_TEST_CASE(FullyConnectedWithNoBias, FullyConnectedWithNoBiasFixture)
+TEST_CASE_FIXTURE(FullyConnectedWithNoBiasFixture, "FullyConnectedWithNoBias")
{
RunTest<2, armnn::DataType::QAsymmU8>(
0,
@@ -142,7 +141,7 @@ struct FullyConnectedWithBiasFixture : FullyConnectedFixture
{}
};
-BOOST_FIXTURE_TEST_CASE(ParseFullyConnectedWithBias, FullyConnectedWithBiasFixture)
+TEST_CASE_FIXTURE(FullyConnectedWithBiasFixture, "ParseFullyConnectedWithBias")
{
RunTest<2, armnn::DataType::QAsymmU8>(
0,
@@ -162,7 +161,7 @@ struct FullyConnectedWithBiasMultipleOutputsFixture : FullyConnectedFixture
{}
};
-BOOST_FIXTURE_TEST_CASE(FullyConnectedWithBiasMultipleOutputs, FullyConnectedWithBiasMultipleOutputsFixture)
+TEST_CASE_FIXTURE(FullyConnectedWithBiasMultipleOutputsFixture, "FullyConnectedWithBiasMultipleOutputs")
{
RunTest<2, armnn::DataType::QAsymmU8>(
0,
@@ -182,9 +181,8 @@ struct DynamicFullyConnectedWithBiasMultipleOutputsFixture : FullyConnectedFixtu
{ }
};
-BOOST_FIXTURE_TEST_CASE(
- DynamicFullyConnectedWithBiasMultipleOutputs,
- DynamicFullyConnectedWithBiasMultipleOutputsFixture)
+TEST_CASE_FIXTURE(
+ DynamicFullyConnectedWithBiasMultipleOutputsFixture, "DynamicFullyConnectedWithBiasMultipleOutputs")
{
RunTest<2,
armnn::DataType::QAsymmU8,
@@ -327,7 +325,7 @@ struct FullyConnectedNonConstWeights : FullyConnectedNonConstWeightsFixture
{}
};
-BOOST_FIXTURE_TEST_CASE(ParseFullyConnectedNonConstWeights, FullyConnectedNonConstWeights)
+TEST_CASE_FIXTURE(FullyConnectedNonConstWeights, "ParseFullyConnectedNonConstWeights")
{
RunTest<2, armnn::DataType::QAsymmS8,
armnn::DataType::Signed32,
@@ -348,7 +346,7 @@ struct FullyConnectedNonConstWeightsNoBias : FullyConnectedNonConstWeightsFixtur
{}
};
-BOOST_FIXTURE_TEST_CASE(ParseFullyConnectedNonConstWeightsNoBias, FullyConnectedNonConstWeightsNoBias)
+TEST_CASE_FIXTURE(FullyConnectedNonConstWeightsNoBias, "ParseFullyConnectedNonConstWeightsNoBias")
{
RunTest<2, armnn::DataType::QAsymmS8,
armnn::DataType::QAsymmS8>(
@@ -357,4 +355,4 @@ BOOST_FIXTURE_TEST_CASE(ParseFullyConnectedNonConstWeightsNoBias, FullyConnected
{{"output", { 20 }}});
}
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnnTfLiteParser/test/Gather.cpp b/src/armnnTfLiteParser/test/Gather.cpp
index 498d56d254..3c0bd9d6c5 100644
--- a/src/armnnTfLiteParser/test/Gather.cpp
+++ b/src/armnnTfLiteParser/test/Gather.cpp
@@ -3,15 +3,14 @@
// SPDX-License-Identifier: MIT
//
-#include <boost/test/unit_test.hpp>
#include "ParserFlatbuffersFixture.hpp"
#include "../TfLiteParser.hpp"
#include <string>
#include <iostream>
-BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
-
+TEST_SUITE("TensorflowLiteParser_Gather")
+{
struct GatherFixture : public ParserFlatbuffersFixture
{
explicit GatherFixture(const std::string& paramsShape,
@@ -95,7 +94,7 @@ struct SimpleGatherFixture : public GatherFixture
SimpleGatherFixture() : GatherFixture("[ 5, 2 ]", "[ 3, 2 ]", "[ 3 ]") {}
};
-BOOST_FIXTURE_TEST_CASE(ParseGather, SimpleGatherFixture)
+TEST_CASE_FIXTURE(SimpleGatherFixture, "ParseGather")
{
RunTest<2, armnn::DataType::Float32, armnn::DataType::Signed32, armnn::DataType::Float32>
(0,
@@ -109,7 +108,7 @@ struct GatherUint8Fixture : public GatherFixture
GatherUint8Fixture() : GatherFixture("[ 8 ]", "[ 3 ]", "[ 3 ]", "UINT8") {}
};
-BOOST_FIXTURE_TEST_CASE(ParseGatherUint8, GatherUint8Fixture)
+TEST_CASE_FIXTURE(GatherUint8Fixture, "ParseGatherUint8")
{
RunTest<1, armnn::DataType::QAsymmU8, armnn::DataType::Signed32, armnn::DataType::QAsymmU8>
(0,
@@ -118,4 +117,4 @@ BOOST_FIXTURE_TEST_CASE(ParseGatherUint8, GatherUint8Fixture)
{{ "outputTensor", { 8, 7, 6 }}});
}
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnnTfLiteParser/test/GetBuffer.cpp b/src/armnnTfLiteParser/test/GetBuffer.cpp
index 0e72522c79..9dfc9fff13 100644
--- a/src/armnnTfLiteParser/test/GetBuffer.cpp
+++ b/src/armnnTfLiteParser/test/GetBuffer.cpp
@@ -3,15 +3,14 @@
// SPDX-License-Identifier: MIT
//
-#include <boost/test/unit_test.hpp>
#include "ParserFlatbuffersFixture.hpp"
#include "../TfLiteParser.hpp"
#include <sstream>
using armnnTfLiteParser::TfLiteParserImpl;
-BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
-
+TEST_SUITE("TensorflowLiteParser_GetBuffer")
+{
struct GetBufferFixture : public ParserFlatbuffersFixture
{
explicit GetBufferFixture()
@@ -93,12 +92,12 @@ struct GetBufferFixture : public ParserFlatbuffersFixture
{
for(long unsigned int i=0; i<bufferValues.size(); i++)
{
- BOOST_CHECK_EQUAL(TfLiteParserImpl::GetBuffer(model, bufferIndex)->data[i], bufferValues[i]);
+ CHECK_EQ(TfLiteParserImpl::GetBuffer(model, bufferIndex)->data[i], bufferValues[i]);
}
}
};
-BOOST_FIXTURE_TEST_CASE(GetBufferCheckContents, GetBufferFixture)
+TEST_CASE_FIXTURE(GetBufferFixture, "GetBufferCheckContents")
{
//Check contents of buffer are correct
TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
@@ -107,23 +106,23 @@ BOOST_FIXTURE_TEST_CASE(GetBufferCheckContents, GetBufferFixture)
CheckBufferContents(model, bufferValues, 2);
}
-BOOST_FIXTURE_TEST_CASE(GetBufferCheckEmpty, GetBufferFixture)
+TEST_CASE_FIXTURE(GetBufferFixture, "GetBufferCheckEmpty")
{
//Check if test fixture buffers are empty or not
TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
m_GraphBinary.size());
- BOOST_CHECK(TfLiteParserImpl::GetBuffer(model, 0)->data.empty());
- BOOST_CHECK(TfLiteParserImpl::GetBuffer(model, 1)->data.empty());
- BOOST_CHECK(!TfLiteParserImpl::GetBuffer(model, 2)->data.empty());
- BOOST_CHECK(TfLiteParserImpl::GetBuffer(model, 3)->data.empty());
+ CHECK(TfLiteParserImpl::GetBuffer(model, 0)->data.empty());
+ CHECK(TfLiteParserImpl::GetBuffer(model, 1)->data.empty());
+ CHECK(!TfLiteParserImpl::GetBuffer(model, 2)->data.empty());
+ CHECK(TfLiteParserImpl::GetBuffer(model, 3)->data.empty());
}
-BOOST_FIXTURE_TEST_CASE(GetBufferCheckParseException, GetBufferFixture)
+TEST_CASE_FIXTURE(GetBufferFixture, "GetBufferCheckParseException")
{
//Check if armnn::ParseException thrown when invalid buffer index used
TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
m_GraphBinary.size());
- BOOST_CHECK_THROW(TfLiteParserImpl::GetBuffer(model, 4), armnn::Exception);
+ CHECK_THROWS_AS(TfLiteParserImpl::GetBuffer(model, 4), armnn::Exception);
}
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnnTfLiteParser/test/GetInputsOutputs.cpp b/src/armnnTfLiteParser/test/GetInputsOutputs.cpp
index 894de0c3a0..398217f42b 100644
--- a/src/armnnTfLiteParser/test/GetInputsOutputs.cpp
+++ b/src/armnnTfLiteParser/test/GetInputsOutputs.cpp
@@ -2,15 +2,15 @@
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
-#include <boost/test/unit_test.hpp>
+
#include "ParserFlatbuffersFixture.hpp"
#include "../TfLiteParser.hpp"
using armnnTfLiteParser::TfLiteParserImpl;
using ModelPtr = TfLiteParserImpl::ModelPtr;
-BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
-
+TEST_SUITE("TensorflowLiteParser_GetInputsOutputs")
+{
struct GetInputsOutputsMainFixture : public ParserFlatbuffersFixture
{
explicit GetInputsOutputsMainFixture(const std::string& inputs, const std::string& outputs)
@@ -150,100 +150,100 @@ struct GetInputsOutputsFixture : GetInputsOutputsMainFixture
GetInputsOutputsFixture() : GetInputsOutputsMainFixture("[ 1 ]", "[ 0 ]") {}
};
-BOOST_FIXTURE_TEST_CASE(GetEmptyInputs, GetEmptyInputsOutputsFixture)
+TEST_CASE_FIXTURE(GetEmptyInputsOutputsFixture, "GetEmptyInputs")
{
TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
m_GraphBinary.size());
TfLiteParserImpl::TensorRawPtrVector tensors = TfLiteParserImpl::GetInputs(model, 0, 0);
- BOOST_CHECK_EQUAL(0, tensors.size());
+ CHECK_EQ(0, tensors.size());
}
-BOOST_FIXTURE_TEST_CASE(GetEmptyOutputs, GetEmptyInputsOutputsFixture)
+TEST_CASE_FIXTURE(GetEmptyInputsOutputsFixture, "GetEmptyOutputs")
{
TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
m_GraphBinary.size());
TfLiteParserImpl::TensorRawPtrVector tensors = TfLiteParserImpl::GetOutputs(model, 0, 0);
- BOOST_CHECK_EQUAL(0, tensors.size());
+ CHECK_EQ(0, tensors.size());
}
-BOOST_FIXTURE_TEST_CASE(GetInputs, GetInputsOutputsFixture)
+TEST_CASE_FIXTURE(GetInputsOutputsFixture, "GetInputs")
{
TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
m_GraphBinary.size());
TfLiteParserImpl::TensorRawPtrVector tensors = TfLiteParserImpl::GetInputs(model, 0, 0);
- BOOST_CHECK_EQUAL(1, tensors.size());
+ CHECK_EQ(1, tensors.size());
CheckTensors(tensors[0], 4, { 1, 2, 2, 1 }, tflite::TensorType::TensorType_UINT8, 1,
"InputTensor", { -1.2f }, { 25.5f }, { 0.25f }, { 10 });
}
-BOOST_FIXTURE_TEST_CASE(GetOutputs, GetInputsOutputsFixture)
+TEST_CASE_FIXTURE(GetInputsOutputsFixture, "GetOutputs")
{
TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
m_GraphBinary.size());
TfLiteParserImpl::TensorRawPtrVector tensors = TfLiteParserImpl::GetOutputs(model, 0, 0);
- BOOST_CHECK_EQUAL(1, tensors.size());
+ CHECK_EQ(1, tensors.size());
CheckTensors(tensors[0], 4, { 1, 1, 1, 1 }, tflite::TensorType::TensorType_UINT8, 0,
"OutputTensor", { 0.0f }, { 255.0f }, { 1.0f }, { 0 });
}
-BOOST_FIXTURE_TEST_CASE(GetInputsMultipleInputs, GetInputsOutputsFixture)
+TEST_CASE_FIXTURE(GetInputsOutputsFixture, "GetInputsMultipleInputs")
{
TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
m_GraphBinary.size());
TfLiteParserImpl::TensorRawPtrVector tensors = TfLiteParserImpl::GetInputs(model, 1, 0);
- BOOST_CHECK_EQUAL(2, tensors.size());
+ CHECK_EQ(2, tensors.size());
CheckTensors(tensors[0], 4, { 1, 3, 3, 1 }, tflite::TensorType::TensorType_UINT8, 0,
"ConvInputTensor", { }, { }, { 1.0f }, { 0 });
CheckTensors(tensors[1], 4, { 1, 3, 3, 1 }, tflite::TensorType::TensorType_UINT8, 2,
"filterTensor", { 0.0f }, { 255.0f }, { 1.0f }, { 0 });
}
-BOOST_FIXTURE_TEST_CASE(GetOutputs2, GetInputsOutputsFixture)
+TEST_CASE_FIXTURE(GetInputsOutputsFixture, "GetOutputs2")
{
TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
m_GraphBinary.size());
TfLiteParserImpl::TensorRawPtrVector tensors = TfLiteParserImpl::GetOutputs(model, 1, 0);
- BOOST_CHECK_EQUAL(1, tensors.size());
+ CHECK_EQ(1, tensors.size());
CheckTensors(tensors[0], 4, { 1, 1, 1, 1 }, tflite::TensorType::TensorType_UINT8, 1,
"ConvOutputTensor", { 0.0f }, { 511.0f }, { 2.0f }, { 0 });
}
-BOOST_AUTO_TEST_CASE(GetInputsNullModel)
+TEST_CASE("GetInputsNullModel")
{
- BOOST_CHECK_THROW(TfLiteParserImpl::GetInputs(nullptr, 0, 0), armnn::ParseException);
+ CHECK_THROWS_AS(TfLiteParserImpl::GetInputs(nullptr, 0, 0), armnn::ParseException);
}
-BOOST_AUTO_TEST_CASE(GetOutputsNullModel)
+TEST_CASE("GetOutputsNullModel")
{
- BOOST_CHECK_THROW(TfLiteParserImpl::GetOutputs(nullptr, 0, 0), armnn::ParseException);
+ CHECK_THROWS_AS(TfLiteParserImpl::GetOutputs(nullptr, 0, 0), armnn::ParseException);
}
-BOOST_FIXTURE_TEST_CASE(GetInputsInvalidSubgraph, GetInputsOutputsFixture)
+TEST_CASE_FIXTURE(GetInputsOutputsFixture, "GetInputsInvalidSubgraph")
{
TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
m_GraphBinary.size());
- BOOST_CHECK_THROW(TfLiteParserImpl::GetInputs(model, 2, 0), armnn::ParseException);
+ CHECK_THROWS_AS(TfLiteParserImpl::GetInputs(model, 2, 0), armnn::ParseException);
}
-BOOST_FIXTURE_TEST_CASE(GetOutputsInvalidSubgraph, GetInputsOutputsFixture)
+TEST_CASE_FIXTURE(GetInputsOutputsFixture, "GetOutputsInvalidSubgraph")
{
TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
m_GraphBinary.size());
- BOOST_CHECK_THROW(TfLiteParserImpl::GetOutputs(model, 2, 0), armnn::ParseException);
+ CHECK_THROWS_AS(TfLiteParserImpl::GetOutputs(model, 2, 0), armnn::ParseException);
}
-BOOST_FIXTURE_TEST_CASE(GetInputsInvalidOperator, GetInputsOutputsFixture)
+TEST_CASE_FIXTURE(GetInputsOutputsFixture, "GetInputsInvalidOperator")
{
TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
m_GraphBinary.size());
- BOOST_CHECK_THROW(TfLiteParserImpl::GetInputs(model, 0, 1), armnn::ParseException);
+ CHECK_THROWS_AS(TfLiteParserImpl::GetInputs(model, 0, 1), armnn::ParseException);
}
-BOOST_FIXTURE_TEST_CASE(GetOutputsInvalidOperator, GetInputsOutputsFixture)
+TEST_CASE_FIXTURE(GetInputsOutputsFixture, "GetOutputsInvalidOperator")
{
TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
m_GraphBinary.size());
- BOOST_CHECK_THROW(TfLiteParserImpl::GetOutputs(model, 0, 1), armnn::ParseException);
+ CHECK_THROWS_AS(TfLiteParserImpl::GetOutputs(model, 0, 1), armnn::ParseException);
}
-BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
+}
\ No newline at end of file
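For context on the macro mapping used throughout this patch: Boost suites become brace-delimited doctest TEST_SUITE blocks, and BOOST_FIXTURE_TEST_CASE(Name, Fixture) becomes TEST_CASE_FIXTURE(Fixture, "Name"), with the argument order reversed and the test name quoted. A minimal, self-contained sketch of the pattern (the fixture, suite and test names below are illustrative only, not taken from the ArmNN sources):

    #define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN   // lets this sketch build standalone
    #include <doctest/doctest.h>

    #include <stdexcept>
    #include <vector>

    struct VectorFixture            // stands in for ParserFlatbuffersFixture
    {
        std::vector<int> m_Data{ 1, 2, 3 };
    };

    TEST_SUITE("ExampleSuite")      // replaces BOOST_AUTO_TEST_SUITE(...)
    {                               // the closing brace replaces BOOST_AUTO_TEST_SUITE_END()

    TEST_CASE_FIXTURE(VectorFixture, "SizeIsThree")   // fixture first, quoted test name second
    {
        CHECK_EQ(3, m_Data.size());                   // fixture members are accessed directly
    }

    TEST_CASE("ThrowsOnBadInput")                     // replaces BOOST_AUTO_TEST_CASE(...)
    {
        CHECK_THROWS_AS(throw std::runtime_error("bad"), std::runtime_error);
    }

    }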
diff --git a/src/armnnTfLiteParser/test/GetSubgraphInputsOutputs.cpp b/src/armnnTfLiteParser/test/GetSubgraphInputsOutputs.cpp
index 100e8e96d5..5c64449c34 100644
--- a/src/armnnTfLiteParser/test/GetSubgraphInputsOutputs.cpp
+++ b/src/armnnTfLiteParser/test/GetSubgraphInputsOutputs.cpp
@@ -2,7 +2,7 @@
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
-#include <boost/test/unit_test.hpp>
+
#include "ParserFlatbuffersFixture.hpp"
#include "../TfLiteParser.hpp"
@@ -10,8 +10,8 @@ using armnnTfLiteParser::TfLiteParserImpl;
using ModelPtr = TfLiteParserImpl::ModelPtr;
using TensorRawPtr = TfLiteParserImpl::TensorRawPtr;
-BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
-
+TEST_SUITE("TensorflowLiteParser_GetSubgraphInputsOutputs")
+{
struct GetSubgraphInputsOutputsMainFixture : public ParserFlatbuffersFixture
{
explicit GetSubgraphInputsOutputsMainFixture(const std::string& inputs, const std::string& outputs)
@@ -151,88 +151,88 @@ struct GetSubgraphInputsOutputsFixture : GetSubgraphInputsOutputsMainFixture
GetSubgraphInputsOutputsFixture() : GetSubgraphInputsOutputsMainFixture("[ 1 ]", "[ 0 ]") {}
};
-BOOST_FIXTURE_TEST_CASE(GetEmptySubgraphInputs, GetEmptySubgraphInputsOutputsFixture)
+TEST_CASE_FIXTURE(GetEmptySubgraphInputsOutputsFixture, "GetEmptySubgraphInputs")
{
TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
m_GraphBinary.size());
TfLiteParserImpl::TensorIdRawPtrVector subgraphTensors = TfLiteParserImpl::GetSubgraphInputs(model, 0);
- BOOST_CHECK_EQUAL(0, subgraphTensors.size());
+ CHECK_EQ(0, subgraphTensors.size());
}
-BOOST_FIXTURE_TEST_CASE(GetEmptySubgraphOutputs, GetEmptySubgraphInputsOutputsFixture)
+TEST_CASE_FIXTURE(GetEmptySubgraphInputsOutputsFixture, "GetEmptySubgraphOutputs")
{
TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
m_GraphBinary.size());
TfLiteParserImpl::TensorIdRawPtrVector subgraphTensors = TfLiteParserImpl::GetSubgraphOutputs(model, 0);
- BOOST_CHECK_EQUAL(0, subgraphTensors.size());
+ CHECK_EQ(0, subgraphTensors.size());
}
-BOOST_FIXTURE_TEST_CASE(GetSubgraphInputs, GetSubgraphInputsOutputsFixture)
+TEST_CASE_FIXTURE(GetSubgraphInputsOutputsFixture, "GetSubgraphInputs")
{
TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
m_GraphBinary.size());
TfLiteParserImpl::TensorIdRawPtrVector subgraphTensors = TfLiteParserImpl::GetSubgraphInputs(model, 0);
- BOOST_CHECK_EQUAL(1, subgraphTensors.size());
- BOOST_CHECK_EQUAL(1, subgraphTensors[0].first);
+ CHECK_EQ(1, subgraphTensors.size());
+ CHECK_EQ(1, subgraphTensors[0].first);
CheckTensors(subgraphTensors[0].second, 4, { 1, 2, 2, 1 }, tflite::TensorType::TensorType_UINT8, 1,
"InputTensor", { -1.2f }, { 25.5f }, { 0.25f }, { 10 });
}
-BOOST_FIXTURE_TEST_CASE(GetSubgraphOutputsSimpleQuantized, GetSubgraphInputsOutputsFixture)
+TEST_CASE_FIXTURE(GetSubgraphInputsOutputsFixture, "GetSubgraphOutputsSimpleQuantized")
{
TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
m_GraphBinary.size());
TfLiteParserImpl::TensorIdRawPtrVector subgraphTensors = TfLiteParserImpl::GetSubgraphOutputs(model, 0);
- BOOST_CHECK_EQUAL(1, subgraphTensors.size());
- BOOST_CHECK_EQUAL(0, subgraphTensors[0].first);
+ CHECK_EQ(1, subgraphTensors.size());
+ CHECK_EQ(0, subgraphTensors[0].first);
CheckTensors(subgraphTensors[0].second, 4, { 1, 1, 1, 1 }, tflite::TensorType::TensorType_UINT8, 0,
"OutputTensor", { 0.0f }, { 255.0f }, { 1.0f }, { 0 });
}
-BOOST_FIXTURE_TEST_CASE(GetSubgraphInputsEmptyMinMax, GetSubgraphInputsOutputsFixture)
+TEST_CASE_FIXTURE(GetSubgraphInputsOutputsFixture, "GetSubgraphInputsEmptyMinMax")
{
TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
m_GraphBinary.size());
TfLiteParserImpl::TensorIdRawPtrVector subgraphTensors = TfLiteParserImpl::GetSubgraphInputs(model, 1);
- BOOST_CHECK_EQUAL(1, subgraphTensors.size());
- BOOST_CHECK_EQUAL(0, subgraphTensors[0].first);
+ CHECK_EQ(1, subgraphTensors.size());
+ CHECK_EQ(0, subgraphTensors[0].first);
CheckTensors(subgraphTensors[0].second, 4, { 1, 3, 3, 1 }, tflite::TensorType::TensorType_UINT8, 0,
"ConvInputTensor", { }, { }, { 1.0f }, { 0 });
}
-BOOST_FIXTURE_TEST_CASE(GetSubgraphOutputs, GetSubgraphInputsOutputsFixture)
+TEST_CASE_FIXTURE(GetSubgraphInputsOutputsFixture, "GetSubgraphOutputs")
{
TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
m_GraphBinary.size());
TfLiteParserImpl::TensorIdRawPtrVector subgraphTensors = TfLiteParserImpl::GetSubgraphOutputs(model, 1);
- BOOST_CHECK_EQUAL(1, subgraphTensors.size());
- BOOST_CHECK_EQUAL(1, subgraphTensors[0].first);
+ CHECK_EQ(1, subgraphTensors.size());
+ CHECK_EQ(1, subgraphTensors[0].first);
CheckTensors(subgraphTensors[0].second, 4, { 1, 1, 1, 1 }, tflite::TensorType::TensorType_UINT8, 1,
"ConvOutputTensor", { 0.0f }, { 511.0f }, { 2.0f }, { 0 });
}
-BOOST_AUTO_TEST_CASE(GetSubgraphInputsNullModel)
+TEST_CASE("GetSubgraphInputsNullModel")
{
- BOOST_CHECK_THROW(TfLiteParserImpl::GetSubgraphInputs(nullptr, 0), armnn::ParseException);
+ CHECK_THROWS_AS(TfLiteParserImpl::GetSubgraphInputs(nullptr, 0), armnn::ParseException);
}
-BOOST_AUTO_TEST_CASE(GetSubgraphOutputsNullModel)
+TEST_CASE("GetSubgraphOutputsNullModel")
{
- BOOST_CHECK_THROW(TfLiteParserImpl::GetSubgraphOutputs(nullptr, 0), armnn::ParseException);
+ CHECK_THROWS_AS(TfLiteParserImpl::GetSubgraphOutputs(nullptr, 0), armnn::ParseException);
}
-BOOST_FIXTURE_TEST_CASE(GetSubgraphInputsInvalidSubgraph, GetSubgraphInputsOutputsFixture)
+TEST_CASE_FIXTURE(GetSubgraphInputsOutputsFixture, "GetSubgraphInputsInvalidSubgraph")
{
TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
m_GraphBinary.size());
- BOOST_CHECK_THROW(TfLiteParserImpl::GetSubgraphInputs(model, 2), armnn::ParseException);
+ CHECK_THROWS_AS(TfLiteParserImpl::GetSubgraphInputs(model, 2), armnn::ParseException);
}
-BOOST_FIXTURE_TEST_CASE(GetSubgraphOutputsInvalidSubgraph, GetSubgraphInputsOutputsFixture)
+TEST_CASE_FIXTURE(GetSubgraphInputsOutputsFixture, "GetSubgraphOutputsInvalidSubgraph")
{
TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
m_GraphBinary.size());
- BOOST_CHECK_THROW(TfLiteParserImpl::GetSubgraphOutputs(model, 2), armnn::ParseException);
+ CHECK_THROWS_AS(TfLiteParserImpl::GetSubgraphOutputs(model, 2), armnn::ParseException);
}
-BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
+}
\ No newline at end of file
diff --git a/src/armnnTfLiteParser/test/GetTensorIds.cpp b/src/armnnTfLiteParser/test/GetTensorIds.cpp
index f45f6e66b9..5b17dcd037 100644
--- a/src/armnnTfLiteParser/test/GetTensorIds.cpp
+++ b/src/armnnTfLiteParser/test/GetTensorIds.cpp
@@ -2,15 +2,15 @@
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
-#include <boost/test/unit_test.hpp>
+
#include "ParserFlatbuffersFixture.hpp"
#include "../TfLiteParser.hpp"
using armnnTfLiteParser::TfLiteParserImpl;
using ModelPtr = TfLiteParserImpl::ModelPtr;
-BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
-
+TEST_SUITE("TensorflowLiteParser_GetTensorIds")
+{
struct GetTensorIdsFixture : public ParserFlatbuffersFixture
{
explicit GetTensorIdsFixture(const std::string& inputs, const std::string& outputs)
@@ -89,82 +89,82 @@ struct GetInputOutputTensorIdsFixture : GetTensorIdsFixture
GetInputOutputTensorIdsFixture() : GetTensorIdsFixture("[ 0, 1, 2 ]", "[ 3 ]") {}
};
-BOOST_FIXTURE_TEST_CASE(GetEmptyInputTensorIds, GetEmptyTensorIdsFixture)
+TEST_CASE_FIXTURE(GetEmptyTensorIdsFixture, "GetEmptyInputTensorIds")
{
TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
m_GraphBinary.size());
std::vector<int32_t> expectedIds = { };
std::vector<int32_t> inputTensorIds = TfLiteParserImpl::GetInputTensorIds(model, 0, 0);
- BOOST_CHECK_EQUAL_COLLECTIONS(expectedIds.begin(), expectedIds.end(),
- inputTensorIds.begin(), inputTensorIds.end());
+ CHECK(std::equal(expectedIds.begin(), expectedIds.end(),
+ inputTensorIds.begin(), inputTensorIds.end()));
}
-BOOST_FIXTURE_TEST_CASE(GetEmptyOutputTensorIds, GetEmptyTensorIdsFixture)
+TEST_CASE_FIXTURE(GetEmptyTensorIdsFixture, "GetEmptyOutputTensorIds")
{
TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
m_GraphBinary.size());
std::vector<int32_t> expectedIds = { };
std::vector<int32_t> outputTensorIds = TfLiteParserImpl::GetOutputTensorIds(model, 0, 0);
- BOOST_CHECK_EQUAL_COLLECTIONS(expectedIds.begin(), expectedIds.end(),
- outputTensorIds.begin(), outputTensorIds.end());
+ CHECK(std::equal(expectedIds.begin(), expectedIds.end(),
+ outputTensorIds.begin(), outputTensorIds.end()));
}
-BOOST_FIXTURE_TEST_CASE(GetInputTensorIds, GetInputOutputTensorIdsFixture)
+TEST_CASE_FIXTURE(GetInputOutputTensorIdsFixture, "GetInputTensorIds")
{
TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
m_GraphBinary.size());
std::vector<int32_t> expectedInputIds = { 0, 1, 2 };
std::vector<int32_t> inputTensorIds = TfLiteParserImpl::GetInputTensorIds(model, 0, 0);
- BOOST_CHECK_EQUAL_COLLECTIONS(expectedInputIds.begin(), expectedInputIds.end(),
- inputTensorIds.begin(), inputTensorIds.end());
+ CHECK(std::equal(expectedInputIds.begin(), expectedInputIds.end(),
+ inputTensorIds.begin(), inputTensorIds.end()));
}
-BOOST_FIXTURE_TEST_CASE(GetOutputTensorIds, GetInputOutputTensorIdsFixture)
+TEST_CASE_FIXTURE(GetInputOutputTensorIdsFixture, "GetOutputTensorIds")
{
TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
m_GraphBinary.size());
std::vector<int32_t> expectedOutputIds = { 3 };
std::vector<int32_t> outputTensorIds = TfLiteParserImpl::GetOutputTensorIds(model, 0, 0);
- BOOST_CHECK_EQUAL_COLLECTIONS(expectedOutputIds.begin(), expectedOutputIds.end(),
- outputTensorIds.begin(), outputTensorIds.end());
+ CHECK(std::equal(expectedOutputIds.begin(), expectedOutputIds.end(),
+ outputTensorIds.begin(), outputTensorIds.end()));
}
-BOOST_FIXTURE_TEST_CASE(GetInputTensorIdsNullModel, GetInputOutputTensorIdsFixture)
+TEST_CASE_FIXTURE(GetInputOutputTensorIdsFixture, "GetInputTensorIdsNullModel")
{
- BOOST_CHECK_THROW(TfLiteParserImpl::GetInputTensorIds(nullptr, 0, 0), armnn::ParseException);
+ CHECK_THROWS_AS(TfLiteParserImpl::GetInputTensorIds(nullptr, 0, 0), armnn::ParseException);
}
-BOOST_FIXTURE_TEST_CASE(GetOutputTensorIdsNullModel, GetInputOutputTensorIdsFixture)
+TEST_CASE_FIXTURE(GetInputOutputTensorIdsFixture, "GetOutputTensorIdsNullModel")
{
- BOOST_CHECK_THROW(TfLiteParserImpl::GetOutputTensorIds(nullptr, 0, 0), armnn::ParseException);
+ CHECK_THROWS_AS(TfLiteParserImpl::GetOutputTensorIds(nullptr, 0, 0), armnn::ParseException);
}
-BOOST_FIXTURE_TEST_CASE(GetInputTensorIdsInvalidSubgraph, GetInputOutputTensorIdsFixture)
+TEST_CASE_FIXTURE(GetInputOutputTensorIdsFixture, "GetInputTensorIdsInvalidSubgraph")
{
TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
m_GraphBinary.size());
- BOOST_CHECK_THROW(TfLiteParserImpl::GetInputTensorIds(model, 1, 0), armnn::ParseException);
+ CHECK_THROWS_AS(TfLiteParserImpl::GetInputTensorIds(model, 1, 0), armnn::ParseException);
}
-BOOST_FIXTURE_TEST_CASE(GetOutputTensorIdsInvalidSubgraph, GetInputOutputTensorIdsFixture)
+TEST_CASE_FIXTURE(GetInputOutputTensorIdsFixture, "GetOutputTensorIdsInvalidSubgraph")
{
TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
m_GraphBinary.size());
- BOOST_CHECK_THROW(TfLiteParserImpl::GetOutputTensorIds(model, 1, 0), armnn::ParseException);
+ CHECK_THROWS_AS(TfLiteParserImpl::GetOutputTensorIds(model, 1, 0), armnn::ParseException);
}
-BOOST_FIXTURE_TEST_CASE(GetInputTensorIdsInvalidOperator, GetInputOutputTensorIdsFixture)
+TEST_CASE_FIXTURE(GetInputOutputTensorIdsFixture, "GetInputTensorIdsInvalidOperator")
{
TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
m_GraphBinary.size());
- BOOST_CHECK_THROW(TfLiteParserImpl::GetInputTensorIds(model, 0, 1), armnn::ParseException);
+ CHECK_THROWS_AS(TfLiteParserImpl::GetInputTensorIds(model, 0, 1), armnn::ParseException);
}
-BOOST_FIXTURE_TEST_CASE(GetOutputTensorIdsInvalidOperator, GetInputOutputTensorIdsFixture)
+TEST_CASE_FIXTURE(GetInputOutputTensorIdsFixture, "GetOutputTensorIdsInvalidOperator")
{
TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
m_GraphBinary.size());
- BOOST_CHECK_THROW(TfLiteParserImpl::GetOutputTensorIds(model, 0, 1), armnn::ParseException);
+ CHECK_THROWS_AS(TfLiteParserImpl::GetOutputTensorIds(model, 0, 1), armnn::ParseException);
}
-BOOST_AUTO_TEST_SUITE_END()
+}
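The BOOST_CHECK_EQUAL_COLLECTIONS conversions above and in the following files lean on the four-iterator std::equal overload, which (since C++14) also fails when the two ranges differ in length, so the implicit size check is preserved; what is lost is Boost's element-by-element failure report. A small sketch of the idiom (assuming a doctest main is provided as in the earlier sketch, and noting that the converted test files rely on <algorithm> being available, possibly only transitively):

    #define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN
    #include <doctest/doctest.h>

    #include <algorithm>   // std::equal
    #include <cstdint>
    #include <vector>

    TEST_CASE("EqualCollections")
    {
        std::vector<int32_t> expected = { 0, 1, 2 };
        std::vector<int32_t> actual   = { 0, 1, 2 };

        // Four-iterator form: false on any element mismatch or on a length mismatch.
        CHECK(std::equal(expected.begin(), expected.end(),
                         actual.begin(), actual.end()));
    }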
diff --git a/src/armnnTfLiteParser/test/InputOutputTensorNames.cpp b/src/armnnTfLiteParser/test/InputOutputTensorNames.cpp
index d7a4371548..97d9381413 100644
--- a/src/armnnTfLiteParser/test/InputOutputTensorNames.cpp
+++ b/src/armnnTfLiteParser/test/InputOutputTensorNames.cpp
@@ -3,12 +3,11 @@
// SPDX-License-Identifier: MIT
//
-#include <boost/test/unit_test.hpp>
#include "ParserFlatbuffersFixture.hpp"
#include "../TfLiteParser.hpp"
-BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
-
+TEST_SUITE("TensorflowLiteParser_InputOutputTensorNames")
+{
struct EmptyNetworkFixture : public ParserFlatbuffersFixture
{
explicit EmptyNetworkFixture() {
@@ -21,12 +20,12 @@ struct EmptyNetworkFixture : public ParserFlatbuffersFixture
}
};
-BOOST_FIXTURE_TEST_CASE(EmptyNetworkHasNoInputsAndOutputs, EmptyNetworkFixture)
+TEST_CASE_FIXTURE(EmptyNetworkFixture, "EmptyNetworkHasNoInputsAndOutputs")
{
Setup();
- BOOST_TEST(m_Parser->GetSubgraphCount() == 1);
- BOOST_TEST(m_Parser->GetSubgraphInputTensorNames(0).size() == 0);
- BOOST_TEST(m_Parser->GetSubgraphOutputTensorNames(0).size() == 0);
+ CHECK(m_Parser->GetSubgraphCount() == 1);
+ CHECK(m_Parser->GetSubgraphInputTensorNames(0).size() == 0);
+ CHECK(m_Parser->GetSubgraphOutputTensorNames(0).size() == 0);
}
struct MissingTensorsFixture : public ParserFlatbuffersFixture
@@ -45,10 +44,10 @@ struct MissingTensorsFixture : public ParserFlatbuffersFixture
}
};
-BOOST_FIXTURE_TEST_CASE(MissingTensorsThrowException, MissingTensorsFixture)
+TEST_CASE_FIXTURE(MissingTensorsFixture, "MissingTensorsThrowException")
{
// this throws because it cannot do the input output tensor connections
- BOOST_CHECK_THROW(Setup(), armnn::ParseException);
+ CHECK_THROWS_AS(Setup(), armnn::ParseException);
}
struct InvalidTensorsFixture : public ParserFlatbuffersFixture
@@ -78,11 +77,11 @@ struct InvalidTensorsFixture : public ParserFlatbuffersFixture
}
};
-BOOST_FIXTURE_TEST_CASE(InvalidTensorsThrowException, InvalidTensorsFixture)
+TEST_CASE_FIXTURE(InvalidTensorsFixture, "InvalidTensorsThrowException")
{
// Tensor numDimensions must be less than or equal to MaxNumOfTensorDimensions
static_assert(armnn::MaxNumOfTensorDimensions == 5, "Please update InvalidTensorsFixture");
- BOOST_CHECK_THROW(Setup(), armnn::InvalidArgumentException);
+ CHECK_THROWS_AS(Setup(), armnn::InvalidArgumentException);
}
struct ValidTensorsFixture : public ParserFlatbuffersFixture
@@ -128,22 +127,22 @@ struct ValidTensorsFixture : public ParserFlatbuffersFixture
}
};
-BOOST_FIXTURE_TEST_CASE(GetValidInputOutputTensorNames, ValidTensorsFixture)
+TEST_CASE_FIXTURE(ValidTensorsFixture, "GetValidInputOutputTensorNames")
{
Setup();
- BOOST_CHECK_EQUAL(m_Parser->GetSubgraphInputTensorNames(0).size(), 1u);
- BOOST_CHECK_EQUAL(m_Parser->GetSubgraphOutputTensorNames(0).size(), 1u);
- BOOST_CHECK_EQUAL(m_Parser->GetSubgraphInputTensorNames(0)[0], "In");
- BOOST_CHECK_EQUAL(m_Parser->GetSubgraphOutputTensorNames(0)[0], "Out");
+ CHECK_EQ(m_Parser->GetSubgraphInputTensorNames(0).size(), 1u);
+ CHECK_EQ(m_Parser->GetSubgraphOutputTensorNames(0).size(), 1u);
+ CHECK_EQ(m_Parser->GetSubgraphInputTensorNames(0)[0], "In");
+ CHECK_EQ(m_Parser->GetSubgraphOutputTensorNames(0)[0], "Out");
}
-BOOST_FIXTURE_TEST_CASE(ThrowIfSubgraphIdInvalidForInOutNames, ValidTensorsFixture)
+TEST_CASE_FIXTURE(ValidTensorsFixture, "ThrowIfSubgraphIdInvalidForInOutNames")
{
Setup();
// these throw because of the invalid subgraph id
- BOOST_CHECK_THROW(m_Parser->GetSubgraphInputTensorNames(1), armnn::ParseException);
- BOOST_CHECK_THROW(m_Parser->GetSubgraphOutputTensorNames(1), armnn::ParseException);
+ CHECK_THROWS_AS(m_Parser->GetSubgraphInputTensorNames(1), armnn::ParseException);
+ CHECK_THROWS_AS(m_Parser->GetSubgraphOutputTensorNames(1), armnn::ParseException);
}
struct Rank0TensorFixture : public ParserFlatbuffersFixture
@@ -185,14 +184,14 @@ struct Rank0TensorFixture : public ParserFlatbuffersFixture
}
};
-BOOST_FIXTURE_TEST_CASE(Rank0Tensor, Rank0TensorFixture)
+TEST_CASE_FIXTURE(Rank0TensorFixture, "Rank0Tensor")
{
Setup();
- BOOST_CHECK_EQUAL(m_Parser->GetSubgraphInputTensorNames(0).size(), 2u);
- BOOST_CHECK_EQUAL(m_Parser->GetSubgraphOutputTensorNames(0).size(), 1u);
- BOOST_CHECK_EQUAL(m_Parser->GetSubgraphInputTensorNames(0)[0], "In0");
- BOOST_CHECK_EQUAL(m_Parser->GetSubgraphInputTensorNames(0)[1], "In1");
- BOOST_CHECK_EQUAL(m_Parser->GetSubgraphOutputTensorNames(0)[0], "Out");
+ CHECK_EQ(m_Parser->GetSubgraphInputTensorNames(0).size(), 2u);
+ CHECK_EQ(m_Parser->GetSubgraphOutputTensorNames(0).size(), 1u);
+ CHECK_EQ(m_Parser->GetSubgraphInputTensorNames(0)[0], "In0");
+ CHECK_EQ(m_Parser->GetSubgraphInputTensorNames(0)[1], "In1");
+ CHECK_EQ(m_Parser->GetSubgraphOutputTensorNames(0)[0], "Out");
}
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnnTfLiteParser/test/L2Normalization.cpp b/src/armnnTfLiteParser/test/L2Normalization.cpp
index 0dd5eeffac..f4eeaac051 100644
--- a/src/armnnTfLiteParser/test/L2Normalization.cpp
+++ b/src/armnnTfLiteParser/test/L2Normalization.cpp
@@ -3,15 +3,14 @@
// SPDX-License-Identifier: MIT
//
-#include <boost/test/unit_test.hpp>
#include "ParserFlatbuffersFixture.hpp"
#include "../TfLiteParser.hpp"
#include <string>
#include <iostream>
-BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
-
+TEST_SUITE("TensorflowLiteParser_L2Normalization")
+{
struct L2NormalizationFixture : public ParserFlatbuffersFixture
{
explicit L2NormalizationFixture(const std::string & inputOutputShape)
@@ -82,7 +81,7 @@ struct L2NormalizationFixture4D : L2NormalizationFixture
L2NormalizationFixture4D() : L2NormalizationFixture("[ 1, 1, 4, 3 ]") {}
};
-BOOST_FIXTURE_TEST_CASE(ParseL2Normalization4D, L2NormalizationFixture4D)
+TEST_CASE_FIXTURE(L2NormalizationFixture4D, "ParseL2Normalization4D")
{
RunTest<4, armnn::DataType::Float32>(
0,
@@ -113,7 +112,7 @@ struct L2NormalizationSimpleFixture4D : L2NormalizationFixture
L2NormalizationSimpleFixture4D() : L2NormalizationFixture("[ 1, 1, 1, 4 ]") {}
};
-BOOST_FIXTURE_TEST_CASE(ParseL2NormalizationEps4D, L2NormalizationSimpleFixture4D)
+TEST_CASE_FIXTURE(L2NormalizationSimpleFixture4D, "ParseL2NormalizationEps4D")
{
RunTest<4, armnn::DataType::Float32>(
0,
@@ -125,4 +124,4 @@ BOOST_FIXTURE_TEST_CASE(ParseL2NormalizationEps4D, L2NormalizationSimpleFixture4
0.00000004f / CalcL2Norm({ 0.00000001f, 0.00000002f, 0.00000003f, 0.00000004f }) }}});
}
-BOOST_AUTO_TEST_SUITE_END()
+}
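The expected outputs in the two L2Normalization cases divide each input element by the L2 norm of the vector along the normalised axis; the CalcL2Norm helper referenced above presumably computes that norm. A hedged sketch of the arithmetic (the helper name and signature here are assumptions, not the fixture's actual declaration):

    #include <cmath>
    #include <initializer_list>

    // Assumed behaviour of CalcL2Norm: Euclidean norm of the given elements, so an
    // expected output value is input / sqrt(sum of squared inputs) along the axis.
    float CalcL2NormSketch(std::initializer_list<float> elements)
    {
        float sumOfSquares = 0.0f;
        for (float value : elements)
        {
            sumOfSquares += value * value;
        }
        return std::sqrt(sumOfSquares);
    }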
diff --git a/src/armnnTfLiteParser/test/LeakyRelu.cpp b/src/armnnTfLiteParser/test/LeakyRelu.cpp
index 471c01444c..20f95abee5 100644
--- a/src/armnnTfLiteParser/test/LeakyRelu.cpp
+++ b/src/armnnTfLiteParser/test/LeakyRelu.cpp
@@ -3,15 +3,14 @@
// SPDX-License-Identifier: MIT
//
-#include <boost/test/unit_test.hpp>
#include "ParserFlatbuffersFixture.hpp"
#include "../TfLiteParser.hpp"
#include <string>
#include <iostream>
-BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
-
+TEST_SUITE("TensorflowLiteParser_LeakyRelu")
+{
struct LeakyReluFixture : public ParserFlatbuffersFixture
{
explicit LeakyReluFixture()
@@ -69,11 +68,11 @@ struct LeakyReluFixture : public ParserFlatbuffersFixture
}
};
-BOOST_FIXTURE_TEST_CASE(ParseLeakyRelu, LeakyReluFixture)
+TEST_CASE_FIXTURE(LeakyReluFixture, "ParseLeakyRelu")
{
RunTest<2, armnn::DataType::Float32>(0,
{{ "inputTensor", { -0.1f, -0.2f, -0.3f, -0.4f, 0.1f, 0.2f, 0.3f }}},
{{ "outputTensor", { -0.001f, -0.002f, -0.003f, -0.004f, 0.1f, 0.2f, 0.3f }}});
}
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnnTfLiteParser/test/LoadModel.cpp b/src/armnnTfLiteParser/test/LoadModel.cpp
index 1afb5f12e5..e09de68c72 100644
--- a/src/armnnTfLiteParser/test/LoadModel.cpp
+++ b/src/armnnTfLiteParser/test/LoadModel.cpp
@@ -2,7 +2,7 @@
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
-#include <boost/test/unit_test.hpp>
+
#include "ParserFlatbuffersFixture.hpp"
#include "../TfLiteParser.hpp"
@@ -13,8 +13,8 @@ using ModelPtr = TfLiteParserImpl::ModelPtr;
using SubgraphPtr = TfLiteParserImpl::SubgraphPtr;
using OperatorPtr = TfLiteParserImpl::OperatorPtr;
-BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
-
+TEST_SUITE("TensorflowLiteParser_LoadModel")
+{
struct LoadModelFixture : public ParserFlatbuffersFixture
{
explicit LoadModelFixture()
@@ -137,53 +137,53 @@ struct LoadModelFixture : public ParserFlatbuffersFixture
const std::vector<tflite::BuiltinOperator>& opcodes,
size_t subgraphs, const std::string desc, size_t buffers)
{
- BOOST_CHECK(model);
- BOOST_CHECK_EQUAL(version, model->version);
- BOOST_CHECK_EQUAL(opcodeSize, model->operator_codes.size());
+ CHECK(model);
+ CHECK_EQ(version, model->version);
+ CHECK_EQ(opcodeSize, model->operator_codes.size());
CheckBuiltinOperators(opcodes, model->operator_codes);
- BOOST_CHECK_EQUAL(subgraphs, model->subgraphs.size());
- BOOST_CHECK_EQUAL(desc, model->description);
- BOOST_CHECK_EQUAL(buffers, model->buffers.size());
+ CHECK_EQ(subgraphs, model->subgraphs.size());
+ CHECK_EQ(desc, model->description);
+ CHECK_EQ(buffers, model->buffers.size());
}
void CheckBuiltinOperators(const std::vector<tflite::BuiltinOperator>& expectedOperators,
const std::vector<std::unique_ptr<tflite::OperatorCodeT>>& result)
{
- BOOST_CHECK_EQUAL(expectedOperators.size(), result.size());
+ CHECK_EQ(expectedOperators.size(), result.size());
for (size_t i = 0; i < expectedOperators.size(); i++)
{
- BOOST_CHECK_EQUAL(expectedOperators[i], result[i]->builtin_code);
+ CHECK_EQ(expectedOperators[i], result[i]->builtin_code);
}
}
void CheckSubgraph(const SubgraphPtr& subgraph, size_t tensors, const std::vector<int32_t>& inputs,
const std::vector<int32_t>& outputs, size_t operators, const std::string& name)
{
- BOOST_CHECK(subgraph);
- BOOST_CHECK_EQUAL(tensors, subgraph->tensors.size());
- BOOST_CHECK_EQUAL_COLLECTIONS(inputs.begin(), inputs.end(), subgraph->inputs.begin(), subgraph->inputs.end());
- BOOST_CHECK_EQUAL_COLLECTIONS(outputs.begin(), outputs.end(),
- subgraph->outputs.begin(), subgraph->outputs.end());
- BOOST_CHECK_EQUAL(operators, subgraph->operators.size());
- BOOST_CHECK_EQUAL(name, subgraph->name);
+ CHECK(subgraph);
+ CHECK_EQ(tensors, subgraph->tensors.size());
+ CHECK(std::equal(inputs.begin(), inputs.end(), subgraph->inputs.begin(), subgraph->inputs.end()));
+ CHECK(std::equal(outputs.begin(), outputs.end(),
+ subgraph->outputs.begin(), subgraph->outputs.end()));
+ CHECK_EQ(operators, subgraph->operators.size());
+ CHECK_EQ(name, subgraph->name);
}
void CheckOperator(const OperatorPtr& operatorPtr, uint32_t opcode, const std::vector<int32_t>& inputs,
const std::vector<int32_t>& outputs, tflite::BuiltinOptions optionType,
tflite::CustomOptionsFormat custom_options_format)
{
- BOOST_CHECK(operatorPtr);
- BOOST_CHECK_EQUAL(opcode, operatorPtr->opcode_index);
- BOOST_CHECK_EQUAL_COLLECTIONS(inputs.begin(), inputs.end(),
- operatorPtr->inputs.begin(), operatorPtr->inputs.end());
- BOOST_CHECK_EQUAL_COLLECTIONS(outputs.begin(), outputs.end(),
- operatorPtr->outputs.begin(), operatorPtr->outputs.end());
- BOOST_CHECK_EQUAL(optionType, operatorPtr->builtin_options.type);
- BOOST_CHECK_EQUAL(custom_options_format, operatorPtr->custom_options_format);
+ CHECK(operatorPtr);
+ CHECK_EQ(opcode, operatorPtr->opcode_index);
+ CHECK(std::equal(inputs.begin(), inputs.end(),
+ operatorPtr->inputs.begin(), operatorPtr->inputs.end()));
+ CHECK(std::equal(outputs.begin(), outputs.end(),
+ operatorPtr->outputs.begin(), operatorPtr->outputs.end()));
+ CHECK_EQ(optionType, operatorPtr->builtin_options.type);
+ CHECK_EQ(custom_options_format, operatorPtr->custom_options_format);
}
};
-BOOST_FIXTURE_TEST_CASE(LoadModelFromBinary, LoadModelFixture)
+TEST_CASE_FIXTURE(LoadModelFixture, "LoadModelFromBinary")
{
TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
m_GraphBinary.size());
@@ -197,14 +197,14 @@ BOOST_FIXTURE_TEST_CASE(LoadModelFromBinary, LoadModelFixture)
tflite::CustomOptionsFormat_FLEXBUFFERS);
}
-BOOST_FIXTURE_TEST_CASE(LoadModelFromFile, LoadModelFixture)
+TEST_CASE_FIXTURE(LoadModelFixture, "LoadModelFromFile")
{
using namespace fs;
fs::path fname = armnnUtils::Filesystem::NamedTempFile("Armnn-tfLite-LoadModelFromFile-TempFile.csv");
bool saved = flatbuffers::SaveFile(fname.c_str(),
reinterpret_cast<char *>(m_GraphBinary.data()),
m_GraphBinary.size(), true);
- BOOST_CHECK_MESSAGE(saved, "Cannot save test file");
+ CHECK_MESSAGE(saved, "Cannot save test file");
TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromFile(fname.c_str());
CheckModel(model, 3, 2, { tflite::BuiltinOperator_AVERAGE_POOL_2D, tflite::BuiltinOperator_CONV_2D },
@@ -218,26 +218,26 @@ BOOST_FIXTURE_TEST_CASE(LoadModelFromFile, LoadModelFixture)
remove(fname);
}
-BOOST_AUTO_TEST_CASE(LoadNullBinary)
+TEST_CASE("LoadNullBinary")
{
- BOOST_CHECK_THROW(TfLiteParserImpl::LoadModelFromBinary(nullptr, 0), armnn::InvalidArgumentException);
+ CHECK_THROWS_AS(TfLiteParserImpl::LoadModelFromBinary(nullptr, 0), armnn::InvalidArgumentException);
}
-BOOST_AUTO_TEST_CASE(LoadInvalidBinary)
+TEST_CASE("LoadInvalidBinary")
{
std::string testData = "invalid data";
- BOOST_CHECK_THROW(TfLiteParserImpl::LoadModelFromBinary(reinterpret_cast<const uint8_t*>(&testData),
+ CHECK_THROWS_AS(TfLiteParserImpl::LoadModelFromBinary(reinterpret_cast<const uint8_t*>(&testData),
testData.length()), armnn::ParseException);
}
-BOOST_AUTO_TEST_CASE(LoadFileNotFound)
+TEST_CASE("LoadFileNotFound")
{
- BOOST_CHECK_THROW(TfLiteParserImpl::LoadModelFromFile("invalidfile.tflite"), armnn::FileNotFoundException);
+ CHECK_THROWS_AS(TfLiteParserImpl::LoadModelFromFile("invalidfile.tflite"), armnn::FileNotFoundException);
}
-BOOST_AUTO_TEST_CASE(LoadNullPtrFile)
+TEST_CASE("LoadNullPtrFile")
{
- BOOST_CHECK_THROW(TfLiteParserImpl::LoadModelFromFile(nullptr), armnn::InvalidArgumentException);
+ CHECK_THROWS_AS(TfLiteParserImpl::LoadModelFromFile(nullptr), armnn::InvalidArgumentException);
}
-BOOST_AUTO_TEST_SUITE_END()
+}
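Since each converted file now opens its own uniquely named suite (here "TensorflowLiteParser_LoadModel") rather than reopening a single shared Boost suite, one file's tests can be selected through doctest's suite filter. A hedged sketch of a custom runner doing this programmatically; the equivalent command-line form would use the --test-suite option:

    #define DOCTEST_CONFIG_IMPLEMENT   // the runner below supplies main()
    #include <doctest/doctest.h>

    int main(int argc, char** argv)
    {
        doctest::Context context(argc, argv);

        // Run only the suite declared in LoadModel.cpp; other suites are skipped.
        context.addFilter("test-suite", "TensorflowLiteParser_LoadModel");

        return context.run();
    }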
diff --git a/src/armnnTfLiteParser/test/LoadScopeDynamicTensor.cpp b/src/armnnTfLiteParser/test/LoadScopeDynamicTensor.cpp
index 89a6640e41..a6e9c88346 100644
--- a/src/armnnTfLiteParser/test/LoadScopeDynamicTensor.cpp
+++ b/src/armnnTfLiteParser/test/LoadScopeDynamicTensor.cpp
@@ -8,10 +8,8 @@
#include <string>
-#include <boost/test/unit_test.hpp>
-
-BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
-
+TEST_SUITE("TensorflowLiteParser_LoadScopeDynamicTensor")
+{
struct LoadScopeDynamicTensorFixture : public ParserFlatbuffersFixture
{
explicit LoadScopeDynamicTensorFixture(const std::string& shape0,
@@ -144,7 +142,7 @@ struct LoadScopeDynamicTensor2Fixture : LoadScopeDynamicTensorFixture
LoadScopeDynamicTensor2Fixture() : LoadScopeDynamicTensorFixture("[ 1, 3, 3, 2 ]", "[ ]", "[ 1, 1, 1, 2 ]") {}
};
-BOOST_FIXTURE_TEST_CASE(LoadScopeDynamicTensor0, LoadScopeDynamicTensor0Fixture)
+TEST_CASE_FIXTURE(LoadScopeDynamicTensor0Fixture, "LoadScopeDynamicTensor0")
{
RunTest<4, armnn::DataType::Float32, armnn::DataType::Float32>(
0,
@@ -153,7 +151,7 @@ BOOST_FIXTURE_TEST_CASE(LoadScopeDynamicTensor0, LoadScopeDynamicTensor0Fixture)
true);
}
-BOOST_FIXTURE_TEST_CASE(LoadScopeDynamicTensor1, LoadScopeDynamicTensor1Fixture)
+TEST_CASE_FIXTURE(LoadScopeDynamicTensor1Fixture, "LoadScopeDynamicTensor1")
{
RunTest<4, armnn::DataType::Float32, armnn::DataType::Float32>(
0,
@@ -162,7 +160,7 @@ BOOST_FIXTURE_TEST_CASE(LoadScopeDynamicTensor1, LoadScopeDynamicTensor1Fixture)
true);
}
-BOOST_FIXTURE_TEST_CASE(LoadScopeDynamicTensor2, LoadScopeDynamicTensor2Fixture)
+TEST_CASE_FIXTURE(LoadScopeDynamicTensor2Fixture, "LoadScopeDynamicTensor2")
{
RunTest<4, armnn::DataType::Float32, armnn::DataType::Float32>(
0,
@@ -340,7 +338,7 @@ struct LoadScopeDynamicTensorBroadcasting1DFixture : LoadScopeDynamicTensorBroad
"[ 1, 2, 3, 2 ]") {}
};
-BOOST_FIXTURE_TEST_CASE(LoadScopeDynamicTensorBroadcasting3D, LoadScopeDynamicTensorBroadcasting3DFixture)
+TEST_CASE_FIXTURE(LoadScopeDynamicTensorBroadcasting3DFixture, "LoadScopeDynamicTensorBroadcasting3D")
{
RunTest<4, armnn::DataType::Float32, armnn::DataType::Float32>(
0,
@@ -352,7 +350,7 @@ BOOST_FIXTURE_TEST_CASE(LoadScopeDynamicTensorBroadcasting3D, LoadScopeDynamicTe
true);
}
-BOOST_FIXTURE_TEST_CASE(LoadScopeDynamicTensorBroadcasting2D, LoadScopeDynamicTensorBroadcasting2DFixture)
+TEST_CASE_FIXTURE(LoadScopeDynamicTensorBroadcasting2DFixture, "LoadScopeDynamicTensorBroadcasting2D")
{
RunTest<4, armnn::DataType::Float32, armnn::DataType::Float32>(
0,
@@ -364,7 +362,7 @@ BOOST_FIXTURE_TEST_CASE(LoadScopeDynamicTensorBroadcasting2D, LoadScopeDynamicTe
true);
}
-BOOST_FIXTURE_TEST_CASE(LoadScopeDynamicTensorBroadcasting1D, LoadScopeDynamicTensorBroadcasting1DFixture)
+TEST_CASE_FIXTURE(LoadScopeDynamicTensorBroadcasting1DFixture, "LoadScopeDynamicTensorBroadcasting1D")
{
RunTest<4, armnn::DataType::Float32, armnn::DataType::Float32>(
0,
@@ -376,4 +374,4 @@ BOOST_FIXTURE_TEST_CASE(LoadScopeDynamicTensorBroadcasting1D, LoadScopeDynamicTe
true);
}
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnnTfLiteParser/test/MaxPool2D.cpp b/src/armnnTfLiteParser/test/MaxPool2D.cpp
index 8cbef97e2f..bcafac6710 100644
--- a/src/armnnTfLiteParser/test/MaxPool2D.cpp
+++ b/src/armnnTfLiteParser/test/MaxPool2D.cpp
@@ -2,12 +2,12 @@
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
-#include <boost/test/unit_test.hpp>
+
#include "armnnTfLiteParser/ITfLiteParser.hpp"
#include "ParserFlatbuffersFixture.hpp"
-BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
-
+TEST_SUITE("TensorflowLiteParser_MaxPool2D")
+{
struct MaxPool2DFixture : public ParserFlatbuffersFixture
{
explicit MaxPool2DFixture(std::string inputdim, std::string outputdim, std::string dataType)
@@ -96,25 +96,25 @@ struct MaxPoolLiteFixtureUint2DOutput : MaxPool2DFixture
MaxPoolLiteFixtureUint2DOutput() : MaxPool2DFixture("[ 1, 4, 4, 1 ]", "[ 1, 2, 2, 1 ]", "UINT8") {}
};
-BOOST_FIXTURE_TEST_CASE(MaxPoolLiteUint1DOutput, MaxPoolLiteFixtureUint1DOutput)
+TEST_CASE_FIXTURE(MaxPoolLiteFixtureUint1DOutput, "MaxPoolLiteUint1DOutput")
{
RunTest<4, armnn::DataType::QAsymmU8>(0, { 2, 3, 5, 2 }, { 5 });
}
-BOOST_FIXTURE_TEST_CASE(MaxPoolLiteFloat1DOutput, MaxPoolLiteFixtureFloat1DOutput)
+TEST_CASE_FIXTURE(MaxPoolLiteFixtureFloat1DOutput, "MaxPoolLiteFloat1DOutput")
{
RunTest<4, armnn::DataType::Float32>(0, { 2.0f, 3.0f, 5.0f, 2.0f }, { 5.0f });
}
-BOOST_FIXTURE_TEST_CASE(MaxPoolLiteUint2DOutput, MaxPoolLiteFixtureUint2DOutput)
+TEST_CASE_FIXTURE(MaxPoolLiteFixtureUint2DOutput, "MaxPoolLiteUint2DOutput")
{
RunTest<4, armnn::DataType::QAsymmU8>(
0, { 1, 2, 2, 3, 5, 6, 7, 8, 3, 2, 1, 0, 1, 2, 3, 4 }, { 6, 8, 3, 4 });
}
-BOOST_FIXTURE_TEST_CASE(MaxPoolIncorrectDataTypeError, MaxPoolLiteFixtureFloat1DOutput)
+TEST_CASE_FIXTURE(MaxPoolLiteFixtureFloat1DOutput, "MaxPoolIncorrectDataTypeError")
{
- BOOST_CHECK_THROW((RunTest<4, armnn::DataType::QAsymmU8>(0, { 2, 3, 5, 2 }, { 5 })), armnn::Exception);
+ CHECK_THROWS_AS((RunTest<4, armnn::DataType::QAsymmU8>(0, { 2, 3, 5, 2 }, { 5 })), armnn::Exception);
}
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnnTfLiteParser/test/Maximum.cpp b/src/armnnTfLiteParser/test/Maximum.cpp
index 56a1ecf6f0..caf1c70ff4 100644
--- a/src/armnnTfLiteParser/test/Maximum.cpp
+++ b/src/armnnTfLiteParser/test/Maximum.cpp
@@ -3,15 +3,14 @@
// SPDX-License-Identifier: MIT
//
-#include <boost/test/unit_test.hpp>
#include "ParserFlatbuffersFixture.hpp"
#include "../TfLiteParser.hpp"
#include <string>
#include <iostream>
-BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
-
+TEST_SUITE("TensorflowLiteParser_Maximum")
+{
struct MaximumFixture : public ParserFlatbuffersFixture
{
explicit MaximumFixture(const std::string & inputShape1,
@@ -90,7 +89,7 @@ struct MaximumFixture4D4D : MaximumFixture
"[ 1, 2, 2, 3 ]") {}
};
-BOOST_FIXTURE_TEST_CASE(ParseMaximum4D4D, MaximumFixture4D4D)
+TEST_CASE_FIXTURE(MaximumFixture4D4D, "ParseMaximum4D4D")
{
RunTest<4, armnn::DataType::Float32>(
0,
@@ -115,7 +114,7 @@ struct MaximumBroadcastFixture4D4D : MaximumFixture
"[ 1, 2, 2, 3 ]") {}
};
-BOOST_FIXTURE_TEST_CASE(ParseMaximumBroadcast4D4D, MaximumBroadcastFixture4D4D)
+TEST_CASE_FIXTURE(MaximumBroadcastFixture4D4D, "ParseMaximumBroadcast4D4D")
{
RunTest<4, armnn::DataType::Float32>(
0,
@@ -135,7 +134,7 @@ struct MaximumBroadcastFixture4D1D : MaximumFixture
"[ 1, 2, 2, 3 ]") {}
};
-BOOST_FIXTURE_TEST_CASE(ParseMaximumBroadcast4D1D, MaximumBroadcastFixture4D1D)
+TEST_CASE_FIXTURE(MaximumBroadcastFixture4D1D, "ParseMaximumBroadcast4D1D")
{
RunTest<4, armnn::DataType::Float32>(
0,
@@ -157,7 +156,7 @@ struct MaximumBroadcastFixture1D4D : MaximumFixture
"[ 1, 2, 2, 3 ]") {}
};
-BOOST_FIXTURE_TEST_CASE(ParseMaximumBroadcast1D4D, MaximumBroadcastFixture1D4D)
+TEST_CASE_FIXTURE(MaximumBroadcastFixture1D4D, "ParseMaximumBroadcast1D4D")
{
RunTest<4, armnn::DataType::Float32>(
0,
@@ -172,4 +171,4 @@ BOOST_FIXTURE_TEST_CASE(ParseMaximumBroadcast1D4D, MaximumBroadcastFixture1D4D)
9.0f, 10.0f, 11.0f }}});
}
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnnTfLiteParser/test/Mean.cpp b/src/armnnTfLiteParser/test/Mean.cpp
index 3f0fdf14d0..935118256a 100644
--- a/src/armnnTfLiteParser/test/Mean.cpp
+++ b/src/armnnTfLiteParser/test/Mean.cpp
@@ -3,15 +3,14 @@
// SPDX-License-Identifier: MIT
//
-#include <boost/test/unit_test.hpp>
#include "ParserFlatbuffersFixture.hpp"
#include "../TfLiteParser.hpp"
#include <string>
#include <iostream>
-BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
-
+TEST_SUITE("TensorflowLiteParser_Mean")
+{
struct MeanNoReduceFixture : public ParserFlatbuffersFixture
{
explicit MeanNoReduceFixture(const std::string & inputShape,
@@ -89,10 +88,10 @@ struct SimpleMeanNoReduceFixture : public MeanNoReduceFixture
SimpleMeanNoReduceFixture() : MeanNoReduceFixture("[ 2, 2 ]", "[ 1, 1 ]", "[ 0 ]", "[ ]") {}
};
-BOOST_FIXTURE_TEST_CASE(ParseMeanNoReduce, SimpleMeanNoReduceFixture)
+TEST_CASE_FIXTURE(SimpleMeanNoReduceFixture, "ParseMeanNoReduce")
{
RunTest<2, armnn::DataType::Float32>(0, {{ "inputTensor", { 1.0f, 1.0f, 2.0f, 2.0f } } },
{{ "outputTensor", { 1.5f } } });
}
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnnTfLiteParser/test/Minimum.cpp b/src/armnnTfLiteParser/test/Minimum.cpp
index 8c6db680e7..7aec63841a 100644
--- a/src/armnnTfLiteParser/test/Minimum.cpp
+++ b/src/armnnTfLiteParser/test/Minimum.cpp
@@ -3,15 +3,14 @@
// SPDX-License-Identifier: MIT
//
-#include <boost/test/unit_test.hpp>
#include "ParserFlatbuffersFixture.hpp"
#include "../TfLiteParser.hpp"
#include <string>
#include <iostream>
-BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
-
+TEST_SUITE("TensorflowLiteParser_Minimum")
+{
struct MinimumFixture : public ParserFlatbuffersFixture
{
explicit MinimumFixture(const std::string & inputShape1,
@@ -90,7 +89,7 @@ struct MinimumFixture4D : MinimumFixture
"[ 1, 2, 2, 3 ]") {}
};
-BOOST_FIXTURE_TEST_CASE(ParseMinimum4D, MinimumFixture4D)
+TEST_CASE_FIXTURE(MinimumFixture4D, "ParseMinimum4D")
{
RunTest<4, armnn::DataType::Float32>(
0,
@@ -115,7 +114,7 @@ struct MinimumBroadcastFixture4D : MinimumFixture
"[ 1, 2, 2, 3 ]") {}
};
-BOOST_FIXTURE_TEST_CASE(ParseMinimumBroadcast4D, MinimumBroadcastFixture4D)
+TEST_CASE_FIXTURE(MinimumBroadcastFixture4D, "ParseMinimumBroadcast4D")
{
RunTest<4, armnn::DataType::Float32>(
0,
@@ -136,7 +135,7 @@ struct MinimumBroadcastFixture4D1D : MinimumFixture
"[ 1, 2, 2, 3 ]") {}
};
-BOOST_FIXTURE_TEST_CASE(ParseMinimumBroadcast4D1D, MinimumBroadcastFixture4D1D)
+TEST_CASE_FIXTURE(MinimumBroadcastFixture4D1D, "ParseMinimumBroadcast4D1D")
{
RunTest<4, armnn::DataType::Float32>(
0,
@@ -158,7 +157,7 @@ struct MinimumBroadcastFixture1D4D : MinimumFixture
"[ 1, 2, 2, 3 ]") {}
};
-BOOST_FIXTURE_TEST_CASE(ParseMinimumBroadcast1D4D, MinimumBroadcastFixture1D4D)
+TEST_CASE_FIXTURE(MinimumBroadcastFixture1D4D, "ParseMinimumBroadcast1D4D")
{
RunTest<4, armnn::DataType::Float32>(
0,
@@ -242,7 +241,7 @@ struct MinimumBroadcastFixture2D0D : public ParserFlatbuffersFixture
}
};
-BOOST_FIXTURE_TEST_CASE(ParseMinimumBroadcast2D0D, MinimumBroadcastFixture2D0D)
+TEST_CASE_FIXTURE(MinimumBroadcastFixture2D0D, "ParseMinimumBroadcast2D0D")
{
RunTest<2, armnn::DataType::Float32>(
0,
@@ -250,4 +249,4 @@ BOOST_FIXTURE_TEST_CASE(ParseMinimumBroadcast2D0D, MinimumBroadcastFixture2D0D)
{{"output", { 1.0f, 2.0f }}});
}
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnnTfLiteParser/test/Multiplication.cpp b/src/armnnTfLiteParser/test/Multiplication.cpp
index 329649a6cb..60756274a1 100644
--- a/src/armnnTfLiteParser/test/Multiplication.cpp
+++ b/src/armnnTfLiteParser/test/Multiplication.cpp
@@ -3,15 +3,14 @@
// SPDX-License-Identifier: MIT
//
-#include <boost/test/unit_test.hpp>
#include "ParserFlatbuffersFixture.hpp"
#include "../TfLiteParser.hpp"
#include <string>
#include <iostream>
-BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
-
+TEST_SUITE("TensorflowLiteParser_Multiplication")
+{
struct MultiplicationFixture : public ParserFlatbuffersFixture
{
explicit MultiplicationFixture(const std::string & inputShape1,
@@ -92,7 +91,7 @@ struct SimpleMultiplicationFixture : public MultiplicationFixture
SimpleMultiplicationFixture() : MultiplicationFixture("[ 1, 2, 2, 3 ]", "[ 1, 2, 2, 3 ]", "[ 1, 2, 2, 3 ]") {}
};
-BOOST_FIXTURE_TEST_CASE(ParseMultiplication, SimpleMultiplicationFixture)
+TEST_CASE_FIXTURE(SimpleMultiplicationFixture, "ParseMultiplication")
{
using armnn::DataType;
RunTest<4, DataType::Float32>(0, {{ "inputTensor1", { 0.0f, 1.0f, 2.0f,
@@ -114,7 +113,7 @@ struct MultiplicationBroadcastFixture4D1D : public MultiplicationFixture
MultiplicationBroadcastFixture4D1D() : MultiplicationFixture("[ 1, 2, 2, 3 ]", "[ 1 ]", "[ 1, 2, 2, 3 ]") {}
};
-BOOST_FIXTURE_TEST_CASE(ParseMultiplicationBroadcast4D1D, MultiplicationBroadcastFixture4D1D)
+TEST_CASE_FIXTURE(MultiplicationBroadcastFixture4D1D, "ParseMultiplicationBroadcast4D1D")
{
using armnn::DataType;
RunTest<4, DataType::Float32>(0, {{ "inputTensor1", { 0.0f, 1.0f, 2.0f,
@@ -133,7 +132,7 @@ struct MultiplicationBroadcastFixture1D4D : public MultiplicationFixture
MultiplicationBroadcastFixture1D4D() : MultiplicationFixture("[ 1 ]", "[ 1, 2, 2, 3 ]", "[ 1, 2, 2, 3 ]") {}
};
-BOOST_FIXTURE_TEST_CASE(ParseMultiplicationBroadcast1D4D, MultiplicationBroadcastFixture1D4D)
+TEST_CASE_FIXTURE(MultiplicationBroadcastFixture1D4D, "ParseMultiplicationBroadcast1D4D")
{
using armnn::DataType;
RunTest<4, DataType::Float32>(0, {{ "inputTensor1", { 3.0f } },
@@ -147,4 +146,4 @@ BOOST_FIXTURE_TEST_CASE(ParseMultiplicationBroadcast1D4D, MultiplicationBroadcas
27.0f, 30.0f, 33.0f } } });
}
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnnTfLiteParser/test/OutputShapeOfSqueeze.cpp b/src/armnnTfLiteParser/test/OutputShapeOfSqueeze.cpp
index e616158f29..395038d959 100644
--- a/src/armnnTfLiteParser/test/OutputShapeOfSqueeze.cpp
+++ b/src/armnnTfLiteParser/test/OutputShapeOfSqueeze.cpp
@@ -3,11 +3,15 @@
// SPDX-License-Identifier: MIT
//
-#include <boost/test/unit_test.hpp>
#include "../TfLiteParser.hpp"
#include <iostream>
#include <string>
+#include <doctest/doctest.h>
+
+TEST_SUITE("TensorflowLiteParser_OutputShapeOfSqueeze")
+{
+
struct TfLiteParserFixture
{
@@ -19,41 +23,38 @@ struct TfLiteParserFixture
};
-BOOST_AUTO_TEST_SUITE(TensorflowLiteParser);
-
-
-BOOST_FIXTURE_TEST_CASE( EmptySqueezeDims_OutputWithAllDimensionsSqueezed, TfLiteParserFixture )
+TEST_CASE_FIXTURE(TfLiteParserFixture, "EmptySqueezeDims_OutputWithAllDimensionsSqueezed")
{
std::vector<uint32_t> squeezeDims = { };
armnn::TensorInfo inputTensorInfo = armnn::TensorInfo(4, m_InputShape, armnn::DataType::Float32);
armnn::TensorInfo outputTensorInfo = m_Parser.OutputShapeOfSqueeze(squeezeDims, inputTensorInfo);
- BOOST_TEST(outputTensorInfo.GetNumElements() == 4);
- BOOST_TEST(outputTensorInfo.GetNumDimensions() == 2);
- BOOST_TEST((outputTensorInfo.GetShape() == armnn::TensorShape({ 2, 2 })));
+ CHECK(outputTensorInfo.GetNumElements() == 4);
+ CHECK(outputTensorInfo.GetNumDimensions() == 2);
+ CHECK((outputTensorInfo.GetShape() == armnn::TensorShape({ 2, 2 })));
};
-BOOST_FIXTURE_TEST_CASE( SqueezeDimsNotIncludingSizeOneDimensions_NoDimensionsSqueezedInOutput, TfLiteParserFixture )
+TEST_CASE_FIXTURE(TfLiteParserFixture, "SqueezeDimsNotIncludingSizeOneDimensions_NoDimensionsSqueezedInOutput")
{
std::vector<uint32_t> squeezeDims = { 1, 2 };
armnn::TensorInfo inputTensorInfo = armnn::TensorInfo(4, m_InputShape, armnn::DataType::Float32);
armnn::TensorInfo outputTensorInfo = m_Parser.OutputShapeOfSqueeze(squeezeDims, inputTensorInfo);
- BOOST_TEST(outputTensorInfo.GetNumElements() == 4);
- BOOST_TEST(outputTensorInfo.GetNumDimensions() == 4);
- BOOST_TEST((outputTensorInfo.GetShape() == armnn::TensorShape({ 1, 2, 2, 1 })));
+ CHECK(outputTensorInfo.GetNumElements() == 4);
+ CHECK(outputTensorInfo.GetNumDimensions() == 4);
+ CHECK((outputTensorInfo.GetShape() == armnn::TensorShape({ 1, 2, 2, 1 })));
};
-BOOST_FIXTURE_TEST_CASE( SqueezeDimsRangePartial_OutputWithDimensionsWithinRangeSqueezed, TfLiteParserFixture )
+TEST_CASE_FIXTURE(TfLiteParserFixture, "SqueezeDimsRangePartial_OutputWithDimensionsWithinRangeSqueezed")
{
std::vector<uint32_t> squeezeDims = { 1, 3 };
armnn::TensorInfo inputTensorInfo = armnn::TensorInfo(4, m_InputShape, armnn::DataType::Float32);
armnn::TensorInfo outputTensorInfo = m_Parser.OutputShapeOfSqueeze(squeezeDims, inputTensorInfo);
- BOOST_TEST(outputTensorInfo.GetNumElements() == 4);
- BOOST_TEST(outputTensorInfo.GetNumDimensions() == 3);
- BOOST_TEST((outputTensorInfo.GetShape() == armnn::TensorShape({ 1, 2, 2 })));
+ CHECK(outputTensorInfo.GetNumElements() == 4);
+ CHECK(outputTensorInfo.GetNumDimensions() == 3);
+ CHECK((outputTensorInfo.GetShape() == armnn::TensorShape({ 1, 2, 2 })));
};
-BOOST_AUTO_TEST_SUITE_END();
\ No newline at end of file
+}
\ No newline at end of file
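The three OutputShapeOfSqueeze cases are consistent with a squeeze rule where an empty dims list removes every size-1 dimension, while a non-empty list removes only the listed dimensions and only when their size is 1 (the fixture's m_InputShape is evidently { 1, 2, 2, 1 }). A hedged sketch of that rule, using illustrative names rather than the parser's real implementation:

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    // Illustrative only: mirrors the behaviour the three test cases above expect.
    std::vector<uint32_t> SqueezeShapeSketch(const std::vector<uint32_t>& inputShape,
                                             const std::vector<uint32_t>& squeezeDims)
    {
        std::vector<uint32_t> outputShape;
        for (uint32_t i = 0; i < inputShape.size(); ++i)
        {
            const bool selected = squeezeDims.empty() ||
                std::find(squeezeDims.begin(), squeezeDims.end(), i) != squeezeDims.end();
            if (!(selected && inputShape[i] == 1))
            {
                outputShape.push_back(inputShape[i]);   // keep this dimension
            }
        }
        return outputShape;   // e.g. { 1, 2, 2, 1 } with dims { 1, 3 } -> { 1, 2, 2 }
    }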
diff --git a/src/armnnTfLiteParser/test/Pack.cpp b/src/armnnTfLiteParser/test/Pack.cpp
index 011312f7c9..4aff8feca4 100644
--- a/src/armnnTfLiteParser/test/Pack.cpp
+++ b/src/armnnTfLiteParser/test/Pack.cpp
@@ -3,15 +3,14 @@
// SPDX-License-Identifier: MIT
//
-#include <boost/test/unit_test.hpp>
#include "ParserFlatbuffersFixture.hpp"
#include "../TfLiteParser.hpp"
#include <string>
#include <iostream>
-BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
-
+TEST_SUITE("TensorflowLiteParser_Pack")
+{
struct PackFixture : public ParserFlatbuffersFixture
{
explicit PackFixture(const std::string & inputShape,
@@ -103,7 +102,7 @@ struct SimplePackFixture : PackFixture
"3") {}
};
-BOOST_FIXTURE_TEST_CASE(ParsePack, SimplePackFixture)
+TEST_CASE_FIXTURE(SimplePackFixture, "ParsePack")
{
RunTest<4, armnn::DataType::Float32>(
0,
@@ -150,4 +149,4 @@ BOOST_FIXTURE_TEST_CASE(ParsePack, SimplePackFixture)
18, 36 } } });
}
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnnTfLiteParser/test/Pad.cpp b/src/armnnTfLiteParser/test/Pad.cpp
index aab1536628..1ac06277f8 100644
--- a/src/armnnTfLiteParser/test/Pad.cpp
+++ b/src/armnnTfLiteParser/test/Pad.cpp
@@ -3,15 +3,14 @@
// SPDX-License-Identifier: MIT
//
-#include <boost/test/unit_test.hpp>
#include "ParserFlatbuffersFixture.hpp"
#include "../TfLiteParser.hpp"
#include <string>
#include <iostream>
-BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
-
+TEST_SUITE("TensorflowLiteParser_Pad")
+{
struct PadFixture : public ParserFlatbuffersFixture
{
explicit PadFixture(const std::string& inputShape,
@@ -93,7 +92,7 @@ struct SimplePadFixture : public PadFixture
"[ 1,0,0,0, 1,0,0,0, 2,0,0,0, 2,0,0,0 ]") {}
};
-BOOST_FIXTURE_TEST_CASE(ParsePad, SimplePadFixture)
+TEST_CASE_FIXTURE(SimplePadFixture, "ParsePad")
{
RunTest<2, armnn::DataType::Float32>
(0,
@@ -111,7 +110,7 @@ struct Uint8PadFixture : public PadFixture
"UINT8", "-2.0", "3") {}
};
-BOOST_FIXTURE_TEST_CASE(ParsePadUint8, Uint8PadFixture)
+TEST_CASE_FIXTURE(Uint8PadFixture, "ParsePadUint8")
{
RunTest<2, armnn::DataType::QAsymmU8>
(0,
@@ -129,7 +128,7 @@ struct Int8PadFixture : public PadFixture
"INT8", "-2.0", "3") {}
};
-BOOST_FIXTURE_TEST_CASE(ParsePadInt8, Int8PadFixture)
+TEST_CASE_FIXTURE(Int8PadFixture, "ParsePadInt8")
{
RunTest<2, armnn::DataType::QAsymmS8>
(0,
@@ -140,4 +139,4 @@ BOOST_FIXTURE_TEST_CASE(ParsePadInt8, Int8PadFixture)
3, 3, 3, 3, 3, 3, 3 }}});
}
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnnTfLiteParser/test/ParserFlatbuffersFixture.hpp b/src/armnnTfLiteParser/test/ParserFlatbuffersFixture.hpp
index 196af190fd..b0bfdfc016 100644
--- a/src/armnnTfLiteParser/test/ParserFlatbuffersFixture.hpp
+++ b/src/armnnTfLiteParser/test/ParserFlatbuffersFixture.hpp
@@ -20,6 +20,7 @@
#include <test/TensorHelpers.hpp>
#include <fmt/format.h>
+#include <doctest/doctest.h>
#include "flatbuffers/idl.h"
#include "flatbuffers/util.h"
@@ -207,22 +208,22 @@ struct ParserFlatbuffersFixture
const std::vector<float>& min, const std::vector<float>& max,
const std::vector<float>& scale, const std::vector<int64_t>& zeroPoint)
{
- BOOST_CHECK(tensors);
- BOOST_CHECK_EQUAL(shapeSize, tensors->shape.size());
- BOOST_CHECK_EQUAL_COLLECTIONS(shape.begin(), shape.end(), tensors->shape.begin(), tensors->shape.end());
- BOOST_CHECK_EQUAL(tensorType, tensors->type);
- BOOST_CHECK_EQUAL(buffer, tensors->buffer);
- BOOST_CHECK_EQUAL(name, tensors->name);
- BOOST_CHECK(tensors->quantization);
- BOOST_CHECK_EQUAL_COLLECTIONS(min.begin(), min.end(), tensors->quantization.get()->min.begin(),
- tensors->quantization.get()->min.end());
- BOOST_CHECK_EQUAL_COLLECTIONS(max.begin(), max.end(), tensors->quantization.get()->max.begin(),
- tensors->quantization.get()->max.end());
- BOOST_CHECK_EQUAL_COLLECTIONS(scale.begin(), scale.end(), tensors->quantization.get()->scale.begin(),
- tensors->quantization.get()->scale.end());
- BOOST_CHECK_EQUAL_COLLECTIONS(zeroPoint.begin(), zeroPoint.end(),
+ CHECK(tensors);
+ CHECK_EQ(shapeSize, tensors->shape.size());
+ CHECK(std::equal(shape.begin(), shape.end(), tensors->shape.begin(), tensors->shape.end()));
+ CHECK_EQ(tensorType, tensors->type);
+ CHECK_EQ(buffer, tensors->buffer);
+ CHECK_EQ(name, tensors->name);
+ CHECK(tensors->quantization);
+ CHECK(std::equal(min.begin(), min.end(), tensors->quantization.get()->min.begin(),
+ tensors->quantization.get()->min.end()));
+ CHECK(std::equal(max.begin(), max.end(), tensors->quantization.get()->max.begin(),
+ tensors->quantization.get()->max.end()));
+ CHECK(std::equal(scale.begin(), scale.end(), tensors->quantization.get()->scale.begin(),
+ tensors->quantization.get()->scale.end()));
+ CHECK(std::equal(zeroPoint.begin(), zeroPoint.end(),
tensors->quantization.get()->zero_point.begin(),
- tensors->quantization.get()->zero_point.end());
+ tensors->quantization.get()->zero_point.end()));
}
private:
@@ -302,7 +303,7 @@ void ParserFlatbuffersFixture::RunTest(size_t subgraphId,
// Check that output tensors have correct number of dimensions (NumOutputDimensions specified in test)
auto outputNumDimensions = outputTensorInfo.GetNumDimensions();
- BOOST_CHECK_MESSAGE((outputNumDimensions == NumOutputDimensions),
+ CHECK_MESSAGE((outputNumDimensions == NumOutputDimensions),
fmt::format("Number of dimensions expected {}, but got {} for output layer {}",
NumOutputDimensions,
outputNumDimensions,
@@ -324,7 +325,7 @@ void ParserFlatbuffersFixture::RunTest(size_t subgraphId,
auto result = CompareTensors(outputExpected, outputStorage[it.first],
bindingInfo.second.GetShape(), bindingInfo.second.GetShape(),
false, isDynamic);
- BOOST_TEST(result.m_Result, result.m_Message.str());
+ CHECK_MESSAGE(result.m_Result, result.m_Message.str());
}
}
@@ -368,7 +369,7 @@ void ParserFlatbuffersFixture::RunTest(std::size_t subgraphId,
{
for (unsigned int i = 0; i < out.size(); ++i)
{
- BOOST_TEST(it.second[i] == out[i], boost::test_tools::tolerance(0.000001f));
+ CHECK(doctest::Approx(it.second[i]).epsilon(0.000001f) == out[i]);
}
}
}
@@ -404,7 +405,7 @@ void ParserFlatbuffersFixture::RunTest(size_t subgraphId,
// Check that output tensors have correct number of dimensions (NumOutputDimensions specified in test)
auto outputNumDimensions = outputTensorInfo.GetNumDimensions();
- BOOST_CHECK_MESSAGE((outputNumDimensions == NumOutputDimensions),
+ CHECK_MESSAGE((outputNumDimensions == NumOutputDimensions),
fmt::format("Number of dimensions expected {}, but got {} for output layer {}",
NumOutputDimensions,
outputNumDimensions,
@@ -425,6 +426,6 @@ void ParserFlatbuffersFixture::RunTest(size_t subgraphId,
auto outputExpected = it.second;
auto result = CompareTensors(outputExpected, outputStorage[it.first],
bindingInfo.second.GetShape(), bindingInfo.second.GetShape(), false);
- BOOST_TEST(result.m_Result, result.m_Message.str());
+ CHECK_MESSAGE(result.m_Result, result.m_Message.str());
}
}
\ No newline at end of file
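The per-element float comparison in RunTest above replaces Boost's relative tolerance with doctest::Approx and an explicit epsilon; Approx scales its epsilon by the magnitude of the operands, so the check remains a relative one rather than demanding exact equality. A minimal sketch:

    #define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN
    #include <doctest/doctest.h>

    TEST_CASE("ApproxComparison")
    {
        const float computed = 0.1f + 0.2f;   // arithmetic on values with rounding error

        // Passes as long as the relative difference stays within the given epsilon.
        CHECK(doctest::Approx(computed).epsilon(0.000001f) == 0.3f);
    }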
diff --git a/src/armnnTfLiteParser/test/Prelu.cpp b/src/armnnTfLiteParser/test/Prelu.cpp
index 83c4088377..6c70ff6287 100644
--- a/src/armnnTfLiteParser/test/Prelu.cpp
+++ b/src/armnnTfLiteParser/test/Prelu.cpp
@@ -3,14 +3,14 @@
// SPDX-License-Identifier: MIT
//
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
#include "ParserFlatbuffersFixture.hpp"
#include "../TfLiteParser.hpp"
#include <string>
-BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
-
+TEST_SUITE("TensorflowLiteParser_Prelu")
+{
struct PreluFixture : public ParserFlatbuffersFixture
{
explicit PreluFixture(const std::string& inputShape,
@@ -356,7 +356,7 @@ struct PreluDynamicTensorFixture : PreluFixture
"\"data\": [ 0, 0, 128, 62 ]") {}
};
-BOOST_FIXTURE_TEST_CASE(SimplePrelu, SimplePreluFixture)
+TEST_CASE_FIXTURE(SimplePreluFixture, "SimplePrelu")
{
RunTest<2, armnn::DataType::Float32>(
0,
@@ -364,7 +364,7 @@ BOOST_FIXTURE_TEST_CASE(SimplePrelu, SimplePreluFixture)
{{"output", { -3.5f, 2.f, 0.f, 1.f, -1.25f, 14.f }}});
}
-BOOST_FIXTURE_TEST_CASE(PreluConstAlpha, PreluConstAlphaFixture)
+TEST_CASE_FIXTURE(PreluConstAlphaFixture, "PreluConstAlpha")
{
RunTest<3, armnn::DataType::Float32>(
0,
@@ -372,7 +372,7 @@ BOOST_FIXTURE_TEST_CASE(PreluConstAlpha, PreluConstAlphaFixture)
{{"output", { -3.5f, 2.f, 0.f, 1.f, -1.25f, 14.f }}});
}
-BOOST_FIXTURE_TEST_CASE(PreluBroadcastAlpha, PreluBroadcastAlphaFixture)
+TEST_CASE_FIXTURE(PreluBroadcastAlphaFixture, "PreluBroadcastAlpha")
{
RunTest<4, armnn::DataType::Float32>(
0,
@@ -380,7 +380,7 @@ BOOST_FIXTURE_TEST_CASE(PreluBroadcastAlpha, PreluBroadcastAlphaFixture)
{{"output", { -3.5f, 2.f, 0.f, 1.f, -1.25f, 14.f }}});
}
-BOOST_FIXTURE_TEST_CASE(PreluDynamicTensor, PreluDynamicTensorFixture)
+TEST_CASE_FIXTURE(PreluDynamicTensorFixture, "PreluDynamicTensor")
{
RunTest<2, armnn::DataType::Float32, armnn::DataType::Float32>(
0,
@@ -389,7 +389,7 @@ BOOST_FIXTURE_TEST_CASE(PreluDynamicTensor, PreluDynamicTensorFixture)
true);
}
-BOOST_FIXTURE_TEST_CASE(PreluNetwork, PreluNetworkFixture)
+TEST_CASE_FIXTURE(PreluNetworkFixture, "PreluNetwork")
{
RunTest<3, armnn::DataType::Float32>(
0,
@@ -397,4 +397,4 @@ BOOST_FIXTURE_TEST_CASE(PreluNetwork, PreluNetworkFixture)
{{"output", { -21.f, 12.f, 0.f, 6.f, -7.5f, 84.f }}});
}
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnnTfLiteParser/test/Quantize.cpp b/src/armnnTfLiteParser/test/Quantize.cpp
index ca5e6d5091..c7c936e745 100644
--- a/src/armnnTfLiteParser/test/Quantize.cpp
+++ b/src/armnnTfLiteParser/test/Quantize.cpp
@@ -3,15 +3,14 @@
// SPDX-License-Identifier: MIT
//
-#include <boost/test/unit_test.hpp>
#include "ParserFlatbuffersFixture.hpp"
#include "../TfLiteParser.hpp"
#include <string>
#include <iostream>
-BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
-
+TEST_SUITE("TensorflowLiteParser_Quantize")
+{
struct QuantizeFixture : public ParserFlatbuffersFixture
{
explicit QuantizeFixture(const std::string & inputShape,
@@ -80,7 +79,7 @@ BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
"UINT8") {}
};
- BOOST_FIXTURE_TEST_CASE(SimpleQuantizeQAsymm8, SimpleQuantizeFixtureQAsymm8)
+    TEST_CASE_FIXTURE(SimpleQuantizeFixtureQAsymm8, "SimpleQuantizeQAsymm8")
{
RunTest<2, armnn::DataType::Float32, armnn::DataType::QuantisedAsymm8>(
0,
@@ -95,7 +94,7 @@ BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
"INT16") {}
};
- BOOST_FIXTURE_TEST_CASE(SimpleQuantizeQsymm16, SimpleQuantizeFixtureQSymm16)
+ TEST_CASE_FIXTURE(SimpleQuantizeFixtureQSymm16, "SimpleQuantizeQsymm16")
{
RunTest<2, armnn::DataType::Float32, armnn::DataType::QuantisedSymm16>(
0,
@@ -110,7 +109,7 @@ BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
"INT8") {}
};
- BOOST_FIXTURE_TEST_CASE(SimpleQuantizeQSymmS8, SimpleQuantizeFixtureQSymmS8)
+ TEST_CASE_FIXTURE(SimpleQuantizeFixtureQSymmS8, "SimpleQuantizeQSymmS8")
{
RunTest<2, armnn::DataType::Float32, armnn::DataType::QSymmS8>(
0,
@@ -118,4 +117,4 @@ BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
{{"outputTensor", { 0, 1, 5, 127, -128, -1 }}});
}
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnnTfLiteParser/test/Reduce.cpp b/src/armnnTfLiteParser/test/Reduce.cpp
index c2a22f0b86..cde9d09cd3 100644
--- a/src/armnnTfLiteParser/test/Reduce.cpp
+++ b/src/armnnTfLiteParser/test/Reduce.cpp
@@ -3,15 +3,14 @@
// SPDX-License-Identifier: MIT
//
-#include <boost/test/unit_test.hpp>
#include "ParserFlatbuffersFixture.hpp"
#include "../TfLiteParser.hpp"
#include <string>
#include <iostream>
-BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
-
+TEST_SUITE("TensorflowLiteParser_Reduce")
+{
struct ReduceMaxFixture : public ParserFlatbuffersFixture
{
explicit ReduceMaxFixture(const std::string& inputShape,
@@ -93,7 +92,7 @@ struct SimpleReduceMaxFixture : public ReduceMaxFixture
SimpleReduceMaxFixture() : ReduceMaxFixture("[ 1, 1, 2, 3 ]", "[ 1, 1, 1, 3 ]", "[ 1 ]", "[ 2,0,0,0 ]") {}
};
-BOOST_FIXTURE_TEST_CASE(ParseReduceMax, SimpleReduceMaxFixture)
+TEST_CASE_FIXTURE(SimpleReduceMaxFixture, "ParseReduceMax")
{
RunTest<4, armnn::DataType::Float32, armnn::DataType::Float32>
(0, {{ "inputTensor", { 1001.0f, 11.0f, 1003.0f,
@@ -182,7 +181,7 @@ struct SimpleReduceMinFixture : public ReduceMinFixture
SimpleReduceMinFixture() : ReduceMinFixture("[ 1, 1, 2, 3 ]", "[ 1, 1, 1, 3 ]", "[ 1 ]", "[ 2, 0, 0, 0 ]") {}
};
-BOOST_FIXTURE_TEST_CASE(ParseReduceMin, SimpleReduceMinFixture)
+TEST_CASE_FIXTURE(SimpleReduceMinFixture, "ParseReduceMin")
{
RunTest<4, armnn::DataType::Float32, armnn::DataType::Float32>
(0, {{ "inputTensor", { 1001.0f, 11.0f, 1003.0f,
@@ -190,4 +189,4 @@ BOOST_FIXTURE_TEST_CASE(ParseReduceMin, SimpleReduceMinFixture)
{{ "outputTensor", { 10.0f, 11.0f, 12.0f } } });
}
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnnTfLiteParser/test/Reshape.cpp b/src/armnnTfLiteParser/test/Reshape.cpp
index 025612f097..0824a27f87 100644
--- a/src/armnnTfLiteParser/test/Reshape.cpp
+++ b/src/armnnTfLiteParser/test/Reshape.cpp
@@ -3,15 +3,14 @@
// SPDX-License-Identifier: MIT
//
-#include <boost/test/unit_test.hpp>
#include "ParserFlatbuffersFixture.hpp"
#include "../TfLiteParser.hpp"
#include <string>
#include <iostream>
-BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
-
+TEST_SUITE("TensorflowLiteParser_Reshape")
+{
struct ReshapeFixture : public ParserFlatbuffersFixture
{
explicit ReshapeFixture(const std::string& inputShape,
@@ -83,13 +82,13 @@ struct ReshapeFixtureWithReshapeDims : ReshapeFixture
ReshapeFixtureWithReshapeDims() : ReshapeFixture("[ 1, 9 ]", "[ 3, 3 ]", "[ 3, 3 ]") {}
};
-BOOST_FIXTURE_TEST_CASE(ParseReshapeWithReshapeDims, ReshapeFixtureWithReshapeDims)
+TEST_CASE_FIXTURE(ReshapeFixtureWithReshapeDims, "ParseReshapeWithReshapeDims")
{
SetupSingleInputSingleOutput("inputTensor", "outputTensor");
RunTest<2, armnn::DataType::QAsymmU8>(0,
{ 1, 2, 3, 4, 5, 6, 7, 8, 9 },
{ 1, 2, 3, 4, 5, 6, 7, 8, 9 });
- BOOST_TEST((m_Parser->GetNetworkOutputBindingInfo(0, "outputTensor").second.GetShape()
+ CHECK((m_Parser->GetNetworkOutputBindingInfo(0, "outputTensor").second.GetShape()
== armnn::TensorShape({3,3})));
}
@@ -98,13 +97,13 @@ struct ReshapeFixtureWithReshapeDimsFlatten : ReshapeFixture
ReshapeFixtureWithReshapeDimsFlatten() : ReshapeFixture("[ 3, 3 ]", "[ 9 ]", "[ -1 ]") {}
};
-BOOST_FIXTURE_TEST_CASE(ParseReshapeWithReshapeDimsFlatten, ReshapeFixtureWithReshapeDimsFlatten)
+TEST_CASE_FIXTURE(ReshapeFixtureWithReshapeDimsFlatten, "ParseReshapeWithReshapeDimsFlatten")
{
SetupSingleInputSingleOutput("inputTensor", "outputTensor");
RunTest<1, armnn::DataType::QAsymmU8>(0,
{ 1, 2, 3, 4, 5, 6, 7, 8, 9 },
{ 1, 2, 3, 4, 5, 6, 7, 8, 9 });
- BOOST_TEST((m_Parser->GetNetworkOutputBindingInfo(0, "outputTensor").second.GetShape()
+ CHECK((m_Parser->GetNetworkOutputBindingInfo(0, "outputTensor").second.GetShape()
== armnn::TensorShape({9})));
}
@@ -113,13 +112,13 @@ struct ReshapeFixtureWithReshapeDimsFlattenTwoDims : ReshapeFixture
ReshapeFixtureWithReshapeDimsFlattenTwoDims() : ReshapeFixture("[ 3, 2, 3 ]", "[ 2, 9 ]", "[ 2, -1 ]") {}
};
-BOOST_FIXTURE_TEST_CASE(ParseReshapeWithReshapeDimsFlattenTwoDims, ReshapeFixtureWithReshapeDimsFlattenTwoDims)
+TEST_CASE_FIXTURE(ReshapeFixtureWithReshapeDimsFlattenTwoDims, "ParseReshapeWithReshapeDimsFlattenTwoDims")
{
SetupSingleInputSingleOutput("inputTensor", "outputTensor");
RunTest<2, armnn::DataType::QAsymmU8>(0,
{ 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6 },
{ 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6 });
- BOOST_TEST((m_Parser->GetNetworkOutputBindingInfo(0, "outputTensor").second.GetShape()
+ CHECK((m_Parser->GetNetworkOutputBindingInfo(0, "outputTensor").second.GetShape()
== armnn::TensorShape({2,9})));
}
@@ -128,13 +127,13 @@ struct ReshapeFixtureWithReshapeDimsFlattenOneDim : ReshapeFixture
ReshapeFixtureWithReshapeDimsFlattenOneDim() : ReshapeFixture("[ 2, 9 ]", "[ 2, 3, 3 ]", "[ 2, -1, 3 ]") {}
};
-BOOST_FIXTURE_TEST_CASE(ParseReshapeWithReshapeDimsFlattenOneDim, ReshapeFixtureWithReshapeDimsFlattenOneDim)
+TEST_CASE_FIXTURE(ReshapeFixtureWithReshapeDimsFlattenOneDim, "ParseReshapeWithReshapeDimsFlattenOneDim")
{
SetupSingleInputSingleOutput("inputTensor", "outputTensor");
RunTest<3, armnn::DataType::QAsymmU8>(0,
{ 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6 },
{ 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6 });
- BOOST_TEST((m_Parser->GetNetworkOutputBindingInfo(0, "outputTensor").second.GetShape()
+ CHECK((m_Parser->GetNetworkOutputBindingInfo(0, "outputTensor").second.GetShape()
== armnn::TensorShape({2,3,3})));
}
@@ -145,7 +144,7 @@ struct DynamicReshapeFixtureWithReshapeDimsFlattenOneDim : ReshapeFixture
"[ 2, -1, 3 ]") {}
};
-BOOST_FIXTURE_TEST_CASE(DynParseReshapeWithReshapeDimsFlattenOneDim, DynamicReshapeFixtureWithReshapeDimsFlattenOneDim)
+TEST_CASE_FIXTURE(DynamicReshapeFixtureWithReshapeDimsFlattenOneDim, "DynParseReshapeWithReshapeDimsFlattenOneDim")
{
SetupSingleInputSingleOutput("inputTensor", "outputTensor");
RunTest<3,
@@ -156,4 +155,4 @@ BOOST_FIXTURE_TEST_CASE(DynParseReshapeWithReshapeDimsFlattenOneDim, DynamicResh
true);
}
-BOOST_AUTO_TEST_SUITE_END()
+}
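The Reshape hunks keep the existing pattern of a parameterised base fixture with thin derived structs that bake in concrete shapes; only the test macros change around it. A rough sketch of that structure under doctest, with invented names rather than the real ReshapeFixture:

// Illustrative only: a configurable base fixture plus a per-test derived fixture,
// mirroring how ReshapeFixture and its derived structs are used above.
#include <doctest/doctest.h>

#include <utility>
#include <vector>

struct ShapeFixture
{
    explicit ShapeFixture(std::vector<int> dims) : m_Dims(std::move(dims)) {}
    std::vector<int> m_Dims;
};

struct FlattenedShapeFixture : ShapeFixture
{
    FlattenedShapeFixture() : ShapeFixture({ 9 }) {}    // derived struct supplies the test data
};

TEST_SUITE("Example_Reshape")
{
TEST_CASE_FIXTURE(FlattenedShapeFixture, "FlattenedShapeHasOneDimension")
{
    CHECK(m_Dims.size() == 1);
    CHECK(m_Dims[0] == 9);
}
}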
diff --git a/src/armnnTfLiteParser/test/ResizeBilinear.cpp b/src/armnnTfLiteParser/test/ResizeBilinear.cpp
index 8af5612b9e..dce9e1d914 100644
--- a/src/armnnTfLiteParser/test/ResizeBilinear.cpp
+++ b/src/armnnTfLiteParser/test/ResizeBilinear.cpp
@@ -3,15 +3,14 @@
// SPDX-License-Identifier: MIT
//
-#include <boost/test/unit_test.hpp>
#include "ParserFlatbuffersFixture.hpp"
#include "../TfLiteParser.hpp"
#include <string>
#include <iostream>
-BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
-
+TEST_SUITE("TensorflowLiteParser_ResizeBilinear")
+{
struct ResizeBilinearFixture : public ParserFlatbuffersFixture
{
explicit ResizeBilinearFixture(const std::string & inputShape,
@@ -98,7 +97,7 @@ struct SimpleResizeBilinearFixture : ResizeBilinearFixture
{}
};
-BOOST_FIXTURE_TEST_CASE(ParseResizeBilinear, SimpleResizeBilinearFixture)
+TEST_CASE_FIXTURE(SimpleResizeBilinearFixture, "ParseResizeBilinear")
{
RunTest<4, armnn::DataType::Float32>(
0,
@@ -113,4 +112,4 @@ BOOST_FIXTURE_TEST_CASE(ParseResizeBilinear, SimpleResizeBilinearFixture)
);
}
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnnTfLiteParser/test/ResizeNearestNeighbor.cpp b/src/armnnTfLiteParser/test/ResizeNearestNeighbor.cpp
index 7add5f2a3e..948f4fe0cd 100644
--- a/src/armnnTfLiteParser/test/ResizeNearestNeighbor.cpp
+++ b/src/armnnTfLiteParser/test/ResizeNearestNeighbor.cpp
@@ -3,15 +3,14 @@
// SPDX-License-Identifier: MIT
//
-#include <boost/test/unit_test.hpp>
#include "ParserFlatbuffersFixture.hpp"
#include "../TfLiteParser.hpp"
#include <string>
#include <iostream>
-BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
-
+TEST_SUITE("TensorflowLiteParser_ResizeNearestNeighbor")
+{
struct ResizeNearestNeighborFixture : public ParserFlatbuffersFixture
{
explicit ResizeNearestNeighborFixture(const std::string & inputShape,
@@ -98,7 +97,7 @@ struct SimpleResizeNearestNeighborFixture : ResizeNearestNeighborFixture
{}
};
-BOOST_FIXTURE_TEST_CASE(ParseResizeNearestNeighbor, SimpleResizeNearestNeighborFixture)
+TEST_CASE_FIXTURE(SimpleResizeNearestNeighborFixture, "ParseResizeNearestNeighbor")
{
RunTest<4, armnn::DataType::Float32>(
0,
@@ -106,4 +105,4 @@ BOOST_FIXTURE_TEST_CASE(ParseResizeNearestNeighbor, SimpleResizeNearestNeighborF
{{"OutputTensor", { 1.0f }}});
}
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnnTfLiteParser/test/Slice.cpp b/src/armnnTfLiteParser/test/Slice.cpp
index b94a9832b7..80dff73146 100644
--- a/src/armnnTfLiteParser/test/Slice.cpp
+++ b/src/armnnTfLiteParser/test/Slice.cpp
@@ -3,12 +3,11 @@
// SPDX-License-Identifier: MIT
//
-#include <boost/test/unit_test.hpp>
#include "ParserFlatbuffersFixture.hpp"
#include "../TfLiteParser.hpp"
-BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
-
+TEST_SUITE("TensorflowLiteParser_Slice")
+{
struct SliceFixture : public ParserFlatbuffersFixture
{
explicit SliceFixture(const std::string & inputShape,
@@ -124,14 +123,14 @@ struct SliceFixtureSingleDim : SliceFixture
"[ 1, 0, 0, 0, 1, 0, 0, 0, 3, 0, 0, 0 ]") {}
};
-BOOST_FIXTURE_TEST_CASE(SliceSingleDim, SliceFixtureSingleDim)
+TEST_CASE_FIXTURE(SliceFixtureSingleDim, "SliceSingleDim")
{
RunTest<3, armnn::DataType::Float32>(
0,
{{"inputTensor", { 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6 }}},
{{"outputTensor", { 3, 3, 3 }}});
- BOOST_TEST((m_Parser->GetNetworkOutputBindingInfo(0, "outputTensor").second.GetShape()
+ CHECK((m_Parser->GetNetworkOutputBindingInfo(0, "outputTensor").second.GetShape()
== armnn::TensorShape({1,1,3})));
}
@@ -143,14 +142,14 @@ struct SliceFixtureD123 : SliceFixture
"[ 1, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 0 ]") {}
};
-BOOST_FIXTURE_TEST_CASE(SliceD123, SliceFixtureD123)
+TEST_CASE_FIXTURE(SliceFixtureD123, "SliceD123")
{
RunTest<3, armnn::DataType::Float32>(
0,
{{"inputTensor", { 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6 }}},
{{"outputTensor", { 3, 3, 3, 4, 4, 4 }}});
- BOOST_TEST((m_Parser->GetNetworkOutputBindingInfo(0, "outputTensor").second.GetShape()
+ CHECK((m_Parser->GetNetworkOutputBindingInfo(0, "outputTensor").second.GetShape()
== armnn::TensorShape({1,2,3})));
}
@@ -162,14 +161,14 @@ struct SliceFixtureD213 : SliceFixture
"[ 2, 0, 0, 0, 1, 0, 0, 0, 3, 0, 0, 0 ]") {}
};
-BOOST_FIXTURE_TEST_CASE(SliceD213, SliceFixtureD213)
+TEST_CASE_FIXTURE(SliceFixtureD213, "SliceD213")
{
RunTest<3, armnn::DataType::Float32>(
0,
{{"inputTensor", { 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6 }}},
{{"outputTensor", { 3, 3, 3, 5, 5, 5 }}});
- BOOST_TEST((m_Parser->GetNetworkOutputBindingInfo(0, "outputTensor").second.GetShape()
+ CHECK((m_Parser->GetNetworkOutputBindingInfo(0, "outputTensor").second.GetShape()
== armnn::TensorShape({2,1,3})));
}
@@ -181,7 +180,7 @@ struct DynamicSliceFixtureD213 : SliceFixture
"[ 2, 0, 0, 0, 1, 0, 0, 0, 3, 0, 0, 0 ]") {}
};
-BOOST_FIXTURE_TEST_CASE(DynamicSliceD213, DynamicSliceFixtureD213)
+TEST_CASE_FIXTURE(DynamicSliceFixtureD213, "DynamicSliceD213")
{
RunTest<3, armnn::DataType::Float32, armnn::DataType::Float32>(
0,
@@ -190,4 +189,4 @@ BOOST_FIXTURE_TEST_CASE(DynamicSliceD213, DynamicSliceFixtureD213)
true);
}
-BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
+}
\ No newline at end of file
diff --git a/src/armnnTfLiteParser/test/Softmax.cpp b/src/armnnTfLiteParser/test/Softmax.cpp
index c4d19f2ac0..11a2a0d84b 100644
--- a/src/armnnTfLiteParser/test/Softmax.cpp
+++ b/src/armnnTfLiteParser/test/Softmax.cpp
@@ -3,15 +3,14 @@
// SPDX-License-Identifier: MIT
//
-#include <boost/test/unit_test.hpp>
#include "ParserFlatbuffersFixture.hpp"
#include "../TfLiteParser.hpp"
#include <string>
#include <iostream>
-BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
-
+TEST_SUITE("TensorflowLiteParser_Softmax")
+{
struct SoftmaxFixture : public ParserFlatbuffersFixture
{
explicit SoftmaxFixture()
@@ -69,9 +68,9 @@ struct SoftmaxFixture : public ParserFlatbuffersFixture
}
};
-BOOST_FIXTURE_TEST_CASE(ParseSoftmaxLite, SoftmaxFixture)
+TEST_CASE_FIXTURE(SoftmaxFixture, "ParseSoftmaxLite")
{
RunTest<2, armnn::DataType::QAsymmU8>(0, { 0, 0, 100, 0, 0, 0, 0 }, { 0, 0, 255, 0, 0, 0, 0 });
}
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnnTfLiteParser/test/SpaceToBatchND.cpp b/src/armnnTfLiteParser/test/SpaceToBatchND.cpp
index 6ff4f53bfc..b99713ce0a 100644
--- a/src/armnnTfLiteParser/test/SpaceToBatchND.cpp
+++ b/src/armnnTfLiteParser/test/SpaceToBatchND.cpp
@@ -3,15 +3,14 @@
// SPDX-License-Identifier: MIT
//
-#include <boost/test/unit_test.hpp>
#include "ParserFlatbuffersFixture.hpp"
#include "../TfLiteParser.hpp"
#include <string>
#include <iostream>
-BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
-
+TEST_SUITE("TensorflowLiteParser_SpaceToBatchND")
+{
struct SpaceToBatchNDFixture : public ParserFlatbuffersFixture
{
explicit SpaceToBatchNDFixture(const std::string & inputShape,
@@ -105,7 +104,7 @@ struct SpaceToBatchNDFixtureSimpleTest : public SpaceToBatchNDFixture
"[ 0,0,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0 ]") {}
};
-BOOST_FIXTURE_TEST_CASE(SpaceToBatchNdSimpleTest, SpaceToBatchNDFixtureSimpleTest)
+TEST_CASE_FIXTURE(SpaceToBatchNDFixtureSimpleTest, "SpaceToBatchNdSimpleTest")
{
RunTest<4, armnn::DataType::Float32>
(0,
@@ -128,7 +127,7 @@ struct SpaceToBatchNDFixtureMultipleInputBatchesTest : public SpaceToBatchNDFixt
"[ 0,0,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0 ]") {}
};
-BOOST_FIXTURE_TEST_CASE(SpaceToBatchNdMultipleInputBatchesTest, SpaceToBatchNDFixtureMultipleInputBatchesTest)
+TEST_CASE_FIXTURE(SpaceToBatchNDFixtureMultipleInputBatchesTest, "SpaceToBatchNdMultipleInputBatchesTest")
{
RunTest<4, armnn::DataType::Float32>
(0,
@@ -150,7 +149,7 @@ struct SpaceToBatchNDFixturePaddingTest : public SpaceToBatchNDFixture
"[ 1,0,0,0, 0,0,0,0, 2,0,0,0, 0,0,0,0 ]") {}
};
-BOOST_FIXTURE_TEST_CASE(SpaceToBatchNdPaddingTest, SpaceToBatchNDFixturePaddingTest)
+TEST_CASE_FIXTURE(SpaceToBatchNDFixturePaddingTest, "SpaceToBatchNdPaddingTest")
{
RunTest<4, armnn::DataType::Float32>
(0,
@@ -175,4 +174,4 @@ BOOST_FIXTURE_TEST_CASE(SpaceToBatchNdPaddingTest, SpaceToBatchNDFixturePaddingT
0.0f, 10.0f, }}});
}
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnnTfLiteParser/test/Split.cpp b/src/armnnTfLiteParser/test/Split.cpp
index 5f23799fd6..97f8f12339 100644
--- a/src/armnnTfLiteParser/test/Split.cpp
+++ b/src/armnnTfLiteParser/test/Split.cpp
@@ -3,15 +3,14 @@
// SPDX-License-Identifier: MIT
//
-#include <boost/test/unit_test.hpp>
#include "ParserFlatbuffersFixture.hpp"
#include "../TfLiteParser.hpp"
#include <string>
#include <iostream>
-BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
-
+TEST_SUITE("TensorflowLiteParser_Split")
+{
struct SplitFixture : public ParserFlatbuffersFixture
{
explicit SplitFixture(const std::string& inputShape,
@@ -108,7 +107,7 @@ struct SimpleSplitFixtureFloat32 : SplitFixture
{}
};
-BOOST_FIXTURE_TEST_CASE(ParseAxisOneSplitTwoFloat32, SimpleSplitFixtureFloat32)
+TEST_CASE_FIXTURE(SimpleSplitFixtureFloat32, "ParseAxisOneSplitTwoFloat32")
{
RunTest<4, armnn::DataType::Float32>(
@@ -126,7 +125,7 @@ struct SimpleSplitAxisThreeFixtureFloat32 : SplitFixture
{}
};
-BOOST_FIXTURE_TEST_CASE(ParseAxisThreeSplitTwoFloat32, SimpleSplitAxisThreeFixtureFloat32)
+TEST_CASE_FIXTURE(SimpleSplitAxisThreeFixtureFloat32, "ParseAxisThreeSplitTwoFloat32")
{
RunTest<4, armnn::DataType::Float32>(
0,
@@ -143,7 +142,7 @@ struct SimpleSplit2DFixtureFloat32 : SplitFixture
{}
};
-BOOST_FIXTURE_TEST_CASE(SimpleSplit2DFloat32, SimpleSplit2DFixtureFloat32)
+TEST_CASE_FIXTURE(SimpleSplit2DFixtureFloat32, "SimpleSplit2DFloat32")
{
RunTest<2, armnn::DataType::Float32>(
0,
@@ -159,7 +158,7 @@ struct SimpleSplit3DFixtureFloat32 : SplitFixture
{}
};
-BOOST_FIXTURE_TEST_CASE(SimpleSplit3DFloat32, SimpleSplit3DFixtureFloat32)
+TEST_CASE_FIXTURE(SimpleSplit3DFixtureFloat32, "SimpleSplit3DFloat32")
{
RunTest<3, armnn::DataType::Float32>(
0,
@@ -176,7 +175,7 @@ struct SimpleSplitFixtureUint8 : SplitFixture
{}
};
-BOOST_FIXTURE_TEST_CASE(ParseAxisOneSplitTwoUint8, SimpleSplitFixtureUint8)
+TEST_CASE_FIXTURE(SimpleSplitFixtureUint8, "ParseAxisOneSplitTwoUint8")
{
RunTest<4, armnn::DataType::QAsymmU8>(
@@ -194,7 +193,7 @@ struct SimpleSplitAxisThreeFixtureUint8 : SplitFixture
{}
};
-BOOST_FIXTURE_TEST_CASE(ParseAxisThreeSplitTwoUint8, SimpleSplitAxisThreeFixtureUint8)
+TEST_CASE_FIXTURE(SimpleSplitAxisThreeFixtureUint8, "ParseAxisThreeSplitTwoUint8")
{
RunTest<4, armnn::DataType::QAsymmU8>(
0,
@@ -211,7 +210,7 @@ struct SimpleSplit2DFixtureUint8 : SplitFixture
{}
};
-BOOST_FIXTURE_TEST_CASE(SimpleSplit2DUint8, SimpleSplit2DFixtureUint8)
+TEST_CASE_FIXTURE(SimpleSplit2DFixtureUint8, "SimpleSplit2DUint8")
{
RunTest<2, armnn::DataType::QAsymmU8>(
0,
@@ -227,7 +226,7 @@ struct SimpleSplit3DFixtureUint8 : SplitFixture
{}
};
-BOOST_FIXTURE_TEST_CASE(SimpleSplit3DUint8, SimpleSplit3DFixtureUint8)
+TEST_CASE_FIXTURE(SimpleSplit3DFixtureUint8, "SimpleSplit3DUint8")
{
RunTest<3, armnn::DataType::QAsymmU8>(
0,
@@ -237,4 +236,4 @@ BOOST_FIXTURE_TEST_CASE(SimpleSplit3DUint8, SimpleSplit3DFixtureUint8)
{"outputTensor2", { 9, 10, 11, 12, 13, 14, 15, 16 } } } );
}
-BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
+}
\ No newline at end of file
diff --git a/src/armnnTfLiteParser/test/SplitV.cpp b/src/armnnTfLiteParser/test/SplitV.cpp
index 9541114d71..51b75faf07 100644
--- a/src/armnnTfLiteParser/test/SplitV.cpp
+++ b/src/armnnTfLiteParser/test/SplitV.cpp
@@ -3,15 +3,14 @@
// SPDX-License-Identifier: MIT
//
-#include <boost/test/unit_test.hpp>
#include "ParserFlatbuffersFixture.hpp"
#include "../TfLiteParser.hpp"
#include <string>
#include <iostream>
-BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
-
+TEST_SUITE("TensorflowLiteParser")
+{
struct SplitVFixture : public ParserFlatbuffersFixture
{
explicit SplitVFixture(const std::string& inputShape,
@@ -126,7 +125,7 @@ struct SimpleSplitVAxisOneFixture : SplitVFixture
{}
};
-BOOST_FIXTURE_TEST_CASE(ParseAxisOneSplitVTwo, SimpleSplitVAxisOneFixture)
+TEST_CASE_FIXTURE(SimpleSplitVAxisOneFixture, "ParseAxisOneSplitVTwo")
{
RunTest<4, armnn::DataType::Float32>(
0,
@@ -148,7 +147,7 @@ struct SimpleSplitVAxisTwoFixture : SplitVFixture
{}
};
-BOOST_FIXTURE_TEST_CASE(ParseAxisTwoSplitVTwo, SimpleSplitVAxisTwoFixture)
+TEST_CASE_FIXTURE(SimpleSplitVAxisTwoFixture, "ParseAxisTwoSplitVTwo")
{
RunTest<4, armnn::DataType::Float32>(
0,
@@ -170,7 +169,7 @@ struct SimpleSplitVAxisThreeFixture : SplitVFixture
{}
};
-BOOST_FIXTURE_TEST_CASE(ParseAxisThreeSplitVTwo, SimpleSplitVAxisThreeFixture)
+TEST_CASE_FIXTURE(SimpleSplitVAxisThreeFixture, "ParseAxisThreeSplitVTwo")
{
RunTest<4, armnn::DataType::Float32>(
0,
@@ -192,7 +191,7 @@ struct SimpleSplitVAxisFourFixture : SplitVFixture
{}
};
-BOOST_FIXTURE_TEST_CASE(ParseAxisFourSplitVTwo, SimpleSplitVAxisFourFixture)
+TEST_CASE_FIXTURE(SimpleSplitVAxisFourFixture, "ParseAxisFourSplitVTwo")
{
RunTest<4, armnn::DataType::Float32>(
0,
@@ -206,4 +205,4 @@ BOOST_FIXTURE_TEST_CASE(ParseAxisFourSplitVTwo, SimpleSplitVAxisFourFixture)
{"outputTensor2", { 4.0f, 8.0f, 12.0f, 16.0f, 20.0f, 24.0f, 28.0f, 32.0f } } } );
}
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnnTfLiteParser/test/Squeeze.cpp b/src/armnnTfLiteParser/test/Squeeze.cpp
index 86a1966dd1..da870fd4c9 100644
--- a/src/armnnTfLiteParser/test/Squeeze.cpp
+++ b/src/armnnTfLiteParser/test/Squeeze.cpp
@@ -3,15 +3,14 @@
// SPDX-License-Identifier: MIT
//
-#include <boost/test/unit_test.hpp>
#include "ParserFlatbuffersFixture.hpp"
#include "../TfLiteParser.hpp"
#include <string>
#include <iostream>
-BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
-
+TEST_SUITE("TensorflowLiteParser_Squeeze")
+{
struct SqueezeFixture : public ParserFlatbuffersFixture
{
explicit SqueezeFixture(const std::string& inputShape,
@@ -82,11 +81,11 @@ struct SqueezeFixtureWithSqueezeDims : SqueezeFixture
SqueezeFixtureWithSqueezeDims() : SqueezeFixture("[ 1, 2, 2, 1 ]", "[ 2, 2, 1 ]", "[ 0, 1, 2 ]") {}
};
-BOOST_FIXTURE_TEST_CASE(ParseSqueezeWithSqueezeDims, SqueezeFixtureWithSqueezeDims)
+TEST_CASE_FIXTURE(SqueezeFixtureWithSqueezeDims, "ParseSqueezeWithSqueezeDims")
{
SetupSingleInputSingleOutput("inputTensor", "outputTensor");
RunTest<3, armnn::DataType::QAsymmU8>(0, { 1, 2, 3, 4 }, { 1, 2, 3, 4 });
- BOOST_TEST((m_Parser->GetNetworkOutputBindingInfo(0, "outputTensor").second.GetShape()
+ CHECK((m_Parser->GetNetworkOutputBindingInfo(0, "outputTensor").second.GetShape()
== armnn::TensorShape({2,2,1})));
}
@@ -96,11 +95,11 @@ struct SqueezeFixtureWithoutSqueezeDims : SqueezeFixture
SqueezeFixtureWithoutSqueezeDims() : SqueezeFixture("[ 1, 2, 2, 1 ]", "[ 2, 2 ]", "") {}
};
-BOOST_FIXTURE_TEST_CASE(ParseSqueezeWithoutSqueezeDims, SqueezeFixtureWithoutSqueezeDims)
+TEST_CASE_FIXTURE(SqueezeFixtureWithoutSqueezeDims, "ParseSqueezeWithoutSqueezeDims")
{
SetupSingleInputSingleOutput("inputTensor", "outputTensor");
RunTest<2, armnn::DataType::QAsymmU8>(0, { 1, 2, 3, 4 }, { 1, 2, 3, 4 });
- BOOST_TEST((m_Parser->GetNetworkOutputBindingInfo(0, "outputTensor").second.GetShape()
+ CHECK((m_Parser->GetNetworkOutputBindingInfo(0, "outputTensor").second.GetShape()
== armnn::TensorShape({2,2})));
}
@@ -109,10 +108,10 @@ struct SqueezeFixtureWithInvalidInput : SqueezeFixture
SqueezeFixtureWithInvalidInput() : SqueezeFixture("[ 1, 2, 2, 1, 2, 2 ]", "[ 1, 2, 2, 1, 2 ]", "[ ]") {}
};
-BOOST_FIXTURE_TEST_CASE(ParseSqueezeInvalidInput, SqueezeFixtureWithInvalidInput)
+TEST_CASE_FIXTURE(SqueezeFixtureWithInvalidInput, "ParseSqueezeInvalidInput")
{
static_assert(armnn::MaxNumOfTensorDimensions == 5, "Please update SqueezeFixtureWithInvalidInput");
- BOOST_CHECK_THROW((SetupSingleInputSingleOutput("inputTensor", "outputTensor")),
+ CHECK_THROWS_AS((SetupSingleInputSingleOutput("inputTensor", "outputTensor")),
armnn::InvalidArgumentException);
}
@@ -123,9 +122,9 @@ struct SqueezeFixtureWithSqueezeDimsSizeInvalid : SqueezeFixture
"[ 1, 2, 2, 2, 2 ]") {}
};
-BOOST_FIXTURE_TEST_CASE(ParseSqueezeInvalidSqueezeDims, SqueezeFixtureWithSqueezeDimsSizeInvalid)
+TEST_CASE_FIXTURE(SqueezeFixtureWithSqueezeDimsSizeInvalid, "ParseSqueezeInvalidSqueezeDims")
{
- BOOST_CHECK_THROW((SetupSingleInputSingleOutput("inputTensor", "outputTensor")), armnn::ParseException);
+ CHECK_THROWS_AS((SetupSingleInputSingleOutput("inputTensor", "outputTensor")), armnn::ParseException);
}
@@ -136,10 +135,10 @@ struct SqueezeFixtureWithNegativeSqueezeDims : SqueezeFixture
"[ -2 , 2 ]") {}
};
-BOOST_FIXTURE_TEST_CASE(ParseSqueezeNegativeSqueezeDims, SqueezeFixtureWithNegativeSqueezeDims)
+TEST_CASE_FIXTURE(SqueezeFixtureWithNegativeSqueezeDims, "ParseSqueezeNegativeSqueezeDims")
{
- BOOST_CHECK_THROW((SetupSingleInputSingleOutput("inputTensor", "outputTensor")), armnn::ParseException);
+ CHECK_THROWS_AS((SetupSingleInputSingleOutput("inputTensor", "outputTensor")), armnn::ParseException);
}
-BOOST_AUTO_TEST_SUITE_END()
+}
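Negative tests such as ParseSqueezeInvalidInput above swap BOOST_CHECK_THROW(expr, Exception) for doctest's CHECK_THROWS_AS(expr, Exception), which takes the same two arguments in the same order. A small self-contained sketch with a hypothetical helper, not Arm NN code:

// Example-only: CHECK_THROWS_AS passes when the expression throws the named
// exception type (caught by const reference, so derived types also match).
#include <doctest/doctest.h>

#include <stdexcept>

static int RequireNonNegative(int value)
{
    if (value < 0)
    {
        throw std::invalid_argument("value must be non-negative");
    }
    return value;
}

TEST_CASE("NegativeValueThrows")
{
    CHECK_THROWS_AS(RequireNonNegative(-1), std::invalid_argument);
    CHECK(RequireNonNegative(4) == 4);
}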
diff --git a/src/armnnTfLiteParser/test/StridedSlice.cpp b/src/armnnTfLiteParser/test/StridedSlice.cpp
index 91427a6420..2951b8890d 100644
--- a/src/armnnTfLiteParser/test/StridedSlice.cpp
+++ b/src/armnnTfLiteParser/test/StridedSlice.cpp
@@ -3,14 +3,13 @@
// SPDX-License-Identifier: MIT
//
-#include <boost/test/unit_test.hpp>
#include "ParserFlatbuffersFixture.hpp"
#include "../TfLiteParser.hpp"
#include <string>
-BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
-
+TEST_SUITE("TensorflowLiteParser_StridedSlice")
+{
struct StridedSliceFixture : public ParserFlatbuffersFixture
{
explicit StridedSliceFixture(const std::string & inputShape,
@@ -115,7 +114,7 @@ struct StridedSlice4DFixture : StridedSliceFixture
) {}
};
-BOOST_FIXTURE_TEST_CASE(StridedSlice4D, StridedSlice4DFixture)
+TEST_CASE_FIXTURE(StridedSlice4DFixture, "StridedSlice4D")
{
RunTest<4, armnn::DataType::Float32>(
0,
@@ -147,7 +146,7 @@ struct StridedSlice4DReverseFixture : StridedSliceFixture
) {}
};
-BOOST_FIXTURE_TEST_CASE(StridedSlice4DReverse, StridedSlice4DReverseFixture)
+TEST_CASE_FIXTURE(StridedSlice4DReverseFixture, "StridedSlice4DReverse")
{
RunTest<4, armnn::DataType::Float32>(
0,
@@ -170,7 +169,7 @@ struct StridedSliceSimpleStrideFixture : StridedSliceFixture
) {}
};
-BOOST_FIXTURE_TEST_CASE(StridedSliceSimpleStride, StridedSliceSimpleStrideFixture)
+TEST_CASE_FIXTURE(StridedSliceSimpleStrideFixture, "StridedSliceSimpleStride")
{
RunTest<4, armnn::DataType::Float32>(
0,
@@ -197,7 +196,7 @@ struct StridedSliceSimpleRangeMaskFixture : StridedSliceFixture
) {}
};
-BOOST_FIXTURE_TEST_CASE(StridedSliceSimpleRangeMask, StridedSliceSimpleRangeMaskFixture)
+TEST_CASE_FIXTURE(StridedSliceSimpleRangeMaskFixture, "StridedSliceSimpleRangeMask")
{
RunTest<4, armnn::DataType::Float32>(
0,
@@ -214,4 +213,4 @@ BOOST_FIXTURE_TEST_CASE(StridedSliceSimpleRangeMask, StridedSliceSimpleRangeMask
5.0f, 5.0f, 5.0f, 6.0f, 6.0f, 6.0f }}});
}
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnnTfLiteParser/test/Sub.cpp b/src/armnnTfLiteParser/test/Sub.cpp
index 2854d81197..4e715ff712 100644
--- a/src/armnnTfLiteParser/test/Sub.cpp
+++ b/src/armnnTfLiteParser/test/Sub.cpp
@@ -3,15 +3,14 @@
// SPDX-License-Identifier: MIT
//
-#include <boost/test/unit_test.hpp>
#include "ParserFlatbuffersFixture.hpp"
#include "../TfLiteParser.hpp"
#include <string>
#include <iostream>
-BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
-
+TEST_SUITE("TensorflowLiteParser_Sub")
+{
struct SubFixture : public ParserFlatbuffersFixture
{
explicit SubFixture(const std::string & inputShape1,
@@ -95,7 +94,7 @@ struct SimpleSubFixture : SubFixture
"[ 1, 4 ]") {}
};
-BOOST_FIXTURE_TEST_CASE(SimpleSub, SimpleSubFixture)
+TEST_CASE_FIXTURE(SimpleSubFixture, "SimpleSub")
{
RunTest<2, armnn::DataType::QAsymmU8>(
0,
@@ -111,7 +110,7 @@ struct DynamicSubFixture : SubFixture
"[ ]") {}
};
-BOOST_FIXTURE_TEST_CASE(DynamicSub, DynamicSubFixture)
+TEST_CASE_FIXTURE(DynamicSubFixture, "DynamicSub")
{
RunTest<2, armnn::DataType::QAsymmU8, armnn::DataType::QAsymmU8>(
0,
@@ -121,4 +120,4 @@ BOOST_FIXTURE_TEST_CASE(DynamicSub, DynamicSubFixture)
true);
}
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnnTfLiteParser/test/Sum.cpp b/src/armnnTfLiteParser/test/Sum.cpp
index 177bcd52de..09b20b654b 100644
--- a/src/armnnTfLiteParser/test/Sum.cpp
+++ b/src/armnnTfLiteParser/test/Sum.cpp
@@ -3,15 +3,14 @@
// SPDX-License-Identifier: MIT
//
-#include <boost/test/unit_test.hpp>
#include "ParserFlatbuffersFixture.hpp"
#include "../TfLiteParser.hpp"
#include <string>
#include <iostream>
-BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
-
+TEST_SUITE("TensorflowLiteParser_Sum")
+{
struct SumFixture : public ParserFlatbuffersFixture
{
explicit SumFixture(const std::string& inputShape,
@@ -93,7 +92,7 @@ struct SimpleSumFixture : public SumFixture
SimpleSumFixture() : SumFixture("[ 1, 3, 2, 4 ]", "[ 1, 1, 1, 4 ]", "[ 2 ]", "[ 1, 0, 0, 0, 2, 0, 0, 0 ]") {}
};
-BOOST_FIXTURE_TEST_CASE(ParseSum, SimpleSumFixture)
+TEST_CASE_FIXTURE(SimpleSumFixture, "ParseSum")
{
RunTest<4, armnn::DataType::Float32, armnn::DataType::Float32>
(0, {{ "inputTensor", { 1.0f, 2.0f, 3.0f, 4.0f,
@@ -107,4 +106,4 @@ BOOST_FIXTURE_TEST_CASE(ParseSum, SimpleSumFixture)
{{ "outputTensor", { 666.0f, 888.0f, 1110.0f, 1332.0f } } });
}
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnnTfLiteParser/test/TfLiteParser.cpp b/src/armnnTfLiteParser/test/TfLiteParser.cpp
index 36827c0586..53fe4a33e7 100644
--- a/src/armnnTfLiteParser/test/TfLiteParser.cpp
+++ b/src/armnnTfLiteParser/test/TfLiteParser.cpp
@@ -3,18 +3,18 @@
// SPDX-License-Identifier: MIT
//
-#include <boost/test/unit_test.hpp>
#include "ParserFlatbuffersFixture.hpp"
#include "../TfLiteParser.hpp"
+#include <doctest/doctest.h>
-BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
-
-BOOST_AUTO_TEST_CASE(ParseEmptyBinaryData)
+TEST_SUITE("TensorflowLiteParser")
+{
+TEST_CASE("ParseEmptyBinaryData")
{
ITfLiteParser::TfLiteParserOptions options;
ITfLiteParserPtr m_Parser(ITfLiteParser::Create(armnn::Optional<ITfLiteParser::TfLiteParserOptions>(options)));
// Should throw armnn::ParseException: Buffer doesn't conform to the expected Tensorflow Lite flatbuffers format.
- BOOST_CHECK_THROW(m_Parser->CreateNetworkFromBinary({0}), armnn::ParseException);
+ CHECK_THROWS_AS(m_Parser->CreateNetworkFromBinary({0}), armnn::ParseException);
}
struct NoInputBindingsFixture : public ParserFlatbuffersFixture
@@ -32,10 +32,10 @@ struct NoInputBindingsFixture : public ParserFlatbuffersFixture
}
};
-BOOST_FIXTURE_TEST_CASE( ParseBadInputBindings, NoInputBindingsFixture )
+TEST_CASE_FIXTURE(NoInputBindingsFixture, "ParseBadInputBindings")
{
// Should throw armnn::ParseException: No input binding found for subgraph:0 and name:inputTensor.
- BOOST_CHECK_THROW( (RunTest<4, armnn::DataType::QAsymmU8>(0, { }, { 0 })), armnn::ParseException);
+ CHECK_THROWS_AS((RunTest<4, armnn::DataType::QAsymmU8>(0, { }, { 0 })), armnn::ParseException);
}
-BOOST_AUTO_TEST_SUITE_END()
+}
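Removing <boost/test/unit_test.hpp> also removes Boost's auto-generated runner, so the doctest build needs main() from exactly one translation unit: either define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN before including the header, or define DOCTEST_CONFIG_IMPLEMENT and write main() by hand. How the Arm NN test target wires this up is outside this diff; the following is only a generic sketch of the hand-written variant:

// Generic doctest runner sketch (not Arm NN's actual test entry point).
// DOCTEST_CONFIG_IMPLEMENT pulls in the framework implementation without a main,
// so the context is configured and run explicitly.
#define DOCTEST_CONFIG_IMPLEMENT
#include <doctest/doctest.h>

int main(int argc, char** argv)
{
    doctest::Context context;
    context.applyCommandLine(argc, argv);    // honour doctest's flags, e.g. --test-suite=<filter>
    return context.run();                    // non-zero when any test case fails
}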
diff --git a/src/armnnTfLiteParser/test/Transpose.cpp b/src/armnnTfLiteParser/test/Transpose.cpp
index b2f953e75d..5429e567ef 100644
--- a/src/armnnTfLiteParser/test/Transpose.cpp
+++ b/src/armnnTfLiteParser/test/Transpose.cpp
@@ -3,12 +3,11 @@
// SPDX-License-Identifier: MIT
//
-#include <boost/test/unit_test.hpp>
#include "ParserFlatbuffersFixture.hpp"
#include "../TfLiteParser.hpp"
-BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
-
+TEST_SUITE("TensorflowLiteParser_Transpose")
+{
struct TransposeFixture : public ParserFlatbuffersFixture
{
explicit TransposeFixture(const std::string & inputShape,
@@ -118,14 +117,14 @@ struct TransposeFixtureWithPermuteData : TransposeFixture
"[ 2, 3, 2 ]") {}
};
-BOOST_FIXTURE_TEST_CASE(TransposeWithPermuteData, TransposeFixtureWithPermuteData)
+TEST_CASE_FIXTURE(TransposeFixtureWithPermuteData, "TransposeWithPermuteData")
{
RunTest<3, armnn::DataType::Float32>(
0,
{{"inputTensor", { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12 }}},
{{"outputTensor", { 1, 4, 2, 5, 3, 6, 7, 10, 8, 11, 9, 12 }}});
- BOOST_TEST((m_Parser->GetNetworkOutputBindingInfo(0, "outputTensor").second.GetShape()
+ CHECK((m_Parser->GetNetworkOutputBindingInfo(0, "outputTensor").second.GetShape()
== armnn::TensorShape({2,3,2})));
}
@@ -139,15 +138,15 @@ struct TransposeFixtureWithoutPermuteData : TransposeFixture
"[ 3, 2, 2 ]") {}
};
-BOOST_FIXTURE_TEST_CASE(TransposeWithoutPermuteDims, TransposeFixtureWithoutPermuteData)
+TEST_CASE_FIXTURE(TransposeFixtureWithoutPermuteData, "TransposeWithoutPermuteDims")
{
RunTest<3, armnn::DataType::Float32>(
0,
{{"inputTensor", { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12 }}},
{{"outputTensor", { 1, 7, 4, 10, 2, 8, 5, 11, 3, 9, 6, 12 }}});
- BOOST_TEST((m_Parser->GetNetworkOutputBindingInfo(0, "outputTensor").second.GetShape()
+ CHECK((m_Parser->GetNetworkOutputBindingInfo(0, "outputTensor").second.GetShape()
== armnn::TensorShape({3,2,2})));
}
-BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
+}
\ No newline at end of file
diff --git a/src/armnnTfLiteParser/test/TransposeConv.cpp b/src/armnnTfLiteParser/test/TransposeConv.cpp
index f990941ad9..0f53e73640 100644
--- a/src/armnnTfLiteParser/test/TransposeConv.cpp
+++ b/src/armnnTfLiteParser/test/TransposeConv.cpp
@@ -3,12 +3,11 @@
// SPDX-License-Identifier: MIT
//
-#include <boost/test/unit_test.hpp>
#include "ParserFlatbuffersFixture.hpp"
#include "../TfLiteParser.hpp"
-BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
-
+TEST_SUITE("TensorflowLiteParser_TransposeConv")
+{
struct TransposeConvFixture : public ParserFlatbuffersFixture
{
explicit TransposeConvFixture(const std::string& inputShape,
@@ -116,7 +115,7 @@ struct SimpleTransposeConvFixture : TransposeConvFixture
{}
};
-BOOST_FIXTURE_TEST_CASE( ParseSimpleTransposeConv, SimpleTransposeConvFixture )
+TEST_CASE_FIXTURE(SimpleTransposeConvFixture, "ParseSimpleTransposeConv")
{
RunTest<4, armnn::DataType::QAsymmU8>(
0,
@@ -255,7 +254,7 @@ struct SimpleTransposeConvFixtureWithBias : TransposeConvFixtureWithBias
{}
};
-BOOST_FIXTURE_TEST_CASE( ParseSimpleTransposeConvWithBias, SimpleTransposeConvFixtureWithBias )
+TEST_CASE_FIXTURE(SimpleTransposeConvFixtureWithBias, "ParseSimpleTransposeConvWithBias")
{
RunTest<4, armnn::DataType::QAsymmU8>(
0,
@@ -571,7 +570,7 @@ struct TransposeConvPerChannelFixture : public ParserFlatbuffersFixture
}
};
-BOOST_FIXTURE_TEST_CASE( ParseTransposeConvPerChannel, TransposeConvPerChannelFixture )
+TEST_CASE_FIXTURE(TransposeConvPerChannelFixture, "ParseTransposeConvPerChannel")
{
RunTest<4, armnn::DataType::QAsymmS8>(
0,
@@ -601,4 +600,4 @@ BOOST_FIXTURE_TEST_CASE( ParseTransposeConvPerChannel, TransposeConvPerChannelFi
});
}
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnnTfLiteParser/test/Unpack.cpp b/src/armnnTfLiteParser/test/Unpack.cpp
index 4fcd74f585..991352ba6b 100644
--- a/src/armnnTfLiteParser/test/Unpack.cpp
+++ b/src/armnnTfLiteParser/test/Unpack.cpp
@@ -3,15 +3,14 @@
// SPDX-License-Identifier: MIT
//
-#include <boost/test/unit_test.hpp>
#include "ParserFlatbuffersFixture.hpp"
#include "../TfLiteParser.hpp"
#include <string>
#include <iostream>
-BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
-
+TEST_SUITE("TensorflowLiteParser_Unpack")
+{
struct UnpackFixture : public ParserFlatbuffersFixture
{
explicit UnpackFixture(const std::string& inputShape,
@@ -110,7 +109,7 @@ struct DefaultUnpackAxisZeroUint8Fixture : UnpackFixture
DefaultUnpackAxisZeroUint8Fixture() : UnpackFixture("[ 4, 1, 6 ]", 4, "[ 1, 6 ]", "0", "", "UINT8", "0.1", "0") {}
};
-BOOST_FIXTURE_TEST_CASE(UnpackAxisZeroNumIsDefaultNotSpecified, DefaultUnpackAxisZeroFixture)
+TEST_CASE_FIXTURE(DefaultUnpackAxisZeroFixture, "UnpackAxisZeroNumIsDefaultNotSpecified")
{
RunTest<2, armnn::DataType::Float32>(
0,
@@ -124,7 +123,7 @@ BOOST_FIXTURE_TEST_CASE(UnpackAxisZeroNumIsDefaultNotSpecified, DefaultUnpackAxi
{"outputTensor4", { 19.0f, 20.0f, 21.0f, 22.0f, 23.0f, 24.0f }} });
}
-BOOST_FIXTURE_TEST_CASE(UnpackAxisZeroNumIsDefaultNotSpecifiedUint8, DefaultUnpackAxisZeroUint8Fixture)
+TEST_CASE_FIXTURE(DefaultUnpackAxisZeroUint8Fixture, "UnpackAxisZeroNumIsDefaultNotSpecifiedUint8")
{
RunTest<2, armnn::DataType::QAsymmU8>(
0,
@@ -148,7 +147,7 @@ struct DefaultUnpackLastAxisUint8Fixture : UnpackFixture
DefaultUnpackLastAxisUint8Fixture() : UnpackFixture("[ 4, 1, 6 ]", 6, "[ 4, 1 ]", "2", "6", "UINT8", "0.1", "0") {}
};
-BOOST_FIXTURE_TEST_CASE(UnpackLastAxisNumSix, DefaultUnpackLastAxisFixture)
+TEST_CASE_FIXTURE(DefaultUnpackLastAxisFixture, "UnpackLastAxisNumSix")
{
RunTest<2, armnn::DataType::Float32>(
0,
@@ -164,7 +163,7 @@ BOOST_FIXTURE_TEST_CASE(UnpackLastAxisNumSix, DefaultUnpackLastAxisFixture)
{"outputTensor6", { 6.0f, 12.0f, 18.0f, 24.0f }} });
}
-BOOST_FIXTURE_TEST_CASE(UnpackLastAxisNumSixUint8, DefaultUnpackLastAxisUint8Fixture) {
+TEST_CASE_FIXTURE(DefaultUnpackLastAxisUint8Fixture, "UnpackLastAxisNumSixUint8") {
RunTest<2, armnn::DataType::QAsymmU8>(
0,
{{"inputTensor", { 1, 2, 3, 4, 5, 6,
@@ -179,4 +178,4 @@ BOOST_FIXTURE_TEST_CASE(UnpackLastAxisNumSixUint8, DefaultUnpackLastAxisUint8Fix
{"outputTensor6", { 60, 120, 180, 240 }}});
}
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnnTfLiteParser/test/Unsupported.cpp b/src/armnnTfLiteParser/test/Unsupported.cpp
index b0ac2d60ad..69744f45cf 100644
--- a/src/armnnTfLiteParser/test/Unsupported.cpp
+++ b/src/armnnTfLiteParser/test/Unsupported.cpp
@@ -13,14 +13,12 @@
#include <layers/StandInLayer.hpp>
-#include <boost/test/unit_test.hpp>
-
#include <sstream>
#include <string>
#include <vector>
-BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
-
+TEST_SUITE("TensorflowLiteParser_Unsupported")
+{
using namespace armnn;
class StandInLayerVerifier : public LayerVisitorBase<VisitorThrowingPolicy>
@@ -41,27 +39,27 @@ public:
const char*) override
{
unsigned int numInputs = armnn::numeric_cast<unsigned int>(m_InputInfos.size());
- BOOST_CHECK(descriptor.m_NumInputs == numInputs);
- BOOST_CHECK(layer->GetNumInputSlots() == numInputs);
+ CHECK(descriptor.m_NumInputs == numInputs);
+ CHECK(layer->GetNumInputSlots() == numInputs);
unsigned int numOutputs = armnn::numeric_cast<unsigned int>(m_OutputInfos.size());
- BOOST_CHECK(descriptor.m_NumOutputs == numOutputs);
- BOOST_CHECK(layer->GetNumOutputSlots() == numOutputs);
+ CHECK(descriptor.m_NumOutputs == numOutputs);
+ CHECK(layer->GetNumOutputSlots() == numOutputs);
const StandInLayer* standInLayer = PolymorphicDowncast<const StandInLayer*>(layer);
for (unsigned int i = 0u; i < numInputs; ++i)
{
const OutputSlot* connectedSlot = standInLayer->GetInputSlot(i).GetConnectedOutputSlot();
- BOOST_CHECK(connectedSlot != nullptr);
+ CHECK(connectedSlot != nullptr);
const TensorInfo& inputInfo = connectedSlot->GetTensorInfo();
- BOOST_CHECK(inputInfo == m_InputInfos[i]);
+ CHECK(inputInfo == m_InputInfos[i]);
}
for (unsigned int i = 0u; i < numOutputs; ++i)
{
const TensorInfo& outputInfo = layer->GetOutputSlot(i).GetTensorInfo();
- BOOST_CHECK(outputInfo == m_OutputInfos[i]);
+ CHECK(outputInfo == m_OutputInfos[i]);
}
}
@@ -237,14 +235,14 @@ public:
{ TensorInfo({ 3, 3 }, DataType::Float32) }) {}
};
-BOOST_FIXTURE_TEST_CASE(UnsupportedCustomOperator1Input1Output, DummyCustom1Input1OutputFixture)
+TEST_CASE_FIXTURE(DummyCustom1Input1OutputFixture, "UnsupportedCustomOperator1Input1Output")
{
RunTest();
}
-BOOST_FIXTURE_TEST_CASE(UnsupportedCustomOperator2Inputs1Output, DummyCustom2Inputs1OutputFixture)
+TEST_CASE_FIXTURE(DummyCustom2Inputs1OutputFixture, "UnsupportedCustomOperator2Inputs1Output")
{
RunTest();
}
-BOOST_AUTO_TEST_SUITE_END()
+}
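The StandInLayerVerifier changes above rely on the fact that doctest assertions, like the Boost macros they replace, may sit in helper classes and free functions provided they execute while a test case is running; here the visitor's CHECK calls fire during RunTest() inside the TEST_CASE_FIXTURE bodies, so failures are reported against those tests. A minimal sketch of the same idea with invented names:

// Example-only: CHECK used inside a helper that is called from a running test case.
#include <doctest/doctest.h>

#include <vector>

static void VerifyAllPositive(const std::vector<int>& values)
{
    for (int value : values)
    {
        CHECK(value > 0);    // attributed to whichever TEST_CASE invoked this helper
    }
}

TEST_CASE("HelperAssertionsAreAttributedToThisTest")
{
    VerifyAllPositive({ 1, 2, 3 });
}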