author    Sadik Armagan <sadik.armagan@arm.com> 2021-06-10 18:24:34 +0100
committer Sadik Armagan <sadik.armagan@arm.com> 2021-06-11 10:33:16 +0000
commit    1625efc870f1a8b7c6e6382277ddbb245f91a294 (patch)
tree      39fbbaa15ed7eb81337b082c2d20b0af68b91c02 /src/armnn/test/optimizations
parent    958e0ba61e940a8d11955cf2a10f681c7c47e1fa (diff)
download  armnn-1625efc870f1a8b7c6e6382277ddbb245f91a294.tar.gz
IVGCVSW-5963 'Move unit tests to new framework'

* Used doctest in ArmNN unit tests

Signed-off-by: Sadik Armagan <sadik.armagan@arm.com>
Change-Id: Ia9cf5fc72775878885c5f864abf2c56b3a935f1a
Diffstat (limited to 'src/armnn/test/optimizations')
-rw-r--r--  src/armnn/test/optimizations/AddBroadcastReshapeLayerTests.cpp            |  95
-rw-r--r--  src/armnn/test/optimizations/ConvertConstantsBFloatTests.cpp              |  51
-rw-r--r--  src/armnn/test/optimizations/ConvertConstantsFloatToHalfTests.cpp         |  21
-rw-r--r--  src/armnn/test/optimizations/ConvertConstantsHalfToFloatTests.cpp         |  21
-rw-r--r--  src/armnn/test/optimizations/FoldPadTests.cpp                             |  67
-rw-r--r--  src/armnn/test/optimizations/Fp32NetworkToBf16ConverterTests.cpp          |  77
-rw-r--r--  src/armnn/test/optimizations/Fp32NetworkToFp16ConverterTests.cpp          |  13
-rw-r--r--  src/armnn/test/optimizations/FuseActivationTests.cpp                      | 242
-rw-r--r--  src/armnn/test/optimizations/FuseBatchNormTests.cpp                       |  31
-rw-r--r--  src/armnn/test/optimizations/InsertDebugLayerTests.cpp                    |  13
-rw-r--r--  src/armnn/test/optimizations/MovePermuteUpTests.cpp                       |  15
-rw-r--r--  src/armnn/test/optimizations/MoveTransposeUpTests.cpp                     |  15
-rw-r--r--  src/armnn/test/optimizations/OptimizeConsecutiveReshapesTests.cpp         |  17
-rw-r--r--  src/armnn/test/optimizations/OptimizeInverseConversionsTests.cpp          |  13
-rw-r--r--  src/armnn/test/optimizations/OptimizeInversePermutesTests.cpp             |  19
-rw-r--r--  src/armnn/test/optimizations/PermuteAndBatchToSpaceAsDepthToSpaceTests.cpp |  35
-rw-r--r--  src/armnn/test/optimizations/PermuteAsReshapeTests.cpp                    |  15
-rw-r--r--  src/armnn/test/optimizations/ReduceMultipleAxesTests.cpp                  | 175
-rw-r--r--  src/armnn/test/optimizations/SquashEqualSiblingsTests.cpp                 |  13
-rw-r--r--  src/armnn/test/optimizations/TransposeAsReshapeTests.cpp                  |  15
20 files changed, 490 insertions, 473 deletions
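
The conversion in the hunks below is largely mechanical. As a rough guide, here is a minimal standalone sketch of how the Boost.Test constructs map onto doctest. This sketch is not part of the patch; the suite, test, and message contents are made up for illustration:

// Minimal Boost.Test -> doctest mapping sketch (illustrative only).
#define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN // for a standalone build of this sketch;
                                           // omit if a main() is provided elsewhere
#include <doctest/doctest.h>               // replaces <boost/test/unit_test.hpp>

TEST_SUITE("Optimizer")                    // replaces BOOST_AUTO_TEST_SUITE(Optimizer)
{                                          // a doctest suite is an ordinary braced block

TEST_CASE("ExampleTest")                   // replaces BOOST_AUTO_TEST_CASE(ExampleTest)
{
    CHECK(1 + 1 == 2);                     // replaces BOOST_TEST(...) and BOOST_CHECK(...)
    CHECK_MESSAGE(2 > 1, "context " << 42); // replaces BOOST_CHECK_MESSAGE(...)
}

}                                          // the closing brace replaces BOOST_AUTO_TEST_SUITE_END()
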
diff --git a/src/armnn/test/optimizations/AddBroadcastReshapeLayerTests.cpp b/src/armnn/test/optimizations/AddBroadcastReshapeLayerTests.cpp
index d0d728bfab..36a4507fc3 100644
--- a/src/armnn/test/optimizations/AddBroadcastReshapeLayerTests.cpp
+++ b/src/armnn/test/optimizations/AddBroadcastReshapeLayerTests.cpp
@@ -8,11 +8,12 @@
#include <Optimizer.hpp>
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
using namespace armnn;
-BOOST_AUTO_TEST_SUITE(Optimizer)
+TEST_SUITE("Optimizer")
+{
using namespace optimizations;
void AddBroadcastReshapeLayerOptimizerTest(const TensorInfo& info0,
@@ -36,7 +37,7 @@ void AddBroadcastReshapeLayerOptimizerTest(const TensorInfo& info0,
input1->GetOutputSlot().Connect(add->GetInputSlot(1));
add->GetOutputSlot().Connect(output->GetInputSlot(0));
- BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(),
+ CHECK(CheckSequence(graph.cbegin(), graph.cend(),
&IsLayerOfType<InputLayer>,
&IsLayerOfType<InputLayer>,
&IsLayerOfType<AdditionLayer>,
@@ -46,7 +47,7 @@ void AddBroadcastReshapeLayerOptimizerTest(const TensorInfo& info0,
armnn::Optimizer::Pass(graph, MakeOptimizations(AddBroadcastReshapeLayer()));
// Broadcast reshape layer has been added to the graph correctly
- BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(),
+ CHECK(CheckSequence(graph.cbegin(), graph.cend(),
&IsLayerOfType<InputLayer>,
&IsLayerOfType<InputLayer>,
&IsLayerOfType<ReshapeLayer>,
@@ -54,15 +55,15 @@ void AddBroadcastReshapeLayerOptimizerTest(const TensorInfo& info0,
&IsLayerOfType<OutputLayer>));
Layer* const reshapeLayer = GetFirstLayerWithName(graph, reshapeLayerName);
- BOOST_TEST(reshapeLayer);
+ CHECK(reshapeLayer);
auto addedReshapeTensorInfo = reshapeLayer->GetOutputSlot().GetTensorInfo();
// Tensor shape and the data type are correct
- BOOST_TEST((addedReshapeTensorInfo.GetShape() == expectedReshapeShape));
- BOOST_TEST((addedReshapeTensorInfo.GetDataType() == expectedDataType));
+ CHECK((addedReshapeTensorInfo.GetShape() == expectedReshapeShape));
+ CHECK((addedReshapeTensorInfo.GetDataType() == expectedDataType));
}
-BOOST_AUTO_TEST_CASE(AddBroadcastReshapeLayerSimpleTest)
+TEST_CASE("AddBroadcastReshapeLayerSimpleTest")
{
const TensorInfo info0({ 1, 2, 3, 5 }, DataType::Float32);
const TensorInfo info1({ 1 }, DataType::Float32);
@@ -71,7 +72,7 @@ BOOST_AUTO_TEST_CASE(AddBroadcastReshapeLayerSimpleTest)
DataType::Float32);
}
-BOOST_AUTO_TEST_CASE(AddBroadcastReshapeLayer1DTest)
+TEST_CASE("AddBroadcastReshapeLayer1DTest")
{
const TensorInfo info0({ 1, 2, 3, 5 }, DataType::Float32);
const TensorInfo info1({ 5 }, DataType::Float32);
@@ -81,7 +82,7 @@ BOOST_AUTO_TEST_CASE(AddBroadcastReshapeLayer1DTest)
DataType::Float32);
}
-BOOST_AUTO_TEST_CASE(AddBroadcastReshapeLayer2DTest)
+TEST_CASE("AddBroadcastReshapeLayer2DTest")
{
const TensorInfo info0({ 1, 2, 3, 5 }, DataType::Float32);
const TensorInfo info1({ 3, 5 }, DataType::Float32);
@@ -91,7 +92,7 @@ BOOST_AUTO_TEST_CASE(AddBroadcastReshapeLayer2DTest)
DataType::Float32);
}
-BOOST_AUTO_TEST_CASE(AddBroadcastReshapeLayer3DTest)
+TEST_CASE("AddBroadcastReshapeLayer3DTest")
{
const TensorInfo info0({ 2, 1, 1, 1 }, DataType::Float32);
const TensorInfo info1({ 3, 4, 5 }, DataType::Float32);
@@ -101,7 +102,7 @@ BOOST_AUTO_TEST_CASE(AddBroadcastReshapeLayer3DTest)
DataType::Float32);
}
-BOOST_AUTO_TEST_CASE(AddBroadcastReshapeLayer3DMergedTest)
+TEST_CASE("AddBroadcastReshapeLayer3DMergedTest")
{
const TensorInfo info0({ 2, 3, 1, 1 }, DataType::Float32);
const TensorInfo info1({ 3, 4, 5 }, DataType::Float32);
@@ -111,7 +112,7 @@ BOOST_AUTO_TEST_CASE(AddBroadcastReshapeLayer3DMergedTest)
DataType::Float32);
}
-BOOST_AUTO_TEST_CASE(AddBroadcastReshapeLayerSubtractionTest)
+TEST_CASE("AddBroadcastReshapeLayerSubtractionTest")
{
Graph graph;
const TensorInfo info0({ 5 }, DataType::Float32);
@@ -130,7 +131,7 @@ BOOST_AUTO_TEST_CASE(AddBroadcastReshapeLayerSubtractionTest)
input1->GetOutputSlot().Connect(sub->GetInputSlot(1));
sub->GetOutputSlot().Connect(output->GetInputSlot(0));
- BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(),
+ CHECK(CheckSequence(graph.cbegin(), graph.cend(),
&IsLayerOfType<InputLayer>,
&IsLayerOfType<InputLayer>,
&IsLayerOfType<SubtractionLayer>,
@@ -140,7 +141,7 @@ BOOST_AUTO_TEST_CASE(AddBroadcastReshapeLayerSubtractionTest)
armnn::Optimizer::Pass(graph, MakeOptimizations(AddBroadcastReshapeLayer()));
// Broadcast reshape layer has been added to the graph correctly
- BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(),
+ CHECK(CheckSequence(graph.cbegin(), graph.cend(),
&IsLayerOfType<InputLayer>,
&IsLayerOfType<InputLayer>,
&IsLayerOfType<ReshapeLayer>,
@@ -148,15 +149,15 @@ BOOST_AUTO_TEST_CASE(AddBroadcastReshapeLayerSubtractionTest)
&IsLayerOfType<OutputLayer>));
Layer* const reshapeLayer = GetFirstLayerWithName(graph, "Reshape_for:sub-0");
- BOOST_TEST(reshapeLayer);
+ CHECK(reshapeLayer);
auto addedReshapeTensorInfo = reshapeLayer->GetOutputSlot().GetTensorInfo();
// Tensor shape and the data type are correct
- BOOST_TEST((addedReshapeTensorInfo.GetShape() == TensorShape({ 1, 1, 1, 5 })));
- BOOST_TEST((addedReshapeTensorInfo.GetDataType() == DataType::Float32));
+ CHECK((addedReshapeTensorInfo.GetShape() == TensorShape({ 1, 1, 1, 5 })));
+ CHECK((addedReshapeTensorInfo.GetDataType() == DataType::Float32));
}
-BOOST_AUTO_TEST_CASE(AddBroadcastReshapeLayerDivisionTest)
+TEST_CASE("AddBroadcastReshapeLayerDivisionTest")
{
Graph graph;
const TensorInfo info0({ 1, 4, 5 }, DataType::QAsymmS8);
@@ -175,7 +176,7 @@ BOOST_AUTO_TEST_CASE(AddBroadcastReshapeLayerDivisionTest)
input1->GetOutputSlot().Connect(div->GetInputSlot(1));
div->GetOutputSlot().Connect(output->GetInputSlot(0));
- BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(),
+ CHECK(CheckSequence(graph.cbegin(), graph.cend(),
&IsLayerOfType<InputLayer>,
&IsLayerOfType<InputLayer>,
&IsLayerOfType<DivisionLayer>,
@@ -185,7 +186,7 @@ BOOST_AUTO_TEST_CASE(AddBroadcastReshapeLayerDivisionTest)
armnn::Optimizer::Pass(graph, MakeOptimizations(AddBroadcastReshapeLayer()));
// Broadcast reshape layer has been added to the graph correctly
- BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(),
+ CHECK(CheckSequence(graph.cbegin(), graph.cend(),
&IsLayerOfType<InputLayer>,
&IsLayerOfType<InputLayer>,
&IsLayerOfType<ReshapeLayer>,
@@ -193,15 +194,15 @@ BOOST_AUTO_TEST_CASE(AddBroadcastReshapeLayerDivisionTest)
&IsLayerOfType<OutputLayer>));
Layer* const reshapeLayer = GetFirstLayerWithName(graph, "Reshape_for:div-0");
- BOOST_TEST(reshapeLayer);
+ CHECK(reshapeLayer);
auto addedReshapeTensorInfo = reshapeLayer->GetOutputSlot().GetTensorInfo();
// Tensor shape and the data type are correct
- BOOST_TEST((addedReshapeTensorInfo.GetShape() == TensorShape({ 1, 1, 4, 5 })));
- BOOST_TEST((addedReshapeTensorInfo.GetDataType() == DataType::QAsymmS8));
+ CHECK((addedReshapeTensorInfo.GetShape() == TensorShape({ 1, 1, 4, 5 })));
+ CHECK((addedReshapeTensorInfo.GetDataType() == DataType::QAsymmS8));
}
-BOOST_AUTO_TEST_CASE(AddBroadcastReshapeLayerMultiplicationTest)
+TEST_CASE("AddBroadcastReshapeLayerMultiplicationTest")
{
Graph graph;
const TensorInfo info0({ 3, 5 }, DataType::QAsymmU8);
@@ -220,7 +221,7 @@ BOOST_AUTO_TEST_CASE(AddBroadcastReshapeLayerMultiplicationTest)
input1->GetOutputSlot().Connect(mul->GetInputSlot(1));
mul->GetOutputSlot().Connect(output->GetInputSlot(0));
- BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(),
+ CHECK(CheckSequence(graph.cbegin(), graph.cend(),
&IsLayerOfType<InputLayer>,
&IsLayerOfType<InputLayer>,
&IsLayerOfType<MultiplicationLayer>,
@@ -230,7 +231,7 @@ BOOST_AUTO_TEST_CASE(AddBroadcastReshapeLayerMultiplicationTest)
armnn::Optimizer::Pass(graph, MakeOptimizations(AddBroadcastReshapeLayer()));
// Broadcast reshape layer has been added to the graph correctly
- BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(),
+ CHECK(CheckSequence(graph.cbegin(), graph.cend(),
&IsLayerOfType<InputLayer>,
&IsLayerOfType<InputLayer>,
&IsLayerOfType<ReshapeLayer>,
@@ -238,15 +239,15 @@ BOOST_AUTO_TEST_CASE(AddBroadcastReshapeLayerMultiplicationTest)
&IsLayerOfType<OutputLayer>));
Layer* const reshapeLayer = GetFirstLayerWithName(graph, "Reshape_for:mul-0");
- BOOST_TEST(reshapeLayer);
+ CHECK(reshapeLayer);
auto addedReshapeTensorInfo = reshapeLayer->GetOutputSlot().GetTensorInfo();
// Tensor shape and the data type are correct
- BOOST_TEST((addedReshapeTensorInfo.GetShape() == TensorShape({ 1, 1, 3, 5 })));
- BOOST_TEST((addedReshapeTensorInfo.GetDataType() == DataType::QAsymmU8));
+ CHECK((addedReshapeTensorInfo.GetShape() == TensorShape({ 1, 1, 3, 5 })));
+ CHECK((addedReshapeTensorInfo.GetDataType() == DataType::QAsymmU8));
}
-BOOST_AUTO_TEST_CASE(AddNoBroadcastReshapeLayerTest)
+TEST_CASE("AddNoBroadcastReshapeLayerTest")
{
Graph graph;
const TensorInfo info0({ 1, 1, 1, 1 }, DataType::QAsymmU8);
@@ -265,7 +266,7 @@ BOOST_AUTO_TEST_CASE(AddNoBroadcastReshapeLayerTest)
input1->GetOutputSlot().Connect(mul->GetInputSlot(1));
mul->GetOutputSlot().Connect(output->GetInputSlot(0));
- BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(),
+ CHECK(CheckSequence(graph.cbegin(), graph.cend(),
&IsLayerOfType<InputLayer>,
&IsLayerOfType<InputLayer>,
&IsLayerOfType<MultiplicationLayer>,
@@ -275,17 +276,17 @@ BOOST_AUTO_TEST_CASE(AddNoBroadcastReshapeLayerTest)
armnn::Optimizer::Pass(graph, MakeOptimizations(AddBroadcastReshapeLayer()));
// Broadcast reshape layer has not been added to the graph
- BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(),
+ CHECK(CheckSequence(graph.cbegin(), graph.cend(),
&IsLayerOfType<InputLayer>,
&IsLayerOfType<InputLayer>,
&IsLayerOfType<MultiplicationLayer>,
&IsLayerOfType<OutputLayer>));
Layer* const reshapeLayer = GetFirstLayerWithName(graph, "Reshape_for:mul-0");
- BOOST_TEST(!reshapeLayer);
+ CHECK(!reshapeLayer);
}
-BOOST_AUTO_TEST_CASE(ReshapeParentConstLayerTest)
+TEST_CASE("ReshapeParentConstLayerTest")
{
Graph graph;
const TensorInfo info0({ 1, 2, 3, 5 }, DataType::QAsymmU8);
@@ -309,7 +310,7 @@ BOOST_AUTO_TEST_CASE(ReshapeParentConstLayerTest)
constant->GetOutputSlot().Connect(mul->GetInputSlot(1));
mul->GetOutputSlot().Connect(output->GetInputSlot(0));
- BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(),
+ CHECK(CheckSequence(graph.cbegin(), graph.cend(),
&IsLayerOfType<InputLayer>,
&IsLayerOfType<ConstantLayer>,
&IsLayerOfType<MultiplicationLayer>,
@@ -319,22 +320,22 @@ BOOST_AUTO_TEST_CASE(ReshapeParentConstLayerTest)
armnn::Optimizer::Pass(graph, MakeOptimizations(AddBroadcastReshapeLayer()));
// Broadcast reshape layer has not been added to the graph
- BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(),
+ CHECK(CheckSequence(graph.cbegin(), graph.cend(),
&IsLayerOfType<InputLayer>,
&IsLayerOfType<ConstantLayer>,
&IsLayerOfType<MultiplicationLayer>,
&IsLayerOfType<OutputLayer>));
TensorShape expectedShape = TensorShape{ 1, 1, 1, 5 };
- BOOST_TEST(constant->m_LayerOutput.get()->GetTensorInfo().GetShape() == expectedShape);
+ CHECK(constant->m_LayerOutput.get()->GetTensorInfo().GetShape() == expectedShape);
- BOOST_TEST(constant->m_LayerOutput.get()->GetTensorInfo().GetNumDimensions() == info0.GetNumDimensions());
+ CHECK(constant->m_LayerOutput.get()->GetTensorInfo().GetNumDimensions() == info0.GetNumDimensions());
Layer* const reshapeLayer = GetFirstLayerWithName(graph, "Reshape_for:mul-0");
- BOOST_TEST(!reshapeLayer);
+ CHECK(!reshapeLayer);
}
-BOOST_AUTO_TEST_CASE(ReshapeParentConstAddLayerMultipleConnectionsTest)
+TEST_CASE("ReshapeParentConstAddLayerMultipleConnectionsTest")
{
// In this test case we recreate the situation where an Addition layer has
// a constant second term, e.g. [1,512] + [1]. The AddBroadcastReshapeLayer
@@ -367,7 +368,7 @@ BOOST_AUTO_TEST_CASE(ReshapeParentConstAddLayerMultipleConnectionsTest)
// This second connection should prevent the modification of the const output tensor.
constant->GetOutputSlot().Connect(add2->GetInputSlot(1));
- BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(),
+ CHECK(CheckSequence(graph.cbegin(), graph.cend(),
&IsLayerOfType<InputLayer>,
&IsLayerOfType<ConstantLayer>,
&IsLayerOfType<AdditionLayer>,
@@ -378,7 +379,7 @@ BOOST_AUTO_TEST_CASE(ReshapeParentConstAddLayerMultipleConnectionsTest)
armnn::Optimizer::Pass(graph, MakeOptimizations(AddBroadcastReshapeLayer()));
// Broadcast reshape should have been added before each addition layer.
- BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(),
+ CHECK(CheckSequence(graph.cbegin(), graph.cend(),
&IsLayerOfType<InputLayer>,
&IsLayerOfType<ConstantLayer>,
&IsLayerOfType<ReshapeLayer>,
@@ -388,14 +389,14 @@ BOOST_AUTO_TEST_CASE(ReshapeParentConstAddLayerMultipleConnectionsTest)
&IsLayerOfType<OutputLayer>));
// Ensure the output shape of the constant hasn't changed.
- BOOST_TEST(constant->m_LayerOutput.get()->GetTensorInfo().GetShape() == constantTermInfo.GetShape());
+ CHECK(constant->m_LayerOutput.get()->GetTensorInfo().GetShape() == constantTermInfo.GetShape());
// There should be two extra reshape layers with appropriate names.
Layer* const reshapeLayer1 = GetFirstLayerWithName(graph, "Reshape_for:add1-1");
Layer* const reshapeLayer2 = GetFirstLayerWithName(graph, "Reshape_for:add2-1");
- BOOST_TEST(reshapeLayer1);
- BOOST_TEST(reshapeLayer2);
+ CHECK(reshapeLayer1);
+ CHECK(reshapeLayer2);
}
-BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
+}
\ No newline at end of file
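
The structural assertions in this file lean on ArmNN's CheckSequence helper together with IsLayerOfType<> predicates. For readers unfamiliar with it, the following is a plausible sketch of such a variadic checker; it is hypothetical and not ArmNN's actual implementation, assuming the graph iterators dereference to something each predicate can be called on:

// Hypothetical sketch of a variadic layer-sequence checker.
#include <iterator>

template <typename Iterator>
bool CheckSequence(Iterator first, Iterator last)
{
    // Base case: a match requires every layer and every predicate to be consumed.
    return first == last;
}

template <typename Iterator, typename Pred, typename... Preds>
bool CheckSequence(Iterator first, Iterator last, Pred pred, Preds... rest)
{
    // The current layer must satisfy the current predicate, and the remaining
    // layers must match the remaining predicates, one for one.
    return first != last && pred(*first) && CheckSequence(std::next(first), last, rest...);
}

This is why each test asserts the full expected layer sequence before and after the optimization pass rather than probing individual layers.
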
diff --git a/src/armnn/test/optimizations/ConvertConstantsBFloatTests.cpp b/src/armnn/test/optimizations/ConvertConstantsBFloatTests.cpp
index e4c1f2f413..b78a1bf207 100644
--- a/src/armnn/test/optimizations/ConvertConstantsBFloatTests.cpp
+++ b/src/armnn/test/optimizations/ConvertConstantsBFloatTests.cpp
@@ -8,14 +8,15 @@
#include <BFloat16.hpp>
#include <Optimizer.hpp>
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
using namespace armnn;
-BOOST_AUTO_TEST_SUITE(Optimizer)
+TEST_SUITE("Optimizer")
+{
using namespace armnn::optimizations;
-BOOST_AUTO_TEST_CASE(ConvertConstantsFloatToBFloatTest)
+TEST_CASE("ConvertConstantsFloatToBFloatTest")
{
armnn::Graph graph;
@@ -48,27 +49,27 @@ BOOST_AUTO_TEST_CASE(ConvertConstantsFloatToBFloatTest)
fc->GetOutputSlot().Connect(output->GetInputSlot(0));
// Check tensor data type before conversion
- BOOST_CHECK(fc->m_Weight->GetTensorInfo().GetDataType() == armnn::DataType::Float32);
+ CHECK(fc->m_Weight->GetTensorInfo().GetDataType() == armnn::DataType::Float32);
// Run the optimizer
armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(ConvertConstantsFloatToBFloat()));
// Check tensor data type after conversion
- BOOST_CHECK(fc->m_Weight->GetTensorInfo().GetDataType() == armnn::DataType::BFloat16);
+ CHECK(fc->m_Weight->GetTensorInfo().GetDataType() == armnn::DataType::BFloat16);
// Check whether data matches expected Bf16 data
const BFloat16* data = fc->m_Weight->GetConstTensor<BFloat16>();
- BOOST_CHECK(data[0] == BFloat16(0.0f));
- BOOST_CHECK(data[1] == BFloat16(-1.0f));
- BOOST_CHECK(data[2] == BFloat16(3.796875f)); // 0x4073
- BOOST_CHECK(data[3] == BFloat16(3.1072295E29f)); // 0x707B
- BOOST_CHECK(data[4] == BFloat16(9.131327E-10f)); // 0x307B
- BOOST_CHECK(data[5] == BFloat16(-3.796875f)); // 0xC073
- BOOST_CHECK(data[6] == BFloat16(-3.1072295E29f)); // 0xF07B
- BOOST_CHECK(data[7] == BFloat16(-9.131327E-10f)); // 0xB07B
+ CHECK(data[0] == BFloat16(0.0f));
+ CHECK(data[1] == BFloat16(-1.0f));
+ CHECK(data[2] == BFloat16(3.796875f)); // 0x4073
+ CHECK(data[3] == BFloat16(3.1072295E29f)); // 0x707B
+ CHECK(data[4] == BFloat16(9.131327E-10f)); // 0x307B
+ CHECK(data[5] == BFloat16(-3.796875f)); // 0xC073
+ CHECK(data[6] == BFloat16(-3.1072295E29f)); // 0xF07B
+ CHECK(data[7] == BFloat16(-9.131327E-10f)); // 0xB07B
}
-BOOST_AUTO_TEST_CASE(ConvertConstantsBFloatToFloatTest)
+TEST_CASE("ConvertConstantsBFloatToFloatTest")
{
armnn::Graph graph;
@@ -104,24 +105,24 @@ BOOST_AUTO_TEST_CASE(ConvertConstantsBFloatToFloatTest)
fc->GetOutputSlot().Connect(output->GetInputSlot(0));
//Test the tensor info is correct.
- BOOST_CHECK(fc->m_Weight->GetTensorInfo().GetDataType() == armnn::DataType::BFloat16);
+ CHECK(fc->m_Weight->GetTensorInfo().GetDataType() == armnn::DataType::BFloat16);
// Run the optimizer
armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(ConvertConstantsBFloatToFloat()));
//Test the tensor info is correct.
- BOOST_CHECK(fc->m_Weight->GetTensorInfo().GetDataType() == armnn::DataType::Float32);
+ CHECK(fc->m_Weight->GetTensorInfo().GetDataType() == armnn::DataType::Float32);
// Now test the data matches float32 data
const float* data = fc->m_Weight->GetConstTensor<float>();
- BOOST_CHECK(data[0] == 0.0f);
- BOOST_CHECK(data[1] == -1.0f);
- BOOST_CHECK(data[2] == 3.796875f);
- BOOST_CHECK(data[3] == 3.1072295E29f);
- BOOST_CHECK(data[4] == 9.131327E-10f);
- BOOST_CHECK(data[5] == -3.796875f);
- BOOST_CHECK(data[6] == -3.1072295E29f);
- BOOST_CHECK(data[7] == -9.131327E-10f);
+ CHECK(data[0] == 0.0f);
+ CHECK(data[1] == -1.0f);
+ CHECK(data[2] == 3.796875f);
+ CHECK(data[3] == 3.1072295E29f);
+ CHECK(data[4] == 9.131327E-10f);
+ CHECK(data[5] == -3.796875f);
+ CHECK(data[6] == -3.1072295E29f);
+ CHECK(data[7] == -9.131327E-10f);
}
-BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
+}
\ No newline at end of file
diff --git a/src/armnn/test/optimizations/ConvertConstantsFloatToHalfTests.cpp b/src/armnn/test/optimizations/ConvertConstantsFloatToHalfTests.cpp
index 1dfe7f431c..e6cca4f7bf 100644
--- a/src/armnn/test/optimizations/ConvertConstantsFloatToHalfTests.cpp
+++ b/src/armnn/test/optimizations/ConvertConstantsFloatToHalfTests.cpp
@@ -8,14 +8,15 @@
#include <Optimizer.hpp>
#include <Half.hpp>
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
using namespace armnn;
-BOOST_AUTO_TEST_SUITE(Optimizer)
+TEST_SUITE("Optimizer")
+{
using namespace armnn::optimizations;
-BOOST_AUTO_TEST_CASE(ConvertConstantsFloatToHalfTest)
+TEST_CASE("ConvertConstantsFloatToHalfTest")
{
armnn::Graph graph;
@@ -41,20 +42,20 @@ BOOST_AUTO_TEST_CASE(ConvertConstantsFloatToHalfTest)
fc->GetOutputSlot().Connect(output->GetInputSlot(0));
// Check tensor data type before conversion
- BOOST_CHECK(fc->m_Weight->GetTensorInfo().GetDataType() == armnn::DataType::Float32);
+ CHECK(fc->m_Weight->GetTensorInfo().GetDataType() == armnn::DataType::Float32);
// Run the optimizer
armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(ConvertConstantsFloatToHalf()));
// Check tensor data type after conversion
- BOOST_CHECK(fc->m_Weight->GetTensorInfo().GetDataType() == armnn::DataType::Float16);
+ CHECK(fc->m_Weight->GetTensorInfo().GetDataType() == armnn::DataType::Float16);
// Check whether data matches expected fp16 data
const Half* data = fc->m_Weight->GetConstTensor<Half>();
- BOOST_CHECK(data[0] == Half(1.0f));
- BOOST_CHECK(data[1] == Half(2.0f));
- BOOST_CHECK(data[2] == Half(3.0f));
- BOOST_CHECK(data[3] == Half(4.0f));
+ CHECK(data[0] == Half(1.0f));
+ CHECK(data[1] == Half(2.0f));
+ CHECK(data[2] == Half(3.0f));
+ CHECK(data[3] == Half(4.0f));
}
-BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
+}
\ No newline at end of file
diff --git a/src/armnn/test/optimizations/ConvertConstantsHalfToFloatTests.cpp b/src/armnn/test/optimizations/ConvertConstantsHalfToFloatTests.cpp
index 1ddf5262e8..2ec1279f33 100644
--- a/src/armnn/test/optimizations/ConvertConstantsHalfToFloatTests.cpp
+++ b/src/armnn/test/optimizations/ConvertConstantsHalfToFloatTests.cpp
@@ -7,12 +7,13 @@
#include <Optimizer.hpp>
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
-BOOST_AUTO_TEST_SUITE(Optimizer)
+TEST_SUITE("Optimizer")
+{
using namespace armnn::optimizations;
-BOOST_AUTO_TEST_CASE(ConvertConstantsHalfToFloatTest)
+TEST_CASE("ConvertConstantsHalfToFloatTest")
{
armnn::Graph graph;
@@ -41,20 +42,20 @@ BOOST_AUTO_TEST_CASE(ConvertConstantsHalfToFloatTest)
fc->GetOutputSlot().Connect(output->GetInputSlot(0));
//Test the tensor info is correct.
- BOOST_CHECK(fc->m_Weight->GetTensorInfo().GetDataType() == armnn::DataType::Float16);
+ CHECK(fc->m_Weight->GetTensorInfo().GetDataType() == armnn::DataType::Float16);
// Run the optimizer
armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(ConvertConstantsHalfToFloat()));
//Test the tensor info is correct.
- BOOST_CHECK(fc->m_Weight->GetTensorInfo().GetDataType() == armnn::DataType::Float32);
+ CHECK(fc->m_Weight->GetTensorInfo().GetDataType() == armnn::DataType::Float32);
// Now test the data matches float32 data
const float* data = fc->m_Weight->GetConstTensor<float>();
- BOOST_CHECK(1.0f == data[0]);
- BOOST_CHECK(2.0f == data[1]);
- BOOST_CHECK(3.0f == data[2]);
- BOOST_CHECK(4.0f == data[3]);
+ CHECK(1.0f == data[0]);
+ CHECK(2.0f == data[1]);
+ CHECK(3.0f == data[2]);
+ CHECK(4.0f == data[3]);
}
-BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
+}
\ No newline at end of file
diff --git a/src/armnn/test/optimizations/FoldPadTests.cpp b/src/armnn/test/optimizations/FoldPadTests.cpp
index 20cfab1cb7..7b4ac4170f 100644
--- a/src/armnn/test/optimizations/FoldPadTests.cpp
+++ b/src/armnn/test/optimizations/FoldPadTests.cpp
@@ -6,15 +6,16 @@
#include "LayersFwd.hpp"
#include <Network.hpp>
#include <test/TestUtils.hpp>
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
#include <backendsCommon/TensorHandle.hpp>
#include <Optimizer.hpp>
-BOOST_AUTO_TEST_SUITE(Optimizer)
+TEST_SUITE("Optimizer")
+{
using namespace armnn;
using namespace armnn::optimizations;
-BOOST_AUTO_TEST_CASE(FoldPadLayerIntoConvolution2dLayer)
+TEST_CASE("FoldPadLayerIntoConvolution2dLayer")
{
Graph graph;
const unsigned int inputShape[] = {1, 2, 2, 3};
@@ -67,7 +68,7 @@ BOOST_AUTO_TEST_CASE(FoldPadLayerIntoConvolution2dLayer)
(conv2dLayerParams.m_BiasEnabled == false) && (conv2dLayerParams.m_DataLayout == DataLayout::NHWC);
};
- BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(),
+ CHECK(CheckSequence(graph.cbegin(), graph.cend(),
&IsLayerOfType<InputLayer>,
&IsLayerOfType<PadLayer>,
checkSimpleConv2d,
@@ -85,13 +86,13 @@ BOOST_AUTO_TEST_CASE(FoldPadLayerIntoConvolution2dLayer)
(conv2dLayerParams.m_BiasEnabled == false) && (conv2dLayerParams.m_DataLayout == DataLayout::NHWC);
};
- BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(),
+ CHECK(CheckSequence(graph.cbegin(), graph.cend(),
&IsLayerOfType<InputLayer>,
checkPadFoldedIntoConv2d,
&IsLayerOfType<OutputLayer>));
}
-BOOST_AUTO_TEST_CASE(FoldPadLayerIntoDepthwiseConvolution2dLayer)
+TEST_CASE("FoldPadLayerIntoDepthwiseConvolution2dLayer")
{
Graph graph;
const unsigned int inputShape[] = {1, 2, 2, 3};
@@ -146,7 +147,7 @@ BOOST_AUTO_TEST_CASE(FoldPadLayerIntoDepthwiseConvolution2dLayer)
(depthwiseConv2dLayerParams.m_DataLayout == DataLayout::NHWC);
};
- BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(),
+ CHECK(CheckSequence(graph.cbegin(), graph.cend(),
&IsLayerOfType<InputLayer>,
&IsLayerOfType<PadLayer>,
checkSimpleDepthwiseConv2d,
@@ -166,13 +167,13 @@ BOOST_AUTO_TEST_CASE(FoldPadLayerIntoDepthwiseConvolution2dLayer)
(depthwiseConv2dLayerParams.m_DataLayout == DataLayout::NHWC);
};
- BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(),
+ CHECK(CheckSequence(graph.cbegin(), graph.cend(),
&IsLayerOfType<InputLayer>,
checkPadFoldedIntoDepthwiseConv2d,
&IsLayerOfType<OutputLayer>));
}
-BOOST_AUTO_TEST_CASE(FoldPadLayerIntoPooling2dLayer)
+TEST_CASE("FoldPadLayerIntoPooling2dLayer")
{
Graph graph;
const unsigned int inputShape[] = {1, 2, 2, 3};
@@ -218,7 +219,7 @@ BOOST_AUTO_TEST_CASE(FoldPadLayerIntoPooling2dLayer)
(pool2dLayer->GetParameters() == pooling2dDescriptor);
};
- BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(),
+ CHECK(CheckSequence(graph.cbegin(), graph.cend(),
&IsLayerOfType<InputLayer>,
&IsLayerOfType<PadLayer>,
checkSimplePool2d,
@@ -248,13 +249,13 @@ BOOST_AUTO_TEST_CASE(FoldPadLayerIntoPooling2dLayer)
(pool2dLayerParams.m_PadBottom == 1) && (pool2dLayerParams.m_PaddingMethod == PaddingMethod::IgnoreValue);
};
- BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(),
+ CHECK(CheckSequence(graph.cbegin(), graph.cend(),
&IsLayerOfType<InputLayer>,
checkPadFoldedIntoPool2d,
&IsLayerOfType<OutputLayer>));
}
-BOOST_AUTO_TEST_CASE(FoldPadLayerIntoPooling2d_PadWithMultipleOutputsShouldNotBeOptimized)
+TEST_CASE("FoldPadLayerIntoPooling2d_PadWithMultipleOutputsShouldNotBeOptimized")
{
// In this test case we'll set up a pad layer with two outputs. One goes to a pooling layer and the other
// goes to an output layer. FoldPadLayerIntoPooling2d should not optimize this graph as it uses the
@@ -308,7 +309,7 @@ BOOST_AUTO_TEST_CASE(FoldPadLayerIntoPooling2d_PadWithMultipleOutputsShouldNotBe
};
// Initial sequence.
- BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(),
+ CHECK(CheckSequence(graph.cbegin(), graph.cend(),
&IsLayerOfType<InputLayer>,
&IsLayerOfType<PadLayer>,
checkSimplePool2d,
@@ -318,7 +319,7 @@ BOOST_AUTO_TEST_CASE(FoldPadLayerIntoPooling2d_PadWithMultipleOutputsShouldNotBe
armnn::Optimizer::Pass(graph, MakeOptimizations(FoldPadIntoPooling2d()));
// The network should not change.
- BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(),
+ CHECK(CheckSequence(graph.cbegin(), graph.cend(),
&IsLayerOfType<InputLayer>,
&IsLayerOfType<PadLayer>,
checkSimplePool2d,
@@ -326,7 +327,7 @@ BOOST_AUTO_TEST_CASE(FoldPadLayerIntoPooling2d_PadWithMultipleOutputsShouldNotBe
&IsLayerOfType<OutputLayer>));
}
-BOOST_AUTO_TEST_CASE(FoldPadLayerIntoPooling2dLayer_PoolingLayerWithExcludePaddingShouldNotTakeMorePadding)
+TEST_CASE("FoldPadLayerIntoPooling2dLayer_PoolingLayerWithExcludePaddingShouldNotTakeMorePadding")
{
// In this test we set up: input, Pad layer, Pooling layer that includes padding, output layer. The optimization
should not work as the pooling layer already includes an existing pad and specifies PaddingMethod::Exclude.
@@ -380,7 +381,7 @@ BOOST_AUTO_TEST_CASE(FoldPadLayerIntoPooling2dLayer_PoolingLayerWithExcludePaddi
(pool2dLayer->GetParameters() == pooling2dDescriptor);
};
- BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(),
+ CHECK(CheckSequence(graph.cbegin(), graph.cend(),
&IsLayerOfType<InputLayer>,
&IsLayerOfType<PadLayer>,
checkSimplePool2d,
@@ -389,14 +390,14 @@ BOOST_AUTO_TEST_CASE(FoldPadLayerIntoPooling2dLayer_PoolingLayerWithExcludePaddi
armnn::Optimizer::Pass(graph, MakeOptimizations(FoldPadIntoPooling2d()));
// The optimization should not have modified the graph.
- BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(),
+ CHECK(CheckSequence(graph.cbegin(), graph.cend(),
&IsLayerOfType<InputLayer>,
&IsLayerOfType<PadLayer>,
checkSimplePool2d,
&IsLayerOfType<OutputLayer>));
}
-BOOST_AUTO_TEST_CASE(FoldPadLayerIntoPooling2dLayer_MaxPoolingLayerWithLargePadValueShouldNotBeFolded)
+TEST_CASE("FoldPadLayerIntoPooling2dLayer_MaxPoolingLayerWithLargePadValueShouldNotBeFolded")
{
// In this test we set up: input, Pad layer with a large pad value, Max Pooling layer, output layer. The optimization
// should not work as the pad value will modify the result of the max pooling layer.
@@ -447,7 +448,7 @@ BOOST_AUTO_TEST_CASE(FoldPadLayerIntoPooling2dLayer_MaxPoolingLayerWithLargePadV
(pool2dLayer->GetParameters() == pooling2dDescriptor);
};
- BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(),
+ CHECK(CheckSequence(graph.cbegin(), graph.cend(),
&IsLayerOfType<InputLayer>,
&IsLayerOfType<PadLayer>,
checkSimplePool2d,
@@ -456,7 +457,7 @@ BOOST_AUTO_TEST_CASE(FoldPadLayerIntoPooling2dLayer_MaxPoolingLayerWithLargePadV
armnn::Optimizer::Pass(graph, MakeOptimizations(FoldPadIntoPooling2d()));
// The optimization should not have modified the graph.
- BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(),
+ CHECK(CheckSequence(graph.cbegin(), graph.cend(),
&IsLayerOfType<InputLayer>,
&IsLayerOfType<PadLayer>,
checkSimplePool2d,
@@ -464,7 +465,7 @@ BOOST_AUTO_TEST_CASE(FoldPadLayerIntoPooling2dLayer_MaxPoolingLayerWithLargePadV
}
#if defined(ARMNNREF_ENABLED)
-BOOST_AUTO_TEST_CASE(FoldPadLayerIntoPooling2dLayer_ExecuteInferenceWithAndWithoutOptimization)
+TEST_CASE("FoldPadLayerIntoPooling2dLayer_ExecuteInferenceWithAndWithoutOptimization")
{
// The idea of this test is to run a simple pad+pool2d network twice. Once
// with FoldPadLayerIntoPooling2dLayer enabled and a second time with it
@@ -523,7 +524,7 @@ BOOST_AUTO_TEST_CASE(FoldPadLayerIntoPooling2dLayer_ExecuteInferenceWithAndWitho
IOptimizedNetworkPtr optimizedNetwork = Optimize(*network, {Compute::CpuRef}, run->GetDeviceSpec());
// Load network into runtime
NetworkId networkIdentifier;
- BOOST_TEST(run->LoadNetwork(networkIdentifier, std::move(optimizedNetwork)) == Status::Success);
+ CHECK(run->LoadNetwork(networkIdentifier, std::move(optimizedNetwork)) == Status::Success);
InputTensors inputTensors{{0, ConstTensor(run->GetInputTensorInfo(networkIdentifier, 0), inputData.data())}};
@@ -544,7 +545,7 @@ BOOST_AUTO_TEST_CASE(FoldPadLayerIntoPooling2dLayer_ExecuteInferenceWithAndWitho
// Optimize and load and execute it a second time.
optimizedNetwork = Optimize(*network, {Compute::CpuRef}, run->GetDeviceSpec());
- BOOST_TEST(run->LoadNetwork(networkIdentifier, std::move(optimizedNetwork)) == Status::Success);
+ CHECK(run->LoadNetwork(networkIdentifier, std::move(optimizedNetwork)) == Status::Success);
std::vector<float> goldenData(32, 0.0f);
std::vector<float> padOutputData(72, 0.0f);
OutputTensors goldenTensors{{0, Tensor(outputInfo, goldenData.data())},
@@ -552,7 +553,7 @@ BOOST_AUTO_TEST_CASE(FoldPadLayerIntoPooling2dLayer_ExecuteInferenceWithAndWitho
run->EnqueueWorkload(networkIdentifier, inputTensors, goldenTensors);
// Now we can compare goldenData against optimizedData. They should be the same.
- BOOST_TEST(std::equal(goldenData.begin(), goldenData.end(), optimizedData.begin()));
+ CHECK(std::equal(goldenData.begin(), goldenData.end(), optimizedData.begin()));
}
catch (const std::exception& e)
{
@@ -561,7 +562,7 @@ BOOST_AUTO_TEST_CASE(FoldPadLayerIntoPooling2dLayer_ExecuteInferenceWithAndWitho
}
}
-BOOST_AUTO_TEST_CASE(FoldPadLayerIntoConv2dLayer_ExecuteInferenceWithAndWithoutOptimization)
+TEST_CASE("FoldPadLayerIntoConv2dLayer_ExecuteInferenceWithAndWithoutOptimization")
{
// The idea of this test is to run a simple pad+conv2d network twice. Once
// with FoldPadLayerIntoConv2dLayer enabled and a second time with it
@@ -641,7 +642,7 @@ BOOST_AUTO_TEST_CASE(FoldPadLayerIntoConv2dLayer_ExecuteInferenceWithAndWithoutO
IOptimizedNetworkPtr optimizedNetwork = Optimize(*network, {Compute::CpuRef}, run->GetDeviceSpec());
// Load network into runtime
NetworkId networkIdentifier;
- BOOST_TEST(run->LoadNetwork(networkIdentifier, std::move(optimizedNetwork)) == Status::Success);
+ CHECK(run->LoadNetwork(networkIdentifier, std::move(optimizedNetwork)) == Status::Success);
InputTensors inputTensors{{0, ConstTensor(run->GetInputTensorInfo(networkIdentifier, 0), inputData.data())}};
@@ -662,7 +663,7 @@ BOOST_AUTO_TEST_CASE(FoldPadLayerIntoConv2dLayer_ExecuteInferenceWithAndWithoutO
// Optimize and load and execute it a second time.
optimizedNetwork = Optimize(*network, {Compute::CpuRef}, run->GetDeviceSpec());
- BOOST_TEST(run->LoadNetwork(networkIdentifier, std::move(optimizedNetwork)) == Status::Success);
+ CHECK(run->LoadNetwork(networkIdentifier, std::move(optimizedNetwork)) == Status::Success);
std::vector<float> goldenData(100, 0.0f);
std::vector<float> padOutputData(108, 0.0f);
OutputTensors goldenTensors{{0, Tensor(outputInfo, goldenData.data())},
@@ -670,7 +671,7 @@ BOOST_AUTO_TEST_CASE(FoldPadLayerIntoConv2dLayer_ExecuteInferenceWithAndWithoutO
run->EnqueueWorkload(networkIdentifier, inputTensors, goldenTensors);
// Now we can compare goldenData against optimizedData. They should be the same.
- BOOST_TEST(std::equal(goldenData.begin(), goldenData.end(), optimizedData.begin()));
+ CHECK(std::equal(goldenData.begin(), goldenData.end(), optimizedData.begin()));
}
catch (const std::exception& e)
{
@@ -679,7 +680,7 @@ BOOST_AUTO_TEST_CASE(FoldPadLayerIntoConv2dLayer_ExecuteInferenceWithAndWithoutO
}
}
-BOOST_AUTO_TEST_CASE(FoldPadLayerIntoDepthwiseConv2dLayer_ExecuteInferenceWithAndWithoutOptimization)
+TEST_CASE("FoldPadLayerIntoDepthwiseConv2dLayer_ExecuteInferenceWithAndWithoutOptimization")
{
// The idea of this test is to run a simple pad+depthwiseconv2d network twice. Once
// with FoldPadLayerIntoDepthwiseConv2dLayer enabled and a second time with it
@@ -759,7 +760,7 @@ BOOST_AUTO_TEST_CASE(FoldPadLayerIntoDepthwiseConv2dLayer_ExecuteInferenceWithAn
IOptimizedNetworkPtr optimizedNetwork = Optimize(*network, {Compute::CpuRef}, run->GetDeviceSpec());
// Load network into runtime
NetworkId networkIdentifier;
- BOOST_TEST(run->LoadNetwork(networkIdentifier, std::move(optimizedNetwork)) == Status::Success);
+ CHECK(run->LoadNetwork(networkIdentifier, std::move(optimizedNetwork)) == Status::Success);
InputTensors inputTensors{{0, ConstTensor(run->GetInputTensorInfo(networkIdentifier, 0), inputData.data())}};
@@ -780,7 +781,7 @@ BOOST_AUTO_TEST_CASE(FoldPadLayerIntoDepthwiseConv2dLayer_ExecuteInferenceWithAn
// Optimize and load and execute it a second time.
optimizedNetwork = Optimize(*network, {Compute::CpuRef}, run->GetDeviceSpec());
- BOOST_TEST(run->LoadNetwork(networkIdentifier, std::move(optimizedNetwork)) == Status::Success);
+ CHECK(run->LoadNetwork(networkIdentifier, std::move(optimizedNetwork)) == Status::Success);
std::vector<float> goldenData(300, 0.0f);
std::vector<float> padOutputData(108, 0.0f);
OutputTensors goldenTensors{{0, Tensor(outputInfo, goldenData.data())},
@@ -788,7 +789,7 @@ BOOST_AUTO_TEST_CASE(FoldPadLayerIntoDepthwiseConv2dLayer_ExecuteInferenceWithAn
run->EnqueueWorkload(networkIdentifier, inputTensors, goldenTensors);
// Now we can compare goldenData against optimizedData. They should be the same.
- BOOST_TEST(std::equal(goldenData.begin(), goldenData.end(), optimizedData.begin()));
+ CHECK(std::equal(goldenData.begin(), goldenData.end(), optimizedData.begin()));
}
catch (const std::exception& e)
{
@@ -798,4 +799,4 @@ BOOST_AUTO_TEST_CASE(FoldPadLayerIntoDepthwiseConv2dLayer_ExecuteInferenceWithAn
}
#endif
-BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
+}
\ No newline at end of file
diff --git a/src/armnn/test/optimizations/Fp32NetworkToBf16ConverterTests.cpp b/src/armnn/test/optimizations/Fp32NetworkToBf16ConverterTests.cpp
index f93fa77b0d..384b14c0cf 100644
--- a/src/armnn/test/optimizations/Fp32NetworkToBf16ConverterTests.cpp
+++ b/src/armnn/test/optimizations/Fp32NetworkToBf16ConverterTests.cpp
@@ -7,12 +7,13 @@
#include <Optimizer.hpp>
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
-BOOST_AUTO_TEST_SUITE(Optimizer)
+TEST_SUITE("Optimizer")
+{
using namespace armnn::optimizations;
-BOOST_AUTO_TEST_CASE(Fp32NetworkToBf16OptimizationNoConversionTest)
+TEST_CASE("Fp32NetworkToBf16OptimizationNoConversionTest")
{
armnn::Graph graph;
@@ -31,18 +32,18 @@ BOOST_AUTO_TEST_CASE(Fp32NetworkToBf16OptimizationNoConversionTest)
input->GetOutputSlot().Connect(floor->GetInputSlot(0));
floor->GetOutputSlot().Connect(output->GetInputSlot(0));
- BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
+ CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
&IsLayerOfType<armnn::FloorLayer>, &IsLayerOfType<armnn::OutputLayer>));
// Run the optimizer
armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(Fp32NetworkToBf16Converter()));
- BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
+ CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
&IsLayerOfType<armnn::FloorLayer>,
&IsLayerOfType<armnn::OutputLayer>));
}
-BOOST_AUTO_TEST_CASE(Fp32NetworkToBf16OptimizationConv2DTest)
+TEST_CASE("Fp32NetworkToBf16OptimizationConv2DTest")
{
armnn::Graph graph;
@@ -82,37 +83,37 @@ BOOST_AUTO_TEST_CASE(Fp32NetworkToBf16OptimizationConv2DTest)
input->GetOutputSlot().Connect(conv->GetInputSlot(0));
conv->GetOutputSlot().Connect(output->GetInputSlot(0));
- BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
+ CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
&IsLayerOfType<armnn::Convolution2dLayer>, &IsLayerOfType<armnn::OutputLayer>));
// Run the optimizer
armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(Fp32NetworkToBf16Converter()));
- BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
+ CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
&IsLayerOfType<armnn::ConvertFp32ToBf16Layer>, &IsLayerOfType<armnn::Convolution2dLayer>,
&IsLayerOfType<armnn::OutputLayer>));
armnn::TensorInfo inputTensor = conv->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo();
armnn::TensorInfo outputTensor = conv->GetOutputSlot(0).GetTensorInfo();
- BOOST_TEST((conv->GetDataType() == armnn::DataType::BFloat16));
- BOOST_TEST((conv->m_Weight->GetTensorInfo().GetDataType() == armnn::DataType::BFloat16));
- BOOST_TEST((conv->m_Bias->GetTensorInfo().GetDataType() == armnn::DataType::Float32));
- BOOST_TEST((inputTensor.GetDataType() == armnn::DataType::BFloat16));
- BOOST_TEST((outputTensor.GetDataType() == armnn::DataType::Float32));
+ CHECK((conv->GetDataType() == armnn::DataType::BFloat16));
+ CHECK((conv->m_Weight->GetTensorInfo().GetDataType() == armnn::DataType::BFloat16));
+ CHECK((conv->m_Bias->GetTensorInfo().GetDataType() == armnn::DataType::Float32));
+ CHECK((inputTensor.GetDataType() == armnn::DataType::BFloat16));
+ CHECK((outputTensor.GetDataType() == armnn::DataType::Float32));
// Check whether data matches expected Bf16 data
const armnn::BFloat16* data = conv->m_Weight->GetConstTensor<armnn::BFloat16>();
- BOOST_CHECK(data[0] == armnn::BFloat16(0.0f));
- BOOST_CHECK(data[1] == armnn::BFloat16(-1.0f));
- BOOST_CHECK(data[2] == armnn::BFloat16(3.796875f)); // 0x4073
- BOOST_CHECK(data[3] == armnn::BFloat16(3.1072295E29f)); // 0x707B
- BOOST_CHECK(data[4] == armnn::BFloat16(9.131327E-10f)); // 0x307B
- BOOST_CHECK(data[5] == armnn::BFloat16(-3.796875f)); // 0xC073
- BOOST_CHECK(data[6] == armnn::BFloat16(-3.1072295E29f)); // 0xF07B
- BOOST_CHECK(data[7] == armnn::BFloat16(-9.131327E-10f)); // 0xB07B
+ CHECK(data[0] == armnn::BFloat16(0.0f));
+ CHECK(data[1] == armnn::BFloat16(-1.0f));
+ CHECK(data[2] == armnn::BFloat16(3.796875f)); // 0x4073
+ CHECK(data[3] == armnn::BFloat16(3.1072295E29f)); // 0x707B
+ CHECK(data[4] == armnn::BFloat16(9.131327E-10f)); // 0x307B
+ CHECK(data[5] == armnn::BFloat16(-3.796875f)); // 0xC073
+ CHECK(data[6] == armnn::BFloat16(-3.1072295E29f)); // 0xF07B
+ CHECK(data[7] == armnn::BFloat16(-9.131327E-10f)); // 0xB07B
}
-BOOST_AUTO_TEST_CASE(Fp32NetworkToBf16OptimizationFullyConnectedTest)
+TEST_CASE("Fp32NetworkToBf16OptimizationFullyConnectedTest")
{
armnn::Graph graph;
@@ -152,35 +153,35 @@ BOOST_AUTO_TEST_CASE(Fp32NetworkToBf16OptimizationFullyConnectedTest)
input->GetOutputSlot().Connect(fc->GetInputSlot(0));
fc->GetOutputSlot().Connect(output->GetInputSlot(0));
- BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
+ CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
&IsLayerOfType<armnn::FullyConnectedLayer>, &IsLayerOfType<armnn::OutputLayer>));
// Run the optimizer
armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(Fp32NetworkToBf16Converter()));
- BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
+ CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
&IsLayerOfType<armnn::ConvertFp32ToBf16Layer>, &IsLayerOfType<armnn::FullyConnectedLayer>,
&IsLayerOfType<armnn::OutputLayer>));
armnn::TensorInfo inputTensor = fc->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo();
armnn::TensorInfo outputTensor = fc->GetOutputSlot(0).GetTensorInfo();
- BOOST_TEST((fc->GetDataType() == armnn::DataType::BFloat16));
- BOOST_TEST((fc->m_Weight->GetTensorInfo().GetDataType() == armnn::DataType::BFloat16));
- BOOST_TEST((fc->m_Bias->GetTensorInfo().GetDataType() == armnn::DataType::Float32));
- BOOST_TEST((inputTensor.GetDataType() == armnn::DataType::BFloat16));
- BOOST_TEST((outputTensor.GetDataType() == armnn::DataType::Float32));
+ CHECK((fc->GetDataType() == armnn::DataType::BFloat16));
+ CHECK((fc->m_Weight->GetTensorInfo().GetDataType() == armnn::DataType::BFloat16));
+ CHECK((fc->m_Bias->GetTensorInfo().GetDataType() == armnn::DataType::Float32));
+ CHECK((inputTensor.GetDataType() == armnn::DataType::BFloat16));
+ CHECK((outputTensor.GetDataType() == armnn::DataType::Float32));
// Check whether data matches expected Bf16 data
const armnn::BFloat16* data = fc->m_Weight->GetConstTensor<armnn::BFloat16>();
- BOOST_CHECK(data[0] == armnn::BFloat16(0.0f));
- BOOST_CHECK(data[1] == armnn::BFloat16(-1.0f));
- BOOST_CHECK(data[2] == armnn::BFloat16(3.796875f)); // 0x4073
- BOOST_CHECK(data[3] == armnn::BFloat16(3.1072295E29f)); // 0x707B
- BOOST_CHECK(data[4] == armnn::BFloat16(9.131327E-10f)); // 0x307B
- BOOST_CHECK(data[5] == armnn::BFloat16(-3.796875f)); // 0xC073
- BOOST_CHECK(data[6] == armnn::BFloat16(-3.1072295E29f)); // 0xF07B
- BOOST_CHECK(data[7] == armnn::BFloat16(-9.131327E-10f)); // 0xB07B
+ CHECK(data[0] == armnn::BFloat16(0.0f));
+ CHECK(data[1] == armnn::BFloat16(-1.0f));
+ CHECK(data[2] == armnn::BFloat16(3.796875f)); // 0x4073
+ CHECK(data[3] == armnn::BFloat16(3.1072295E29f)); // 0x707B
+ CHECK(data[4] == armnn::BFloat16(9.131327E-10f)); // 0x307B
+ CHECK(data[5] == armnn::BFloat16(-3.796875f)); // 0xC073
+ CHECK(data[6] == armnn::BFloat16(-3.1072295E29f)); // 0xF07B
+ CHECK(data[7] == armnn::BFloat16(-9.131327E-10f)); // 0xB07B
}
-BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
+}
\ No newline at end of file
diff --git a/src/armnn/test/optimizations/Fp32NetworkToFp16ConverterTests.cpp b/src/armnn/test/optimizations/Fp32NetworkToFp16ConverterTests.cpp
index 16037a8c0f..e2ac1bd69e 100644
--- a/src/armnn/test/optimizations/Fp32NetworkToFp16ConverterTests.cpp
+++ b/src/armnn/test/optimizations/Fp32NetworkToFp16ConverterTests.cpp
@@ -7,12 +7,13 @@
#include <Optimizer.hpp>
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
-BOOST_AUTO_TEST_SUITE(Optimizer)
+TEST_SUITE("Optimizer")
+{
using namespace armnn::optimizations;
-BOOST_AUTO_TEST_CASE(Fp32NetworkToFp16OptimizationTest)
+TEST_CASE("Fp32NetworkToFp16OptimizationTest")
{
armnn::Graph graph;
@@ -31,15 +32,15 @@ BOOST_AUTO_TEST_CASE(Fp32NetworkToFp16OptimizationTest)
input->GetOutputSlot().Connect(floor->GetInputSlot(0));
floor->GetOutputSlot().Connect(output->GetInputSlot(0));
- BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
+ CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
&IsLayerOfType<armnn::FloorLayer>, &IsLayerOfType<armnn::OutputLayer>));
// Run the optimizer
armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(Fp32NetworkToFp16Converter()));
- BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
+ CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
&IsLayerOfType<armnn::ConvertFp32ToFp16Layer>, &IsLayerOfType<armnn::FloorLayer>,
&IsLayerOfType<armnn::ConvertFp16ToFp32Layer>, &IsLayerOfType<armnn::OutputLayer>));
}
-BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
+}
\ No newline at end of file
diff --git a/src/armnn/test/optimizations/FuseActivationTests.cpp b/src/armnn/test/optimizations/FuseActivationTests.cpp
index 71a554b567..9e332136f6 100644
--- a/src/armnn/test/optimizations/FuseActivationTests.cpp
+++ b/src/armnn/test/optimizations/FuseActivationTests.cpp
@@ -10,15 +10,15 @@
#include <armnn/INetwork.hpp>
#include <test/TestUtils.hpp>
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
#include <QuantizeHelper.hpp>
#include <string>
using namespace armnn;
-BOOST_AUTO_TEST_SUITE(Optimizer)
-
+TEST_SUITE("Optimizer")
+{
namespace armnn
{
@@ -352,8 +352,8 @@ void FuseActivationIntoPreviousLayerTest(ActivationDescriptor activationDescript
(layer->GetNameStr() == "fused-activation-into-receiverLayer");
};
- BOOST_CHECK(3 == graphFused.GetNumLayers());
- BOOST_TEST(CheckSequence(graphFused.cbegin(),
+ CHECK(3 == graphFused.GetNumLayers());
+ CHECK(CheckSequence(graphFused.cbegin(),
graphFused.cend(),
&IsLayerOfType<InputLayer>,
checkFusedConv2d,
@@ -361,7 +361,7 @@ void FuseActivationIntoPreviousLayerTest(ActivationDescriptor activationDescript
// Load network into runtime
NetworkId networkIdentifier;
- BOOST_TEST(run->LoadNetwork(networkIdentifier, std::move(optNetFused)) == Status::Success);
+ CHECK(run->LoadNetwork(networkIdentifier, std::move(optNetFused)) == Status::Success);
//Creates structures for inputs and outputs.
std::vector<float> data = GetVector<float>(LayerTest::inputSize, 1.0f, 0.1f);
@@ -374,7 +374,7 @@ void FuseActivationIntoPreviousLayerTest(ActivationDescriptor activationDescript
{0, Tensor(run->GetOutputTensorInfo(networkIdentifier, 0), outputDataFused.data())}};
// Execute network
- BOOST_TEST(run->EnqueueWorkload(networkIdentifier, inputTensorsFused, outputTensorsFused) == Status::Success);
+ CHECK(run->EnqueueWorkload(networkIdentifier, inputTensorsFused, outputTensorsFused) == Status::Success);
// SECOND NETWORK: NotFused
// Construct ArmNN network
@@ -388,8 +388,8 @@ void FuseActivationIntoPreviousLayerTest(ActivationDescriptor activationDescript
Graph& graphNotFused = GetGraphForTesting(optNetNotFused.get());
- BOOST_CHECK(5 == graphNotFused.GetNumLayers());
- BOOST_TEST(CheckSequence(graphNotFused.cbegin(),
+ CHECK(5 == graphNotFused.GetNumLayers());
+ CHECK(CheckSequence(graphNotFused.cbegin(),
graphNotFused.cend(),
&IsLayerOfType<InputLayer>,
&IsLayerOfType<LayerType>,
@@ -399,7 +399,7 @@ void FuseActivationIntoPreviousLayerTest(ActivationDescriptor activationDescript
// Load network into runtime
NetworkId networkIdentifierNotFused;
- BOOST_TEST(runNotFused->LoadNetwork(networkIdentifierNotFused, std::move(optNetNotFused)) == Status::Success);
+ CHECK(runNotFused->LoadNetwork(networkIdentifierNotFused, std::move(optNetNotFused)) == Status::Success);
//Creates structures for inputs and outputs.
std::vector<T> inputDataNotFused = armnnUtils::QuantizedVector<T>(data, scale, offset);
@@ -413,14 +413,14 @@ void FuseActivationIntoPreviousLayerTest(ActivationDescriptor activationDescript
{1, Tensor(runNotFused->GetOutputTensorInfo(networkIdentifierNotFused, 1), outputData2NotFused.data())}};
// Execute network
- BOOST_TEST(runNotFused->EnqueueWorkload(networkIdentifierNotFused, inputTensorsNotFused, outputTensorsNotFused)
+ CHECK(runNotFused->EnqueueWorkload(networkIdentifierNotFused, inputTensorsNotFused, outputTensorsNotFused)
== Status::Success);
// Check the output of the fused-activation matches with the output of the activation in the "NotFused" network
for (unsigned int n = 0; n < outputDataFused.size(); ++n)
{
- BOOST_CHECK_CLOSE(static_cast<float>(outputDataFused[n]), static_cast<float>(outputDataNotFused[n]),
- T(tolerance));
+ auto outputNotFused = static_cast<float>(outputDataNotFused[n]);
+ CHECK(static_cast<float>(outputDataFused[n]) == doctest::Approx(outputNotFused).epsilon(tolerance));
}
}
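
One conversion in this hunk is not purely mechanical. BOOST_CHECK_CLOSE expresses its tolerance as a percentage, while doctest's Approx::epsilon takes a relative fraction (0.01 meaning 1%), so carrying the same numeric value across changes the effective tolerance by a factor of 100. A minimal standalone sketch of the two idioms, with illustrative values only:

#include <doctest/doctest.h>

TEST_CASE("ApproxToleranceSketch")
{
    const float expected = 100.0f;
    const float actual   = 100.5f;
    // Boost.Test equivalent: BOOST_CHECK_CLOSE(actual, expected, 1.0); // within 1 percent
    // In doctest, epsilon is a relative fraction, so 1 percent is written as 0.01.
    CHECK(actual == doctest::Approx(expected).epsilon(0.01));
}
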
@@ -445,7 +445,7 @@ bool FuseActivationSimpleTest(ActivationDescriptor activationDescriptor, Compute
// Load network into runtime
NetworkId networkIdentifier;
- BOOST_TEST(run->LoadNetwork(networkIdentifier, std::move(optNetFused)) == Status::Success);
+ CHECK(run->LoadNetwork(networkIdentifier, std::move(optNetFused)) == Status::Success);
//Creates structures for inputs and outputs.
std::vector<float> data = GetVector<float>(LayerTest::inputSize, 1.0f, 0.1f);
@@ -476,7 +476,7 @@ bool FuseActivationSimpleTest(ActivationDescriptor activationDescriptor, Compute
using namespace armnn;
#if defined(ARMCOMPUTENEON_ENABLED)
// ReLu fused into Receiver Layers Float32
-BOOST_AUTO_TEST_CASE(FuseReLUIntoConvFloat32CpuAccTest)
+TEST_CASE("FuseReLUIntoConvFloat32CpuAccTest")
{
ActivationDescriptor activationDescriptor;
activationDescriptor.m_Function = ActivationFunction::ReLu;
@@ -484,7 +484,7 @@ BOOST_AUTO_TEST_CASE(FuseReLUIntoConvFloat32CpuAccTest)
FuseActivationIntoPreviousLayerTest<Convolution2dTest<DataType::Float32>, DataType::Float32>
(activationDescriptor, 0.0001f, Compute::CpuAcc);
}
-BOOST_AUTO_TEST_CASE(FuseReLUIntoDWConvFloat32CpuAccTest)
+TEST_CASE("FuseReLUIntoDWConvFloat32CpuAccTest")
{
ActivationDescriptor activationDescriptor;
activationDescriptor.m_Function = ActivationFunction::ReLu;
@@ -492,7 +492,7 @@ BOOST_AUTO_TEST_CASE(FuseReLUIntoDWConvFloat32CpuAccTest)
FuseActivationIntoPreviousLayerTest<DWConvolution2dTest<DataType::Float32>, DataType::Float32>
(activationDescriptor, 0.0001f, Compute::CpuAcc);
}
-BOOST_AUTO_TEST_CASE(FuseReLUIntoFullyConnectedFloat32CpuAccTest)
+TEST_CASE("FuseReLUIntoFullyConnectedFloat32CpuAccTest")
{
ActivationDescriptor activationDescriptor;
activationDescriptor.m_Function = ActivationFunction::ReLu;
@@ -500,7 +500,7 @@ BOOST_AUTO_TEST_CASE(FuseReLUIntoFullyConnectedFloat32CpuAccTest)
FuseActivationIntoPreviousLayerTest<FullyConnectedTest<DataType::Float32>, DataType::Float32>
(activationDescriptor, 0.0001f, Compute::CpuAcc);
}
-BOOST_AUTO_TEST_CASE(FuseReLUIntoBatchNormFloat32CpuAccTest)
+TEST_CASE("FuseReLUIntoBatchNormFloat32CpuAccTest")
{
ActivationDescriptor activationDescriptor;
activationDescriptor.m_Function = ActivationFunction::ReLu;
@@ -510,7 +510,7 @@ BOOST_AUTO_TEST_CASE(FuseReLUIntoBatchNormFloat32CpuAccTest)
}
// BoundedReLu fused into Receiver Layers Float32
-BOOST_AUTO_TEST_CASE(FuseBoundedReLUIntoConvFloat32CpuAccTest)
+TEST_CASE("FuseBoundedReLUIntoConvFloat32CpuAccTest")
{
ActivationDescriptor activationDescriptor;
activationDescriptor.m_Function = ActivationFunction::BoundedReLu;
@@ -520,7 +520,7 @@ BOOST_AUTO_TEST_CASE(FuseBoundedReLUIntoConvFloat32CpuAccTest)
FuseActivationIntoPreviousLayerTest<Convolution2dTest<DataType::Float32>, DataType::Float32>
(activationDescriptor, 0.0001f, Compute::CpuAcc);
}
-BOOST_AUTO_TEST_CASE(FuseBoundedReLUIntoDWConvFloat32CpuAccTest)
+TEST_CASE("FuseBoundedReLUIntoDWConvFloat32CpuAccTest")
{
ActivationDescriptor activationDescriptor;
activationDescriptor.m_Function = ActivationFunction::BoundedReLu;
@@ -530,7 +530,7 @@ BOOST_AUTO_TEST_CASE(FuseBoundedReLUIntoDWConvFloat32CpuAccTest)
FuseActivationIntoPreviousLayerTest < DWConvolution2dTest < DataType::Float32 > , DataType::Float32 >
(activationDescriptor, 0.0001f, Compute::CpuAcc);
}
-BOOST_AUTO_TEST_CASE(FuseBoundedReLUIntoFullyConnectedFloat32CpuAccTest)
+TEST_CASE("FuseBoundedReLUIntoFullyConnectedFloat32CpuAccTest")
{
ActivationDescriptor activationDescriptor;
activationDescriptor.m_Function = ActivationFunction::BoundedReLu;
@@ -540,7 +540,7 @@ BOOST_AUTO_TEST_CASE(FuseBoundedReLUIntoFullyConnectedFloat32CpuAccTest)
FuseActivationIntoPreviousLayerTest<FullyConnectedTest<DataType::Float32>, DataType::Float32>
(activationDescriptor, 0.0001f, Compute::CpuAcc);
}
-BOOST_AUTO_TEST_CASE(FuseBoundedReLUIntoBatchNormFloat32CpuAccTest)
+TEST_CASE("FuseBoundedReLUIntoBatchNormFloat32CpuAccTest")
{
ActivationDescriptor activationDescriptor;
activationDescriptor.m_Function = ActivationFunction::BoundedReLu;
@@ -552,7 +552,7 @@ BOOST_AUTO_TEST_CASE(FuseBoundedReLUIntoBatchNormFloat32CpuAccTest)
}
// ReLU fused into Receiver Layers QAsymmU8
-BOOST_AUTO_TEST_CASE(FuseReLUIntoConvQAsymmU8CpuAccTest)
+TEST_CASE("FuseReLUIntoConvQAsymmU8CpuAccTest")
{
ActivationDescriptor activationDescriptor;
activationDescriptor.m_Function = ActivationFunction::ReLu;
@@ -560,7 +560,7 @@ BOOST_AUTO_TEST_CASE(FuseReLUIntoConvQAsymmU8CpuAccTest)
FuseActivationIntoPreviousLayerTest<Convolution2dTest<DataType::QAsymmU8>, DataType::QAsymmU8>
(activationDescriptor, 0.0001f, Compute::CpuAcc);
}
-BOOST_AUTO_TEST_CASE(FuseReLUIntoDWConvQAsymmU8CpuAccTest)
+TEST_CASE("FuseReLUIntoDWConvQAsymmU8CpuAccTest")
{
ActivationDescriptor activationDescriptor;
activationDescriptor.m_Function = ActivationFunction::ReLu;
@@ -568,7 +568,7 @@ BOOST_AUTO_TEST_CASE(FuseReLUIntoDWConvQAsymmU8CpuAccTest)
FuseActivationIntoPreviousLayerTest<DWConvolution2dTest<DataType::QAsymmU8>, DataType::QAsymmU8>
(activationDescriptor, 0.0001f, Compute::CpuAcc);
}
-BOOST_AUTO_TEST_CASE(FuseReLUIntoFullyConnectedQAsymmU8CpuAccTest)
+TEST_CASE("FuseReLUIntoFullyConnectedQAsymmU8CpuAccTest")
{
ActivationDescriptor activationDescriptor;
activationDescriptor.m_Function = ActivationFunction::ReLu;
@@ -578,7 +578,7 @@ BOOST_AUTO_TEST_CASE(FuseReLUIntoFullyConnectedQAsymmU8CpuAccTest)
}
// BoundedReLu fused into Receiver Layers QAsymmS8
-BOOST_AUTO_TEST_CASE(FuseBoundedReLUIntoConvQASymmS8CpuAccTest)
+TEST_CASE("FuseBoundedReLUIntoConvQASymmS8CpuAccTest")
{
ActivationDescriptor activationDescriptor;
activationDescriptor.m_Function = ActivationFunction::BoundedReLu;
@@ -588,7 +588,7 @@ BOOST_AUTO_TEST_CASE(FuseBoundedReLUIntoConvQASymmS8CpuAccTest)
FuseActivationIntoPreviousLayerTest<Convolution2dTest<DataType::QAsymmS8>, DataType::QAsymmS8>
(activationDescriptor, 0.0001f, Compute::CpuAcc);
}
-BOOST_AUTO_TEST_CASE(FuseBoundedReLUIntoDWConvQASymmS8CpuAccTest)
+TEST_CASE("FuseBoundedReLUIntoDWConvQASymmS8CpuAccTest")
{
ActivationDescriptor activationDescriptor;
activationDescriptor.m_Function = ActivationFunction::BoundedReLu;
@@ -598,7 +598,7 @@ BOOST_AUTO_TEST_CASE(FuseBoundedReLUIntoDWConvQASymmS8CpuAccTest)
FuseActivationIntoPreviousLayerTest < DWConvolution2dTest < DataType::QAsymmS8 > , DataType::QAsymmS8 >
(activationDescriptor, 0.0001f, Compute::CpuAcc);
}
-BOOST_AUTO_TEST_CASE(FuseBoundedReLUIntoFullyConnectedQASymmS8CpuAccTest)
+TEST_CASE("FuseBoundedReLUIntoFullyConnectedQASymmS8CpuAccTest")
{
ActivationDescriptor activationDescriptor;
activationDescriptor.m_Function = ActivationFunction::BoundedReLu;
@@ -610,7 +610,7 @@ BOOST_AUTO_TEST_CASE(FuseBoundedReLUIntoFullyConnectedQASymmS8CpuAccTest)
}
// TanH fused into Receiver Layers Float32
-BOOST_AUTO_TEST_CASE(FuseTanHIntoConvFloat32CpuAccTest)
+TEST_CASE("FuseTanHIntoConvFloat32CpuAccTest")
{
ActivationDescriptor activationDescriptor;
activationDescriptor.m_Function = ActivationFunction::TanH;
@@ -620,7 +620,7 @@ BOOST_AUTO_TEST_CASE(FuseTanHIntoConvFloat32CpuAccTest)
}
// HardSwish fused into Receiver Layers Float32
-BOOST_AUTO_TEST_CASE(FuseHardSwishIntoConvFloat32CpuAccTest)
+TEST_CASE("FuseHardSwishIntoConvFloat32CpuAccTest")
{
ActivationDescriptor activationDescriptor;
activationDescriptor.m_Function = ActivationFunction::HardSwish;
@@ -630,7 +630,7 @@ BOOST_AUTO_TEST_CASE(FuseHardSwishIntoConvFloat32CpuAccTest)
}
// Test that all receiver layers followed by all activation layers work, either fused or not fused
-BOOST_AUTO_TEST_CASE(LayerFollowedByActivationFloat32CpuAccTest)
+TEST_CASE("LayerFollowedByActivationFloat32CpuAccTest")
{
ActivationDescriptor activationDescriptor;
for (int i = 0; i != 12; ++i)
@@ -638,17 +638,17 @@ BOOST_AUTO_TEST_CASE(LayerFollowedByActivationFloat32CpuAccTest)
activationDescriptor.m_Function = static_cast<ActivationFunction>(i);
activationDescriptor.m_A = 1.0f;
activationDescriptor.m_B = -1.0f;
- BOOST_CHECK_MESSAGE((FuseActivationSimpleTest<Convolution2dTest<DataType::Float32>, DataType::Float32>
+ CHECK_MESSAGE((FuseActivationSimpleTest<Convolution2dTest<DataType::Float32>, DataType::Float32>
(activationDescriptor, Compute::CpuAcc)), "Convolution + Activation function " << i);
- BOOST_CHECK_MESSAGE((FuseActivationSimpleTest<DWConvolution2dTest<DataType::Float32>, DataType::Float32>
+ CHECK_MESSAGE((FuseActivationSimpleTest<DWConvolution2dTest<DataType::Float32>, DataType::Float32>
(activationDescriptor, Compute::CpuAcc)), "DepthwiseConvolution + Activation function " << i);
- BOOST_CHECK_MESSAGE((FuseActivationSimpleTest<FullyConnectedTest<DataType::Float32>, DataType::Float32>
+ CHECK_MESSAGE((FuseActivationSimpleTest<FullyConnectedTest<DataType::Float32>, DataType::Float32>
(activationDescriptor, Compute::CpuAcc)), "FullyConnected + Activation function " << i);
- BOOST_CHECK_MESSAGE((FuseActivationSimpleTest<BatchNormTest<DataType::Float32>, DataType::Float32>
+ CHECK_MESSAGE((FuseActivationSimpleTest<BatchNormTest<DataType::Float32>, DataType::Float32>
(activationDescriptor, Compute::CpuAcc)), "BatchNorm + Activation function " << i);
}
}
-BOOST_AUTO_TEST_CASE(LayerFollowedByActivationFloat16CpuAccTest)
+TEST_CASE("LayerFollowedByActivationFloat16CpuAccTest")
{
ActivationDescriptor activationDescriptor;
for (int i = 0; i != 12; ++i)
@@ -656,59 +656,59 @@ BOOST_AUTO_TEST_CASE(LayerFollowedByActivationFloat16CpuAccTest)
activationDescriptor.m_Function = static_cast<ActivationFunction>(i);
activationDescriptor.m_A = 1.0f;
activationDescriptor.m_B = -1.0f;
- BOOST_CHECK_MESSAGE((FuseActivationSimpleTest<Convolution2dTest<DataType::Float16>, DataType::Float16>
+ CHECK_MESSAGE((FuseActivationSimpleTest<Convolution2dTest<DataType::Float16>, DataType::Float16>
(activationDescriptor, Compute::CpuAcc)), "Convolution + Activation function " << i);
- BOOST_CHECK_MESSAGE((FuseActivationSimpleTest<DWConvolution2dTest<DataType::Float16>, DataType::Float16>
+ CHECK_MESSAGE((FuseActivationSimpleTest<DWConvolution2dTest<DataType::Float16>, DataType::Float16>
(activationDescriptor, Compute::CpuAcc)), "DepthwiseConvolution + Activation function " << i);
- BOOST_CHECK_MESSAGE((FuseActivationSimpleTest<FullyConnectedTest<DataType::Float16>, DataType::Float16>
+ CHECK_MESSAGE((FuseActivationSimpleTest<FullyConnectedTest<DataType::Float16>, DataType::Float16>
(activationDescriptor, Compute::CpuAcc)), "FullyConnected + Activation function " << i);
- BOOST_CHECK_MESSAGE((FuseActivationSimpleTest<BatchNormTest<DataType::Float16>, DataType::Float16>
+ CHECK_MESSAGE((FuseActivationSimpleTest<BatchNormTest<DataType::Float16>, DataType::Float16>
(activationDescriptor, Compute::CpuAcc)), "BatchNorm + Activation function " << i);
}
}
-BOOST_AUTO_TEST_CASE(LayerFollowedByActivationQAsymmU8CpuAccTest)
+TEST_CASE("LayerFollowedByActivationQAsymmU8CpuAccTest")
{
ActivationDescriptor activationDescriptor;
activationDescriptor.m_Function = ActivationFunction::Sigmoid;
- BOOST_CHECK_MESSAGE((FuseActivationSimpleTest<Convolution2dTest<DataType::QAsymmU8>, DataType::QAsymmU8>
+ CHECK_MESSAGE((FuseActivationSimpleTest<Convolution2dTest<DataType::QAsymmU8>, DataType::QAsymmU8>
(activationDescriptor, Compute::CpuAcc, 1.f / 256.f, 0)), "Convolution + Activation function " <<
static_cast<int>(activationDescriptor.m_Function));
- BOOST_CHECK_MESSAGE((FuseActivationSimpleTest<FullyConnectedTest<DataType::QAsymmU8>, DataType::QAsymmU8>
+ CHECK_MESSAGE((FuseActivationSimpleTest<FullyConnectedTest<DataType::QAsymmU8>, DataType::QAsymmU8>
(activationDescriptor, Compute::CpuAcc, 1.f / 256.f, 0)), "FullyConnected + Activation function " <<
static_cast<int>(activationDescriptor.m_Function));
activationDescriptor.m_Function = ActivationFunction::TanH;
- BOOST_CHECK_MESSAGE((FuseActivationSimpleTest<Convolution2dTest<DataType::QAsymmU8>, DataType::QAsymmU8>
+ CHECK_MESSAGE((FuseActivationSimpleTest<Convolution2dTest<DataType::QAsymmU8>, DataType::QAsymmU8>
(activationDescriptor, Compute::CpuAcc, 1.f / 128.f, 128)), "Convolution + Activation function " <<
static_cast<int>(activationDescriptor.m_Function));
- BOOST_CHECK_MESSAGE((FuseActivationSimpleTest<FullyConnectedTest<DataType::QAsymmU8>, DataType::QAsymmU8>
+ CHECK_MESSAGE((FuseActivationSimpleTest<FullyConnectedTest<DataType::QAsymmU8>, DataType::QAsymmU8>
(activationDescriptor, Compute::CpuAcc, 1.f / 128.f, 128)), "FullyConnected + Activation function " <<
static_cast<int>(activationDescriptor.m_Function));
activationDescriptor.m_Function = ActivationFunction::ReLu;
- BOOST_CHECK_MESSAGE((FuseActivationSimpleTest<Convolution2dTest<DataType::QAsymmU8>, DataType::QAsymmU8>
+ CHECK_MESSAGE((FuseActivationSimpleTest<Convolution2dTest<DataType::QAsymmU8>, DataType::QAsymmU8>
(activationDescriptor, Compute::CpuAcc)), "Convolution + Activation function " <<
static_cast<int>(activationDescriptor.m_Function));
- BOOST_CHECK_MESSAGE((FuseActivationSimpleTest<FullyConnectedTest<DataType::QAsymmU8>, DataType::QAsymmU8>
+ CHECK_MESSAGE((FuseActivationSimpleTest<FullyConnectedTest<DataType::QAsymmU8>, DataType::QAsymmU8>
(activationDescriptor, Compute::CpuAcc)), "FullyConnected + Activation function " <<
static_cast<int>(activationDescriptor.m_Function));
activationDescriptor.m_Function = ActivationFunction::BoundedReLu;
activationDescriptor.m_A = 1.0f;
activationDescriptor.m_B = -1.0f;
- BOOST_CHECK_MESSAGE((FuseActivationSimpleTest<Convolution2dTest<DataType::QAsymmU8>, DataType::QAsymmU8>
+ CHECK_MESSAGE((FuseActivationSimpleTest<Convolution2dTest<DataType::QAsymmU8>, DataType::QAsymmU8>
(activationDescriptor, Compute::CpuAcc)), "Convolution + Activation function " <<
static_cast<int>(activationDescriptor.m_Function));
- BOOST_CHECK_MESSAGE((FuseActivationSimpleTest<FullyConnectedTest<DataType::QAsymmU8>, DataType::QAsymmU8>
+ CHECK_MESSAGE((FuseActivationSimpleTest<FullyConnectedTest<DataType::QAsymmU8>, DataType::QAsymmU8>
(activationDescriptor, Compute::CpuAcc)), "FullyConnected + Activation function " <<
static_cast<int>(activationDescriptor.m_Function));
activationDescriptor.m_Function = ActivationFunction::HardSwish;
- BOOST_CHECK_MESSAGE((FuseActivationSimpleTest<Convolution2dTest<DataType::QAsymmU8>, DataType::QAsymmU8>
+ CHECK_MESSAGE((FuseActivationSimpleTest<Convolution2dTest<DataType::QAsymmU8>, DataType::QAsymmU8>
(activationDescriptor, Compute::CpuAcc)), "Convolution + Activation function " <<
static_cast<int>(activationDescriptor.m_Function));
- BOOST_CHECK_MESSAGE((FuseActivationSimpleTest<FullyConnectedTest<DataType::QAsymmU8>, DataType::QAsymmU8>
+ CHECK_MESSAGE((FuseActivationSimpleTest<FullyConnectedTest<DataType::QAsymmU8>, DataType::QAsymmU8>
(activationDescriptor, Compute::CpuAcc)), "FullyConnected + Activation function " <<
static_cast<int>(activationDescriptor.m_Function));
}
@@ -716,7 +716,7 @@ BOOST_AUTO_TEST_CASE(LayerFollowedByActivationQAsymmU8CpuAccTest)
#if defined(ARMCOMPUTECL_ENABLED)
// ReLu fused into Receiver Layers Float32
-BOOST_AUTO_TEST_CASE(FuseReLUIntoConvFloat32GpuAccTest)
+TEST_CASE("FuseReLUIntoConvFloat32GpuAccTest")
{
ActivationDescriptor activationDescriptor;
activationDescriptor.m_Function = ActivationFunction::ReLu;
@@ -724,7 +724,7 @@ BOOST_AUTO_TEST_CASE(FuseReLUIntoConvFloat32GpuAccTest)
FuseActivationIntoPreviousLayerTest<Convolution2dTest<DataType::Float32>, DataType::Float32>
(activationDescriptor, 0.0001f, Compute::GpuAcc);
}
-BOOST_AUTO_TEST_CASE(FuseReLUIntoDWConvFloat32GpuAccTest)
+TEST_CASE("FuseReLUIntoDWConvFloat32GpuAccTest")
{
ActivationDescriptor activationDescriptor;
activationDescriptor.m_Function = ActivationFunction::ReLu;
@@ -732,7 +732,7 @@ BOOST_AUTO_TEST_CASE(FuseReLUIntoDWConvFloat32GpuAccTest)
FuseActivationIntoPreviousLayerTest<DWConvolution2dTest<DataType::Float32>, DataType::Float32>
(activationDescriptor, 0.0001f, Compute::GpuAcc);
}
-BOOST_AUTO_TEST_CASE(FuseReLUIntoFullyConnectedFloat32GpuAccTest)
+TEST_CASE("FuseReLUIntoFullyConnectedFloat32GpuAccTest")
{
ActivationDescriptor activationDescriptor;
activationDescriptor.m_Function = ActivationFunction::ReLu;
@@ -740,7 +740,7 @@ BOOST_AUTO_TEST_CASE(FuseReLUIntoFullyConnectedFloat32GpuAccTest)
FuseActivationIntoPreviousLayerTest<FullyConnectedTest<DataType::Float32>, DataType::Float32>
(activationDescriptor, 0.0001f, Compute::GpuAcc);
}
-BOOST_AUTO_TEST_CASE(FuseReLUIntoBatchNormFloat32GpuAccTest)
+TEST_CASE("FuseReLUIntoBatchNormFloat32GpuAccTest")
{
ActivationDescriptor activationDescriptor;
activationDescriptor.m_Function = ActivationFunction::ReLu;
@@ -748,7 +748,7 @@ BOOST_AUTO_TEST_CASE(FuseReLUIntoBatchNormFloat32GpuAccTest)
FuseActivationIntoPreviousLayerTest<BatchNormTest<DataType::Float32>, DataType::Float32>
(activationDescriptor, 0.0001f, Compute::GpuAcc);
}
-BOOST_AUTO_TEST_CASE(FuseReLUIntoMulFloat32GpuAccTest)
+TEST_CASE("FuseReLUIntoMulFloat32GpuAccTest")
{
ActivationDescriptor activationDescriptor;
activationDescriptor.m_Function = ActivationFunction::ReLu;
@@ -756,7 +756,7 @@ BOOST_AUTO_TEST_CASE(FuseReLUIntoMulFloat32GpuAccTest)
FuseActivationIntoPreviousLayerTest<MultiplicationTest<DataType::Float32>, DataType::Float32>
(activationDescriptor, 0.0001f, Compute::GpuAcc);
}
-BOOST_AUTO_TEST_CASE(FuseReLUIntoAddFloat32GpuAccTest)
+TEST_CASE("FuseReLUIntoAddFloat32GpuAccTest")
{
ActivationDescriptor activationDescriptor;
activationDescriptor.m_Function = ActivationFunction::ReLu;
@@ -764,7 +764,7 @@ BOOST_AUTO_TEST_CASE(FuseReLUIntoAddFloat32GpuAccTest)
FuseActivationIntoPreviousLayerTest<AdditionTest<DataType::Float32>, DataType::Float32>
(activationDescriptor, 0.0001f, Compute::GpuAcc);
}
-BOOST_AUTO_TEST_CASE(FuseReLUIntoSubFloat32GpuAccTest)
+TEST_CASE("FuseReLUIntoSubFloat32GpuAccTest")
{
ActivationDescriptor activationDescriptor;
activationDescriptor.m_Function = ActivationFunction::ReLu;
@@ -772,7 +772,7 @@ BOOST_AUTO_TEST_CASE(FuseReLUIntoSubFloat32GpuAccTest)
FuseActivationIntoPreviousLayerTest<SubtractionTest<DataType::Float32>, DataType::Float32>
(activationDescriptor, 0.0001f, Compute::GpuAcc);
}
-BOOST_AUTO_TEST_CASE(FuseReLUIntoDivFloat32GpuAccTest)
+TEST_CASE("FuseReLUIntoDivFloat32GpuAccTest")
{
ActivationDescriptor activationDescriptor;
activationDescriptor.m_Function = ActivationFunction::ReLu;
@@ -782,7 +782,7 @@ BOOST_AUTO_TEST_CASE(FuseReLUIntoDivFloat32GpuAccTest)
}
// BoundedReLu fused into Receiver Layers Float32
-BOOST_AUTO_TEST_CASE(FuseBoundedReLUIntoConvFloat32GpuAccTest)
+TEST_CASE("FuseBoundedReLUIntoConvFloat32GpuAccTest")
{
ActivationDescriptor activationDescriptor;
activationDescriptor.m_Function = ActivationFunction::BoundedReLu;
@@ -792,7 +792,7 @@ BOOST_AUTO_TEST_CASE(FuseBoundedReLUIntoConvFloat32GpuAccTest)
FuseActivationIntoPreviousLayerTest<Convolution2dTest<DataType::Float32>, DataType::Float32>
(activationDescriptor, 0.0001f, Compute::GpuAcc);
}
-BOOST_AUTO_TEST_CASE(FuseBoundedReLUIntoDWConvFloat32GpuAccTest)
+TEST_CASE("FuseBoundedReLUIntoDWConvFloat32GpuAccTest")
{
ActivationDescriptor activationDescriptor;
activationDescriptor.m_Function = ActivationFunction::BoundedReLu;
@@ -802,7 +802,7 @@ BOOST_AUTO_TEST_CASE(FuseBoundedReLUIntoDWConvFloat32GpuAccTest)
FuseActivationIntoPreviousLayerTest<DWConvolution2dTest<DataType::Float32>, DataType::Float32>
(activationDescriptor, 0.0001f, Compute::GpuAcc);
}
-BOOST_AUTO_TEST_CASE(FuseBoundedReLUIntoFullyConnectedFloat32GpuAccTest)
+TEST_CASE("FuseBoundedReLUIntoFullyConnectedFloat32GpuAccTest")
{
ActivationDescriptor activationDescriptor;
activationDescriptor.m_Function = ActivationFunction::BoundedReLu;
@@ -812,7 +812,7 @@ BOOST_AUTO_TEST_CASE(FuseBoundedReLUIntoFullyConnectedFloat32GpuAccTest)
FuseActivationIntoPreviousLayerTest<FullyConnectedTest<DataType::Float32>, DataType::Float32>
(activationDescriptor, 0.0001f, Compute::GpuAcc);
}
-BOOST_AUTO_TEST_CASE(FuseBoundedReLUIntoBatchNormFloat32GpuAccTest)
+TEST_CASE("FuseBoundedReLUIntoBatchNormFloat32GpuAccTest")
{
ActivationDescriptor activationDescriptor;
activationDescriptor.m_Function = ActivationFunction::BoundedReLu;
@@ -822,7 +822,7 @@ BOOST_AUTO_TEST_CASE(FuseBoundedReLUIntoBatchNormFloat32GpuAccTest)
FuseActivationIntoPreviousLayerTest<BatchNormTest<DataType::Float32>, DataType::Float32>
(activationDescriptor, 0.0001f, Compute::GpuAcc);
}
-BOOST_AUTO_TEST_CASE(FuseBoundedReLUIntoMulFloat32GpuAccTest)
+TEST_CASE("FuseBoundedReLUIntoMulFloat32GpuAccTest")
{
ActivationDescriptor activationDescriptor;
activationDescriptor.m_Function = ActivationFunction::BoundedReLu;
@@ -832,7 +832,7 @@ BOOST_AUTO_TEST_CASE(FuseBoundedReLUIntoMulFloat32GpuAccTest)
FuseActivationIntoPreviousLayerTest<MultiplicationTest<DataType::Float32>, DataType::Float32>
(activationDescriptor, 0.0001f, Compute::GpuAcc);
}
-BOOST_AUTO_TEST_CASE(FuseBoundedReLUIntoAddFloat32GpuAccTest)
+TEST_CASE("FuseBoundedReLUIntoAddFloat32GpuAccTest")
{
ActivationDescriptor activationDescriptor;
activationDescriptor.m_Function = ActivationFunction::BoundedReLu;
@@ -842,7 +842,7 @@ BOOST_AUTO_TEST_CASE(FuseBoundedReLUIntoAddFloat32GpuAccTest)
FuseActivationIntoPreviousLayerTest<AdditionTest<DataType::Float32>, DataType::Float32>
(activationDescriptor, 0.0001f, Compute::GpuAcc);
}
-BOOST_AUTO_TEST_CASE(FuseBoundedReLUIntoSubFloat32GpuAccTest)
+TEST_CASE("FuseBoundedReLUIntoSubFloat32GpuAccTest")
{
ActivationDescriptor activationDescriptor;
activationDescriptor.m_Function = ActivationFunction::BoundedReLu;
@@ -852,7 +852,7 @@ BOOST_AUTO_TEST_CASE(FuseBoundedReLUIntoSubFloat32GpuAccTest)
FuseActivationIntoPreviousLayerTest<SubtractionTest<DataType::Float32>, DataType::Float32>
(activationDescriptor, 0.0001f, Compute::GpuAcc);
}
-BOOST_AUTO_TEST_CASE(FuseBoundedReLUIntoDivFloat32GpuAccTest)
+TEST_CASE("FuseBoundedReLUIntoDivFloat32GpuAccTest")
{
ActivationDescriptor activationDescriptor;
activationDescriptor.m_Function = ActivationFunction::BoundedReLu;
@@ -864,7 +864,7 @@ BOOST_AUTO_TEST_CASE(FuseBoundedReLUIntoDivFloat32GpuAccTest)
}
// ReLu fused into Receiver Layers Float16
-BOOST_AUTO_TEST_CASE(FuseReLUIntoConvFloat16GpuAccTest)
+TEST_CASE("FuseReLUIntoConvFloat16GpuAccTest")
{
ActivationDescriptor activationDescriptor;
activationDescriptor.m_Function = ActivationFunction::ReLu;
@@ -872,7 +872,7 @@ BOOST_AUTO_TEST_CASE(FuseReLUIntoConvFloat16GpuAccTest)
FuseActivationIntoPreviousLayerTest<Convolution2dTest<DataType::Float16>, DataType::Float16>
(activationDescriptor, 0.0001f, Compute::GpuAcc);
}
-BOOST_AUTO_TEST_CASE(FuseReLUIntoDWConvFloat16GpuAccTest)
+TEST_CASE("FuseReLUIntoDWConvFloat16GpuAccTest")
{
ActivationDescriptor activationDescriptor;
activationDescriptor.m_Function = ActivationFunction::ReLu;
@@ -880,7 +880,7 @@ BOOST_AUTO_TEST_CASE(FuseReLUIntoDWConvFloat16GpuAccTest)
FuseActivationIntoPreviousLayerTest<DWConvolution2dTest<DataType::Float16>, DataType::Float16>
(activationDescriptor, 0.0001f, Compute::GpuAcc);
}
-BOOST_AUTO_TEST_CASE(FuseReLUIntoFullyConnectedFloat16GpuAccTest)
+TEST_CASE("FuseReLUIntoFullyConnectedFloat16GpuAccTest")
{
ActivationDescriptor activationDescriptor;
activationDescriptor.m_Function = ActivationFunction::ReLu;
@@ -888,7 +888,7 @@ BOOST_AUTO_TEST_CASE(FuseReLUIntoFullyConnectedFloat16GpuAccTest)
FuseActivationIntoPreviousLayerTest<FullyConnectedTest<DataType::Float16>, DataType::Float16>
(activationDescriptor, 0.0001f, Compute::GpuAcc);
}
-BOOST_AUTO_TEST_CASE(FuseReLUIntoBatchNormFloat16GpuAccTest)
+TEST_CASE("FuseReLUIntoBatchNormFloat16GpuAccTest")
{
ActivationDescriptor activationDescriptor;
activationDescriptor.m_Function = ActivationFunction::ReLu;
@@ -896,7 +896,7 @@ BOOST_AUTO_TEST_CASE(FuseReLUIntoBatchNormFloat16GpuAccTest)
FuseActivationIntoPreviousLayerTest<BatchNormTest<DataType::Float16>, DataType::Float16>
(activationDescriptor, 0.0001f, Compute::GpuAcc);
}
-BOOST_AUTO_TEST_CASE(FuseReLUIntoMulFloat16GpuAccTest)
+TEST_CASE("FuseReLUIntoMulFloat16GpuAccTest")
{
ActivationDescriptor activationDescriptor;
activationDescriptor.m_Function = ActivationFunction::ReLu;
@@ -904,7 +904,7 @@ BOOST_AUTO_TEST_CASE(FuseReLUIntoMulFloat16GpuAccTest)
FuseActivationIntoPreviousLayerTest<MultiplicationTest<DataType::Float16>, DataType::Float16>
(activationDescriptor, 0.0001f, Compute::GpuAcc);
}
-BOOST_AUTO_TEST_CASE(FuseReLUIntoAddFloat16GpuAccTest)
+TEST_CASE("FuseReLUIntoAddFloat16GpuAccTest")
{
ActivationDescriptor activationDescriptor;
activationDescriptor.m_Function = ActivationFunction::ReLu;
@@ -912,7 +912,7 @@ BOOST_AUTO_TEST_CASE(FuseReLUIntoAddFloat16GpuAccTest)
FuseActivationIntoPreviousLayerTest<AdditionTest<DataType::Float16>, DataType::Float16>
(activationDescriptor, 0.0001f, Compute::GpuAcc);
}
-BOOST_AUTO_TEST_CASE(FuseReLUIntoSubFloat16GpuAccTest)
+TEST_CASE("FuseReLUIntoSubFloat16GpuAccTest")
{
ActivationDescriptor activationDescriptor;
activationDescriptor.m_Function = ActivationFunction::ReLu;
@@ -920,7 +920,7 @@ BOOST_AUTO_TEST_CASE(FuseReLUIntoSubFloat16GpuAccTest)
FuseActivationIntoPreviousLayerTest<SubtractionTest<DataType::Float16>, DataType::Float16>
(activationDescriptor, 0.0001f, Compute::GpuAcc);
}
-BOOST_AUTO_TEST_CASE(FuseReLUIntoDivFloat16GpuAccTest)
+TEST_CASE("FuseReLUIntoDivFloat16GpuAccTest")
{
ActivationDescriptor activationDescriptor;
activationDescriptor.m_Function = ActivationFunction::ReLu;
@@ -930,7 +930,7 @@ BOOST_AUTO_TEST_CASE(FuseReLUIntoDivFloat16GpuAccTest)
}
// ReLU fused into Receiver Layers QAsymmU8
-BOOST_AUTO_TEST_CASE(FuseReLUQIntoConvAsymmU8GpuAccTest)
+TEST_CASE("FuseReLUQIntoConvAsymmU8GpuAccTest")
{
ActivationDescriptor activationDescriptor;
activationDescriptor.m_Function = ActivationFunction::ReLu;
@@ -938,7 +938,7 @@ BOOST_AUTO_TEST_CASE(FuseReLUQIntoConvAsymmU8GpuAccTest)
FuseActivationIntoPreviousLayerTest<Convolution2dTest<DataType::QAsymmU8>, DataType::QAsymmU8>
(activationDescriptor, 0.0001f, Compute::GpuAcc);
}
-BOOST_AUTO_TEST_CASE(FuseReLUQIntoDWConvAsymmU8GpuAccTest)
+TEST_CASE("FuseReLUQIntoDWConvAsymmU8GpuAccTest")
{
ActivationDescriptor activationDescriptor;
activationDescriptor.m_Function = ActivationFunction::ReLu;
@@ -946,7 +946,7 @@ BOOST_AUTO_TEST_CASE(FuseReLUQIntoDWConvAsymmU8GpuAccTest)
FuseActivationIntoPreviousLayerTest<DWConvolution2dTest<DataType::QAsymmU8>, DataType::QAsymmU8>
(activationDescriptor, 0.0001f, Compute::GpuAcc);
}
-BOOST_AUTO_TEST_CASE(FuseReLUQIntoFullyConnectedAsymmU8GpuAccTest)
+TEST_CASE("FuseReLUQIntoFullyConnectedAsymmU8GpuAccTest")
{
ActivationDescriptor activationDescriptor;
activationDescriptor.m_Function = ActivationFunction::ReLu;
@@ -956,7 +956,7 @@ BOOST_AUTO_TEST_CASE(FuseReLUQIntoFullyConnectedAsymmU8GpuAccTest)
}
// BoundedReLu fused into Receiver Layers QAsymmS8
-BOOST_AUTO_TEST_CASE(FuseBoundedReLUIntoConvQASymmS8GpuAccTest)
+TEST_CASE("FuseBoundedReLUIntoConvQASymmS8GpuAccTest")
{
ActivationDescriptor activationDescriptor;
activationDescriptor.m_Function = ActivationFunction::BoundedReLu;
@@ -966,7 +966,7 @@ BOOST_AUTO_TEST_CASE(FuseBoundedReLUIntoConvQASymmS8GpuAccTest)
FuseActivationIntoPreviousLayerTest<Convolution2dTest<DataType::QAsymmS8>, DataType::QAsymmS8>
(activationDescriptor, 0.0001f, Compute::GpuAcc);
}
-BOOST_AUTO_TEST_CASE(FuseBoundedReLUIntoDWConvQASymmS8GpuAccTest)
+TEST_CASE("FuseBoundedReLUIntoDWConvQASymmS8GpuAccTest")
{
ActivationDescriptor activationDescriptor;
activationDescriptor.m_Function = ActivationFunction::BoundedReLu;
@@ -976,7 +976,7 @@ BOOST_AUTO_TEST_CASE(FuseBoundedReLUIntoDWConvQASymmS8GpuAccTest)
FuseActivationIntoPreviousLayerTest<DWConvolution2dTest<DataType::QAsymmS8>, DataType::QAsymmS8>
(activationDescriptor, 0.0001f, Compute::GpuAcc);
}
-BOOST_AUTO_TEST_CASE(FuseBoundedReLUIntoFullyConnectedQASymmS8GpuAccTest)
+TEST_CASE("FuseBoundedReLUIntoFullyConnectedQASymmS8GpuAccTest")
{
ActivationDescriptor activationDescriptor;
activationDescriptor.m_Function = ActivationFunction::BoundedReLu;
@@ -988,7 +988,7 @@ BOOST_AUTO_TEST_CASE(FuseBoundedReLUIntoFullyConnectedQASymmS8GpuAccTest)
}
// TanH fused into Receiver Layers Float32
-BOOST_AUTO_TEST_CASE(FuseTanHIntoConvFloat32GpuAccTest)
+TEST_CASE("FuseTanHIntoConvFloat32GpuAccTest")
{
ActivationDescriptor activationDescriptor;
activationDescriptor.m_Function = ActivationFunction::TanH;
@@ -996,7 +996,7 @@ BOOST_AUTO_TEST_CASE(FuseTanHIntoConvFloat32GpuAccTest)
FuseActivationIntoPreviousLayerTest<Convolution2dTest<DataType::Float32>, DataType::Float32>
(activationDescriptor, 0.0001f, Compute::GpuAcc);
}
-BOOST_AUTO_TEST_CASE(FuseTanHIntoMulFloat32GpuAccTest)
+TEST_CASE("FuseTanHIntoMulFloat32GpuAccTest")
{
ActivationDescriptor activationDescriptor;
activationDescriptor.m_Function = ActivationFunction::TanH;
@@ -1004,7 +1004,7 @@ BOOST_AUTO_TEST_CASE(FuseTanHIntoMulFloat32GpuAccTest)
FuseActivationIntoPreviousLayerTest<MultiplicationTest<DataType::Float32>, DataType::Float32>
(activationDescriptor, 0.0001f, Compute::GpuAcc);
}
-BOOST_AUTO_TEST_CASE(FuseTanHIntoAddFloat32GpuAccTest)
+TEST_CASE("FuseTanHIntoAddFloat32GpuAccTest")
{
ActivationDescriptor activationDescriptor;
activationDescriptor.m_Function = ActivationFunction::TanH;
@@ -1012,7 +1012,7 @@ BOOST_AUTO_TEST_CASE(FuseTanHIntoAddFloat32GpuAccTest)
FuseActivationIntoPreviousLayerTest<AdditionTest<DataType::Float32>, DataType::Float32>
(activationDescriptor, 0.0001f, Compute::GpuAcc);
}
-BOOST_AUTO_TEST_CASE(FuseTanHIntoSubFloat32GpuAccTest)
+TEST_CASE("FuseTanHIntoSubFloat32GpuAccTest")
{
ActivationDescriptor activationDescriptor;
activationDescriptor.m_Function = ActivationFunction::TanH;
@@ -1020,7 +1020,7 @@ BOOST_AUTO_TEST_CASE(FuseTanHIntoSubFloat32GpuAccTest)
FuseActivationIntoPreviousLayerTest<SubtractionTest<DataType::Float32>, DataType::Float32>
(activationDescriptor, 0.0001f, Compute::GpuAcc);
}
-BOOST_AUTO_TEST_CASE(FuseTanHIntoDivFloat32GpuAccTest)
+TEST_CASE("FuseTanHIntoDivFloat32GpuAccTest")
{
ActivationDescriptor activationDescriptor;
activationDescriptor.m_Function = ActivationFunction::TanH;
@@ -1030,7 +1030,7 @@ BOOST_AUTO_TEST_CASE(FuseTanHIntoDivFloat32GpuAccTest)
}
// HardSwish fused into Receiver Layers Float32
-BOOST_AUTO_TEST_CASE(FuseHardSwishIntoConvFloat32GpuAccTest)
+TEST_CASE("FuseHardSwishIntoConvFloat32GpuAccTest")
{
ActivationDescriptor activationDescriptor;
activationDescriptor.m_Function = ActivationFunction::HardSwish;
@@ -1038,7 +1038,7 @@ BOOST_AUTO_TEST_CASE(FuseHardSwishIntoConvFloat32GpuAccTest)
FuseActivationIntoPreviousLayerTest<Convolution2dTest<DataType::Float32>, DataType::Float32>
(activationDescriptor, 0.0001f, Compute::GpuAcc);
}
-BOOST_AUTO_TEST_CASE(FuseHardSwishIntoMulFloat32GpuAccTest)
+TEST_CASE("FuseHardSwishIntoMulFloat32GpuAccTest")
{
ActivationDescriptor activationDescriptor;
activationDescriptor.m_Function = ActivationFunction::HardSwish;
@@ -1046,7 +1046,7 @@ BOOST_AUTO_TEST_CASE(FuseHardSwishIntoMulFloat32GpuAccTest)
FuseActivationIntoPreviousLayerTest<MultiplicationTest<DataType::Float32>, DataType::Float32>
(activationDescriptor, 0.0001f, Compute::GpuAcc);
}
-BOOST_AUTO_TEST_CASE(FuseHardSwishIntoAddFloat32GpuAccTest)
+TEST_CASE("FuseHardSwishIntoAddFloat32GpuAccTest")
{
ActivationDescriptor activationDescriptor;
activationDescriptor.m_Function = ActivationFunction::HardSwish;
@@ -1054,7 +1054,7 @@ BOOST_AUTO_TEST_CASE(FuseHardSwishIntoAddFloat32GpuAccTest)
FuseActivationIntoPreviousLayerTest<AdditionTest<DataType::Float32>, DataType::Float32>
(activationDescriptor, 0.0001f, Compute::GpuAcc);
}
-BOOST_AUTO_TEST_CASE(FuseHardSwishIntoSubFloat32GpuAccTest)
+TEST_CASE("FuseHardSwishIntoSubFloat32GpuAccTest")
{
ActivationDescriptor activationDescriptor;
activationDescriptor.m_Function = ActivationFunction::HardSwish;
@@ -1062,7 +1062,7 @@ BOOST_AUTO_TEST_CASE(FuseHardSwishIntoSubFloat32GpuAccTest)
FuseActivationIntoPreviousLayerTest<SubtractionTest<DataType::Float32>, DataType::Float32>
(activationDescriptor, 0.0001f, Compute::GpuAcc);
}
-BOOST_AUTO_TEST_CASE(FuseHardSwishIntoDivFloat32GpuAccTest)
+TEST_CASE("FuseHardSwishIntoDivFloat32GpuAccTest")
{
ActivationDescriptor activationDescriptor;
activationDescriptor.m_Function = ActivationFunction::HardSwish;
@@ -1072,7 +1072,7 @@ BOOST_AUTO_TEST_CASE(FuseHardSwishIntoDivFloat32GpuAccTest)
}
// Test that all receiver layers followed by all activation layers work, either fused or not fused
-BOOST_AUTO_TEST_CASE(LayerFollowedByActivationFloat32GpuAccTest)
+TEST_CASE("LayerFollowedByActivationFloat32GpuAccTest")
{
ActivationDescriptor activationDescriptor;
for (int i = 0; i != 12; ++i)
@@ -1082,26 +1082,26 @@ BOOST_AUTO_TEST_CASE(LayerFollowedByActivationFloat32GpuAccTest)
activationDescriptor.m_B = -1.0f;
if (activationDescriptor.m_Function != ActivationFunction::Elu)
{
- BOOST_CHECK_MESSAGE((FuseActivationSimpleTest<Convolution2dTest<DataType::Float32>, DataType::Float32>
+ CHECK_MESSAGE((FuseActivationSimpleTest<Convolution2dTest<DataType::Float32>, DataType::Float32>
(activationDescriptor, Compute::GpuAcc)), "Convolution + Activation function " << i);
- BOOST_CHECK_MESSAGE((FuseActivationSimpleTest<DWConvolution2dTest<DataType::Float32>, DataType::Float32>
+ CHECK_MESSAGE((FuseActivationSimpleTest<DWConvolution2dTest<DataType::Float32>, DataType::Float32>
(activationDescriptor, Compute::GpuAcc)), "DepthwiseConvolution + Activation function " << i);
- BOOST_CHECK_MESSAGE((FuseActivationSimpleTest<FullyConnectedTest<DataType::Float32>, DataType::Float32>
+ CHECK_MESSAGE((FuseActivationSimpleTest<FullyConnectedTest<DataType::Float32>, DataType::Float32>
(activationDescriptor, Compute::GpuAcc)), "FullyConnected + Activation function " << i);
- BOOST_CHECK_MESSAGE((FuseActivationSimpleTest<BatchNormTest<DataType::Float32>, DataType::Float32>
+ CHECK_MESSAGE((FuseActivationSimpleTest<BatchNormTest<DataType::Float32>, DataType::Float32>
(activationDescriptor, Compute::GpuAcc)), "BatchNorm + Activation function " << i);
- BOOST_CHECK_MESSAGE((FuseActivationSimpleTest<MultiplicationTest<DataType::Float32>, DataType::Float32>
+ CHECK_MESSAGE((FuseActivationSimpleTest<MultiplicationTest<DataType::Float32>, DataType::Float32>
(activationDescriptor, Compute::GpuAcc)), "Multiplication + Activation function " << i);
- BOOST_CHECK_MESSAGE((FuseActivationSimpleTest<AdditionTest<DataType::Float32>, DataType::Float32>
+ CHECK_MESSAGE((FuseActivationSimpleTest<AdditionTest<DataType::Float32>, DataType::Float32>
(activationDescriptor, Compute::GpuAcc)), "Addition + Activation function " << i);
- BOOST_CHECK_MESSAGE((FuseActivationSimpleTest<SubtractionTest<DataType::Float32>, DataType::Float32>
+ CHECK_MESSAGE((FuseActivationSimpleTest<SubtractionTest<DataType::Float32>, DataType::Float32>
(activationDescriptor, Compute::GpuAcc)), "Subtraction + Activation function " << i);
- BOOST_CHECK_MESSAGE((FuseActivationSimpleTest<DivisionTest<DataType::Float32>, DataType::Float32>
+ CHECK_MESSAGE((FuseActivationSimpleTest<DivisionTest<DataType::Float32>, DataType::Float32>
(activationDescriptor, Compute::GpuAcc)), "Division + Activation function " << i);
}
}
}
-BOOST_AUTO_TEST_CASE(LayerFollowedByActivationFloat16GpuAccTest)
+TEST_CASE("LayerFollowedByActivationFloat16GpuAccTest")
{
ActivationDescriptor activationDescriptor;
for (int i = 0; i != 12; ++i)
@@ -1111,71 +1111,71 @@ BOOST_AUTO_TEST_CASE(LayerFollowedByActivationFloat16GpuAccTest)
activationDescriptor.m_B = -1.0f;
if (activationDescriptor.m_Function != ActivationFunction::Elu)
{
- BOOST_CHECK_MESSAGE((FuseActivationSimpleTest<Convolution2dTest<DataType::Float16>, DataType::Float16>
+ CHECK_MESSAGE((FuseActivationSimpleTest<Convolution2dTest<DataType::Float16>, DataType::Float16>
(activationDescriptor, Compute::GpuAcc)), "Convolution + Activation function " << i);
- BOOST_CHECK_MESSAGE((FuseActivationSimpleTest<DWConvolution2dTest<DataType::Float16>, DataType::Float16>
+ CHECK_MESSAGE((FuseActivationSimpleTest<DWConvolution2dTest<DataType::Float16>, DataType::Float16>
(activationDescriptor, Compute::GpuAcc)), "Depthwise + Activation function " << i);
- BOOST_CHECK_MESSAGE((FuseActivationSimpleTest<FullyConnectedTest<DataType::Float16>, DataType::Float16>
+ CHECK_MESSAGE((FuseActivationSimpleTest<FullyConnectedTest<DataType::Float16>, DataType::Float16>
(activationDescriptor, Compute::GpuAcc)), "FullyConnected + Activation function " << i);
- BOOST_CHECK_MESSAGE((FuseActivationSimpleTest<BatchNormTest<DataType::Float16>, DataType::Float16>
+ CHECK_MESSAGE((FuseActivationSimpleTest<BatchNormTest<DataType::Float16>, DataType::Float16>
(activationDescriptor, Compute::GpuAcc)), "BatchNorm + Activation function " << i);
- BOOST_CHECK_MESSAGE((FuseActivationSimpleTest<MultiplicationTest<DataType::Float16>, DataType::Float16>
+ CHECK_MESSAGE((FuseActivationSimpleTest<MultiplicationTest<DataType::Float16>, DataType::Float16>
(activationDescriptor, Compute::GpuAcc)), "Multiplication + Activation function " << i);
- BOOST_CHECK_MESSAGE((FuseActivationSimpleTest<AdditionTest<DataType::Float16>, DataType::Float16>
+ CHECK_MESSAGE((FuseActivationSimpleTest<AdditionTest<DataType::Float16>, DataType::Float16>
(activationDescriptor, Compute::GpuAcc)), "Addition + Activation function " << i);
- BOOST_CHECK_MESSAGE((FuseActivationSimpleTest<SubtractionTest<DataType::Float16>, DataType::Float16>
+ CHECK_MESSAGE((FuseActivationSimpleTest<SubtractionTest<DataType::Float16>, DataType::Float16>
(activationDescriptor, Compute::GpuAcc)), "Subtraction + Activation function " << i);
- BOOST_CHECK_MESSAGE((FuseActivationSimpleTest<DivisionTest<DataType::Float16>, DataType::Float16>
+ CHECK_MESSAGE((FuseActivationSimpleTest<DivisionTest<DataType::Float16>, DataType::Float16>
(activationDescriptor, Compute::GpuAcc)), "Division + Activation function " << i);
}
}
}
-BOOST_AUTO_TEST_CASE(LayerFollowedByActivationQAsymmU8GpuAccTest)
+TEST_CASE("LayerFollowedByActivationQAsymmU8GpuAccTest")
{
ActivationDescriptor activationDescriptor;
activationDescriptor.m_Function = ActivationFunction::Sigmoid;
- BOOST_CHECK_MESSAGE((FuseActivationSimpleTest<Convolution2dTest<DataType::QAsymmU8>, DataType::QAsymmU8>
+ CHECK_MESSAGE((FuseActivationSimpleTest<Convolution2dTest<DataType::QAsymmU8>, DataType::QAsymmU8>
(activationDescriptor, Compute::GpuAcc, 1.f / 256.f, 0)), "Convolution + Activation function " <<
static_cast<int>(activationDescriptor.m_Function));
- BOOST_CHECK_MESSAGE((FuseActivationSimpleTest<FullyConnectedTest<DataType::QAsymmU8>, DataType::QAsymmU8>
+ CHECK_MESSAGE((FuseActivationSimpleTest<FullyConnectedTest<DataType::QAsymmU8>, DataType::QAsymmU8>
(activationDescriptor, Compute::GpuAcc, 1.f / 256.f, 0)), "FullyConnected + Activation function " <<
static_cast<int>(activationDescriptor.m_Function));
activationDescriptor.m_Function = ActivationFunction::TanH;
- BOOST_CHECK_MESSAGE((FuseActivationSimpleTest<Convolution2dTest<DataType::QAsymmU8>, DataType::QAsymmU8>
+ CHECK_MESSAGE((FuseActivationSimpleTest<Convolution2dTest<DataType::QAsymmU8>, DataType::QAsymmU8>
(activationDescriptor, Compute::GpuAcc, 1.f / 128.f, 128)), "Convolution + Activation function " <<
static_cast<int>(activationDescriptor.m_Function));
- BOOST_CHECK_MESSAGE((FuseActivationSimpleTest<FullyConnectedTest<DataType::QAsymmU8>, DataType::QAsymmU8>
+ CHECK_MESSAGE((FuseActivationSimpleTest<FullyConnectedTest<DataType::QAsymmU8>, DataType::QAsymmU8>
(activationDescriptor, Compute::GpuAcc, 1.f / 128.f, 128)), "FullyConnected + Activation function " <<
static_cast<int>(activationDescriptor.m_Function));
activationDescriptor.m_Function = ActivationFunction::ReLu;
- BOOST_CHECK_MESSAGE((FuseActivationSimpleTest<Convolution2dTest<DataType::QAsymmU8>, DataType::QAsymmU8>
+ CHECK_MESSAGE((FuseActivationSimpleTest<Convolution2dTest<DataType::QAsymmU8>, DataType::QAsymmU8>
(activationDescriptor, Compute::GpuAcc)), "Convolution + Activation function " <<
static_cast<int>(activationDescriptor.m_Function));
- BOOST_CHECK_MESSAGE((FuseActivationSimpleTest<FullyConnectedTest<DataType::QAsymmU8>, DataType::QAsymmU8>
+ CHECK_MESSAGE((FuseActivationSimpleTest<FullyConnectedTest<DataType::QAsymmU8>, DataType::QAsymmU8>
(activationDescriptor, Compute::GpuAcc)), "FullyConnected + Activation function " <<
static_cast<int>(activationDescriptor.m_Function));
activationDescriptor.m_Function = ActivationFunction::BoundedReLu;
activationDescriptor.m_A = 1.0f;
activationDescriptor.m_B = -1.0f;
- BOOST_CHECK_MESSAGE((FuseActivationSimpleTest<Convolution2dTest<DataType::QAsymmU8>, DataType::QAsymmU8>
+ CHECK_MESSAGE((FuseActivationSimpleTest<Convolution2dTest<DataType::QAsymmU8>, DataType::QAsymmU8>
(activationDescriptor, Compute::GpuAcc)), "Convolution + Activation function " <<
static_cast<int>(activationDescriptor.m_Function));
- BOOST_CHECK_MESSAGE((FuseActivationSimpleTest<FullyConnectedTest<DataType::QAsymmU8>, DataType::QAsymmU8>
+ CHECK_MESSAGE((FuseActivationSimpleTest<FullyConnectedTest<DataType::QAsymmU8>, DataType::QAsymmU8>
(activationDescriptor, Compute::GpuAcc)), "FullyConnected + Activation function " <<
static_cast<int>(activationDescriptor.m_Function));
activationDescriptor.m_Function = ActivationFunction::HardSwish;
- BOOST_CHECK_MESSAGE((FuseActivationSimpleTest<Convolution2dTest<DataType::QAsymmU8>, DataType::QAsymmU8>
+ CHECK_MESSAGE((FuseActivationSimpleTest<Convolution2dTest<DataType::QAsymmU8>, DataType::QAsymmU8>
(activationDescriptor, Compute::GpuAcc)), "Convolution + Activation function " <<
static_cast<int>(activationDescriptor.m_Function));
- BOOST_CHECK_MESSAGE((FuseActivationSimpleTest<FullyConnectedTest<DataType::QAsymmU8>, DataType::QAsymmU8>
+ CHECK_MESSAGE((FuseActivationSimpleTest<FullyConnectedTest<DataType::QAsymmU8>, DataType::QAsymmU8>
(activationDescriptor, Compute::GpuAcc)), "FullyConnected + Activation function " <<
static_cast<int>(activationDescriptor.m_Function));
}
#endif
-BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
+}
\ No newline at end of file
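The conversion above is mechanical, and the same mapping recurs in every file of this patch: doctest has no suite-end macro, so TEST_SUITE takes an explicit brace-delimited block whose closing brace replaces BOOST_AUTO_TEST_SUITE_END(), case names become strings, and the Boost assertion macros map onto doctest's CHECK family. A minimal sketch of the pattern (the suite, case, and variable names are placeholders, not tests from this patch):

#include <doctest/doctest.h>

TEST_SUITE("ExampleSuite")
{   // explicit block; the closing brace replaces BOOST_AUTO_TEST_SUITE_END()
    TEST_CASE("ExampleCase")                  // was BOOST_AUTO_TEST_CASE(ExampleCase)
    {
        int i = 1;
        CHECK(i == 1);                        // was BOOST_TEST / BOOST_CHECK; failure is recorded, test continues
        CHECK_MESSAGE(i == 1, "index " << i); // was BOOST_CHECK_MESSAGE; the message still streams with <<
    }
}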
diff --git a/src/armnn/test/optimizations/FuseBatchNormTests.cpp b/src/armnn/test/optimizations/FuseBatchNormTests.cpp
index be66c5e4af..671f565054 100644
--- a/src/armnn/test/optimizations/FuseBatchNormTests.cpp
+++ b/src/armnn/test/optimizations/FuseBatchNormTests.cpp
@@ -10,12 +10,12 @@
#include <armnn/INetwork.hpp>
#include <test/TestUtils.hpp>
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
using namespace armnn;
-BOOST_AUTO_TEST_SUITE(Optimizer)
-
+TEST_SUITE("Optimizer")
+{
namespace
{
@@ -194,8 +194,8 @@ void FuseBatchNormIntoConvTest(bool depthwise, float tolerance, armnn::Compute b
(layer->GetNameStr() == "fused-batchNorm-into-convolution");
};
- BOOST_CHECK(3 == graphFused.GetNumLayers());
- BOOST_TEST(CheckSequence(graphFused.cbegin(),
+ CHECK(3 == graphFused.GetNumLayers());
+ CHECK(CheckSequence(graphFused.cbegin(),
graphFused.cend(),
&IsLayerOfType<InputLayer>,
checkFusedConv2d,
@@ -203,7 +203,7 @@ void FuseBatchNormIntoConvTest(bool depthwise, float tolerance, armnn::Compute b
// Load network into runtime
NetworkId networkIdentifier;
- BOOST_TEST(run->LoadNetwork(networkIdentifier, std::move(optNetFused)) == Status::Success);
+ CHECK(run->LoadNetwork(networkIdentifier, std::move(optNetFused)) == Status::Success);
//Creates structures for inputs and outputs.
std::vector<T> inputDataFused = GetVector<T>(48, 1.0f, 0.1f);
@@ -235,8 +235,8 @@ void FuseBatchNormIntoConvTest(bool depthwise, float tolerance, armnn::Compute b
Graph& graphNotFused = GetGraphForTesting(optNetNotFused.get());
- BOOST_CHECK(5 == graphNotFused.GetNumLayers());
- BOOST_TEST(CheckSequence(graphNotFused.cbegin(),
+ CHECK(5 == graphNotFused.GetNumLayers());
+ CHECK(CheckSequence(graphNotFused.cbegin(),
graphNotFused.cend(),
&IsLayerOfType<armnn::InputLayer>,
&IsLayerOfType<ConvLayerType>,
@@ -246,7 +246,7 @@ void FuseBatchNormIntoConvTest(bool depthwise, float tolerance, armnn::Compute b
// Load network into runtime
NetworkId networkIdentifierNotFused;
- BOOST_TEST(runNotFused->LoadNetwork(networkIdentifierNotFused, std::move(optNetNotFused)) == Status::Success);
+ CHECK(runNotFused->LoadNetwork(networkIdentifierNotFused, std::move(optNetNotFused)) == Status::Success);
//Creates structures for inputs and outputs.
std::vector<T> inputDataNotFused = GetVector<T>(48, 1.0f, 0.1f);
@@ -269,33 +269,34 @@ void FuseBatchNormIntoConvTest(bool depthwise, float tolerance, armnn::Compute b
runNotFused->EnqueueWorkload(networkIdentifierNotFused, inputTensorsNotFused, outputTensorsNotFused);
// Check that the output of the fused convolution matches the output of the batchNorm in the "NotFused" network
+ auto epsilon = T(tolerance);
for (unsigned int n = 0; n < outputDataFused.size(); ++n)
{
- BOOST_CHECK_CLOSE(outputDataFused[n], outputDataNotFused[n], T(tolerance));
+ CHECK_EQ(outputDataFused[n], doctest::Approx(outputDataNotFused[n]).epsilon(epsilon));
}
}
// This unit test needs the reference backend; it's not available if the reference backend is not built
#if defined(ARMNNREF_ENABLED)
-BOOST_AUTO_TEST_CASE(FuseBatchNormIntoConv2DFloat32Test)
+TEST_CASE("FuseBatchNormIntoConv2DFloat32Test")
{
FuseBatchNormIntoConvTest<Conv2dTest, DataType::Float32>(false, 0.0001f, armnn::Compute::CpuRef);
}
-BOOST_AUTO_TEST_CASE(FuseBatchNormIntoConv2DFloat16Test)
+TEST_CASE("FuseBatchNormIntoConv2DFloat16Test")
{
FuseBatchNormIntoConvTest<Conv2dTest, DataType::Float16>(false, 0.1f, armnn::Compute::CpuRef);
}
-BOOST_AUTO_TEST_CASE(FuseBatchNormIntoDepthwiseConv2DFloat32Test)
+TEST_CASE("FuseBatchNormIntoDepthwiseConv2DFloat32Test")
{
FuseBatchNormIntoConvTest<DepthwiseConv2dTest, DataType::Float32>(true, 0.0001f,armnn::Compute::CpuRef);
}
-BOOST_AUTO_TEST_CASE(FuseBatchNormIntoDepthwiseConv2DFloat16Test)
+TEST_CASE("FuseBatchNormIntoDepthwiseConv2DFloat16Test")
{
FuseBatchNormIntoConvTest<DepthwiseConv2dTest, DataType::Float16>(true, 0.1f,armnn::Compute::CpuRef);
}
#endif
-BOOST_AUTO_TEST_SUITE_END()
+}
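One conversion in the file above is not purely mechanical. BOOST_CHECK_CLOSE takes its tolerance as a percentage, while doctest's Approx::epsilon() is a relative fraction, so forwarding the same number unchanged (as the hunk above does via epsilon = T(tolerance)) makes the doctest comparison roughly 100x looser than the Boost one. A minimal sketch of the distinction, with placeholder values:

#include <doctest/doctest.h>

TEST_CASE("ApproxToleranceSemantics")
{
    float expected = 100.0f;
    float actual   = 100.04f;

    // Boost.Test: BOOST_CHECK_CLOSE(actual, expected, 0.1f) reads 0.1 as a
    // percentage, i.e. a relative tolerance of 0.001, so this pair passes.

    // doctest: epsilon() is the relative fraction itself, so the strict
    // equivalent of a 0.1% Boost tolerance divides the number by 100.
    CHECK_EQ(actual, doctest::Approx(expected).epsilon(0.1 / 100.0));
}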
diff --git a/src/armnn/test/optimizations/InsertDebugLayerTests.cpp b/src/armnn/test/optimizations/InsertDebugLayerTests.cpp
index 38b6397142..03d0d22f95 100644
--- a/src/armnn/test/optimizations/InsertDebugLayerTests.cpp
+++ b/src/armnn/test/optimizations/InsertDebugLayerTests.cpp
@@ -7,12 +7,13 @@
#include <Optimizer.hpp>
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
-BOOST_AUTO_TEST_SUITE(Optimizer)
+TEST_SUITE("Optimizer")
+{
using namespace armnn::optimizations;
-BOOST_AUTO_TEST_CASE(InsertDebugOptimizationTest)
+TEST_CASE("InsertDebugOptimizationTest")
{
armnn::Graph graph;
@@ -31,15 +32,15 @@ BOOST_AUTO_TEST_CASE(InsertDebugOptimizationTest)
input->GetOutputSlot().Connect(floor->GetInputSlot(0));
floor->GetOutputSlot().Connect(output->GetInputSlot(0));
- BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
+ CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
&IsLayerOfType<armnn::FloorLayer>, &IsLayerOfType<armnn::OutputLayer>));
// Run the optimizer
armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(InsertDebugLayer()));
- BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
+ CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
&IsLayerOfType<armnn::DebugLayer>, &IsLayerOfType<armnn::FloorLayer>,
&IsLayerOfType<armnn::DebugLayer>, &IsLayerOfType<armnn::OutputLayer>));
}
-BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
+}
\ No newline at end of file
diff --git a/src/armnn/test/optimizations/MovePermuteUpTests.cpp b/src/armnn/test/optimizations/MovePermuteUpTests.cpp
index 13c692670a..38a65a6173 100644
--- a/src/armnn/test/optimizations/MovePermuteUpTests.cpp
+++ b/src/armnn/test/optimizations/MovePermuteUpTests.cpp
@@ -7,12 +7,13 @@
#include <Optimizer.hpp>
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
-BOOST_AUTO_TEST_SUITE(Optimizer)
+TEST_SUITE("Optimizer")
+{
using namespace armnn::optimizations;
-BOOST_AUTO_TEST_CASE(MovePermuteUpTest)
+TEST_CASE("MovePermuteUpTest")
{
const armnn::TensorInfo info({ 1, 5, 2, 3 }, armnn::DataType::Float32);
const armnn::TensorInfo permuted({ 1, 3, 5, 2 }, armnn::DataType::Float32);
@@ -66,7 +67,7 @@ BOOST_AUTO_TEST_CASE(MovePermuteUpTest)
->GetOutputHandler()
.SetTensorInfo(info);
- BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
+ CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
&IsLayerOfType<armnn::InputLayer>, &IsLayerOfType<armnn::InputLayer>,
&IsLayerOfType<armnn::MultiplicationLayer>, &IsLayerOfType<armnn::MemCopyLayer>,
&IsLayerOfType<armnn::FloorLayer>, &IsLayerOfType<armnn::FakeQuantizationLayer>,
@@ -76,7 +77,7 @@ BOOST_AUTO_TEST_CASE(MovePermuteUpTest)
armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(MovePermuteUp()));
// The permute is moved to the top. New permutes are inserted for layers with multiple inputs.
- BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
+ CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
&IsLayerOfType<armnn::InputLayer>, &IsLayerOfType<armnn::InputLayer>,
&IsLayerOfType<armnn::PermuteLayer>, &IsLayerOfType<armnn::PermuteLayer>,
&IsLayerOfType<armnn::PermuteLayer>, &IsLayerOfType<armnn::MultiplicationLayer>,
@@ -86,7 +87,7 @@ BOOST_AUTO_TEST_CASE(MovePermuteUpTest)
std::list<std::string> testRelatedLayers = { permuteLayerName };
- BOOST_TEST(CheckRelatedLayers<armnn::PermuteLayer>(graph, testRelatedLayers));
+ CHECK(CheckRelatedLayers<armnn::PermuteLayer>(graph, testRelatedLayers));
}
-BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
+}
\ No newline at end of file
diff --git a/src/armnn/test/optimizations/MoveTransposeUpTests.cpp b/src/armnn/test/optimizations/MoveTransposeUpTests.cpp
index cb41ff0dc1..68d277a4bd 100644
--- a/src/armnn/test/optimizations/MoveTransposeUpTests.cpp
+++ b/src/armnn/test/optimizations/MoveTransposeUpTests.cpp
@@ -7,12 +7,13 @@
#include <Optimizer.hpp>
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
-BOOST_AUTO_TEST_SUITE(Optimizer)
+TEST_SUITE("Optimizer")
+{
using namespace armnn::optimizations;
-BOOST_AUTO_TEST_CASE(MoveTransposeUpTest)
+TEST_CASE("MoveTransposeUpTest")
{
const armnn::TensorInfo info({ 1, 5, 2, 3 }, armnn::DataType::Float32);
const armnn::TensorInfo transposed({ 1, 3, 5, 2 }, armnn::DataType::Float32);
@@ -67,7 +68,7 @@ BOOST_AUTO_TEST_CASE(MoveTransposeUpTest)
->GetOutputHandler()
.SetTensorInfo(info);
- BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
+ CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
&IsLayerOfType<armnn::InputLayer>, &IsLayerOfType<armnn::InputLayer>,
&IsLayerOfType<armnn::MultiplicationLayer>, &IsLayerOfType<armnn::MemCopyLayer>,
&IsLayerOfType<armnn::FloorLayer>, &IsLayerOfType<armnn::FakeQuantizationLayer>,
@@ -77,7 +78,7 @@ BOOST_AUTO_TEST_CASE(MoveTransposeUpTest)
armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(MoveTransposeUp()));
// The transpose is moved to the top. New transposes are inserted for layers with multiple inputs.
- BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
+ CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
&IsLayerOfType<armnn::InputLayer>, &IsLayerOfType<armnn::InputLayer>,
&IsLayerOfType<armnn::TransposeLayer>, &IsLayerOfType<armnn::TransposeLayer>,
&IsLayerOfType<armnn::TransposeLayer>, &IsLayerOfType<armnn::MultiplicationLayer>,
@@ -87,7 +88,7 @@ BOOST_AUTO_TEST_CASE(MoveTransposeUpTest)
std::list<std::string> testRelatedLayers = { transposeLayerName };
- BOOST_TEST(CheckRelatedLayers<armnn::TransposeLayer>(graph, testRelatedLayers));
+ CHECK(CheckRelatedLayers<armnn::TransposeLayer>(graph, testRelatedLayers));
}
-BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
+}
\ No newline at end of file
diff --git a/src/armnn/test/optimizations/OptimizeConsecutiveReshapesTests.cpp b/src/armnn/test/optimizations/OptimizeConsecutiveReshapesTests.cpp
index 8c3c435265..694b103091 100644
--- a/src/armnn/test/optimizations/OptimizeConsecutiveReshapesTests.cpp
+++ b/src/armnn/test/optimizations/OptimizeConsecutiveReshapesTests.cpp
@@ -7,12 +7,13 @@
#include <Optimizer.hpp>
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
-BOOST_AUTO_TEST_SUITE(Optimizer)
+TEST_SUITE("Optimizer")
+{
using namespace armnn::optimizations;
-BOOST_AUTO_TEST_CASE(OptimizeConsecutiveReshapesTest)
+TEST_CASE("OptimizeConsecutiveReshapesTest")
{
armnn::Graph graph;
@@ -39,7 +40,7 @@ BOOST_AUTO_TEST_CASE(OptimizeConsecutiveReshapesTest)
reshape1->GetOutputHandler().SetTensorInfo(info1);
reshape2->GetOutputHandler().SetTensorInfo(info2);
- BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
+ CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
&IsLayerOfType<armnn::ReshapeLayer>, &IsLayerOfType<armnn::ReshapeLayer>,
&IsLayerOfType<armnn::OutputLayer>));
@@ -53,13 +54,13 @@ BOOST_AUTO_TEST_CASE(OptimizeConsecutiveReshapesTest)
};
// The two reshapes are replaced by a single equivalent reshape.
- BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>, checkReshape,
+ CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>, checkReshape,
&IsLayerOfType<armnn::OutputLayer>));
// Check the new reshape layer has the other two reshapes as related layers
std::list<std::string> testRelatedLayers = { reshape2Name, reshape1Name };
- BOOST_TEST(CheckRelatedLayers<armnn::ReshapeLayer>(graph, testRelatedLayers));
+ CHECK(CheckRelatedLayers<armnn::ReshapeLayer>(graph, testRelatedLayers));
}
{
@@ -72,9 +73,9 @@ BOOST_AUTO_TEST_CASE(OptimizeConsecutiveReshapesTest)
armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(OptimizeConsecutiveReshapes()));
// The two reshapes are removed.
- BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
+ CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
&IsLayerOfType<armnn::OutputLayer>));
}
}
-BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
+}
\ No newline at end of file
diff --git a/src/armnn/test/optimizations/OptimizeInverseConversionsTests.cpp b/src/armnn/test/optimizations/OptimizeInverseConversionsTests.cpp
index d87113c209..4b6dfe582b 100644
--- a/src/armnn/test/optimizations/OptimizeInverseConversionsTests.cpp
+++ b/src/armnn/test/optimizations/OptimizeInverseConversionsTests.cpp
@@ -7,14 +7,15 @@
#include <Optimizer.hpp>
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
using namespace armnn;
-BOOST_AUTO_TEST_SUITE(Optimizer)
+TEST_SUITE("Optimizer")
+{
using namespace armnn::optimizations;
-BOOST_AUTO_TEST_CASE(OptimizeInverseConversionsTest)
+TEST_CASE("OptimizeInverseConversionsTest")
{
armnn::Graph graph;
@@ -32,7 +33,7 @@ BOOST_AUTO_TEST_CASE(OptimizeInverseConversionsTest)
graph.InsertNewLayer<armnn::ConvertFp16ToFp32Layer>(output->GetInputSlot(0), "convert3");
graph.InsertNewLayer<armnn::ConvertFp32ToFp16Layer>(output->GetInputSlot(0), "convert4");
- BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
+ CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
&IsLayerOfType<armnn::ConvertFp32ToFp16Layer>,
&IsLayerOfType<armnn::ConvertFp16ToFp32Layer>, &IsLayerOfType<armnn::Convolution2dLayer>,
&IsLayerOfType<armnn::ConvertFp16ToFp32Layer>,
@@ -42,8 +43,8 @@ BOOST_AUTO_TEST_CASE(OptimizeInverseConversionsTest)
graph, armnn::MakeOptimizations(OptimizeInverseConversionsFp16(), OptimizeInverseConversionsFp32()));
// Check that all consecutive inverse conversions are removed
- BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
+ CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
&IsLayerOfType<armnn::Convolution2dLayer>, &IsLayerOfType<armnn::OutputLayer>));
}
-BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
+}
\ No newline at end of file
diff --git a/src/armnn/test/optimizations/OptimizeInversePermutesTests.cpp b/src/armnn/test/optimizations/OptimizeInversePermutesTests.cpp
index 0664ef73b8..98c84d4fc2 100644
--- a/src/armnn/test/optimizations/OptimizeInversePermutesTests.cpp
+++ b/src/armnn/test/optimizations/OptimizeInversePermutesTests.cpp
@@ -7,14 +7,15 @@
#include <Optimizer.hpp>
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
using namespace armnn;
-BOOST_AUTO_TEST_SUITE(Optimizer)
+TEST_SUITE("Optimizer")
+{
using namespace armnn::optimizations;
-BOOST_AUTO_TEST_CASE(OptimizeInversePermutesTest)
+TEST_CASE("OptimizeInversePermutesTest")
{
armnn::Graph graph;
@@ -28,18 +29,18 @@ BOOST_AUTO_TEST_CASE(OptimizeInversePermutesTest)
graph.InsertNewLayer<armnn::PermuteLayer>(output->GetInputSlot(0), armnn::PermuteDescriptor({ 0, 3, 1, 2 }),
"perm0312");
- BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
+ CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
&IsLayerOfType<armnn::PermuteLayer>, &IsLayerOfType<armnn::PermuteLayer>,
&IsLayerOfType<armnn::OutputLayer>));
armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(OptimizeInversePermutes()));
// The permutes are removed.
- BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
+ CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
&IsLayerOfType<armnn::OutputLayer>));
}
-BOOST_AUTO_TEST_CASE(OptimizeInverseTransposesTest)
+TEST_CASE("OptimizeInverseTransposesTest")
{
armnn::Graph graph;
@@ -55,15 +56,15 @@ BOOST_AUTO_TEST_CASE(OptimizeInverseTransposesTest)
armnn::TransposeDescriptor({ 0, 2, 3, 1 }),
"transpose0231");
- BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
+ CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
&IsLayerOfType<armnn::TransposeLayer>, &IsLayerOfType<armnn::TransposeLayer>,
&IsLayerOfType<armnn::OutputLayer>));
armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(OptimizeInverseTransposes()));
// The transposes are removed.
- BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
+ CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
&IsLayerOfType<armnn::OutputLayer>));
}
-BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
+}
\ No newline at end of file
diff --git a/src/armnn/test/optimizations/PermuteAndBatchToSpaceAsDepthToSpaceTests.cpp b/src/armnn/test/optimizations/PermuteAndBatchToSpaceAsDepthToSpaceTests.cpp
index ab990e7c82..e91e16f132 100644
--- a/src/armnn/test/optimizations/PermuteAndBatchToSpaceAsDepthToSpaceTests.cpp
+++ b/src/armnn/test/optimizations/PermuteAndBatchToSpaceAsDepthToSpaceTests.cpp
@@ -8,11 +8,12 @@
#include <Network.hpp>
#include <Optimizer.hpp>
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
using namespace armnn;
-BOOST_AUTO_TEST_SUITE(Optimizer)
+TEST_SUITE("Optimizer")
+{
using namespace armnn::optimizations;
namespace
@@ -83,13 +84,13 @@ std::unique_ptr<NetworkImpl> CreateTransposeTestNetworkImpl()
/// Tests that the optimization performed by PermuteAndBatchToSpaceAsDepthToSpace is as expected.
/// Note this does not ensure the correctness of the optimization - that is done in the test below.
-BOOST_AUTO_TEST_CASE(PermuteAndBatchToSpaceAsDepthToSpaceOptimizerTest)
+TEST_CASE("PermuteAndBatchToSpaceAsDepthToSpaceOptimizerTest")
{
std::unique_ptr<NetworkImpl> network = CreateTestNetworkImpl();
Graph graph = network.get()->GetGraph();
// Confirm initial graph is as we expect
- BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<InputLayer>, &IsLayerOfType<PermuteLayer>,
+ CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<InputLayer>, &IsLayerOfType<PermuteLayer>,
&IsLayerOfType<BatchToSpaceNdLayer>, &IsLayerOfType<OutputLayer>));
// Perform the optimization which should merge the two layers into a DepthToSpace
@@ -103,23 +104,23 @@ BOOST_AUTO_TEST_CASE(PermuteAndBatchToSpaceAsDepthToSpaceOptimizerTest)
layer->GetOutputHandler().GetTensorInfo() == TensorInfo({ 1, 4, 6, 1 }, DataType::Float32);
};
- BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<InputLayer>, checkDepthToSpace,
+ CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<InputLayer>, checkDepthToSpace,
&IsLayerOfType<OutputLayer>));
// Check the new layer has the two merged layers listed as related layers
std::list<std::string> testRelatedLayers = { "batchToSpace", "permute" };
- BOOST_TEST(CheckRelatedLayers<DepthToSpaceLayer>(graph, testRelatedLayers));
+ CHECK(CheckRelatedLayers<DepthToSpaceLayer>(graph, testRelatedLayers));
}
/// Tests that the optimization performed by PermuteAndBatchToSpaceAsDepthToSpace is as expected.
/// Note this does not ensure the correctness of the optimization - that is done in the test below.
-BOOST_AUTO_TEST_CASE(TransposeAndBatchToSpaceAsDepthToSpaceOptimizerTest)
+TEST_CASE("TransposeAndBatchToSpaceAsDepthToSpaceOptimizerTest")
{
std::unique_ptr<NetworkImpl> network = CreateTransposeTestNetworkImpl();
Graph graph = network.get()->GetGraph();
// Confirm initial graph is as we expect
- BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<InputLayer>, &IsLayerOfType<TransposeLayer>,
+ CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<InputLayer>, &IsLayerOfType<TransposeLayer>,
&IsLayerOfType<BatchToSpaceNdLayer>, &IsLayerOfType<OutputLayer>));
// Perform the optimization which should merge the two layers into a DepthToSpace
@@ -133,12 +134,12 @@ BOOST_AUTO_TEST_CASE(TransposeAndBatchToSpaceAsDepthToSpaceOptimizerTest)
layer->GetOutputHandler().GetTensorInfo() == TensorInfo({ 1, 4, 6, 1 }, DataType::Float32);
};
- BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<InputLayer>, checkDepthToSpace,
+ CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<InputLayer>, checkDepthToSpace,
&IsLayerOfType<OutputLayer>));
// Check the new layer has the two merged layers listed as related layers
std::list<std::string> testRelatedLayers = { "batchToSpace", "permute" };
- BOOST_TEST(CheckRelatedLayers<DepthToSpaceLayer>(graph, testRelatedLayers));
+ CHECK(CheckRelatedLayers<DepthToSpaceLayer>(graph, testRelatedLayers));
}
// This unit test needs the reference backend; it's not available if the reference backend is not built
@@ -208,7 +209,7 @@ INetworkPtr CreateTransposeTestNetwork()
/// Tests that an optimization performed by PermuteAndBatchToSpaceAsDepthToSpace does not change the behaviour
/// of the network (i.e. it still produces the correct output).
-BOOST_AUTO_TEST_CASE(PermuteAndBatchToSpaceAsDepthToSpaceCorrectnessTest)
+TEST_CASE("PermuteAndBatchToSpaceAsDepthToSpaceCorrectnessTest")
{
INetworkPtr network = CreateTestNetwork();
@@ -217,7 +218,7 @@ BOOST_AUTO_TEST_CASE(PermuteAndBatchToSpaceAsDepthToSpaceCorrectnessTest)
// Confirm that the optimization has actually taken place
const Graph& optGraph = GetGraphForTesting(optimizedNetwork.get());
- BOOST_TEST(CheckSequence(optGraph.cbegin(), optGraph.cend(), &IsLayerOfType<InputLayer>,
+ CHECK(CheckSequence(optGraph.cbegin(), optGraph.cend(), &IsLayerOfType<InputLayer>,
&IsLayerOfType<DepthToSpaceLayer>, &IsLayerOfType<OutputLayer>));
// Load the graph into a runtime so we can check it produces the correct output
@@ -250,12 +251,12 @@ BOOST_AUTO_TEST_CASE(PermuteAndBatchToSpaceAsDepthToSpaceCorrectnessTest)
-3.0f, -4.0f, -30.0f, -40.0f, -300.0f, -400.0f,
// clang-format on
};
- BOOST_TEST(outputData == expectedOutput);
+ CHECK(outputData == expectedOutput);
}
/// Tests that an optimization performed by PermuteAndBatchToSpaceAsDepthToSpace does not change the behaviour
/// of the network (i.e. it still produces the correct output).
-BOOST_AUTO_TEST_CASE(TransposeAndBatchToSpaceAsDepthToSpaceCorrectnessTest)
+TEST_CASE("TransposeAndBatchToSpaceAsDepthToSpaceCorrectnessTest")
{
INetworkPtr network = CreateTransposeTestNetwork();
@@ -264,7 +265,7 @@ BOOST_AUTO_TEST_CASE(TransposeAndBatchToSpaceAsDepthToSpaceCorrectnessTest)
// Confirm that the optimization has actually taken place
const Graph& optGraph = GetGraphForTesting(optimizedNetwork.get());
- BOOST_TEST(CheckSequence(optGraph.cbegin(), optGraph.cend(), &IsLayerOfType<InputLayer>,
+ CHECK(CheckSequence(optGraph.cbegin(), optGraph.cend(), &IsLayerOfType<InputLayer>,
&IsLayerOfType<DepthToSpaceLayer>, &IsLayerOfType<OutputLayer>));
// Load the graph into a runtime so we can check it produces the correct output
@@ -297,8 +298,8 @@ BOOST_AUTO_TEST_CASE(TransposeAndBatchToSpaceAsDepthToSpaceCorrectnessTest)
-3.0f, -4.0f, -30.0f, -40.0f, -300.0f, -400.0f,
// clang-format on
};
- BOOST_TEST(outputData == expectedOutput);
+ CHECK(outputData == expectedOutput);
}
#endif
-BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
+}
\ No newline at end of file
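For readers skimming the remaining files: every conversion in this patch follows the same mechanical mapping from Boost.Test to doctest. A minimal sketch of the pattern (test name hypothetical; the one structural difference is that doctest's TEST_SUITE is a braced scope rather than a begin/end macro pair, which is why each file gains an opening brace and its closing brace replaces BOOST_AUTO_TEST_SUITE_END()):

// Before (Boost.Test):
//     BOOST_AUTO_TEST_SUITE(Optimizer)
//     BOOST_AUTO_TEST_CASE(SomeTest) { BOOST_TEST(cond); BOOST_CHECK(cond); }
//     BOOST_AUTO_TEST_SUITE_END()
//
// After (doctest) -- the suite is now a real braced scope:
#include <doctest/doctest.h>

TEST_SUITE("Optimizer")
{
    TEST_CASE("SomeTest")
    {
        CHECK(1 + 1 == 2);   // BOOST_TEST and BOOST_CHECK both map to CHECK
    }
}   // replaces BOOST_AUTO_TEST_SUITE_END()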
diff --git a/src/armnn/test/optimizations/PermuteAsReshapeTests.cpp b/src/armnn/test/optimizations/PermuteAsReshapeTests.cpp
index 3f3c254d9a..fdd0a6ddd3 100644
--- a/src/armnn/test/optimizations/PermuteAsReshapeTests.cpp
+++ b/src/armnn/test/optimizations/PermuteAsReshapeTests.cpp
@@ -7,14 +7,15 @@
#include <Optimizer.hpp>
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
using namespace armnn;
-BOOST_AUTO_TEST_SUITE(Optimizer)
+TEST_SUITE("Optimizer")
+{
using namespace armnn::optimizations;
-BOOST_AUTO_TEST_CASE(PermuteAsReshapeTest)
+TEST_CASE("PermuteAsReshapeTest")
{
armnn::Graph graph;
@@ -36,7 +37,7 @@ BOOST_AUTO_TEST_CASE(PermuteAsReshapeTest)
->GetOutputHandler()
.SetTensorInfo(infoOut);
- BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
+ CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
&IsLayerOfType<armnn::PermuteLayer>, &IsLayerOfType<armnn::OutputLayer>));
armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(PermuteAsReshape()));
@@ -50,11 +51,11 @@ BOOST_AUTO_TEST_CASE(PermuteAsReshapeTest)
(reshapeLayer->GetOutputHandler().GetTensorInfo().GetShape() == infoOut.GetShape());
};
- BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>, checkReshape,
+ CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>, checkReshape,
&IsLayerOfType<armnn::OutputLayer>));
std::list<std::string> testRelatedLayers = { permuteLayerName };
- BOOST_TEST(CheckRelatedLayers<armnn::ReshapeLayer>(graph, testRelatedLayers));
+ CHECK(CheckRelatedLayers<armnn::ReshapeLayer>(graph, testRelatedLayers));
}
-BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
+}
\ No newline at end of file
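The pass exercised above rewrites a PermuteLayer into a ReshapeLayer carrying the permuted output shape. The checkReshape predicate only compares shapes, which works because the rewrite is assumed to fire only when the permutation leaves the row-major element order intact, e.g. when it merely relocates size-1 axes. A plain-C++ illustration of that invariant (not part of the patch; shapes are illustrative):

#include <cassert>
#include <vector>

int main()
{
    // A {1, 2, 3} tensor and its {2, 3, 1} permutation share one memory
    // layout: element (0, h, w) and element (h, w, 0) both linearise to
    // flat index h * 3 + w, so no data moves and a reshape is equivalent.
    const std::vector<float> data = { 0, 1, 2, 3, 4, 5 };
    for (int h = 0; h < 2; ++h)
    {
        for (int w = 0; w < 3; ++w)
        {
            assert(data[h * 3 + w] == data[(h * 3 + w) * 1 + 0]);
        }
    }
}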
diff --git a/src/armnn/test/optimizations/ReduceMultipleAxesTests.cpp b/src/armnn/test/optimizations/ReduceMultipleAxesTests.cpp
index b42c0a2cfb..df9a0dbc39 100644
--- a/src/armnn/test/optimizations/ReduceMultipleAxesTests.cpp
+++ b/src/armnn/test/optimizations/ReduceMultipleAxesTests.cpp
@@ -8,12 +8,12 @@
#include <armnn/INetwork.hpp>
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
using namespace armnn;
-BOOST_AUTO_TEST_SUITE(Optimizer)
-
+TEST_SUITE("Optimizer_ReduceMultipleAxes")
+{
INetworkPtr CreateSimpleReduceNetwork(ReduceDescriptor reduceDescriptor,
TensorShape& inputShape,
TensorShape& outputShape)
@@ -22,10 +22,10 @@ INetworkPtr CreateSimpleReduceNetwork(ReduceDescriptor reduceDescriptor,
INetworkPtr network = INetwork::Create();
const std::string layerName("reduce_layer");
- const TensorInfo inputInfo (inputShape, DataType::Float32);
+ const TensorInfo inputInfo(inputShape, DataType::Float32);
const TensorInfo outputInfo(outputShape, DataType::Float32);
- IConnectableLayer* const inputLayer = network->AddInputLayer(0);
+ IConnectableLayer* const inputLayer = network->AddInputLayer(0);
IConnectableLayer* const reduceLayer = network->AddReduceLayer(reduceDescriptor, layerName.c_str());
IConnectableLayer* const outputLayer1 = network->AddOutputLayer(0);
IConnectableLayer* const outputLayer2 = network->AddOutputLayer(1);
@@ -56,37 +56,36 @@ void ReduceWithMultipleAxesTest(INetworkPtr& network,
Graph& graph = GetGraphForTesting(optNet.get());
if (numOfAxes == 2)
{
- BOOST_CHECK(graph.GetNumLayers() == 5);
- BOOST_TEST(CheckSequence(graph.cbegin(),
- graph.cend(),
- &IsLayerOfType<InputLayer>,
- &IsLayerOfType<ReduceLayer>,
- &IsLayerOfType<ReduceLayer>,
- &IsLayerOfType<OutputLayer>,
- &IsLayerOfType<OutputLayer>));
- }
- else
+ CHECK(graph.GetNumLayers() == 5);
+ CHECK(CheckSequence(graph.cbegin(),
+ graph.cend(),
+ &IsLayerOfType<InputLayer>,
+ &IsLayerOfType<ReduceLayer>,
+ &IsLayerOfType<ReduceLayer>,
+ &IsLayerOfType<OutputLayer>,
+ &IsLayerOfType<OutputLayer>));
+ }
+ else
{
- BOOST_CHECK(graph.GetNumLayers() == 6);
- BOOST_TEST(CheckSequence(graph.cbegin(),
- graph.cend(),
- &IsLayerOfType<InputLayer>,
- &IsLayerOfType<ReduceLayer>,
- &IsLayerOfType<ReduceLayer>,
- &IsLayerOfType<ReduceLayer>,
- &IsLayerOfType<OutputLayer>,
- &IsLayerOfType<OutputLayer>));
+ CHECK(graph.GetNumLayers() == 6);
+ CHECK(CheckSequence(graph.cbegin(),
+ graph.cend(),
+ &IsLayerOfType<InputLayer>,
+ &IsLayerOfType<ReduceLayer>,
+ &IsLayerOfType<ReduceLayer>,
+ &IsLayerOfType<ReduceLayer>,
+ &IsLayerOfType<OutputLayer>,
+ &IsLayerOfType<OutputLayer>));
}
// Get the last layer in the new chain; layer names follow the 0, 1, 2 pattern
std::string layerName = "reduce_layer_" + std::to_string(numOfAxes - 1);
Layer* const reduceLayer = GetFirstLayerWithName(graph, layerName);
- BOOST_TEST(reduceLayer);
+ CHECK(reduceLayer);
auto reduceTensorInfo = reduceLayer->GetOutputSlot().GetTensorInfo();
// Tensor shape and data type are correct
- BOOST_TEST((reduceTensorInfo.GetShape() == outputShape));
- BOOST_TEST((reduceTensorInfo.GetDataType() == DataType::Float32));
+ CHECK((reduceTensorInfo.GetShape() == outputShape));
+ CHECK((reduceTensorInfo.GetDataType() == DataType::Float32));
// Load network into runtime
NetworkId networkIdentifier;
@@ -95,45 +94,45 @@ void ReduceWithMultipleAxesTest(INetworkPtr& network,
// Create input and output tensors
std::vector<float> outputData(expectedOutput.size());
InputTensors inputTensors
- {
- { 0, armnn::ConstTensor(run->GetInputTensorInfo(networkIdentifier, 0), inputData.data()) }
- };
+ {
+ {0, armnn::ConstTensor(run->GetInputTensorInfo(networkIdentifier, 0), inputData.data())}
+ };
OutputTensors outputTensors
- {
- { 0, armnn::Tensor(run->GetOutputTensorInfo(networkIdentifier, 0), outputData.data()) },
- { 1, armnn::Tensor(run->GetOutputTensorInfo(networkIdentifier, 1), outputData.data()) }
- };
+ {
+ {0, armnn::Tensor(run->GetOutputTensorInfo(networkIdentifier, 0), outputData.data())},
+ {1, armnn::Tensor(run->GetOutputTensorInfo(networkIdentifier, 1), outputData.data())}
+ };
// Run inference
run->EnqueueWorkload(networkIdentifier, inputTensors, outputTensors);
// Checks the results
- BOOST_TEST(outputData == expectedOutput);
+ CHECK(outputData == expectedOutput);
}
void ReduceSumWithTwoAxesKeepDimsTest(Compute backendId)
{
armnn::ReduceDescriptor reduceDescriptor;
- reduceDescriptor.m_vAxis = { 1, 2 };
+ reduceDescriptor.m_vAxis = {1, 2};
reduceDescriptor.m_KeepDims = true;
reduceDescriptor.m_ReduceOperation = armnn::ReduceOperation::Sum;
- TensorShape inputShape = { 1, 3, 2, 4 };
- TensorShape outputShape = { 1, 1, 1, 4 };
+ TensorShape inputShape = {1, 3, 2, 4};
+ TensorShape outputShape = {1, 1, 1, 4};
// Construct ArmNN network
INetworkPtr network = CreateSimpleReduceNetwork(reduceDescriptor, inputShape, outputShape);
// Creates structures for input & output.
- const std::vector<float> inputData({ 1.0f, 2.0f, 3.0f, 4.0f,
- 5.0f, 6.0f, 7.0f, 8.0f,
+ const std::vector<float> inputData({1.0f, 2.0f, 3.0f, 4.0f,
+ 5.0f, 6.0f, 7.0f, 8.0f,
- 10.0f, 20.0f, 30.0f, 40.0f,
- 50.0f, 60.0f, 70.0f, 80.0f,
+ 10.0f, 20.0f, 30.0f, 40.0f,
+ 50.0f, 60.0f, 70.0f, 80.0f,
- 100.0f, 200.0f, 300.0f, 400.0f,
- 500.0f, 600.0f, 700.0f, 800.0f });
- const std::vector<float> expectedOutput({ 666.0f, 888.0f, 1110.0f, 1332.0f });
+ 100.0f, 200.0f, 300.0f, 400.0f,
+ 500.0f, 600.0f, 700.0f, 800.0f});
+ const std::vector<float> expectedOutput({666.0f, 888.0f, 1110.0f, 1332.0f});
ReduceWithMultipleAxesTest(network,
outputShape,
@@ -146,26 +145,26 @@ void ReduceSumWithTwoAxesKeepDimsTest(Compute backendId)
void ReduceSumWithTwoAxesTest(Compute backendId)
{
armnn::ReduceDescriptor reduceDescriptor;
- reduceDescriptor.m_vAxis = { 1, 2 };
+ reduceDescriptor.m_vAxis = {1, 2};
reduceDescriptor.m_KeepDims = false;
reduceDescriptor.m_ReduceOperation = armnn::ReduceOperation::Sum;
- TensorShape inputShape = { 1, 3, 2, 4 };
- TensorShape outputShape = { 1, 4 };
+ TensorShape inputShape = {1, 3, 2, 4};
+ TensorShape outputShape = {1, 4};
// Construct ArmNN network
INetworkPtr network = CreateSimpleReduceNetwork(reduceDescriptor, inputShape, outputShape);
// Creates structures for input & output.
- const std::vector<float> inputData({ 1.0f, 2.0f, 3.0f, 4.0f,
- 5.0f, 6.0f, 7.0f, 8.0f,
+ const std::vector<float> inputData({1.0f, 2.0f, 3.0f, 4.0f,
+ 5.0f, 6.0f, 7.0f, 8.0f,
- 10.0f, 20.0f, 30.0f, 40.0f,
- 50.0f, 60.0f, 70.0f, 80.0f,
+ 10.0f, 20.0f, 30.0f, 40.0f,
+ 50.0f, 60.0f, 70.0f, 80.0f,
- 100.0f, 200.0f, 300.0f, 400.0f,
- 500.0f, 600.0f, 700.0f, 800.0f });
- const std::vector<float> expectedOutput({ 666.0f, 888.0f, 1110.0f, 1332.0f });
+ 100.0f, 200.0f, 300.0f, 400.0f,
+ 500.0f, 600.0f, 700.0f, 800.0f});
+ const std::vector<float> expectedOutput({666.0f, 888.0f, 1110.0f, 1332.0f});
ReduceWithMultipleAxesTest(network,
outputShape,
@@ -178,29 +177,29 @@ void ReduceSumWithTwoAxesTest(Compute backendId)
void ReduceSumWithThreeAxesKeepDimsTest(Compute backendId)
{
armnn::ReduceDescriptor reduceDescriptor;
- reduceDescriptor.m_vAxis = { 0, 2, 3 };
+ reduceDescriptor.m_vAxis = {0, 2, 3};
reduceDescriptor.m_KeepDims = true;
reduceDescriptor.m_ReduceOperation = armnn::ReduceOperation::Sum;
- TensorShape inputShape = { 2, 2, 2, 2 };
- TensorShape outputShape = { 1, 2, 1, 1 };
+ TensorShape inputShape = {2, 2, 2, 2};
+ TensorShape outputShape = {1, 2, 1, 1};
// Construct ArmNN network
INetworkPtr network = CreateSimpleReduceNetwork(reduceDescriptor, inputShape, outputShape);
// Creates structures for input & output.
- const std::vector<float> inputData({ 1.0f, 2.0f,
- 3.0f, 4.0f,
+ const std::vector<float> inputData({1.0f, 2.0f,
+ 3.0f, 4.0f,
- 5.0f, 6.0f,
- 7.0f, 8.0f,
+ 5.0f, 6.0f,
+ 7.0f, 8.0f,
- 10.0f, 20.0f,
- 30.0f, 40.0f,
+ 10.0f, 20.0f,
+ 30.0f, 40.0f,
- 50.0f, 60.0f,
- 70.0f, 80.0f });
- const std::vector<float> expectedOutput({ 110.0f, 286.0f });
+ 50.0f, 60.0f,
+ 70.0f, 80.0f});
+ const std::vector<float> expectedOutput({110.0f, 286.0f});
ReduceWithMultipleAxesTest(network,
outputShape,
@@ -213,29 +212,29 @@ void ReduceSumWithThreeAxesKeepDimsTest(Compute backendId)
void ReduceSumWithThreeAxesTest(Compute backendId)
{
armnn::ReduceDescriptor reduceDescriptor;
- reduceDescriptor.m_vAxis = { 0, 2, 3 };
+ reduceDescriptor.m_vAxis = {0, 2, 3};
reduceDescriptor.m_KeepDims = false;
reduceDescriptor.m_ReduceOperation = armnn::ReduceOperation::Sum;
- TensorShape inputShape = { 2, 2, 2, 2 };
- TensorShape outputShape = { 2 };
+ TensorShape inputShape = {2, 2, 2, 2};
+ TensorShape outputShape = {2};
// Construct ArmNN network
INetworkPtr network = CreateSimpleReduceNetwork(reduceDescriptor, inputShape, outputShape);
// Creates structures for input & output.
- const std::vector<float> inputData({ 1.0f, 2.0f,
- 3.0f, 4.0f,
+ const std::vector<float> inputData({1.0f, 2.0f,
+ 3.0f, 4.0f,
- 5.0f, 6.0f,
- 7.0f, 8.0f,
+ 5.0f, 6.0f,
+ 7.0f, 8.0f,
- 10.0f, 20.0f,
- 30.0f, 40.0f,
+ 10.0f, 20.0f,
+ 30.0f, 40.0f,
- 50.0f, 60.0f,
- 70.0f, 80.0f });
- const std::vector<float> expectedOutput({ 110.0f, 286.0f });
+ 50.0f, 60.0f,
+ 70.0f, 80.0f});
+ const std::vector<float> expectedOutput({110.0f, 286.0f});
ReduceWithMultipleAxesTest(network,
outputShape,
@@ -247,47 +246,47 @@ void ReduceSumWithThreeAxesTest(Compute backendId)
using namespace armnn;
#if defined(ARMCOMPUTENEON_ENABLED)
-BOOST_AUTO_TEST_CASE(ReduceSumWithTwoAxesKeepDimsCpuAccTest)
+TEST_CASE("ReduceSumWithTwoAxesKeepDimsCpuAccTest")
{
ReduceSumWithTwoAxesKeepDimsTest(Compute::CpuAcc);
}
-BOOST_AUTO_TEST_CASE(ReduceSumWithTwoAxesCpuAccTest)
+TEST_CASE("ReduceSumWithTwoAxesCpuAccTest")
{
ReduceSumWithTwoAxesTest(Compute::CpuAcc);
}
-BOOST_AUTO_TEST_CASE(ReduceSumWithThreeAxesKeepDimsCpuAccTest)
+TEST_CASE("ReduceSumWithThreeAxesKeepDimsCpuAccTest")
{
ReduceSumWithThreeAxesKeepDimsTest(Compute::CpuAcc);
}
-BOOST_AUTO_TEST_CASE(ReduceSumWithThreeAxesCpuAccTest)
+TEST_CASE("ReduceSumWithThreeAxesCpuAccTest")
{
ReduceSumWithThreeAxesTest(Compute::CpuAcc);
}
#endif
#if defined(ARMCOMPUTECL_ENABLED)
-BOOST_AUTO_TEST_CASE(ReduceSumWithTwoAxesKeepDimsGpuAccTest)
+TEST_CASE("ReduceSumWithTwoAxesKeepDimsGpuAccTest")
{
ReduceSumWithTwoAxesKeepDimsTest(Compute::GpuAcc);
}
-BOOST_AUTO_TEST_CASE(ReduceSumWithTwoAxesGpuAccTest)
+TEST_CASE("ReduceSumWithTwoAxesGpuAccTest")
{
ReduceSumWithTwoAxesTest(Compute::GpuAcc);
}
-BOOST_AUTO_TEST_CASE(ReduceSumWithThreeAxesKeepDimsGpuAccTest)
+TEST_CASE("ReduceSumWithThreeAxesKeepDimsGpuAccTest")
{
ReduceSumWithThreeAxesKeepDimsTest(Compute::GpuAcc);
}
-BOOST_AUTO_TEST_CASE(ReduceSumWithThreeAxesGpuAccTest)
+TEST_CASE("ReduceSumWithThreeAxesGpuAccTest")
{
ReduceSumWithThreeAxesTest(Compute::GpuAcc);
}
#endif
-BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
+}
\ No newline at end of file
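The expected-output constants above are straightforward to verify by hand, since the optimizer merely splits one multi-axis Reduce into the chain of single-axis ReduceLayers checked earlier (hence the reduce_layer_0, reduce_layer_1, ... names). For the two-axes cases, the {1, 3, 2, 4} input is summed over axes 1 and 2, so each of the four innermost positions accumulates 3 * 2 = 6 values. A standalone cross-check (plain C++, not part of the patch):

#include <cassert>
#include <cstddef>
#include <vector>

int main()
{
    // Row-major {1, 3, 2, 4} input: the last axis is contiguous, so
    // summing over axes {1, 2} groups elements by flat index % 4.
    const std::vector<float> in = {   1.0f,   2.0f,   3.0f,   4.0f,
                                      5.0f,   6.0f,   7.0f,   8.0f,
                                     10.0f,  20.0f,  30.0f,  40.0f,
                                     50.0f,  60.0f,  70.0f,  80.0f,
                                    100.0f, 200.0f, 300.0f, 400.0f,
                                    500.0f, 600.0f, 700.0f, 800.0f };
    std::vector<float> out(4, 0.0f);
    for (std::size_t i = 0; i < in.size(); ++i)
    {
        out[i % 4] += in[i];
    }
    assert((out == std::vector<float>{ 666.0f, 888.0f, 1110.0f, 1332.0f }));
}

The same hand calculation for the three-axes cases ({2, 2, 2, 2} summed over axes {0, 2, 3}) gives 1+2+3+4+10+20+30+40 = 110 and 5+6+7+8+50+60+70+80 = 286, matching the {110, 286} constants.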
diff --git a/src/armnn/test/optimizations/SquashEqualSiblingsTests.cpp b/src/armnn/test/optimizations/SquashEqualSiblingsTests.cpp
index 1c97267d89..069d28457e 100644
--- a/src/armnn/test/optimizations/SquashEqualSiblingsTests.cpp
+++ b/src/armnn/test/optimizations/SquashEqualSiblingsTests.cpp
@@ -7,14 +7,15 @@
#include <Optimizer.hpp>
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
using namespace armnn;
-BOOST_AUTO_TEST_SUITE(Optimizer)
+TEST_SUITE("Optimizer")
+{
using namespace armnn::optimizations;
-BOOST_AUTO_TEST_CASE(SquashEqualSiblingsTest)
+TEST_CASE("SquashEqualSiblingsTest")
{
armnn::Graph graph;
@@ -54,7 +55,7 @@ BOOST_AUTO_TEST_CASE(SquashEqualSiblingsTest)
layer->GetOutputSlot().Connect(graph.AddLayer<armnn::OutputLayer>(outputId++, "")->GetInputSlot(0));
input->GetOutputSlot().Connect(layer->GetInputSlot(0));
- BOOST_TEST(CheckSequence(
+ CHECK(CheckSequence(
graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>, &IsLayerOfType<armnn::PermuteLayer>,
&IsLayerOfType<armnn::ReshapeLayer>, &IsLayerOfType<armnn::FloorLayer>, &IsLayerOfType<armnn::ReshapeLayer>,
&IsLayerOfType<armnn::PermuteLayer>, &IsLayerOfType<armnn::OutputLayer>, &IsLayerOfType<armnn::OutputLayer>,
@@ -64,11 +65,11 @@ BOOST_AUTO_TEST_CASE(SquashEqualSiblingsTest)
// The permutes and reshapes are squashed.
- BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
+ CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
&IsLayerOfType<armnn::PermuteLayer>, &IsLayerOfType<armnn::ReshapeLayer>,
&IsLayerOfType<armnn::FloorLayer>, &IsLayerOfType<armnn::OutputLayer>,
&IsLayerOfType<armnn::OutputLayer>, &IsLayerOfType<armnn::OutputLayer>,
&IsLayerOfType<armnn::OutputLayer>, &IsLayerOfType<armnn::OutputLayer>));
}
-BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
+}
\ No newline at end of file
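As the two CheckSequence calls above encode, the input initially fans out to five sibling branches (two identical Permutes, two identical Reshapes, and a Floor), and after the pass only one Permute and one Reshape survive, with all five outputs rewired onto the survivors. A toy sketch of the squashing idea (plain C++, deliberately not ArmNN's implementation, which compares real Layer objects):

#include <algorithm>
#include <cassert>
#include <string>
#include <vector>

int main()
{
    // Five siblings of one input, identified by (type, parameters):
    const std::vector<std::string> siblings = { "Permute[0,2,3,1]", "Reshape{1,6}",
                                                "Floor", "Reshape{1,6}",
                                                "Permute[0,2,3,1]" };
    std::vector<std::string> squashed;
    for (const auto& s : siblings)
    {
        // Keep only the first of each group of equal siblings; in the real
        // pass the duplicates' consumers are rewired onto the survivor.
        if (std::find(squashed.begin(), squashed.end(), s) == squashed.end())
        {
            squashed.push_back(s);
        }
    }
    assert(squashed.size() == 3);   // one Permute, one Reshape, one Floor
}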
diff --git a/src/armnn/test/optimizations/TransposeAsReshapeTests.cpp b/src/armnn/test/optimizations/TransposeAsReshapeTests.cpp
index 1c9f15ce8d..5d1d950573 100644
--- a/src/armnn/test/optimizations/TransposeAsReshapeTests.cpp
+++ b/src/armnn/test/optimizations/TransposeAsReshapeTests.cpp
@@ -7,14 +7,15 @@
#include <Optimizer.hpp>
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
using namespace armnn;
-BOOST_AUTO_TEST_SUITE(Optimizer)
+TEST_SUITE("Optimizer")
+{
using namespace armnn::optimizations;
-BOOST_AUTO_TEST_CASE(TransposeAsReshapeTest)
+TEST_CASE("TransposeAsReshapeTest")
{
armnn::Graph graph;
@@ -36,7 +37,7 @@ BOOST_AUTO_TEST_CASE(TransposeAsReshapeTest)
->GetOutputHandler()
.SetTensorInfo(infoOut);
- BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
+ CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
&IsLayerOfType<armnn::TransposeLayer>, &IsLayerOfType<armnn::OutputLayer>));
armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(TransposeAsReshape()));
@@ -50,11 +51,11 @@ BOOST_AUTO_TEST_CASE(TransposeAsReshapeTest)
(reshapeLayer->GetOutputHandler().GetTensorInfo().GetShape() == infoOut.GetShape());
};
- BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>, checkReshape,
+ CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>, checkReshape,
&IsLayerOfType<armnn::OutputLayer>));
std::list<std::string> testRelatedLayers = { transposeLayerName };
- BOOST_TEST(CheckRelatedLayers<armnn::ReshapeLayer>(graph, testRelatedLayers));
+ CHECK(CheckRelatedLayers<armnn::ReshapeLayer>(graph, testRelatedLayers));
}
-BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
+}
\ No newline at end of file
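One build-level caveat worth noting: unlike Boost.Test, whose runner comes from the library, doctest expects exactly one translation unit in each test binary to provide the implementation and entry point. The suites converted here contain only cases, so that TU presumably lives elsewhere in the patch; a minimal sketch (file name hypothetical):

// UnitTestsMain.cpp (hypothetical) -- define in exactly one TU per binary:
#define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN
#include <doctest/doctest.h>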