aboutsummaryrefslogtreecommitdiff
path: root/src/armnn/test
diff options
context:
space:
mode:
Diffstat (limited to 'src/armnn/test')
-rw-r--r--src/armnn/test/ConstTensorLayerVisitor.cpp150
-rw-r--r--src/armnn/test/CreateWorkload.hpp557
-rw-r--r--src/armnn/test/DebugCallbackTest.cpp18
-rw-r--r--src/armnn/test/EndToEndTest.cpp14
-rw-r--r--src/armnn/test/ExecutionFrameTest.cpp15
-rw-r--r--src/armnn/test/FloatingPointConverterTest.cpp26
-rw-r--r--src/armnn/test/FlowControl.cpp14
-rw-r--r--src/armnn/test/GraphTests.cpp198
-rw-r--r--src/armnn/test/InferOutputTests.cpp8
-rw-r--r--src/armnn/test/InferOutputTests.hpp98
-rw-r--r--src/armnn/test/InstrumentTests.cpp24
-rw-r--r--src/armnn/test/ModelAccuracyCheckerTest.cpp22
-rw-r--r--src/armnn/test/NetworkTests.cpp244
-rw-r--r--src/armnn/test/ObservableTest.cpp32
-rw-r--r--src/armnn/test/OptimizerTests.cpp108
-rw-r--r--src/armnn/test/OptionalTest.cpp106
-rw-r--r--src/armnn/test/ProfilerTests.cpp72
-rw-r--r--src/armnn/test/ProfilingEventTest.cpp32
-rw-r--r--src/armnn/test/RuntimeTests.cpp265
-rw-r--r--src/armnn/test/ShapeInferenceTests.cpp127
-rw-r--r--src/armnn/test/SubgraphViewTests.cpp226
-rw-r--r--src/armnn/test/TensorHandleStrategyTest.cpp57
-rw-r--r--src/armnn/test/TensorHelpers.hpp2
-rw-r--r--src/armnn/test/TensorTest.cpp380
-rw-r--r--src/armnn/test/TestInputOutputLayerVisitor.cpp16
-rw-r--r--src/armnn/test/TestInputOutputLayerVisitor.hpp4
-rw-r--r--src/armnn/test/TestLayerVisitor.cpp23
-rw-r--r--src/armnn/test/TestNameAndDescriptorLayerVisitor.cpp136
-rw-r--r--src/armnn/test/TestNameAndDescriptorLayerVisitor.hpp3
-rw-r--r--src/armnn/test/TestNameOnlyLayerVisitor.cpp61
-rw-r--r--src/armnn/test/UnitTests.cpp37
-rw-r--r--src/armnn/test/UnitTests.hpp42
-rw-r--r--src/armnn/test/UtilityTests.cpp188
-rw-r--r--src/armnn/test/UtilsTests.cpp156
-rw-r--r--src/armnn/test/optimizations/AddBroadcastReshapeLayerTests.cpp95
-rw-r--r--src/armnn/test/optimizations/ConvertConstantsBFloatTests.cpp51
-rw-r--r--src/armnn/test/optimizations/ConvertConstantsFloatToHalfTests.cpp21
-rw-r--r--src/armnn/test/optimizations/ConvertConstantsHalfToFloatTests.cpp21
-rw-r--r--src/armnn/test/optimizations/FoldPadTests.cpp67
-rw-r--r--src/armnn/test/optimizations/Fp32NetworkToBf16ConverterTests.cpp77
-rw-r--r--src/armnn/test/optimizations/Fp32NetworkToFp16ConverterTests.cpp13
-rw-r--r--src/armnn/test/optimizations/FuseActivationTests.cpp242
-rw-r--r--src/armnn/test/optimizations/FuseBatchNormTests.cpp31
-rw-r--r--src/armnn/test/optimizations/InsertDebugLayerTests.cpp13
-rw-r--r--src/armnn/test/optimizations/MovePermuteUpTests.cpp15
-rw-r--r--src/armnn/test/optimizations/MoveTransposeUpTests.cpp15
-rw-r--r--src/armnn/test/optimizations/OptimizeConsecutiveReshapesTests.cpp17
-rw-r--r--src/armnn/test/optimizations/OptimizeInverseConversionsTests.cpp13
-rw-r--r--src/armnn/test/optimizations/OptimizeInversePermutesTests.cpp19
-rw-r--r--src/armnn/test/optimizations/PermuteAndBatchToSpaceAsDepthToSpaceTests.cpp35
-rw-r--r--src/armnn/test/optimizations/PermuteAsReshapeTests.cpp15
-rw-r--r--src/armnn/test/optimizations/ReduceMultipleAxesTests.cpp175
-rw-r--r--src/armnn/test/optimizations/SquashEqualSiblingsTests.cpp13
-rw-r--r--src/armnn/test/optimizations/TransposeAsReshapeTests.cpp15
54 files changed, 2251 insertions, 2173 deletions
diff --git a/src/armnn/test/ConstTensorLayerVisitor.cpp b/src/armnn/test/ConstTensorLayerVisitor.cpp
index f3485c704b..baafcf41ef 100644
--- a/src/armnn/test/ConstTensorLayerVisitor.cpp
+++ b/src/armnn/test/ConstTensorLayerVisitor.cpp
@@ -6,56 +6,56 @@
#include "ConstTensorLayerVisitor.hpp"
#include "Network.hpp"
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
namespace armnn
{
void TestConvolution2dLayerVisitor::CheckDescriptor(const Convolution2dDescriptor &convolution2dDescriptor)
{
- BOOST_CHECK(m_Descriptor.m_PadLeft == convolution2dDescriptor.m_PadLeft);
- BOOST_CHECK(m_Descriptor.m_PadRight == convolution2dDescriptor.m_PadRight);
- BOOST_CHECK(m_Descriptor.m_PadTop == convolution2dDescriptor.m_PadTop);
- BOOST_CHECK(m_Descriptor.m_PadBottom == convolution2dDescriptor.m_PadBottom);
- BOOST_CHECK(m_Descriptor.m_StrideX == convolution2dDescriptor.m_StrideX);
- BOOST_CHECK(m_Descriptor.m_StrideY == convolution2dDescriptor.m_StrideY);
- BOOST_CHECK(m_Descriptor.m_BiasEnabled == convolution2dDescriptor.m_BiasEnabled);
- BOOST_CHECK(m_Descriptor.m_DataLayout == convolution2dDescriptor.m_DataLayout);
+ CHECK(m_Descriptor.m_PadLeft == convolution2dDescriptor.m_PadLeft);
+ CHECK(m_Descriptor.m_PadRight == convolution2dDescriptor.m_PadRight);
+ CHECK(m_Descriptor.m_PadTop == convolution2dDescriptor.m_PadTop);
+ CHECK(m_Descriptor.m_PadBottom == convolution2dDescriptor.m_PadBottom);
+ CHECK(m_Descriptor.m_StrideX == convolution2dDescriptor.m_StrideX);
+ CHECK(m_Descriptor.m_StrideY == convolution2dDescriptor.m_StrideY);
+ CHECK(m_Descriptor.m_BiasEnabled == convolution2dDescriptor.m_BiasEnabled);
+ CHECK(m_Descriptor.m_DataLayout == convolution2dDescriptor.m_DataLayout);
}
void TestDepthwiseConvolution2dLayerVisitor::CheckDescriptor(
const DepthwiseConvolution2dDescriptor& convolution2dDescriptor)
{
- BOOST_CHECK(m_Descriptor.m_PadLeft == convolution2dDescriptor.m_PadLeft);
- BOOST_CHECK(m_Descriptor.m_PadRight == convolution2dDescriptor.m_PadRight);
- BOOST_CHECK(m_Descriptor.m_PadTop == convolution2dDescriptor.m_PadTop);
- BOOST_CHECK(m_Descriptor.m_PadBottom == convolution2dDescriptor.m_PadBottom);
- BOOST_CHECK(m_Descriptor.m_StrideX == convolution2dDescriptor.m_StrideX);
- BOOST_CHECK(m_Descriptor.m_StrideY == convolution2dDescriptor.m_StrideY);
- BOOST_CHECK(m_Descriptor.m_BiasEnabled == convolution2dDescriptor.m_BiasEnabled);
- BOOST_CHECK(m_Descriptor.m_DataLayout == convolution2dDescriptor.m_DataLayout);
+ CHECK(m_Descriptor.m_PadLeft == convolution2dDescriptor.m_PadLeft);
+ CHECK(m_Descriptor.m_PadRight == convolution2dDescriptor.m_PadRight);
+ CHECK(m_Descriptor.m_PadTop == convolution2dDescriptor.m_PadTop);
+ CHECK(m_Descriptor.m_PadBottom == convolution2dDescriptor.m_PadBottom);
+ CHECK(m_Descriptor.m_StrideX == convolution2dDescriptor.m_StrideX);
+ CHECK(m_Descriptor.m_StrideY == convolution2dDescriptor.m_StrideY);
+ CHECK(m_Descriptor.m_BiasEnabled == convolution2dDescriptor.m_BiasEnabled);
+ CHECK(m_Descriptor.m_DataLayout == convolution2dDescriptor.m_DataLayout);
}
void TestFullyConnectedLayerVistor::CheckDescriptor(const FullyConnectedDescriptor& descriptor)
{
- BOOST_CHECK(m_Descriptor.m_BiasEnabled == descriptor.m_BiasEnabled);
- BOOST_CHECK(m_Descriptor.m_TransposeWeightMatrix == descriptor.m_TransposeWeightMatrix);
+ CHECK(m_Descriptor.m_BiasEnabled == descriptor.m_BiasEnabled);
+ CHECK(m_Descriptor.m_TransposeWeightMatrix == descriptor.m_TransposeWeightMatrix);
}
void TestBatchNormalizationLayerVisitor::CheckDescriptor(const BatchNormalizationDescriptor& descriptor)
{
- BOOST_CHECK(m_Descriptor.m_Eps == descriptor.m_Eps);
- BOOST_CHECK(m_Descriptor.m_DataLayout == descriptor.m_DataLayout);
+ CHECK(m_Descriptor.m_Eps == descriptor.m_Eps);
+ CHECK(m_Descriptor.m_DataLayout == descriptor.m_DataLayout);
}
void TestLstmLayerVisitor::CheckDescriptor(const LstmDescriptor& descriptor)
{
- BOOST_CHECK(m_Descriptor.m_ActivationFunc == descriptor.m_ActivationFunc);
- BOOST_CHECK(m_Descriptor.m_ClippingThresCell == descriptor.m_ClippingThresCell);
- BOOST_CHECK(m_Descriptor.m_ClippingThresProj == descriptor.m_ClippingThresProj);
- BOOST_CHECK(m_Descriptor.m_CifgEnabled == descriptor.m_CifgEnabled);
- BOOST_CHECK(m_Descriptor.m_PeepholeEnabled == descriptor.m_PeepholeEnabled);
- BOOST_CHECK(m_Descriptor.m_ProjectionEnabled == descriptor.m_ProjectionEnabled);
+ CHECK(m_Descriptor.m_ActivationFunc == descriptor.m_ActivationFunc);
+ CHECK(m_Descriptor.m_ClippingThresCell == descriptor.m_ClippingThresCell);
+ CHECK(m_Descriptor.m_ClippingThresProj == descriptor.m_ClippingThresProj);
+ CHECK(m_Descriptor.m_CifgEnabled == descriptor.m_CifgEnabled);
+ CHECK(m_Descriptor.m_PeepholeEnabled == descriptor.m_PeepholeEnabled);
+ CHECK(m_Descriptor.m_ProjectionEnabled == descriptor.m_ProjectionEnabled);
}
void TestLstmLayerVisitor::CheckConstTensorPtrs(const std::string& name,
@@ -64,11 +64,11 @@ void TestLstmLayerVisitor::CheckConstTensorPtrs(const std::string& name,
{
if (expected == nullptr)
{
- BOOST_CHECK_MESSAGE(actual == nullptr, name + " actual should have been a nullptr");
+ CHECK_MESSAGE(actual == nullptr, name + " actual should have been a nullptr");
}
else
{
- BOOST_CHECK_MESSAGE(actual != nullptr, name + " actual should have been set");
+ CHECK_MESSAGE(actual != nullptr, name + " actual should have been set");
if (actual != nullptr)
{
CheckConstTensors(*expected, *actual);
@@ -113,11 +113,11 @@ void TestQLstmLayerVisitor::CheckConstTensorPtrs(const std::string& name,
{
if (expected == nullptr)
{
- BOOST_CHECK_MESSAGE(actual == nullptr, name + " actual should have been a nullptr");
+ CHECK_MESSAGE(actual == nullptr, name + " actual should have been a nullptr");
}
else
{
- BOOST_CHECK_MESSAGE(actual != nullptr, name + " actual should have been set");
+ CHECK_MESSAGE(actual != nullptr, name + " actual should have been set");
if (actual != nullptr)
{
CheckConstTensors(*expected, *actual);
@@ -127,11 +127,11 @@ void TestQLstmLayerVisitor::CheckConstTensorPtrs(const std::string& name,
void TestQLstmLayerVisitor::CheckDescriptor(const QLstmDescriptor& descriptor)
{
- BOOST_CHECK(m_Descriptor.m_CellClip == descriptor.m_CellClip);
- BOOST_CHECK(m_Descriptor.m_ProjectionClip == descriptor.m_ProjectionClip);
- BOOST_CHECK(m_Descriptor.m_CifgEnabled == descriptor.m_CifgEnabled);
- BOOST_CHECK(m_Descriptor.m_PeepholeEnabled == descriptor.m_PeepholeEnabled);
- BOOST_CHECK(m_Descriptor.m_ProjectionEnabled == descriptor.m_ProjectionEnabled);
+ CHECK(m_Descriptor.m_CellClip == descriptor.m_CellClip);
+ CHECK(m_Descriptor.m_ProjectionClip == descriptor.m_ProjectionClip);
+ CHECK(m_Descriptor.m_CifgEnabled == descriptor.m_CifgEnabled);
+ CHECK(m_Descriptor.m_PeepholeEnabled == descriptor.m_PeepholeEnabled);
+ CHECK(m_Descriptor.m_ProjectionEnabled == descriptor.m_ProjectionEnabled);
}
void TestQLstmLayerVisitor::CheckInputParameters(const LstmInputParams& inputParams)
@@ -211,11 +211,11 @@ void TestQuantizedLstmLayerVisitor::CheckConstTensorPtrs(const std::string& name
{
if (expected == nullptr)
{
- BOOST_CHECK_MESSAGE(actual == nullptr, name + " actual should have been a nullptr");
+ CHECK_MESSAGE(actual == nullptr, name + " actual should have been a nullptr");
}
else
{
- BOOST_CHECK_MESSAGE(actual != nullptr, name + " actual should have been set");
+ CHECK_MESSAGE(actual != nullptr, name + " actual should have been set");
if (actual != nullptr)
{
CheckConstTensors(*expected, *actual);
@@ -263,9 +263,9 @@ void TestQuantizedLstmLayerVisitor::CheckInputParameters(const QuantizedLstmInpu
CheckConstTensorPtrs("OutputGateBias", m_InputParams.m_OutputGateBias, inputParams.m_OutputGateBias);
}
-BOOST_AUTO_TEST_SUITE(TestConstTensorLayerVisitor)
-
-BOOST_AUTO_TEST_CASE(CheckConvolution2dLayer)
+TEST_SUITE("TestConstTensorLayerVisitor")
+{
+TEST_CASE("CheckConvolution2dLayer")
{
Convolution2dDescriptor descriptor;
descriptor.m_PadLeft = 2;
@@ -288,7 +288,7 @@ BOOST_AUTO_TEST_CASE(CheckConvolution2dLayer)
layer->Accept(visitor);
}
-BOOST_AUTO_TEST_CASE(CheckNamedConvolution2dLayer)
+TEST_CASE("CheckNamedConvolution2dLayer")
{
const char* layerName = "Convolution2dLayer";
Convolution2dDescriptor descriptor;
@@ -312,7 +312,7 @@ BOOST_AUTO_TEST_CASE(CheckNamedConvolution2dLayer)
layer->Accept(visitor);
}
-BOOST_AUTO_TEST_CASE(CheckConvolution2dLayerWithBiases)
+TEST_CASE("CheckConvolution2dLayerWithBiases")
{
Convolution2dDescriptor descriptor;
descriptor.m_PadLeft = 2;
@@ -341,7 +341,7 @@ BOOST_AUTO_TEST_CASE(CheckConvolution2dLayerWithBiases)
layer->Accept(visitor);
}
-BOOST_AUTO_TEST_CASE(CheckNamedConvolution2dLayerWithBiases)
+TEST_CASE("CheckNamedConvolution2dLayerWithBiases")
{
const char* layerName = "Convolution2dLayer";
Convolution2dDescriptor descriptor;
@@ -371,7 +371,7 @@ BOOST_AUTO_TEST_CASE(CheckNamedConvolution2dLayerWithBiases)
layer->Accept(visitor);
}
-BOOST_AUTO_TEST_CASE(CheckDepthwiseConvolution2dLayer)
+TEST_CASE("CheckDepthwiseConvolution2dLayer")
{
DepthwiseConvolution2dDescriptor descriptor;
descriptor.m_PadLeft = 2;
@@ -394,7 +394,7 @@ BOOST_AUTO_TEST_CASE(CheckDepthwiseConvolution2dLayer)
layer->Accept(visitor);
}
-BOOST_AUTO_TEST_CASE(CheckNamedDepthwiseConvolution2dLayer)
+TEST_CASE("CheckNamedDepthwiseConvolution2dLayer")
{
const char* layerName = "DepthwiseConvolution2dLayer";
DepthwiseConvolution2dDescriptor descriptor;
@@ -421,7 +421,7 @@ BOOST_AUTO_TEST_CASE(CheckNamedDepthwiseConvolution2dLayer)
layer->Accept(visitor);
}
-BOOST_AUTO_TEST_CASE(CheckDepthwiseConvolution2dLayerWithBiases)
+TEST_CASE("CheckDepthwiseConvolution2dLayerWithBiases")
{
DepthwiseConvolution2dDescriptor descriptor;
descriptor.m_PadLeft = 2;
@@ -450,7 +450,7 @@ BOOST_AUTO_TEST_CASE(CheckDepthwiseConvolution2dLayerWithBiases)
layer->Accept(visitor);
}
-BOOST_AUTO_TEST_CASE(CheckNamedDepthwiseConvolution2dLayerWithBiases)
+TEST_CASE("CheckNamedDepthwiseConvolution2dLayerWithBiases")
{
const char* layerName = "DepthwiseConvolution2dLayer";
DepthwiseConvolution2dDescriptor descriptor;
@@ -480,7 +480,7 @@ BOOST_AUTO_TEST_CASE(CheckNamedDepthwiseConvolution2dLayerWithBiases)
layer->Accept(visitor);
}
-BOOST_AUTO_TEST_CASE(CheckFullyConnectedLayer)
+TEST_CASE("CheckFullyConnectedLayer")
{
FullyConnectedDescriptor descriptor;
descriptor.m_TransposeWeightMatrix = true;
@@ -497,7 +497,7 @@ BOOST_AUTO_TEST_CASE(CheckFullyConnectedLayer)
layer->Accept(visitor);
}
-BOOST_AUTO_TEST_CASE(CheckNamedFullyConnectedLayer)
+TEST_CASE("CheckNamedFullyConnectedLayer")
{
const char* layerName = "FullyConnectedLayer";
FullyConnectedDescriptor descriptor;
@@ -515,7 +515,7 @@ BOOST_AUTO_TEST_CASE(CheckNamedFullyConnectedLayer)
layer->Accept(visitor);
}
-BOOST_AUTO_TEST_CASE(CheckFullyConnectedLayerWithBiases)
+TEST_CASE("CheckFullyConnectedLayerWithBiases")
{
FullyConnectedDescriptor descriptor;
descriptor.m_TransposeWeightMatrix = true;
@@ -538,7 +538,7 @@ BOOST_AUTO_TEST_CASE(CheckFullyConnectedLayerWithBiases)
layer->Accept(visitor);
}
-BOOST_AUTO_TEST_CASE(CheckNamedFullyConnectedLayerWithBiases)
+TEST_CASE("CheckNamedFullyConnectedLayerWithBiases")
{
const char* layerName = "FullyConnectedLayer";
FullyConnectedDescriptor descriptor;
@@ -562,7 +562,7 @@ BOOST_AUTO_TEST_CASE(CheckNamedFullyConnectedLayerWithBiases)
layer->Accept(visitor);
}
-BOOST_AUTO_TEST_CASE(CheckBatchNormalizationLayer)
+TEST_CASE("CheckBatchNormalizationLayer")
{
BatchNormalizationDescriptor descriptor;
descriptor.m_Eps = 0.0002f;
@@ -592,7 +592,7 @@ BOOST_AUTO_TEST_CASE(CheckBatchNormalizationLayer)
layer->Accept(visitor);
}
-BOOST_AUTO_TEST_CASE(CheckNamedBatchNormalizationLayer)
+TEST_CASE("CheckNamedBatchNormalizationLayer")
{
const char* layerName = "BatchNormalizationLayer";
BatchNormalizationDescriptor descriptor;
@@ -624,7 +624,7 @@ BOOST_AUTO_TEST_CASE(CheckNamedBatchNormalizationLayer)
layer->Accept(visitor);
}
-BOOST_AUTO_TEST_CASE(CheckConstLayer)
+TEST_CASE("CheckConstLayer")
{
std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
std::vector<unsigned int> dimensions = {1, 1, 3, 3};
@@ -638,7 +638,7 @@ BOOST_AUTO_TEST_CASE(CheckConstLayer)
layer->Accept(visitor);
}
-BOOST_AUTO_TEST_CASE(CheckNamedConstLayer)
+TEST_CASE("CheckNamedConstLayer")
{
const char* layerName = "ConstantLayer";
std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
@@ -653,7 +653,7 @@ BOOST_AUTO_TEST_CASE(CheckNamedConstLayer)
layer->Accept(visitor);
}
-BOOST_AUTO_TEST_CASE(CheckLstmLayerBasic)
+TEST_CASE("CheckLstmLayerBasic")
{
LstmDescriptor descriptor;
descriptor.m_ActivationFunc = 3;
@@ -725,7 +725,7 @@ BOOST_AUTO_TEST_CASE(CheckLstmLayerBasic)
layer->Accept(visitor);
}
-BOOST_AUTO_TEST_CASE(CheckNamedLstmLayerBasic)
+TEST_CASE("CheckNamedLstmLayerBasic")
{
const char* layerName = "LstmLayer";
LstmDescriptor descriptor;
@@ -798,7 +798,7 @@ BOOST_AUTO_TEST_CASE(CheckNamedLstmLayerBasic)
layer->Accept(visitor);
}
-BOOST_AUTO_TEST_CASE(CheckLstmLayerCifgDisabled)
+TEST_CASE("CheckLstmLayerCifgDisabled")
{
LstmDescriptor descriptor;
descriptor.m_ActivationFunc = 3;
@@ -889,7 +889,7 @@ BOOST_AUTO_TEST_CASE(CheckLstmLayerCifgDisabled)
layer->Accept(visitor);
}
-BOOST_AUTO_TEST_CASE(CheckNamedLstmLayerCifgDisabled)
+TEST_CASE("CheckNamedLstmLayerCifgDisabled")
{
const char* layerName = "LstmLayer";
LstmDescriptor descriptor;
@@ -982,7 +982,7 @@ BOOST_AUTO_TEST_CASE(CheckNamedLstmLayerCifgDisabled)
}
// TODO add one with peephole
-BOOST_AUTO_TEST_CASE(CheckLstmLayerPeephole)
+TEST_CASE("CheckLstmLayerPeephole")
{
LstmDescriptor descriptor;
descriptor.m_ActivationFunc = 3;
@@ -1068,7 +1068,7 @@ BOOST_AUTO_TEST_CASE(CheckLstmLayerPeephole)
layer->Accept(visitor);
}
-BOOST_AUTO_TEST_CASE(CheckLstmLayerPeepholeCifgDisabled)
+TEST_CASE("CheckLstmLayerPeepholeCifgDisabled")
{
LstmDescriptor descriptor;
descriptor.m_ActivationFunc = 3;
@@ -1182,7 +1182,7 @@ BOOST_AUTO_TEST_CASE(CheckLstmLayerPeepholeCifgDisabled)
layer->Accept(visitor);
}
-BOOST_AUTO_TEST_CASE(CheckNamedLstmLayerPeephole)
+TEST_CASE("CheckNamedLstmLayerPeephole")
{
const char* layerName = "LstmLayer";
LstmDescriptor descriptor;
@@ -1270,7 +1270,7 @@ BOOST_AUTO_TEST_CASE(CheckNamedLstmLayerPeephole)
}
// TODO add one with projection
-BOOST_AUTO_TEST_CASE(CheckLstmLayerProjection)
+TEST_CASE("CheckLstmLayerProjection")
{
LstmDescriptor descriptor;
descriptor.m_ActivationFunc = 3;
@@ -1356,7 +1356,7 @@ BOOST_AUTO_TEST_CASE(CheckLstmLayerProjection)
layer->Accept(visitor);
}
-BOOST_AUTO_TEST_CASE(CheckNamedLstmLayerProjection)
+TEST_CASE("CheckNamedLstmLayerProjection")
{
const char* layerName = "LstmLayer";
LstmDescriptor descriptor;
@@ -1443,7 +1443,7 @@ BOOST_AUTO_TEST_CASE(CheckNamedLstmLayerProjection)
layer->Accept(visitor);
}
-BOOST_AUTO_TEST_CASE(CheckQLstmLayerBasic)
+TEST_CASE("CheckQLstmLayerBasic")
{
QLstmDescriptor descriptor;
descriptor.m_ProjectionClip = 0.5f;
@@ -1515,7 +1515,7 @@ BOOST_AUTO_TEST_CASE(CheckQLstmLayerBasic)
layer->Accept(visitor);
}
-BOOST_AUTO_TEST_CASE(CheckNamedQLstmLayerBasic)
+TEST_CASE("CheckNamedQLstmLayerBasic")
{
const char* layerName = "QLstmLayer";
QLstmDescriptor descriptor;
@@ -1588,7 +1588,7 @@ BOOST_AUTO_TEST_CASE(CheckNamedQLstmLayerBasic)
layer->Accept(visitor);
}
-BOOST_AUTO_TEST_CASE(CheckQLstmLayerCifgDisabled)
+TEST_CASE("CheckQLstmLayerCifgDisabled")
{
QLstmDescriptor descriptor;
descriptor.m_ProjectionClip = 0.5f;
@@ -1683,7 +1683,7 @@ BOOST_AUTO_TEST_CASE(CheckQLstmLayerCifgDisabled)
layer->Accept(visitor);
}
-BOOST_AUTO_TEST_CASE(CheckQLstmLayerCifgDisabledPeepholeEnabled)
+TEST_CASE("CheckQLstmLayerCifgDisabledPeepholeEnabled")
{
QLstmDescriptor descriptor;
descriptor.m_ProjectionClip = 0.5f;
@@ -1800,7 +1800,7 @@ BOOST_AUTO_TEST_CASE(CheckQLstmLayerCifgDisabledPeepholeEnabled)
layer->Accept(visitor);
}
-BOOST_AUTO_TEST_CASE(CheckQLstmLayerCifgEnabledPeepholeEnabled)
+TEST_CASE("CheckQLstmLayerCifgEnabledPeepholeEnabled")
{
QLstmDescriptor descriptor;
descriptor.m_ProjectionClip = 0.5f;
@@ -1890,7 +1890,7 @@ BOOST_AUTO_TEST_CASE(CheckQLstmLayerCifgEnabledPeepholeEnabled)
layer->Accept(visitor);
}
-BOOST_AUTO_TEST_CASE(CheckQLstmLayerProjectionEnabled)
+TEST_CASE("CheckQLstmLayerProjectionEnabled")
{
QLstmDescriptor descriptor;
descriptor.m_ProjectionClip = 0.5f;
@@ -1980,7 +1980,7 @@ BOOST_AUTO_TEST_CASE(CheckQLstmLayerProjectionEnabled)
layer->Accept(visitor);
}
-BOOST_AUTO_TEST_CASE(CheckQLstmLayerCifgDisabledLayerNormEnabled)
+TEST_CASE("CheckQLstmLayerCifgDisabledLayerNormEnabled")
{
QLstmDescriptor descriptor;
descriptor.m_ProjectionClip = 0.5f;
@@ -2104,7 +2104,7 @@ BOOST_AUTO_TEST_CASE(CheckQLstmLayerCifgDisabledLayerNormEnabled)
}
-BOOST_AUTO_TEST_CASE(CheckQuantizedLstmLayer)
+TEST_CASE("CheckQuantizedLstmLayer")
{
std::vector<uint8_t> inputToInputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> inputToInputWeightsDimensions = {1, 1, 3, 3};
@@ -2193,7 +2193,7 @@ BOOST_AUTO_TEST_CASE(CheckQuantizedLstmLayer)
layer->Accept(visitor);
}
-BOOST_AUTO_TEST_CASE(CheckNamedQuantizedLstmLayer)
+TEST_CASE("CheckNamedQuantizedLstmLayer")
{
const char* layerName = "LstmLayer";
std::vector<uint8_t> inputToInputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
@@ -2283,6 +2283,6 @@ BOOST_AUTO_TEST_CASE(CheckNamedQuantizedLstmLayer)
layer->Accept(visitor);
}
-BOOST_AUTO_TEST_SUITE_END()
+}
} // namespace armnn
diff --git a/src/armnn/test/CreateWorkload.hpp b/src/armnn/test/CreateWorkload.hpp
index 12623e62a0..581c621a16 100644
--- a/src/armnn/test/CreateWorkload.hpp
+++ b/src/armnn/test/CreateWorkload.hpp
@@ -11,6 +11,7 @@
#include <ResolveType.hpp>
#include <armnnUtils/DataLayoutIndexed.hpp>
+#include <armnn/utility/Assert.hpp>
#include <armnn/utility/IgnoreUnused.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>
@@ -18,7 +19,7 @@
#include <backendsCommon/WorkloadData.hpp>
#include <backendsCommon/WorkloadFactory.hpp>
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
#include <utility>
@@ -36,11 +37,11 @@ std::unique_ptr<Workload> MakeAndCheckWorkload(Layer& layer,
const ModelOptions& modelOptions = {})
{
std::unique_ptr<IWorkload> workload = layer.CreateWorkload(factory);
- BOOST_TEST(workload.get() == PolymorphicDowncast<Workload*>(workload.get()),
+ CHECK_MESSAGE(workload.get() == PolymorphicDowncast<Workload*>(workload.get()),
"Cannot convert to derived class");
std::string reasonIfUnsupported;
layer.SetBackendId(factory.GetBackendId());
- BOOST_TEST(factory.IsLayerSupported(layer, layer.GetDataType(), reasonIfUnsupported, modelOptions));
+ CHECK(factory.IsLayerSupported(layer, layer.GetDataType(), reasonIfUnsupported, modelOptions));
return std::unique_ptr<Workload>(static_cast<Workload*>(workload.release()));
}
@@ -90,11 +91,11 @@ std::unique_ptr<ActivationWorkload> CreateActivationWorkloadTest(armnn::IWorkloa
auto workload = MakeAndCheckWorkload<ActivationWorkload>(*layer, factory);
ActivationQueueDescriptor queueDescriptor = workload->GetData();
- BOOST_TEST(queueDescriptor.m_Inputs.size() == 1);
- BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);
- BOOST_TEST(queueDescriptor.m_Parameters.m_A == 3.5f);
- BOOST_TEST(queueDescriptor.m_Parameters.m_B == -10.0f);
- BOOST_TEST((queueDescriptor.m_Parameters.m_Function == ActivationFunction::Abs));
+ CHECK(queueDescriptor.m_Inputs.size() == 1);
+ CHECK(queueDescriptor.m_Outputs.size() == 1);
+ CHECK(queueDescriptor.m_Parameters.m_A == 3.5f);
+ CHECK(queueDescriptor.m_Parameters.m_B == -10.0f);
+ CHECK((queueDescriptor.m_Parameters.m_Function == ActivationFunction::Abs));
// Returns so we can do extra, backend-specific tests.
return workload;
@@ -126,8 +127,8 @@ std::unique_ptr<WorkloadType> CreateElementwiseWorkloadTest(armnn::IWorkloadFact
auto workload = MakeAndCheckWorkload<WorkloadType>(*layer, factory);
DescriptorType queueDescriptor = workload->GetData();
- BOOST_TEST(queueDescriptor.m_Inputs.size() == 2);
- BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);
+ CHECK(queueDescriptor.m_Inputs.size() == 2);
+ CHECK(queueDescriptor.m_Outputs.size() == 1);
// Returns so we can do extra, backend-specific tests.
return workload;
@@ -165,9 +166,9 @@ std::unique_ptr<WorkloadType> CreateSubtractionWithBlobWorkloadTest(armnn::IWork
std::shared_ptr<ActivationDescriptor>
activationDescPtr = layer->GetAdditionalInformation<ActivationDescriptor>();
- BOOST_ASSERT(static_cast<float>(activationDescPtr->m_A) == 10.0f);
- BOOST_ASSERT(static_cast<float>(activationDescPtr->m_B) == 5.0f);
- BOOST_ASSERT(
+ ARMNN_ASSERT(static_cast<float>(activationDescPtr->m_A) == 10.0f);
+ ARMNN_ASSERT(static_cast<float>(activationDescPtr->m_B) == 5.0f);
+ ARMNN_ASSERT(
static_cast<ActivationFunction>(activationDescPtr->m_Function) == armnn::ActivationFunction::BoundedReLu
);
@@ -179,14 +180,14 @@ std::unique_ptr<WorkloadType> CreateSubtractionWithBlobWorkloadTest(armnn::IWork
const ActivationDescriptor* queueDescBlobPtr =
queueDescriptor.template GetAdditionalInformation<ActivationDescriptor>();
IgnoreUnused(queueDescBlobPtr);
- BOOST_ASSERT(static_cast<float>(queueDescBlobPtr->m_A) == 10.0f);
- BOOST_ASSERT(static_cast<float>(queueDescBlobPtr->m_B) == 5.0f);
- BOOST_ASSERT(
+ ARMNN_ASSERT(static_cast<float>(queueDescBlobPtr->m_A) == 10.0f);
+ ARMNN_ASSERT(static_cast<float>(queueDescBlobPtr->m_B) == 5.0f);
+ ARMNN_ASSERT(
static_cast<ActivationFunction>(queueDescBlobPtr->m_Function) == armnn::ActivationFunction::BoundedReLu
);
- BOOST_TEST(queueDescriptor.m_Inputs.size() == 2);
- BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);
+ CHECK(queueDescriptor.m_Inputs.size() == 2);
+ CHECK(queueDescriptor.m_Outputs.size() == 1);
return workload;
}
@@ -223,9 +224,9 @@ std::unique_ptr<WorkloadType> CreateMultiplicationWithBlobWorkloadTest(armnn::IW
std::shared_ptr<ActivationDescriptor>
activationDescPtr = layer->GetAdditionalInformation<ActivationDescriptor>();
- BOOST_ASSERT(static_cast<float>(activationDescPtr->m_A) == 10.0f);
- BOOST_ASSERT(static_cast<float>(activationDescPtr->m_B) == 5.0f);
- BOOST_ASSERT(
+ ARMNN_ASSERT(static_cast<float>(activationDescPtr->m_A) == 10.0f);
+ ARMNN_ASSERT(static_cast<float>(activationDescPtr->m_B) == 5.0f);
+ ARMNN_ASSERT(
static_cast<ActivationFunction>(activationDescPtr->m_Function) == armnn::ActivationFunction::BoundedReLu
);
@@ -233,14 +234,14 @@ std::unique_ptr<WorkloadType> CreateMultiplicationWithBlobWorkloadTest(armnn::IW
auto workload = MakeAndCheckWorkload<WorkloadType>(*layer, factory);
DescriptorType queueDescriptor = workload->GetData();
- BOOST_TEST(queueDescriptor.m_Inputs.size() == 2);
- BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);
+ CHECK(queueDescriptor.m_Inputs.size() == 2);
+ CHECK(queueDescriptor.m_Outputs.size() == 1);
const ActivationDescriptor* queueDescBlobPtr =
queueDescriptor.template GetAdditionalInformation<ActivationDescriptor>();
IgnoreUnused(queueDescBlobPtr);
- BOOST_ASSERT(static_cast<float>(queueDescBlobPtr->m_A) == 10.0f);
- BOOST_ASSERT(static_cast<float>(queueDescBlobPtr->m_B) == 5.0f);
- BOOST_ASSERT(
+ ARMNN_ASSERT(static_cast<float>(queueDescBlobPtr->m_A) == 10.0f);
+ ARMNN_ASSERT(static_cast<float>(queueDescBlobPtr->m_B) == 5.0f);
+ ARMNN_ASSERT(
static_cast<ActivationFunction>(queueDescBlobPtr->m_Function) == armnn::ActivationFunction::BoundedReLu
);
@@ -279,9 +280,9 @@ std::unique_ptr<WorkloadType> CreateAdditionWithBlobWorkloadTest(armnn::IWorkloa
std::shared_ptr<ActivationDescriptor>
activationDescPtr = layer->template GetAdditionalInformation<ActivationDescriptor>();
- BOOST_ASSERT(static_cast<float>(activationDescPtr->m_A) == 10.0f);
- BOOST_ASSERT(static_cast<float>(activationDescPtr->m_B) == 5.0f);
- BOOST_ASSERT(
+ ARMNN_ASSERT(static_cast<float>(activationDescPtr->m_A) == 10.0f);
+ ARMNN_ASSERT(static_cast<float>(activationDescPtr->m_B) == 5.0f);
+ ARMNN_ASSERT(
static_cast<ActivationFunction>(activationDescPtr->m_Function) == armnn::ActivationFunction::BoundedReLu
);
@@ -292,11 +293,11 @@ std::unique_ptr<WorkloadType> CreateAdditionWithBlobWorkloadTest(armnn::IWorkloa
const ActivationDescriptor* queueDescBlobPtr =
queueDescriptor.template GetAdditionalInformation<ActivationDescriptor>();
IgnoreUnused(queueDescBlobPtr);
- BOOST_TEST(queueDescriptor.m_Inputs.size() == 2);
- BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);
- BOOST_ASSERT(static_cast<float>(queueDescBlobPtr->m_A) == 10.0f);
- BOOST_ASSERT(static_cast<float>(queueDescBlobPtr->m_B) == 5.0f);
- BOOST_ASSERT(
+ CHECK(queueDescriptor.m_Inputs.size() == 2);
+ CHECK(queueDescriptor.m_Outputs.size() == 1);
+ ARMNN_ASSERT(static_cast<float>(queueDescBlobPtr->m_A) == 10.0f);
+ ARMNN_ASSERT(static_cast<float>(queueDescBlobPtr->m_B) == 5.0f);
+ ARMNN_ASSERT(
static_cast<ActivationFunction>(queueDescBlobPtr->m_Function) == armnn::ActivationFunction::BoundedReLu
);
@@ -324,8 +325,8 @@ std::unique_ptr<WorkloadType> CreateElementwiseUnaryWorkloadTest(armnn::IWorkloa
auto workload = MakeAndCheckWorkload<WorkloadType>(*layer, factory);
DescriptorType queueDescriptor = workload->GetData();
- BOOST_TEST(queueDescriptor.m_Inputs.size() == 1);
- BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);
+ CHECK(queueDescriptor.m_Inputs.size() == 1);
+ CHECK(queueDescriptor.m_Outputs.size() == 1);
return workload;
}
@@ -375,14 +376,14 @@ std::unique_ptr<BatchNormalizationWorkloadType> CreateBatchNormalizationWorkload
// Makes the workload and checks it.
auto workload = MakeAndCheckWorkload<BatchNormalizationWorkloadType>(*layer, factory);
BatchNormalizationQueueDescriptor queueDescriptor = workload->GetData();
- BOOST_TEST(queueDescriptor.m_Parameters.m_Eps == 0.05f);
- BOOST_TEST(queueDescriptor.m_Inputs.size() == 1);
- BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);
- BOOST_TEST((queueDescriptor.m_Mean->GetTensorInfo() == TensorInfo({3}, DataType)));
- BOOST_TEST((queueDescriptor.m_Variance->GetTensorInfo() == TensorInfo({3}, DataType)));
- BOOST_TEST((queueDescriptor.m_Gamma->GetTensorInfo() == TensorInfo({3}, DataType)));
- BOOST_TEST((queueDescriptor.m_Beta->GetTensorInfo() == TensorInfo({3}, DataType)));
- BOOST_TEST((queueDescriptor.m_Parameters.m_DataLayout == dataLayout));
+ CHECK(queueDescriptor.m_Parameters.m_Eps == 0.05f);
+ CHECK(queueDescriptor.m_Inputs.size() == 1);
+ CHECK(queueDescriptor.m_Outputs.size() == 1);
+ CHECK((queueDescriptor.m_Mean->GetTensorInfo() == TensorInfo({3}, DataType)));
+ CHECK((queueDescriptor.m_Variance->GetTensorInfo() == TensorInfo({3}, DataType)));
+ CHECK((queueDescriptor.m_Gamma->GetTensorInfo() == TensorInfo({3}, DataType)));
+ CHECK((queueDescriptor.m_Beta->GetTensorInfo() == TensorInfo({3}, DataType)));
+ CHECK((queueDescriptor.m_Parameters.m_DataLayout == dataLayout));
// Returns so we can do extra, backend-specific tests.
return workload;
@@ -429,9 +430,9 @@ std::unique_ptr<BatchNormalizationWorkloadType> CreateBatchNormalizationWithBlob
// Check that the additional information can be queried from the layer
std::shared_ptr<ActivationDescriptor> activationDescPtr = layer->GetAdditionalInformation<ActivationDescriptor>();
- BOOST_ASSERT(static_cast<float>(activationDescPtr->m_A) == 10.0f);
- BOOST_ASSERT(static_cast<float>(activationDescPtr->m_B) == 5.0f);
- BOOST_ASSERT(
+ ARMNN_ASSERT(static_cast<float>(activationDescPtr->m_A) == 10.0f);
+ ARMNN_ASSERT(static_cast<float>(activationDescPtr->m_B) == 5.0f);
+ ARMNN_ASSERT(
static_cast<ActivationFunction>(activationDescPtr->m_Function) == armnn::ActivationFunction::BoundedReLu
);
@@ -450,20 +451,20 @@ std::unique_ptr<BatchNormalizationWorkloadType> CreateBatchNormalizationWithBlob
BatchNormalizationQueueDescriptor queueDescriptor = workload->GetData();
const ActivationDescriptor* queueDescBlobPtr = queueDescriptor.GetAdditionalInformation<ActivationDescriptor>();
IgnoreUnused(queueDescBlobPtr);
- BOOST_ASSERT(static_cast<float>(queueDescBlobPtr->m_A) == 10.0f);
- BOOST_ASSERT(static_cast<float>(queueDescBlobPtr->m_B) == 5.0f);
- BOOST_ASSERT(
+ ARMNN_ASSERT(static_cast<float>(queueDescBlobPtr->m_A) == 10.0f);
+ ARMNN_ASSERT(static_cast<float>(queueDescBlobPtr->m_B) == 5.0f);
+ ARMNN_ASSERT(
static_cast<ActivationFunction>(queueDescBlobPtr->m_Function) == armnn::ActivationFunction::BoundedReLu
);
- BOOST_TEST(queueDescriptor.m_Parameters.m_Eps == 0.05f);
- BOOST_TEST(queueDescriptor.m_Inputs.size() == 1);
- BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);
- BOOST_TEST((queueDescriptor.m_Mean->GetTensorInfo() == TensorInfo({3}, DataType)));
- BOOST_TEST((queueDescriptor.m_Variance->GetTensorInfo() == TensorInfo({3}, DataType)));
- BOOST_TEST((queueDescriptor.m_Gamma->GetTensorInfo() == TensorInfo({3}, DataType)));
- BOOST_TEST((queueDescriptor.m_Beta->GetTensorInfo() == TensorInfo({3}, DataType)));
- BOOST_TEST((queueDescriptor.m_Parameters.m_DataLayout == dataLayout));
+ CHECK(queueDescriptor.m_Parameters.m_Eps == 0.05f);
+ CHECK(queueDescriptor.m_Inputs.size() == 1);
+ CHECK(queueDescriptor.m_Outputs.size() == 1);
+ CHECK((queueDescriptor.m_Mean->GetTensorInfo() == TensorInfo({3}, DataType)));
+ CHECK((queueDescriptor.m_Variance->GetTensorInfo() == TensorInfo({3}, DataType)));
+ CHECK((queueDescriptor.m_Gamma->GetTensorInfo() == TensorInfo({3}, DataType)));
+ CHECK((queueDescriptor.m_Beta->GetTensorInfo() == TensorInfo({3}, DataType)));
+ CHECK((queueDescriptor.m_Parameters.m_DataLayout == dataLayout));
// Returns so we can do extra, backend-specific tests.
return workload;
@@ -511,19 +512,19 @@ std::unique_ptr<Convolution2dWorkload> CreateConvolution2dWorkloadTest(armnn::IW
auto workload = MakeAndCheckWorkload<Convolution2dWorkload>(*layer, factory, modelOptions);
Convolution2dQueueDescriptor queueDescriptor = workload->GetData();
- BOOST_TEST(queueDescriptor.m_Parameters.m_StrideX == 2);
- BOOST_TEST(queueDescriptor.m_Parameters.m_StrideY == 4);
- BOOST_TEST(queueDescriptor.m_Parameters.m_PadLeft == 3);
- BOOST_TEST(queueDescriptor.m_Parameters.m_PadRight == 3);
- BOOST_TEST(queueDescriptor.m_Parameters.m_PadTop == 1);
- BOOST_TEST(queueDescriptor.m_Parameters.m_PadBottom == 1);
- BOOST_TEST(queueDescriptor.m_Parameters.m_BiasEnabled);
- BOOST_TEST((queueDescriptor.m_Parameters.m_DataLayout == dataLayout));
-
- BOOST_TEST(queueDescriptor.m_Inputs.size() == 1);
- BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);
- BOOST_TEST((queueDescriptor.m_Weight->GetTensorInfo() == TensorInfo(weightShape, DataType)));
- BOOST_TEST((queueDescriptor.m_Bias->GetTensorInfo() ==
+ CHECK(queueDescriptor.m_Parameters.m_StrideX == 2);
+ CHECK(queueDescriptor.m_Parameters.m_StrideY == 4);
+ CHECK(queueDescriptor.m_Parameters.m_PadLeft == 3);
+ CHECK(queueDescriptor.m_Parameters.m_PadRight == 3);
+ CHECK(queueDescriptor.m_Parameters.m_PadTop == 1);
+ CHECK(queueDescriptor.m_Parameters.m_PadBottom == 1);
+ CHECK(queueDescriptor.m_Parameters.m_BiasEnabled);
+ CHECK((queueDescriptor.m_Parameters.m_DataLayout == dataLayout));
+
+ CHECK(queueDescriptor.m_Inputs.size() == 1);
+ CHECK(queueDescriptor.m_Outputs.size() == 1);
+ CHECK((queueDescriptor.m_Weight->GetTensorInfo() == TensorInfo(weightShape, DataType)));
+ CHECK((queueDescriptor.m_Bias->GetTensorInfo() ==
TensorInfo({2}, GetBiasDataType(DataType))));
// Returns so we can do extra, backend-specific tests.
@@ -571,9 +572,9 @@ std::unique_ptr<Convolution2dWorkload> CreateConvolution2dFusedActivationWithBlo
// Check that the additional information can be queried from the layer
std::shared_ptr<ActivationDescriptor> activationDescPtr = layer->GetAdditionalInformation<ActivationDescriptor>();
- BOOST_ASSERT(static_cast<float>(activationDescPtr->m_A) == 10.0f);
- BOOST_ASSERT(static_cast<float>(activationDescPtr->m_B) == 5.0f);
- BOOST_ASSERT(
+ ARMNN_ASSERT(static_cast<float>(activationDescPtr->m_A) == 10.0f);
+ ARMNN_ASSERT(static_cast<float>(activationDescPtr->m_B) == 5.0f);
+ ARMNN_ASSERT(
static_cast<ActivationFunction>(activationDescPtr->m_Function) == armnn::ActivationFunction::BoundedReLu
);
@@ -592,25 +593,25 @@ std::unique_ptr<Convolution2dWorkload> CreateConvolution2dFusedActivationWithBlo
Convolution2dQueueDescriptor queueDescriptor = workload->GetData();
const ActivationDescriptor* queueDescBlobPtr = queueDescriptor.GetAdditionalInformation<ActivationDescriptor>();
IgnoreUnused(queueDescBlobPtr);
- BOOST_ASSERT(static_cast<float>(queueDescBlobPtr->m_A) == 10.0f);
- BOOST_ASSERT(static_cast<float>(queueDescBlobPtr->m_B) == 5.0f);
- BOOST_ASSERT(
+ ARMNN_ASSERT(static_cast<float>(queueDescBlobPtr->m_A) == 10.0f);
+ ARMNN_ASSERT(static_cast<float>(queueDescBlobPtr->m_B) == 5.0f);
+ ARMNN_ASSERT(
static_cast<ActivationFunction>(queueDescBlobPtr->m_Function) == armnn::ActivationFunction::BoundedReLu
);
- BOOST_TEST(queueDescriptor.m_Parameters.m_StrideX == 2);
- BOOST_TEST(queueDescriptor.m_Parameters.m_StrideY == 4);
- BOOST_TEST(queueDescriptor.m_Parameters.m_PadLeft == 3);
- BOOST_TEST(queueDescriptor.m_Parameters.m_PadRight == 3);
- BOOST_TEST(queueDescriptor.m_Parameters.m_PadTop == 1);
- BOOST_TEST(queueDescriptor.m_Parameters.m_PadBottom == 1);
- BOOST_TEST(queueDescriptor.m_Parameters.m_BiasEnabled);
- BOOST_TEST((queueDescriptor.m_Parameters.m_DataLayout == dataLayout));
- BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);
- BOOST_TEST((queueDescriptor.m_Weight->GetTensorInfo() == TensorInfo(weightShape, DataType)));
- BOOST_TEST((queueDescriptor.m_Bias->GetTensorInfo() ==
+ CHECK(queueDescriptor.m_Parameters.m_StrideX == 2);
+ CHECK(queueDescriptor.m_Parameters.m_StrideY == 4);
+ CHECK(queueDescriptor.m_Parameters.m_PadLeft == 3);
+ CHECK(queueDescriptor.m_Parameters.m_PadRight == 3);
+ CHECK(queueDescriptor.m_Parameters.m_PadTop == 1);
+ CHECK(queueDescriptor.m_Parameters.m_PadBottom == 1);
+ CHECK(queueDescriptor.m_Parameters.m_BiasEnabled);
+ CHECK((queueDescriptor.m_Parameters.m_DataLayout == dataLayout));
+ CHECK(queueDescriptor.m_Outputs.size() == 1);
+ CHECK((queueDescriptor.m_Weight->GetTensorInfo() == TensorInfo(weightShape, DataType)));
+ CHECK((queueDescriptor.m_Bias->GetTensorInfo() ==
TensorInfo({2}, GetBiasDataType(DataType))));
- BOOST_TEST(queueDescriptor.m_Inputs.size() == 1);
+ CHECK(queueDescriptor.m_Inputs.size() == 1);
// Returns so we can do extra, backend-specific tests.
return workload;
@@ -658,17 +659,17 @@ std::unique_ptr<Convolution2dWorkload> CreateConvolution2dWorkloadFastMathTest(a
auto workload = MakeAndCheckWorkload<Convolution2dWorkload>(*layer, factory, modelOptions);
Convolution2dQueueDescriptor queueDescriptor = workload->GetData();
- BOOST_TEST(queueDescriptor.m_Parameters.m_StrideX == 1);
- BOOST_TEST(queueDescriptor.m_Parameters.m_StrideY == 1);
- BOOST_TEST(queueDescriptor.m_Parameters.m_PadLeft == 0);
- BOOST_TEST(queueDescriptor.m_Parameters.m_PadRight == 0);
- BOOST_TEST(queueDescriptor.m_Parameters.m_PadTop == 0);
- BOOST_TEST(queueDescriptor.m_Parameters.m_PadBottom == 0);
- BOOST_TEST((queueDescriptor.m_Parameters.m_DataLayout == dataLayout));
-
- BOOST_TEST(queueDescriptor.m_Inputs.size() == 1);
- BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);
- BOOST_TEST((queueDescriptor.m_Weight->GetTensorInfo() == TensorInfo(weightShape, DataType)));
+ CHECK(queueDescriptor.m_Parameters.m_StrideX == 1);
+ CHECK(queueDescriptor.m_Parameters.m_StrideY == 1);
+ CHECK(queueDescriptor.m_Parameters.m_PadLeft == 0);
+ CHECK(queueDescriptor.m_Parameters.m_PadRight == 0);
+ CHECK(queueDescriptor.m_Parameters.m_PadTop == 0);
+ CHECK(queueDescriptor.m_Parameters.m_PadBottom == 0);
+ CHECK((queueDescriptor.m_Parameters.m_DataLayout == dataLayout));
+
+ CHECK(queueDescriptor.m_Inputs.size() == 1);
+ CHECK(queueDescriptor.m_Outputs.size() == 1);
+ CHECK((queueDescriptor.m_Weight->GetTensorInfo() == TensorInfo(weightShape, DataType)));
// Returns so we can do extra, backend-specific tests.
return workload;
@@ -760,17 +761,17 @@ std::unique_ptr<LstmWorkload> CreateLstmWorkloadTest(armnn::IWorkloadFactory& fa
// make the workload and check it
auto workload = MakeAndCheckWorkload<LstmWorkload>(*layer, factory);
LstmQueueDescriptor queueDescriptor = workload->GetData();
- BOOST_TEST(queueDescriptor.m_Parameters.m_ActivationFunc == 4);
- BOOST_TEST(queueDescriptor.m_Parameters.m_ClippingThresCell == 0.0f);
- BOOST_TEST(queueDescriptor.m_Parameters.m_ClippingThresProj == 0.0f);
- BOOST_TEST(queueDescriptor.m_Inputs.size() == 3);
- BOOST_TEST(queueDescriptor.m_Outputs.size() == 4);
+ CHECK(queueDescriptor.m_Parameters.m_ActivationFunc == 4);
+ CHECK(queueDescriptor.m_Parameters.m_ClippingThresCell == 0.0f);
+ CHECK(queueDescriptor.m_Parameters.m_ClippingThresProj == 0.0f);
+ CHECK(queueDescriptor.m_Inputs.size() == 3);
+ CHECK(queueDescriptor.m_Outputs.size() == 4);
- BOOST_TEST((queueDescriptor.m_InputToForgetWeights->GetTensorInfo() == TensorInfo({ numUnits, inputSize },
+ CHECK((queueDescriptor.m_InputToForgetWeights->GetTensorInfo() == TensorInfo({ numUnits, inputSize },
DataType::Float32)));
- BOOST_TEST((queueDescriptor.m_OutputGateBias->GetTensorInfo() == TensorInfo({ numUnits },
+ CHECK((queueDescriptor.m_OutputGateBias->GetTensorInfo() == TensorInfo({ numUnits },
DataType::Float32)));
- BOOST_TEST((queueDescriptor.m_CellBias->GetTensorInfo() == TensorInfo({ numUnits }, DataType::Float32)));
+ CHECK((queueDescriptor.m_CellBias->GetTensorInfo() == TensorInfo({ numUnits }, DataType::Float32)));
return workload;
}
@@ -891,24 +892,24 @@ std::unique_ptr<QuantizedLstmWorkload> CreateQuantizedLstmWorkloadTest(armnn::IW
QuantizedLstmQueueDescriptor queueDescriptor = workload->GetData();
// Validate input/output sizes
- BOOST_TEST(queueDescriptor.m_Inputs.size() == 3);
- BOOST_TEST(queueDescriptor.m_Outputs.size() == 2);
+ CHECK(queueDescriptor.m_Inputs.size() == 3);
+ CHECK(queueDescriptor.m_Outputs.size() == 2);
// Validate weight tensor info
- BOOST_TEST((queueDescriptor.m_InputToInputWeights->GetTensorInfo() == inputWeightsInfo));
- BOOST_TEST((queueDescriptor.m_InputToForgetWeights->GetTensorInfo() == inputWeightsInfo));
- BOOST_TEST((queueDescriptor.m_InputToCellWeights->GetTensorInfo() == inputWeightsInfo));
- BOOST_TEST((queueDescriptor.m_InputToOutputWeights->GetTensorInfo() == inputWeightsInfo));
+ CHECK((queueDescriptor.m_InputToInputWeights->GetTensorInfo() == inputWeightsInfo));
+ CHECK((queueDescriptor.m_InputToForgetWeights->GetTensorInfo() == inputWeightsInfo));
+ CHECK((queueDescriptor.m_InputToCellWeights->GetTensorInfo() == inputWeightsInfo));
+ CHECK((queueDescriptor.m_InputToOutputWeights->GetTensorInfo() == inputWeightsInfo));
- BOOST_TEST((queueDescriptor.m_RecurrentToInputWeights->GetTensorInfo() == recurrentWeightsInfo));
- BOOST_TEST((queueDescriptor.m_RecurrentToForgetWeights->GetTensorInfo() == recurrentWeightsInfo));
- BOOST_TEST((queueDescriptor.m_RecurrentToCellWeights->GetTensorInfo() == recurrentWeightsInfo));
- BOOST_TEST((queueDescriptor.m_RecurrentToOutputWeights->GetTensorInfo() == recurrentWeightsInfo));
+ CHECK((queueDescriptor.m_RecurrentToInputWeights->GetTensorInfo() == recurrentWeightsInfo));
+ CHECK((queueDescriptor.m_RecurrentToForgetWeights->GetTensorInfo() == recurrentWeightsInfo));
+ CHECK((queueDescriptor.m_RecurrentToCellWeights->GetTensorInfo() == recurrentWeightsInfo));
+ CHECK((queueDescriptor.m_RecurrentToOutputWeights->GetTensorInfo() == recurrentWeightsInfo));
- BOOST_TEST((queueDescriptor.m_InputGateBias->GetTensorInfo() == biasInfo));
- BOOST_TEST((queueDescriptor.m_ForgetGateBias->GetTensorInfo() == biasInfo));
- BOOST_TEST((queueDescriptor.m_CellBias->GetTensorInfo() == biasInfo));
- BOOST_TEST((queueDescriptor.m_OutputGateBias->GetTensorInfo() == biasInfo));
+ CHECK((queueDescriptor.m_InputGateBias->GetTensorInfo() == biasInfo));
+ CHECK((queueDescriptor.m_ForgetGateBias->GetTensorInfo() == biasInfo));
+ CHECK((queueDescriptor.m_CellBias->GetTensorInfo() == biasInfo));
+ CHECK((queueDescriptor.m_OutputGateBias->GetTensorInfo() == biasInfo));
return workload;
}
@@ -1054,22 +1055,22 @@ std::unique_ptr<QLstmWorkload> CreateQLstmWorkloadTest(armnn::IWorkloadFactory&
// Create and check workload
auto workload = MakeAndCheckWorkload<QLstmWorkload>(*layer, factory);
QLstmQueueDescriptor queueDescriptor = workload->GetData();
- BOOST_TEST(queueDescriptor.m_Parameters.m_CellClip == 0.0f);
- BOOST_TEST(queueDescriptor.m_Parameters.m_ProjectionClip == 0.0f);
- BOOST_TEST(queueDescriptor.m_Inputs.size() == 3);
- BOOST_TEST(queueDescriptor.m_Outputs.size() == 3);
+ CHECK(queueDescriptor.m_Parameters.m_CellClip == 0.0f);
+ CHECK(queueDescriptor.m_Parameters.m_ProjectionClip == 0.0f);
+ CHECK(queueDescriptor.m_Inputs.size() == 3);
+ CHECK(queueDescriptor.m_Outputs.size() == 3);
- BOOST_TEST((queueDescriptor.m_InputToForgetWeights->GetTensorInfo() == inputWeightsInfo));
- BOOST_TEST((queueDescriptor.m_InputToCellWeights->GetTensorInfo() == inputWeightsInfo));
- BOOST_TEST((queueDescriptor.m_InputToOutputWeights->GetTensorInfo() == inputWeightsInfo));
+ CHECK((queueDescriptor.m_InputToForgetWeights->GetTensorInfo() == inputWeightsInfo));
+ CHECK((queueDescriptor.m_InputToCellWeights->GetTensorInfo() == inputWeightsInfo));
+ CHECK((queueDescriptor.m_InputToOutputWeights->GetTensorInfo() == inputWeightsInfo));
- BOOST_TEST((queueDescriptor.m_RecurrentToForgetWeights->GetTensorInfo() == recurrentWeightsInfo));
- BOOST_TEST((queueDescriptor.m_RecurrentToCellWeights->GetTensorInfo() == recurrentWeightsInfo));
- BOOST_TEST((queueDescriptor.m_RecurrentToOutputWeights->GetTensorInfo() == recurrentWeightsInfo));
+ CHECK((queueDescriptor.m_RecurrentToForgetWeights->GetTensorInfo() == recurrentWeightsInfo));
+ CHECK((queueDescriptor.m_RecurrentToCellWeights->GetTensorInfo() == recurrentWeightsInfo));
+ CHECK((queueDescriptor.m_RecurrentToOutputWeights->GetTensorInfo() == recurrentWeightsInfo));
- BOOST_TEST((queueDescriptor.m_ForgetGateBias->GetTensorInfo() == biasInfo));
- BOOST_TEST((queueDescriptor.m_CellBias->GetTensorInfo() == biasInfo));
- BOOST_TEST((queueDescriptor.m_OutputGateBias->GetTensorInfo() == biasInfo));
+ CHECK((queueDescriptor.m_ForgetGateBias->GetTensorInfo() == biasInfo));
+ CHECK((queueDescriptor.m_CellBias->GetTensorInfo() == biasInfo));
+ CHECK((queueDescriptor.m_OutputGateBias->GetTensorInfo() == biasInfo));
return workload;
}
@@ -1112,19 +1113,19 @@ std::unique_ptr<Convolution2dWorkload> CreateDirectConvolution2dWorkloadTest(arm
auto workload = MakeAndCheckWorkload<Convolution2dWorkload>(*layer, factory);
Convolution2dQueueDescriptor queueDescriptor = workload->GetData();
- BOOST_TEST(queueDescriptor.m_Parameters.m_StrideX == 1);
- BOOST_TEST(queueDescriptor.m_Parameters.m_StrideY == 1);
- BOOST_TEST(queueDescriptor.m_Parameters.m_PadLeft == 1);
- BOOST_TEST(queueDescriptor.m_Parameters.m_PadRight == 1);
- BOOST_TEST(queueDescriptor.m_Parameters.m_PadTop == 1);
- BOOST_TEST(queueDescriptor.m_Parameters.m_PadBottom == 1);
- BOOST_TEST(queueDescriptor.m_Parameters.m_BiasEnabled == true);
-
- BOOST_TEST(queueDescriptor.m_Inputs.size() == 1);
- BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);
- BOOST_TEST((queueDescriptor.m_Weight->GetTensorInfo() == TensorInfo({2, 3, 3, 3},
+ CHECK(queueDescriptor.m_Parameters.m_StrideX == 1);
+ CHECK(queueDescriptor.m_Parameters.m_StrideY == 1);
+ CHECK(queueDescriptor.m_Parameters.m_PadLeft == 1);
+ CHECK(queueDescriptor.m_Parameters.m_PadRight == 1);
+ CHECK(queueDescriptor.m_Parameters.m_PadTop == 1);
+ CHECK(queueDescriptor.m_Parameters.m_PadBottom == 1);
+ CHECK(queueDescriptor.m_Parameters.m_BiasEnabled == true);
+
+ CHECK(queueDescriptor.m_Inputs.size() == 1);
+ CHECK(queueDescriptor.m_Outputs.size() == 1);
+ CHECK((queueDescriptor.m_Weight->GetTensorInfo() == TensorInfo({2, 3, 3, 3},
DataType, inputsQScale)));
- BOOST_TEST((queueDescriptor.m_Bias->GetTensorInfo()
+ CHECK((queueDescriptor.m_Bias->GetTensorInfo()
== TensorInfo({2}, GetBiasDataType(DataType), inputsQScale)));
// Returns so we can do extra, backend-specific tests.
@@ -1169,18 +1170,18 @@ std::unique_ptr<DepthwiseConvolution2dFloat32Workload> CreateDepthwiseConvolutio
auto workload = MakeAndCheckWorkload<DepthwiseConvolution2dFloat32Workload>(*layer, factory);
DepthwiseConvolution2dQueueDescriptor queueDescriptor = workload->GetData();
- BOOST_TEST(queueDescriptor.m_Parameters.m_StrideX == 1);
- BOOST_TEST(queueDescriptor.m_Parameters.m_StrideY == 1);
- BOOST_TEST(queueDescriptor.m_Parameters.m_PadLeft == 1);
- BOOST_TEST(queueDescriptor.m_Parameters.m_PadRight == 2);
- BOOST_TEST(queueDescriptor.m_Parameters.m_PadTop == 1);
- BOOST_TEST(queueDescriptor.m_Parameters.m_PadBottom == 2);
- BOOST_TEST(queueDescriptor.m_Parameters.m_BiasEnabled == false);
- BOOST_TEST((queueDescriptor.m_Parameters.m_DataLayout == dataLayout));
-
- BOOST_TEST(queueDescriptor.m_Inputs.size() == 1);
- BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);
- BOOST_TEST((queueDescriptor.m_Weight->GetTensorInfo() == TensorInfo({1, 2, 4, 4}, DataType)));
+ CHECK(queueDescriptor.m_Parameters.m_StrideX == 1);
+ CHECK(queueDescriptor.m_Parameters.m_StrideY == 1);
+ CHECK(queueDescriptor.m_Parameters.m_PadLeft == 1);
+ CHECK(queueDescriptor.m_Parameters.m_PadRight == 2);
+ CHECK(queueDescriptor.m_Parameters.m_PadTop == 1);
+ CHECK(queueDescriptor.m_Parameters.m_PadBottom == 2);
+ CHECK(queueDescriptor.m_Parameters.m_BiasEnabled == false);
+ CHECK((queueDescriptor.m_Parameters.m_DataLayout == dataLayout));
+
+ CHECK(queueDescriptor.m_Inputs.size() == 1);
+ CHECK(queueDescriptor.m_Outputs.size() == 1);
+ CHECK((queueDescriptor.m_Weight->GetTensorInfo() == TensorInfo({1, 2, 4, 4}, DataType)));
// Returns so we can do extra, backend-specific tests.
return workload;
@@ -1218,13 +1219,13 @@ std::unique_ptr<FullyConnectedWorkload> CreateFullyConnectedWorkloadTest(armnn::
auto workload = MakeAndCheckWorkload<FullyConnectedWorkload>(*layer, factory);
FullyConnectedQueueDescriptor queueDescriptor = workload->GetData();
- BOOST_TEST(queueDescriptor.m_Parameters.m_BiasEnabled == true);
- BOOST_TEST(queueDescriptor.m_Parameters.m_TransposeWeightMatrix == true);
+ CHECK(queueDescriptor.m_Parameters.m_BiasEnabled == true);
+ CHECK(queueDescriptor.m_Parameters.m_TransposeWeightMatrix == true);
- BOOST_TEST(queueDescriptor.m_Inputs.size() == 1);
- BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);
- BOOST_TEST((queueDescriptor.m_Weight->GetTensorInfo() == TensorInfo({7, 20}, DataType, inputsQScale)));
- BOOST_TEST((queueDescriptor.m_Bias->GetTensorInfo() == TensorInfo({7}, GetBiasDataType(DataType), inputsQScale)));
+ CHECK(queueDescriptor.m_Inputs.size() == 1);
+ CHECK(queueDescriptor.m_Outputs.size() == 1);
+ CHECK((queueDescriptor.m_Weight->GetTensorInfo() == TensorInfo({7, 20}, DataType, inputsQScale)));
+ CHECK((queueDescriptor.m_Bias->GetTensorInfo() == TensorInfo({7}, GetBiasDataType(DataType), inputsQScale)));
// Returns so we can do extra, backend-specific tests.
return workload;
@@ -1259,9 +1260,9 @@ std::unique_ptr<FullyConnectedWorkload> CreateFullyConnectedWithBlobWorkloadTest
// Check that the additional information can be queried from the layer
std::shared_ptr<ActivationDescriptor> activationDescPtr = layer->GetAdditionalInformation<ActivationDescriptor>();
- BOOST_ASSERT(static_cast<float>(activationDescPtr->m_A) == 10.0f);
- BOOST_ASSERT(static_cast<float>(activationDescPtr->m_B) == 5.0f);
- BOOST_ASSERT(static_cast<ActivationFunction>(activationDescPtr->m_Function) ==
+ ARMNN_ASSERT(static_cast<float>(activationDescPtr->m_A) == 10.0f);
+ ARMNN_ASSERT(static_cast<float>(activationDescPtr->m_B) == 5.0f);
+ ARMNN_ASSERT(static_cast<ActivationFunction>(activationDescPtr->m_Function) ==
armnn::ActivationFunction::BoundedReLu);
// Creates extra layers.
@@ -1281,18 +1282,18 @@ std::unique_ptr<FullyConnectedWorkload> CreateFullyConnectedWithBlobWorkloadTest
const ActivationDescriptor* queueDescBlobPtr = queueDescriptor.GetAdditionalInformation<ActivationDescriptor>();
IgnoreUnused(queueDescBlobPtr);
- BOOST_ASSERT(static_cast<float>(queueDescBlobPtr->m_A) == 10.0f);
- BOOST_ASSERT(static_cast<float>(queueDescBlobPtr->m_B) == 5.0f);
- BOOST_ASSERT(
+ ARMNN_ASSERT(static_cast<float>(queueDescBlobPtr->m_A) == 10.0f);
+ ARMNN_ASSERT(static_cast<float>(queueDescBlobPtr->m_B) == 5.0f);
+ ARMNN_ASSERT(
static_cast<ActivationFunction>(queueDescBlobPtr->m_Function) == armnn::ActivationFunction::BoundedReLu
);
- BOOST_TEST(queueDescriptor.m_Parameters.m_BiasEnabled == true);
- BOOST_TEST(queueDescriptor.m_Parameters.m_TransposeWeightMatrix == true);
- BOOST_TEST(queueDescriptor.m_Inputs.size() == 1);
- BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);
- BOOST_TEST((queueDescriptor.m_Weight->GetTensorInfo() == TensorInfo({7, 20}, DataType, inputsQScale)));
- BOOST_TEST((queueDescriptor.m_Bias->GetTensorInfo() == TensorInfo({7}, GetBiasDataType(DataType), inputsQScale)));
+ CHECK(queueDescriptor.m_Parameters.m_BiasEnabled == true);
+ CHECK(queueDescriptor.m_Parameters.m_TransposeWeightMatrix == true);
+ CHECK(queueDescriptor.m_Inputs.size() == 1);
+ CHECK(queueDescriptor.m_Outputs.size() == 1);
+ CHECK((queueDescriptor.m_Weight->GetTensorInfo() == TensorInfo({7, 20}, DataType, inputsQScale)));
+ CHECK((queueDescriptor.m_Bias->GetTensorInfo() == TensorInfo({7}, GetBiasDataType(DataType), inputsQScale)));
// Returns so we can do extra, backend-specific tests.
return workload;
@@ -1336,16 +1337,16 @@ std::unique_ptr<NormalizationWorkload> CreateNormalizationWorkloadTest(armnn::IW
auto workload = MakeAndCheckWorkload<NormalizationWorkload>(*layer, factory);
NormalizationQueueDescriptor queueDescriptor = workload->GetData();
- BOOST_TEST((queueDescriptor.m_Parameters.m_NormChannelType == NormalizationAlgorithmChannel::Across));
- BOOST_TEST((queueDescriptor.m_Parameters.m_NormMethodType == NormalizationAlgorithmMethod::LocalBrightness));
- BOOST_TEST(queueDescriptor.m_Parameters.m_NormSize == 3);
- BOOST_TEST(queueDescriptor.m_Parameters.m_Alpha == 0.5f);
- BOOST_TEST(queueDescriptor.m_Parameters.m_Beta == -1.0f);
- BOOST_TEST(queueDescriptor.m_Parameters.m_K == 0.2f);
- BOOST_TEST((queueDescriptor.m_Parameters.m_DataLayout == dataLayout));
+ CHECK((queueDescriptor.m_Parameters.m_NormChannelType == NormalizationAlgorithmChannel::Across));
+ CHECK((queueDescriptor.m_Parameters.m_NormMethodType == NormalizationAlgorithmMethod::LocalBrightness));
+ CHECK(queueDescriptor.m_Parameters.m_NormSize == 3);
+ CHECK(queueDescriptor.m_Parameters.m_Alpha == 0.5f);
+ CHECK(queueDescriptor.m_Parameters.m_Beta == -1.0f);
+ CHECK(queueDescriptor.m_Parameters.m_K == 0.2f);
+ CHECK((queueDescriptor.m_Parameters.m_DataLayout == dataLayout));
- BOOST_TEST(queueDescriptor.m_Inputs.size() == 1);
- BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);
+ CHECK(queueDescriptor.m_Inputs.size() == 1);
+ CHECK(queueDescriptor.m_Outputs.size() == 1);
// Returns so we can do extra, backend-specific tests.
return workload;
@@ -1388,20 +1389,20 @@ std::unique_ptr<Pooling2dWorkload> CreatePooling2dWorkloadTest(armnn::IWorkloadF
auto workload = MakeAndCheckWorkload<Pooling2dWorkload>(*layer, factory);
Pooling2dQueueDescriptor queueDescriptor = workload->GetData();
- BOOST_TEST((queueDescriptor.m_Parameters.m_PoolType == PoolingAlgorithm::Average));
- BOOST_TEST((queueDescriptor.m_Parameters.m_OutputShapeRounding == OutputShapeRounding::Floor));
- BOOST_TEST(queueDescriptor.m_Parameters.m_PoolWidth == 3);
- BOOST_TEST(queueDescriptor.m_Parameters.m_PoolHeight == 3);
- BOOST_TEST(queueDescriptor.m_Parameters.m_StrideX == 2);
- BOOST_TEST(queueDescriptor.m_Parameters.m_StrideY == 3);
- BOOST_TEST(queueDescriptor.m_Parameters.m_PadLeft == 2);
- BOOST_TEST(queueDescriptor.m_Parameters.m_PadRight == 2);
- BOOST_TEST(queueDescriptor.m_Parameters.m_PadTop == 1);
- BOOST_TEST(queueDescriptor.m_Parameters.m_PadBottom == 1);
- BOOST_TEST((queueDescriptor.m_Parameters.m_DataLayout == dataLayout));
-
- BOOST_TEST(queueDescriptor.m_Inputs.size() == 1);
- BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);
+ CHECK((queueDescriptor.m_Parameters.m_PoolType == PoolingAlgorithm::Average));
+ CHECK((queueDescriptor.m_Parameters.m_OutputShapeRounding == OutputShapeRounding::Floor));
+ CHECK(queueDescriptor.m_Parameters.m_PoolWidth == 3);
+ CHECK(queueDescriptor.m_Parameters.m_PoolHeight == 3);
+ CHECK(queueDescriptor.m_Parameters.m_StrideX == 2);
+ CHECK(queueDescriptor.m_Parameters.m_StrideY == 3);
+ CHECK(queueDescriptor.m_Parameters.m_PadLeft == 2);
+ CHECK(queueDescriptor.m_Parameters.m_PadRight == 2);
+ CHECK(queueDescriptor.m_Parameters.m_PadTop == 1);
+ CHECK(queueDescriptor.m_Parameters.m_PadBottom == 1);
+ CHECK((queueDescriptor.m_Parameters.m_DataLayout == dataLayout));
+
+ CHECK(queueDescriptor.m_Inputs.size() == 1);
+ CHECK(queueDescriptor.m_Outputs.size() == 1);
// Return so we can do extra, backend-specific tests
return workload;
@@ -1445,8 +1446,8 @@ std::unique_ptr<SoftmaxWorkload> CreateSoftmaxWorkloadTest(armnn::IWorkloadFacto
auto workload = MakeAndCheckWorkload<SoftmaxWorkload>(*layer, factory);
SoftmaxQueueDescriptor queueDescriptor = workload->GetData();
- BOOST_TEST(queueDescriptor.m_Inputs.size() == 1);
- BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);
+ CHECK(queueDescriptor.m_Inputs.size() == 1);
+ CHECK(queueDescriptor.m_Outputs.size() == 1);
// Return so we can do extra, backend-specific tests.
return workload;
@@ -1494,19 +1495,19 @@ std::unique_ptr<SplitterWorkload>
auto workload = MakeAndCheckWorkload<SplitterWorkload>(*layer, factory);
SplitterQueueDescriptor queueDescriptor = workload->GetData();
- BOOST_TEST(queueDescriptor.m_Inputs.size() == 1);
- BOOST_TEST(queueDescriptor.m_Outputs.size() == 3);
- BOOST_TEST(queueDescriptor.m_ViewOrigins.size() == 3);
-
- BOOST_TEST(queueDescriptor.m_ViewOrigins[0].m_Origin[0] == 0);
- BOOST_TEST(queueDescriptor.m_ViewOrigins[1].m_Origin[0] == 1);
- BOOST_TEST(queueDescriptor.m_ViewOrigins[2].m_Origin[0] == 3);
- BOOST_TEST(queueDescriptor.m_ViewOrigins[0].m_Origin[1] == 0);
- BOOST_TEST(queueDescriptor.m_ViewOrigins[1].m_Origin[1] == 0);
- BOOST_TEST(queueDescriptor.m_ViewOrigins[2].m_Origin[1] == 0);
- BOOST_TEST(queueDescriptor.m_ViewOrigins[0].m_Origin[2] == 0);
- BOOST_TEST(queueDescriptor.m_ViewOrigins[1].m_Origin[2] == 0);
- BOOST_TEST(queueDescriptor.m_ViewOrigins[2].m_Origin[2] == 0);
+ CHECK(queueDescriptor.m_Inputs.size() == 1);
+ CHECK(queueDescriptor.m_Outputs.size() == 3);
+ CHECK(queueDescriptor.m_ViewOrigins.size() == 3);
+
+ CHECK(queueDescriptor.m_ViewOrigins[0].m_Origin[0] == 0);
+ CHECK(queueDescriptor.m_ViewOrigins[1].m_Origin[0] == 1);
+ CHECK(queueDescriptor.m_ViewOrigins[2].m_Origin[0] == 3);
+ CHECK(queueDescriptor.m_ViewOrigins[0].m_Origin[1] == 0);
+ CHECK(queueDescriptor.m_ViewOrigins[1].m_Origin[1] == 0);
+ CHECK(queueDescriptor.m_ViewOrigins[2].m_Origin[1] == 0);
+ CHECK(queueDescriptor.m_ViewOrigins[0].m_Origin[2] == 0);
+ CHECK(queueDescriptor.m_ViewOrigins[1].m_Origin[2] == 0);
+ CHECK(queueDescriptor.m_ViewOrigins[2].m_Origin[2] == 0);
// Returns so we can do extra, backend-specific tests.
return workload;
@@ -1536,8 +1537,9 @@ std::pair<std::unique_ptr<SplitterWorkload>, std::unique_ptr<ConcatWorkload>>
splitterViews.SetViewOriginCoord(1, 2, 0);
splitterViews.SetViewOriginCoord(1, 3, 0);
+ // create splitter layer
Layer* const splitter = graph.AddLayer<SplitterLayer>(splitterViews, "splitter");
- BOOST_TEST_CHECKPOINT("created splitter layer");
+ CHECK(splitter);
armnn::OriginsDescriptor concatViews(2);
concatViews.SetViewOriginCoord(0, 0, 0);
@@ -1550,28 +1552,31 @@ std::pair<std::unique_ptr<SplitterWorkload>, std::unique_ptr<ConcatWorkload>>
concatViews.SetViewOriginCoord(1, 2, 0);
concatViews.SetViewOriginCoord(1, 3, 0);
+ // create concat layer
Layer* const concat = graph.AddLayer<ConcatLayer>(concatViews, "concat");
- BOOST_TEST_CHECKPOINT("created concat layer");
+ CHECK(concat);
Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
// Adds connections.
+ // connect input to splitter
Connect(input, splitter, inputTensorInfo, 0, 0);
- BOOST_TEST_CHECKPOINT("connect input to splitter");
+ // connect splitter[0] to concat[1]
Connect(splitter, concat, splitTensorInfo1, 0, 1); // The splitter & concat are connected up.
- BOOST_TEST_CHECKPOINT("connect splitter[0] to concat[1]");
+ // connect splitter[1] to concat[0]
Connect(splitter, concat, splitTensorInfo2, 1, 0); // So that the outputs are flipped round.
- BOOST_TEST_CHECKPOINT("connect splitter[1] to concat[0]");
+ // connect concat to output
Connect(concat, output, inputTensorInfo, 0, 0);
- BOOST_TEST_CHECKPOINT("connect concat to output");
+ // created tensor handles
CreateTensorHandles(graph, factory);
- BOOST_TEST_CHECKPOINT("created tensor handles");
+ // created splitter workload
auto workloadSplitter = MakeAndCheckWorkload<SplitterWorkload>(*splitter, factory);
- BOOST_TEST_CHECKPOINT("created splitter workload");
+ CHECK(workloadSplitter);
+ // created concat workload
auto workloadConcat = MakeAndCheckWorkload<ConcatWorkload>(*concat, factory);
- BOOST_TEST_CHECKPOINT("created concat workload");
+ CHECK(workloadConcat);
return {std::move(workloadSplitter), std::move(workloadConcat)};
}
@@ -1691,9 +1696,9 @@ std::unique_ptr<ResizeWorkload> CreateResizeBilinearWorkloadTest(armnn::IWorkloa
auto workload = MakeAndCheckWorkload<ResizeWorkload>(*layer, factory);
auto queueDescriptor = workload->GetData();
- BOOST_CHECK(queueDescriptor.m_Inputs.size() == 1);
- BOOST_CHECK(queueDescriptor.m_Outputs.size() == 1);
- BOOST_CHECK(queueDescriptor.m_Parameters.m_DataLayout == dataLayout);
+ CHECK(queueDescriptor.m_Inputs.size() == 1);
+ CHECK(queueDescriptor.m_Outputs.size() == 1);
+ CHECK(queueDescriptor.m_Parameters.m_DataLayout == dataLayout);
// Returns so we can do extra, backend-specific tests.
return workload;
@@ -1722,8 +1727,8 @@ std::unique_ptr<BatchToSpaceNdWorkload> CreateBatchToSpaceNdWorkloadTest(armnn::
auto workload = MakeAndCheckWorkload<BatchToSpaceNdWorkload>(*layer, factory);
BatchToSpaceNdQueueDescriptor queueDescriptor = workload->GetData();
- BOOST_TEST(queueDescriptor.m_Inputs.size() == 1);
- BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);
+ CHECK(queueDescriptor.m_Inputs.size() == 1);
+ CHECK(queueDescriptor.m_Outputs.size() == 1);
return workload;
}
@@ -1756,8 +1761,8 @@ std::unique_ptr<LogSoftmaxWorkload> CreateLogSoftmaxWorkloadTest(armnn::IWorkloa
auto workload = MakeAndCheckWorkload<LogSoftmaxWorkload>(*layer, factory);
LogSoftmaxQueueDescriptor queueDescriptor = workload->GetData();
- BOOST_TEST(queueDescriptor.m_Inputs.size() == 1);
- BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);
+ CHECK(queueDescriptor.m_Inputs.size() == 1);
+ CHECK(queueDescriptor.m_Outputs.size() == 1);
// Return so we can do extra, backend-specific tests.
return workload;
@@ -1793,9 +1798,9 @@ std::unique_ptr<L2NormalizationWorkload> CreateL2NormalizationWorkloadTest(armnn
auto workload = MakeAndCheckWorkload<L2NormalizationWorkload>(*layer, factory);
L2NormalizationQueueDescriptor queueDescriptor = workload->GetData();
- BOOST_TEST((queueDescriptor.m_Parameters.m_DataLayout == dataLayout));
- BOOST_TEST(queueDescriptor.m_Inputs.size() == 1);
- BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);
+ CHECK((queueDescriptor.m_Parameters.m_DataLayout == dataLayout));
+ CHECK(queueDescriptor.m_Inputs.size() == 1);
+ CHECK(queueDescriptor.m_Outputs.size() == 1);
// Returns so we can do extra, backend-specific tests.
return workload;
@@ -1826,8 +1831,8 @@ std::unique_ptr<ReshapeWorkload> CreateReshapeWorkloadTest(armnn::IWorkloadFacto
auto workload = MakeAndCheckWorkload<ReshapeWorkload>(*layer, factory);
ReshapeQueueDescriptor queueDescriptor = workload->GetData();
- BOOST_TEST(queueDescriptor.m_Inputs.size() == 1);
- BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);
+ CHECK(queueDescriptor.m_Inputs.size() == 1);
+ CHECK(queueDescriptor.m_Outputs.size() == 1);
// Returns so we can do extra, backend-specific tests.
return workload;
@@ -1855,8 +1860,8 @@ std::unique_ptr<ConvertFp16ToFp32Float32Workload> CreateConvertFp16ToFp32Workloa
auto workload = MakeAndCheckWorkload<ConvertFp16ToFp32Float32Workload>(*layer, factory);
ConvertFp16ToFp32QueueDescriptor queueDescriptor = workload->GetData();
- BOOST_TEST(queueDescriptor.m_Inputs.size() == 1);
- BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);
+ CHECK(queueDescriptor.m_Inputs.size() == 1);
+ CHECK(queueDescriptor.m_Outputs.size() == 1);
// Returns so we can do extra, backend-specific tests.
return workload;
@@ -1884,8 +1889,8 @@ std::unique_ptr<ConvertFp32ToFp16Float16Workload> CreateConvertFp32ToFp16Workloa
auto workload = MakeAndCheckWorkload<ConvertFp32ToFp16Float16Workload>(*layer, factory);
ConvertFp32ToFp16QueueDescriptor queueDescriptor = workload->GetData();
- BOOST_TEST(queueDescriptor.m_Inputs.size() == 1);
- BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);
+ CHECK(queueDescriptor.m_Inputs.size() == 1);
+ CHECK(queueDescriptor.m_Outputs.size() == 1);
// Returns so we can do extra, backend-specific tests.
return workload;
@@ -1915,10 +1920,10 @@ std::unique_ptr<MeanWorkload> CreateMeanWorkloadTest(armnn::IWorkloadFactory& fa
auto workload = MakeAndCheckWorkload<MeanWorkload>(*layer, factory);
MeanQueueDescriptor queueDescriptor = workload->GetData();
- BOOST_TEST(queueDescriptor.m_Parameters.m_Axis == descriptor.m_Axis);
- BOOST_TEST(queueDescriptor.m_Parameters.m_KeepDims == descriptor.m_KeepDims);
- BOOST_TEST(queueDescriptor.m_Inputs.size() == 1);
- BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);
+ CHECK(queueDescriptor.m_Parameters.m_Axis == descriptor.m_Axis);
+ CHECK(queueDescriptor.m_Parameters.m_KeepDims == descriptor.m_KeepDims);
+ CHECK(queueDescriptor.m_Inputs.size() == 1);
+ CHECK(queueDescriptor.m_Outputs.size() == 1);
// Returns so we can do extra, backend-specific tests.
return workload;
@@ -1944,24 +1949,26 @@ std::unique_ptr<ConcatWorkload> CreateConcatWorkloadTest(armnn::IWorkloadFactory
inputShapes.end(),
concatAxis);
+ // create concat layer
Layer* const concat = graph.AddLayer<ConcatLayer>(descriptor, "concat");
- BOOST_TEST_CHECKPOINT("created concat layer");
+ CHECK(concat);
Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
// Adds connections.
+ // connect input0 to concat
Connect(input0, concat, inputTensorInfo, 0, 0);
- BOOST_TEST_CHECKPOINT("connect input0 to concat");
+ // connect input1 to concat
Connect(input1, concat, inputTensorInfo, 0, 1);
- BOOST_TEST_CHECKPOINT("connect input1 to concat");
+ // connect concat to output
Connect(concat, output, outputTensorInfo, 0, 0);
- BOOST_TEST_CHECKPOINT("connect concat to output");
+ // create tensor handles
CreateTensorHandles(graph, factory);
- BOOST_TEST_CHECKPOINT("created tensor handles");
+ // create concat workload
auto workloadConcat = MakeAndCheckWorkload<ConcatWorkload>(*concat, factory);
- BOOST_TEST_CHECKPOINT("created concat workload");
+ CHECK(workloadConcat);
return workloadConcat;
}
@@ -1979,7 +1986,7 @@ std::pair<armnn::IOptimizedNetworkPtr, std::unique_ptr<PreCompiledWorkload>> Cre
// Add an input layer
armnn::IConnectableLayer* const inputLayer = net->AddInputLayer(0, "input layer");
- BOOST_TEST(inputLayer);
+ CHECK(inputLayer);
// ArmNN weights tensor shape is OIHW (out channels, in channels, height, width) for NCHW
// ArmNN weights tensor shape is OHWI (out channels, height, width, in channels) for NHWC
@@ -2035,11 +2042,11 @@ std::pair<armnn::IOptimizedNetworkPtr, std::unique_ptr<PreCompiledWorkload>> Cre
convLayerName.c_str());
}
- BOOST_TEST(convLayer);
+ CHECK(convLayer);
// Add an output layer
armnn::IConnectableLayer* const outputLayer = net->AddOutputLayer(0, "output layer");
- BOOST_TEST(outputLayer);
+ CHECK(outputLayer);
// set the tensors in the network (NHWC format)
TensorInfo inputTensorInfo(TensorShape({ 1, 16, 16, 16 }), dataType);
@@ -2070,7 +2077,7 @@ std::pair<armnn::IOptimizedNetworkPtr, std::unique_ptr<PreCompiledWorkload>> Cre
armnn::OptimizerOptions optimizerOptions;
armnn::IOptimizedNetworkPtr optimizedNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec(),
optimizerOptions);
- BOOST_CHECK(optimizedNet != nullptr);
+ CHECK(optimizedNet != nullptr);
// Find the PreCompiled layer in the optimised graph
armnn::Graph& optimisedGraph = GetGraphForTesting(optimizedNet.get());
@@ -2082,7 +2089,7 @@ std::pair<armnn::IOptimizedNetworkPtr, std::unique_ptr<PreCompiledWorkload>> Cre
preCompiledLayer = layer;
}
}
- BOOST_CHECK(preCompiledLayer != nullptr);
+ CHECK(preCompiledLayer != nullptr);
// Create the TensorHandles.
CreateTensorHandles(optimisedGraph, factory);
@@ -2091,8 +2098,8 @@ std::pair<armnn::IOptimizedNetworkPtr, std::unique_ptr<PreCompiledWorkload>> Cre
auto workload = MakeAndCheckWorkload<PreCompiledWorkload>(*preCompiledLayer, factory);
PreCompiledQueueDescriptor queueDescriptor = workload->GetData();
- BOOST_TEST(queueDescriptor.m_Inputs.size() == 1);
- BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);
+ CHECK(queueDescriptor.m_Inputs.size() == 1);
+ CHECK(queueDescriptor.m_Outputs.size() == 1);
// Returns the workload so we can do extra, backend-specific tests.
// NOTE: We need to return the optimised network as well, otherwise it gets
@@ -2107,21 +2114,23 @@ std::unique_ptr<ConstantWorkload> CreateConstantWorkloadTest(armnn::IWorkloadFac
{
armnn::TensorInfo outputTensorInfo(outputShape, DataType);
+ // create constant layer
auto constant = graph.AddLayer<ConstantLayer>("constant");
+ CHECK(constant);
constant->m_LayerOutput = std::make_unique<ScopedTensorHandle>(outputTensorInfo);
- BOOST_TEST_CHECKPOINT("created constant layer");
Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
// Adds connections.
+ // connect constant to output
Connect(constant, output, outputTensorInfo, 0, 0);
- BOOST_TEST_CHECKPOINT("connect constant to output");
+ // create tensor handles
CreateTensorHandles(graph, factory);
- BOOST_TEST_CHECKPOINT("created tensor handles");
+ // create Constant workload
auto workloadConstant = MakeAndCheckWorkload<ConstantWorkload>(*constant, factory);
- BOOST_TEST_CHECKPOINT("created Constant workload");
+ CHECK(workloadConstant);
return workloadConstant;
}
@@ -2136,15 +2145,15 @@ std::unique_ptr<PreluWorkload> CreatePreluWorkloadTest(armnn::IWorkloadFactory&
{
// Creates the PReLU layer
Layer* const layer = graph.AddLayer<PreluLayer>("prelu");
- BOOST_CHECK(layer != nullptr);
+ CHECK(layer != nullptr);
// Creates extra layers
Layer* const input = graph.AddLayer<InputLayer> (0, "input");
Layer* const alpha = graph.AddLayer<InputLayer> (1, "alpha");
Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
- BOOST_CHECK(input != nullptr);
- BOOST_CHECK(alpha != nullptr);
- BOOST_CHECK(output != nullptr);
+ CHECK(input != nullptr);
+ CHECK(alpha != nullptr);
+ CHECK(output != nullptr);
// Connects up
armnn::TensorInfo inputTensorInfo (inputShape, dataType);
@@ -2159,8 +2168,8 @@ std::unique_ptr<PreluWorkload> CreatePreluWorkloadTest(armnn::IWorkloadFactory&
auto workload = MakeAndCheckWorkload<PreluWorkload>(*layer, factory);
PreluQueueDescriptor queueDescriptor = workload->GetData();
- BOOST_TEST(queueDescriptor.m_Inputs.size() == 2);
- BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);
+ CHECK(queueDescriptor.m_Inputs.size() == 2);
+ CHECK(queueDescriptor.m_Outputs.size() == 1);
// Returns so we can do extra, backend-specific tests.
return workload;
@@ -2191,8 +2200,8 @@ std::unique_ptr<SpaceToDepthWorkload> CreateSpaceToDepthWorkloadTest(armnn::IWor
auto workload = MakeAndCheckWorkload<SpaceToDepthWorkload>(*layer, factory);
SpaceToDepthQueueDescriptor queueDescriptor = workload->GetData();
- BOOST_TEST(queueDescriptor.m_Inputs.size() == 1);
- BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);
+ CHECK(queueDescriptor.m_Inputs.size() == 1);
+ CHECK(queueDescriptor.m_Outputs.size() == 1);
return workload;
}
@@ -2211,7 +2220,7 @@ std::unique_ptr<StackWorkload> CreateStackWorkloadTest(armnn::IWorkloadFactory&
// Constructs the Stack layer.
armnn::StackDescriptor descriptor(axis, numInputs, inputShape);
Layer* const stackLayer = graph.AddLayer<StackLayer>(descriptor, "stack");
- BOOST_CHECK(stackLayer != nullptr);
+ CHECK(stackLayer != nullptr);
// Constructs layer inputs and output.
std::vector<Layer*> inputs;
@@ -2221,10 +2230,10 @@ std::unique_ptr<StackWorkload> CreateStackWorkloadTest(armnn::IWorkloadFactory&
static_cast<int>(i),
("input" + std::to_string(i)).c_str()
));
- BOOST_CHECK(inputs[i] != nullptr);
+ CHECK(inputs[i] != nullptr);
}
Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
- BOOST_CHECK(output != nullptr);
+ CHECK(output != nullptr);
// Adds connections.
for (unsigned int i=0; i<numInputs; ++i)
@@ -2237,8 +2246,8 @@ std::unique_ptr<StackWorkload> CreateStackWorkloadTest(armnn::IWorkloadFactory&
auto stackWorkload = MakeAndCheckWorkload<StackWorkload>(*stackLayer, factory);
StackQueueDescriptor queueDescriptor = stackWorkload->GetData();
- BOOST_TEST(queueDescriptor.m_Inputs.size() == numInputs);
- BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);
+ CHECK(queueDescriptor.m_Inputs.size() == numInputs);
+ CHECK(queueDescriptor.m_Outputs.size() == 1);
return stackWorkload;
}
diff --git a/src/armnn/test/DebugCallbackTest.cpp b/src/armnn/test/DebugCallbackTest.cpp
index 2ae228b882..48e2c15a79 100644
--- a/src/armnn/test/DebugCallbackTest.cpp
+++ b/src/armnn/test/DebugCallbackTest.cpp
@@ -9,10 +9,10 @@
#include <armnn/Types.hpp>
#include <Runtime.hpp>
-#include <boost/test/unit_test.hpp>
-
-BOOST_AUTO_TEST_SUITE(DebugCallback)
+#include <doctest/doctest.h>
+TEST_SUITE("DebugCallback")
+{
namespace
{
@@ -39,7 +39,7 @@ INetworkPtr CreateSimpleNetwork()
return net;
}
-BOOST_AUTO_TEST_CASE(RuntimeRegisterDebugCallback)
+TEST_CASE("RuntimeRegisterDebugCallback")
{
INetworkPtr net = CreateSimpleNetwork();
@@ -52,7 +52,7 @@ BOOST_AUTO_TEST_CASE(RuntimeRegisterDebugCallback)
IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optimizerOptions);
NetworkId netId;
- BOOST_TEST(runtime->LoadNetwork(netId, std::move(optNet)) == Status::Success);
+ CHECK(runtime->LoadNetwork(netId, std::move(optNet)) == Status::Success);
// Set up callback function
int callCount = 0;
@@ -83,17 +83,17 @@ BOOST_AUTO_TEST_CASE(RuntimeRegisterDebugCallback)
runtime->EnqueueWorkload(netId, inputTensors, outputTensors);
// Check that the callback was called twice
- BOOST_TEST(callCount == 2);
+ CHECK(callCount == 2);
// Check that tensor handles passed to callback have correct shapes
const std::vector<TensorShape> expectedShapes({TensorShape({1, 1, 1, 5}), TensorShape({1, 1, 1, 5})});
- BOOST_TEST(tensorShapes == expectedShapes);
+ CHECK(tensorShapes == expectedShapes);
// Check that slot indexes passed to callback are correct
const std::vector<unsigned int> expectedSlotIndexes({0, 0});
- BOOST_TEST(slotIndexes == expectedSlotIndexes);
+ CHECK(slotIndexes == expectedSlotIndexes);
}
} // anonymous namespace
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnn/test/EndToEndTest.cpp b/src/armnn/test/EndToEndTest.cpp
index 56ff454703..705258e07f 100644
--- a/src/armnn/test/EndToEndTest.cpp
+++ b/src/armnn/test/EndToEndTest.cpp
@@ -8,13 +8,13 @@
#include <armnn/INetwork.hpp>
#include <armnn/utility/IgnoreUnused.hpp>
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
#include <set>
-BOOST_AUTO_TEST_SUITE(EndToEnd)
-
-BOOST_AUTO_TEST_CASE(ErrorOnLoadNetwork)
+TEST_SUITE("EndToEnd")
+{
+TEST_CASE("ErrorOnLoadNetwork")
{
using namespace armnn;
@@ -47,13 +47,13 @@ BOOST_AUTO_TEST_CASE(ErrorOnLoadNetwork)
try
{
Optimize(*net, backends, runtime->GetDeviceSpec(), OptimizerOptions(), errMessages);
- BOOST_FAIL("Should have thrown an exception.");
+ FAIL("Should have thrown an exception.");
}
catch (const InvalidArgumentException& e)
{
// Different exceptions are thrown on different backends
}
- BOOST_CHECK(errMessages.size() > 0);
+ CHECK(errMessages.size() > 0);
}
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnn/test/ExecutionFrameTest.cpp b/src/armnn/test/ExecutionFrameTest.cpp
index c3480217a8..59accb45b7 100644
--- a/src/armnn/test/ExecutionFrameTest.cpp
+++ b/src/armnn/test/ExecutionFrameTest.cpp
@@ -3,7 +3,7 @@
// SPDX-License-Identifier: MIT
//
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
#include <ExecutionFrame.hpp>
@@ -14,7 +14,9 @@
// B
// |
// C
-BOOST_AUTO_TEST_CASE(NextExecutionFrameTest)
+TEST_SUITE("NextExecutionFrameTestSuite")
+{
+TEST_CASE("NextExecutionFrameTest")
{
armnn::ExecutionFrame executionFrameA;
armnn::ExecutionFrame executionFrameB;
@@ -28,11 +30,12 @@ BOOST_AUTO_TEST_CASE(NextExecutionFrameTest)
auto nextExecutionFrameB = executionFrameB.ExecuteWorkloads(&executionFrameA);
auto nextExecutionFrameC = executionFrameC.ExecuteWorkloads(&executionFrameB);
- BOOST_CHECK_EQUAL(nextExecutionFrameA, &executionFrameB);
- BOOST_CHECK_EQUAL(nextExecutionFrameB, &executionFrameC);
+ CHECK_EQ(nextExecutionFrameA, &executionFrameB);
+ CHECK_EQ(nextExecutionFrameB, &executionFrameC);
- BOOST_CHECK(!nextExecutionFrameC);
+ CHECK(!nextExecutionFrameC);
- BOOST_CHECK_NE(nextExecutionFrameA, &executionFrameC);
+ CHECK_NE(nextExecutionFrameA, &executionFrameC);
+}
}
diff --git a/src/armnn/test/FloatingPointConverterTest.cpp b/src/armnn/test/FloatingPointConverterTest.cpp
index d3474de831..21a16a3cc0 100644
--- a/src/armnn/test/FloatingPointConverterTest.cpp
+++ b/src/armnn/test/FloatingPointConverterTest.cpp
@@ -8,11 +8,13 @@
#include <BFloat16.hpp>
#include <Half.hpp>
-#include <boost/test/unit_test.hpp>
+#include <vector>
-BOOST_AUTO_TEST_SUITE(TestFPConversion)
+#include <doctest/doctest.h>
-BOOST_AUTO_TEST_CASE(TestConvertFp32ToFp16)
+TEST_SUITE("TestFPConversion")
+{
+TEST_CASE("TestConvertFp32ToFp16")
{
using namespace half_float::literal;
@@ -27,14 +29,14 @@ BOOST_AUTO_TEST_CASE(TestConvertFp32ToFp16)
{
armnn::Half expected(floatArray[i]);
armnn::Half actual = convertedBuffer[i];
- BOOST_CHECK_EQUAL(expected, actual);
+ CHECK_EQ(expected, actual);
float convertedHalf = actual;
- BOOST_CHECK_CLOSE(floatArray[i], convertedHalf, 0.07);
+ CHECK_EQ(floatArray[i], doctest::Approx(convertedHalf).epsilon(0.07));
}
}
-BOOST_AUTO_TEST_CASE(TestConvertFp16ToFp32)
+TEST_CASE("TestConvertFp16ToFp32")
{
using namespace half_float::literal;
@@ -49,11 +51,11 @@ BOOST_AUTO_TEST_CASE(TestConvertFp16ToFp32)
{
float expected(halfArray[i]);
float actual = convertedBuffer[i];
- BOOST_CHECK_EQUAL(expected, actual);
+ CHECK_EQ(expected, actual);
}
}
-BOOST_AUTO_TEST_CASE(TestConvertFloat32ToBFloat16)
+TEST_CASE("TestConvertFloat32ToBFloat16")
{
float floatArray[] = { 1.704735E38f, // 0x7F004000 round down
0.0f, // 0x00000000 round down
@@ -102,11 +104,11 @@ BOOST_AUTO_TEST_CASE(TestConvertFloat32ToBFloat16)
for (size_t i = 0; i < numFloats; i++)
{
armnn::BFloat16 actual = convertedBuffer[i];
- BOOST_CHECK_EQUAL(expectedResult[i], actual.Val());
+ CHECK_EQ(expectedResult[i], actual.Val());
}
}
-BOOST_AUTO_TEST_CASE(TestConvertBFloat16ToFloat32)
+TEST_CASE("TestConvertBFloat16ToFloat32")
{
uint16_t bf16Array[] = { 16256, 16320, 38699, 16384, 49156, 32639 };
size_t numFloats = sizeof(bf16Array) / sizeof(bf16Array[0]);
@@ -118,8 +120,8 @@ BOOST_AUTO_TEST_CASE(TestConvertBFloat16ToFloat32)
for (size_t i = 0; i < numFloats; i++)
{
float actual = convertedBuffer[i];
- BOOST_CHECK_EQUAL(expectedResult[i], actual);
+ CHECK_EQ(expectedResult[i], actual);
}
}
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnn/test/FlowControl.cpp b/src/armnn/test/FlowControl.cpp
index 0259d80f60..de53060c2f 100644
--- a/src/armnn/test/FlowControl.cpp
+++ b/src/armnn/test/FlowControl.cpp
@@ -7,13 +7,13 @@
#include <armnn/IRuntime.hpp>
#include <armnn/INetwork.hpp>
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
#include <set>
-BOOST_AUTO_TEST_SUITE(FlowControl)
-
-BOOST_AUTO_TEST_CASE(ErrorOnLoadNetwork)
+TEST_SUITE("FlowControl")
+{
+TEST_CASE("ErrorOnLoadNetwork")
{
using namespace armnn;
@@ -56,13 +56,13 @@ BOOST_AUTO_TEST_CASE(ErrorOnLoadNetwork)
try
{
Optimize(*net, backends, runtime->GetDeviceSpec(), OptimizerOptions(), errMessages);
- BOOST_FAIL("Should have thrown an exception.");
+ FAIL("Should have thrown an exception.");
}
catch (const InvalidArgumentException& e)
{
// Different exceptions are thrown on different backends
}
- BOOST_TEST(errMessages.size() > 0);
+ CHECK(errMessages.size() > 0);
}
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnn/test/GraphTests.cpp b/src/armnn/test/GraphTests.cpp
index 69f96d43a3..0dc2619e51 100644
--- a/src/armnn/test/GraphTests.cpp
+++ b/src/armnn/test/GraphTests.cpp
@@ -17,29 +17,29 @@
#include <backendsCommon/TensorHandle.hpp>
#include <backendsCommon/TensorHandleFactoryRegistry.hpp>
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
-BOOST_AUTO_TEST_SUITE(Graph)
-
-BOOST_AUTO_TEST_CASE(ClassGraph)
+TEST_SUITE("Graph")
+{
+TEST_CASE("ClassGraph")
{
armnn::Graph graph;
- BOOST_CHECK_NO_THROW(graph.AddLayer<armnn::InputLayer>(0, "layerA"));
- BOOST_TEST(GraphHasNamedLayer(graph, "layerA"));
+ CHECK_NOTHROW(graph.AddLayer<armnn::InputLayer>(0, "layerA"));
+ CHECK(GraphHasNamedLayer(graph, "layerA"));
}
-BOOST_AUTO_TEST_CASE(TopologicalSort)
+TEST_CASE("TopologicalSort")
{
armnn::Graph graph;
armnn::ActivationDescriptor activationDefaults;
- BOOST_CHECK_NO_THROW(graph.AddLayer<armnn::InputLayer>(0, "layerA"));
- BOOST_CHECK_NO_THROW(graph.AddLayer<armnn::ActivationLayer>(activationDefaults, "layerB"));
- BOOST_CHECK_NO_THROW(graph.AddLayer<armnn::AdditionLayer>("layerC"));
- BOOST_CHECK_NO_THROW(graph.AddLayer<armnn::OutputLayer>(0, "output"));
- BOOST_CHECK_NO_THROW(graph.AddLayer<armnn::ActivationLayer>(activationDefaults, "layerD"));
- BOOST_CHECK_NO_THROW(graph.AddLayer<armnn::ActivationLayer>(activationDefaults, "layerE"));
+ CHECK_NOTHROW(graph.AddLayer<armnn::InputLayer>(0, "layerA"));
+ CHECK_NOTHROW(graph.AddLayer<armnn::ActivationLayer>(activationDefaults, "layerB"));
+ CHECK_NOTHROW(graph.AddLayer<armnn::AdditionLayer>("layerC"));
+ CHECK_NOTHROW(graph.AddLayer<armnn::OutputLayer>(0, "output"));
+ CHECK_NOTHROW(graph.AddLayer<armnn::ActivationLayer>(activationDefaults, "layerD"));
+ CHECK_NOTHROW(graph.AddLayer<armnn::ActivationLayer>(activationDefaults, "layerE"));
armnn::Layer* const layerA = GetFirstLayerWithName(graph, "layerA");
armnn::Layer* const layerB = GetFirstLayerWithName(graph, "layerB");
@@ -64,14 +64,14 @@ BOOST_AUTO_TEST_CASE(TopologicalSort)
layerC->GetOutputSlot(0).Connect(layerO->GetInputSlot(0));
// check order is valid
- BOOST_TEST(CheckOrder(graph, layerA, layerD));
- BOOST_TEST(CheckOrder(graph, layerA, layerE));
- BOOST_TEST(CheckOrder(graph, layerD, layerC));
- BOOST_TEST(CheckOrder(graph, layerE, layerB));
- BOOST_TEST(CheckOrder(graph, layerB, layerC));
+ CHECK(CheckOrder(graph, layerA, layerD));
+ CHECK(CheckOrder(graph, layerA, layerE));
+ CHECK(CheckOrder(graph, layerD, layerC));
+ CHECK(CheckOrder(graph, layerE, layerB));
+ CHECK(CheckOrder(graph, layerB, layerC));
}
-BOOST_AUTO_TEST_CASE(InsertNewLayerBefore)
+TEST_CASE("InsertNewLayerBefore")
{
armnn::Graph graph;
armnn::TensorInfo tensorInfo({ 1, 1, 1, 1 }, armnn::DataType::Float32);
@@ -79,11 +79,11 @@ BOOST_AUTO_TEST_CASE(InsertNewLayerBefore)
std::vector<armnn::Layer*> order;
armnn::ActivationDescriptor activationDefaults;
- BOOST_CHECK_NO_THROW(graph.AddLayer<armnn::InputLayer>(0, "layerA"));
- BOOST_CHECK_NO_THROW(graph.AddLayer<armnn::ActivationLayer>(activationDefaults, "layerB"));
- BOOST_CHECK_NO_THROW(graph.AddLayer<armnn::ActivationLayer>(activationDefaults, "layerC"));
- BOOST_CHECK_NO_THROW(graph.AddLayer<armnn::AdditionLayer>("layerD"));
- BOOST_CHECK_NO_THROW(graph.AddLayer<armnn::OutputLayer>(0, "output"));
+ CHECK_NOTHROW(graph.AddLayer<armnn::InputLayer>(0, "layerA"));
+ CHECK_NOTHROW(graph.AddLayer<armnn::ActivationLayer>(activationDefaults, "layerB"));
+ CHECK_NOTHROW(graph.AddLayer<armnn::ActivationLayer>(activationDefaults, "layerC"));
+ CHECK_NOTHROW(graph.AddLayer<armnn::AdditionLayer>("layerD"));
+ CHECK_NOTHROW(graph.AddLayer<armnn::OutputLayer>(0, "output"));
armnn::Layer* const layerA = GetFirstLayerWithName(graph, "layerA");
armnn::Layer* const layerB = GetFirstLayerWithName(graph, "layerB");
@@ -108,10 +108,10 @@ BOOST_AUTO_TEST_CASE(InsertNewLayerBefore)
layerD->GetOutputSlot(0).Connect(layerO->GetInputSlot(0));
// Checks order is valid.
- BOOST_TEST(CheckOrder(graph, layerA, layerB));
- BOOST_TEST(CheckOrder(graph, layerA, layerC));
- BOOST_TEST(CheckOrder(graph, layerB, layerD));
- BOOST_TEST(CheckOrder(graph, layerC, layerD));
+ CHECK(CheckOrder(graph, layerA, layerB));
+ CHECK(CheckOrder(graph, layerA, layerC));
+ CHECK(CheckOrder(graph, layerB, layerD));
+ CHECK(CheckOrder(graph, layerC, layerD));
// A
// / \'
@@ -120,18 +120,18 @@ BOOST_AUTO_TEST_CASE(InsertNewLayerBefore)
// \ E
// \|
// D
- BOOST_CHECK_NO_THROW(graph.InsertNewLayer<armnn::ActivationLayer>(layerD->GetInputSlot(1),
+ CHECK_NOTHROW(graph.InsertNewLayer<armnn::ActivationLayer>(layerD->GetInputSlot(1),
activationDefaults,
"layerE"));
armnn::Layer* const layerE = GetFirstLayerWithName(graph, "layerE");
// Checks order is valid.
- BOOST_TEST(CheckOrder(graph, layerA, layerB));
- BOOST_TEST(CheckOrder(graph, layerA, layerC));
- BOOST_TEST(CheckOrder(graph, layerB, layerD));
- BOOST_TEST(CheckOrder(graph, layerC, layerE));
- BOOST_TEST(CheckOrder(graph, layerE, layerD));
+ CHECK(CheckOrder(graph, layerA, layerB));
+ CHECK(CheckOrder(graph, layerA, layerC));
+ CHECK(CheckOrder(graph, layerB, layerD));
+ CHECK(CheckOrder(graph, layerC, layerE));
+ CHECK(CheckOrder(graph, layerE, layerD));
// A
// /|
@@ -142,22 +142,22 @@ BOOST_AUTO_TEST_CASE(InsertNewLayerBefore)
// \ E
// \|
// D
- BOOST_CHECK_NO_THROW(graph.InsertNewLayer<armnn::ActivationLayer>(layerC->GetInputSlot(0),
+ CHECK_NOTHROW(graph.InsertNewLayer<armnn::ActivationLayer>(layerC->GetInputSlot(0),
activationDefaults,
"layerF"));
armnn::Layer* const layerF = GetFirstLayerWithName(graph, "layerF");
// Checks order is valid.
- BOOST_TEST(CheckOrder(graph, layerA, layerB));
- BOOST_TEST(CheckOrder(graph, layerA, layerF));
- BOOST_TEST(CheckOrder(graph, layerF, layerC));
- BOOST_TEST(CheckOrder(graph, layerB, layerD));
- BOOST_TEST(CheckOrder(graph, layerC, layerE));
- BOOST_TEST(CheckOrder(graph, layerE, layerD));
+ CHECK(CheckOrder(graph, layerA, layerB));
+ CHECK(CheckOrder(graph, layerA, layerF));
+ CHECK(CheckOrder(graph, layerF, layerC));
+ CHECK(CheckOrder(graph, layerB, layerD));
+ CHECK(CheckOrder(graph, layerC, layerE));
+ CHECK(CheckOrder(graph, layerE, layerD));
}
-BOOST_AUTO_TEST_CASE(InsertNewLayerAfter)
+TEST_CASE("InsertNewLayerAfter")
{
armnn::Graph graph;
armnn::TensorInfo tensorInfo({ 1, 1, 1, 1 }, armnn::DataType::Float32);
@@ -165,11 +165,11 @@ BOOST_AUTO_TEST_CASE(InsertNewLayerAfter)
std::vector<armnn::Layer*> order;
armnn::ActivationDescriptor activationDefaults;
- BOOST_CHECK_NO_THROW(graph.AddLayer<armnn::InputLayer>(0, "layerA"));
- BOOST_CHECK_NO_THROW(graph.AddLayer<armnn::ActivationLayer>(activationDefaults, "layerB"));
- BOOST_CHECK_NO_THROW(graph.AddLayer<armnn::ActivationLayer>(activationDefaults, "layerC"));
- BOOST_CHECK_NO_THROW(graph.AddLayer<armnn::AdditionLayer>("layerD"));
- BOOST_CHECK_NO_THROW(graph.AddLayer<armnn::OutputLayer>(0, "output"));
+ CHECK_NOTHROW(graph.AddLayer<armnn::InputLayer>(0, "layerA"));
+ CHECK_NOTHROW(graph.AddLayer<armnn::ActivationLayer>(activationDefaults, "layerB"));
+ CHECK_NOTHROW(graph.AddLayer<armnn::ActivationLayer>(activationDefaults, "layerC"));
+ CHECK_NOTHROW(graph.AddLayer<armnn::AdditionLayer>("layerD"));
+ CHECK_NOTHROW(graph.AddLayer<armnn::OutputLayer>(0, "output"));
armnn::Layer* const layerA = GetFirstLayerWithName(graph, "layerA");
armnn::Layer* const layerB = GetFirstLayerWithName(graph, "layerB");
@@ -194,10 +194,10 @@ BOOST_AUTO_TEST_CASE(InsertNewLayerAfter)
layerD->GetOutputSlot(0).Connect(layerO->GetInputSlot(0));
// Checks order is valid.
- BOOST_TEST(CheckOrder(graph, layerA, layerB));
- BOOST_TEST(CheckOrder(graph, layerA, layerC));
- BOOST_TEST(CheckOrder(graph, layerB, layerD));
- BOOST_TEST(CheckOrder(graph, layerC, layerD));
+ CHECK(CheckOrder(graph, layerA, layerB));
+ CHECK(CheckOrder(graph, layerA, layerC));
+ CHECK(CheckOrder(graph, layerB, layerD));
+ CHECK(CheckOrder(graph, layerC, layerD));
// A
// / \'
@@ -206,18 +206,18 @@ BOOST_AUTO_TEST_CASE(InsertNewLayerAfter)
// \ E
// \|
// D
- BOOST_CHECK_NO_THROW(graph.InsertNewLayer<armnn::ActivationLayer>(layerC->GetOutputSlot(),
+ CHECK_NOTHROW(graph.InsertNewLayer<armnn::ActivationLayer>(layerC->GetOutputSlot(),
activationDefaults,
"layerE"));
armnn::Layer* const layerE = GetFirstLayerWithName(graph, "layerE");
// Checks order is valid.
- BOOST_TEST(CheckOrder(graph, layerA, layerB));
- BOOST_TEST(CheckOrder(graph, layerA, layerC));
- BOOST_TEST(CheckOrder(graph, layerB, layerD));
- BOOST_TEST(CheckOrder(graph, layerC, layerE));
- BOOST_TEST(CheckOrder(graph, layerE, layerD));
+ CHECK(CheckOrder(graph, layerA, layerB));
+ CHECK(CheckOrder(graph, layerA, layerC));
+ CHECK(CheckOrder(graph, layerB, layerD));
+ CHECK(CheckOrder(graph, layerC, layerE));
+ CHECK(CheckOrder(graph, layerE, layerD));
// A
@@ -229,19 +229,19 @@ BOOST_AUTO_TEST_CASE(InsertNewLayerAfter)
// \ E
// \ /
// D
- BOOST_CHECK_NO_THROW(graph.InsertNewLayer<armnn::ActivationLayer>(layerA->GetOutputSlot(),
+ CHECK_NOTHROW(graph.InsertNewLayer<armnn::ActivationLayer>(layerA->GetOutputSlot(),
activationDefaults,
"layerF"));
armnn::Layer* const layerF = GetFirstLayerWithName(graph, "layerF");
// Checks order is valid.
- BOOST_TEST(CheckOrder(graph, layerA, layerF));
- BOOST_TEST(CheckOrder(graph, layerF, layerB));
- BOOST_TEST(CheckOrder(graph, layerF, layerC));
- BOOST_TEST(CheckOrder(graph, layerB, layerD));
- BOOST_TEST(CheckOrder(graph, layerC, layerE));
- BOOST_TEST(CheckOrder(graph, layerE, layerD));
+ CHECK(CheckOrder(graph, layerA, layerF));
+ CHECK(CheckOrder(graph, layerF, layerB));
+ CHECK(CheckOrder(graph, layerF, layerC));
+ CHECK(CheckOrder(graph, layerB, layerD));
+ CHECK(CheckOrder(graph, layerC, layerE));
+ CHECK(CheckOrder(graph, layerE, layerD));
}
namespace
@@ -282,7 +282,7 @@ static void TestGraphAfterAddingCopyLayers(const armnn::Graph& graph, const armn
std::sort(sortedNewEdges.begin(), sortedNewEdges.end());
auto last = std::unique(sortedNewEdges.begin(), sortedNewEdges.end());
- BOOST_CHECK_MESSAGE(last == sortedNewEdges.end(), "New graph contains duplicate edges!");
+ CHECK_MESSAGE(last == sortedNewEdges.end(), "New graph contains duplicate edges!");
}
// Each new edge must be tested.
@@ -308,13 +308,13 @@ static void TestGraphAfterAddingCopyLayers(const armnn::Graph& graph, const armn
// Each vertex should correspond to a layer.
const armnn::Layer* srcLayer = edge.first;
const armnn::Layer* dstLayer = edge.second;
- BOOST_TEST(srcLayer);
- BOOST_TEST(dstLayer);
+ CHECK(srcLayer);
+ CHECK(dstLayer);
// Both layers must have the same compute device.
if (srcLayer && dstLayer)
{
- BOOST_TEST((srcLayer->GetBackendId() == dstLayer->GetBackendId()));
+ CHECK((srcLayer->GetBackendId() == dstLayer->GetBackendId()));
}
// Marks edge in original graph as observed (by deleting it).
@@ -329,9 +329,9 @@ static void TestGraphAfterAddingCopyLayers(const armnn::Graph& graph, const armn
if (srcLayer == nullptr || dstLayer == nullptr)
{
- BOOST_ERROR("At least one of the two ends of a new edge (" << edge.first << ", " << edge.second << ") "
- "introduced after adding copy layers to a graph "
- "correspond to a layer not known to the graph");
+ FAIL("At least one of the two ends of a new edge (" << edge.first << ", " << edge.second
+ << ") introduced after adding copy layers to a graph "
+ "correspond to a layer not known to the graph");
continue;
}
@@ -341,14 +341,14 @@ static void TestGraphAfterAddingCopyLayers(const armnn::Graph& graph, const armn
if (srcLayerInOrigGraph == dstLayerInOrigGraph)
{
- BOOST_ERROR("A new edge ("
- << edge.first->GetName()
- << ", "
- << edge.second->GetName()
- << ") introduced after adding copy "
- "layers to a graph is invalid. One of the ends should be present in the original "
- "graph and the other should not, but "
- << (srcLayerInOrigGraph ? "both are" : "none are"));
+ FAIL("A new edge ("
+ << edge.first->GetName()
+ << ", "
+ << edge.second->GetName()
+ << ") introduced after adding copy "
+ "layers to a graph is invalid. One of the ends should be present in the original "
+ "graph and the other should not, but "
+ << (srcLayerInOrigGraph ? "both are" : "none are"));
continue;
}
@@ -376,7 +376,7 @@ static void TestGraphAfterAddingCopyLayers(const armnn::Graph& graph, const armn
if (adjEdges.empty())
{
- BOOST_ERROR("An edge connecting a layer and a copy layer exists, (" << edge.first << ", " <<
+ FAIL("An edge connecting a layer and a copy layer exists, (" << edge.first << ", " <<
edge.second << "), but no other edges connecting the copy layer '" << copyLayer->GetName()
<< "' to other layers could be found");
continue;
@@ -390,14 +390,14 @@ static void TestGraphAfterAddingCopyLayers(const armnn::Graph& graph, const armn
if (!adjLayer)
{
- BOOST_ERROR("An edge (" << adjEdge.first << ", " << adjEdge.second <<") is adjacent to an edge "
- "connecting a layer and a copy layer, (" << edge.first << ", " << edge.second << "), "
- "but the non-copy layer in the former does not correspond to a layer");
+ FAIL("An edge (" << adjEdge.first << ", " << adjEdge.second <<") is adjacent to an "
+ "edge connecting a layer and a copy layer, (" << edge.first << ", " << edge.second <<
+ "), but the non-copy layer in the former does not correspond to a layer");
continue;
}
// Both layers must have different compute devices.
- BOOST_TEST((nonCopyLayer->GetBackendId() != adjLayer->GetBackendId()));
+ CHECK((nonCopyLayer->GetBackendId() != adjLayer->GetBackendId()));
// There must exist an edge connecting both layers directly in the original graph.
{
@@ -420,8 +420,8 @@ static void TestGraphAfterAddingCopyLayers(const armnn::Graph& graph, const armn
}
else
{
- BOOST_ERROR("An edge (" << adjEdge.first << ", " << adjEdge.second << ") is adjacent to an "
- "edge connecting a layer and a copy layer, (" << edge.first << ", " << edge.second <<
+ FAIL("An edge (" << adjEdge.first << ", " << adjEdge.second << ") is adjacent to "
+ "an edge connecting a layer and a copy layer, (" << edge.first << ", " << edge.second <<
"), but there is no edge connecting the layers in the original graph");
}
}
@@ -429,7 +429,7 @@ static void TestGraphAfterAddingCopyLayers(const armnn::Graph& graph, const armn
}
}
- BOOST_TEST(origEdges.empty(), "Not all of the edges in the original graph correspond to paths in the new graph");
+ CHECK_MESSAGE(origEdges.empty(), "Not all of the edges in the original graph correspond to paths in the new graph");
}
struct CopyLayersFixture
@@ -513,7 +513,7 @@ private:
};
};
-BOOST_FIXTURE_TEST_CASE(AddCopyLayers, CopyLayersFixture)
+TEST_CASE_FIXTURE(CopyLayersFixture, "AddCopyLayers")
{
InitialiseTestGraph();
const armnn::Graph origGraph(m_Graph);
@@ -522,7 +522,7 @@ BOOST_FIXTURE_TEST_CASE(AddCopyLayers, CopyLayersFixture)
TestGraphAfterAddingCopyLayers(m_Graph, origGraph);
}
-BOOST_FIXTURE_TEST_CASE(AddCopyLayersSeveralTimes, CopyLayersFixture)
+TEST_CASE_FIXTURE(CopyLayersFixture, "AddCopyLayersSeveralTimes")
{
InitialiseTestGraph();
m_Graph.AddCompatibilityLayers(m_Backends, m_FactoryRegistry);
@@ -533,11 +533,11 @@ BOOST_FIXTURE_TEST_CASE(AddCopyLayersSeveralTimes, CopyLayersFixture)
{
m_Graph.AddCompatibilityLayers(m_Backends, m_FactoryRegistry);
const std::vector<Edge> otherEdges = GetEdgeList(m_Graph);
- BOOST_TEST((edges == otherEdges));
+ CHECK((edges == otherEdges));
}
}
-BOOST_FIXTURE_TEST_CASE(CopyLayersAddedBetweenSameLayersHaveDifferentNames, CopyLayersFixture)
+TEST_CASE_FIXTURE(CopyLayersFixture, "CopyLayersAddedBetweenSameLayersHaveDifferentNames")
{
armnn::Graph graph;
@@ -567,13 +567,13 @@ BOOST_FIXTURE_TEST_CASE(CopyLayersAddedBetweenSameLayersHaveDifferentNames, Copy
graph.AddCompatibilityLayers(m_Backends, m_FactoryRegistry);
std::vector<Edge> edges = GetEdgeList(graph);
- BOOST_CHECK(edges.size() == 6u);
+ CHECK(edges.size() == 6u);
std::sort(edges.begin(), edges.end());
auto last = std::unique(edges.begin(), edges.end());
- BOOST_CHECK_MESSAGE(last == edges.end(), "Found duplicated edges after AddCompatibilityLayers()");
+ CHECK_MESSAGE(last == edges.end(), "Found duplicated edges after AddCompatibilityLayers()");
}
-BOOST_AUTO_TEST_CASE(DuplicateLayerNames)
+TEST_CASE("DuplicateLayerNames")
{
armnn::Graph graph;
@@ -586,11 +586,11 @@ BOOST_AUTO_TEST_CASE(DuplicateLayerNames)
inputLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
auto it = graph.TopologicalSort().begin();
- BOOST_TEST(((*it)->GetType() == armnn::LayerType::Input));
- BOOST_TEST(((*std::next(it))->GetType() == armnn::LayerType::Output));
+ CHECK(((*it)->GetType() == armnn::LayerType::Input));
+ CHECK(((*std::next(it))->GetType() == armnn::LayerType::Output));
}
-BOOST_AUTO_TEST_CASE(CheckGraphConstTensorSharing)
+TEST_CASE("CheckGraphConstTensorSharing")
{
armnn::Graph graph0;
const float* sharedWeightPtr;
@@ -611,7 +611,7 @@ BOOST_AUTO_TEST_CASE(CheckGraphConstTensorSharing)
// graph1 goes out of scope
}
- BOOST_TEST(*sharedWeightPtr == 1);
+ CHECK(*sharedWeightPtr == 1);
}
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnn/test/InferOutputTests.cpp b/src/armnn/test/InferOutputTests.cpp
index 015ab67079..81ad7b2d38 100644
--- a/src/armnn/test/InferOutputTests.cpp
+++ b/src/armnn/test/InferOutputTests.cpp
@@ -7,10 +7,8 @@
#include <test/UnitTests.hpp>
-#include <boost/test/unit_test.hpp>
-
-BOOST_AUTO_TEST_SUITE(LayerValidateOutput)
-
+TEST_SUITE("LayerValidateOutput")
+{
// ArgMinMax
ARMNN_SIMPLE_TEST_CASE(ArgMinMaxInferOutputShape4d, ArgMinMaxInferOutputShape4dTest)
ARMNN_SIMPLE_TEST_CASE(ArgMinMaxInferOutputShape3d, ArgMinMaxInferOutputShape3dTest)
@@ -52,4 +50,4 @@ ARMNN_SIMPLE_TEST_CASE(QLstmInferOutputShape, QLstmInferOutputShapeTest)
// QuantizedLstm
ARMNN_SIMPLE_TEST_CASE(QuantizedLstmInferOutputShape, QuantizedLstmInferOutputShapeTest)
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnn/test/InferOutputTests.hpp b/src/armnn/test/InferOutputTests.hpp
index 0413682dad..b8276de80c 100644
--- a/src/armnn/test/InferOutputTests.hpp
+++ b/src/armnn/test/InferOutputTests.hpp
@@ -14,7 +14,7 @@
#include <layers/PreluLayer.hpp>
#include <layers/StackLayer.hpp>
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
void ArgMinMaxInferOutputShapeImpl(const armnn::ArgMinMaxDescriptor descriptor,
const std::vector<armnn::TensorShape>& inputShapes,
@@ -37,11 +37,11 @@ void ArgMinMaxInferOutputShape4dTest()
};
std::vector<armnn::TensorShape> outputShapes;
- BOOST_CHECK_NO_THROW(ArgMinMaxInferOutputShapeImpl(descriptor, inputShapes, outputShapes));
+ CHECK_NOTHROW(ArgMinMaxInferOutputShapeImpl(descriptor, inputShapes, outputShapes));
armnn::TensorShape expectedOutputShape( { 1, 3, 4 } );
- BOOST_CHECK(outputShapes.size() == 1);
- BOOST_CHECK(outputShapes[0] == expectedOutputShape);
+ CHECK(outputShapes.size() == 1);
+ CHECK(outputShapes[0] == expectedOutputShape);
}
void ArgMinMaxInferOutputShape3dTest()
@@ -56,11 +56,11 @@ void ArgMinMaxInferOutputShape3dTest()
};
std::vector<armnn::TensorShape> outputShapes;
- BOOST_CHECK_NO_THROW(ArgMinMaxInferOutputShapeImpl(descriptor, inputShapes, outputShapes));
+ CHECK_NOTHROW(ArgMinMaxInferOutputShapeImpl(descriptor, inputShapes, outputShapes));
armnn::TensorShape expectedOutputShape( { 3, 2 } );
- BOOST_CHECK(outputShapes.size() == 1);
- BOOST_CHECK(outputShapes[0] == expectedOutputShape);
+ CHECK(outputShapes.size() == 1);
+ CHECK(outputShapes[0] == expectedOutputShape);
}
void ArgMinMaxInferOutputShape2dTest()
@@ -75,11 +75,11 @@ void ArgMinMaxInferOutputShape2dTest()
};
std::vector<armnn::TensorShape> outputShapes;
- BOOST_CHECK_NO_THROW(ArgMinMaxInferOutputShapeImpl(descriptor, inputShapes, outputShapes));
+ CHECK_NOTHROW(ArgMinMaxInferOutputShapeImpl(descriptor, inputShapes, outputShapes));
armnn::TensorShape expectedOutputShape( { 3 } );
- BOOST_CHECK(outputShapes.size() == 1);
- BOOST_CHECK(outputShapes[0] == expectedOutputShape);
+ CHECK(outputShapes.size() == 1);
+ CHECK(outputShapes[0] == expectedOutputShape);
}
void ArgMinMaxInferOutputShape1dTest()
@@ -94,11 +94,11 @@ void ArgMinMaxInferOutputShape1dTest()
};
std::vector<armnn::TensorShape> outputShapes;
- BOOST_CHECK_NO_THROW(ArgMinMaxInferOutputShapeImpl(descriptor, inputShapes, outputShapes));
+ CHECK_NOTHROW(ArgMinMaxInferOutputShapeImpl(descriptor, inputShapes, outputShapes));
armnn::TensorShape expectedOutputShape( { 1 } );
- BOOST_CHECK(outputShapes.size() == 1);
- BOOST_CHECK(outputShapes[0] == expectedOutputShape);
+ CHECK(outputShapes.size() == 1);
+ CHECK(outputShapes[0] == expectedOutputShape);
}
void BatchToSpaceInferOutputShapeTest()
@@ -121,7 +121,7 @@ void BatchToSpaceInferOutputShapeTest()
const std::vector<unsigned int> expectedDimSizes = {2, 2, 4, 1};
armnn::TensorShape expectedShape(4, expectedDimSizes.data());
- BOOST_CHECK(expectedShape == batchToSpaceLayer->InferOutputShapes(shapes).at(0));
+ CHECK(expectedShape == batchToSpaceLayer->InferOutputShapes(shapes).at(0));
}
void SpaceToDepthInferOutputShapeTest()
@@ -143,7 +143,7 @@ void SpaceToDepthInferOutputShapeTest()
const std::vector<unsigned int> expectedDimSizes{ 1, 8, 4, 12 };
armnn::TensorShape expectedShape(4, expectedDimSizes.data());
- BOOST_CHECK(expectedShape == spaceToDepthLayer->InferOutputShapes(shapes).at(0));
+ CHECK(expectedShape == spaceToDepthLayer->InferOutputShapes(shapes).at(0));
}
void PreluInferOutputShapeImpl(const std::vector<armnn::TensorShape>& inputShapes,
@@ -168,10 +168,10 @@ void PreluInferOutputShapeSameDimsTest()
};
std::vector<armnn::TensorShape> outputShapes;
- BOOST_CHECK_NO_THROW(PreluInferOutputShapeImpl(inputShapes, outputShapes));
+ CHECK_NOTHROW(PreluInferOutputShapeImpl(inputShapes, outputShapes));
- BOOST_CHECK(outputShapes.size() == 1);
- BOOST_CHECK(outputShapes[0] == expectedOutputShapes[0]);
+ CHECK(outputShapes.size() == 1);
+ CHECK(outputShapes[0] == expectedOutputShapes[0]);
}
void PreluInferOutputShapeInputBiggerTest()
@@ -188,10 +188,10 @@ void PreluInferOutputShapeInputBiggerTest()
};
std::vector<armnn::TensorShape> outputShapes;
- BOOST_CHECK_NO_THROW(PreluInferOutputShapeImpl(inputShapes, outputShapes));
+ CHECK_NOTHROW(PreluInferOutputShapeImpl(inputShapes, outputShapes));
- BOOST_CHECK(outputShapes.size() == 1);
- BOOST_CHECK(outputShapes[0] == expectedOutputShapes[0]);
+ CHECK(outputShapes.size() == 1);
+ CHECK(outputShapes[0] == expectedOutputShapes[0]);
}
void PreluInferOutputShapeAlphaBiggerTest()
@@ -208,10 +208,10 @@ void PreluInferOutputShapeAlphaBiggerTest()
};
std::vector<armnn::TensorShape> outputShapes;
- BOOST_CHECK_NO_THROW(PreluInferOutputShapeImpl(inputShapes, outputShapes));
+ CHECK_NOTHROW(PreluInferOutputShapeImpl(inputShapes, outputShapes));
- BOOST_CHECK(outputShapes.size() == 1);
- BOOST_CHECK(outputShapes[0] == expectedOutputShapes[0]);
+ CHECK(outputShapes.size() == 1);
+ CHECK(outputShapes[0] == expectedOutputShapes[0]);
}
void PreluInferOutputShapeNoMatchTest()
@@ -228,10 +228,10 @@ void PreluInferOutputShapeNoMatchTest()
};
std::vector<armnn::TensorShape> outputShapes;
- BOOST_CHECK_NO_THROW(PreluInferOutputShapeImpl(inputShapes, outputShapes));
+ CHECK_NOTHROW(PreluInferOutputShapeImpl(inputShapes, outputShapes));
- BOOST_CHECK(outputShapes.size() == 1);
- BOOST_CHECK(outputShapes[0] != expectedOutputShapes[0]);
+ CHECK(outputShapes.size() == 1);
+ CHECK(outputShapes[0] != expectedOutputShapes[0]);
}
void CreatePreluLayerHelper(armnn::Graph& graph,
@@ -264,7 +264,7 @@ void PreluValidateTensorShapesFromInputsMatchTest()
CreatePreluLayerHelper(graph, { 1, 4, 1, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 });
// Graph::InferTensorInfos calls Layer::ValidateTensorShapesFromInputs
- BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
+ CHECK_NOTHROW(graph.InferTensorInfos());
}
void PreluValidateTensorShapesFromInputsNoMatchTest()
@@ -275,7 +275,7 @@ void PreluValidateTensorShapesFromInputsNoMatchTest()
CreatePreluLayerHelper(graph, { 1, 4, 1, 2 }, { 5, 4, 3, 1 }, { 5, 7, 3, 2 });
// Graph::InferTensorInfos calls Layer::ValidateTensorShapesFromInputs
- BOOST_CHECK_THROW(graph.InferTensorInfos(), armnn::LayerValidationException);
+ CHECK_THROWS_AS(graph.InferTensorInfos(), armnn::LayerValidationException);
}
void StackInferOutputShapeImpl(const armnn::StackDescriptor descriptor,
@@ -307,14 +307,14 @@ void StackInferOutputShapeFromInputsMatchTest()
};
std::vector<armnn::TensorShape> outputShapes;
- BOOST_CHECK_NO_THROW(StackInferOutputShapeImpl(descriptor, inputShapes, outputShapes));
+ CHECK_NOTHROW(StackInferOutputShapeImpl(descriptor, inputShapes, outputShapes));
armnn::TensorShape expectedOutputShape
(
{ 4, 3, 2 }
);
- BOOST_CHECK(outputShapes.size() == 1);
- BOOST_CHECK(outputShapes[0] == expectedOutputShape);
+ CHECK(outputShapes.size() == 1);
+ CHECK(outputShapes[0] == expectedOutputShape);
}
void StackInferOutputShapeFromInputsNoMatchTest()
@@ -338,14 +338,14 @@ void StackInferOutputShapeFromInputsNoMatchTest()
// Output shape is inferred from the descriptor, so should still be correct despite mismatching input shapes
std::vector<armnn::TensorShape> outputShapes;
- BOOST_CHECK_NO_THROW(StackInferOutputShapeImpl(descriptor, inputShapes, outputShapes));
+ CHECK_NOTHROW(StackInferOutputShapeImpl(descriptor, inputShapes, outputShapes));
armnn::TensorShape expectedOutputShape
(
{ 4, 3, 2 }
);
- BOOST_CHECK(outputShapes.size() == 1);
- BOOST_CHECK(outputShapes[0] == expectedOutputShape);
+ CHECK(outputShapes.size() == 1);
+ CHECK(outputShapes[0] == expectedOutputShape);
}
void CreateStackLayerHelper(armnn::Graph& graph,
@@ -402,7 +402,7 @@ void StackValidateTensorShapesFromInputsMatchTest()
CreateStackLayerHelper(graph, descriptor, inputShapes, { 3, 2, 5 });
// Graph::InferTensorInfos calls Layer::ValidateTensorShapesFromInputs
- BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
+ CHECK_NOTHROW(graph.InferTensorInfos());
}
void StackValidateTensorShapesFromInputsNoMatchTest()
@@ -428,7 +428,7 @@ void StackValidateTensorShapesFromInputsNoMatchTest()
CreateStackLayerHelper(graph, descriptor, inputShapes, { 3, 2, 5 });
// Graph::InferTensorInfos calls Layer::ValidateTensorShapesFromInputs
- BOOST_CHECK_THROW(graph.InferTensorInfos(), armnn::LayerValidationException);
+ CHECK_THROWS_AS(graph.InferTensorInfos(), armnn::LayerValidationException);
}
void Convolution2dInferOutputShapeTest()
@@ -461,7 +461,7 @@ void Convolution2dInferOutputShapeTest()
const std::vector<unsigned int> expectedOutputSizes = {1, 1, 4, 4};
armnn::TensorShape expectedOutputShape(4, expectedOutputSizes.data());
- BOOST_CHECK(expectedOutputShape == convolution2dLayer->InferOutputShapes(shapes).at(0));
+ CHECK(expectedOutputShape == convolution2dLayer->InferOutputShapes(shapes).at(0));
}
void TransposeConvolution2dInferOutputShapeTest()
@@ -492,7 +492,7 @@ void TransposeConvolution2dInferOutputShapeTest()
const std::vector<unsigned int> expectedOutputSizes = {1, 1, 6, 6};
armnn::TensorShape expectedOutputShape(4, expectedOutputSizes.data());
- BOOST_CHECK(expectedOutputShape == transposeConvolution2dLayer->InferOutputShapes(shapes).at(0));
+ CHECK(expectedOutputShape == transposeConvolution2dLayer->InferOutputShapes(shapes).at(0));
}
void DepthwiseConvolution2dInferOutputShapeTest()
@@ -525,7 +525,7 @@ void DepthwiseConvolution2dInferOutputShapeTest()
const std::vector<unsigned int> expectedOutputSizes = {1, 2, 4, 4};
armnn::TensorShape expectedOutputShape(4, expectedOutputSizes.data());
- BOOST_CHECK(expectedOutputShape == depthwiseConvolution2dLayer->InferOutputShapes(shapes).at(0));
+ CHECK(expectedOutputShape == depthwiseConvolution2dLayer->InferOutputShapes(shapes).at(0));
}
// QLstm
@@ -577,12 +577,12 @@ void QLstmInferOutputShapeTest()
};
std::vector<armnn::TensorShape> actualOutShapes;
- BOOST_CHECK_NO_THROW(QLstmInferOutputShapeImpl(descriptor, inShapes, actualOutShapes));
+ CHECK_NOTHROW(QLstmInferOutputShapeImpl(descriptor, inShapes, actualOutShapes));
- BOOST_CHECK(actualOutShapes.size() == 3);
- BOOST_CHECK(expectedOutShapes[0] == actualOutShapes[0]);
- BOOST_CHECK(expectedOutShapes[1] == actualOutShapes[1]);
- BOOST_CHECK(expectedOutShapes[2] == actualOutShapes[2]);
+ CHECK(actualOutShapes.size() == 3);
+ CHECK(expectedOutShapes[0] == actualOutShapes[0]);
+ CHECK(expectedOutShapes[1] == actualOutShapes[1]);
+ CHECK(expectedOutShapes[2] == actualOutShapes[2]);
}
// QuantizedLstm
@@ -624,9 +624,9 @@ void QuantizedLstmInferOutputShapeTest()
};
std::vector<armnn::TensorShape> actualOutShapes;
- BOOST_CHECK_NO_THROW(QuantizedLstmInferOutputShapeImpl(inShapes, actualOutShapes));
+ CHECK_NOTHROW(QuantizedLstmInferOutputShapeImpl(inShapes, actualOutShapes));
- BOOST_CHECK(actualOutShapes.size() == 2);
- BOOST_CHECK(expectedOutShapes[0] == actualOutShapes[0]);
- BOOST_CHECK(expectedOutShapes[1] == actualOutShapes[1]);
+ CHECK(actualOutShapes.size() == 2);
+ CHECK(expectedOutShapes[0] == actualOutShapes[0]);
+ CHECK(expectedOutShapes[1] == actualOutShapes[1]);
}
diff --git a/src/armnn/test/InstrumentTests.cpp b/src/armnn/test/InstrumentTests.cpp
index e0d0d94700..447a4c9d58 100644
--- a/src/armnn/test/InstrumentTests.cpp
+++ b/src/armnn/test/InstrumentTests.cpp
@@ -2,7 +2,7 @@
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
#include "WallClockTimer.hpp"
@@ -11,13 +11,13 @@
using namespace armnn;
-BOOST_AUTO_TEST_SUITE(Instruments)
-
-BOOST_AUTO_TEST_CASE(WallClockTimerInMicroseconds)
+TEST_SUITE("Instruments")
+{
+TEST_CASE("WallClockTimerInMicroseconds")
{
WallClockTimer wallClockTimer;
- BOOST_CHECK_EQUAL(wallClockTimer.GetName(), "WallClockTimer");
+ CHECK((std::string(wallClockTimer.GetName()) == std::string("WallClockTimer")));
// start the timer
wallClockTimer.Start();
@@ -28,17 +28,17 @@ BOOST_AUTO_TEST_CASE(WallClockTimerInMicroseconds)
// stop the timer
wallClockTimer.Stop();
- BOOST_CHECK_EQUAL(wallClockTimer.GetMeasurements().front().m_Name, WallClockTimer::WALL_CLOCK_TIME);
+ CHECK((wallClockTimer.GetMeasurements().front().m_Name == WallClockTimer::WALL_CLOCK_TIME));
// check that WallClockTimer measurement should be >= 10 microseconds
- BOOST_CHECK_GE(wallClockTimer.GetMeasurements().front().m_Value, std::chrono::microseconds(10).count());
+ CHECK_GE(wallClockTimer.GetMeasurements().front().m_Value, std::chrono::microseconds(10).count());
}
-BOOST_AUTO_TEST_CASE(WallClockTimerInNanoseconds)
+TEST_CASE("WallClockTimerInNanoseconds")
{
WallClockTimer wallClockTimer;
- BOOST_CHECK_EQUAL(wallClockTimer.GetName(), "WallClockTimer");
+ CHECK((std::string(wallClockTimer.GetName()) == std::string("WallClockTimer")));
// start the timer
wallClockTimer.Start();
@@ -49,14 +49,14 @@ BOOST_AUTO_TEST_CASE(WallClockTimerInNanoseconds)
// stop the timer
wallClockTimer.Stop();
- BOOST_CHECK_EQUAL(wallClockTimer.GetMeasurements().front().m_Name, WallClockTimer::WALL_CLOCK_TIME);
+ CHECK((wallClockTimer.GetMeasurements().front().m_Name == WallClockTimer::WALL_CLOCK_TIME));
// delta is 0.5 microseconds
const auto delta =
std::chrono::duration_cast<std::chrono::duration<double, std::micro>>(std::chrono::nanoseconds(500));
// check that WallClockTimer measurement should be >= 0.5 microseconds
- BOOST_CHECK_GE(wallClockTimer.GetMeasurements().front().m_Value, delta.count());
+ CHECK_GE(wallClockTimer.GetMeasurements().front().m_Value, delta.count());
}
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnn/test/ModelAccuracyCheckerTest.cpp b/src/armnn/test/ModelAccuracyCheckerTest.cpp
index 93dba7e75e..47f112ee72 100644
--- a/src/armnn/test/ModelAccuracyCheckerTest.cpp
+++ b/src/armnn/test/ModelAccuracyCheckerTest.cpp
@@ -4,7 +4,7 @@
//
#include "ModelAccuracyChecker.hpp"
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
#include <iostream>
#include <string>
@@ -50,12 +50,12 @@ struct TestHelper
};
}
-BOOST_AUTO_TEST_SUITE(ModelAccuracyCheckerTest)
-
+TEST_SUITE("ModelAccuracyCheckerTest")
+{
using TContainer =
mapbox::util::variant<std::vector<float>, std::vector<int>, std::vector<unsigned char>, std::vector<int8_t>>;
-BOOST_FIXTURE_TEST_CASE(TestFloat32OutputTensorAccuracy, TestHelper)
+TEST_CASE_FIXTURE(TestHelper, "TestFloat32OutputTensorAccuracy")
{
ModelAccuracyChecker checker(GetValidationLabelSet(), GetModelOutputLabels());
@@ -70,7 +70,7 @@ BOOST_FIXTURE_TEST_CASE(TestFloat32OutputTensorAccuracy, TestHelper)
// Top 1 Accuracy
float totalAccuracy = checker.GetAccuracy(1);
- BOOST_CHECK(totalAccuracy == 100.0f);
+ CHECK(totalAccuracy == 100.0f);
// Add image 2 and check accuracy
std::vector<float> inferenceOutputVector2 = {0.10f, 0.0f, 0.0f, 0.0f, 0.05f, 0.70f, 0.0f, 0.0f, 0.0f, 0.15f};
@@ -83,11 +83,11 @@ BOOST_FIXTURE_TEST_CASE(TestFloat32OutputTensorAccuracy, TestHelper)
// Top 1 Accuracy
totalAccuracy = checker.GetAccuracy(1);
- BOOST_CHECK(totalAccuracy == 50.0f);
+ CHECK(totalAccuracy == 50.0f);
// Top 2 Accuracy
totalAccuracy = checker.GetAccuracy(2);
- BOOST_CHECK(totalAccuracy == 100.0f);
+ CHECK(totalAccuracy == 100.0f);
// Add image 3 and check accuracy
std::vector<float> inferenceOutputVector3 = {0.0f, 0.10f, 0.0f, 0.0f, 0.05f, 0.70f, 0.0f, 0.0f, 0.0f, 0.15f};
@@ -100,15 +100,15 @@ BOOST_FIXTURE_TEST_CASE(TestFloat32OutputTensorAccuracy, TestHelper)
// Top 1 Accuracy
totalAccuracy = checker.GetAccuracy(1);
- BOOST_CHECK(totalAccuracy == 33.3333321f);
+ CHECK(totalAccuracy == 33.3333321f);
// Top 2 Accuracy
totalAccuracy = checker.GetAccuracy(2);
- BOOST_CHECK(totalAccuracy == 66.6666641f);
+ CHECK(totalAccuracy == 66.6666641f);
// Top 3 Accuracy
totalAccuracy = checker.GetAccuracy(3);
- BOOST_CHECK(totalAccuracy == 100.0f);
+ CHECK(totalAccuracy == 100.0f);
}
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnn/test/NetworkTests.cpp b/src/armnn/test/NetworkTests.cpp
index 692d64e4e0..d763a85100 100644
--- a/src/armnn/test/NetworkTests.cpp
+++ b/src/armnn/test/NetworkTests.cpp
@@ -9,7 +9,7 @@
#include <Network.hpp>
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
namespace
{
@@ -27,27 +27,27 @@ bool AreAllLayerInputSlotsConnected(const armnn::IConnectableLayer& layer)
}
-BOOST_AUTO_TEST_SUITE(Network)
-
-BOOST_AUTO_TEST_CASE(LayerGuids)
+TEST_SUITE("Network")
+{
+TEST_CASE("LayerGuids")
{
armnn::NetworkImpl net;
armnn::LayerGuid inputId = net.AddInputLayer(0)->GetGuid();
armnn::LayerGuid addId = net.AddAdditionLayer()->GetGuid();
armnn::LayerGuid outputId = net.AddOutputLayer(0)->GetGuid();
- BOOST_TEST(inputId != addId);
- BOOST_TEST(addId != outputId);
- BOOST_TEST(inputId != outputId);
+ CHECK(inputId != addId);
+ CHECK(addId != outputId);
+ CHECK(inputId != outputId);
}
-BOOST_AUTO_TEST_CASE(NetworkBasic)
+TEST_CASE("NetworkBasic")
{
armnn::NetworkImpl net;
- BOOST_TEST(net.PrintGraph() == armnn::Status::Success);
+ CHECK(net.PrintGraph() == armnn::Status::Success);
}
-BOOST_AUTO_TEST_CASE(LayerNamesAreOptionalForINetwork)
+TEST_CASE("LayerNamesAreOptionalForINetwork")
{
armnn::INetworkPtr inet(armnn::INetwork::Create());
inet->AddInputLayer(0);
@@ -56,7 +56,7 @@ BOOST_AUTO_TEST_CASE(LayerNamesAreOptionalForINetwork)
inet->AddOutputLayer(0);
}
-BOOST_AUTO_TEST_CASE(LayerNamesAreOptionalForNetwork)
+TEST_CASE("LayerNamesAreOptionalForNetwork")
{
armnn::NetworkImpl net;
net.AddInputLayer(0);
@@ -65,12 +65,12 @@ BOOST_AUTO_TEST_CASE(LayerNamesAreOptionalForNetwork)
net.AddOutputLayer(0);
}
-BOOST_AUTO_TEST_CASE(NetworkModification)
+TEST_CASE("NetworkModification")
{
armnn::NetworkImpl net;
armnn::IConnectableLayer* const inputLayer = net.AddInputLayer(0, "input layer");
- BOOST_TEST(inputLayer);
+ CHECK(inputLayer);
unsigned int dims[] = { 10,1,1,1 };
std::vector<float> convWeightsData(10);
@@ -81,7 +81,7 @@ BOOST_AUTO_TEST_CASE(NetworkModification)
weights,
armnn::EmptyOptional(),
"conv layer");
- BOOST_TEST(convLayer);
+ CHECK(convLayer);
inputLayer->GetOutputSlot(0).Connect(convLayer->GetInputSlot(0));
@@ -90,31 +90,31 @@ BOOST_AUTO_TEST_CASE(NetworkModification)
weights,
armnn::EmptyOptional(),
"fully connected");
- BOOST_TEST(fullyConnectedLayer);
+ CHECK(fullyConnectedLayer);
convLayer->GetOutputSlot(0).Connect(fullyConnectedLayer->GetInputSlot(0));
armnn::Pooling2dDescriptor pooling2dDesc;
armnn::IConnectableLayer* const poolingLayer = net.AddPooling2dLayer(pooling2dDesc, "pooling2d");
- BOOST_TEST(poolingLayer);
+ CHECK(poolingLayer);
fullyConnectedLayer->GetOutputSlot(0).Connect(poolingLayer->GetInputSlot(0));
armnn::ActivationDescriptor activationDesc;
armnn::IConnectableLayer* const activationLayer = net.AddActivationLayer(activationDesc, "activation");
- BOOST_TEST(activationLayer);
+ CHECK(activationLayer);
poolingLayer->GetOutputSlot(0).Connect(activationLayer->GetInputSlot(0));
armnn::NormalizationDescriptor normalizationDesc;
armnn::IConnectableLayer* const normalizationLayer = net.AddNormalizationLayer(normalizationDesc, "normalization");
- BOOST_TEST(normalizationLayer);
+ CHECK(normalizationLayer);
activationLayer->GetOutputSlot(0).Connect(normalizationLayer->GetInputSlot(0));
armnn::SoftmaxDescriptor softmaxDesc;
armnn::IConnectableLayer* const softmaxLayer = net.AddSoftmaxLayer(softmaxDesc, "softmax");
- BOOST_TEST(softmaxLayer);
+ CHECK(softmaxLayer);
normalizationLayer->GetOutputSlot(0).Connect(softmaxLayer->GetInputSlot(0));
@@ -130,42 +130,42 @@ BOOST_AUTO_TEST_CASE(NetworkModification)
invalidTensor,
invalidTensor,
"batch norm");
- BOOST_TEST(batchNormalizationLayer);
+ CHECK(batchNormalizationLayer);
softmaxLayer->GetOutputSlot(0).Connect(batchNormalizationLayer->GetInputSlot(0));
armnn::IConnectableLayer* const additionLayer = net.AddAdditionLayer("addition");
- BOOST_TEST(additionLayer);
+ CHECK(additionLayer);
batchNormalizationLayer->GetOutputSlot(0).Connect(additionLayer->GetInputSlot(0));
batchNormalizationLayer->GetOutputSlot(0).Connect(additionLayer->GetInputSlot(1));
armnn::IConnectableLayer* const multiplicationLayer = net.AddMultiplicationLayer("multiplication");
- BOOST_TEST(multiplicationLayer);
+ CHECK(multiplicationLayer);
additionLayer->GetOutputSlot(0).Connect(multiplicationLayer->GetInputSlot(0));
additionLayer->GetOutputSlot(0).Connect(multiplicationLayer->GetInputSlot(1));
armnn::IConnectableLayer* const outputLayer = net.AddOutputLayer(0, "output layer");
- BOOST_TEST(outputLayer);
+ CHECK(outputLayer);
multiplicationLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
//Tests that all layers are present in the graph.
- BOOST_TEST(net.GetGraph().GetNumLayers() == 11);
+ CHECK(net.GetGraph().GetNumLayers() == 11);
//Tests that the vertices exist and have correct names.
- BOOST_TEST(GraphHasNamedLayer(net.GetGraph(), "input layer"));
- BOOST_TEST(GraphHasNamedLayer(net.GetGraph(), "conv layer"));
- BOOST_TEST(GraphHasNamedLayer(net.GetGraph(), "fully connected"));
- BOOST_TEST(GraphHasNamedLayer(net.GetGraph(), "pooling2d"));
- BOOST_TEST(GraphHasNamedLayer(net.GetGraph(), "activation"));
- BOOST_TEST(GraphHasNamedLayer(net.GetGraph(), "normalization"));
- BOOST_TEST(GraphHasNamedLayer(net.GetGraph(), "softmax"));
- BOOST_TEST(GraphHasNamedLayer(net.GetGraph(), "batch norm"));
- BOOST_TEST(GraphHasNamedLayer(net.GetGraph(), "addition"));
- BOOST_TEST(GraphHasNamedLayer(net.GetGraph(), "multiplication"));
- BOOST_TEST(GraphHasNamedLayer(net.GetGraph(), "output layer"));
+ CHECK(GraphHasNamedLayer(net.GetGraph(), "input layer"));
+ CHECK(GraphHasNamedLayer(net.GetGraph(), "conv layer"));
+ CHECK(GraphHasNamedLayer(net.GetGraph(), "fully connected"));
+ CHECK(GraphHasNamedLayer(net.GetGraph(), "pooling2d"));
+ CHECK(GraphHasNamedLayer(net.GetGraph(), "activation"));
+ CHECK(GraphHasNamedLayer(net.GetGraph(), "normalization"));
+ CHECK(GraphHasNamedLayer(net.GetGraph(), "softmax"));
+ CHECK(GraphHasNamedLayer(net.GetGraph(), "batch norm"));
+ CHECK(GraphHasNamedLayer(net.GetGraph(), "addition"));
+ CHECK(GraphHasNamedLayer(net.GetGraph(), "multiplication"));
+ CHECK(GraphHasNamedLayer(net.GetGraph(), "output layer"));
auto checkOneOutputToOneInputConnection = []
(const armnn::IConnectableLayer* const srcLayer,
@@ -173,14 +173,14 @@ BOOST_AUTO_TEST_CASE(NetworkModification)
int expectedSrcNumInputs = 1,
int expectedDstNumOutputs = 1)
{
- BOOST_TEST(srcLayer->GetNumInputSlots() == expectedSrcNumInputs);
- BOOST_TEST(srcLayer->GetNumOutputSlots() == 1);
- BOOST_TEST(tgtLayer->GetNumInputSlots() == 1);
- BOOST_TEST(tgtLayer->GetNumOutputSlots() == expectedDstNumOutputs);
-
- BOOST_TEST(srcLayer->GetOutputSlot(0).GetNumConnections() == 1);
- BOOST_TEST(srcLayer->GetOutputSlot(0).GetConnection(0) == &tgtLayer->GetInputSlot(0));
- BOOST_TEST(&srcLayer->GetOutputSlot(0) == tgtLayer->GetInputSlot(0).GetConnection());
+ CHECK(srcLayer->GetNumInputSlots() == expectedSrcNumInputs);
+ CHECK(srcLayer->GetNumOutputSlots() == 1);
+ CHECK(tgtLayer->GetNumInputSlots() == 1);
+ CHECK(tgtLayer->GetNumOutputSlots() == expectedDstNumOutputs);
+
+ CHECK(srcLayer->GetOutputSlot(0).GetNumConnections() == 1);
+ CHECK(srcLayer->GetOutputSlot(0).GetConnection(0) == &tgtLayer->GetInputSlot(0));
+ CHECK(&srcLayer->GetOutputSlot(0) == tgtLayer->GetInputSlot(0).GetConnection());
};
auto checkOneOutputToTwoInputsConnections = []
(const armnn::IConnectableLayer* const srcLayer,
@@ -188,29 +188,29 @@ BOOST_AUTO_TEST_CASE(NetworkModification)
int expectedSrcNumInputs,
int expectedDstNumOutputs = 1)
{
- BOOST_TEST(srcLayer->GetNumInputSlots() == expectedSrcNumInputs);
- BOOST_TEST(srcLayer->GetNumOutputSlots() == 1);
- BOOST_TEST(tgtLayer->GetNumInputSlots() == 2);
- BOOST_TEST(tgtLayer->GetNumOutputSlots() == expectedDstNumOutputs);
+ CHECK(srcLayer->GetNumInputSlots() == expectedSrcNumInputs);
+ CHECK(srcLayer->GetNumOutputSlots() == 1);
+ CHECK(tgtLayer->GetNumInputSlots() == 2);
+ CHECK(tgtLayer->GetNumOutputSlots() == expectedDstNumOutputs);
- BOOST_TEST(srcLayer->GetOutputSlot(0).GetNumConnections() == 2);
+ CHECK(srcLayer->GetOutputSlot(0).GetNumConnections() == 2);
for (unsigned int i = 0; i < srcLayer->GetOutputSlot(0).GetNumConnections(); ++i)
{
- BOOST_TEST(srcLayer->GetOutputSlot(0).GetConnection(i) == &tgtLayer->GetInputSlot(i));
- BOOST_TEST(&srcLayer->GetOutputSlot(0) == tgtLayer->GetInputSlot(i).GetConnection());
+ CHECK(srcLayer->GetOutputSlot(0).GetConnection(i) == &tgtLayer->GetInputSlot(i));
+ CHECK(&srcLayer->GetOutputSlot(0) == tgtLayer->GetInputSlot(i).GetConnection());
}
};
- BOOST_TEST(AreAllLayerInputSlotsConnected(*convLayer));
- BOOST_TEST(AreAllLayerInputSlotsConnected(*fullyConnectedLayer));
- BOOST_TEST(AreAllLayerInputSlotsConnected(*poolingLayer));
- BOOST_TEST(AreAllLayerInputSlotsConnected(*activationLayer));
- BOOST_TEST(AreAllLayerInputSlotsConnected(*normalizationLayer));
- BOOST_TEST(AreAllLayerInputSlotsConnected(*softmaxLayer));
- BOOST_TEST(AreAllLayerInputSlotsConnected(*batchNormalizationLayer));
- BOOST_TEST(AreAllLayerInputSlotsConnected(*additionLayer));
- BOOST_TEST(AreAllLayerInputSlotsConnected(*multiplicationLayer));
- BOOST_TEST(AreAllLayerInputSlotsConnected(*outputLayer));
+ CHECK(AreAllLayerInputSlotsConnected(*convLayer));
+ CHECK(AreAllLayerInputSlotsConnected(*fullyConnectedLayer));
+ CHECK(AreAllLayerInputSlotsConnected(*poolingLayer));
+ CHECK(AreAllLayerInputSlotsConnected(*activationLayer));
+ CHECK(AreAllLayerInputSlotsConnected(*normalizationLayer));
+ CHECK(AreAllLayerInputSlotsConnected(*softmaxLayer));
+ CHECK(AreAllLayerInputSlotsConnected(*batchNormalizationLayer));
+ CHECK(AreAllLayerInputSlotsConnected(*additionLayer));
+ CHECK(AreAllLayerInputSlotsConnected(*multiplicationLayer));
+ CHECK(AreAllLayerInputSlotsConnected(*outputLayer));
// Checks connectivity.
checkOneOutputToOneInputConnection(inputLayer, convLayer, 0);
@@ -225,32 +225,32 @@ BOOST_AUTO_TEST_CASE(NetworkModification)
checkOneOutputToOneInputConnection(multiplicationLayer, outputLayer, 2, 0);
}
-BOOST_AUTO_TEST_CASE(NetworkModification_SplitterConcat)
+TEST_CASE("NetworkModification_SplitterConcat")
{
armnn::NetworkImpl net;
// Adds an input layer and an input tensor descriptor.
armnn::IConnectableLayer* inputLayer = net.AddInputLayer(0, "input layer");
- BOOST_TEST(inputLayer);
+ CHECK(inputLayer);
// Adds a splitter layer.
armnn::ViewsDescriptor splitterDesc(2,4);
armnn::IConnectableLayer* splitterLayer = net.AddSplitterLayer(splitterDesc, "splitter layer");
- BOOST_TEST(splitterLayer);
+ CHECK(splitterLayer);
inputLayer->GetOutputSlot(0).Connect(splitterLayer->GetInputSlot(0));
// Adds a softmax layer 1.
armnn::SoftmaxDescriptor softmaxDescriptor;
armnn::IConnectableLayer* softmaxLayer1 = net.AddSoftmaxLayer(softmaxDescriptor, "softmax_1");
- BOOST_TEST(softmaxLayer1);
+ CHECK(softmaxLayer1);
splitterLayer->GetOutputSlot(0).Connect(softmaxLayer1->GetInputSlot(0));
// Adds a softmax layer 2.
armnn::IConnectableLayer* softmaxLayer2 = net.AddSoftmaxLayer(softmaxDescriptor, "softmax_2");
- BOOST_TEST(softmaxLayer2);
+ CHECK(softmaxLayer2);
splitterLayer->GetOutputSlot(1).Connect(softmaxLayer2->GetInputSlot(0));
@@ -258,62 +258,62 @@ BOOST_AUTO_TEST_CASE(NetworkModification_SplitterConcat)
armnn::OriginsDescriptor concatDesc(2, 4);
armnn::IConnectableLayer* concatLayer = net.AddConcatLayer(concatDesc, "concat layer");
- BOOST_TEST(concatLayer);
+ CHECK(concatLayer);
softmaxLayer1->GetOutputSlot(0).Connect(concatLayer->GetInputSlot(0));
softmaxLayer2->GetOutputSlot(0).Connect(concatLayer->GetInputSlot(1));
// Adds an output layer.
armnn::IConnectableLayer* outputLayer = net.AddOutputLayer(0, "output layer");
- BOOST_TEST(outputLayer);
+ CHECK(outputLayer);
concatLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
- BOOST_TEST(splitterLayer->GetNumOutputSlots() == 2);
- BOOST_TEST(splitterLayer->GetOutputSlot(0).GetConnection(0) == &softmaxLayer1->GetInputSlot(0));
- BOOST_TEST(&splitterLayer->GetOutputSlot(0) == softmaxLayer1->GetInputSlot(0).GetConnection());
- BOOST_TEST(splitterLayer->GetOutputSlot(1).GetConnection(0) == &softmaxLayer2->GetInputSlot(0));
- BOOST_TEST(&splitterLayer->GetOutputSlot(1) == softmaxLayer2->GetInputSlot(0).GetConnection());
-
- BOOST_TEST(concatLayer->GetNumInputSlots() == 2);
- BOOST_TEST(softmaxLayer1->GetOutputSlot(0).GetConnection(0) == &concatLayer->GetInputSlot(0));
- BOOST_TEST(&softmaxLayer1->GetOutputSlot(0) == concatLayer->GetInputSlot(0).GetConnection());
- BOOST_TEST(softmaxLayer2->GetOutputSlot(0).GetConnection(0) == &concatLayer->GetInputSlot(1));
- BOOST_TEST(&softmaxLayer2->GetOutputSlot(0) == concatLayer->GetInputSlot(1).GetConnection());
+ CHECK(splitterLayer->GetNumOutputSlots() == 2);
+ CHECK(splitterLayer->GetOutputSlot(0).GetConnection(0) == &softmaxLayer1->GetInputSlot(0));
+ CHECK(&splitterLayer->GetOutputSlot(0) == softmaxLayer1->GetInputSlot(0).GetConnection());
+ CHECK(splitterLayer->GetOutputSlot(1).GetConnection(0) == &softmaxLayer2->GetInputSlot(0));
+ CHECK(&splitterLayer->GetOutputSlot(1) == softmaxLayer2->GetInputSlot(0).GetConnection());
+
+ CHECK(concatLayer->GetNumInputSlots() == 2);
+ CHECK(softmaxLayer1->GetOutputSlot(0).GetConnection(0) == &concatLayer->GetInputSlot(0));
+ CHECK(&softmaxLayer1->GetOutputSlot(0) == concatLayer->GetInputSlot(0).GetConnection());
+ CHECK(softmaxLayer2->GetOutputSlot(0).GetConnection(0) == &concatLayer->GetInputSlot(1));
+ CHECK(&softmaxLayer2->GetOutputSlot(0) == concatLayer->GetInputSlot(1).GetConnection());
}
-BOOST_AUTO_TEST_CASE(NetworkModification_SplitterAddition)
+TEST_CASE("NetworkModification_SplitterAddition")
{
armnn::NetworkImpl net;
// Adds an input layer and an input tensor descriptor.
armnn::IConnectableLayer* layer = net.AddInputLayer(0, "input layer");
- BOOST_TEST(layer);
+ CHECK(layer);
// Adds a splitter layer.
armnn::ViewsDescriptor splitterDesc(2,4);
armnn::IConnectableLayer* const splitterLayer = net.AddSplitterLayer(splitterDesc, "splitter layer");
- BOOST_TEST(splitterLayer);
+ CHECK(splitterLayer);
layer->GetOutputSlot(0).Connect(splitterLayer->GetInputSlot(0));
// Adds a softmax layer 1.
armnn::SoftmaxDescriptor softmaxDescriptor;
armnn::IConnectableLayer* const softmax1Layer = net.AddSoftmaxLayer(softmaxDescriptor, "softmax_1");
- BOOST_TEST(softmax1Layer);
+ CHECK(softmax1Layer);
splitterLayer->GetOutputSlot(0).Connect(softmax1Layer->GetInputSlot(0));
// Adds a softmax layer 2.
armnn::IConnectableLayer* const softmax2Layer = net.AddSoftmaxLayer(softmaxDescriptor, "softmax_2");
- BOOST_TEST(softmax2Layer);
+ CHECK(softmax2Layer);
splitterLayer->GetOutputSlot(1).Connect(softmax2Layer->GetInputSlot(0));
// Adds addition layer.
layer = net.AddAdditionLayer("add layer");
- BOOST_TEST(layer);
+ CHECK(layer);
softmax1Layer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
softmax2Layer->GetOutputSlot(0).Connect(layer->GetInputSlot(1));
@@ -324,40 +324,40 @@ BOOST_AUTO_TEST_CASE(NetworkModification_SplitterAddition)
prevLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
- BOOST_TEST(layer);
+ CHECK(layer);
}
-BOOST_AUTO_TEST_CASE(NetworkModification_SplitterMultiplication)
+TEST_CASE("NetworkModification_SplitterMultiplication")
{
armnn::NetworkImpl net;
// Adds an input layer and an input tensor descriptor.
armnn::IConnectableLayer* layer = net.AddInputLayer(0, "input layer");
- BOOST_TEST(layer);
+ CHECK(layer);
// Adds a splitter layer.
armnn::ViewsDescriptor splitterDesc(2,4);
armnn::IConnectableLayer* const splitterLayer = net.AddSplitterLayer(splitterDesc, "splitter layer");
- BOOST_TEST(splitterLayer);
+ CHECK(splitterLayer);
layer->GetOutputSlot(0).Connect(splitterLayer->GetInputSlot(0));
// Adds a softmax layer 1.
armnn::SoftmaxDescriptor softmaxDescriptor;
armnn::IConnectableLayer* const softmax1Layer = net.AddSoftmaxLayer(softmaxDescriptor, "softmax_1");
- BOOST_TEST(softmax1Layer);
+ CHECK(softmax1Layer);
splitterLayer->GetOutputSlot(0).Connect(softmax1Layer->GetInputSlot(0));
// Adds a softmax layer 2.
armnn::IConnectableLayer* const softmax2Layer = net.AddSoftmaxLayer(softmaxDescriptor, "softmax_2");
- BOOST_TEST(softmax2Layer);
+ CHECK(softmax2Layer);
splitterLayer->GetOutputSlot(1).Connect(softmax2Layer->GetInputSlot(0));
// Adds multiplication layer.
layer = net.AddMultiplicationLayer("multiplication layer");
- BOOST_TEST(layer);
+ CHECK(layer);
softmax1Layer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
softmax2Layer->GetOutputSlot(0).Connect(layer->GetInputSlot(1));
@@ -365,12 +365,12 @@ BOOST_AUTO_TEST_CASE(NetworkModification_SplitterMultiplication)
// Adds an output layer.
armnn::IConnectableLayer* prevLayer = layer;
layer = net.AddOutputLayer(0, "output layer");
- BOOST_TEST(layer);
+ CHECK(layer);
prevLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
}
-BOOST_AUTO_TEST_CASE(Network_AddQuantize)
+TEST_CASE("Network_AddQuantize")
{
struct Test : public armnn::LayerVisitorBase<armnn::VisitorNoThrowPolicy>
{
@@ -378,20 +378,20 @@ BOOST_AUTO_TEST_CASE(Network_AddQuantize)
{
m_Visited = true;
- BOOST_TEST(layer);
+ CHECK(layer);
std::string expectedName = std::string("quantize");
- BOOST_TEST(std::string(layer->GetName()) == expectedName);
- BOOST_TEST(std::string(name) == expectedName);
+ CHECK(std::string(layer->GetName()) == expectedName);
+ CHECK(std::string(name) == expectedName);
- BOOST_TEST(layer->GetNumInputSlots() == 1);
- BOOST_TEST(layer->GetNumOutputSlots() == 1);
+ CHECK(layer->GetNumInputSlots() == 1);
+ CHECK(layer->GetNumOutputSlots() == 1);
const armnn::TensorInfo& infoIn = layer->GetInputSlot(0).GetConnection()->GetTensorInfo();
- BOOST_TEST((infoIn.GetDataType() == armnn::DataType::Float32));
+ CHECK((infoIn.GetDataType() == armnn::DataType::Float32));
const armnn::TensorInfo& infoOut = layer->GetOutputSlot(0).GetTensorInfo();
- BOOST_TEST((infoOut.GetDataType() == armnn::DataType::QAsymmU8));
+ CHECK((infoOut.GetDataType() == armnn::DataType::QAsymmU8));
}
bool m_Visited = false;
@@ -416,11 +416,11 @@ BOOST_AUTO_TEST_CASE(Network_AddQuantize)
Test testQuantize;
graph->Accept(testQuantize);
- BOOST_TEST(testQuantize.m_Visited == true);
+ CHECK(testQuantize.m_Visited == true);
}
-BOOST_AUTO_TEST_CASE(Network_AddMerge)
+TEST_CASE("Network_AddMerge")
{
struct Test : public armnn::LayerVisitorBase<armnn::VisitorNoThrowPolicy>
{
@@ -428,23 +428,23 @@ BOOST_AUTO_TEST_CASE(Network_AddMerge)
{
m_Visited = true;
- BOOST_TEST(layer);
+ CHECK(layer);
std::string expectedName = std::string("merge");
- BOOST_TEST(std::string(layer->GetName()) == expectedName);
- BOOST_TEST(std::string(name) == expectedName);
+ CHECK(std::string(layer->GetName()) == expectedName);
+ CHECK(std::string(name) == expectedName);
- BOOST_TEST(layer->GetNumInputSlots() == 2);
- BOOST_TEST(layer->GetNumOutputSlots() == 1);
+ CHECK(layer->GetNumInputSlots() == 2);
+ CHECK(layer->GetNumOutputSlots() == 1);
const armnn::TensorInfo& infoIn0 = layer->GetInputSlot(0).GetConnection()->GetTensorInfo();
- BOOST_TEST((infoIn0.GetDataType() == armnn::DataType::Float32));
+ CHECK((infoIn0.GetDataType() == armnn::DataType::Float32));
const armnn::TensorInfo& infoIn1 = layer->GetInputSlot(1).GetConnection()->GetTensorInfo();
- BOOST_TEST((infoIn1.GetDataType() == armnn::DataType::Float32));
+ CHECK((infoIn1.GetDataType() == armnn::DataType::Float32));
const armnn::TensorInfo& infoOut = layer->GetOutputSlot(0).GetTensorInfo();
- BOOST_TEST((infoOut.GetDataType() == armnn::DataType::Float32));
+ CHECK((infoOut.GetDataType() == armnn::DataType::Float32));
}
bool m_Visited = false;
@@ -469,10 +469,10 @@ BOOST_AUTO_TEST_CASE(Network_AddMerge)
Test testMerge;
network->Accept(testMerge);
- BOOST_TEST(testMerge.m_Visited == true);
+ CHECK(testMerge.m_Visited == true);
}
-BOOST_AUTO_TEST_CASE(StandInLayerNetworkTest)
+TEST_CASE("StandInLayerNetworkTest")
{
// Create a simple network with a StandIn some place in it.
armnn::NetworkImpl net;
@@ -498,14 +498,14 @@ BOOST_AUTO_TEST_CASE(StandInLayerNetworkTest)
standIn->GetOutputSlot(0).Connect(output->GetInputSlot(0));
// Check that the layer is there.
- BOOST_TEST(GraphHasNamedLayer(net.GetGraph(), "StandIn"));
+ CHECK(GraphHasNamedLayer(net.GetGraph(), "StandIn"));
// Check that it is connected as expected.
- BOOST_TEST(input->GetOutputSlot(0).GetConnection(0) == &floor->GetInputSlot(0));
- BOOST_TEST(floor->GetOutputSlot(0).GetConnection(0) == &standIn->GetInputSlot(0));
- BOOST_TEST(standIn->GetOutputSlot(0).GetConnection(0) == &output->GetInputSlot(0));
+ CHECK(input->GetOutputSlot(0).GetConnection(0) == &floor->GetInputSlot(0));
+ CHECK(floor->GetOutputSlot(0).GetConnection(0) == &standIn->GetInputSlot(0));
+ CHECK(standIn->GetOutputSlot(0).GetConnection(0) == &output->GetInputSlot(0));
}
-BOOST_AUTO_TEST_CASE(StandInLayerSingleInputMultipleOutputsNetworkTest)
+TEST_CASE("StandInLayerSingleInputMultipleOutputsNetworkTest")
{
// Another test with one input and two outputs on the StandIn layer.
armnn::NetworkImpl net;
@@ -531,11 +531,11 @@ BOOST_AUTO_TEST_CASE(StandInLayerSingleInputMultipleOutputsNetworkTest)
standIn->GetOutputSlot(1).Connect(output1->GetInputSlot(0));
// Check that the layer is there.
- BOOST_TEST(GraphHasNamedLayer(net.GetGraph(), "StandIn"));
+ CHECK(GraphHasNamedLayer(net.GetGraph(), "StandIn"));
// Check that it is connected as expected.
- BOOST_TEST(input->GetOutputSlot(0).GetConnection(0) == &standIn->GetInputSlot(0));
- BOOST_TEST(standIn->GetOutputSlot(0).GetConnection(0) == &output0->GetInputSlot(0));
- BOOST_TEST(standIn->GetOutputSlot(1).GetConnection(0) == &output1->GetInputSlot(0));
+ CHECK(input->GetOutputSlot(0).GetConnection(0) == &standIn->GetInputSlot(0));
+ CHECK(standIn->GetOutputSlot(0).GetConnection(0) == &output0->GetInputSlot(0));
+ CHECK(standIn->GetOutputSlot(1).GetConnection(0) == &output1->GetInputSlot(0));
}
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnn/test/ObservableTest.cpp b/src/armnn/test/ObservableTest.cpp
index 3bb78a8e25..02d93f1dfa 100644
--- a/src/armnn/test/ObservableTest.cpp
+++ b/src/armnn/test/ObservableTest.cpp
@@ -3,14 +3,14 @@
// SPDX-License-Identifier: MIT
//
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
#include "Graph.hpp"
#include "Observable.hpp"
-BOOST_AUTO_TEST_SUITE(Observable)
-
-BOOST_AUTO_TEST_CASE(AddedLayerObservableTest)
+TEST_SUITE("Observable")
+{
+TEST_CASE("AddedLayerObservableTest")
{
armnn::Graph graph;
@@ -24,11 +24,11 @@ BOOST_AUTO_TEST_CASE(AddedLayerObservableTest)
// Check the observable has observed the changes
std::list<armnn::Layer*> testLayers({ output, input });
- BOOST_CHECK_EQUAL_COLLECTIONS(layerObservable.begin(), layerObservable.end(),
- testLayers.begin(), testLayers.end());
+ CHECK(std::equal(layerObservable.begin(), layerObservable.end(),
+ testLayers.begin(), testLayers.end()));
}
-BOOST_AUTO_TEST_CASE(ClearAddedLayerObservableTest)
+TEST_CASE("ClearAddedLayerObservableTest")
{
armnn::Graph graph;
@@ -44,11 +44,11 @@ BOOST_AUTO_TEST_CASE(ClearAddedLayerObservableTest)
// Check the observable has observed the changes
std::list<armnn::Layer*> emptyList({});
- BOOST_CHECK_EQUAL_COLLECTIONS(addedLayerObservable.begin(), addedLayerObservable.end(),
- emptyList.begin(), emptyList.end());
+ CHECK(std::equal(addedLayerObservable.begin(), addedLayerObservable.end(),
+ emptyList.begin(), emptyList.end()));
}
-BOOST_AUTO_TEST_CASE(ErasedLayerNamesObservableTest)
+TEST_CASE("ErasedLayerNamesObservableTest")
{
armnn::Graph graph;
@@ -64,11 +64,11 @@ BOOST_AUTO_TEST_CASE(ErasedLayerNamesObservableTest)
// Check the observable has observed the changes
std::list<std::string> testList({"output"});
- BOOST_CHECK_EQUAL_COLLECTIONS(erasedLayerNamesObservable.begin(), erasedLayerNamesObservable.end(),
- testList.begin(), testList.end());
+ CHECK(std::equal(erasedLayerNamesObservable.begin(), erasedLayerNamesObservable.end(),
+ testList.begin(), testList.end()));
}
-BOOST_AUTO_TEST_CASE(ClearErasedLayerNamesObservableTest)
+TEST_CASE("ClearErasedLayerNamesObservableTest")
{
armnn::Graph graph;
@@ -86,9 +86,9 @@ BOOST_AUTO_TEST_CASE(ClearErasedLayerNamesObservableTest)
// Check the observable has observed the changes
std::list<std::string> emptyList({});
- BOOST_CHECK_EQUAL_COLLECTIONS(erasedLayerNamesObservable.begin(), erasedLayerNamesObservable.end(),
- emptyList.begin(), emptyList.end());
+ CHECK(std::equal(erasedLayerNamesObservable.begin(), erasedLayerNamesObservable.end(),
+ emptyList.begin(), emptyList.end()));
}
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnn/test/OptimizerTests.cpp b/src/armnn/test/OptimizerTests.cpp
index 7fe69a9380..e68546c9dd 100644
--- a/src/armnn/test/OptimizerTests.cpp
+++ b/src/armnn/test/OptimizerTests.cpp
@@ -15,6 +15,7 @@
#include <armnn/INetwork.hpp>
#include <armnn/LayerVisitorBase.hpp>
+#include <armnn/utility/Assert.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>
#include <armnnUtils/FloatingPointConverter.hpp>
@@ -22,7 +23,7 @@
#include <backendsCommon/LayerSupportBase.hpp>
#include <backendsCommon/TensorHandle.hpp>
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
using namespace armnn;
@@ -140,10 +141,11 @@ void CreateLSTMLayerHelper(Graph &graph, bool CifgEnabled)
} // namespace
-BOOST_AUTO_TEST_SUITE(Optimizer)
+TEST_SUITE("Optimizer")
+{
using namespace armnn::optimizations;
-BOOST_AUTO_TEST_CASE(LSTMValidateTensorShapesFromInputsCIFGDisabledTest)
+TEST_CASE("LSTMValidateTensorShapesFromInputsCIFGDisabledTest")
{
Graph graph;
@@ -151,10 +153,10 @@ BOOST_AUTO_TEST_CASE(LSTMValidateTensorShapesFromInputsCIFGDisabledTest)
CreateLSTMLayerHelper(graph, false);
//This function used to call ValidateShapesFromInputs();
- BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
+ CHECK_NOTHROW(graph.InferTensorInfos());
}
-BOOST_AUTO_TEST_CASE(LSTMValidateTensorShapesFromInputsCIFGEnabledTest)
+TEST_CASE("LSTMValidateTensorShapesFromInputsCIFGEnabledTest")
{
Graph graph;
@@ -162,10 +164,10 @@ BOOST_AUTO_TEST_CASE(LSTMValidateTensorShapesFromInputsCIFGEnabledTest)
CreateLSTMLayerHelper(graph, true);
//This function used to call ValidateShapesFromInputs();
- BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
+ CHECK_NOTHROW(graph.InferTensorInfos());
}
-BOOST_AUTO_TEST_CASE(InsertConvertersTest)
+TEST_CASE("InsertConvertersTest")
{
const armnn::TensorInfo info({ 1, 5, 2, 3 }, armnn::DataType::Float16);
@@ -191,7 +193,7 @@ BOOST_AUTO_TEST_CASE(InsertConvertersTest)
->GetOutputHandler().SetTensorInfo(info);
// Check graph layer sequence before inserting convert layers
- BOOST_TEST(CheckSequence(graph.cbegin(),
+ CHECK(CheckSequence(graph.cbegin(),
graph.cend(),
&IsLayerOfType<armnn::InputLayer>,
&IsLayerOfType<armnn::InputLayer>,
@@ -241,7 +243,7 @@ BOOST_AUTO_TEST_CASE(InsertConvertersTest)
}
// Check sequence of layers after inserting convert layers
- BOOST_TEST(CheckSequence(graph.cbegin(),
+ CHECK(CheckSequence(graph.cbegin(),
graph.cend(),
&IsLayerOfType<armnn::InputLayer>,
&IsLayerOfType<armnn::InputLayer>,
@@ -284,7 +286,7 @@ void CreateConvolution2dGraph(Graph &graph, const unsigned int* inputShape,
layer->GetOutputSlot().Connect(output->GetInputSlot(0));
}
-BOOST_AUTO_TEST_CASE(Conv2dValidateTensorShapesFromInputs)
+TEST_CASE("Conv2dValidateTensorShapesFromInputs")
{
Graph graph;
const unsigned int inputShape[] = { 1, 3, 8, 16 };
@@ -292,10 +294,10 @@ BOOST_AUTO_TEST_CASE(Conv2dValidateTensorShapesFromInputs)
const unsigned int outputShape[] = { 1, 2, 4, 14 };
CreateConvolution2dGraph(graph, inputShape, weightsShape, outputShape);
- BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
+ CHECK_NOTHROW(graph.InferTensorInfos());
}
-BOOST_AUTO_TEST_CASE(Conv2dValidateTensorShapesFromInputsNhwc)
+TEST_CASE("Conv2dValidateTensorShapesFromInputsNhwc")
{
Graph graph;
const unsigned int inputShape[] = { 1, 8, 16, 3 };
@@ -303,7 +305,7 @@ BOOST_AUTO_TEST_CASE(Conv2dValidateTensorShapesFromInputsNhwc)
const unsigned int outputShape[] = { 1, 4, 14, 2 };
CreateConvolution2dGraph(graph, inputShape, weightsShape, outputShape, DataLayout::NHWC);
- BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
+ CHECK_NOTHROW(graph.InferTensorInfos());
}
void CreateDepthwiseConvolution2dGraph(Graph &graph, const unsigned int* inputShape,
@@ -334,7 +336,7 @@ void CreateDepthwiseConvolution2dGraph(Graph &graph, const unsigned int* inputSh
layer->GetOutputSlot().Connect(output->GetInputSlot(0));
}
-BOOST_AUTO_TEST_CASE(DepthwiseConv2dValidateTensorShapesFromInputs)
+TEST_CASE("DepthwiseConv2dValidateTensorShapesFromInputs")
{
Graph graph;
const unsigned int inputShape[] = { 1, 2, 3, 3 };
@@ -342,10 +344,10 @@ BOOST_AUTO_TEST_CASE(DepthwiseConv2dValidateTensorShapesFromInputs)
const unsigned int outputShape[] = { 1, 2, 1, 1 };
CreateDepthwiseConvolution2dGraph(graph, inputShape, weightsShape, outputShape);
- BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
+ CHECK_NOTHROW(graph.InferTensorInfos());
}
-BOOST_AUTO_TEST_CASE(DepthwiseConv2dValidateTensorShapesFromInputsNhwc)
+TEST_CASE("DepthwiseConv2dValidateTensorShapesFromInputsNhwc")
{
Graph graph;
const unsigned int inputShape[] = { 1, 3, 3, 2 };
@@ -353,7 +355,7 @@ BOOST_AUTO_TEST_CASE(DepthwiseConv2dValidateTensorShapesFromInputsNhwc)
const unsigned int outputShape[] = { 1, 1, 1, 2 };
CreateDepthwiseConvolution2dGraph(graph, inputShape, weightsShape, outputShape, DataLayout::NHWC);
- BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
+ CHECK_NOTHROW(graph.InferTensorInfos());
}
void CreatePooling2dGraph(Graph& graph, const unsigned int* inputShape, const unsigned int* outputShape,
@@ -384,24 +386,24 @@ void CreatePooling2dGraph(Graph& graph, const unsigned int* inputShape, const u
layer->GetOutputSlot().Connect(output->GetInputSlot(0));
}
-BOOST_AUTO_TEST_CASE(Pooling2dValidateTensorShapesFromInputs)
+TEST_CASE("Pooling2dValidateTensorShapesFromInputs")
{
Graph graph;
const unsigned int inputShape[] = { 5, 3, 52, 60 };
const unsigned int outputShape[] = { 5, 3, 11, 13 };
CreatePooling2dGraph(graph, inputShape, outputShape, DataLayout::NCHW);
- BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
+ CHECK_NOTHROW(graph.InferTensorInfos());
}
-BOOST_AUTO_TEST_CASE(Pooling2dValidateTensorShapesFromInputsNhwc)
+TEST_CASE("Pooling2dValidateTensorShapesFromInputsNhwc")
{
Graph graph;
const unsigned int inputShape[] = { 5, 52, 60, 3 };
const unsigned int outputShape[] = { 5, 11, 13, 3 };
CreatePooling2dGraph(graph, inputShape, outputShape, DataLayout::NHWC);
- BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
+ CHECK_NOTHROW(graph.InferTensorInfos());
}
void CreateResizeBilinearGraph(Graph& graph,
@@ -429,24 +431,24 @@ void CreateResizeBilinearGraph(Graph& graph,
layer->GetOutputSlot().Connect(output->GetInputSlot(0));
}
-BOOST_AUTO_TEST_CASE(ResizeBilinearValidateTensorShapesFromInputs)
+TEST_CASE("ResizeBilinearValidateTensorShapesFromInputs")
{
Graph graph;
const unsigned int inputShape[] = { 1, 2, 4, 5 };
const unsigned int outputShape[] = { 1, 2, 3, 4 };
CreateResizeBilinearGraph(graph, inputShape, outputShape);
- BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
+ CHECK_NOTHROW(graph.InferTensorInfos());
}
-BOOST_AUTO_TEST_CASE(ResizeBilinearValidateTensorShapesFromInputsNhwc)
+TEST_CASE("ResizeBilinearValidateTensorShapesFromInputsNhwc")
{
Graph graph;
const unsigned int inputShape[] = { 1, 4, 5, 2 };
const unsigned int outputShape[] = { 1, 3, 4, 2 };
CreateResizeBilinearGraph(graph, inputShape, outputShape, DataLayout::NHWC);
- BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
+ CHECK_NOTHROW(graph.InferTensorInfos());
}
void CreateGatherGraph(Graph& graph,
@@ -470,7 +472,7 @@ void CreateGatherGraph(Graph& graph,
layer->GetOutputSlot().Connect(output->GetInputSlot(0));
}
-BOOST_AUTO_TEST_CASE(GatherValidateTensorShapesFromInputs)
+TEST_CASE("GatherValidateTensorShapesFromInputs")
{
Graph graph;
armnn::TensorInfo paramsInfo({10, 5}, DataType::Float32);
@@ -479,10 +481,10 @@ BOOST_AUTO_TEST_CASE(GatherValidateTensorShapesFromInputs)
CreateGatherGraph(graph, paramsInfo, indicesInfo, outputInfo);
- BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
+ CHECK_NOTHROW(graph.InferTensorInfos());
}
-BOOST_AUTO_TEST_CASE(GatherValidateTensorShapesFromInputs1DParams)
+TEST_CASE("GatherValidateTensorShapesFromInputs1DParams")
{
Graph graph;
armnn::TensorInfo paramsInfo({8}, DataType::Float32);
@@ -491,10 +493,10 @@ BOOST_AUTO_TEST_CASE(GatherValidateTensorShapesFromInputs1DParams)
CreateGatherGraph(graph, paramsInfo, indicesInfo, outputInfo);
- BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
+ CHECK_NOTHROW(graph.InferTensorInfos());
}
-BOOST_AUTO_TEST_CASE(GatherValidateTensorShapesFromInputsMultiDimIndices)
+TEST_CASE("GatherValidateTensorShapesFromInputsMultiDimIndices")
{
Graph graph;
armnn::TensorInfo paramsInfo({3, 2, 5}, DataType::Float32);
@@ -503,10 +505,10 @@ BOOST_AUTO_TEST_CASE(GatherValidateTensorShapesFromInputsMultiDimIndices)
CreateGatherGraph(graph, paramsInfo, indicesInfo, outputInfo);
- BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
+ CHECK_NOTHROW(graph.InferTensorInfos());
}
-BOOST_AUTO_TEST_CASE(DetectionPostProcessValidateTensorShapes)
+TEST_CASE("DetectionPostProcessValidateTensorShapes")
{
Graph graph;
armnn::TensorInfo boxEncodingsInfo({1, 10, 4}, DataType::QAsymmU8);
@@ -538,7 +540,7 @@ BOOST_AUTO_TEST_CASE(DetectionPostProcessValidateTensorShapes)
input0->GetOutputSlot().Connect(layer->GetInputSlot(0));
input1->GetOutputSlot().Connect(layer->GetInputSlot(1));
- BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
+ CHECK_NOTHROW(graph.InferTensorInfos());
}
class MockLayerSupport : public LayerSupportBase
@@ -612,21 +614,21 @@ public:
};
};
-BOOST_AUTO_TEST_CASE(BackendCapabilityTest)
+TEST_CASE("BackendCapabilityTest")
{
BackendId backendId = "MockBackend";
armnn::BackendOptions::BackendOption nonConstWeights{"NonConstWeights", true};
// MockBackend does not support the NonConstWeights capability
- BOOST_CHECK(!armnn::HasCapability(nonConstWeights, backendId));
- BOOST_CHECK(!armnn::HasCapability("NonConstWeights", backendId));
+ CHECK(!armnn::HasCapability(nonConstWeights, backendId));
+ CHECK(!armnn::HasCapability("NonConstWeights", backendId));
// MockBackend does not support the AsyncExecution capability
- BOOST_CHECK(!armnn::GetCapability("AsyncExecution", backendId).has_value());
+ CHECK(!armnn::GetCapability("AsyncExecution", backendId).has_value());
}
-BOOST_AUTO_TEST_CASE(BackendHintTest)
+TEST_CASE("BackendHintTest")
{
class TestBackendAssignment : public LayerVisitorBase<VisitorNoThrowPolicy>
{
@@ -635,14 +637,14 @@ BOOST_AUTO_TEST_CASE(BackendHintTest)
{
IgnoreUnused(id, name);
auto inputLayer = PolymorphicDowncast<const InputLayer*>(layer);
- BOOST_TEST((inputLayer->GetBackendId() == "MockBackend"));
+ CHECK((inputLayer->GetBackendId() == "MockBackend"));
}
void VisitOutputLayer(const IConnectableLayer* layer, LayerBindingId id, const char* name = nullptr) override
{
IgnoreUnused(id, name);
auto outputLayer = PolymorphicDowncast<const OutputLayer*>(layer);
- BOOST_TEST((outputLayer->GetBackendId() == "MockBackend"));
+ CHECK((outputLayer->GetBackendId() == "MockBackend"));
}
void VisitActivationLayer(const IConnectableLayer* layer,
@@ -651,7 +653,7 @@ BOOST_AUTO_TEST_CASE(BackendHintTest)
{
IgnoreUnused(activationDescriptor, name);
auto activation = PolymorphicDowncast<const ActivationLayer*>(layer);
- BOOST_TEST((activation->GetBackendId() == "CustomBackend"));
+ CHECK((activation->GetBackendId() == "CustomBackend"));
}
};
@@ -718,7 +720,7 @@ BOOST_AUTO_TEST_CASE(BackendHintTest)
lastLayer,
EmptyOptional());
- BOOST_TEST(res.IsOk());
+ CHECK(res.IsOk());
TestBackendAssignment visitor;
for (auto it = firstLayer; it != lastLayer; ++it)
@@ -728,7 +730,7 @@ BOOST_AUTO_TEST_CASE(BackendHintTest)
}
// Tests that OptimizeForExclusiveConnections works, fusing when needed, using BatchNorm fusing as example
-BOOST_AUTO_TEST_CASE(OptimizeForExclusiveConnectionsFuseTest)
+TEST_CASE("OptimizeForExclusiveConnectionsFuseTest")
{
using namespace armnn;
// Define layers information
@@ -786,8 +788,8 @@ BOOST_AUTO_TEST_CASE(OptimizeForExclusiveConnectionsFuseTest)
conv->GetOutputSlot(0).Connect(batchNorm->GetInputSlot(0));
batchNorm->GetOutputSlot(0).Connect(output->GetInputSlot(0));
- BOOST_CHECK(4 == graph.GetNumLayers());
- BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(),
+ CHECK(4 == graph.GetNumLayers());
+ CHECK(CheckSequence(graph.cbegin(), graph.cend(),
&IsLayerOfType<InputLayer>,
&IsLayerOfType<Convolution2dLayer>,
&IsLayerOfType<BatchNormalizationLayer>,
@@ -801,15 +803,15 @@ BOOST_AUTO_TEST_CASE(OptimizeForExclusiveConnectionsFuseTest)
(layer->GetNameStr() == "fused-batchNorm-into-convolution");
};
- BOOST_CHECK(3 == graph.GetNumLayers());
- BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(),
+ CHECK(3 == graph.GetNumLayers());
+ CHECK(CheckSequence(graph.cbegin(), graph.cend(),
&IsLayerOfType<InputLayer>,
checkFusedConv2d,
&IsLayerOfType<OutputLayer>));
}
// Tests that OptimizeForExclusiveConnections works, not fusing when not needed, using BatchNorm fusing as example
-BOOST_AUTO_TEST_CASE(OptimizeForExclusiveConnectionsWithoutFuseTest)
+TEST_CASE("OptimizeForExclusiveConnectionsWithoutFuseTest")
{
// Define the network
Graph graph;
@@ -828,8 +830,8 @@ BOOST_AUTO_TEST_CASE(OptimizeForExclusiveConnectionsWithoutFuseTest)
batchNorm->GetOutputSlot(0).Connect(output->GetInputSlot(0));
conv->GetOutputSlot(0).Connect(output2->GetInputSlot(0));
- BOOST_CHECK(5 == graph.GetNumLayers());
- BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(),
+ CHECK(5 == graph.GetNumLayers());
+ CHECK(CheckSequence(graph.cbegin(), graph.cend(),
&IsLayerOfType<armnn::InputLayer>,
&IsLayerOfType<armnn::Convolution2dLayer>,
&IsLayerOfType<armnn::BatchNormalizationLayer>,
@@ -838,12 +840,12 @@ BOOST_AUTO_TEST_CASE(OptimizeForExclusiveConnectionsWithoutFuseTest)
// Optimize graph
armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(FuseBatchNormIntoConvolution2DFloat32()));
- BOOST_CHECK(5 == graph.GetNumLayers());
- BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(),
+ CHECK(5 == graph.GetNumLayers());
+ CHECK(CheckSequence(graph.cbegin(), graph.cend(),
&IsLayerOfType<armnn::InputLayer>,
&IsLayerOfType<armnn::Convolution2dLayer>,
&IsLayerOfType<armnn::BatchNormalizationLayer>,
&IsLayerOfType<armnn::OutputLayer>,
&IsLayerOfType<armnn::OutputLayer>));
}
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnn/test/OptionalTest.cpp b/src/armnn/test/OptionalTest.cpp
index 73c96431fb..17e978cf29 100644
--- a/src/armnn/test/OptionalTest.cpp
+++ b/src/armnn/test/OptionalTest.cpp
@@ -2,7 +2,7 @@
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
#include <armnn/Optional.hpp>
#include <string>
@@ -24,46 +24,46 @@ void PassStringRefWithDefault(armnn::Optional<std::string&> value = armnn::Empty
} // namespace <anonymous>
-BOOST_AUTO_TEST_SUITE(OptionalTests)
-
-BOOST_AUTO_TEST_CASE(SimpleStringTests)
+TEST_SUITE("OptionalTests")
+{
+TEST_CASE("SimpleStringTests")
{
armnn::Optional<std::string> optionalString;
- BOOST_TEST(static_cast<bool>(optionalString) == false);
- BOOST_TEST(optionalString.has_value() == false);
- BOOST_TEST((optionalString == armnn::Optional<std::string>()));
+ CHECK(static_cast<bool>(optionalString) == false);
+ CHECK(optionalString.has_value() == false);
+ CHECK((optionalString == armnn::Optional<std::string>()));
optionalString = std::string("Hello World");
- BOOST_TEST(static_cast<bool>(optionalString) == true);
- BOOST_TEST(optionalString.has_value() == true);
- BOOST_TEST(optionalString.value() == "Hello World");
- BOOST_TEST((optionalString == armnn::Optional<std::string>("Hello World")));
+ CHECK(static_cast<bool>(optionalString) == true);
+ CHECK(optionalString.has_value() == true);
+ CHECK(optionalString.value() == "Hello World");
+ CHECK((optionalString == armnn::Optional<std::string>("Hello World")));
armnn::Optional<std::string> otherString;
otherString = optionalString;
- BOOST_TEST(static_cast<bool>(otherString) == true);
- BOOST_TEST(otherString.value() == "Hello World");
+ CHECK(static_cast<bool>(otherString) == true);
+ CHECK(otherString.value() == "Hello World");
optionalString.reset();
- BOOST_TEST(static_cast<bool>(optionalString) == false);
- BOOST_TEST(optionalString.has_value() == false);
+ CHECK(static_cast<bool>(optionalString) == false);
+ CHECK(optionalString.has_value() == false);
const std::string stringValue("Hello World");
armnn::Optional<std::string> optionalString2(stringValue);
- BOOST_TEST(static_cast<bool>(optionalString2) == true);
- BOOST_TEST(optionalString2.has_value() == true);
- BOOST_TEST(optionalString2.value() == "Hello World");
+ CHECK(static_cast<bool>(optionalString2) == true);
+ CHECK(optionalString2.has_value() == true);
+ CHECK(optionalString2.value() == "Hello World");
armnn::Optional<std::string> optionalString3(std::move(optionalString2));
- BOOST_TEST(static_cast<bool>(optionalString3) == true);
- BOOST_TEST(optionalString3.has_value() == true);
- BOOST_TEST(optionalString3.value() == "Hello World");
+ CHECK(static_cast<bool>(optionalString3) == true);
+ CHECK(optionalString3.has_value() == true);
+ CHECK(optionalString3.value() == "Hello World");
}
-BOOST_AUTO_TEST_CASE(StringRefTests)
+TEST_CASE("StringRefTests")
{
armnn::Optional<std::string&> optionalStringRef{armnn::EmptyOptional()};
- BOOST_TEST(optionalStringRef.has_value() == false);
+ CHECK(optionalStringRef.has_value() == false);
PassStringRef(optionalStringRef);
PassStringRefWithDefault();
@@ -74,51 +74,51 @@ BOOST_AUTO_TEST_CASE(StringRefTests)
std::string& helloWorldRef = helloWorld;
armnn::Optional<std::string&> optionalHelloRef = helloWorldRef;
- BOOST_TEST(optionalHelloRef.has_value() == true);
- BOOST_TEST(optionalHelloRef.value() == "Hello World");
+ CHECK(optionalHelloRef.has_value() == true);
+ CHECK(optionalHelloRef.value() == "Hello World");
armnn::Optional<std::string&> optionalHelloRef2 = helloWorld;
- BOOST_TEST(optionalHelloRef2.has_value() == true);
- BOOST_TEST(optionalHelloRef2.value() == "Hello World");
+ CHECK(optionalHelloRef2.has_value() == true);
+ CHECK(optionalHelloRef2.value() == "Hello World");
armnn::Optional<std::string&> optionalHelloRef3{helloWorldRef};
- BOOST_TEST(optionalHelloRef3.has_value() == true);
- BOOST_TEST(optionalHelloRef3.value() == "Hello World");
+ CHECK(optionalHelloRef3.has_value() == true);
+ CHECK(optionalHelloRef3.value() == "Hello World");
armnn::Optional<std::string&> optionalHelloRef4{helloWorld};
- BOOST_TEST(optionalHelloRef4.has_value() == true);
- BOOST_TEST(optionalHelloRef4.value() == "Hello World");
+ CHECK(optionalHelloRef4.has_value() == true);
+ CHECK(optionalHelloRef4.value() == "Hello World");
// modify through the optional reference
optionalHelloRef4.value().assign("Long Other String");
- BOOST_TEST(helloWorld == "Long Other String");
- BOOST_TEST(optionalHelloRef.value() == "Long Other String");
- BOOST_TEST(optionalHelloRef2.value() == "Long Other String");
- BOOST_TEST(optionalHelloRef3.value() == "Long Other String");
+ CHECK(helloWorld == "Long Other String");
+ CHECK(optionalHelloRef.value() == "Long Other String");
+ CHECK(optionalHelloRef2.value() == "Long Other String");
+ CHECK(optionalHelloRef3.value() == "Long Other String");
}
-BOOST_AUTO_TEST_CASE(SimpleIntTests)
+TEST_CASE("SimpleIntTests")
{
const int intValue = 123;
armnn::Optional<int> optionalInt;
- BOOST_TEST(static_cast<bool>(optionalInt) == false);
- BOOST_TEST(optionalInt.has_value() == false);
- BOOST_TEST((optionalInt == armnn::Optional<int>()));
+ CHECK(static_cast<bool>(optionalInt) == false);
+ CHECK(optionalInt.has_value() == false);
+ CHECK((optionalInt == armnn::Optional<int>()));
optionalInt = intValue;
- BOOST_TEST(static_cast<bool>(optionalInt) == true);
- BOOST_TEST(optionalInt.has_value() == true);
- BOOST_TEST(optionalInt.value() == intValue);
- BOOST_TEST((optionalInt == armnn::Optional<int>(intValue)));
+ CHECK(static_cast<bool>(optionalInt) == true);
+ CHECK(optionalInt.has_value() == true);
+ CHECK(optionalInt.value() == intValue);
+ CHECK((optionalInt == armnn::Optional<int>(intValue)));
armnn::Optional<int> otherOptionalInt;
otherOptionalInt = optionalInt;
- BOOST_TEST(static_cast<bool>(otherOptionalInt) == true);
- BOOST_TEST(otherOptionalInt.value() == intValue);
+ CHECK(static_cast<bool>(otherOptionalInt) == true);
+ CHECK(otherOptionalInt.value() == intValue);
}
-BOOST_AUTO_TEST_CASE(ObjectConstructedInPlaceTests)
+TEST_CASE("ObjectConstructedInPlaceTests")
{
struct SimpleObject
{
@@ -145,15 +145,15 @@ BOOST_AUTO_TEST_CASE(ObjectConstructedInPlaceTests)
// Use MakeOptional
armnn::Optional<SimpleObject> optionalObject1 = armnn::MakeOptional<SimpleObject>(objectName, objectValue);
- BOOST_CHECK(static_cast<bool>(optionalObject1) == true);
- BOOST_CHECK(optionalObject1.has_value() == true);
- BOOST_CHECK(optionalObject1.value() == referenceObject);
+ CHECK(static_cast<bool>(optionalObject1) == true);
+ CHECK(optionalObject1.has_value() == true);
+ CHECK((optionalObject1.value() == referenceObject));
// Call in-place constructor directly
armnn::Optional<SimpleObject> optionalObject2(CONSTRUCT_IN_PLACE, objectName, objectValue);
- BOOST_CHECK(static_cast<bool>(optionalObject1) == true);
- BOOST_CHECK(optionalObject1.has_value() == true);
- BOOST_CHECK(optionalObject1.value() == referenceObject);
+ CHECK(static_cast<bool>(optionalObject2) == true);
+ CHECK(optionalObject2.has_value() == true);
+ CHECK((optionalObject2.value() == referenceObject));
}
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnn/test/ProfilerTests.cpp b/src/armnn/test/ProfilerTests.cpp
index 21900ffb9a..b27ad5a7a5 100644
--- a/src/armnn/test/ProfilerTests.cpp
+++ b/src/armnn/test/ProfilerTests.cpp
@@ -7,7 +7,7 @@
#include <armnn/TypesUtils.hpp>
#include <armnn/utility/IgnoreUnused.hpp>
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
#include <memory>
#include <thread>
@@ -34,7 +34,7 @@ namespace
void RegisterUnregisterProfilerSingleThreadImpl(bool &res)
{
- // Important! Don't use BOOST_TEST macros in this function as they
+ // Important! Don't use CHECK macros in this function as they
// seem to have problems when used in threads
// Get a reference to the profiler manager.
@@ -61,36 +61,36 @@ void RegisterUnregisterProfilerSingleThreadImpl(bool &res)
} // namespace
-BOOST_AUTO_TEST_SUITE(Profiler)
-
-BOOST_AUTO_TEST_CASE(EnableDisableProfiling)
+TEST_SUITE("Profiler")
+{
+TEST_CASE("EnableDisableProfiling")
{
std::unique_ptr<armnn::IProfiler> profiler = std::make_unique<armnn::IProfiler>();
// Check that profiling is disabled by default.
- BOOST_TEST(!profiler->IsProfilingEnabled());
+ CHECK(!profiler->IsProfilingEnabled());
// Enable profiling.
profiler->EnableProfiling(true);
// Check that profiling is enabled.
- BOOST_TEST(profiler->IsProfilingEnabled());
+ CHECK(profiler->IsProfilingEnabled());
// Disable profiling.
profiler->EnableProfiling(false);
// Check that profiling is disabled.
- BOOST_TEST(!profiler->IsProfilingEnabled());
+ CHECK(!profiler->IsProfilingEnabled());
}
-BOOST_AUTO_TEST_CASE(RegisterUnregisterProfilerSingleThread)
+TEST_CASE("RegisterUnregisterProfilerSingleThread")
{
bool res = false;
RegisterUnregisterProfilerSingleThreadImpl(res);
- BOOST_TEST(res);
+ CHECK(res);
}
-BOOST_AUTO_TEST_CASE(RegisterUnregisterProfilerMultipleThreads)
+TEST_CASE("RegisterUnregisterProfilerMultipleThreads")
{
bool res[3] = {false, false, false};
std::thread thread1([&res]() { RegisterUnregisterProfilerSingleThreadImpl(res[0]); });
@@ -103,11 +103,11 @@ BOOST_AUTO_TEST_CASE(RegisterUnregisterProfilerMultipleThreads)
for (int i = 0 ; i < 3 ; i++)
{
- BOOST_TEST(res[i]);
+ CHECK(res[i]);
}
}
-BOOST_AUTO_TEST_CASE(ProfilingMacros)
+TEST_CASE("ProfilingMacros")
{
// Get a reference to the profiler manager.
armnn::ProfilerManager& profilerManager = armnn::ProfilerManager::GetInstance();
@@ -115,13 +115,13 @@ BOOST_AUTO_TEST_CASE(ProfilingMacros)
{ // --- No profiler ---
// Check that there's no profiler registered for this thread.
- BOOST_TEST(!profilerManager.GetProfiler());
+ CHECK(!profilerManager.GetProfiler());
// Test scoped event.
{ ARMNN_SCOPED_PROFILING_EVENT(armnn::Compute::CpuAcc, "test"); }
// Check that we still cannot get a profiler for this thread.
- BOOST_TEST(!profilerManager.GetProfiler());
+ CHECK(!profilerManager.GetProfiler());
}
// Create and register a profiler for this thread.
@@ -138,7 +138,7 @@ BOOST_AUTO_TEST_CASE(ProfilingMacros)
// Check that no profiling event has been added to the sequence.
size_t eventSequenceSizeAfter = armnn::GetProfilerEventSequenceSize(profiler.get());
- BOOST_TEST(eventSequenceSizeBefore == eventSequenceSizeAfter);
+ CHECK(eventSequenceSizeBefore == eventSequenceSizeAfter);
}
// Enable profiling.
@@ -154,7 +154,7 @@ BOOST_AUTO_TEST_CASE(ProfilingMacros)
// Check that a profiling event has been added to the sequence.
size_t eventSequenceSizeAfter = armnn::GetProfilerEventSequenceSize(profiler.get());
- BOOST_TEST(eventSequenceSizeAfter == eventSequenceSizeBefore + 1);
+ CHECK(eventSequenceSizeAfter == eventSequenceSizeBefore + 1);
}
// Disable profiling here to not print out anything on stdout.
@@ -165,13 +165,13 @@ BOOST_AUTO_TEST_CASE(ProfilingMacros)
// This test unit needs the reference backend, it's not available if the reference backend is not built
-BOOST_AUTO_TEST_CASE(RuntimeLoadNetwork)
+TEST_CASE("RuntimeLoadNetwork")
{
// Get a reference to the profiler manager.
armnn::ProfilerManager& profilerManager = armnn::ProfilerManager::GetInstance();
// Check that there's no profiler registered for this thread.
- BOOST_TEST(!profilerManager.GetProfiler());
+ CHECK(!profilerManager.GetProfiler());
// Build a mock-network and load it into the runtime.
armnn::IRuntime::CreationOptions options;
@@ -183,18 +183,18 @@ BOOST_AUTO_TEST_CASE(RuntimeLoadNetwork)
runtime->LoadNetwork(networkIdentifier, armnn::Optimize(*mockNetwork, backends, runtime->GetDeviceSpec()));
// Check that now there's a profiler registered for this thread (created and registered by the loading the network).
- BOOST_TEST(profilerManager.GetProfiler());
+ CHECK(profilerManager.GetProfiler());
// Unload the network.
runtime->UnloadNetwork(networkIdentifier);
// Check that the profiler has been un-registered for this thread.
- BOOST_TEST(!profilerManager.GetProfiler());
+ CHECK(!profilerManager.GetProfiler());
}
#endif
-BOOST_AUTO_TEST_CASE(WriteEventResults)
+TEST_CASE("WriteEventResults")
{
// Get a reference to the profiler manager.
armnn::ProfilerManager& profileManager = armnn::ProfilerManager::GetInstance();
@@ -222,35 +222,35 @@ BOOST_AUTO_TEST_CASE(WriteEventResults)
// Check that a profiling event has been added to the sequence.
size_t eventSequenceSizeAfter = armnn::GetProfilerEventSequenceSize(profiler.get());
- BOOST_TEST(eventSequenceSizeAfter == eventSequenceSizeBefore + 1);
+ CHECK(eventSequenceSizeAfter == eventSequenceSizeBefore + 1);
std::ostringstream output;
profiler->AnalyzeEventsAndWriteResults(output);
- BOOST_TEST(!output.str().empty());
+ CHECK(!output.str().empty());
// output should contain event name 'test'
- BOOST_CHECK(output.str().find("test") != std::string::npos);
+ CHECK(output.str().find("test") != std::string::npos);
// output should contain headers
- BOOST_CHECK(output.str().find("Event Sequence - Name") != std::string::npos);
- BOOST_CHECK(output.str().find("Event Stats - Name") != std::string::npos);
- BOOST_CHECK(output.str().find("Total") != std::string::npos);
- BOOST_CHECK(output.str().find("Device") != std::string::npos);
+ CHECK(output.str().find("Event Sequence - Name") != std::string::npos);
+ CHECK(output.str().find("Event Stats - Name") != std::string::npos);
+ CHECK(output.str().find("Total") != std::string::npos);
+ CHECK(output.str().find("Device") != std::string::npos);
// output should contain compute device 'CpuAcc'
- BOOST_CHECK(output.str().find("CpuAcc") != std::string::npos);
+ CHECK(output.str().find("CpuAcc") != std::string::npos);
// output should not contain un-readable numbers
- BOOST_CHECK(output.str().find("e+") == std::string::npos);
+ CHECK(output.str().find("e+") == std::string::npos);
// output should not contain un-readable numbers
- BOOST_CHECK(output.str().find("+") == std::string::npos);
+ CHECK(output.str().find("+") == std::string::npos);
// output should not contain zero value
- BOOST_CHECK(output.str().find(" 0 ") == std::string::npos);
+ CHECK(output.str().find(" 0 ") == std::string::npos);
}
// Disable profiling here to not print out anything on stdout.
profiler->EnableProfiling(false);
}
-BOOST_AUTO_TEST_CASE(ProfilerJsonPrinter)
+TEST_CASE("ProfilerJsonPrinter")
{
class TestInstrument : public armnn::Instrument
{
@@ -350,8 +350,8 @@ BOOST_AUTO_TEST_CASE(ProfilerJsonPrinter)
"2.000000\n\t\t\t\t\t\t\t],\n\t\t\t\t\t\t\t\""
"unit\": \"us\"\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n");
- BOOST_CHECK(output == blessedOutput);
+ CHECK(output == blessedOutput);
armnn::ProfilerManager::GetInstance().RegisterProfiler(nullptr);
}
-BOOST_AUTO_TEST_SUITE_END();
+}
diff --git a/src/armnn/test/ProfilingEventTest.cpp b/src/armnn/test/ProfilingEventTest.cpp
index 0add8365e9..1e3d1eac7f 100644
--- a/src/armnn/test/ProfilingEventTest.cpp
+++ b/src/armnn/test/ProfilingEventTest.cpp
@@ -3,7 +3,7 @@
// SPDX-License-Identifier: MIT
//
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
#include "ProfilingEvent.hpp"
#include "Profiling.hpp"
@@ -12,9 +12,9 @@
using namespace armnn;
-BOOST_AUTO_TEST_SUITE(ProfilingEvent)
-
-BOOST_AUTO_TEST_CASE(ProfilingEventTest)
+TEST_SUITE("ProfilingEvent")
+{
+TEST_CASE("ProfilingEventTest")
{
// Get a reference to the profiler manager.
armnn::ProfilerManager& profileManager = armnn::ProfilerManager::GetInstance();
@@ -29,7 +29,7 @@ BOOST_AUTO_TEST_CASE(ProfilingEventTest)
BackendId(),
std::move(insts1));
- BOOST_CHECK_EQUAL(testEvent.GetName(), "EventName");
+ CHECK_EQ(testEvent.GetName(), "EventName");
// start the timer - outer
testEvent.Start();
@@ -40,7 +40,7 @@ BOOST_AUTO_TEST_CASE(ProfilingEventTest)
// stop the timer - outer
testEvent.Stop();
- BOOST_CHECK_GE(testEvent.GetMeasurements().front().m_Value, 10.0);
+ CHECK_GE(testEvent.GetMeasurements().front().m_Value, 10.0);
// create a sub event with CpuAcc
BackendId cpuAccBackendId(Compute::CpuAcc);
@@ -52,12 +52,12 @@ BOOST_AUTO_TEST_CASE(ProfilingEventTest)
cpuAccBackendId,
std::move(insts2));
- BOOST_CHECK_EQUAL(&testEvent, testEvent2.GetParentEvent());
- BOOST_CHECK_EQUAL(profileManager.GetProfiler(), testEvent2.GetProfiler());
- BOOST_CHECK(cpuAccBackendId == testEvent2.GetBackendId());
+ CHECK_EQ(&testEvent, testEvent2.GetParentEvent());
+ CHECK_EQ(profileManager.GetProfiler(), testEvent2.GetProfiler());
+ CHECK(cpuAccBackendId == testEvent2.GetBackendId());
}
-BOOST_AUTO_TEST_CASE(ProfilingEventTestOnGpuAcc)
+TEST_CASE("ProfilingEventTestOnGpuAcc")
{
// Get a reference to the profiler manager.
armnn::ProfilerManager& profileManager = armnn::ProfilerManager::GetInstance();
@@ -72,7 +72,7 @@ BOOST_AUTO_TEST_CASE(ProfilingEventTestOnGpuAcc)
BackendId(),
std::move(insts1));
- BOOST_CHECK_EQUAL(testEvent.GetName(), "GPUEvent");
+ CHECK_EQ(testEvent.GetName(), "GPUEvent");
// start the timer - outer
testEvent.Start();
@@ -83,7 +83,7 @@ BOOST_AUTO_TEST_CASE(ProfilingEventTestOnGpuAcc)
// stop the timer - outer
testEvent.Stop();
- BOOST_CHECK_GE(testEvent.GetMeasurements().front().m_Value, 10.0);
+ CHECK_GE(testEvent.GetMeasurements().front().m_Value, 10.0);
// create a sub event
BackendId gpuAccBackendId(Compute::GpuAcc);
@@ -95,9 +95,9 @@ BOOST_AUTO_TEST_CASE(ProfilingEventTestOnGpuAcc)
gpuAccBackendId,
std::move(insts2));
- BOOST_CHECK_EQUAL(&testEvent, testEvent2.GetParentEvent());
- BOOST_CHECK_EQUAL(profileManager.GetProfiler(), testEvent2.GetProfiler());
- BOOST_CHECK(gpuAccBackendId == testEvent2.GetBackendId());
+ CHECK_EQ(&testEvent, testEvent2.GetParentEvent());
+ CHECK_EQ(profileManager.GetProfiler(), testEvent2.GetProfiler());
+ CHECK(gpuAccBackendId == testEvent2.GetBackendId());
}
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnn/test/RuntimeTests.cpp b/src/armnn/test/RuntimeTests.cpp
index c5457d03f3..3db95445b4 100644
--- a/src/armnn/test/RuntimeTests.cpp
+++ b/src/armnn/test/RuntimeTests.cpp
@@ -20,7 +20,7 @@
#include <valgrind/memcheck.h>
#endif
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
#include "RuntimeTests.hpp"
#include "TestUtils.hpp"
@@ -34,9 +34,9 @@ void RuntimeLoadedNetworksReserve(armnn::RuntimeImpl* runtime)
}
-BOOST_AUTO_TEST_SUITE(Runtime)
-
-BOOST_AUTO_TEST_CASE(RuntimeUnloadNetwork)
+TEST_SUITE("Runtime")
+{
+TEST_CASE("RuntimeUnloadNetwork")
{
// build 2 mock-networks and load them into the runtime
armnn::IRuntime::CreationOptions options;
@@ -56,9 +56,9 @@ BOOST_AUTO_TEST_CASE(RuntimeUnloadNetwork)
runtime->LoadNetwork(networkIdentifier2, Optimize(*mockNetwork2, backends, runtime->GetDeviceSpec()));
// Unloads one by its networkID.
- BOOST_TEST(runtime->UnloadNetwork(networkIdentifier1) == armnn::Status::Success);
+ CHECK(runtime->UnloadNetwork(networkIdentifier1) == armnn::Status::Success);
- BOOST_TEST(runtime->UnloadNetwork(networkIdentifier1) == armnn::Status::Failure);
+ CHECK(runtime->UnloadNetwork(networkIdentifier1) == armnn::Status::Failure);
}
// Note: the current builds we don't do valgrind and gperftools based leak checking at the same
@@ -76,26 +76,24 @@ struct DisableGlobalLeakChecking
}
};
-BOOST_GLOBAL_FIXTURE(DisableGlobalLeakChecking);
-
-BOOST_AUTO_TEST_CASE(RuntimeHeapMemoryUsageSanityChecks)
+TEST_CASE_FIXTURE(DisableGlobalLeakChecking, "RuntimeHeapMemoryUsageSanityChecks")
{
- BOOST_TEST(ARMNN_LEAK_CHECKER_IS_ACTIVE());
+ CHECK(ARMNN_LEAK_CHECKER_IS_ACTIVE());
{
ARMNN_SCOPED_LEAK_CHECKER("Sanity_Check_Outer");
{
ARMNN_SCOPED_LEAK_CHECKER("Sanity_Check_Inner");
- BOOST_TEST(ARMNN_NO_LEAKS_IN_SCOPE() == true);
+ CHECK(ARMNN_NO_LEAKS_IN_SCOPE() == true);
std::unique_ptr<char[]> dummyAllocation(new char[1000]);
- BOOST_CHECK_MESSAGE(ARMNN_NO_LEAKS_IN_SCOPE() == false,
- "A leak of 1000 bytes is expected here. "
- "Please make sure environment variable: HEAPCHECK=draconian is set!");
- BOOST_TEST(ARMNN_BYTES_LEAKED_IN_SCOPE() == 1000);
- BOOST_TEST(ARMNN_OBJECTS_LEAKED_IN_SCOPE() == 1);
+ // "A leak of 1000 bytes is expected here. "
+ // "Please make sure environment variable: HEAPCHECK=draconian is set!"
+ CHECK((ARMNN_NO_LEAKS_IN_SCOPE() == false));
+ CHECK(ARMNN_BYTES_LEAKED_IN_SCOPE() == 1000);
+ CHECK(ARMNN_OBJECTS_LEAKED_IN_SCOPE() == 1);
}
- BOOST_TEST(ARMNN_NO_LEAKS_IN_SCOPE());
- BOOST_TEST(ARMNN_BYTES_LEAKED_IN_SCOPE() == 0);
- BOOST_TEST(ARMNN_OBJECTS_LEAKED_IN_SCOPE() == 0);
+ CHECK(ARMNN_NO_LEAKS_IN_SCOPE());
+ CHECK(ARMNN_BYTES_LEAKED_IN_SCOPE() == 0);
+ CHECK(ARMNN_OBJECTS_LEAKED_IN_SCOPE() == 0);
}
}
@@ -105,8 +103,9 @@ BOOST_AUTO_TEST_CASE(RuntimeHeapMemoryUsageSanityChecks)
#ifdef WITH_VALGRIND
// Run with the following command to get all the amazing output (in the devenv/build folder) :)
// valgrind --leak-check=full --show-leak-kinds=all --log-file=Valgrind_Memcheck_Leak_Report.txt armnn/test/UnitTests
-BOOST_AUTO_TEST_CASE(RuntimeMemoryLeak)
+TEST_CASE("RuntimeMemoryLeak")
{
+ MESSAGE("RuntimeMemoryLeak");
// From documentation:
// This means that no pointer to the block can be found. The block is classified as "lost",
@@ -155,8 +154,8 @@ BOOST_AUTO_TEST_CASE(RuntimeMemoryLeak)
}
// If we're not running under Valgrind, these vars will have been initialised to 0, so this will always pass.
- BOOST_TEST(leakedBefore == leakedAfter);
- BOOST_TEST(reachableBefore == reachableAfter);
+ CHECK(leakedBefore == leakedAfter);
+ CHECK(reachableBefore == reachableAfter);
// These are needed because VALGRIND_COUNT_LEAKS is a macro that assigns to the parameters
// so they are assigned to, but still considered unused, causing a warning.
@@ -165,7 +164,7 @@ BOOST_AUTO_TEST_CASE(RuntimeMemoryLeak)
}
#endif // WITH_VALGRIND
-BOOST_AUTO_TEST_CASE(RuntimeCpuRef)
+TEST_CASE("RuntimeCpuRef")
{
using namespace armnn;
@@ -196,10 +195,10 @@ BOOST_AUTO_TEST_CASE(RuntimeCpuRef)
// Load it into the runtime. It should success.
armnn::NetworkId netId;
- BOOST_TEST(runtime->LoadNetwork(netId, std::move(optNet)) == Status::Success);
+ CHECK(runtime->LoadNetwork(netId, std::move(optNet)) == Status::Success);
}
-BOOST_AUTO_TEST_CASE(RuntimeFallbackToCpuRef)
+TEST_CASE("RuntimeFallbackToCpuRef")
{
using namespace armnn;
@@ -231,10 +230,10 @@ BOOST_AUTO_TEST_CASE(RuntimeFallbackToCpuRef)
// Load it into the runtime. It should succeed.
armnn::NetworkId netId;
- BOOST_TEST(runtime->LoadNetwork(netId, std::move(optNet)) == Status::Success);
+ CHECK(runtime->LoadNetwork(netId, std::move(optNet)) == Status::Success);
}
-BOOST_AUTO_TEST_CASE(IVGCVSW_1929_QuantizedSoftmaxIssue)
+TEST_CASE("IVGCVSW_1929_QuantizedSoftmaxIssue")
{
// Test for issue reported by Chris Nix in https://jira.arm.com/browse/IVGCVSW-1929
using namespace armnn;
@@ -270,16 +269,16 @@ BOOST_AUTO_TEST_CASE(IVGCVSW_1929_QuantizedSoftmaxIssue)
runtime->GetDeviceSpec(),
OptimizerOptions(),
errMessages);
- BOOST_FAIL("An exception should have been thrown");
+ FAIL("An exception should have been thrown");
}
catch (const InvalidArgumentException& e)
{
// Different exceptions are thrown on different backends
}
- BOOST_CHECK(errMessages.size() > 0);
+ CHECK(errMessages.size() > 0);
}
-BOOST_AUTO_TEST_CASE(RuntimeBackendOptions)
+TEST_CASE("RuntimeBackendOptions")
{
using namespace armnn;
@@ -307,27 +306,27 @@ BOOST_AUTO_TEST_CASE(RuntimeBackendOptions)
// First group
- BOOST_TEST(backendOptions[0].GetBackendId().Get() == "FakeBackend1");
- BOOST_TEST(backendOptions[0].GetOption(0).GetName() == "Option1");
- BOOST_TEST(backendOptions[0].GetOption(0).GetValue().IsFloat() == true);
- BOOST_TEST(backendOptions[0].GetOption(0).GetValue().AsFloat() == 1.3f);
+ CHECK(backendOptions[0].GetBackendId().Get() == "FakeBackend1");
+ CHECK(backendOptions[0].GetOption(0).GetName() == "Option1");
+ CHECK(backendOptions[0].GetOption(0).GetValue().IsFloat() == true);
+ CHECK(backendOptions[0].GetOption(0).GetValue().AsFloat() == 1.3f);
- BOOST_TEST(backendOptions[0].GetOption(1).GetName() == "Option2");
- BOOST_TEST(backendOptions[0].GetOption(1).GetValue().IsBool() == true);
- BOOST_TEST(backendOptions[0].GetOption(1).GetValue().AsBool() == true);
+ CHECK(backendOptions[0].GetOption(1).GetName() == "Option2");
+ CHECK(backendOptions[0].GetOption(1).GetValue().IsBool() == true);
+ CHECK(backendOptions[0].GetOption(1).GetValue().AsBool() == true);
- BOOST_TEST(backendOptions[0].GetOption(2).GetName() == "Option3");
- BOOST_TEST(backendOptions[0].GetOption(2).GetValue().IsString() == true);
- BOOST_TEST(backendOptions[0].GetOption(2).GetValue().AsString() == "some_value");
+ CHECK(backendOptions[0].GetOption(2).GetName() == "Option3");
+ CHECK(backendOptions[0].GetOption(2).GetValue().IsString() == true);
+ CHECK(backendOptions[0].GetOption(2).GetValue().AsString() == "some_value");
// Second group
- BOOST_TEST(backendOptions[1].GetBackendId().Get() == "FakeBackend1");
- BOOST_TEST(backendOptions[1].GetOption(0).GetName() == "Option4");
- BOOST_TEST(backendOptions[1].GetOption(0).GetValue().IsInt() == true);
- BOOST_TEST(backendOptions[1].GetOption(0).GetValue().AsInt() == 42);
+ CHECK(backendOptions[1].GetBackendId().Get() == "FakeBackend1");
+ CHECK(backendOptions[1].GetOption(0).GetName() == "Option4");
+ CHECK(backendOptions[1].GetOption(0).GetValue().IsInt() == true);
+ CHECK(backendOptions[1].GetOption(0).GetValue().AsInt() == 42);
}
-BOOST_AUTO_TEST_CASE(ProfilingDisable)
+TEST_CASE("ProfilingDisable")
{
using namespace armnn;
@@ -358,17 +357,17 @@ BOOST_AUTO_TEST_CASE(ProfilingDisable)
// Load it into the runtime. It should succeed.
armnn::NetworkId netId;
- BOOST_TEST(runtime.LoadNetwork(netId, std::move(optNet)) == Status::Success);
+ CHECK(runtime.LoadNetwork(netId, std::move(optNet)) == Status::Success);
profiling::ProfilingServiceRuntimeHelper profilingServiceHelper(GetProfilingService(&runtime));
profiling::BufferManager& bufferManager = profilingServiceHelper.GetProfilingBufferManager();
auto readableBuffer = bufferManager.GetReadableBuffer();
// Profiling is not enabled, the post-optimisation structure should not be created
- BOOST_TEST(!readableBuffer);
+ CHECK(!readableBuffer);
}
-BOOST_AUTO_TEST_CASE(ProfilingEnableCpuRef)
+TEST_CASE("ProfilingEnableCpuRef")
{
using namespace armnn;
using namespace armnn::profiling;
@@ -410,29 +409,29 @@ BOOST_AUTO_TEST_CASE(ProfilingEnableCpuRef)
// Load it into the runtime. It should succeed.
armnn::NetworkId netId;
- BOOST_TEST(runtime.LoadNetwork(netId, std::move(optNet)) == Status::Success);
+ CHECK(runtime.LoadNetwork(netId, std::move(optNet)) == Status::Success);
profiling::BufferManager& bufferManager = profilingServiceHelper.GetProfilingBufferManager();
auto readableBuffer = bufferManager.GetReadableBuffer();
// Profiling is enabled, the post-optimisation structure should be created
- BOOST_CHECK(readableBuffer != nullptr);
+ CHECK(readableBuffer != nullptr);
unsigned int size = readableBuffer->GetSize();
const unsigned char* readableData = readableBuffer->GetReadableData();
- BOOST_CHECK(readableData != nullptr);
+ CHECK(readableData != nullptr);
unsigned int offset = 0;
// Verify Header
VerifyTimelineHeaderBinary(readableData, offset, size - 8);
- BOOST_TEST_MESSAGE("HEADER OK");
+ MESSAGE("HEADER OK");
// Post-optimisation network
// Network entity
VerifyTimelineEntityBinaryPacketData(optNetGuid, readableData, offset);
- BOOST_TEST_MESSAGE("NETWORK ENTITY OK");
+ MESSAGE("NETWORK ENTITY OK");
// Entity - Type relationship
VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::LabelLink,
@@ -442,7 +441,7 @@ BOOST_AUTO_TEST_CASE(ProfilingEnableCpuRef)
LabelsAndEventClasses::TYPE_GUID,
readableData,
offset);
- BOOST_TEST_MESSAGE("NETWORK TYPE RELATIONSHIP OK");
+ MESSAGE("NETWORK TYPE RELATIONSHIP OK");
// Network - START OF LIFE
ProfilingGuid networkSolEventGuid = VerifyTimelineEventBinaryPacket(EmptyOptional(),
@@ -450,7 +449,7 @@ BOOST_AUTO_TEST_CASE(ProfilingEnableCpuRef)
EmptyOptional(),
readableData,
offset);
- BOOST_TEST_MESSAGE("NETWORK START OF LIFE EVENT OK");
+ MESSAGE("NETWORK START OF LIFE EVENT OK");
// Network - START OF LIFE event relationship
VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::ExecutionLink,
@@ -460,7 +459,7 @@ BOOST_AUTO_TEST_CASE(ProfilingEnableCpuRef)
LabelsAndEventClasses::ARMNN_PROFILING_SOL_EVENT_CLASS,
readableData,
offset);
- BOOST_TEST_MESSAGE("NETWORK START OF LIFE RELATIONSHIP OK");
+ MESSAGE("NETWORK START OF LIFE RELATIONSHIP OK");
// Process ID Label
int processID = armnnUtils::Processes::GetCurrentId();
@@ -468,7 +467,7 @@ BOOST_AUTO_TEST_CASE(ProfilingEnableCpuRef)
ss << processID;
std::string processIdLabel = ss.str();
VerifyTimelineLabelBinaryPacketData(EmptyOptional(), processIdLabel, readableData, offset);
- BOOST_TEST_MESSAGE("PROCESS ID LABEL OK");
+ MESSAGE("PROCESS ID LABEL OK");
// Entity - Process ID relationship
VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::LabelLink,
@@ -478,16 +477,16 @@ BOOST_AUTO_TEST_CASE(ProfilingEnableCpuRef)
LabelsAndEventClasses::PROCESS_ID_GUID,
readableData,
offset);
- BOOST_TEST_MESSAGE("NETWORK PROCESS ID RELATIONSHIP OK");
+ MESSAGE("NETWORK PROCESS ID RELATIONSHIP OK");
// Input layer
// Input layer entity
VerifyTimelineEntityBinaryPacketData(input->GetGuid(), readableData, offset);
- BOOST_TEST_MESSAGE("INPUT ENTITY OK");
+ MESSAGE("INPUT ENTITY OK");
// Name Entity
ProfilingGuid inputLabelGuid = VerifyTimelineLabelBinaryPacketData(EmptyOptional(), "input", readableData, offset);
- BOOST_TEST_MESSAGE("INPUT NAME LABEL OK");
+ MESSAGE("INPUT NAME LABEL OK");
// Entity - Name relationship
VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::LabelLink,
@@ -497,7 +496,7 @@ BOOST_AUTO_TEST_CASE(ProfilingEnableCpuRef)
LabelsAndEventClasses::NAME_GUID,
readableData,
offset);
- BOOST_TEST_MESSAGE("INPUT NAME RELATIONSHIP OK");
+ MESSAGE("INPUT NAME RELATIONSHIP OK");
// Entity - Type relationship
VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::LabelLink,
@@ -507,7 +506,7 @@ BOOST_AUTO_TEST_CASE(ProfilingEnableCpuRef)
LabelsAndEventClasses::TYPE_GUID,
readableData,
offset);
- BOOST_TEST_MESSAGE("INPUT TYPE RELATIONSHIP OK");
+ MESSAGE("INPUT TYPE RELATIONSHIP OK");
// Network - Input layer relationship
VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::RetentionLink,
@@ -517,17 +516,17 @@ BOOST_AUTO_TEST_CASE(ProfilingEnableCpuRef)
LabelsAndEventClasses::CHILD_GUID,
readableData,
offset);
- BOOST_TEST_MESSAGE("NETWORK - INPUT CHILD RELATIONSHIP OK");
+ MESSAGE("NETWORK - INPUT CHILD RELATIONSHIP OK");
// Normalization layer
// Normalization layer entity
VerifyTimelineEntityBinaryPacketData(normalize->GetGuid(), readableData, offset);
- BOOST_TEST_MESSAGE("NORMALIZATION LAYER ENTITY OK");
+ MESSAGE("NORMALIZATION LAYER ENTITY OK");
// Name entity
ProfilingGuid normalizationLayerNameGuid = VerifyTimelineLabelBinaryPacketData(
EmptyOptional(), "normalization", readableData, offset);
- BOOST_TEST_MESSAGE("NORMALIZATION LAYER NAME LABEL OK");
+ MESSAGE("NORMALIZATION LAYER NAME LABEL OK");
// Entity - Name relationship
VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::LabelLink,
@@ -537,7 +536,7 @@ BOOST_AUTO_TEST_CASE(ProfilingEnableCpuRef)
LabelsAndEventClasses::NAME_GUID,
readableData,
offset);
- BOOST_TEST_MESSAGE("NORMALIZATION LAYER NAME RELATIONSHIP OK");
+ MESSAGE("NORMALIZATION LAYER NAME RELATIONSHIP OK");
// Entity - Type relationship
VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::LabelLink,
@@ -547,7 +546,7 @@ BOOST_AUTO_TEST_CASE(ProfilingEnableCpuRef)
LabelsAndEventClasses::TYPE_GUID,
readableData,
offset);
- BOOST_TEST_MESSAGE("NORMALIZATION LAYER TYPE RELATIONSHIP OK");
+ MESSAGE("NORMALIZATION LAYER TYPE RELATIONSHIP OK");
// Network - Normalize layer relationship
VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::RetentionLink,
@@ -557,7 +556,7 @@ BOOST_AUTO_TEST_CASE(ProfilingEnableCpuRef)
LabelsAndEventClasses::CHILD_GUID,
readableData,
offset);
- BOOST_TEST_MESSAGE("NETWORK - NORMALIZATION LAYER CHILD RELATIONSHIP OK");
+ MESSAGE("NETWORK - NORMALIZATION LAYER CHILD RELATIONSHIP OK");
// Input layer - Normalize layer relationship
VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::RetentionLink,
@@ -567,13 +566,13 @@ BOOST_AUTO_TEST_CASE(ProfilingEnableCpuRef)
LabelsAndEventClasses::CONNECTION_GUID,
readableData,
offset);
- BOOST_TEST_MESSAGE("INPUT - NORMALIZATION LAYER CONNECTION OK");
+ MESSAGE("INPUT - NORMALIZATION LAYER CONNECTION OK");
// Normalization workload
// Normalization workload entity
ProfilingGuid normalizationWorkloadGuid = VerifyTimelineEntityBinaryPacketData(
EmptyOptional(), readableData, offset);
- BOOST_TEST_MESSAGE("NORMALIZATION WORKLOAD ENTITY OK");
+ MESSAGE("NORMALIZATION WORKLOAD ENTITY OK");
// Entity - Type relationship
VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::LabelLink,
@@ -583,12 +582,12 @@ BOOST_AUTO_TEST_CASE(ProfilingEnableCpuRef)
LabelsAndEventClasses::TYPE_GUID,
readableData,
offset);
- BOOST_TEST_MESSAGE("NORMALIZATION WORKLOAD TYPE RELATIONSHIP OK");
+ MESSAGE("NORMALIZATION WORKLOAD TYPE RELATIONSHIP OK");
// BackendId entity
ProfilingGuid cpuRefLabelGuid = VerifyTimelineLabelBinaryPacketData(
EmptyOptional(), "CpuRef", readableData, offset);
- BOOST_TEST_MESSAGE("CPUREF LABEL OK");
+ MESSAGE("CPUREF LABEL OK");
// Entity - BackendId relationship
VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::LabelLink,
@@ -598,7 +597,7 @@ BOOST_AUTO_TEST_CASE(ProfilingEnableCpuRef)
LabelsAndEventClasses::BACKENDID_GUID,
readableData,
offset);
- BOOST_TEST_MESSAGE("NORMALIZATION WORKLOAD BACKEND ID RELATIONSHIP OK");
+ MESSAGE("NORMALIZATION WORKLOAD BACKEND ID RELATIONSHIP OK");
// Normalize layer - Normalize workload relationship
VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::RetentionLink,
@@ -608,17 +607,17 @@ BOOST_AUTO_TEST_CASE(ProfilingEnableCpuRef)
LabelsAndEventClasses::CHILD_GUID,
readableData,
offset);
- BOOST_TEST_MESSAGE("NORMALIZATION LAYER - WORKLOAD CHILD RELATIONSHIP OK");
+ MESSAGE("NORMALIZATION LAYER - WORKLOAD CHILD RELATIONSHIP OK");
// Output layer
// Output layer entity
VerifyTimelineEntityBinaryPacketData(output->GetGuid(), readableData, offset);
- BOOST_TEST_MESSAGE("OUTPUT LAYER ENTITY OK");
+ MESSAGE("OUTPUT LAYER ENTITY OK");
// Name entity
ProfilingGuid outputLabelGuid = VerifyTimelineLabelBinaryPacketData(
EmptyOptional(), "output", readableData, offset);
- BOOST_TEST_MESSAGE("OUTPUT LAYER NAME LABEL OK");
+ MESSAGE("OUTPUT LAYER NAME LABEL OK");
// Entity - Name relationship
VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::LabelLink,
@@ -628,7 +627,7 @@ BOOST_AUTO_TEST_CASE(ProfilingEnableCpuRef)
LabelsAndEventClasses::NAME_GUID,
readableData,
offset);
- BOOST_TEST_MESSAGE("OUTPUT LAYER NAME RELATIONSHIP OK");
+ MESSAGE("OUTPUT LAYER NAME RELATIONSHIP OK");
// Entity - Type relationship
VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::LabelLink,
@@ -638,7 +637,7 @@ BOOST_AUTO_TEST_CASE(ProfilingEnableCpuRef)
LabelsAndEventClasses::TYPE_GUID,
readableData,
offset);
- BOOST_TEST_MESSAGE("OUTPUT LAYER TYPE RELATIONSHIP OK");
+ MESSAGE("OUTPUT LAYER TYPE RELATIONSHIP OK");
// Network - Output layer relationship
VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::RetentionLink,
@@ -648,7 +647,7 @@ BOOST_AUTO_TEST_CASE(ProfilingEnableCpuRef)
LabelsAndEventClasses::CHILD_GUID,
readableData,
offset);
- BOOST_TEST_MESSAGE("NETWORK - OUTPUT LAYER CHILD RELATIONSHIP OK");
+ MESSAGE("NETWORK - OUTPUT LAYER CHILD RELATIONSHIP OK");
// Normalize layer - Output layer relationship
VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::RetentionLink,
@@ -658,7 +657,7 @@ BOOST_AUTO_TEST_CASE(ProfilingEnableCpuRef)
LabelsAndEventClasses::CONNECTION_GUID,
readableData,
offset);
- BOOST_TEST_MESSAGE("NORMALIZE LAYER - OUTPUT LAYER CONNECTION OK");
+ MESSAGE("NORMALIZE LAYER - OUTPUT LAYER CONNECTION OK");
bufferManager.MarkRead(readableBuffer);
@@ -680,33 +679,33 @@ BOOST_AUTO_TEST_CASE(ProfilingEnableCpuRef)
// Get readable buffer for input workload
auto inputReadableBuffer = bufferManager.GetReadableBuffer();
- BOOST_CHECK(inputReadableBuffer != nullptr);
+ CHECK(inputReadableBuffer != nullptr);
// Get readable buffer for output workload
auto outputReadableBuffer = bufferManager.GetReadableBuffer();
- BOOST_CHECK(outputReadableBuffer != nullptr);
+ CHECK(outputReadableBuffer != nullptr);
// Get readable buffer for inference timeline
auto inferenceReadableBuffer = bufferManager.GetReadableBuffer();
- BOOST_CHECK(inferenceReadableBuffer != nullptr);
+ CHECK(inferenceReadableBuffer != nullptr);
// Validate input workload data
size = inputReadableBuffer->GetSize();
- BOOST_CHECK(size == 164);
+ CHECK(size == 164);
readableData = inputReadableBuffer->GetReadableData();
- BOOST_CHECK(readableData != nullptr);
+ CHECK(readableData != nullptr);
offset = 0;
// Verify Header
VerifyTimelineHeaderBinary(readableData, offset, 156);
- BOOST_TEST_MESSAGE("INPUT WORKLOAD HEADER OK");
+ MESSAGE("INPUT WORKLOAD HEADER OK");
// Input workload
// Input workload entity
ProfilingGuid inputWorkloadGuid = VerifyTimelineEntityBinaryPacketData(EmptyOptional(), readableData, offset);
- BOOST_TEST_MESSAGE("INPUT WORKLOAD ENTITY OK");
+ MESSAGE("INPUT WORKLOAD ENTITY OK");
// Entity - Type relationship
VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::LabelLink,
@@ -716,12 +715,12 @@ BOOST_AUTO_TEST_CASE(ProfilingEnableCpuRef)
LabelsAndEventClasses::TYPE_GUID,
readableData,
offset);
- BOOST_TEST_MESSAGE("INPUT WORKLOAD TYPE RELATIONSHIP OK");
+ MESSAGE("INPUT WORKLOAD TYPE RELATIONSHIP OK");
// BackendId entity
ProfilingGuid CpuRefLabelGuid = VerifyTimelineLabelBinaryPacketData(
EmptyOptional(), "CpuRef", readableData, offset);
- BOOST_TEST_MESSAGE("CPUREF LABEL OK (INPUT WORKLOAD)");
+ MESSAGE("CPUREF LABEL OK (INPUT WORKLOAD)");
// Entity - BackendId relationship
VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::LabelLink,
@@ -731,7 +730,7 @@ BOOST_AUTO_TEST_CASE(ProfilingEnableCpuRef)
LabelsAndEventClasses::BACKENDID_GUID,
readableData,
offset);
- BOOST_TEST_MESSAGE("INPUT WORKLOAD BACKEND ID RELATIONSHIP OK");
+ MESSAGE("INPUT WORKLOAD BACKEND ID RELATIONSHIP OK");
// Input layer - Input workload relationship
VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::RetentionLink,
@@ -741,27 +740,27 @@ BOOST_AUTO_TEST_CASE(ProfilingEnableCpuRef)
LabelsAndEventClasses::CHILD_GUID,
readableData,
offset);
- BOOST_TEST_MESSAGE("INPUT LAYER - INPUT WORKLOAD CHILD RELATIONSHIP OK");
+ MESSAGE("INPUT LAYER - INPUT WORKLOAD CHILD RELATIONSHIP OK");
bufferManager.MarkRead(inputReadableBuffer);
// Validate output workload data
size = outputReadableBuffer->GetSize();
- BOOST_CHECK(size == 164);
+ CHECK(size == 164);
readableData = outputReadableBuffer->GetReadableData();
- BOOST_CHECK(readableData != nullptr);
+ CHECK(readableData != nullptr);
offset = 0;
// Verify Header
VerifyTimelineHeaderBinary(readableData, offset, 156);
- BOOST_TEST_MESSAGE("OUTPUT WORKLOAD HEADER OK");
+ MESSAGE("OUTPUT WORKLOAD HEADER OK");
// Output workload
// Output workload entity
ProfilingGuid outputWorkloadGuid = VerifyTimelineEntityBinaryPacketData(EmptyOptional(), readableData, offset);
- BOOST_TEST_MESSAGE("OUTPUT WORKLOAD ENTITY OK");
+ MESSAGE("OUTPUT WORKLOAD ENTITY OK");
// Entity - Type relationship
VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::LabelLink,
@@ -771,11 +770,11 @@ BOOST_AUTO_TEST_CASE(ProfilingEnableCpuRef)
LabelsAndEventClasses::TYPE_GUID,
readableData,
offset);
- BOOST_TEST_MESSAGE("OUTPUT WORKLOAD TYPE RELATIONSHIP OK");
+ MESSAGE("OUTPUT WORKLOAD TYPE RELATIONSHIP OK");
// BackendId entity
VerifyTimelineLabelBinaryPacketData(EmptyOptional(), "CpuRef", readableData, offset);
- BOOST_TEST_MESSAGE("OUTPUT WORKLOAD CPU REF LABEL OK");
+ MESSAGE("OUTPUT WORKLOAD CPU REF LABEL OK");
// Entity - BackendId relationship
VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::LabelLink,
@@ -785,7 +784,7 @@ BOOST_AUTO_TEST_CASE(ProfilingEnableCpuRef)
LabelsAndEventClasses::BACKENDID_GUID,
readableData,
offset);
- BOOST_TEST_MESSAGE("OUTPUT WORKLOAD BACKEND ID RELATIONSHIP OK");
+ MESSAGE("OUTPUT WORKLOAD BACKEND ID RELATIONSHIP OK");
// Output layer - Output workload relationship
VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::RetentionLink,
@@ -795,27 +794,27 @@ BOOST_AUTO_TEST_CASE(ProfilingEnableCpuRef)
LabelsAndEventClasses::CHILD_GUID,
readableData,
offset);
- BOOST_TEST_MESSAGE("OUTPUT LAYER - OUTPUT WORKLOAD CHILD RELATIONSHIP OK");
+ MESSAGE("OUTPUT LAYER - OUTPUT WORKLOAD CHILD RELATIONSHIP OK");
bufferManager.MarkRead(outputReadableBuffer);
// Validate inference data
size = inferenceReadableBuffer->GetSize();
- BOOST_CHECK(size == 976 + 8 * ThreadIdSize);
+ CHECK(size == 976 + 8 * ThreadIdSize);
readableData = inferenceReadableBuffer->GetReadableData();
- BOOST_CHECK(readableData != nullptr);
+ CHECK(readableData != nullptr);
offset = 0;
// Verify Header
VerifyTimelineHeaderBinary(readableData, offset, 968 + 8 * ThreadIdSize);
- BOOST_TEST_MESSAGE("INFERENCE HEADER OK");
+ MESSAGE("INFERENCE HEADER OK");
// Inference timeline trace
// Inference entity
ProfilingGuid inferenceGuid = VerifyTimelineEntityBinaryPacketData(EmptyOptional(), readableData, offset);
- BOOST_TEST_MESSAGE("INFERENCE ENTITY OK");
+ MESSAGE("INFERENCE ENTITY OK");
// Entity - Type relationship
VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::LabelLink,
@@ -825,7 +824,7 @@ BOOST_AUTO_TEST_CASE(ProfilingEnableCpuRef)
LabelsAndEventClasses::TYPE_GUID,
readableData,
offset);
- BOOST_TEST_MESSAGE("INFERENCE TYPE RELATIONSHIP OK");
+ MESSAGE("INFERENCE TYPE RELATIONSHIP OK");
// Network - Inference relationship
VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::RetentionLink,
@@ -835,13 +834,13 @@ BOOST_AUTO_TEST_CASE(ProfilingEnableCpuRef)
LabelsAndEventClasses::EXECUTION_OF_GUID,
readableData,
offset);
- BOOST_TEST_MESSAGE("NETWORK - INFERENCE EXECUTION_OF RELATIONSHIP OK");
+ MESSAGE("NETWORK - INFERENCE EXECUTION_OF RELATIONSHIP OK");
// Start Inference life
// Event packet - timeline, threadId, eventGuid
ProfilingGuid inferenceEventGuid = VerifyTimelineEventBinaryPacket(
EmptyOptional(), EmptyOptional(), EmptyOptional(), readableData, offset);
- BOOST_TEST_MESSAGE("INFERENCE START OF LIFE EVENT OK");
+ MESSAGE("INFERENCE START OF LIFE EVENT OK");
// Inference - event relationship
VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::ExecutionLink,
@@ -851,14 +850,14 @@ BOOST_AUTO_TEST_CASE(ProfilingEnableCpuRef)
LabelsAndEventClasses::ARMNN_PROFILING_SOL_EVENT_CLASS,
readableData,
offset);
- BOOST_TEST_MESSAGE("INFERENCE START OF LIFE RELATIONSHIP OK");
+ MESSAGE("INFERENCE START OF LIFE RELATIONSHIP OK");
// Execution
// Input workload execution
// Input workload execution entity
ProfilingGuid inputWorkloadExecutionGuid = VerifyTimelineEntityBinaryPacketData(
EmptyOptional(), readableData, offset);
- BOOST_TEST_MESSAGE("INPUT WORKLOAD EXECUTION ENTITY OK");
+ MESSAGE("INPUT WORKLOAD EXECUTION ENTITY OK");
// Entity - Type relationship
VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::LabelLink,
@@ -868,7 +867,7 @@ BOOST_AUTO_TEST_CASE(ProfilingEnableCpuRef)
LabelsAndEventClasses::TYPE_GUID,
readableData,
offset);
- BOOST_TEST_MESSAGE("INPUT WORKLOAD EXECUTION TYPE RELATIONSHIP OK");
+ MESSAGE("INPUT WORKLOAD EXECUTION TYPE RELATIONSHIP OK");
// Inference - Workload execution relationship
VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::RetentionLink,
@@ -878,7 +877,7 @@ BOOST_AUTO_TEST_CASE(ProfilingEnableCpuRef)
LabelsAndEventClasses::CHILD_GUID,
readableData,
offset);
- BOOST_TEST_MESSAGE("INFERENCE - INPUT WORKLOAD EXECUTION CHILD RELATIONSHIP OK");
+ MESSAGE("INFERENCE - INPUT WORKLOAD EXECUTION CHILD RELATIONSHIP OK");
// Workload - Workload execution relationship
VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::RetentionLink,
@@ -888,7 +887,7 @@ BOOST_AUTO_TEST_CASE(ProfilingEnableCpuRef)
LabelsAndEventClasses::EXECUTION_OF_GUID,
readableData,
offset);
- BOOST_TEST_MESSAGE("INPUT WORKLOAD - INPUT WORKLOAD EXECUTION RELATIONSHIP OK");
+ MESSAGE("INPUT WORKLOAD - INPUT WORKLOAD EXECUTION RELATIONSHIP OK");
// Start Input workload execution life
// Event packet - timeline, threadId, eventGuid
@@ -903,7 +902,7 @@ BOOST_AUTO_TEST_CASE(ProfilingEnableCpuRef)
LabelsAndEventClasses::ARMNN_PROFILING_SOL_EVENT_CLASS,
readableData,
offset);
- BOOST_TEST_MESSAGE("INPUT WORKLOAD EXECUTION - START OF LIFE EVENT RELATIONSHIP OK");
+ MESSAGE("INPUT WORKLOAD EXECUTION - START OF LIFE EVENT RELATIONSHIP OK");
// End of Input workload execution life
// Event packet - timeline, threadId, eventGuid
@@ -918,13 +917,13 @@ BOOST_AUTO_TEST_CASE(ProfilingEnableCpuRef)
LabelsAndEventClasses::ARMNN_PROFILING_EOL_EVENT_CLASS,
readableData,
offset);
- BOOST_TEST_MESSAGE("INPUT WORKLOAD EXECUTION - END OF LIFE EVENT RELATIONSHIP OK");
+ MESSAGE("INPUT WORKLOAD EXECUTION - END OF LIFE EVENT RELATIONSHIP OK");
// Normalize workload execution
// Normalize workload execution entity
ProfilingGuid normalizeWorkloadExecutionGuid = VerifyTimelineEntityBinaryPacketData(
EmptyOptional(), readableData, offset);
- BOOST_TEST_MESSAGE("NORMALIZE WORKLOAD EXECUTION ENTITY OK");
+ MESSAGE("NORMALIZE WORKLOAD EXECUTION ENTITY OK");
// Entity - Type relationship
VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::LabelLink,
@@ -934,7 +933,7 @@ BOOST_AUTO_TEST_CASE(ProfilingEnableCpuRef)
LabelsAndEventClasses::TYPE_GUID,
readableData,
offset);
- BOOST_TEST_MESSAGE("NORMALIZE WORKLOAD EXECUTION TYPE RELATIONSHIP OK");
+ MESSAGE("NORMALIZE WORKLOAD EXECUTION TYPE RELATIONSHIP OK");
// Inference - Workload execution relationship
VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::RetentionLink,
@@ -944,7 +943,7 @@ BOOST_AUTO_TEST_CASE(ProfilingEnableCpuRef)
LabelsAndEventClasses::CHILD_GUID,
readableData,
offset);
- BOOST_TEST_MESSAGE("INFERENCE - NORMALIZE WORKLOAD EXECUTION CHILD RELATIONSHIP OK");
+ MESSAGE("INFERENCE - NORMALIZE WORKLOAD EXECUTION CHILD RELATIONSHIP OK");
// Workload - Workload execution relationship
VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::RetentionLink,
@@ -954,13 +953,13 @@ BOOST_AUTO_TEST_CASE(ProfilingEnableCpuRef)
LabelsAndEventClasses::EXECUTION_OF_GUID,
readableData,
offset);
- BOOST_TEST_MESSAGE("NORMALIZATION WORKLOAD - NORMALIZATION WORKLOAD EXECUTION RELATIONSHIP OK");
+ MESSAGE("NORMALIZATION WORKLOAD - NORMALIZATION WORKLOAD EXECUTION RELATIONSHIP OK");
// Start Normalize workload execution life
// Event packet - timeline, threadId, eventGuid
ProfilingGuid normalizationWorkloadExecutionSOLEventGuid = VerifyTimelineEventBinaryPacket(
EmptyOptional(), EmptyOptional(), EmptyOptional(), readableData, offset);
- BOOST_TEST_MESSAGE("NORMALIZATION WORKLOAD EXECUTION START OF LIFE EVENT OK");
+ MESSAGE("NORMALIZATION WORKLOAD EXECUTION START OF LIFE EVENT OK");
// Normalize workload execution - event relationship
VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::ExecutionLink,
@@ -970,13 +969,13 @@ BOOST_AUTO_TEST_CASE(ProfilingEnableCpuRef)
LabelsAndEventClasses::ARMNN_PROFILING_SOL_EVENT_CLASS,
readableData,
offset);
- BOOST_TEST_MESSAGE("NORMALIZATION WORKLOAD EXECUTION START OF LIFE RELATIONSHIP OK");
+ MESSAGE("NORMALIZATION WORKLOAD EXECUTION START OF LIFE RELATIONSHIP OK");
// End of Normalize workload execution life
// Event packet - timeline, threadId, eventGuid
ProfilingGuid normalizationWorkloadExecutionEOLEventGuid = VerifyTimelineEventBinaryPacket(
EmptyOptional(), EmptyOptional(), EmptyOptional(), readableData, offset);
- BOOST_TEST_MESSAGE("NORMALIZATION WORKLOAD EXECUTION END OF LIFE EVENT OK");
+ MESSAGE("NORMALIZATION WORKLOAD EXECUTION END OF LIFE EVENT OK");
// Normalize workload execution - event relationship
VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::ExecutionLink,
@@ -986,13 +985,13 @@ BOOST_AUTO_TEST_CASE(ProfilingEnableCpuRef)
LabelsAndEventClasses::ARMNN_PROFILING_EOL_EVENT_CLASS,
readableData,
offset);
- BOOST_TEST_MESSAGE("NORMALIZATION WORKLOAD EXECUTION END OF LIFE RELATIONSHIP OK");
+ MESSAGE("NORMALIZATION WORKLOAD EXECUTION END OF LIFE RELATIONSHIP OK");
// Output workload execution
// Output workload execution entity
ProfilingGuid outputWorkloadExecutionGuid = VerifyTimelineEntityBinaryPacketData(
EmptyOptional(), readableData, offset);
- BOOST_TEST_MESSAGE("OUTPUT WORKLOAD EXECUTION ENTITY OK");
+ MESSAGE("OUTPUT WORKLOAD EXECUTION ENTITY OK");
// Entity - Type relationship
VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::LabelLink,
@@ -1002,7 +1001,7 @@ BOOST_AUTO_TEST_CASE(ProfilingEnableCpuRef)
LabelsAndEventClasses::TYPE_GUID,
readableData,
offset);
- BOOST_TEST_MESSAGE("OUTPUT WORKLOAD EXECUTION TYPE RELATIONSHIP OK");
+ MESSAGE("OUTPUT WORKLOAD EXECUTION TYPE RELATIONSHIP OK");
// Inference - Workload execution relationship
VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::RetentionLink,
@@ -1012,7 +1011,7 @@ BOOST_AUTO_TEST_CASE(ProfilingEnableCpuRef)
LabelsAndEventClasses::CHILD_GUID,
readableData,
offset);
- BOOST_TEST_MESSAGE("INFERENCE - OUTPUT WORKLOAD EXECUTION CHILD RELATIONSHIP OK");
+ MESSAGE("INFERENCE - OUTPUT WORKLOAD EXECUTION CHILD RELATIONSHIP OK");
// Workload - Workload execution relationship
VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::RetentionLink,
@@ -1022,13 +1021,13 @@ BOOST_AUTO_TEST_CASE(ProfilingEnableCpuRef)
LabelsAndEventClasses::EXECUTION_OF_GUID,
readableData,
offset);
- BOOST_TEST_MESSAGE("OUTPUT WORKLOAD - OUTPUT WORKLOAD EXECUTION EXECUTION_OF RELATIONSHIP OK");
+ MESSAGE("OUTPUT WORKLOAD - OUTPUT WORKLOAD EXECUTION EXECUTION_OF RELATIONSHIP OK");
// Start Output workload execution life
// Event packet - timeline, threadId, eventGuid
ProfilingGuid outputWorkloadExecutionSOLEventGuid = VerifyTimelineEventBinaryPacket(
EmptyOptional(), EmptyOptional(), EmptyOptional(), readableData, offset);
- BOOST_TEST_MESSAGE("OUTPUT WORKLOAD EXECUTION START OF LIFE EVENT OK");
+ MESSAGE("OUTPUT WORKLOAD EXECUTION START OF LIFE EVENT OK");
// Output workload execution - event relationship
VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::ExecutionLink,
@@ -1038,13 +1037,13 @@ BOOST_AUTO_TEST_CASE(ProfilingEnableCpuRef)
LabelsAndEventClasses::ARMNN_PROFILING_SOL_EVENT_CLASS,
readableData,
offset);
- BOOST_TEST_MESSAGE("OUTPUT WORKLOAD EXECUTION - START OF LIFE EVENT RELATIONSHIP OK");
+ MESSAGE("OUTPUT WORKLOAD EXECUTION - START OF LIFE EVENT RELATIONSHIP OK");
// End of Normalize workload execution life
// Event packet - timeline, threadId, eventGuid
ProfilingGuid outputWorkloadExecutionEOLEventGuid = VerifyTimelineEventBinaryPacket(
EmptyOptional(), EmptyOptional(), EmptyOptional(), readableData, offset);
- BOOST_TEST_MESSAGE("OUTPUT WORKLOAD EXECUTION END OF LIFE EVENT OK");
+ MESSAGE("OUTPUT WORKLOAD EXECUTION END OF LIFE EVENT OK");
// Output workload execution - event relationship
VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::ExecutionLink,
@@ -1054,13 +1053,13 @@ BOOST_AUTO_TEST_CASE(ProfilingEnableCpuRef)
LabelsAndEventClasses::ARMNN_PROFILING_EOL_EVENT_CLASS,
readableData,
offset);
- BOOST_TEST_MESSAGE("OUTPUT WORKLOAD EXECUTION - END OF LIFE EVENT RELATIONSHIP OK");
+ MESSAGE("OUTPUT WORKLOAD EXECUTION - END OF LIFE EVENT RELATIONSHIP OK");
// End of Inference life
// Event packet - timeline, threadId, eventGuid
ProfilingGuid inferenceEOLEventGuid = VerifyTimelineEventBinaryPacket(
EmptyOptional(), EmptyOptional(), EmptyOptional(), readableData, offset);
- BOOST_TEST_MESSAGE("INFERENCE END OF LIFE EVENT OK");
+ MESSAGE("INFERENCE END OF LIFE EVENT OK");
// Inference - event relationship
VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::ExecutionLink,
@@ -1070,14 +1069,14 @@ BOOST_AUTO_TEST_CASE(ProfilingEnableCpuRef)
LabelsAndEventClasses::ARMNN_PROFILING_EOL_EVENT_CLASS,
readableData,
offset);
- BOOST_TEST_MESSAGE("INFERENCE - END OF LIFE EVENT RELATIONSHIP OK");
+ MESSAGE("INFERENCE - END OF LIFE EVENT RELATIONSHIP OK");
bufferManager.MarkRead(inferenceReadableBuffer);
}
-BOOST_AUTO_TEST_CASE(ProfilingPostOptimisationStructureCpuRef)
+TEST_CASE("ProfilingPostOptimisationStructureCpuRef")
{
VerifyPostOptimisationStructureTestImpl(armnn::Compute::CpuRef);
}
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnn/test/ShapeInferenceTests.cpp b/src/armnn/test/ShapeInferenceTests.cpp
index fa3f400569..8abcfd7595 100644
--- a/src/armnn/test/ShapeInferenceTests.cpp
+++ b/src/armnn/test/ShapeInferenceTests.cpp
@@ -3,7 +3,7 @@
// SPDX-License-Identifier: MIT
//
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
#include <armnn/Tensor.hpp>
#include <Graph.hpp>
@@ -14,7 +14,8 @@
#include <string>
-BOOST_AUTO_TEST_SUITE(ShapeInferenceTests)
+TEST_SUITE("ShapeInferenceTests")
+{
using namespace armnn;
namespace
{
@@ -72,7 +73,7 @@ void RunShapeInferenceTest(LayerT* const layer,
for (unsigned int i = 0; i < outputSize; ++i)
{
- BOOST_CHECK(layer->GetOutputSlot(i).GetTensorInfo().GetShape() == expectedOutputShapes[i]);
+ CHECK(layer->GetOutputSlot(i).GetTensorInfo().GetShape() == expectedOutputShapes[i]);
}
};
@@ -84,14 +85,14 @@ void RunShapeInferenceTest(LayerT* const layer,
layer->SetShapeInferenceMethod(ShapeInferenceMethod::ValidateOnly);
- BOOST_CHECK_THROW(layer->ValidateTensorShapesFromInputs(), LayerValidationException);
+ CHECK_THROWS_AS(layer->ValidateTensorShapesFromInputs(), LayerValidationException);
layer->SetShapeInferenceMethod(ShapeInferenceMethod::InferAndValidate);
layer->ValidateTensorShapesFromInputs();
for (unsigned int i = 0; i < outputSize; ++i)
{
- BOOST_CHECK(layer->GetOutputSlot(i).GetTensorInfo().GetShape() == expectedOutputShapes[i]);
+ CHECK(layer->GetOutputSlot(i).GetTensorInfo().GetShape() == expectedOutputShapes[i]);
}
// Test inference with Dimensionality::Specified and various combinations of dimensions of unknown size
@@ -116,7 +117,7 @@ void CreateGraphAndRunTest(const std::vector<TensorShape>& inputShapes,
RunShapeInferenceTest<LayerT>(layer, dimensionSizeLists);
}
-BOOST_AUTO_TEST_CASE(NetworkOptionsTest)
+TEST_CASE("NetworkOptionsTest")
{
BackendOptions ShapeInferenceMethodOption("ShapeInferenceMethod",
{
@@ -136,9 +137,9 @@ BOOST_AUTO_TEST_CASE(NetworkOptionsTest)
inputLayer->GetOutputSlot(0).Connect(activationLayer->GetInputSlot(0));
activationLayer->GetOutputSlot(0).SetTensorInfo({TensorShape{Dimensionality::NotSpecified}, DataType::Float32});
- BOOST_CHECK_NO_THROW(activationLayer->GetOutputSlot(0).IsTensorInfoSet());
+ CHECK_NOTHROW(activationLayer->GetOutputSlot(0).IsTensorInfoSet());
- BOOST_CHECK(activationLayer->GetOutputSlot(0).GetTensorInfo() == tensorInfo);
+ CHECK(activationLayer->GetOutputSlot(0).GetTensorInfo() == tensorInfo);
ShapeInferenceMethodOption = BackendOptions("ShapeInferenceMethod",
@@ -156,7 +157,7 @@ BOOST_AUTO_TEST_CASE(NetworkOptionsTest)
inputLayer->GetOutputSlot(0).Connect(activationLayer->GetInputSlot(0));
activationLayer->GetOutputSlot(0).SetTensorInfo({TensorShape{Dimensionality::NotSpecified}, DataType::Float32});
- BOOST_CHECK_NO_THROW(activationLayer->GetOutputSlot(0).IsTensorInfoSet());
+ CHECK_NOTHROW(activationLayer->GetOutputSlot(0).IsTensorInfoSet());
network = INetwork::Create();
@@ -168,22 +169,22 @@ BOOST_AUTO_TEST_CASE(NetworkOptionsTest)
inputLayer->GetOutputSlot(0).Connect(activationLayer->GetInputSlot(0));
activationLayer->GetOutputSlot(0).SetTensorInfo({TensorShape{Dimensionality::NotSpecified}, DataType::Float32});
- BOOST_CHECK_NO_THROW(activationLayer->GetOutputSlot(0).IsTensorInfoSet());
+ CHECK_NOTHROW(activationLayer->GetOutputSlot(0).IsTensorInfoSet());
}
-BOOST_AUTO_TEST_CASE(AbsTest)
+TEST_CASE("AbsTest")
{
ActivationDescriptor descriptor;
descriptor.m_Function = ActivationFunction::Abs;
CreateGraphAndRunTest<ActivationLayer>({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, descriptor, "activation");
}
-BOOST_AUTO_TEST_CASE(AdditionTest)
+TEST_CASE("AdditionTest")
{
CreateGraphAndRunTest<AdditionLayer>({{ 5, 7, 6, 2 }, { 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, "add");
}
-BOOST_AUTO_TEST_CASE(ArgMinMaxTest)
+TEST_CASE("ArgMinMaxTest")
{
armnn::ArgMinMaxDescriptor descriptor;
descriptor.m_Function = ArgMinMaxFunction::Min;
@@ -192,13 +193,13 @@ BOOST_AUTO_TEST_CASE(ArgMinMaxTest)
CreateGraphAndRunTest<ArgMinMaxLayer>({{ 1, 3, 2, 4 }}, {{ 1, 2, 4 }}, descriptor, "argMinMax");
}
-BOOST_AUTO_TEST_CASE(BatchNormalizationTest)
+TEST_CASE("BatchNormalizationTest")
{
BatchNormalizationDescriptor descriptor;
CreateGraphAndRunTest<BatchNormalizationLayer>({{ 1, 2, 3, 2 }}, {{ 1, 2, 3, 2 }}, descriptor, "batchNorm");
}
-BOOST_AUTO_TEST_CASE(BatchToSpaceNdTest)
+TEST_CASE("BatchToSpaceNdTest")
{
BatchToSpaceNdDescriptor descriptor;
@@ -212,7 +213,7 @@ BOOST_AUTO_TEST_CASE(BatchToSpaceNdTest)
CreateGraphAndRunTest<BatchToSpaceNdLayer>({{ 4, 2, 2, 1 }}, {{ 1, 4, 4, 1 }}, descriptor, "batchtospacend");
}
-BOOST_AUTO_TEST_CASE(ComparisionTest)
+TEST_CASE("ComparisonTest")
{
ComparisonDescriptor descriptor;
descriptor.m_Operation = ComparisonOperation::Equal;
@@ -222,7 +223,7 @@ BOOST_AUTO_TEST_CASE(ComparisionTest)
"comparision");
}
-BOOST_AUTO_TEST_CASE(ConcatTest)
+TEST_CASE("ConcatTest")
{
ConcatDescriptor descriptor(2, 3);
@@ -232,7 +233,7 @@ BOOST_AUTO_TEST_CASE(ConcatTest)
CreateGraphAndRunTest<ConcatLayer>({{ 1, 2, 1 }, { 1, 2, 1 }}, {{ 2, 2, 1 }}, descriptor, "concat");
}
-BOOST_AUTO_TEST_CASE(ConstantTesst)
+TEST_CASE("ConstantTest")
{
Graph graph;
TensorShape outputShape{ 1, 1, 3, 3 };
@@ -246,31 +247,31 @@ BOOST_AUTO_TEST_CASE(ConstantTesst)
layer->ValidateTensorShapesFromInputs();
- BOOST_CHECK(layer->GetOutputSlot(0).GetTensorInfo().GetShape() == outputShape);
+ CHECK(layer->GetOutputSlot(0).GetTensorInfo().GetShape() == outputShape);
}
-BOOST_AUTO_TEST_CASE(ConvertBf16ToFp32Test)
+TEST_CASE("ConvertBf16ToFp32Test")
{
CreateGraphAndRunTest<ConvertBf16ToFp32Layer>({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, "floor");
}
-BOOST_AUTO_TEST_CASE(ConvertFp16ToBf16Test)
+TEST_CASE("ConvertFp16ToBf16Test")
{
const TensorShape tensorShape{5, 7, 6, 2};
CreateGraphAndRunTest<ConvertFp32ToBf16Layer>({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, "floor");
}
-BOOST_AUTO_TEST_CASE(ConvertFp16ToFp32Test)
+TEST_CASE("ConvertFp16ToFp32Test")
{
CreateGraphAndRunTest<ConvertFp16ToFp32Layer>({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, "floor");
}
-BOOST_AUTO_TEST_CASE(ConvertFp32ToFp16Test)
+TEST_CASE("ConvertFp32ToFp16Test")
{
CreateGraphAndRunTest<ConvertFp32ToFp16Layer>({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, "floor");
}
-BOOST_AUTO_TEST_CASE(Convolution2dTest)
+TEST_CASE("Convolution2dTest")
{
const TensorShape inputShape{1, 1, 10, 10};
@@ -299,13 +300,13 @@ BOOST_AUTO_TEST_CASE(Convolution2dTest)
RunShapeInferenceTest<Convolution2dLayer>(layer, {{ 1, 1, 4, 4 }});
}
-BOOST_AUTO_TEST_CASE(DebugLayerTest)
+TEST_CASE("DebugLayerTest")
{
const TensorShape tensorShape;
CreateGraphAndRunTest<DebugLayer>({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, "debug");
}
-BOOST_AUTO_TEST_CASE(DepthToSpaceTest)
+TEST_CASE("DepthToSpaceTest")
{
DepthToSpaceDescriptor descriptor;
@@ -315,7 +316,7 @@ BOOST_AUTO_TEST_CASE(DepthToSpaceTest)
CreateGraphAndRunTest<DepthToSpaceLayer>({{ 1, 1, 1, 8}}, {{ 1, 2, 2, 2 }}, descriptor, "depthtospace");
}
-BOOST_AUTO_TEST_CASE(DepthwiseConvolutionTest)
+TEST_CASE("DepthwiseConvolutionTest")
{
DepthwiseConvolution2dDescriptor descriptor;
@@ -344,13 +345,13 @@ BOOST_AUTO_TEST_CASE(DepthwiseConvolutionTest)
RunShapeInferenceTest<DepthwiseConvolution2dLayer>(layer, {{ 8, 18, 1, 2 }});
}
-BOOST_AUTO_TEST_CASE(DequantizeTest)
+TEST_CASE("DequantizeTest")
{
const TensorShape tensorShape{5, 7, 6, 2};
CreateGraphAndRunTest<DequantizeLayer>({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, "dequantize");
}
-BOOST_AUTO_TEST_CASE(DetectionPostProcessTest)
+TEST_CASE("DetectionPostProcessTest")
{
const TensorShape detectionBoxesInfo{ 1, 3, 4 };
const TensorShape detectionScoresInfo{ 1, 3, 4 };
@@ -384,7 +385,7 @@ BOOST_AUTO_TEST_CASE(DetectionPostProcessTest)
RunShapeInferenceTest<DetectionPostProcessLayer>(layer, {{ 1, 3, 4 }, { 1, 3 }, { 1, 3 }, { 1 }});
}
-BOOST_AUTO_TEST_CASE(FakeQuantizationTest)
+TEST_CASE("FakeQuantizationTest")
{
FakeQuantizationDescriptor descriptor;
descriptor.m_Max = 1;
@@ -392,13 +393,13 @@ BOOST_AUTO_TEST_CASE(FakeQuantizationTest)
CreateGraphAndRunTest<FakeQuantizationLayer>({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, descriptor, "fakequantization");
}
-BOOST_AUTO_TEST_CASE(FloorTest)
+TEST_CASE("FloorTest")
{
const TensorShape tensorShape{5, 7, 6, 2};
CreateGraphAndRunTest<FloorLayer>({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, "floor");
}
-BOOST_AUTO_TEST_CASE(FullyConnectedTest)
+TEST_CASE("FullyConnectedTest")
{
Graph graph;
@@ -420,12 +421,12 @@ BOOST_AUTO_TEST_CASE(FullyConnectedTest)
RunShapeInferenceTest<FullyConnectedLayer>(layer, {{ 1, outputChannels }});
}
-BOOST_AUTO_TEST_CASE(GatherTest)
+TEST_CASE("GatherTest")
{
CreateGraphAndRunTest<GatherLayer>({{ 7, 6, 2}, {2,3}}, {{ 2, 3, 6, 2 }}, GatherDescriptor(), "gather");
}
-BOOST_AUTO_TEST_CASE(InstanceNormalizationTest)
+TEST_CASE("InstanceNormalizationTest")
{
const TensorShape tensorShape{5, 7, 6, 2};
@@ -434,7 +435,7 @@ BOOST_AUTO_TEST_CASE(InstanceNormalizationTest)
"instancenorm");
}
-BOOST_AUTO_TEST_CASE(L2NormalizationTest)
+TEST_CASE("L2NormalizationTest")
{
const TensorShape tensorShape{5, 7, 6, 2};
@@ -443,14 +444,14 @@ BOOST_AUTO_TEST_CASE(L2NormalizationTest)
"l2norm");
}
-BOOST_AUTO_TEST_CASE(LogSoftMaxTest)
+TEST_CASE("LogSoftMaxTest")
{
const TensorShape tensorShape{5, 7, 6, 2};
CreateGraphAndRunTest<LogSoftmaxLayer>({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, LogSoftmaxDescriptor(), "logsoftmax");
}
-BOOST_AUTO_TEST_CASE(LstmTest)
+TEST_CASE("LstmTest")
{
const TensorShape inputShape{2, 5};
const TensorShape inputCellState{2, 20};
@@ -485,7 +486,7 @@ BOOST_AUTO_TEST_CASE(LstmTest)
RunShapeInferenceTest<LstmLayer>(layer, {{2, 80}, {2, 20}, {2, 20}, {2, 20}});
}
-BOOST_AUTO_TEST_CASE(MeanLayerTest)
+TEST_CASE("MeanLayerTest")
{
MeanDescriptor descriptor;
descriptor.m_Axis = {0};
@@ -493,30 +494,30 @@ BOOST_AUTO_TEST_CASE(MeanLayerTest)
CreateGraphAndRunTest<MeanLayer>({{ 5, 7, 6, 2 }}, {{ 7, 6, 2 }}, descriptor, "mean");
}
-BOOST_AUTO_TEST_CASE(MemCopyTest)
+TEST_CASE("MemCopyTest")
{
CreateGraphAndRunTest<MemCopyLayer>({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, "memcopy");
}
-BOOST_AUTO_TEST_CASE(MemImportTest)
+TEST_CASE("MemImportTest")
{
CreateGraphAndRunTest<MemImportLayer>({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, "memomport");
}
-BOOST_AUTO_TEST_CASE(MergeTest)
+TEST_CASE("MergeTest")
{
const TensorShape tensorShape{ 5, 7, 6, 2 };
CreateGraphAndRunTest<MergeLayer>({ { 5, 7, 6, 2 }, { 5, 7, 6, 2 } }, {{ 5, 7, 6, 2 }}, "merge");
}
-BOOST_AUTO_TEST_CASE(NormalizationTest)
+TEST_CASE("NormalizationTest")
{
const TensorShape tensorShape{5, 7, 6, 2};
CreateGraphAndRunTest<NormalizationLayer>({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, NormalizationDescriptor(), "l2norm");
}
-BOOST_AUTO_TEST_CASE(PermuteTest)
+TEST_CASE("PermuteTest")
{
PermuteDescriptor descriptor;
descriptor.m_DimMappings = {0U, 2U, 3U, 1U};
@@ -524,7 +525,7 @@ BOOST_AUTO_TEST_CASE(PermuteTest)
CreateGraphAndRunTest<PermuteLayer>({{ 1, 2, 2, 3 }}, {{ 1, 3, 2, 2 }}, descriptor, "permute");
}
-BOOST_AUTO_TEST_CASE(Pooling2dTest)
+TEST_CASE("Pooling2dTest")
{
armnn::Pooling2dDescriptor descriptor;
descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;
@@ -539,7 +540,7 @@ BOOST_AUTO_TEST_CASE(Pooling2dTest)
CreateGraphAndRunTest<Pooling2dLayer>({{ 1, 2, 8, 13 }}, {{ 1, 2, 2, 8 }}, descriptor, "pooling2d");
}
-BOOST_AUTO_TEST_CASE(QLstmTest)
+TEST_CASE("QLstmTest")
{
const TensorShape inputShape{2, 5};
const TensorShape inputCellState{2, 20};
@@ -573,7 +574,7 @@ BOOST_AUTO_TEST_CASE(QLstmTest)
RunShapeInferenceTest<QLstmLayer>(layer, {{2, 20}, {2, 20}, {2, 20}});
}
-BOOST_AUTO_TEST_CASE(QuantizedLstmTest)
+TEST_CASE("QuantizedLstmTest")
{
const TensorShape inputShape{2, 5};
const TensorShape inputCellState{2, 20};
@@ -601,13 +602,13 @@ BOOST_AUTO_TEST_CASE(QuantizedLstmTest)
RunShapeInferenceTest<QuantizedLstmLayer>(layer, {{2, 20}, {2, 20}, {2, 20}});
}
-BOOST_AUTO_TEST_CASE(QuantizeTest)
+TEST_CASE("QuantizeTest")
{
const TensorShape tensorShape { 5, 4, 7, 6 };
CreateGraphAndRunTest<QuantizeLayer>({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, "mean");
}
-BOOST_AUTO_TEST_CASE(RankTest)
+TEST_CASE("RankTest")
{
// due to rank having a scalar output we need a custom test
const TensorShape expectedOutputs(Dimensionality::Scalar);
@@ -617,23 +618,23 @@ BOOST_AUTO_TEST_CASE(RankTest)
layer->GetOutputSlot(0).SetTensorInfo({TensorShape(Dimensionality::NotSpecified), DataType::Float32});
- BOOST_CHECK_THROW(
+ CHECK_THROWS_AS(
layer->ValidateTensorShapesFromInputs(), LayerValidationException);
layer->SetShapeInferenceMethod(ShapeInferenceMethod::InferAndValidate);
layer->ValidateTensorShapesFromInputs();
- BOOST_CHECK(layer->GetOutputSlot(0).GetTensorInfo().GetShape() == expectedOutputs);
+ CHECK(layer->GetOutputSlot(0).GetTensorInfo().GetShape() == expectedOutputs);
layer->GetOutputSlot(0).SetTensorInfo({TensorShape(Dimensionality::Scalar), DataType::Float32});
layer->ValidateTensorShapesFromInputs();
- BOOST_CHECK(layer->GetOutputSlot(0).GetTensorInfo().GetShape() == expectedOutputs);
+ CHECK(layer->GetOutputSlot(0).GetTensorInfo().GetShape() == expectedOutputs);
}
-BOOST_AUTO_TEST_CASE(ReshapeTest)
+TEST_CASE("ReshapeTest")
{
ReshapeDescriptor descriptor;
@@ -642,7 +643,7 @@ BOOST_AUTO_TEST_CASE(ReshapeTest)
CreateGraphAndRunTest<ReshapeLayer>({{ 2, 2, 2, 2 }}, {{ 1, 1, 1, 8 }}, descriptor, "reshape");
}
-BOOST_AUTO_TEST_CASE(ResizeTest)
+TEST_CASE("ResizeTest")
{
ResizeDescriptor descriptor;
@@ -652,7 +653,7 @@ BOOST_AUTO_TEST_CASE(ResizeTest)
CreateGraphAndRunTest<ResizeLayer>({{ 1, 7, 6, 2 }}, {{ 1, 7, 6, 2 }}, descriptor, "resize");
}
-BOOST_AUTO_TEST_CASE(SliceTest)
+TEST_CASE("SliceTest")
{
SliceDescriptor descriptor;
descriptor.m_Begin = { 1, 0, 1, 2 };
@@ -661,7 +662,7 @@ BOOST_AUTO_TEST_CASE(SliceTest)
CreateGraphAndRunTest<SliceLayer>({{ 3, 2, 3, 5 }}, {{ 2, 1, 2, 3 }}, descriptor, "mean");
}
-BOOST_AUTO_TEST_CASE(SpaceToBatchNdTest)
+TEST_CASE("SpaceToBatchNdTest")
{
SpaceToBatchNdDescriptor descriptor;
@@ -675,7 +676,7 @@ BOOST_AUTO_TEST_CASE(SpaceToBatchNdTest)
CreateGraphAndRunTest<SpaceToBatchNdLayer>({{ 1, 4, 4, 1 }}, {{ 4, 2, 2, 1 }}, descriptor, "spacetobatchnd");
}
-BOOST_AUTO_TEST_CASE(SpaceToDepth)
+TEST_CASE("SpaceToDepth")
{
SpaceToDepthDescriptor descriptor;
@@ -685,7 +686,7 @@ BOOST_AUTO_TEST_CASE(SpaceToDepth)
CreateGraphAndRunTest<SpaceToDepthLayer>({{ 1, 2, 2, 2 }}, {{ 1, 1, 1, 8}}, descriptor, "spacetodepth");
}
-BOOST_AUTO_TEST_CASE(SplitterTest)
+TEST_CASE("SplitterTest")
{
SplitterDescriptor descriptor(2, 3);
@@ -700,7 +701,7 @@ BOOST_AUTO_TEST_CASE(SplitterTest)
CreateGraphAndRunTest<SplitterLayer>({{ 2, 2, 2 }}, {{ 1, 2, 2 }, { 1, 2, 2 }}, descriptor, "splitter");
}
-BOOST_AUTO_TEST_CASE(StackTest)
+TEST_CASE("StackTest")
{
StackDescriptor descriptor;
@@ -711,7 +712,7 @@ BOOST_AUTO_TEST_CASE(StackTest)
CreateGraphAndRunTest<StackLayer>({{ 3, 2, 3 }, { 3, 2, 3 }}, {{ 2, 3, 2, 3 }}, descriptor, "stack");
}
-BOOST_AUTO_TEST_CASE(StridedSliceTest)
+TEST_CASE("StridedSliceTest")
{
StridedSliceDescriptor descriptor;
@@ -722,12 +723,12 @@ BOOST_AUTO_TEST_CASE(StridedSliceTest)
CreateGraphAndRunTest<StridedSliceLayer>({{ 3, 2, 3, 1 }}, {{ 2, 1, 2, 1 }}, descriptor, "stridedslice");
}
-BOOST_AUTO_TEST_CASE(Switchtest)
+TEST_CASE("Switchtest")
{
CreateGraphAndRunTest<SwitchLayer>({{ 3, 2, 3, 1 }, { 3, 2, 3, 1 }}, {{ 3, 2, 3, 1 }, { 3, 2, 3, 1 }}, "switch");
}
-BOOST_AUTO_TEST_CASE(TransposeConvolution2dTest)
+TEST_CASE("TransposeConvolution2dTest")
{
StridedSliceDescriptor descriptor;
@@ -738,7 +739,7 @@ BOOST_AUTO_TEST_CASE(TransposeConvolution2dTest)
CreateGraphAndRunTest<StridedSliceLayer>({{ 3, 2, 3, 1 }}, {{ 2, 1, 2, 1 }}, descriptor, "t");
}
-BOOST_AUTO_TEST_CASE(TransposeTest)
+TEST_CASE("TransposeTest")
{
armnn::TransposeDescriptor descriptor;
descriptor.m_DimMappings = {0U, 3U, 1U, 2U};
@@ -746,5 +747,5 @@ BOOST_AUTO_TEST_CASE(TransposeTest)
CreateGraphAndRunTest<TransposeLayer>({{ 1, 2, 2, 3 }}, {{ 1, 3, 2, 2 }}, descriptor, "stridedslice");
}
-BOOST_AUTO_TEST_SUITE_END()
+}
} \ No newline at end of file
diff --git a/src/armnn/test/SubgraphViewTests.cpp b/src/armnn/test/SubgraphViewTests.cpp
index ecb876dc7a..d270787968 100644
--- a/src/armnn/test/SubgraphViewTests.cpp
+++ b/src/armnn/test/SubgraphViewTests.cpp
@@ -2,7 +2,8 @@
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
-#include <boost/test/unit_test.hpp>
+
+#include <doctest/doctest.h>
#include <Graph.hpp>
#include <SubgraphView.hpp>
@@ -89,24 +90,21 @@ std::vector<T> ToSortedArray(Iterator begin, Iterator end)
template <typename T>
void CompareVectors(const std::vector<T>& result, const std::vector<T>& expected)
{
- BOOST_CHECK_EQUAL_COLLECTIONS(result.begin(), result.end(), expected.begin(), expected.end());
+ CHECK(result == expected);
}
void CompareSubgraphViews(SubgraphViewSelector::SubgraphViewPtr& result,
SubgraphViewSelector::SubgraphViewPtr& expected)
{
// expect both to be valid subgraphs
- BOOST_TEST((result.get() != nullptr));
- BOOST_TEST((expected.get() != nullptr));
+ CHECK((result.get() != nullptr));
+ CHECK((expected.get() != nullptr));
if (result.get() != nullptr && expected.get() != nullptr)
{
- // try to detect all other obvious errors too, mainly because here
- // we can get a nicer error message from boost, the collection test
- // also report error for these
- BOOST_TEST(result->GetInputSlots().size() == expected->GetInputSlots().size());
- BOOST_TEST(result->GetOutputSlots().size() == expected->GetOutputSlots().size());
- BOOST_TEST(result->GetLayers().size() == expected->GetLayers().size());
+ CHECK(result->GetInputSlots().size() == expected->GetInputSlots().size());
+ CHECK(result->GetOutputSlots().size() == expected->GetOutputSlots().size());
+ CHECK(result->GetLayers().size() == expected->GetLayers().size());
auto resultLayers = ToSortedArray<Layer *>(result->GetLayers().begin(),
result->GetLayers().end());
@@ -130,9 +128,9 @@ void CompareSubgraphViews(SubgraphViewSelector::SubgraphViewPtr& result,
} // namespace <anonymous>
-BOOST_AUTO_TEST_SUITE(SubgraphSubstitution)
-
-BOOST_AUTO_TEST_CASE(SingleInputSingleOutput)
+TEST_SUITE("SubgraphSubstitution")
+{
+TEST_CASE("SingleInputSingleOutput")
{
// Construct graph
Graph graph;
@@ -166,11 +164,11 @@ BOOST_AUTO_TEST_CASE(SingleInputSingleOutput)
graph.SubstituteSubgraph(*subgraph, preCompiledLayer);
// Check that connections are correct after substitution
- BOOST_CHECK_EQUAL(preCompiledLayer->GetInputSlot(0).GetConnection(), subgraphInputConn);
- BOOST_CHECK_EQUAL(preCompiledLayer->GetOutputSlot(0).GetConnection(0), subgraphOutputConn);
+ CHECK_EQ(preCompiledLayer->GetInputSlot(0).GetConnection(), subgraphInputConn);
+ CHECK_EQ(preCompiledLayer->GetOutputSlot(0).GetConnection(0), subgraphOutputConn);
}
-BOOST_AUTO_TEST_CASE(SingleInputSingleOutputSubstituteGraph)
+TEST_CASE("SingleInputSingleOutputSubstituteGraph")
{
// Construct graph
Graph graph;
@@ -209,11 +207,11 @@ BOOST_AUTO_TEST_CASE(SingleInputSingleOutputSubstituteGraph)
graph.SubstituteSubgraph(*subgraph, *substituteSubgraph);
// Check that connections are correct after substitution
- BOOST_CHECK_EQUAL(preCompiledLayer->GetInputSlot(0).GetConnection(), subgraphInputConn);
- BOOST_CHECK_EQUAL(preCompiledLayer->GetOutputSlot(0).GetConnection(0), subgraphOutputConn);
+ CHECK_EQ(preCompiledLayer->GetInputSlot(0).GetConnection(), subgraphInputConn);
+ CHECK_EQ(preCompiledLayer->GetOutputSlot(0).GetConnection(0), subgraphOutputConn);
}
-BOOST_AUTO_TEST_CASE(MultiInputSingleOutput)
+TEST_CASE("MultiInputSingleOutput")
{
// Construct graph
Graph graph;
@@ -258,13 +256,13 @@ BOOST_AUTO_TEST_CASE(MultiInputSingleOutput)
graph.SubstituteSubgraph(*subgraph, preCompiledLayer);
// Check that connections are correct after substitution
- BOOST_CHECK_EQUAL(preCompiledLayer->GetInputSlot(0).GetConnection(), subgraphInputConn1);
- BOOST_CHECK_EQUAL(preCompiledLayer->GetInputSlot(1).GetConnection(), subgraphInputConn2);
+ CHECK_EQ(preCompiledLayer->GetInputSlot(0).GetConnection(), subgraphInputConn1);
+ CHECK_EQ(preCompiledLayer->GetInputSlot(1).GetConnection(), subgraphInputConn2);
- BOOST_CHECK_EQUAL(preCompiledLayer->GetOutputSlot(0).GetConnection(0), subgraphOutputConn);
+ CHECK_EQ(preCompiledLayer->GetOutputSlot(0).GetConnection(0), subgraphOutputConn);
}
-BOOST_AUTO_TEST_CASE(SingleInputMultiOutput)
+TEST_CASE("SingleInputMultiOutput")
{
// Construct graph
Graph graph;
@@ -307,13 +305,13 @@ BOOST_AUTO_TEST_CASE(SingleInputMultiOutput)
graph.SubstituteSubgraph(*subgraph, preCompiledLayer);
// Check that connections are correct after substitution
- BOOST_CHECK_EQUAL(preCompiledLayer->GetInputSlot(0).GetConnection(), subgraphInputConn1);
+ CHECK_EQ(preCompiledLayer->GetInputSlot(0).GetConnection(), subgraphInputConn1);
- BOOST_CHECK_EQUAL(preCompiledLayer->GetOutputSlot(0).GetConnection(0), subgraphOutputConn1);
- BOOST_CHECK_EQUAL(preCompiledLayer->GetOutputSlot(1).GetConnection(0), subgraphOutputConn2);
+ CHECK_EQ(preCompiledLayer->GetOutputSlot(0).GetConnection(0), subgraphOutputConn1);
+ CHECK_EQ(preCompiledLayer->GetOutputSlot(1).GetConnection(0), subgraphOutputConn2);
}
-BOOST_AUTO_TEST_CASE(MultiInputMultiOutput)
+TEST_CASE("MultiInputMultiOutput")
{
// Construct graph
Graph graph;
@@ -359,14 +357,14 @@ BOOST_AUTO_TEST_CASE(MultiInputMultiOutput)
graph.SubstituteSubgraph(*subgraph, preCompiledLayer);
// Check that connections are correct after substitution
- BOOST_CHECK_EQUAL(preCompiledLayer->GetInputSlot(0).GetConnection(), subgraphInputConn1);
- BOOST_CHECK_EQUAL(preCompiledLayer->GetInputSlot(1).GetConnection(), subgraphInputConn2);
+ CHECK_EQ(preCompiledLayer->GetInputSlot(0).GetConnection(), subgraphInputConn1);
+ CHECK_EQ(preCompiledLayer->GetInputSlot(1).GetConnection(), subgraphInputConn2);
- BOOST_CHECK_EQUAL(preCompiledLayer->GetOutputSlot(0).GetConnection(0), subgraphOutputConn1);
- BOOST_CHECK_EQUAL(preCompiledLayer->GetOutputSlot(1).GetConnection(0), subgraphOutputConn2);
+ CHECK_EQ(preCompiledLayer->GetOutputSlot(0).GetConnection(0), subgraphOutputConn1);
+ CHECK_EQ(preCompiledLayer->GetOutputSlot(1).GetConnection(0), subgraphOutputConn2);
}
-BOOST_AUTO_TEST_CASE(EraseReplacedLayers)
+TEST_CASE("EraseReplacedLayers")
{
// Construct graph
Graph graph;
@@ -404,24 +402,24 @@ BOOST_AUTO_TEST_CASE(EraseReplacedLayers)
graph.SubstituteSubgraph(*subgraph, preCompiledLayer);
// Check that the layers belonging to the sub-graph have been erased from the graph after substitution
- BOOST_CHECK(!AreAnySubgraphLayersPresentInGraph(subgraphLayers, graph));
+ CHECK(!AreAnySubgraphLayersPresentInGraph(subgraphLayers, graph));
}
-BOOST_AUTO_TEST_SUITE_END()
-
-BOOST_AUTO_TEST_SUITE(SubgraphSelection)
+}
-BOOST_AUTO_TEST_CASE(SubgraphForEmptyGraph)
+TEST_SUITE("SubgraphSelection")
+{
+TEST_CASE("SubgraphForEmptyGraph")
{
Graph graph;
SubgraphView subgraph(graph);
- BOOST_TEST(subgraph.GetInputSlots().empty());
- BOOST_TEST(subgraph.GetOutputSlots().empty());
- BOOST_TEST(subgraph.GetLayers().empty());
+ CHECK(subgraph.GetInputSlots().empty());
+ CHECK(subgraph.GetOutputSlots().empty());
+ CHECK(subgraph.GetLayers().empty());
}
-BOOST_AUTO_TEST_CASE(SubgraphForEntireGraph)
+TEST_CASE("SubgraphForEntireGraph")
{
Graph graph;
@@ -436,12 +434,12 @@ BOOST_AUTO_TEST_CASE(SubgraphForEntireGraph)
SubgraphView subgraph(graph);
- BOOST_TEST(subgraph.GetInputSlots().empty());
- BOOST_TEST(subgraph.GetOutputSlots().empty());
- BOOST_TEST(subgraph.GetLayers().size() == graph.GetNumLayers());
+ CHECK(subgraph.GetInputSlots().empty());
+ CHECK(subgraph.GetOutputSlots().empty());
+ CHECK(subgraph.GetLayers().size() == graph.GetNumLayers());
}
-BOOST_AUTO_TEST_CASE(NoSubgraphsForNoMatch)
+TEST_CASE("NoSubgraphsForNoMatch")
{
Graph graph;
@@ -451,10 +449,10 @@ BOOST_AUTO_TEST_CASE(NoSubgraphsForNoMatch)
SubgraphViewSelector::Subgraphs subgraphs =
SubgraphViewSelector::SelectSubgraphs(graph, [](const Layer &) { return false; });
- BOOST_TEST(subgraphs.empty());
+ CHECK(subgraphs.empty());
}
-BOOST_AUTO_TEST_CASE(OneSubgraphsSelectedASingleMatch)
+TEST_CASE("OneSubgraphsSelectedASingleMatch")
{
Graph graph;
@@ -471,7 +469,7 @@ BOOST_AUTO_TEST_CASE(OneSubgraphsSelectedASingleMatch)
return isOutput;
});
- BOOST_TEST(subgraphs.size() == 1);
+ CHECK(subgraphs.size() == 1);
if (subgraphs.size() == 1)
{
auto expected = CreateSubgraphViewFrom(CreateInputsFrom({output}),
@@ -483,7 +481,7 @@ BOOST_AUTO_TEST_CASE(OneSubgraphsSelectedASingleMatch)
}
}
-BOOST_AUTO_TEST_CASE(MultipleLayersSelectedInTheMiddle)
+TEST_CASE("MultipleLayersSelectedInTheMiddle")
{
Graph graph;
@@ -506,7 +504,7 @@ BOOST_AUTO_TEST_CASE(MultipleLayersSelectedInTheMiddle)
return toSelect;
});
- BOOST_TEST(subgraphs.size() == 1);
+ CHECK(subgraphs.size() == 1);
if (subgraphs.size() == 1)
{
auto expected = CreateSubgraphViewFrom(CreateInputsFrom({mid1}),
@@ -517,7 +515,7 @@ BOOST_AUTO_TEST_CASE(MultipleLayersSelectedInTheMiddle)
}
}
-BOOST_AUTO_TEST_CASE(DisjointGraphs)
+TEST_CASE("DisjointGraphs")
{
// The input graph has two disjoint sections and all layers are selected.
// This should result in two subgraphs being produced.
@@ -542,11 +540,11 @@ BOOST_AUTO_TEST_CASE(DisjointGraphs)
// expected results to test against
auto expected1 = CreateSubgraphViewFrom({}, {}, { o0, n0, i0 });
auto expected2 = CreateSubgraphViewFrom({}, {}, { o1, n1, i1 });
- BOOST_TEST(subgraphs.size() == 2);
+ CHECK(subgraphs.size() == 2);
if (subgraphs.size() == 2)
{
- BOOST_TEST((subgraphs[0] != nullptr));
- BOOST_TEST((subgraphs[1] != nullptr));
+ CHECK((subgraphs[0] != nullptr));
+ CHECK((subgraphs[1] != nullptr));
if (subgraphs[0].get() != nullptr && subgraphs[1].get() != nullptr)
{
if (std::find(subgraphs[0]->GetLayers().begin(), subgraphs[0]->GetLayers().end(), i0) !=
@@ -564,7 +562,7 @@ BOOST_AUTO_TEST_CASE(DisjointGraphs)
}
}
-BOOST_AUTO_TEST_CASE(IslandInTheMiddle)
+TEST_CASE("IslandInTheMiddle")
{
// This case represent the scenario when a non-selected X1 node placed in the middle
// of the selected M* nodes.
@@ -629,12 +627,12 @@ BOOST_AUTO_TEST_CASE(IslandInTheMiddle)
std::vector<OutputSlot*>{},
{ m5, m6 });
- BOOST_TEST(subgraphs.size() == 2);
+ CHECK(subgraphs.size() == 2);
if (subgraphs.size() == 2)
{
// we need to have valid subgraph pointers here
- BOOST_TEST((subgraphs[0] != nullptr));
- BOOST_TEST((subgraphs[1] != nullptr));
+ CHECK((subgraphs[0] != nullptr));
+ CHECK((subgraphs[1] != nullptr));
if (subgraphs[0].get() != nullptr && subgraphs[1].get() != nullptr)
{
@@ -646,8 +644,8 @@ BOOST_AUTO_TEST_CASE(IslandInTheMiddle)
}
);
- BOOST_TEST(subgraphs[0]->GetLayers().size() == 2);
- BOOST_TEST(subgraphs[1]->GetLayers().size() == 5);
+ CHECK(subgraphs[0]->GetLayers().size() == 2);
+ CHECK(subgraphs[1]->GetLayers().size() == 5);
CompareSubgraphViews(subgraphs[0], smallerSubgraph);
CompareSubgraphViews(subgraphs[1], largerSubgraph);
@@ -655,7 +653,7 @@ BOOST_AUTO_TEST_CASE(IslandInTheMiddle)
}
}
-BOOST_AUTO_TEST_CASE(MultipleSimpleSubgraphs)
+TEST_CASE("MultipleSimpleSubgraphs")
{
// This test case represents the scenario when we have two distinct subgraphs
// in a simple linear network. The selected nodes are the M* and the
@@ -704,12 +702,12 @@ BOOST_AUTO_TEST_CASE(MultipleSimpleSubgraphs)
CreateOutputsFrom({m3}),
{m3});
- BOOST_TEST(subgraphs.size() == 2);
+ CHECK(subgraphs.size() == 2);
if (subgraphs.size() == 2)
{
// we need to have valid subgraph pointers here
- BOOST_TEST((subgraphs[0] != nullptr));
- BOOST_TEST((subgraphs[1] != nullptr));
+ CHECK((subgraphs[0] != nullptr));
+ CHECK((subgraphs[1] != nullptr));
if (subgraphs[0].get() != nullptr && subgraphs[1].get() != nullptr)
{
@@ -721,8 +719,8 @@ BOOST_AUTO_TEST_CASE(MultipleSimpleSubgraphs)
}
);
- BOOST_TEST(subgraphs[0]->GetLayers().size() == 1);
- BOOST_TEST(subgraphs[1]->GetLayers().size() == 2);
+ CHECK(subgraphs[0]->GetLayers().size() == 1);
+ CHECK(subgraphs[1]->GetLayers().size() == 2);
CompareSubgraphViews(subgraphs[0], smallerSubgraph);
CompareSubgraphViews(subgraphs[1], largerSubgraph);
@@ -730,7 +728,7 @@ BOOST_AUTO_TEST_CASE(MultipleSimpleSubgraphs)
}
}
-BOOST_AUTO_TEST_CASE(SimpleLinearTest)
+TEST_CASE("SimpleLinearTest")
{
//X1 -> M1 -> M2 -> X2
//Where the input slots of M1 and the output slots of M2 are to be the sub graph boundaries.
@@ -765,7 +763,7 @@ BOOST_AUTO_TEST_CASE(SimpleLinearTest)
return toSelect;
});
- BOOST_CHECK(subgraphs.size() == 1);
+ CHECK(subgraphs.size() == 1);
if(subgraphs.size() == 1)
{
auto expected = CreateSubgraphViewFrom(CreateInputsFrom({layerM1}),
@@ -776,7 +774,7 @@ BOOST_AUTO_TEST_CASE(SimpleLinearTest)
}
}
-BOOST_AUTO_TEST_CASE(MultiInputSingleOutput)
+TEST_CASE("MultiInputSingleOutput")
{
//X1 -> M1 -> M3 -> X3
//X2 -> M2 -> M3 -> X3
@@ -820,7 +818,7 @@ BOOST_AUTO_TEST_CASE(MultiInputSingleOutput)
return toSelect;
});
- BOOST_CHECK(subgraphs.size() == 1);
+ CHECK(subgraphs.size() == 1);
if (subgraphs.size() == 1)
{
auto expected = CreateSubgraphViewFrom(CreateInputsFrom({layerM1, layerM2}),
@@ -831,7 +829,7 @@ BOOST_AUTO_TEST_CASE(MultiInputSingleOutput)
}
}
-BOOST_AUTO_TEST_CASE(SingleInputMultiOutput)
+TEST_CASE("SingleInputMultiOutput")
{
//X1 -> M1 -> M2 -> X2
//X1 -> M1 -> M3 -> X3
@@ -876,7 +874,7 @@ BOOST_AUTO_TEST_CASE(SingleInputMultiOutput)
return toSelect;
});
- BOOST_CHECK(subgraphs.size() == 1);
+ CHECK(subgraphs.size() == 1);
if(subgraphs.size() == 1)
{
auto expected = CreateSubgraphViewFrom(CreateInputsFrom({layerM1}),
@@ -887,7 +885,7 @@ BOOST_AUTO_TEST_CASE(SingleInputMultiOutput)
}
}
-BOOST_AUTO_TEST_CASE(MultiInputMultiOutput)
+TEST_CASE("MultiInputMultiOutput")
{
// This case represents the scenario with multiple inputs and multiple outputs
//
@@ -940,7 +938,7 @@ BOOST_AUTO_TEST_CASE(MultiInputMultiOutput)
});
- BOOST_CHECK(subgraphs.size() == 1);
+ CHECK(subgraphs.size() == 1);
if (subgraphs.size() == 1)
{
auto expected = CreateSubgraphViewFrom(CreateInputsFrom({m1, m2}),
@@ -951,7 +949,7 @@ BOOST_AUTO_TEST_CASE(MultiInputMultiOutput)
}
}
-BOOST_AUTO_TEST_CASE(ValidMerge)
+TEST_CASE("ValidMerge")
{
// Checks that a node that has multiple choices for merge candidates (M3 in this case) correctly merges with the
// one that it can (M0), and doesn't merge with the ones it can't (X2 and M2).
@@ -1001,12 +999,12 @@ BOOST_AUTO_TEST_CASE(ValidMerge)
CreateOutputsFrom({ }),
{ m0, m3 });
- BOOST_TEST(subgraphs.size() == 2);
+ CHECK(subgraphs.size() == 2);
if (subgraphs.size() == 2)
{
// we need to have valid subgraph pointers here
- BOOST_TEST((subgraphs[0] != nullptr));
- BOOST_TEST((subgraphs[1] != nullptr));
+ CHECK((subgraphs[0] != nullptr));
+ CHECK((subgraphs[1] != nullptr));
if (subgraphs[0].get() != nullptr && subgraphs[1].get() != nullptr)
{
@@ -1024,7 +1022,7 @@ BOOST_AUTO_TEST_CASE(ValidMerge)
}
}
-BOOST_AUTO_TEST_CASE(PropagatedDependencies)
+TEST_CASE("PropagatedDependencies")
{
// Version of IslandInTheMiddle with longer chain
// to make sure antecedents are propagated.
@@ -1095,13 +1093,13 @@ BOOST_AUTO_TEST_CASE(PropagatedDependencies)
auto smallerSubgraph =
CreateSubgraphViewFrom(CreateInputsFrom({ m10 }), CreateOutputsFrom({ m10 }), { m10 });
- BOOST_TEST(subgraphs.size() == 3);
+ CHECK(subgraphs.size() == 3);
if (subgraphs.size() == 3)
{
// we need to have valid subgraph pointers here
- BOOST_TEST((subgraphs[0] != nullptr));
- BOOST_TEST((subgraphs[1] != nullptr));
- BOOST_TEST((subgraphs[2] != nullptr));
+ CHECK((subgraphs[0] != nullptr));
+ CHECK((subgraphs[1] != nullptr));
+ CHECK((subgraphs[2] != nullptr));
if (subgraphs[0].get() != nullptr && subgraphs[1].get() != nullptr && subgraphs[2].get() != nullptr)
{
@@ -1120,7 +1118,7 @@ BOOST_AUTO_TEST_CASE(PropagatedDependencies)
}
}
-BOOST_AUTO_TEST_CASE(Random)
+TEST_CASE("Random")
{
// Creates random networks, splits them into subgraphs and checks the resulting subgraphs obey the required
// dependency rules. We can easily generate very large networks which helps cover corner cases the other
@@ -1319,7 +1317,7 @@ BOOST_AUTO_TEST_CASE(Random)
Layer* l = toProcess.front();
toProcess.pop();
- BOOST_CHECK(layerToSubgraph[l] != subgraph.get());
+ CHECK(layerToSubgraph[l] != subgraph.get());
for (const InputSlot& is : l->GetInputSlots())
{
@@ -1331,11 +1329,11 @@ BOOST_AUTO_TEST_CASE(Random)
}
}
-BOOST_AUTO_TEST_SUITE_END()
-
-BOOST_AUTO_TEST_SUITE(IntegrationTests)
+}
-BOOST_AUTO_TEST_CASE(SingleSubgraph)
+TEST_SUITE("IntegrationTests")
+{
+TEST_CASE("SingleSubgraph")
{
// This test case represents the scenario when we have one subgraph
// in which two layers have GpuAcc backend assigned
@@ -1368,18 +1366,18 @@ BOOST_AUTO_TEST_CASE(SingleSubgraph)
return toSelect;
});
- BOOST_TEST(subgraphs.size() == 1);
+ CHECK(subgraphs.size() == 1);
if(subgraphs.size() == 1)
{
- BOOST_TEST((subgraphs[0] != nullptr));
+ CHECK((subgraphs[0] != nullptr));
if (subgraphs[0].get() != nullptr)
{
unsigned int numInputSlots = armnn::numeric_cast<unsigned int>(subgraphs[0]->GetInputSlots().size());
unsigned int numOutputSlots = armnn::numeric_cast<unsigned int>(subgraphs[0]->GetOutputSlots().size());
- BOOST_TEST((numInputSlots == 1));
- BOOST_TEST((numOutputSlots == 1));
+ CHECK((numInputSlots == 1));
+ CHECK((numOutputSlots == 1));
// Save sub-graph connections for comparison after substitution
IOutputSlot* subgraphInputConn1 = subgraphs[0]->GetInputSlot(0)->GetConnection();
@@ -1393,14 +1391,14 @@ BOOST_AUTO_TEST_CASE(SingleSubgraph)
graph.SubstituteSubgraph(*subgraphs[0], preCompiledLayer);
// Check that connections are correct after substitution
- BOOST_CHECK_EQUAL(preCompiledLayer->GetInputSlot(0).GetConnection(), subgraphInputConn1);
+ CHECK_EQ(preCompiledLayer->GetInputSlot(0).GetConnection(), subgraphInputConn1);
- BOOST_CHECK_EQUAL(preCompiledLayer->GetOutputSlot(0).GetConnection(0), subgraphOutputConn1);
+ CHECK_EQ(preCompiledLayer->GetOutputSlot(0).GetConnection(0), subgraphOutputConn1);
}
}
}
-BOOST_AUTO_TEST_CASE(MultipleSubgraphs)
+TEST_CASE("MultipleSubgraphs")
{
// This test case represents the scenario when we have two subgraphs
// in which two layers have CpuAcc backend assigned
@@ -1441,11 +1439,11 @@ BOOST_AUTO_TEST_CASE(MultipleSubgraphs)
return toSelect;
});
- BOOST_TEST(subgraphs.size() == 2);
+ CHECK(subgraphs.size() == 2);
if(subgraphs.size() == 2)
{
- BOOST_TEST((subgraphs[0] != nullptr));
- BOOST_TEST((subgraphs[1] != nullptr));
+ CHECK((subgraphs[0] != nullptr));
+ CHECK((subgraphs[1] != nullptr));
if (subgraphs[0].get() != nullptr && subgraphs[1].get() != nullptr)
{
@@ -1484,18 +1482,18 @@ BOOST_AUTO_TEST_CASE(MultipleSubgraphs)
graph.SubstituteSubgraph(*subgraphs[1], preCompiledLayer2);
// Check that connections are correct after substitution
- BOOST_CHECK_EQUAL(preCompiledLayer1->GetInputSlot(0).GetConnection(), subgraph1InputConn);
- BOOST_CHECK_EQUAL(preCompiledLayer1->GetOutputSlot(0).GetConnection(0), subgraph1OutputConn1);
- BOOST_CHECK_EQUAL(preCompiledLayer1->GetOutputSlot(1).GetConnection(0), subgraph1OutputConn2);
+ CHECK_EQ(preCompiledLayer1->GetInputSlot(0).GetConnection(), subgraph1InputConn);
+ CHECK_EQ(preCompiledLayer1->GetOutputSlot(0).GetConnection(0), subgraph1OutputConn1);
+ CHECK_EQ(preCompiledLayer1->GetOutputSlot(1).GetConnection(0), subgraph1OutputConn2);
- BOOST_CHECK_EQUAL(preCompiledLayer2->GetInputSlot(0).GetConnection(), subgraph2InputConn1);
- BOOST_CHECK_EQUAL(preCompiledLayer2->GetInputSlot(1).GetConnection(), subgraph2InputConn2);
- BOOST_CHECK_EQUAL(preCompiledLayer2->GetOutputSlot(0).GetConnection(0), subgraph2OutputConn);
+ CHECK_EQ(preCompiledLayer2->GetInputSlot(0).GetConnection(), subgraph2InputConn1);
+ CHECK_EQ(preCompiledLayer2->GetInputSlot(1).GetConnection(), subgraph2InputConn2);
+ CHECK_EQ(preCompiledLayer2->GetOutputSlot(0).GetConnection(0), subgraph2OutputConn);
}
}
}
-BOOST_AUTO_TEST_CASE(SubgraphCycles)
+TEST_CASE("SubgraphCycles")
{
// This case represent the scenario when a naive split could lead to a cyclic dependency between two subgraphs
//
@@ -1555,12 +1553,12 @@ BOOST_AUTO_TEST_CASE(SubgraphCycles)
CreateOutputsFrom({m2}),
{m2});
- BOOST_TEST(subgraphs.size() == 2);
+ CHECK(subgraphs.size() == 2);
if (subgraphs.size() == 2)
{
// we need to have valid subgraph pointers here
- BOOST_TEST((subgraphs[0] != nullptr));
- BOOST_TEST((subgraphs[1] != nullptr));
+ CHECK((subgraphs[0] != nullptr));
+ CHECK((subgraphs[1] != nullptr));
if (subgraphs[0].get() != nullptr && subgraphs[1].get() != nullptr)
{
@@ -1573,8 +1571,8 @@ BOOST_AUTO_TEST_CASE(SubgraphCycles)
);
// one subgraph needs to be size=1 and the other one is 4
- BOOST_TEST(subgraphs[0]->GetLayers().size() == 1);
- BOOST_TEST(subgraphs[1]->GetLayers().size() == 2);
+ CHECK(subgraphs[0]->GetLayers().size() == 1);
+ CHECK(subgraphs[1]->GetLayers().size() == 2);
CompareSubgraphViews(subgraphs[0], outputSubgraph);
CompareSubgraphViews(subgraphs[1], inputSubgraph);
@@ -1582,7 +1580,7 @@ BOOST_AUTO_TEST_CASE(SubgraphCycles)
}
}
-BOOST_AUTO_TEST_CASE(SubgraphOrder)
+TEST_CASE("SubgraphOrder")
{
Graph graph;
@@ -1603,10 +1601,10 @@ BOOST_AUTO_TEST_CASE(SubgraphOrder)
LayerType expectedSorted[] = {LayerType::Input, LayerType::Activation, LayerType::Output};
view->ForEachLayer([&idx, &expectedSorted](const Layer* l)
{
- BOOST_TEST((expectedSorted[idx] == l->GetType()));
+ CHECK((expectedSorted[idx] == l->GetType()));
idx++;
}
);
}
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnn/test/TensorHandleStrategyTest.cpp b/src/armnn/test/TensorHandleStrategyTest.cpp
index 47d0666414..fb26880d0c 100644
--- a/src/armnn/test/TensorHandleStrategyTest.cpp
+++ b/src/armnn/test/TensorHandleStrategyTest.cpp
@@ -2,7 +2,8 @@
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
-#include <boost/test/unit_test.hpp>
+
+#include <doctest/doctest.h>
#include <armnn/LayerVisitorBase.hpp>
@@ -270,29 +271,29 @@ private:
};
-BOOST_AUTO_TEST_SUITE(TensorHandle)
-
-BOOST_AUTO_TEST_CASE(RegisterFactories)
+TEST_SUITE("TensorHandle")
+{
+TEST_CASE("RegisterFactories")
{
TestBackendA backendA;
TestBackendB backendB;
- BOOST_TEST(backendA.GetHandleFactoryPreferences()[0] == "TestHandleFactoryA1");
- BOOST_TEST(backendA.GetHandleFactoryPreferences()[1] == "TestHandleFactoryA2");
- BOOST_TEST(backendA.GetHandleFactoryPreferences()[2] == "TestHandleFactoryB1");
- BOOST_TEST(backendA.GetHandleFactoryPreferences()[3] == "TestHandleFactoryD1");
+ CHECK(backendA.GetHandleFactoryPreferences()[0] == "TestHandleFactoryA1");
+ CHECK(backendA.GetHandleFactoryPreferences()[1] == "TestHandleFactoryA2");
+ CHECK(backendA.GetHandleFactoryPreferences()[2] == "TestHandleFactoryB1");
+ CHECK(backendA.GetHandleFactoryPreferences()[3] == "TestHandleFactoryD1");
TensorHandleFactoryRegistry registry;
backendA.RegisterTensorHandleFactories(registry);
backendB.RegisterTensorHandleFactories(registry);
- BOOST_TEST((registry.GetFactory("Non-existing Backend") == nullptr));
- BOOST_TEST((registry.GetFactory("TestHandleFactoryA1") != nullptr));
- BOOST_TEST((registry.GetFactory("TestHandleFactoryA2") != nullptr));
- BOOST_TEST((registry.GetFactory("TestHandleFactoryB1") != nullptr));
+ CHECK((registry.GetFactory("Non-existing Backend") == nullptr));
+ CHECK((registry.GetFactory("TestHandleFactoryA1") != nullptr));
+ CHECK((registry.GetFactory("TestHandleFactoryA2") != nullptr));
+ CHECK((registry.GetFactory("TestHandleFactoryB1") != nullptr));
}
-BOOST_AUTO_TEST_CASE(TensorHandleSelectionStrategy)
+TEST_CASE("TensorHandleSelectionStrategy")
{
auto backendA = std::make_unique<TestBackendA>();
auto backendB = std::make_unique<TestBackendB>();
@@ -343,8 +344,8 @@ BOOST_AUTO_TEST_CASE(TensorHandleSelectionStrategy)
std::vector<std::string> errors;
auto result = SelectTensorHandleStrategy(graph, backends, registry, true, errors);
- BOOST_TEST(result.m_Error == false);
- BOOST_TEST(result.m_Warning == false);
+ CHECK(result.m_Error == false);
+ CHECK(result.m_Warning == false);
OutputSlot& inputLayerOut = inputLayer->GetOutputSlot(0);
OutputSlot& softmaxLayer1Out = softmaxLayer1->GetOutputSlot(0);
@@ -353,18 +354,18 @@ BOOST_AUTO_TEST_CASE(TensorHandleSelectionStrategy)
OutputSlot& softmaxLayer4Out = softmaxLayer4->GetOutputSlot(0);
// Check that the correct factory was selected
- BOOST_TEST(inputLayerOut.GetTensorHandleFactoryId() == "TestHandleFactoryD1");
- BOOST_TEST(softmaxLayer1Out.GetTensorHandleFactoryId() == "TestHandleFactoryB1");
- BOOST_TEST(softmaxLayer2Out.GetTensorHandleFactoryId() == "TestHandleFactoryB1");
- BOOST_TEST(softmaxLayer3Out.GetTensorHandleFactoryId() == "TestHandleFactoryC1");
- BOOST_TEST(softmaxLayer4Out.GetTensorHandleFactoryId() == "TestHandleFactoryD1");
+ CHECK(inputLayerOut.GetTensorHandleFactoryId() == "TestHandleFactoryD1");
+ CHECK(softmaxLayer1Out.GetTensorHandleFactoryId() == "TestHandleFactoryB1");
+ CHECK(softmaxLayer2Out.GetTensorHandleFactoryId() == "TestHandleFactoryB1");
+ CHECK(softmaxLayer3Out.GetTensorHandleFactoryId() == "TestHandleFactoryC1");
+ CHECK(softmaxLayer4Out.GetTensorHandleFactoryId() == "TestHandleFactoryD1");
// Check that the correct strategy was selected
- BOOST_TEST((inputLayerOut.GetEdgeStrategyForConnection(0) == EdgeStrategy::DirectCompatibility));
- BOOST_TEST((softmaxLayer1Out.GetEdgeStrategyForConnection(0) == EdgeStrategy::DirectCompatibility));
- BOOST_TEST((softmaxLayer2Out.GetEdgeStrategyForConnection(0) == EdgeStrategy::CopyToTarget));
- BOOST_TEST((softmaxLayer3Out.GetEdgeStrategyForConnection(0) == EdgeStrategy::ExportToTarget));
- BOOST_TEST((softmaxLayer4Out.GetEdgeStrategyForConnection(0) == EdgeStrategy::DirectCompatibility));
+ CHECK((inputLayerOut.GetEdgeStrategyForConnection(0) == EdgeStrategy::DirectCompatibility));
+ CHECK((softmaxLayer1Out.GetEdgeStrategyForConnection(0) == EdgeStrategy::DirectCompatibility));
+ CHECK((softmaxLayer2Out.GetEdgeStrategyForConnection(0) == EdgeStrategy::CopyToTarget));
+ CHECK((softmaxLayer3Out.GetEdgeStrategyForConnection(0) == EdgeStrategy::ExportToTarget));
+ CHECK((softmaxLayer4Out.GetEdgeStrategyForConnection(0) == EdgeStrategy::DirectCompatibility));
graph.AddCompatibilityLayers(backends, registry);
@@ -377,7 +378,7 @@ BOOST_AUTO_TEST_CASE(TensorHandleSelectionStrategy)
copyCount++;
}
});
- BOOST_TEST(copyCount == 1);
+ CHECK(copyCount == 1);
// Test for import layers
int importCount= 0;
@@ -388,7 +389,7 @@ BOOST_AUTO_TEST_CASE(TensorHandleSelectionStrategy)
importCount++;
}
});
- BOOST_TEST(importCount == 1);
+ CHECK(importCount == 1);
}
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnn/test/TensorHelpers.hpp b/src/armnn/test/TensorHelpers.hpp
index b8788e7826..95cea58b30 100644
--- a/src/armnn/test/TensorHelpers.hpp
+++ b/src/armnn/test/TensorHelpers.hpp
@@ -12,7 +12,7 @@
#include <QuantizeHelper.hpp>
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
#include <array>
#include <cmath>
diff --git a/src/armnn/test/TensorTest.cpp b/src/armnn/test/TensorTest.cpp
index a0b68acdd2..fd2d7846e2 100644
--- a/src/armnn/test/TensorTest.cpp
+++ b/src/armnn/test/TensorTest.cpp
@@ -6,40 +6,12 @@
#include <armnn/Tensor.hpp>
#include <armnn/utility/IgnoreUnused.hpp>
+#include <doctest/doctest.h>
-namespace armnn
-{
-
-// Adds unit test framework for interpreting TensorInfo type.
-std::ostream& boost_test_print_type(std::ostream& ostr, const TensorInfo& right)
-{
- ostr << "TensorInfo[ "
- << right.GetNumDimensions() << ","
- << right.GetShape()[0] << ","
- << right.GetShape()[1] << ","
- << right.GetShape()[2] << ","
- << right.GetShape()[3]
- << " ]" << std::endl;
- return ostr;
-}
-
-std::ostream& boost_test_print_type(std::ostream& ostr, const TensorShape& shape)
-{
- ostr << "TensorShape[ "
- << shape.GetNumDimensions() << ","
- << shape[0] << ","
- << shape[1] << ","
- << shape[2] << ","
- << shape[3]
- << " ]" << std::endl;
- return ostr;
-}
-
-} //namespace armnn
using namespace armnn;
-BOOST_AUTO_TEST_SUITE(Tensor)
-
+TEST_SUITE("Tensor")
+{
struct TensorInfoFixture
{
TensorInfoFixture()
@@ -52,54 +24,54 @@ struct TensorInfoFixture
TensorInfo m_TensorInfo;
};
-BOOST_FIXTURE_TEST_CASE(ConstructShapeUsingListInitialization, TensorInfoFixture)
+TEST_CASE_FIXTURE(TensorInfoFixture, "ConstructShapeUsingListInitialization")
{
TensorShape listInitializedShape{ 6, 7, 8, 9 };
- BOOST_TEST(listInitializedShape == m_TensorInfo.GetShape());
+ CHECK(listInitializedShape == m_TensorInfo.GetShape());
}
-BOOST_FIXTURE_TEST_CASE(ConstructTensorInfo, TensorInfoFixture)
+TEST_CASE_FIXTURE(TensorInfoFixture, "ConstructTensorInfo")
{
- BOOST_TEST(m_TensorInfo.GetNumDimensions() == 4);
- BOOST_TEST(m_TensorInfo.GetShape()[0] == 6); // <= Outer most
- BOOST_TEST(m_TensorInfo.GetShape()[1] == 7);
- BOOST_TEST(m_TensorInfo.GetShape()[2] == 8);
- BOOST_TEST(m_TensorInfo.GetShape()[3] == 9); // <= Inner most
+ CHECK(m_TensorInfo.GetNumDimensions() == 4);
+ CHECK(m_TensorInfo.GetShape()[0] == 6); // <= Outer most
+ CHECK(m_TensorInfo.GetShape()[1] == 7);
+ CHECK(m_TensorInfo.GetShape()[2] == 8);
+ CHECK(m_TensorInfo.GetShape()[3] == 9); // <= Inner most
}
-BOOST_FIXTURE_TEST_CASE(CopyConstructTensorInfo, TensorInfoFixture)
+TEST_CASE_FIXTURE(TensorInfoFixture, "CopyConstructTensorInfo")
{
TensorInfo copyConstructed(m_TensorInfo);
- BOOST_TEST(copyConstructed.GetNumDimensions() == 4);
- BOOST_TEST(copyConstructed.GetShape()[0] == 6);
- BOOST_TEST(copyConstructed.GetShape()[1] == 7);
- BOOST_TEST(copyConstructed.GetShape()[2] == 8);
- BOOST_TEST(copyConstructed.GetShape()[3] == 9);
+ CHECK(copyConstructed.GetNumDimensions() == 4);
+ CHECK(copyConstructed.GetShape()[0] == 6);
+ CHECK(copyConstructed.GetShape()[1] == 7);
+ CHECK(copyConstructed.GetShape()[2] == 8);
+ CHECK(copyConstructed.GetShape()[3] == 9);
}
-BOOST_FIXTURE_TEST_CASE(TensorInfoEquality, TensorInfoFixture)
+TEST_CASE_FIXTURE(TensorInfoFixture, "TensorInfoEquality")
{
TensorInfo copyConstructed(m_TensorInfo);
- BOOST_TEST(copyConstructed == m_TensorInfo);
+ CHECK(copyConstructed == m_TensorInfo);
}
-BOOST_FIXTURE_TEST_CASE(TensorInfoInequality, TensorInfoFixture)
+TEST_CASE_FIXTURE(TensorInfoFixture, "TensorInfoInequality")
{
TensorInfo other;
unsigned int sizes[] = {2,3,4,5};
other = TensorInfo(4, sizes, DataType::Float32);
- BOOST_TEST(other != m_TensorInfo);
+ CHECK(other != m_TensorInfo);
}
-BOOST_FIXTURE_TEST_CASE(TensorInfoAssignmentOperator, TensorInfoFixture)
+TEST_CASE_FIXTURE(TensorInfoFixture, "TensorInfoAssignmentOperator")
{
TensorInfo copy;
copy = m_TensorInfo;
- BOOST_TEST(copy == m_TensorInfo);
+ CHECK(copy == m_TensorInfo);
}
-BOOST_AUTO_TEST_CASE(CopyNoQuantizationTensorInfo)
+TEST_CASE("CopyNoQuantizationTensorInfo")
{
TensorInfo infoA;
infoA.SetShape({ 5, 6, 7, 8 });
@@ -112,24 +84,24 @@ BOOST_AUTO_TEST_CASE(CopyNoQuantizationTensorInfo)
infoB.SetQuantizationOffset(5);
infoB.SetQuantizationDim(Optional<unsigned int>(1));
- BOOST_TEST((infoA.GetShape() == TensorShape({ 5, 6, 7, 8 })));
- BOOST_TEST((infoA.GetDataType() == DataType::QAsymmU8));
- BOOST_TEST(infoA.GetQuantizationScale() == 1);
- BOOST_TEST(infoA.GetQuantizationOffset() == 0);
- BOOST_CHECK(!infoA.GetQuantizationDim().has_value());
+ CHECK((infoA.GetShape() == TensorShape({ 5, 6, 7, 8 })));
+ CHECK((infoA.GetDataType() == DataType::QAsymmU8));
+ CHECK(infoA.GetQuantizationScale() == 1);
+ CHECK(infoA.GetQuantizationOffset() == 0);
+ CHECK(!infoA.GetQuantizationDim().has_value());
- BOOST_TEST(infoA != infoB);
+ CHECK(infoA != infoB);
infoA = infoB;
- BOOST_TEST(infoA == infoB);
+ CHECK(infoA == infoB);
- BOOST_TEST((infoA.GetShape() == TensorShape({ 5, 6, 7, 8 })));
- BOOST_TEST((infoA.GetDataType() == DataType::QAsymmU8));
- BOOST_TEST(infoA.GetQuantizationScale() == 10.0f);
- BOOST_TEST(infoA.GetQuantizationOffset() == 5);
- BOOST_CHECK(infoA.GetQuantizationDim().value() == 1);
+ CHECK((infoA.GetShape() == TensorShape({ 5, 6, 7, 8 })));
+ CHECK((infoA.GetDataType() == DataType::QAsymmU8));
+ CHECK(infoA.GetQuantizationScale() == 10.0f);
+ CHECK(infoA.GetQuantizationOffset() == 5);
+ CHECK(infoA.GetQuantizationDim().value() == 1);
}
-BOOST_AUTO_TEST_CASE(CopyDifferentQuantizationTensorInfo)
+TEST_CASE("CopyDifferentQuantizationTensorInfo")
{
TensorInfo infoA;
infoA.SetShape({ 5, 6, 7, 8 });
@@ -145,21 +117,21 @@ BOOST_AUTO_TEST_CASE(CopyDifferentQuantizationTensorInfo)
infoB.SetQuantizationOffset(6);
infoB.SetQuantizationDim(Optional<unsigned int>(2));
- BOOST_TEST((infoA.GetShape() == TensorShape({ 5, 6, 7, 8 })));
- BOOST_TEST((infoA.GetDataType() == DataType::QAsymmU8));
- BOOST_TEST(infoA.GetQuantizationScale() == 10.0f);
- BOOST_TEST(infoA.GetQuantizationOffset() == 5);
- BOOST_CHECK(infoA.GetQuantizationDim().value() == 1);
+ CHECK((infoA.GetShape() == TensorShape({ 5, 6, 7, 8 })));
+ CHECK((infoA.GetDataType() == DataType::QAsymmU8));
+ CHECK(infoA.GetQuantizationScale() == 10.0f);
+ CHECK(infoA.GetQuantizationOffset() == 5);
+ CHECK(infoA.GetQuantizationDim().value() == 1);
- BOOST_TEST(infoA != infoB);
+ CHECK(infoA != infoB);
infoA = infoB;
- BOOST_TEST(infoA == infoB);
+ CHECK(infoA == infoB);
- BOOST_TEST((infoA.GetShape() == TensorShape({ 5, 6, 7, 8 })));
- BOOST_TEST((infoA.GetDataType() == DataType::QAsymmU8));
- BOOST_TEST(infoA.GetQuantizationScale() == 11.0f);
- BOOST_TEST(infoA.GetQuantizationOffset() == 6);
- BOOST_CHECK(infoA.GetQuantizationDim().value() == 2);
+ CHECK((infoA.GetShape() == TensorShape({ 5, 6, 7, 8 })));
+ CHECK((infoA.GetDataType() == DataType::QAsymmU8));
+ CHECK(infoA.GetQuantizationScale() == 11.0f);
+ CHECK(infoA.GetQuantizationOffset() == 6);
+ CHECK(infoA.GetQuantizationDim().value() == 2);
}
void CheckTensor(const ConstTensor& t)
@@ -167,7 +139,7 @@ void CheckTensor(const ConstTensor& t)
t.GetInfo();
}
-BOOST_AUTO_TEST_CASE(TensorVsConstTensor)
+TEST_CASE("TensorVsConstTensor")
{
int mutableDatum = 2;
const int immutableDatum = 3;
@@ -185,68 +157,68 @@ BOOST_AUTO_TEST_CASE(TensorVsConstTensor)
CheckTensor(ct);
}
-BOOST_AUTO_TEST_CASE(ModifyTensorInfo)
+TEST_CASE("ModifyTensorInfo")
{
TensorInfo info;
info.SetShape({ 5, 6, 7, 8 });
- BOOST_TEST((info.GetShape() == TensorShape({ 5, 6, 7, 8 })));
+ CHECK((info.GetShape() == TensorShape({ 5, 6, 7, 8 })));
info.SetDataType(DataType::QAsymmU8);
- BOOST_TEST((info.GetDataType() == DataType::QAsymmU8));
+ CHECK((info.GetDataType() == DataType::QAsymmU8));
info.SetQuantizationScale(10.0f);
- BOOST_TEST(info.GetQuantizationScale() == 10.0f);
+ CHECK(info.GetQuantizationScale() == 10.0f);
info.SetQuantizationOffset(5);
- BOOST_TEST(info.GetQuantizationOffset() == 5);
+ CHECK(info.GetQuantizationOffset() == 5);
}
-BOOST_AUTO_TEST_CASE(TensorShapeOperatorBrackets)
+TEST_CASE("TensorShapeOperatorBrackets")
{
const TensorShape constShape({0,1,2,3});
TensorShape shape({0,1,2,3});
// Checks version of operator[] which returns an unsigned int.
- BOOST_TEST(shape[2] == 2);
+ CHECK(shape[2] == 2);
shape[2] = 20;
- BOOST_TEST(shape[2] == 20);
+ CHECK(shape[2] == 20);
// Checks the version of operator[] which returns a reference.
- BOOST_TEST(constShape[2] == 2);
+ CHECK(constShape[2] == 2);
}
-BOOST_AUTO_TEST_CASE(TensorInfoPerAxisQuantization)
+TEST_CASE("TensorInfoPerAxisQuantization")
{
// Old constructor
TensorInfo tensorInfo0({ 1, 1 }, DataType::Float32, 2.0f, 1);
- BOOST_CHECK(!tensorInfo0.HasMultipleQuantizationScales());
- BOOST_CHECK(tensorInfo0.GetQuantizationScale() == 2.0f);
- BOOST_CHECK(tensorInfo0.GetQuantizationOffset() == 1);
- BOOST_CHECK(tensorInfo0.GetQuantizationScales()[0] == 2.0f);
- BOOST_CHECK(!tensorInfo0.GetQuantizationDim().has_value());
+ CHECK(!tensorInfo0.HasMultipleQuantizationScales());
+ CHECK(tensorInfo0.GetQuantizationScale() == 2.0f);
+ CHECK(tensorInfo0.GetQuantizationOffset() == 1);
+ CHECK(tensorInfo0.GetQuantizationScales()[0] == 2.0f);
+ CHECK(!tensorInfo0.GetQuantizationDim().has_value());
// Set per-axis quantization scales
std::vector<float> perAxisScales{ 3.0f, 4.0f };
tensorInfo0.SetQuantizationScales(perAxisScales);
- BOOST_CHECK(tensorInfo0.HasMultipleQuantizationScales());
- BOOST_CHECK(tensorInfo0.GetQuantizationScales() == perAxisScales);
+ CHECK(tensorInfo0.HasMultipleQuantizationScales());
+ CHECK(tensorInfo0.GetQuantizationScales() == perAxisScales);
// Set per-tensor quantization scale
tensorInfo0.SetQuantizationScale(5.0f);
- BOOST_CHECK(!tensorInfo0.HasMultipleQuantizationScales());
- BOOST_CHECK(tensorInfo0.GetQuantizationScales()[0] == 5.0f);
+ CHECK(!tensorInfo0.HasMultipleQuantizationScales());
+ CHECK(tensorInfo0.GetQuantizationScales()[0] == 5.0f);
// Set quantization offset
tensorInfo0.SetQuantizationDim(Optional<unsigned int>(1));
- BOOST_CHECK(tensorInfo0.GetQuantizationDim().value() == 1);
+ CHECK(tensorInfo0.GetQuantizationDim().value() == 1);
// New constructor
perAxisScales = { 6.0f, 7.0f };
TensorInfo tensorInfo1({ 1, 1 }, DataType::Float32, perAxisScales, 1);
- BOOST_CHECK(tensorInfo1.HasMultipleQuantizationScales());
- BOOST_CHECK(tensorInfo1.GetQuantizationOffset() == 0);
- BOOST_CHECK(tensorInfo1.GetQuantizationScales() == perAxisScales);
- BOOST_CHECK(tensorInfo1.GetQuantizationDim().value() == 1);
+ CHECK(tensorInfo1.HasMultipleQuantizationScales());
+ CHECK(tensorInfo1.GetQuantizationOffset() == 0);
+ CHECK(tensorInfo1.GetQuantizationScales() == perAxisScales);
+ CHECK(tensorInfo1.GetQuantizationDim().value() == 1);
}
-BOOST_AUTO_TEST_CASE(TensorShape_scalar)
+TEST_CASE("TensorShape_scalar")
{
float mutableDatum = 3.1416f;
@@ -254,33 +226,33 @@ BOOST_AUTO_TEST_CASE(TensorShape_scalar)
armnn::TensorInfo info ( shape, DataType::Float32 );
const armnn::Tensor tensor ( info, &mutableDatum );
- BOOST_CHECK(armnn::Dimensionality::Scalar == shape.GetDimensionality());
+ CHECK(armnn::Dimensionality::Scalar == shape.GetDimensionality());
float scalarValue = *reinterpret_cast<float*>(tensor.GetMemoryArea());
- BOOST_CHECK_MESSAGE(mutableDatum == scalarValue, "Scalar value is " << scalarValue);
+ CHECK_MESSAGE(mutableDatum == scalarValue, "Scalar value is " << scalarValue);
armnn::TensorShape shape_equal;
armnn::TensorShape shape_different;
shape_equal = shape;
- BOOST_TEST(shape_equal == shape);
- BOOST_TEST(shape_different != shape);
- BOOST_CHECK_MESSAGE(1 == shape.GetNumElements(), "Number of elements is " << shape.GetNumElements());
- BOOST_CHECK_MESSAGE(1 == shape.GetNumDimensions(), "Number of dimensions is " << shape.GetNumDimensions());
- BOOST_CHECK(true == shape.GetDimensionSpecificity(0));
- BOOST_CHECK(shape.AreAllDimensionsSpecified());
- BOOST_CHECK(shape.IsAtLeastOneDimensionSpecified());
-
- BOOST_TEST(1 == shape[0]);
- BOOST_TEST(1 == tensor.GetShape()[0]);
- BOOST_TEST(1 == tensor.GetInfo().GetShape()[0]);
- BOOST_CHECK_THROW( shape[1], InvalidArgumentException );
+ CHECK(shape_equal == shape);
+ CHECK(shape_different != shape);
+ CHECK_MESSAGE(1 == shape.GetNumElements(), "Number of elements is " << shape.GetNumElements());
+ CHECK_MESSAGE(1 == shape.GetNumDimensions(), "Number of dimensions is " << shape.GetNumDimensions());
+ CHECK(true == shape.GetDimensionSpecificity(0));
+ CHECK(shape.AreAllDimensionsSpecified());
+ CHECK(shape.IsAtLeastOneDimensionSpecified());
+
+ CHECK(1 == shape[0]);
+ CHECK(1 == tensor.GetShape()[0]);
+ CHECK(1 == tensor.GetInfo().GetShape()[0]);
+ CHECK_THROWS_AS( shape[1], InvalidArgumentException );
float newMutableDatum = 42.f;
std::memcpy(tensor.GetMemoryArea(), &newMutableDatum, sizeof(float));
scalarValue = *reinterpret_cast<float*>(tensor.GetMemoryArea());
- BOOST_CHECK_MESSAGE(newMutableDatum == scalarValue, "Scalar value is " << scalarValue);
+ CHECK_MESSAGE(newMutableDatum == scalarValue, "Scalar value is " << scalarValue);
}
-BOOST_AUTO_TEST_CASE(TensorShape_DynamicTensorType1_unknownNumberDimensions)
+TEST_CASE("TensorShape_DynamicTensorType1_unknownNumberDimensions")
{
float mutableDatum = 3.1416f;
@@ -288,19 +260,19 @@ BOOST_AUTO_TEST_CASE(TensorShape_DynamicTensorType1_unknownNumberDimensions)
armnn::TensorInfo info ( shape, DataType::Float32 );
armnn::Tensor tensor ( info, &mutableDatum );
- BOOST_CHECK(armnn::Dimensionality::NotSpecified == shape.GetDimensionality());
- BOOST_CHECK_THROW( shape[0], InvalidArgumentException );
- BOOST_CHECK_THROW( shape.GetNumElements(), InvalidArgumentException );
- BOOST_CHECK_THROW( shape.GetNumDimensions(), InvalidArgumentException );
+ CHECK(armnn::Dimensionality::NotSpecified == shape.GetDimensionality());
+ CHECK_THROWS_AS( shape[0], InvalidArgumentException );
+ CHECK_THROWS_AS( shape.GetNumElements(), InvalidArgumentException );
+ CHECK_THROWS_AS( shape.GetNumDimensions(), InvalidArgumentException );
armnn::TensorShape shape_equal;
armnn::TensorShape shape_different;
shape_equal = shape;
- BOOST_TEST(shape_equal == shape);
- BOOST_TEST(shape_different != shape);
+ CHECK(shape_equal == shape);
+ CHECK(shape_different != shape);
}
-BOOST_AUTO_TEST_CASE(TensorShape_DynamicTensorType1_unknownAllDimensionsSizes)
+TEST_CASE("TensorShape_DynamicTensorType1_unknownAllDimensionsSizes")
{
float mutableDatum = 3.1416f;
@@ -308,23 +280,23 @@ BOOST_AUTO_TEST_CASE(TensorShape_DynamicTensorType1_unknownAllDimensionsSizes)
armnn::TensorInfo info ( shape, DataType::Float32 );
armnn::Tensor tensor ( info, &mutableDatum );
- BOOST_CHECK(armnn::Dimensionality::Specified == shape.GetDimensionality());
- BOOST_CHECK_MESSAGE(0 == shape.GetNumElements(), "Number of elements is " << shape.GetNumElements());
- BOOST_CHECK_MESSAGE(3 == shape.GetNumDimensions(), "Number of dimensions is " << shape.GetNumDimensions());
- BOOST_CHECK(false == shape.GetDimensionSpecificity(0));
- BOOST_CHECK(false == shape.GetDimensionSpecificity(1));
- BOOST_CHECK(false == shape.GetDimensionSpecificity(2));
- BOOST_CHECK(!shape.AreAllDimensionsSpecified());
- BOOST_CHECK(!shape.IsAtLeastOneDimensionSpecified());
+ CHECK(armnn::Dimensionality::Specified == shape.GetDimensionality());
+ CHECK_MESSAGE(0 == shape.GetNumElements(), "Number of elements is " << shape.GetNumElements());
+ CHECK_MESSAGE(3 == shape.GetNumDimensions(), "Number of dimensions is " << shape.GetNumDimensions());
+ CHECK(false == shape.GetDimensionSpecificity(0));
+ CHECK(false == shape.GetDimensionSpecificity(1));
+ CHECK(false == shape.GetDimensionSpecificity(2));
+ CHECK(!shape.AreAllDimensionsSpecified());
+ CHECK(!shape.IsAtLeastOneDimensionSpecified());
armnn::TensorShape shape_equal;
armnn::TensorShape shape_different;
shape_equal = shape;
- BOOST_TEST(shape_equal == shape);
- BOOST_TEST(shape_different != shape);
+ CHECK(shape_equal == shape);
+ CHECK(shape_different != shape);
}
-BOOST_AUTO_TEST_CASE(TensorShape_DynamicTensorType1_unknownSomeDimensionsSizes)
+TEST_CASE("TensorShape_DynamicTensorType1_unknownSomeDimensionsSizes")
{
std::vector<float> mutableDatum { 42.f, 42.f, 42.f,
0.0f, 0.1f, 0.2f };
@@ -333,36 +305,36 @@ BOOST_AUTO_TEST_CASE(TensorShape_DynamicTensorType1_unknownSomeDimensionsSizes)
armnn::TensorInfo info ( shape, DataType::Float32 );
armnn::Tensor tensor ( info, &mutableDatum );
- BOOST_CHECK(armnn::Dimensionality::Specified == shape.GetDimensionality());
- BOOST_CHECK_MESSAGE(6 == shape.GetNumElements(), "Number of elements is " << shape.GetNumElements());
- BOOST_CHECK_MESSAGE(3 == shape.GetNumDimensions(), "Number of dimensions is " << shape.GetNumDimensions());
- BOOST_CHECK(true == shape.GetDimensionSpecificity(0));
- BOOST_CHECK(false == shape.GetDimensionSpecificity(1));
- BOOST_CHECK(true == shape.GetDimensionSpecificity(2));
- BOOST_CHECK(!shape.AreAllDimensionsSpecified());
- BOOST_CHECK(shape.IsAtLeastOneDimensionSpecified());
+ CHECK(armnn::Dimensionality::Specified == shape.GetDimensionality());
+ CHECK_MESSAGE(6 == shape.GetNumElements(), "Number of elements is " << shape.GetNumElements());
+ CHECK_MESSAGE(3 == shape.GetNumDimensions(), "Number of dimensions is " << shape.GetNumDimensions());
+ CHECK(true == shape.GetDimensionSpecificity(0));
+ CHECK(false == shape.GetDimensionSpecificity(1));
+ CHECK(true == shape.GetDimensionSpecificity(2));
+ CHECK(!shape.AreAllDimensionsSpecified());
+ CHECK(shape.IsAtLeastOneDimensionSpecified());
- BOOST_CHECK_THROW(shape[1], InvalidArgumentException);
- BOOST_CHECK_THROW(tensor.GetShape()[1], InvalidArgumentException);
- BOOST_CHECK_THROW(tensor.GetInfo().GetShape()[1], InvalidArgumentException);
+ CHECK_THROWS_AS(shape[1], InvalidArgumentException);
+ CHECK_THROWS_AS(tensor.GetShape()[1], InvalidArgumentException);
+ CHECK_THROWS_AS(tensor.GetInfo().GetShape()[1], InvalidArgumentException);
- BOOST_TEST(2 == shape[0]);
- BOOST_TEST(2 == tensor.GetShape()[0]);
- BOOST_TEST(2 == tensor.GetInfo().GetShape()[0]);
- BOOST_CHECK_THROW( shape[1], InvalidArgumentException );
+ CHECK(2 == shape[0]);
+ CHECK(2 == tensor.GetShape()[0]);
+ CHECK(2 == tensor.GetInfo().GetShape()[0]);
+ CHECK_THROWS_AS( shape[1], InvalidArgumentException );
- BOOST_TEST(3 == shape[2]);
- BOOST_TEST(3 == tensor.GetShape()[2]);
- BOOST_TEST(3 == tensor.GetInfo().GetShape()[2]);
+ CHECK(3 == shape[2]);
+ CHECK(3 == tensor.GetShape()[2]);
+ CHECK(3 == tensor.GetInfo().GetShape()[2]);
armnn::TensorShape shape_equal;
armnn::TensorShape shape_different;
shape_equal = shape;
- BOOST_TEST(shape_equal == shape);
- BOOST_TEST(shape_different != shape);
+ CHECK(shape_equal == shape);
+ CHECK(shape_different != shape);
}
-BOOST_AUTO_TEST_CASE(TensorShape_DynamicTensorType1_transitionFromUnknownToKnownDimensionsSizes)
+TEST_CASE("TensorShape_DynamicTensorType1_transitionFromUnknownToKnownDimensionsSizes")
{
std::vector<float> mutableDatum { 42.f, 42.f, 42.f,
0.0f, 0.1f, 0.2f };
@@ -373,74 +345,74 @@ BOOST_AUTO_TEST_CASE(TensorShape_DynamicTensorType1_transitionFromUnknownToKnown
// Specify the number of dimensions
shape.SetNumDimensions(3);
- BOOST_CHECK(armnn::Dimensionality::Specified == shape.GetDimensionality());
- BOOST_CHECK_MESSAGE(3 == shape.GetNumDimensions(), "Number of dimensions is " << shape.GetNumDimensions());
- BOOST_CHECK(false == shape.GetDimensionSpecificity(0));
- BOOST_CHECK(false == shape.GetDimensionSpecificity(1));
- BOOST_CHECK(false == shape.GetDimensionSpecificity(2));
- BOOST_CHECK(!shape.AreAllDimensionsSpecified());
- BOOST_CHECK(!shape.IsAtLeastOneDimensionSpecified());
+ CHECK(armnn::Dimensionality::Specified == shape.GetDimensionality());
+ CHECK_MESSAGE(3 == shape.GetNumDimensions(), "Number of dimensions is " << shape.GetNumDimensions());
+ CHECK(false == shape.GetDimensionSpecificity(0));
+ CHECK(false == shape.GetDimensionSpecificity(1));
+ CHECK(false == shape.GetDimensionSpecificity(2));
+ CHECK(!shape.AreAllDimensionsSpecified());
+ CHECK(!shape.IsAtLeastOneDimensionSpecified());
// Specify dimension 0 and 2.
shape.SetDimensionSize(0, 2);
shape.SetDimensionSize(2, 3);
- BOOST_CHECK_MESSAGE(3 == shape.GetNumDimensions(), "Number of dimensions is " << shape.GetNumDimensions());
- BOOST_CHECK_MESSAGE(6 == shape.GetNumElements(), "Number of elements is " << shape.GetNumElements());
- BOOST_CHECK(true == shape.GetDimensionSpecificity(0));
- BOOST_CHECK(false == shape.GetDimensionSpecificity(1));
- BOOST_CHECK(true == shape.GetDimensionSpecificity(2));
- BOOST_CHECK(!shape.AreAllDimensionsSpecified());
- BOOST_CHECK(shape.IsAtLeastOneDimensionSpecified());
+ CHECK_MESSAGE(3 == shape.GetNumDimensions(), "Number of dimensions is " << shape.GetNumDimensions());
+ CHECK_MESSAGE(6 == shape.GetNumElements(), "Number of elements is " << shape.GetNumElements());
+ CHECK(true == shape.GetDimensionSpecificity(0));
+ CHECK(false == shape.GetDimensionSpecificity(1));
+ CHECK(true == shape.GetDimensionSpecificity(2));
+ CHECK(!shape.AreAllDimensionsSpecified());
+ CHECK(shape.IsAtLeastOneDimensionSpecified());
info.SetShape(shape);
armnn::Tensor tensor2( info, &mutableDatum );
- BOOST_TEST(2 == shape[0]);
- BOOST_TEST(2 == tensor2.GetShape()[0]);
- BOOST_TEST(2 == tensor2.GetInfo().GetShape()[0]);
+ CHECK(2 == shape[0]);
+ CHECK(2 == tensor2.GetShape()[0]);
+ CHECK(2 == tensor2.GetInfo().GetShape()[0]);
- BOOST_CHECK_THROW(shape[1], InvalidArgumentException);
- BOOST_CHECK_THROW(tensor.GetShape()[1], InvalidArgumentException);
- BOOST_CHECK_THROW(tensor.GetInfo().GetShape()[1], InvalidArgumentException);
+ CHECK_THROWS_AS(shape[1], InvalidArgumentException);
+ CHECK_THROWS_AS(tensor.GetShape()[1], InvalidArgumentException);
+ CHECK_THROWS_AS(tensor.GetInfo().GetShape()[1], InvalidArgumentException);
- BOOST_TEST(3 == shape[2]);
- BOOST_TEST(3 == tensor2.GetShape()[2]);
- BOOST_TEST(3 == tensor2.GetInfo().GetShape()[2]);
+ CHECK(3 == shape[2]);
+ CHECK(3 == tensor2.GetShape()[2]);
+ CHECK(3 == tensor2.GetInfo().GetShape()[2]);
armnn::TensorShape shape_equal;
armnn::TensorShape shape_different;
shape_equal = shape;
- BOOST_TEST(shape_equal == shape);
- BOOST_TEST(shape_different != shape);
+ CHECK(shape_equal == shape);
+ CHECK(shape_different != shape);
// Specify dimension 1.
shape.SetDimensionSize(1, 5);
- BOOST_CHECK_MESSAGE(3 == shape.GetNumDimensions(), "Number of dimensions is " << shape.GetNumDimensions());
- BOOST_CHECK_MESSAGE(30 == shape.GetNumElements(), "Number of elements is " << shape.GetNumElements());
- BOOST_CHECK(true == shape.GetDimensionSpecificity(0));
- BOOST_CHECK(true == shape.GetDimensionSpecificity(1));
- BOOST_CHECK(true == shape.GetDimensionSpecificity(2));
- BOOST_CHECK(shape.AreAllDimensionsSpecified());
- BOOST_CHECK(shape.IsAtLeastOneDimensionSpecified());
+ CHECK_MESSAGE(3 == shape.GetNumDimensions(), "Number of dimensions is " << shape.GetNumDimensions());
+ CHECK_MESSAGE(30 == shape.GetNumElements(), "Number of elements is " << shape.GetNumElements());
+ CHECK(true == shape.GetDimensionSpecificity(0));
+ CHECK(true == shape.GetDimensionSpecificity(1));
+ CHECK(true == shape.GetDimensionSpecificity(2));
+ CHECK(shape.AreAllDimensionsSpecified());
+ CHECK(shape.IsAtLeastOneDimensionSpecified());
}
-BOOST_AUTO_TEST_CASE(Tensor_emptyConstructors)
+TEST_CASE("Tensor_emptyConstructors")
{
auto shape = armnn::TensorShape();
- BOOST_CHECK_MESSAGE( 0 == shape.GetNumDimensions(), "Number of dimensions is " << shape.GetNumDimensions());
- BOOST_CHECK_MESSAGE( 0 == shape.GetNumElements(), "Number of elements is " << shape.GetNumElements());
- BOOST_CHECK( armnn::Dimensionality::Specified == shape.GetDimensionality());
- BOOST_CHECK( shape.AreAllDimensionsSpecified());
- BOOST_CHECK_THROW( shape[0], InvalidArgumentException );
+ CHECK_MESSAGE( 0 == shape.GetNumDimensions(), "Number of dimensions is " << shape.GetNumDimensions());
+ CHECK_MESSAGE( 0 == shape.GetNumElements(), "Number of elements is " << shape.GetNumElements());
+ CHECK( armnn::Dimensionality::Specified == shape.GetDimensionality());
+ CHECK( shape.AreAllDimensionsSpecified());
+ CHECK_THROWS_AS( shape[0], InvalidArgumentException );
auto tensor = armnn::Tensor();
- BOOST_CHECK_MESSAGE( 0 == tensor.GetNumDimensions(), "Number of dimensions is " << tensor.GetNumDimensions());
- BOOST_CHECK_MESSAGE( 0 == tensor.GetNumElements(), "Number of elements is " << tensor.GetNumElements());
- BOOST_CHECK_MESSAGE( 0 == tensor.GetShape().GetNumDimensions(), "Number of dimensions is " <<
+ CHECK_MESSAGE( 0 == tensor.GetNumDimensions(), "Number of dimensions is " << tensor.GetNumDimensions());
+ CHECK_MESSAGE( 0 == tensor.GetNumElements(), "Number of elements is " << tensor.GetNumElements());
+ CHECK_MESSAGE( 0 == tensor.GetShape().GetNumDimensions(), "Number of dimensions is " <<
tensor.GetShape().GetNumDimensions());
- BOOST_CHECK_MESSAGE( 0 == tensor.GetShape().GetNumElements(), "Number of dimensions is " <<
+    CHECK_MESSAGE( 0 == tensor.GetShape().GetNumElements(), "Number of elements is " <<
tensor.GetShape().GetNumElements());
- BOOST_CHECK( armnn::Dimensionality::Specified == tensor.GetShape().GetDimensionality());
- BOOST_CHECK( tensor.GetShape().AreAllDimensionsSpecified());
- BOOST_CHECK_THROW( tensor.GetShape()[0], InvalidArgumentException );
+ CHECK( armnn::Dimensionality::Specified == tensor.GetShape().GetDimensionality());
+ CHECK( tensor.GetShape().AreAllDimensionsSpecified());
+ CHECK_THROWS_AS( tensor.GetShape()[0], InvalidArgumentException );
+}
}
-BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/armnn/test/TestInputOutputLayerVisitor.cpp b/src/armnn/test/TestInputOutputLayerVisitor.cpp
index 6563517da1..8462290f81 100644
--- a/src/armnn/test/TestInputOutputLayerVisitor.cpp
+++ b/src/armnn/test/TestInputOutputLayerVisitor.cpp
@@ -5,12 +5,14 @@
#include "TestInputOutputLayerVisitor.hpp"
#include "Network.hpp"
+#include <doctest/doctest.h>
+
namespace armnn
{
-BOOST_AUTO_TEST_SUITE(TestInputOutputLayerVisitor)
-
-BOOST_AUTO_TEST_CASE(CheckInputLayerVisitorBindingIdAndName)
+TEST_SUITE("TestInputOutputLayerVisitor")
+{
+TEST_CASE("CheckInputLayerVisitorBindingIdAndName")
{
const char* layerName = "InputLayer";
TestInputLayerVisitor visitor(1, layerName);
@@ -20,7 +22,7 @@ BOOST_AUTO_TEST_CASE(CheckInputLayerVisitorBindingIdAndName)
layer->Accept(visitor);
}
-BOOST_AUTO_TEST_CASE(CheckInputLayerVisitorBindingIdAndNameNull)
+TEST_CASE("CheckInputLayerVisitorBindingIdAndNameNull")
{
TestInputLayerVisitor visitor(1);
NetworkImpl net;
@@ -29,7 +31,7 @@ BOOST_AUTO_TEST_CASE(CheckInputLayerVisitorBindingIdAndNameNull)
layer->Accept(visitor);
}
-BOOST_AUTO_TEST_CASE(CheckOutputLayerVisitorBindingIdAndName)
+TEST_CASE("CheckOutputLayerVisitorBindingIdAndName")
{
const char* layerName = "OutputLayer";
TestOutputLayerVisitor visitor(1, layerName);
@@ -39,7 +41,7 @@ BOOST_AUTO_TEST_CASE(CheckOutputLayerVisitorBindingIdAndName)
layer->Accept(visitor);
}
-BOOST_AUTO_TEST_CASE(CheckOutputLayerVisitorBindingIdAndNameNull)
+TEST_CASE("CheckOutputLayerVisitorBindingIdAndNameNull")
{
TestOutputLayerVisitor visitor(1);
NetworkImpl net;
@@ -48,6 +50,6 @@ BOOST_AUTO_TEST_CASE(CheckOutputLayerVisitorBindingIdAndNameNull)
layer->Accept(visitor);
}
-BOOST_AUTO_TEST_SUITE_END()
+}
} //namespace armnn \ No newline at end of file
diff --git a/src/armnn/test/TestInputOutputLayerVisitor.hpp b/src/armnn/test/TestInputOutputLayerVisitor.hpp
index f67a65cd09..b89089530e 100644
--- a/src/armnn/test/TestInputOutputLayerVisitor.hpp
+++ b/src/armnn/test/TestInputOutputLayerVisitor.hpp
@@ -5,14 +5,14 @@
#pragma once
#include "TestLayerVisitor.hpp"
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
namespace armnn
{
void CheckLayerBindingId(LayerBindingId visitorId, LayerBindingId id)
{
- BOOST_CHECK_EQUAL(visitorId, id);
+ CHECK_EQ(visitorId, id);
}
// Concrete TestLayerVisitor subclasses for layers taking LayerBindingId argument with overridden VisitLayer methods
diff --git a/src/armnn/test/TestLayerVisitor.cpp b/src/armnn/test/TestLayerVisitor.cpp
index ba30dbc666..ec405119d1 100644
--- a/src/armnn/test/TestLayerVisitor.cpp
+++ b/src/armnn/test/TestLayerVisitor.cpp
@@ -3,9 +3,10 @@
// SPDX-License-Identifier: MIT
//
-#include <boost/test/unit_test.hpp>
#include "TestLayerVisitor.hpp"
+#include <doctest/doctest.h>
+
namespace armnn
{
@@ -13,29 +14,29 @@ void TestLayerVisitor::CheckLayerName(const char* name)
{
if (name == nullptr)
{
- BOOST_CHECK(m_LayerName == nullptr);
+ CHECK(m_LayerName == nullptr);
}
else if (m_LayerName == nullptr)
{
- BOOST_CHECK(name == nullptr);
+ CHECK(name == nullptr);
}
else
{
- BOOST_CHECK_EQUAL(m_LayerName, name);
+ CHECK_EQ(std::string(m_LayerName), std::string(name));
}
}
void TestLayerVisitor::CheckLayerPointer(const IConnectableLayer* layer)
{
- BOOST_CHECK(layer != nullptr);
+ CHECK(layer != nullptr);
}
void TestLayerVisitor::CheckConstTensors(const ConstTensor& expected, const ConstTensor& actual)
{
- BOOST_CHECK(expected.GetInfo() == actual.GetInfo());
- BOOST_CHECK(expected.GetNumDimensions() == actual.GetNumDimensions());
- BOOST_CHECK(expected.GetNumElements() == actual.GetNumElements());
- BOOST_CHECK(expected.GetNumBytes() == actual.GetNumBytes());
+ CHECK(expected.GetInfo() == actual.GetInfo());
+ CHECK(expected.GetNumDimensions() == actual.GetNumDimensions());
+ CHECK(expected.GetNumElements() == actual.GetNumElements());
+ CHECK(expected.GetNumBytes() == actual.GetNumBytes());
if (expected.GetNumBytes() == actual.GetNumBytes())
{
//check data is the same byte by byte
@@ -43,7 +44,7 @@ void TestLayerVisitor::CheckConstTensors(const ConstTensor& expected, const Cons
const unsigned char* actualPtr = static_cast<const unsigned char*>(actual.GetMemoryArea());
for (unsigned int i = 0; i < expected.GetNumBytes(); i++)
{
- BOOST_CHECK(*(expectedPtr + i) == *(actualPtr + i));
+ CHECK(*(expectedPtr + i) == *(actualPtr + i));
}
}
}
@@ -51,7 +52,7 @@ void TestLayerVisitor::CheckConstTensors(const ConstTensor& expected, const Cons
void TestLayerVisitor::CheckOptionalConstTensors(const Optional<ConstTensor>& expected,
const Optional<ConstTensor>& actual)
{
- BOOST_CHECK(expected.has_value() == actual.has_value());
+ CHECK(expected.has_value() == actual.has_value());
if (expected.has_value() && actual.has_value())
{
CheckConstTensors(expected.value(), actual.value());
diff --git a/src/armnn/test/TestNameAndDescriptorLayerVisitor.cpp b/src/armnn/test/TestNameAndDescriptorLayerVisitor.cpp
index 39e254339f..39c00f4604 100644
--- a/src/armnn/test/TestNameAndDescriptorLayerVisitor.cpp
+++ b/src/armnn/test/TestNameAndDescriptorLayerVisitor.cpp
@@ -7,11 +7,13 @@
#include <armnn/Exceptions.hpp>
+#include <doctest/doctest.h>
+
namespace
{
-#define TEST_CASE_CHECK_LAYER_VISITOR_NAME_AND_DESCRIPTOR(name) \
-BOOST_AUTO_TEST_CASE(Check##name##LayerVisitorNameAndDescriptor) \
+#define TEST_CASE_CHECK_LAYER_VISITOR_NAME_AND_DESCRIPTOR(name, testName) \
+TEST_CASE(#testName) \
{ \
const char* layerName = "name##Layer"; \
armnn::name##Descriptor descriptor = GetDescriptor<armnn::name##Descriptor>(); \
@@ -21,8 +23,8 @@ BOOST_AUTO_TEST_CASE(Check##name##LayerVisitorNameAndDescriptor) \
layer->Accept(visitor); \
}
-#define TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR_AND_DESCRIPTOR(name) \
-BOOST_AUTO_TEST_CASE(Check##name##LayerVisitorNameNullptrAndDescriptor) \
+#define TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR_AND_DESCRIPTOR(name, testName) \
+TEST_CASE(#testName) \
{ \
armnn::name##Descriptor descriptor = GetDescriptor<armnn::name##Descriptor>(); \
Test##name##LayerVisitor visitor(descriptor); \
@@ -31,10 +33,6 @@ BOOST_AUTO_TEST_CASE(Check##name##LayerVisitorNameNullptrAndDescriptor) \
layer->Accept(visitor); \
}
-#define TEST_SUITE_NAME_AND_DESCRIPTOR_LAYER_VISITOR(name) \
-TEST_CASE_CHECK_LAYER_VISITOR_NAME_AND_DESCRIPTOR(name) \
-TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR_AND_DESCRIPTOR(name)
-
template<typename Descriptor> Descriptor GetDescriptor();
template<>
@@ -273,35 +271,93 @@ armnn::TransposeDescriptor GetDescriptor<armnn::TransposeDescriptor>()
} // anonymous namespace
-BOOST_AUTO_TEST_SUITE(TestNameAndDescriptorLayerVisitor)
-
-TEST_SUITE_NAME_AND_DESCRIPTOR_LAYER_VISITOR(Activation)
-TEST_SUITE_NAME_AND_DESCRIPTOR_LAYER_VISITOR(ArgMinMax)
-TEST_SUITE_NAME_AND_DESCRIPTOR_LAYER_VISITOR(DepthToSpace)
-TEST_SUITE_NAME_AND_DESCRIPTOR_LAYER_VISITOR(BatchToSpaceNd)
-TEST_SUITE_NAME_AND_DESCRIPTOR_LAYER_VISITOR(Comparison)
-TEST_SUITE_NAME_AND_DESCRIPTOR_LAYER_VISITOR(Concat)
-TEST_SUITE_NAME_AND_DESCRIPTOR_LAYER_VISITOR(ElementwiseUnary)
-TEST_SUITE_NAME_AND_DESCRIPTOR_LAYER_VISITOR(Fill)
-TEST_SUITE_NAME_AND_DESCRIPTOR_LAYER_VISITOR(Gather)
-TEST_SUITE_NAME_AND_DESCRIPTOR_LAYER_VISITOR(InstanceNormalization)
-TEST_SUITE_NAME_AND_DESCRIPTOR_LAYER_VISITOR(L2Normalization)
-TEST_SUITE_NAME_AND_DESCRIPTOR_LAYER_VISITOR(LogicalBinary)
-TEST_SUITE_NAME_AND_DESCRIPTOR_LAYER_VISITOR(LogSoftmax)
-TEST_SUITE_NAME_AND_DESCRIPTOR_LAYER_VISITOR(Mean)
-TEST_SUITE_NAME_AND_DESCRIPTOR_LAYER_VISITOR(Normalization)
-TEST_SUITE_NAME_AND_DESCRIPTOR_LAYER_VISITOR(Pad)
-TEST_SUITE_NAME_AND_DESCRIPTOR_LAYER_VISITOR(Permute)
-TEST_SUITE_NAME_AND_DESCRIPTOR_LAYER_VISITOR(Pooling2d)
-TEST_SUITE_NAME_AND_DESCRIPTOR_LAYER_VISITOR(Reshape)
-TEST_SUITE_NAME_AND_DESCRIPTOR_LAYER_VISITOR(Resize)
-TEST_SUITE_NAME_AND_DESCRIPTOR_LAYER_VISITOR(Slice)
-TEST_SUITE_NAME_AND_DESCRIPTOR_LAYER_VISITOR(Softmax)
-TEST_SUITE_NAME_AND_DESCRIPTOR_LAYER_VISITOR(SpaceToBatchNd)
-TEST_SUITE_NAME_AND_DESCRIPTOR_LAYER_VISITOR(SpaceToDepth)
-TEST_SUITE_NAME_AND_DESCRIPTOR_LAYER_VISITOR(Splitter)
-TEST_SUITE_NAME_AND_DESCRIPTOR_LAYER_VISITOR(Stack)
-TEST_SUITE_NAME_AND_DESCRIPTOR_LAYER_VISITOR(StridedSlice)
-TEST_SUITE_NAME_AND_DESCRIPTOR_LAYER_VISITOR(Transpose)
-
-BOOST_AUTO_TEST_SUITE_END()
+TEST_SUITE("TestNameAndDescriptorLayerVisitor")
+{
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_AND_DESCRIPTOR(Activation, CheckActivationLayerVisitorNameAndDescriptor)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_AND_DESCRIPTOR(ArgMinMax, CheckArgMinMaxLayerVisitorNameAndDescriptor)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_AND_DESCRIPTOR(DepthToSpace, CheckDepthToSpaceLayerVisitorNameAndDescriptor)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_AND_DESCRIPTOR(BatchToSpaceNd, CheckBatchToSpaceNdLayerVisitorNameAndDescriptor)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_AND_DESCRIPTOR(Comparison, CheckComparisonLayerVisitorNameAndDescriptor)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_AND_DESCRIPTOR(Concat, CheckConcatLayerVisitorNameAndDescriptor)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_AND_DESCRIPTOR(ElementwiseUnary, CheckElementwiseUnaryLayerVisitorNameAndDescriptor)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_AND_DESCRIPTOR(Fill, CheckFillLayerVisitorNameAndDescriptor)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_AND_DESCRIPTOR(Gather, CheckGatherLayerVisitorNameAndDescriptor)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_AND_DESCRIPTOR(InstanceNormalization,
+ CheckInstanceNormalizationLayerVisitorNameAndDescriptor)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_AND_DESCRIPTOR(L2Normalization, CheckL2NormalizationLayerVisitorNameAndDescriptor)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_AND_DESCRIPTOR(LogicalBinary, CheckLogicalBinaryLayerVisitorNameAndDescriptor)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_AND_DESCRIPTOR(LogSoftmax, CheckLogSoftmaxLayerVisitorNameAndDescriptor)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_AND_DESCRIPTOR(Mean, CheckMeanLayerVisitorNameAndDescriptor)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_AND_DESCRIPTOR(Normalization, CheckNormalizationLayerVisitorNameAndDescriptor)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_AND_DESCRIPTOR(Pad, CheckPadLayerVisitorNameAndDescriptor)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_AND_DESCRIPTOR(Permute, CheckPermuteLayerVisitorNameAndDescriptor)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_AND_DESCRIPTOR(Pooling2d, CheckPooling2dLayerVisitorNameAndDescriptor)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_AND_DESCRIPTOR(Reshape, CheckReshapeLayerVisitorNameAndDescriptor)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_AND_DESCRIPTOR(Resize, CheckResizeLayerVisitorNameAndDescriptor)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_AND_DESCRIPTOR(Slice, CheckSliceLayerVisitorNameAndDescriptor)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_AND_DESCRIPTOR(Softmax, CheckSoftmaxLayerVisitorNameAndDescriptor)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_AND_DESCRIPTOR(SpaceToBatchNd, CheckSpaceToBatchNdLayerVisitorNameAndDescriptor)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_AND_DESCRIPTOR(SpaceToDepth, CheckSpaceToDepthLayerVisitorNameAndDescriptor)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_AND_DESCRIPTOR(Splitter, CheckSplitterLayerVisitorNameAndDescriptor)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_AND_DESCRIPTOR(Stack, CheckStackLayerVisitorNameAndDescriptor)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_AND_DESCRIPTOR(StridedSlice, CheckStridedSliceLayerVisitorNameAndDescriptor)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_AND_DESCRIPTOR(Transpose, CheckTransposeLayerVisitorNameAndDescriptor)
+
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR_AND_DESCRIPTOR(Activation,
+                                                          CheckActivationLayerVisitorNameNullptrAndDescriptor)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR_AND_DESCRIPTOR(ArgMinMax,
+ CheckArgMinMaxLayerVisitorNameNullptrAndDescriptor)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR_AND_DESCRIPTOR(DepthToSpace,
+ CheckDepthToSpaceLayerVisitorNameNullptrAndDescriptor)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR_AND_DESCRIPTOR(BatchToSpaceNd,
+ CheckBatchToSpaceNdLayerVisitorNameNullptrAndDescriptor)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR_AND_DESCRIPTOR(Comparison,
+ CheckComparisonLayerVisitorNameNullptrAndDescriptor)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR_AND_DESCRIPTOR(Concat,
+ CheckConcatLayerVisitorNameNullptrAndDescriptor)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR_AND_DESCRIPTOR(ElementwiseUnary,
+ CheckElementwiseUnaryLayerVisitorNameNullptrAndDescriptor)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR_AND_DESCRIPTOR(Fill,
+ CheckFillLayerVisitorNameNullptrAndDescriptor)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR_AND_DESCRIPTOR(Gather,
+ CheckGatherLayerVisitorNameNullptrAndDescriptor)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR_AND_DESCRIPTOR(InstanceNormalization,
+ CheckInstanceNormalizationLayerVisitorNameNullptrAndDescriptor)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR_AND_DESCRIPTOR(L2Normalization,
+ CheckL2NormalizationLayerVisitorNameNullptrAndDescriptor)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR_AND_DESCRIPTOR(LogicalBinary,
+                                                          CheckLogicalBinaryLayerVisitorNameNullptrAndDescriptor)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR_AND_DESCRIPTOR(LogSoftmax,
+ CheckLogSoftmaxLayerVisitorNameNullptrAndDescriptor)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR_AND_DESCRIPTOR(Mean,
+ CheckMeanLayerVisitorNameNullptrAndDescriptor)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR_AND_DESCRIPTOR(Normalization,
+ CheckNormalizationLayerVisitorNameNullptrAndDescriptor)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR_AND_DESCRIPTOR(Pad,
+ CheckPadLayerVisitorNameNullptrAndDescriptor)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR_AND_DESCRIPTOR(Permute,
+ CheckPermuteLayerVisitorNameNullptrAndDescriptor)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR_AND_DESCRIPTOR(Pooling2d,
+ CheckPooling2dLayerVisitorNameNullptrAndDescriptor)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR_AND_DESCRIPTOR(Reshape,
+ CheckReshapeLayerVisitorNameNullptrAndDescriptor)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR_AND_DESCRIPTOR(Resize,
+ CheckResizeLayerVisitorNameNullptrAndDescriptor)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR_AND_DESCRIPTOR(Slice,
+ CheckSliceLayerVisitorNameNullptrAndDescriptor)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR_AND_DESCRIPTOR(Softmax,
+ CheckSoftmaxLayerVisitorNameNullptrAndDescriptor)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR_AND_DESCRIPTOR(SpaceToBatchNd,
+ CheckSpaceToBatchNdLayerVisitorNameNullptrAndDescriptor)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR_AND_DESCRIPTOR(SpaceToDepth,
+ CheckSpaceToDepthLayerVisitorNameNullptrAndDescriptor)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR_AND_DESCRIPTOR(Splitter,
+ CheckSplitterLayerVisitorNameNullptrAndDescriptor)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR_AND_DESCRIPTOR(Stack,
+ CheckStackLayerVisitorNameNullptrAndDescriptor)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR_AND_DESCRIPTOR(StridedSlice,
+ CheckStridedSliceLayerVisitorNameNullptrAndDescriptor)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR_AND_DESCRIPTOR(Transpose,
+ CheckTransposeLayerVisitorNameNullptrAndDescriptor)
+
+}
diff --git a/src/armnn/test/TestNameAndDescriptorLayerVisitor.hpp b/src/armnn/test/TestNameAndDescriptorLayerVisitor.hpp
index c911caa699..a3c1420388 100644
--- a/src/armnn/test/TestNameAndDescriptorLayerVisitor.hpp
+++ b/src/armnn/test/TestNameAndDescriptorLayerVisitor.hpp
@@ -6,8 +6,7 @@
#include "TestLayerVisitor.hpp"
-
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
namespace
{
diff --git a/src/armnn/test/TestNameOnlyLayerVisitor.cpp b/src/armnn/test/TestNameOnlyLayerVisitor.cpp
index 971d7eeab7..00d65f8e76 100644
--- a/src/armnn/test/TestNameOnlyLayerVisitor.cpp
+++ b/src/armnn/test/TestNameOnlyLayerVisitor.cpp
@@ -7,13 +7,13 @@
#include <Network.hpp>
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
namespace
{
-#define TEST_CASE_CHECK_LAYER_VISITOR_NAME(name) \
-BOOST_AUTO_TEST_CASE(Check##name##LayerVisitorName) \
+#define TEST_CASE_CHECK_LAYER_VISITOR_NAME(name, testName) \
+TEST_CASE(#testName) \
{ \
Test##name##LayerVisitor visitor("name##Layer"); \
armnn::NetworkImpl net; \
@@ -21,8 +21,8 @@ BOOST_AUTO_TEST_CASE(Check##name##LayerVisitorName) \
layer->Accept(visitor); \
}
-#define TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR(name) \
-BOOST_AUTO_TEST_CASE(Check##name##LayerVisitorNameNullptr) \
+#define TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR(name, testName) \
+TEST_CASE(#testName) \
{ \
Test##name##LayerVisitor visitor; \
armnn::NetworkImpl net; \
@@ -30,26 +30,35 @@ BOOST_AUTO_TEST_CASE(Check##name##LayerVisitorNameNullptr) \
layer->Accept(visitor); \
}
-#define TEST_SUITE_NAME_ONLY_LAYER_VISITOR(name) \
-TEST_CASE_CHECK_LAYER_VISITOR_NAME(name) \
-TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR(name)
-
} // anonymous namespace
-BOOST_AUTO_TEST_SUITE(TestNameOnlyLayerVisitor)
-
-TEST_SUITE_NAME_ONLY_LAYER_VISITOR(Addition)
-TEST_SUITE_NAME_ONLY_LAYER_VISITOR(Dequantize)
-TEST_SUITE_NAME_ONLY_LAYER_VISITOR(Division)
-TEST_SUITE_NAME_ONLY_LAYER_VISITOR(Floor)
-TEST_SUITE_NAME_ONLY_LAYER_VISITOR(Maximum)
-TEST_SUITE_NAME_ONLY_LAYER_VISITOR(Merge)
-TEST_SUITE_NAME_ONLY_LAYER_VISITOR(Minimum)
-TEST_SUITE_NAME_ONLY_LAYER_VISITOR(Multiplication)
-TEST_SUITE_NAME_ONLY_LAYER_VISITOR(Prelu)
-TEST_SUITE_NAME_ONLY_LAYER_VISITOR(Quantize)
-TEST_SUITE_NAME_ONLY_LAYER_VISITOR(Rank)
-TEST_SUITE_NAME_ONLY_LAYER_VISITOR(Subtraction)
-TEST_SUITE_NAME_ONLY_LAYER_VISITOR(Switch)
-
-BOOST_AUTO_TEST_SUITE_END()
+TEST_SUITE("TestNameOnlyLayerVisitor")
+{
+TEST_CASE_CHECK_LAYER_VISITOR_NAME(Addition, CheckAdditionLayerVisitorName)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR(Addition, CheckAdditionLayerVisitorNameNullptr)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME(Dequantize, CheckDequantizeLayerVisitorName)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR(Dequantize, CheckDequantizeLayerVisitorNameNullptr)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME(Division, CheckDivisionLayerVisitorName)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR(Division, CheckDivisionLayerVisitorNameNullptr)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME(Floor, CheckFloorLayerVisitorName)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR(Floor, CheckFloorLayerVisitorNameNullptr)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME(Maximum, CheckMaximumLayerVisitorName)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR(Maximum, CheckMaximumLayerVisitorNameNullptr)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME(Merge, CheckMergeLayerVisitorName)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR(Merge, CheckMergeLayerVisitorNameNullptr)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME(Minimum, CheckMinimumLayerVisitorName)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR(Minimum, CheckMinimumLayerVisitorNameNullptr)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME(Multiplication, CheckMultiplicationLayerVisitorName)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR(Multiplication, CheckMultiplicationLayerVisitorNameNullptr)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME(Prelu, CheckPreluLayerVisitorName)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR(Prelu, CheckPreluLayerVisitorNameNullptr)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME(Quantize, CheckQuantizeLayerVisitorName)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR(Quantize, CheckQuantizeLayerVisitorNameNullptr)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME(Rank, CheckRankLayerVisitorName)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR(Rank, CheckRankLayerVisitorNameNullptr)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME(Subtraction, CheckSubtractionLayerVisitorName)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR(Subtraction, CheckSubtractionLayerVisitorNameNullptr)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME(Switch, CheckSwitchLayerVisitorName)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR(Switch, CheckSwitchLayerVisitorNameNullptr)
+
+}
diff --git a/src/armnn/test/UnitTests.cpp b/src/armnn/test/UnitTests.cpp
index a587e9bc7b..cf532a76fd 100644
--- a/src/armnn/test/UnitTests.cpp
+++ b/src/armnn/test/UnitTests.cpp
@@ -2,12 +2,13 @@
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
-#define BOOST_TEST_MODULE UnitTests
-#include <boost/test/unit_test.hpp>
+
+#ifndef DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN
+#define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN
+#endif
+#include <doctest/doctest.h>
#include "UnitTests.hpp"
-#include <armnn/Logging.hpp>
-#include <armnn/utility/NumericCast.hpp>
struct ConfigureLoggingFixture
{
@@ -17,21 +18,20 @@ struct ConfigureLoggingFixture
}
};
-BOOST_GLOBAL_FIXTURE(ConfigureLoggingFixture);
-BOOST_AUTO_TEST_SUITE(LoggerSuite)
-BOOST_AUTO_TEST_CASE(LoggerTest)
+TEST_SUITE("LoggerSuite")
+{
+TEST_CASE_FIXTURE(ConfigureLoggingFixture, "LoggerTest")
{
std::stringstream ss;
-
{
struct StreamRedirector
{
public:
StreamRedirector(std::ostream& stream, std::streambuf* newStreamBuffer)
- : m_Stream(stream)
- , m_BackupBuffer(m_Stream.rdbuf(newStreamBuffer))
+ : m_Stream(stream)
+ , m_BackupBuffer(m_Stream.rdbuf(newStreamBuffer))
{}
~StreamRedirector() { m_Stream.rdbuf(m_BackupBuffer); }
@@ -40,14 +40,12 @@ BOOST_AUTO_TEST_CASE(LoggerTest)
std::streambuf* m_BackupBuffer;
};
-
StreamRedirector redirect(std::cout, ss.rdbuf());
using namespace armnn;
SetLogFilter(LogSeverity::Trace);
SetAllLoggingSinks(true, false, false);
-
ARMNN_LOG(trace) << "My trace message; " << -2;
ARMNN_LOG(debug) << "My debug message; " << -1;
ARMNN_LOG(info) << "My info message; " << 0;
@@ -56,15 +54,14 @@ BOOST_AUTO_TEST_CASE(LoggerTest)
ARMNN_LOG(fatal) << "My fatal message; " << 3;
SetLogFilter(LogSeverity::Fatal);
-
}
- BOOST_CHECK(ss.str().find("Trace: My trace message; -2") != std::string::npos);
- BOOST_CHECK(ss.str().find("Debug: My debug message; -1") != std::string::npos);
- BOOST_CHECK(ss.str().find("Info: My info message; 0") != std::string::npos);
- BOOST_CHECK(ss.str().find("Warning: My warning message; 1") != std::string::npos);
- BOOST_CHECK(ss.str().find("Error: My error message; 2") != std::string::npos);
- BOOST_CHECK(ss.str().find("Fatal: My fatal message; 3") != std::string::npos);
+ CHECK(ss.str().find("Trace: My trace message; -2") != std::string::npos);
+ CHECK(ss.str().find("Debug: My debug message; -1") != std::string::npos);
+ CHECK(ss.str().find("Info: My info message; 0") != std::string::npos);
+ CHECK(ss.str().find("Warning: My warning message; 1") != std::string::npos);
+ CHECK(ss.str().find("Error: My error message; 2") != std::string::npos);
+ CHECK(ss.str().find("Fatal: My fatal message; 3") != std::string::npos);
}
-BOOST_AUTO_TEST_SUITE_END()
+} \ No newline at end of file
diff --git a/src/armnn/test/UnitTests.hpp b/src/armnn/test/UnitTests.hpp
index bb91c4d055..e4a8b96b52 100644
--- a/src/armnn/test/UnitTests.hpp
+++ b/src/armnn/test/UnitTests.hpp
@@ -14,7 +14,7 @@
#include "TensorHelpers.hpp"
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
inline void ConfigureLoggingTest()
{
@@ -28,7 +28,7 @@ inline void ConfigureLoggingTest()
// using FactoryType = armnn::ClWorkloadFactory;
// using FactoryType = armnn::NeonWorkloadFactory;
-/// Executes BOOST_TEST on CompareTensors() return value so that the predicate_result message is reported.
+/// Executes CHECK_MESSAGE on CompareTensors() return value so that the predicate_result message is reported.
/// If the test reports itself as not supported then the tensors are not compared.
/// Additionally this checks that the supportedness reported by the test matches the name of the test.
/// Unsupported tests must be 'tagged' by including "UNSUPPORTED" in their name.
@@ -40,8 +40,8 @@ template <typename T, std::size_t n>
void CompareTestResultIfSupported(const std::string& testName, const LayerTestResult<T, n>& testResult)
{
bool testNameIndicatesUnsupported = testName.find("UNSUPPORTED") != std::string::npos;
- BOOST_CHECK_MESSAGE(testNameIndicatesUnsupported != testResult.m_Supported,
- "The test name does not match the supportedness it is reporting");
+ CHECK_MESSAGE(testNameIndicatesUnsupported != testResult.m_Supported,
+ "The test name does not match the supportedness it is reporting");
if (testResult.m_Supported)
{
auto result = CompareTensors(testResult.m_ActualData,
@@ -49,7 +49,7 @@ void CompareTestResultIfSupported(const std::string& testName, const LayerTestRe
testResult.m_ActualShape,
testResult.m_ExpectedShape,
testResult.m_CompareBoolean);
- BOOST_TEST(result.m_Result, result.m_Message.str());
+ CHECK_MESSAGE(result.m_Result, result.m_Message.str());
}
}
@@ -59,15 +59,15 @@ void CompareTestResultIfSupported(const std::string& testName, const std::vector
bool testNameIndicatesUnsupported = testName.find("UNSUPPORTED") != std::string::npos;
for (unsigned int i = 0; i < testResult.size(); ++i)
{
- BOOST_CHECK_MESSAGE(testNameIndicatesUnsupported != testResult[i].m_Supported,
- "The test name does not match the supportedness it is reporting");
+ CHECK_MESSAGE(testNameIndicatesUnsupported != testResult[i].m_Supported,
+ "The test name does not match the supportedness it is reporting");
if (testResult[i].m_Supported)
{
auto result = CompareTensors(testResult[i].m_ActualData,
testResult[i].m_ExpectedData,
testResult[i].m_ActualShape,
testResult[i].m_ExpectedShape);
- BOOST_TEST(result.m_Result, result.m_Message.str());
+ CHECK_MESSAGE(result.m_Result, result.m_Message.str());
}
}
}
@@ -106,19 +106,31 @@ void RunTestFunctionUsingTensorHandleFactory(const char* testName, TFuncPtr test
}
#define ARMNN_SIMPLE_TEST_CASE(TestName, TestFunction) \
- BOOST_AUTO_TEST_CASE(TestName) \
+ TEST_CASE(#TestName) \
{ \
TestFunction(); \
}
#define ARMNN_AUTO_TEST_CASE(TestName, TestFunction, ...) \
- BOOST_AUTO_TEST_CASE(TestName) \
+ TEST_CASE(#TestName) \
+ { \
+ RunTestFunction<FactoryType>(#TestName, &TestFunction, ##__VA_ARGS__); \
+ }
+
+#define ARMNN_AUTO_TEST_FIXTURE(TestName, Fixture, TestFunction, ...) \
+ TEST_CASE_FIXTURE(Fixture, #TestName) \
{ \
RunTestFunction<FactoryType>(#TestName, &TestFunction, ##__VA_ARGS__); \
}
#define ARMNN_AUTO_TEST_CASE_WITH_THF(TestName, TestFunction, ...) \
- BOOST_AUTO_TEST_CASE(TestName) \
+ TEST_CASE(#TestName) \
+ { \
+ RunTestFunctionUsingTensorHandleFactory<FactoryType>(#TestName, &TestFunction, ##__VA_ARGS__); \
+ }
+
+#define ARMNN_AUTO_TEST_FIXTURE_WITH_THF(TestName, Fixture, TestFunction, ...) \
+ TEST_CASE_FIXTURE(Fixture, #TestName) \
{ \
RunTestFunctionUsingTensorHandleFactory<FactoryType>(#TestName, &TestFunction, ##__VA_ARGS__); \
}
@@ -152,25 +164,25 @@ void CompareRefTestFunctionUsingTensorHandleFactory(const char* testName, TFuncP
}
#define ARMNN_COMPARE_REF_AUTO_TEST_CASE(TestName, TestFunction, ...) \
- BOOST_AUTO_TEST_CASE(TestName) \
+ TEST_CASE(#TestName) \
{ \
CompareRefTestFunction<FactoryType>(#TestName, &TestFunction, ##__VA_ARGS__); \
}
#define ARMNN_COMPARE_REF_AUTO_TEST_CASE_WITH_THF(TestName, TestFunction, ...) \
- BOOST_AUTO_TEST_CASE(TestName) \
+ TEST_CASE(#TestName) \
{ \
CompareRefTestFunctionUsingTensorHandleFactory<FactoryType>(#TestName, &TestFunction, ##__VA_ARGS__); \
}
#define ARMNN_COMPARE_REF_FIXTURE_TEST_CASE(TestName, Fixture, TestFunction, ...) \
- BOOST_FIXTURE_TEST_CASE(TestName, Fixture) \
+ TEST_CASE_FIXTURE(Fixture, #TestName) \
{ \
CompareRefTestFunction<FactoryType>(#TestName, &TestFunction, ##__VA_ARGS__); \
}
#define ARMNN_COMPARE_REF_FIXTURE_TEST_CASE_WITH_THF(TestName, Fixture, TestFunction, ...) \
- BOOST_FIXTURE_TEST_CASE(TestName, Fixture) \
+ TEST_CASE_FIXTURE(Fixture, #TestName) \
{ \
CompareRefTestFunctionUsingTensorHandleFactory<FactoryType>(#TestName, &TestFunction, ##__VA_ARGS__); \
}
diff --git a/src/armnn/test/UtilityTests.cpp b/src/armnn/test/UtilityTests.cpp
index bad6c2250b..b3b6c3bb3d 100644
--- a/src/armnn/test/UtilityTests.cpp
+++ b/src/armnn/test/UtilityTests.cpp
@@ -3,7 +3,7 @@
// SPDX-License-Identifier: MIT
//
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
#define ARMNN_POLYMORPHIC_CAST_TESTABLE
#define ARMNN_NUMERIC_CAST_TESTABLE
@@ -17,9 +17,9 @@
#include <limits>
// Tests of include/Utility files
-BOOST_AUTO_TEST_SUITE(UtilityTests)
-
-BOOST_AUTO_TEST_CASE(PolymorphicDowncast)
+TEST_SUITE("UtilityTests")
+{
+TEST_CASE("PolymorphicDowncast")
{
using namespace armnn;
class Base
@@ -44,19 +44,19 @@ BOOST_AUTO_TEST_CASE(PolymorphicDowncast)
Child1 child1;
Base* base1 = &child1;
auto ptr1 = dynamic_cast<Child1*>(base1);
- BOOST_CHECK(ptr1 != nullptr);
- BOOST_CHECK_NO_THROW(armnn::PolymorphicDowncast<Child1*>(base1));
- BOOST_CHECK(armnn::PolymorphicDowncast<Child1*>(base1) == ptr1);
+ CHECK(ptr1 != nullptr);
+ CHECK_NOTHROW(armnn::PolymorphicDowncast<Child1*>(base1));
+ CHECK(armnn::PolymorphicDowncast<Child1*>(base1) == ptr1);
auto ptr2 = dynamic_cast<Child2*>(base1);
- BOOST_CHECK(ptr2 == nullptr);
- BOOST_CHECK_THROW(armnn::PolymorphicDowncast<Child2*>(base1), std::bad_cast);
+ CHECK(ptr2 == nullptr);
+ CHECK_THROWS_AS(armnn::PolymorphicDowncast<Child2*>(base1), std::bad_cast);
armnn::IgnoreUnused(ptr1, ptr2);
}
-BOOST_AUTO_TEST_CASE(PolymorphicPointerDowncast_SharedPointer)
+TEST_CASE("PolymorphicPointerDowncast_SharedPointer")
{
using namespace armnn;
class Base
@@ -81,19 +81,19 @@ BOOST_AUTO_TEST_CASE(PolymorphicPointerDowncast_SharedPointer)
std::shared_ptr<Base> base1 = std::make_shared<Child1>();
std::shared_ptr<Child1> ptr1 = std::static_pointer_cast<Child1>(base1);
- BOOST_CHECK(ptr1);
- BOOST_CHECK_NO_THROW(armnn::PolymorphicPointerDowncast<Child1>(base1));
- BOOST_CHECK(armnn::PolymorphicPointerDowncast<Child1>(base1) == ptr1);
+ CHECK(ptr1);
+ CHECK_NOTHROW(armnn::PolymorphicPointerDowncast<Child1>(base1));
+ CHECK(armnn::PolymorphicPointerDowncast<Child1>(base1) == ptr1);
auto ptr2 = std::dynamic_pointer_cast<Child2>(base1);
- BOOST_CHECK(!ptr2);
- BOOST_CHECK_THROW(armnn::PolymorphicPointerDowncast<Child2>(base1), std::bad_cast);
+ CHECK(!ptr2);
+ CHECK_THROWS_AS(armnn::PolymorphicPointerDowncast<Child2>(base1), std::bad_cast);
armnn::IgnoreUnused(ptr1, ptr2);
}
-BOOST_AUTO_TEST_CASE(PolymorphicPointerDowncast_BuildInPointer)
+TEST_CASE("PolymorphicPointerDowncast_BuildInPointer")
{
using namespace armnn;
class Base
@@ -118,68 +118,68 @@ BOOST_AUTO_TEST_CASE(PolymorphicPointerDowncast_BuildInPointer)
Child1 child1;
Base* base1 = &child1;
auto ptr1 = dynamic_cast<Child1*>(base1);
- BOOST_CHECK(ptr1 != nullptr);
- BOOST_CHECK_NO_THROW(armnn::PolymorphicPointerDowncast<Child1>(base1));
- BOOST_CHECK(armnn::PolymorphicPointerDowncast<Child1>(base1) == ptr1);
+ CHECK(ptr1 != nullptr);
+ CHECK_NOTHROW(armnn::PolymorphicPointerDowncast<Child1>(base1));
+ CHECK(armnn::PolymorphicPointerDowncast<Child1>(base1) == ptr1);
auto ptr2 = dynamic_cast<Child2*>(base1);
- BOOST_CHECK(ptr2 == nullptr);
- BOOST_CHECK_THROW(armnn::PolymorphicPointerDowncast<Child2>(base1), std::bad_cast);
+ CHECK(ptr2 == nullptr);
+ CHECK_THROWS_AS(armnn::PolymorphicPointerDowncast<Child2>(base1), std::bad_cast);
armnn::IgnoreUnused(ptr1, ptr2);
}
-BOOST_AUTO_TEST_CASE(NumericCast)
+TEST_CASE("NumericCast")
{
using namespace armnn;
// To 8 bit
- BOOST_CHECK_THROW(numeric_cast<unsigned char>(-1), std::bad_cast);
- BOOST_CHECK_THROW(numeric_cast<unsigned char>(1 << 8), std::bad_cast);
- BOOST_CHECK_THROW(numeric_cast<unsigned char>(1L << 16), std::bad_cast);
- BOOST_CHECK_THROW(numeric_cast<unsigned char>(1LL << 32), std::bad_cast);
+ CHECK_THROWS_AS(numeric_cast<unsigned char>(-1), std::bad_cast);
+ CHECK_THROWS_AS(numeric_cast<unsigned char>(1 << 8), std::bad_cast);
+ CHECK_THROWS_AS(numeric_cast<unsigned char>(1L << 16), std::bad_cast);
+ CHECK_THROWS_AS(numeric_cast<unsigned char>(1LL << 32), std::bad_cast);
- BOOST_CHECK_THROW(numeric_cast<signed char>((1L << 8)*-1), std::bad_cast);
- BOOST_CHECK_THROW(numeric_cast<signed char>((1L << 15)*-1), std::bad_cast);
- BOOST_CHECK_THROW(numeric_cast<signed char>((1LL << 31)*-1), std::bad_cast);
+ CHECK_THROWS_AS(numeric_cast<signed char>((1L << 8)*-1), std::bad_cast);
+ CHECK_THROWS_AS(numeric_cast<signed char>((1L << 15)*-1), std::bad_cast);
+ CHECK_THROWS_AS(numeric_cast<signed char>((1LL << 31)*-1), std::bad_cast);
- BOOST_CHECK_NO_THROW(numeric_cast<unsigned char>(1U));
- BOOST_CHECK_NO_THROW(numeric_cast<unsigned char>(1L));
- BOOST_CHECK_NO_THROW(numeric_cast<signed char>(-1));
- BOOST_CHECK_NO_THROW(numeric_cast<signed char>(-1L));
- BOOST_CHECK_NO_THROW(numeric_cast<signed char>((1 << 7)*-1));
+ CHECK_NOTHROW(numeric_cast<unsigned char>(1U));
+ CHECK_NOTHROW(numeric_cast<unsigned char>(1L));
+ CHECK_NOTHROW(numeric_cast<signed char>(-1));
+ CHECK_NOTHROW(numeric_cast<signed char>(-1L));
+ CHECK_NOTHROW(numeric_cast<signed char>((1 << 7)*-1));
// To 16 bit
- BOOST_CHECK_THROW(numeric_cast<uint16_t>(-1), std::bad_cast);
- BOOST_CHECK_THROW(numeric_cast<uint16_t>(1L << 16), std::bad_cast);
- BOOST_CHECK_THROW(numeric_cast<uint16_t>(1LL << 32), std::bad_cast);
+ CHECK_THROWS_AS(numeric_cast<uint16_t>(-1), std::bad_cast);
+ CHECK_THROWS_AS(numeric_cast<uint16_t>(1L << 16), std::bad_cast);
+ CHECK_THROWS_AS(numeric_cast<uint16_t>(1LL << 32), std::bad_cast);
- BOOST_CHECK_THROW(numeric_cast<int16_t>(1L << 15), std::bad_cast);
- BOOST_CHECK_THROW(numeric_cast<int16_t>(1LL << 31), std::bad_cast);
+ CHECK_THROWS_AS(numeric_cast<int16_t>(1L << 15), std::bad_cast);
+ CHECK_THROWS_AS(numeric_cast<int16_t>(1LL << 31), std::bad_cast);
- BOOST_CHECK_NO_THROW(numeric_cast<uint16_t>(1L << 8));
- BOOST_CHECK_NO_THROW(numeric_cast<int16_t>(1L << 7));
- BOOST_CHECK_NO_THROW(numeric_cast<int16_t>((1L << 15)*-1));
+ CHECK_NOTHROW(numeric_cast<uint16_t>(1L << 8));
+ CHECK_NOTHROW(numeric_cast<int16_t>(1L << 7));
+ CHECK_NOTHROW(numeric_cast<int16_t>((1L << 15)*-1));
- BOOST_CHECK_NO_THROW(numeric_cast<int16_t>(1U << 8));
- BOOST_CHECK_NO_THROW(numeric_cast<int16_t>(1U << 14));
+ CHECK_NOTHROW(numeric_cast<int16_t>(1U << 8));
+ CHECK_NOTHROW(numeric_cast<int16_t>(1U << 14));
// To 32 bit
- BOOST_CHECK_NO_THROW(numeric_cast<uint32_t>(1));
- BOOST_CHECK_NO_THROW(numeric_cast<uint32_t>(1 << 8));
- BOOST_CHECK_NO_THROW(numeric_cast<uint32_t>(1L << 16));
- BOOST_CHECK_NO_THROW(numeric_cast<uint32_t>(1LL << 31));
+ CHECK_NOTHROW(numeric_cast<uint32_t>(1));
+ CHECK_NOTHROW(numeric_cast<uint32_t>(1 << 8));
+ CHECK_NOTHROW(numeric_cast<uint32_t>(1L << 16));
+ CHECK_NOTHROW(numeric_cast<uint32_t>(1LL << 31));
- BOOST_CHECK_NO_THROW(numeric_cast<int32_t>(-1));
- BOOST_CHECK_NO_THROW(numeric_cast<int32_t>((1L << 8)*-1));
- BOOST_CHECK_NO_THROW(numeric_cast<int32_t>((1L << 16)*-1));
- BOOST_CHECK_NO_THROW(numeric_cast<int32_t>((1LL << 31)*-1));
+ CHECK_NOTHROW(numeric_cast<int32_t>(-1));
+ CHECK_NOTHROW(numeric_cast<int32_t>((1L << 8)*-1));
+ CHECK_NOTHROW(numeric_cast<int32_t>((1L << 16)*-1));
+ CHECK_NOTHROW(numeric_cast<int32_t>((1LL << 31)*-1));
- BOOST_CHECK_NO_THROW(numeric_cast<int32_t>(1U));
- BOOST_CHECK_NO_THROW(numeric_cast<int32_t>(1U << 8));
- BOOST_CHECK_NO_THROW(numeric_cast<int32_t>(1U << 16));
- BOOST_CHECK_NO_THROW(numeric_cast<int32_t>(1U << 30));
+ CHECK_NOTHROW(numeric_cast<int32_t>(1U));
+ CHECK_NOTHROW(numeric_cast<int32_t>(1U << 8));
+ CHECK_NOTHROW(numeric_cast<int32_t>(1U << 16));
+ CHECK_NOTHROW(numeric_cast<int32_t>(1U << 30));
float float_max = std::numeric_limits<float>::max();
float float_min = std::numeric_limits<float>::lowest();
@@ -195,59 +195,59 @@ BOOST_AUTO_TEST_CASE(NumericCast)
auto double_max = std::numeric_limits<double>::max();
// Float to signed integer
- BOOST_CHECK_NO_THROW(numeric_cast<int32_t>(1.324f));
- BOOST_CHECK(1 == numeric_cast<int32_t>(1.324f));
- BOOST_CHECK_NO_THROW(numeric_cast<int32_t>(-1.0f));
- BOOST_CHECK(-1 == numeric_cast<int32_t>(-1.0f));
+ CHECK_NOTHROW(numeric_cast<int32_t>(1.324f));
+ CHECK(1 == numeric_cast<int32_t>(1.324f));
+ CHECK_NOTHROW(numeric_cast<int32_t>(-1.0f));
+ CHECK(-1 == numeric_cast<int32_t>(-1.0f));
- BOOST_CHECK_NO_THROW(numeric_cast<int8_t>(static_cast<float>(int8_max)));
- BOOST_CHECK_NO_THROW(numeric_cast<int16_t>(static_cast<float>(int16_max)));
- BOOST_CHECK_NO_THROW(numeric_cast<int32_t>(static_cast<float>(int32_max)));
+ CHECK_NOTHROW(numeric_cast<int8_t>(static_cast<float>(int8_max)));
+ CHECK_NOTHROW(numeric_cast<int16_t>(static_cast<float>(int16_max)));
+ CHECK_NOTHROW(numeric_cast<int32_t>(static_cast<float>(int32_max)));
- BOOST_CHECK_THROW(numeric_cast<int8_t>(float_max), std::bad_cast);
- BOOST_CHECK_THROW(numeric_cast<int16_t>(float_max), std::bad_cast);
- BOOST_CHECK_THROW(numeric_cast<int32_t>(float_max), std::bad_cast);
+ CHECK_THROWS_AS(numeric_cast<int8_t>(float_max), std::bad_cast);
+ CHECK_THROWS_AS(numeric_cast<int16_t>(float_max), std::bad_cast);
+ CHECK_THROWS_AS(numeric_cast<int32_t>(float_max), std::bad_cast);
- BOOST_CHECK_THROW(numeric_cast<int8_t>(float_min), std::bad_cast);
- BOOST_CHECK_THROW(numeric_cast<int16_t>(float_min), std::bad_cast);
- BOOST_CHECK_THROW(numeric_cast<int32_t>(float_min), std::bad_cast);
+ CHECK_THROWS_AS(numeric_cast<int8_t>(float_min), std::bad_cast);
+ CHECK_THROWS_AS(numeric_cast<int16_t>(float_min), std::bad_cast);
+ CHECK_THROWS_AS(numeric_cast<int32_t>(float_min), std::bad_cast);
// Signed integer to float
- BOOST_CHECK_NO_THROW(numeric_cast<float>(1));
- BOOST_CHECK(1.0 == numeric_cast<float>(1));
- BOOST_CHECK_NO_THROW(numeric_cast<float>(-1));
- BOOST_CHECK(-1.0 == numeric_cast<float>(-1));
+ CHECK_NOTHROW(numeric_cast<float>(1));
+ CHECK(1.0 == numeric_cast<float>(1));
+ CHECK_NOTHROW(numeric_cast<float>(-1));
+ CHECK(-1.0 == numeric_cast<float>(-1));
- BOOST_CHECK_NO_THROW(numeric_cast<float>(int8_max));
- BOOST_CHECK_NO_THROW(numeric_cast<float>(int16_max));
- BOOST_CHECK_NO_THROW(numeric_cast<float>(int32_max));
+ CHECK_NOTHROW(numeric_cast<float>(int8_max));
+ CHECK_NOTHROW(numeric_cast<float>(int16_max));
+ CHECK_NOTHROW(numeric_cast<float>(int32_max));
- BOOST_CHECK_NO_THROW(numeric_cast<float>(int8_min));
- BOOST_CHECK_NO_THROW(numeric_cast<float>(int16_min));
- BOOST_CHECK_NO_THROW(numeric_cast<float>(int32_min));
+ CHECK_NOTHROW(numeric_cast<float>(int8_min));
+ CHECK_NOTHROW(numeric_cast<float>(int16_min));
+ CHECK_NOTHROW(numeric_cast<float>(int32_min));
// Unsigned integer to float
- BOOST_CHECK_NO_THROW(numeric_cast<float>(1U));
- BOOST_CHECK(1.0 == numeric_cast<float>(1U));
+ CHECK_NOTHROW(numeric_cast<float>(1U));
+ CHECK(1.0 == numeric_cast<float>(1U));
- BOOST_CHECK_NO_THROW(numeric_cast<float>(uint8_max));
- BOOST_CHECK_NO_THROW(numeric_cast<float>(uint16_max));
- BOOST_CHECK_NO_THROW(numeric_cast<float>(uint32_max));
+ CHECK_NOTHROW(numeric_cast<float>(uint8_max));
+ CHECK_NOTHROW(numeric_cast<float>(uint16_max));
+ CHECK_NOTHROW(numeric_cast<float>(uint32_max));
// Float to unsigned integer
- BOOST_CHECK_NO_THROW(numeric_cast<uint32_t>(1.43243f));
- BOOST_CHECK(1 == numeric_cast<uint32_t>(1.43243f));
+ CHECK_NOTHROW(numeric_cast<uint32_t>(1.43243f));
+ CHECK(1 == numeric_cast<uint32_t>(1.43243f));
- BOOST_CHECK_THROW(numeric_cast<uint32_t>(-1.1f), std::bad_cast);
- BOOST_CHECK_THROW(numeric_cast<uint32_t>(float_max), std::bad_cast);
+ CHECK_THROWS_AS(numeric_cast<uint32_t>(-1.1f), std::bad_cast);
+ CHECK_THROWS_AS(numeric_cast<uint32_t>(float_max), std::bad_cast);
// Double checks
- BOOST_CHECK_THROW(numeric_cast<int32_t>(double_max), std::bad_cast);
- BOOST_CHECK_THROW(numeric_cast<int32_t>(double_max), std::bad_cast);
- BOOST_CHECK_THROW(numeric_cast<float>(double_max), std::bad_cast);
- BOOST_CHECK_NO_THROW(numeric_cast<double>(int32_max));
- BOOST_CHECK_NO_THROW(numeric_cast<long double>(int32_max));
+ CHECK_THROWS_AS(numeric_cast<int32_t>(double_max), std::bad_cast);
+ CHECK_THROWS_AS(numeric_cast<int32_t>(double_max), std::bad_cast);
+ CHECK_THROWS_AS(numeric_cast<float>(double_max), std::bad_cast);
+ CHECK_NOTHROW(numeric_cast<double>(int32_max));
+ CHECK_NOTHROW(numeric_cast<long double>(int32_max));
}
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnn/test/UtilsTests.cpp b/src/armnn/test/UtilsTests.cpp
index f2ca95d7bd..1599d0cd35 100644
--- a/src/armnn/test/UtilsTests.cpp
+++ b/src/armnn/test/UtilsTests.cpp
@@ -2,7 +2,7 @@
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
#include <armnn/BackendHelper.hpp>
@@ -15,52 +15,52 @@
#include <Graph.hpp>
#include <ResolveType.hpp>
-BOOST_AUTO_TEST_SUITE(Utils)
-
-BOOST_AUTO_TEST_CASE(DataTypeSize)
+TEST_SUITE("Utils")
+{
+TEST_CASE("DataTypeSize")
{
- BOOST_TEST(armnn::GetDataTypeSize(armnn::DataType::Float32) == 4);
- BOOST_TEST(armnn::GetDataTypeSize(armnn::DataType::QAsymmU8) == 1);
- BOOST_TEST(armnn::GetDataTypeSize(armnn::DataType::Signed32) == 4);
- BOOST_TEST(armnn::GetDataTypeSize(armnn::DataType::Boolean) == 1);
+ CHECK(armnn::GetDataTypeSize(armnn::DataType::Float32) == 4);
+ CHECK(armnn::GetDataTypeSize(armnn::DataType::QAsymmU8) == 1);
+ CHECK(armnn::GetDataTypeSize(armnn::DataType::Signed32) == 4);
+ CHECK(armnn::GetDataTypeSize(armnn::DataType::Boolean) == 1);
}
-BOOST_AUTO_TEST_CASE(PermuteDescriptorWithTooManyMappings)
+TEST_CASE("PermuteDescriptorWithTooManyMappings")
{
- BOOST_CHECK_THROW(armnn::PermuteDescriptor({ 0u, 1u, 2u, 3u, 4u, 5u }), armnn::InvalidArgumentException);
+ CHECK_THROWS_AS(armnn::PermuteDescriptor({ 0u, 1u, 2u, 3u, 4u, 5u }), armnn::InvalidArgumentException);
}
-BOOST_AUTO_TEST_CASE(PermuteDescriptorWithInvalidMappings1d)
+TEST_CASE("PermuteDescriptorWithInvalidMappings1d")
{
- BOOST_CHECK_THROW(armnn::PermuteDescriptor({ 1u }), armnn::InvalidArgumentException);
+ CHECK_THROWS_AS(armnn::PermuteDescriptor({ 1u }), armnn::InvalidArgumentException);
}
-BOOST_AUTO_TEST_CASE(PermuteDescriptorWithInvalidMappings2d)
+TEST_CASE("PermuteDescriptorWithInvalidMappings2d")
{
- BOOST_CHECK_THROW(armnn::PermuteDescriptor({ 2u, 0u }), armnn::InvalidArgumentException);
+ CHECK_THROWS_AS(armnn::PermuteDescriptor({ 2u, 0u }), armnn::InvalidArgumentException);
}
-BOOST_AUTO_TEST_CASE(PermuteDescriptorWithInvalidMappings3d)
+TEST_CASE("PermuteDescriptorWithInvalidMappings3d")
{
- BOOST_CHECK_THROW(armnn::PermuteDescriptor({ 0u, 3u, 1u }), armnn::InvalidArgumentException);
+ CHECK_THROWS_AS(armnn::PermuteDescriptor({ 0u, 3u, 1u }), armnn::InvalidArgumentException);
}
-BOOST_AUTO_TEST_CASE(PermuteDescriptorWithInvalidMappings4d)
+TEST_CASE("PermuteDescriptorWithInvalidMappings4d")
{
- BOOST_CHECK_THROW(armnn::PermuteDescriptor({ 0u, 1u, 2u, 4u }), armnn::InvalidArgumentException);
+ CHECK_THROWS_AS(armnn::PermuteDescriptor({ 0u, 1u, 2u, 4u }), armnn::InvalidArgumentException);
}
-BOOST_AUTO_TEST_CASE(PermuteDescriptorWithInvalidMappings5d)
+TEST_CASE("PermuteDescriptorWithInvalidMappings5d")
{
- BOOST_CHECK_THROW(armnn::PermuteDescriptor({ 0u, 1u, 2u, 3u, 5u }), armnn::InvalidArgumentException);
+ CHECK_THROWS_AS(armnn::PermuteDescriptor({ 0u, 1u, 2u, 3u, 5u }), armnn::InvalidArgumentException);
}
-BOOST_AUTO_TEST_CASE(PermuteDescriptorWithDuplicatedMappings)
+TEST_CASE("PermuteDescriptorWithDuplicatedMappings")
{
- BOOST_CHECK_THROW(armnn::PermuteDescriptor({ 1u, 1u, 0u }), armnn::InvalidArgumentException);
+ CHECK_THROWS_AS(armnn::PermuteDescriptor({ 1u, 1u, 0u }), armnn::InvalidArgumentException);
}
-BOOST_AUTO_TEST_CASE(HalfType)
+TEST_CASE("HalfType")
{
using namespace half_float::literal;
armnn::Half a = 1.0_h;
@@ -69,25 +69,25 @@ BOOST_AUTO_TEST_CASE(HalfType)
armnn::Half c(b);
// Test half type
- BOOST_CHECK_EQUAL(a, b);
- BOOST_CHECK_EQUAL(sizeof(c), 2);
+ CHECK_EQ(a, b);
+ CHECK_EQ(sizeof(c), 2);
// Test half type is floating point type
- BOOST_CHECK(std::is_floating_point<armnn::Half>::value);
+ CHECK(std::is_floating_point<armnn::Half>::value);
// Test utility function returns correct type.
using ResolvedType = armnn::ResolveType<armnn::DataType::Float16>;
constexpr bool isHalfType = std::is_same<armnn::Half, ResolvedType>::value;
- BOOST_CHECK(isHalfType);
+ CHECK(isHalfType);
//Test utility functions return correct size
- BOOST_CHECK(GetDataTypeSize(armnn::DataType::Float16) == 2);
+ CHECK(GetDataTypeSize(armnn::DataType::Float16) == 2);
//Test utility functions return correct name
- BOOST_CHECK((GetDataTypeName(armnn::DataType::Float16) == std::string("Float16")));
+ CHECK((GetDataTypeName(armnn::DataType::Float16) == std::string("Float16")));
}
-BOOST_AUTO_TEST_CASE(BFloatType)
+TEST_CASE("BFloatType")
{
uint16_t v = 16256;
armnn::BFloat16 a(v);
@@ -95,83 +95,83 @@ BOOST_AUTO_TEST_CASE(BFloatType)
armnn::BFloat16 zero;
// Test BFloat16 type
- BOOST_CHECK_EQUAL(sizeof(a), 2);
- BOOST_CHECK_EQUAL(a, b);
- BOOST_CHECK_EQUAL(a.Val(), v);
- BOOST_CHECK_EQUAL(a, 1.0f);
- BOOST_CHECK_EQUAL(zero, 0.0f);
+ CHECK_EQ(sizeof(a), 2);
+ CHECK_EQ(a, b);
+ CHECK_EQ(a.Val(), v);
+ CHECK_EQ(a, 1.0f);
+ CHECK_EQ(zero, 0.0f);
// Infinity
float infFloat = std::numeric_limits<float>::infinity();
armnn::BFloat16 infBF(infFloat);
- BOOST_CHECK_EQUAL(infBF, armnn::BFloat16::Inf());
+ CHECK_EQ(infBF, armnn::BFloat16::Inf());
// NaN
float nan = std::numeric_limits<float>::quiet_NaN();
armnn::BFloat16 nanBF(nan);
- BOOST_CHECK_EQUAL(nanBF, armnn::BFloat16::Nan());
+ CHECK_EQ(nanBF, armnn::BFloat16::Nan());
// Test utility function returns correct type.
using ResolvedType = armnn::ResolveType<armnn::DataType::BFloat16>;
constexpr bool isBFloat16Type = std::is_same<armnn::BFloat16, ResolvedType>::value;
- BOOST_CHECK(isBFloat16Type);
+ CHECK(isBFloat16Type);
//Test utility functions return correct size
- BOOST_CHECK(GetDataTypeSize(armnn::DataType::BFloat16) == 2);
+ CHECK(GetDataTypeSize(armnn::DataType::BFloat16) == 2);
//Test utility functions return correct name
- BOOST_CHECK((GetDataTypeName(armnn::DataType::BFloat16) == std::string("BFloat16")));
+ CHECK((GetDataTypeName(armnn::DataType::BFloat16) == std::string("BFloat16")));
}
-BOOST_AUTO_TEST_CASE(Float32ToBFloat16Test)
+TEST_CASE("Float32ToBFloat16Test")
{
// LSB = 0, R = 0 -> round down
armnn::BFloat16 roundDown0 = armnn::BFloat16::Float32ToBFloat16(1.704735E38f); // 0x7F004000
- BOOST_CHECK_EQUAL(roundDown0.Val(), 0x7F00);
+ CHECK_EQ(roundDown0.Val(), 0x7F00);
// LSB = 1, R = 0 -> round down
armnn::BFloat16 roundDown1 = armnn::BFloat16::Float32ToBFloat16(9.18355E-41f); // 0x00010000
- BOOST_CHECK_EQUAL(roundDown1.Val(), 0x0001);
+ CHECK_EQ(roundDown1.Val(), 0x0001);
// LSB = 0, R = 1 all 0 -> round down
armnn::BFloat16 roundDown2 = armnn::BFloat16::Float32ToBFloat16(1.14794E-40f); // 0x00014000
- BOOST_CHECK_EQUAL(roundDown2.Val(), 0x0001);
+ CHECK_EQ(roundDown2.Val(), 0x0001);
// LSB = 1, R = 1 -> round up
armnn::BFloat16 roundUp = armnn::BFloat16::Float32ToBFloat16(-2.0234377f); // 0xC0018001
- BOOST_CHECK_EQUAL(roundUp.Val(), 0xC002);
+ CHECK_EQ(roundUp.Val(), 0xC002);
// LSB = 0, R = 1 -> round up
armnn::BFloat16 roundUp1 = armnn::BFloat16::Float32ToBFloat16(4.843037E-35f); // 0x0680C000
- BOOST_CHECK_EQUAL(roundUp1.Val(), 0x0681);
+ CHECK_EQ(roundUp1.Val(), 0x0681);
// Max positive value -> infinity
armnn::BFloat16 maxPositive = armnn::BFloat16::Float32ToBFloat16(std::numeric_limits<float>::max()); // 0x7F7FFFFF
- BOOST_CHECK_EQUAL(maxPositive, armnn::BFloat16::Inf());
+ CHECK_EQ(maxPositive, armnn::BFloat16::Inf());
// Max negative value -> -infinity
armnn::BFloat16 maxNeg = armnn::BFloat16::Float32ToBFloat16(std::numeric_limits<float>::lowest()); // 0xFF7FFFFF
- BOOST_CHECK_EQUAL(maxNeg.Val(), 0xFF80);
+ CHECK_EQ(maxNeg.Val(), 0xFF80);
// Min positive value
armnn::BFloat16 minPositive = armnn::BFloat16::Float32ToBFloat16(1.1754942E-38f); // 0x007FFFFF
- BOOST_CHECK_EQUAL(minPositive.Val(), 0x0080);
+ CHECK_EQ(minPositive.Val(), 0x0080);
// Min negative value
armnn::BFloat16 minNeg = armnn::BFloat16::Float32ToBFloat16(-1.1754942E-38f); // 0x807FFFFF
- BOOST_CHECK_EQUAL(minNeg.Val(), 0x8080);
+ CHECK_EQ(minNeg.Val(), 0x8080);
}
-BOOST_AUTO_TEST_CASE(BFloat16ToFloat32Test)
+TEST_CASE("BFloat16ToFloat32Test")
{
armnn::BFloat16 bf0(1.5f);
- BOOST_CHECK_EQUAL(bf0.ToFloat32(), 1.5f);
+ CHECK_EQ(bf0.ToFloat32(), 1.5f);
armnn::BFloat16 bf1(-5.525308E-25f);
- BOOST_CHECK_EQUAL(bf1.ToFloat32(), -5.525308E-25f);
+ CHECK_EQ(bf1.ToFloat32(), -5.525308E-25f);
armnn::BFloat16 bf2(-2.0625f);
- BOOST_CHECK_EQUAL(bf2.ToFloat32(), -2.0625f);
+ CHECK_EQ(bf2.ToFloat32(), -2.0625f);
uint16_t v = 32639;
armnn::BFloat16 bf3(v);
- BOOST_CHECK_EQUAL(bf3.ToFloat32(), 3.3895314E38f);
+ CHECK_EQ(bf3.ToFloat32(), 3.3895314E38f);
// Infinity
- BOOST_CHECK_EQUAL(armnn::BFloat16::Inf().ToFloat32(), std::numeric_limits<float>::infinity());
+ CHECK_EQ(armnn::BFloat16::Inf().ToFloat32(), std::numeric_limits<float>::infinity());
// NaN
- BOOST_CHECK(std::isnan(armnn::BFloat16::Nan().ToFloat32()));
+ CHECK(std::isnan(armnn::BFloat16::Nan().ToFloat32()));
}
-BOOST_AUTO_TEST_CASE(GraphTopologicalSortSimpleTest)
+TEST_CASE("GraphTopologicalSortSimpleTest")
{
std::map<int, std::vector<int>> graph;
@@ -192,13 +192,13 @@ BOOST_AUTO_TEST_CASE(GraphTopologicalSortSimpleTest)
std::vector<int> output;
bool sortCompleted = armnnUtils::GraphTopologicalSort<int>(targetNodes, getNodeInputs, output);
- BOOST_TEST(sortCompleted);
+ CHECK(sortCompleted);
std::vector<int> correctResult = {5, 4, 2, 0, 3, 1};
- BOOST_CHECK_EQUAL_COLLECTIONS(output.begin(), output.end(), correctResult.begin(), correctResult.end());
+ CHECK(std::equal(output.begin(), output.end(), correctResult.begin(), correctResult.end()));
}
-BOOST_AUTO_TEST_CASE(GraphTopologicalSortVariantTest)
+TEST_CASE("GraphTopologicalSortVariantTest")
{
std::map<int, std::vector<int>> graph;
@@ -220,13 +220,13 @@ BOOST_AUTO_TEST_CASE(GraphTopologicalSortVariantTest)
std::vector<int> output;
bool sortCompleted = armnnUtils::GraphTopologicalSort<int>(targetNodes, getNodeInputs, output);
- BOOST_TEST(sortCompleted);
+ CHECK(sortCompleted);
std::vector<int> correctResult = {6, 5, 3, 4, 2, 0, 1};
- BOOST_CHECK_EQUAL_COLLECTIONS(output.begin(), output.end(), correctResult.begin(), correctResult.end());
+ CHECK(std::equal(output.begin(), output.end(), correctResult.begin(), correctResult.end()));
}
-BOOST_AUTO_TEST_CASE(CyclicalGraphTopologicalSortTest)
+TEST_CASE("CyclicalGraphTopologicalSortTest")
{
std::map<int, std::vector<int>> graph;
@@ -244,32 +244,32 @@ BOOST_AUTO_TEST_CASE(CyclicalGraphTopologicalSortTest)
std::vector<int> output;
bool sortCompleted = armnnUtils::GraphTopologicalSort<int>(targetNodes, getNodeInputs, output);
- BOOST_TEST(!sortCompleted);
+ CHECK(!sortCompleted);
}
-BOOST_AUTO_TEST_CASE(PermuteQuantizationDim)
+TEST_CASE("PermuteQuantizationDim")
{
std::vector<float> scales {1.0f, 1.0f};
// Set QuantizationDim to be index 1
const armnn::TensorInfo perChannelInfo({ 1, 2, 3, 4 }, armnn::DataType::Float32, scales, 1U);
- BOOST_CHECK(perChannelInfo.GetQuantizationDim().value() == 1U);
+ CHECK(perChannelInfo.GetQuantizationDim().value() == 1U);
// Permute so that index 1 moves to final index i.e. index 3
armnn::PermutationVector mappings({ 0, 3, 2, 1 });
auto permutedPerChannel = armnnUtils::Permuted(perChannelInfo, mappings);
// Check that QuantizationDim is in index 3
- BOOST_CHECK(permutedPerChannel.GetQuantizationDim().value() == 3U);
+ CHECK(permutedPerChannel.GetQuantizationDim().value() == 3U);
// Even if there is only a single scale the quantization dim still exists and needs to be permuted
std::vector<float> scale {1.0f};
const armnn::TensorInfo perChannelInfo1({ 1, 2, 3, 4 }, armnn::DataType::Float32, scale, 1U);
auto permuted = armnnUtils::Permuted(perChannelInfo1, mappings);
- BOOST_CHECK(permuted.GetQuantizationDim().value() == 3U);
+ CHECK(permuted.GetQuantizationDim().value() == 3U);
}
-BOOST_AUTO_TEST_CASE(PermuteVectorIterator)
+TEST_CASE("PermuteVectorIterator")
{
// We're slightly breaking the spirit of std::array.end() because we're using it as a
// variable length rather than fixed length. This test is to use a couple of iterators and
@@ -278,7 +278,7 @@ BOOST_AUTO_TEST_CASE(PermuteVectorIterator)
// Create zero length.
armnn::PermutationVector zeroPVector({});
// Begin should be equal to end.
- BOOST_CHECK(zeroPVector.begin() == zeroPVector.end());
+ CHECK(zeroPVector.begin() == zeroPVector.end());
// Create length 4. Summing the 4 values should be 6.
armnn::PermutationVector fourPVector({ 0, 3, 2, 1 });
@@ -287,7 +287,7 @@ BOOST_AUTO_TEST_CASE(PermuteVectorIterator)
{
sum += it;
}
- BOOST_CHECK(sum == 6);
+ CHECK(sum == 6);
// Directly use begin and end, make sure there are 4 iterations.
unsigned int iterations = 0;
auto itr = fourPVector.begin();
@@ -296,7 +296,7 @@ BOOST_AUTO_TEST_CASE(PermuteVectorIterator)
++iterations;
itr++;
}
- BOOST_CHECK(iterations == 4);
+ CHECK(iterations == 4);
// Do the same with 2 elements.
armnn::PermutationVector twoPVector({ 0, 1 });
@@ -307,20 +307,20 @@ BOOST_AUTO_TEST_CASE(PermuteVectorIterator)
++iterations;
itr++;
}
- BOOST_CHECK(iterations == 2);
+ CHECK(iterations == 2);
}
#if defined(ARMNNREF_ENABLED)
-BOOST_AUTO_TEST_CASE(LayerSupportHandle)
+TEST_CASE("LayerSupportHandle")
{
auto layerSupportObject = armnn::GetILayerSupportByBackendId("CpuRef");
armnn::TensorInfo input;
std::string reasonIfUnsupported;
// InputLayer always supported for CpuRef
- BOOST_CHECK_EQUAL(layerSupportObject.IsInputSupported(input, reasonIfUnsupported), true);
+ CHECK_EQ(layerSupportObject.IsInputSupported(input, reasonIfUnsupported), true);
- BOOST_CHECK(layerSupportObject.IsBackendRegistered());
+ CHECK(layerSupportObject.IsBackendRegistered());
}
#endif
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnn/test/optimizations/AddBroadcastReshapeLayerTests.cpp b/src/armnn/test/optimizations/AddBroadcastReshapeLayerTests.cpp
index d0d728bfab..36a4507fc3 100644
--- a/src/armnn/test/optimizations/AddBroadcastReshapeLayerTests.cpp
+++ b/src/armnn/test/optimizations/AddBroadcastReshapeLayerTests.cpp
@@ -8,11 +8,12 @@
#include <Optimizer.hpp>
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
using namespace armnn;
-BOOST_AUTO_TEST_SUITE(Optimizer)
+TEST_SUITE("Optimizer")
+{
using namespace optimizations;
void AddBroadcastReshapeLayerOptimizerTest(const TensorInfo& info0,
@@ -36,7 +37,7 @@ void AddBroadcastReshapeLayerOptimizerTest(const TensorInfo& info0,
input1->GetOutputSlot().Connect(add->GetInputSlot(1));
add->GetOutputSlot().Connect(output->GetInputSlot(0));
- BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(),
+ CHECK(CheckSequence(graph.cbegin(), graph.cend(),
&IsLayerOfType<InputLayer>,
&IsLayerOfType<InputLayer>,
&IsLayerOfType<AdditionLayer>,
@@ -46,7 +47,7 @@ void AddBroadcastReshapeLayerOptimizerTest(const TensorInfo& info0,
armnn::Optimizer::Pass(graph, MakeOptimizations(AddBroadcastReshapeLayer()));
// Broadcast reshape layer has been added to the graph correctly
- BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(),
+ CHECK(CheckSequence(graph.cbegin(), graph.cend(),
&IsLayerOfType<InputLayer>,
&IsLayerOfType<InputLayer>,
&IsLayerOfType<ReshapeLayer>,
@@ -54,15 +55,15 @@ void AddBroadcastReshapeLayerOptimizerTest(const TensorInfo& info0,
&IsLayerOfType<OutputLayer>));
Layer* const reshapeLayer = GetFirstLayerWithName(graph, reshapeLayerName);
- BOOST_TEST(reshapeLayer);
+ CHECK(reshapeLayer);
auto addedReshapeTensorInfo = reshapeLayer->GetOutputSlot().GetTensorInfo();
// Tensorshape and the data type are correct
- BOOST_TEST((addedReshapeTensorInfo.GetShape() == expectedReshapeShape));
- BOOST_TEST((addedReshapeTensorInfo.GetDataType() == expectedDataType));
+ CHECK((addedReshapeTensorInfo.GetShape() == expectedReshapeShape));
+ CHECK((addedReshapeTensorInfo.GetDataType() == expectedDataType));
}
-BOOST_AUTO_TEST_CASE(AddBroadcastReshapeLayerSimpleTest)
+TEST_CASE("AddBroadcastReshapeLayerSimpleTest")
{
const TensorInfo info0({ 1, 2, 3, 5 }, DataType::Float32);
const TensorInfo info1({ 1 }, DataType::Float32);
@@ -71,7 +72,7 @@ BOOST_AUTO_TEST_CASE(AddBroadcastReshapeLayerSimpleTest)
DataType::Float32);
}
-BOOST_AUTO_TEST_CASE(AddBroadcastReshapeLayer1DTest)
+TEST_CASE("AddBroadcastReshapeLayer1DTest")
{
const TensorInfo info0({ 1, 2, 3, 5 }, DataType::Float32);
const TensorInfo info1({ 5 }, DataType::Float32);
@@ -81,7 +82,7 @@ BOOST_AUTO_TEST_CASE(AddBroadcastReshapeLayer1DTest)
DataType::Float32);
}
-BOOST_AUTO_TEST_CASE(AddBroadcastReshapeLayer2DTest)
+TEST_CASE("AddBroadcastReshapeLayer2DTest")
{
const TensorInfo info0({ 1, 2, 3, 5 }, DataType::Float32);
const TensorInfo info1({ 3, 5 }, DataType::Float32);
@@ -91,7 +92,7 @@ BOOST_AUTO_TEST_CASE(AddBroadcastReshapeLayer2DTest)
DataType::Float32);
}
-BOOST_AUTO_TEST_CASE(AddBroadcastReshapeLayer3DTest)
+TEST_CASE("AddBroadcastReshapeLayer3DTest")
{
const TensorInfo info0({ 2, 1, 1, 1 }, DataType::Float32);
const TensorInfo info1({ 3, 4, 5 }, DataType::Float32);
@@ -101,7 +102,7 @@ BOOST_AUTO_TEST_CASE(AddBroadcastReshapeLayer3DTest)
DataType::Float32);
}
-BOOST_AUTO_TEST_CASE(AddBroadcastReshapeLayer3DMergedTest)
+TEST_CASE("AddBroadcastReshapeLayer3DMergedTest")
{
const TensorInfo info0({ 2, 3, 1, 1 }, DataType::Float32);
const TensorInfo info1({ 3, 4, 5 }, DataType::Float32);
@@ -111,7 +112,7 @@ BOOST_AUTO_TEST_CASE(AddBroadcastReshapeLayer3DMergedTest)
DataType::Float32);
}
-BOOST_AUTO_TEST_CASE(AddBroadcastReshapeLayerSubtractionTest)
+TEST_CASE("AddBroadcastReshapeLayerSubtractionTest")
{
Graph graph;
const TensorInfo info0({ 5 }, DataType::Float32);
@@ -130,7 +131,7 @@ BOOST_AUTO_TEST_CASE(AddBroadcastReshapeLayerSubtractionTest)
input1->GetOutputSlot().Connect(sub->GetInputSlot(1));
sub->GetOutputSlot().Connect(output->GetInputSlot(0));
- BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(),
+ CHECK(CheckSequence(graph.cbegin(), graph.cend(),
&IsLayerOfType<InputLayer>,
&IsLayerOfType<InputLayer>,
&IsLayerOfType<SubtractionLayer>,
@@ -140,7 +141,7 @@ BOOST_AUTO_TEST_CASE(AddBroadcastReshapeLayerSubtractionTest)
armnn::Optimizer::Pass(graph, MakeOptimizations(AddBroadcastReshapeLayer()));
// Broadcast reshape layer has been added to the graph correctly
- BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(),
+ CHECK(CheckSequence(graph.cbegin(), graph.cend(),
&IsLayerOfType<InputLayer>,
&IsLayerOfType<InputLayer>,
&IsLayerOfType<ReshapeLayer>,
@@ -148,15 +149,15 @@ BOOST_AUTO_TEST_CASE(AddBroadcastReshapeLayerSubtractionTest)
&IsLayerOfType<OutputLayer>));
Layer* const reshapeLayer = GetFirstLayerWithName(graph, "Reshape_for:sub-0");
- BOOST_TEST(reshapeLayer);
+ CHECK(reshapeLayer);
auto addedReshapeTensorInfo = reshapeLayer->GetOutputSlot().GetTensorInfo();
// Tensorshape and the data type are correct
- BOOST_TEST((addedReshapeTensorInfo.GetShape() == TensorShape({ 1, 1, 1, 5 })));
- BOOST_TEST((addedReshapeTensorInfo.GetDataType() == DataType::Float32));
+ CHECK((addedReshapeTensorInfo.GetShape() == TensorShape({ 1, 1, 1, 5 })));
+ CHECK((addedReshapeTensorInfo.GetDataType() == DataType::Float32));
}
-BOOST_AUTO_TEST_CASE(AddBroadcastReshapeLayerDivisionTest)
+TEST_CASE("AddBroadcastReshapeLayerDivisionTest")
{
Graph graph;
const TensorInfo info0({ 1, 4, 5 }, DataType::QAsymmS8);
@@ -175,7 +176,7 @@ BOOST_AUTO_TEST_CASE(AddBroadcastReshapeLayerDivisionTest)
input1->GetOutputSlot().Connect(div->GetInputSlot(1));
div->GetOutputSlot().Connect(output->GetInputSlot(0));
- BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(),
+ CHECK(CheckSequence(graph.cbegin(), graph.cend(),
&IsLayerOfType<InputLayer>,
&IsLayerOfType<InputLayer>,
&IsLayerOfType<DivisionLayer>,
@@ -185,7 +186,7 @@ BOOST_AUTO_TEST_CASE(AddBroadcastReshapeLayerDivisionTest)
armnn::Optimizer::Pass(graph, MakeOptimizations(AddBroadcastReshapeLayer()));
// Broadcast reshape layer has been added to the graph correctly
- BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(),
+ CHECK(CheckSequence(graph.cbegin(), graph.cend(),
&IsLayerOfType<InputLayer>,
&IsLayerOfType<InputLayer>,
&IsLayerOfType<ReshapeLayer>,
@@ -193,15 +194,15 @@ BOOST_AUTO_TEST_CASE(AddBroadcastReshapeLayerDivisionTest)
&IsLayerOfType<OutputLayer>));
Layer* const reshapeLayer = GetFirstLayerWithName(graph, "Reshape_for:div-0");
- BOOST_TEST(reshapeLayer);
+ CHECK(reshapeLayer);
auto addedReshapeTensorInfo = reshapeLayer->GetOutputSlot().GetTensorInfo();
// Tensorshape and the data type are correct
- BOOST_TEST((addedReshapeTensorInfo.GetShape() == TensorShape({ 1, 1, 4, 5 })));
- BOOST_TEST((addedReshapeTensorInfo.GetDataType() == DataType::QAsymmS8));
+ CHECK((addedReshapeTensorInfo.GetShape() == TensorShape({ 1, 1, 4, 5 })));
+ CHECK((addedReshapeTensorInfo.GetDataType() == DataType::QAsymmS8));
}
-BOOST_AUTO_TEST_CASE(AddBroadcastReshapeLayerMultiplicationTest)
+TEST_CASE("AddBroadcastReshapeLayerMultiplicationTest")
{
Graph graph;
const TensorInfo info0({ 3, 5 }, DataType::QAsymmU8);
@@ -220,7 +221,7 @@ BOOST_AUTO_TEST_CASE(AddBroadcastReshapeLayerMultiplicationTest)
input1->GetOutputSlot().Connect(mul->GetInputSlot(1));
mul->GetOutputSlot().Connect(output->GetInputSlot(0));
- BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(),
+ CHECK(CheckSequence(graph.cbegin(), graph.cend(),
&IsLayerOfType<InputLayer>,
&IsLayerOfType<InputLayer>,
&IsLayerOfType<MultiplicationLayer>,
@@ -230,7 +231,7 @@ BOOST_AUTO_TEST_CASE(AddBroadcastReshapeLayerMultiplicationTest)
armnn::Optimizer::Pass(graph, MakeOptimizations(AddBroadcastReshapeLayer()));
// Broadcast reshape layer has been added to the graph correctly
- BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(),
+ CHECK(CheckSequence(graph.cbegin(), graph.cend(),
&IsLayerOfType<InputLayer>,
&IsLayerOfType<InputLayer>,
&IsLayerOfType<ReshapeLayer>,
@@ -238,15 +239,15 @@ BOOST_AUTO_TEST_CASE(AddBroadcastReshapeLayerMultiplicationTest)
&IsLayerOfType<OutputLayer>));
Layer* const reshapeLayer = GetFirstLayerWithName(graph, "Reshape_for:mul-0");
- BOOST_TEST(reshapeLayer);
+ CHECK(reshapeLayer);
auto addedReshapeTensorInfo = reshapeLayer->GetOutputSlot().GetTensorInfo();
// Tensorshape and the data type are correct
- BOOST_TEST((addedReshapeTensorInfo.GetShape() == TensorShape({ 1, 1, 3, 5 })));
- BOOST_TEST((addedReshapeTensorInfo.GetDataType() == DataType::QAsymmU8));
+ CHECK((addedReshapeTensorInfo.GetShape() == TensorShape({ 1, 1, 3, 5 })));
+ CHECK((addedReshapeTensorInfo.GetDataType() == DataType::QAsymmU8));
}
-BOOST_AUTO_TEST_CASE(AddNoBroadcastReshapeLayerTest)
+TEST_CASE("AddNoBroadcastReshapeLayerTest")
{
Graph graph;
const TensorInfo info0({ 1, 1, 1, 1 }, DataType::QAsymmU8);
@@ -265,7 +266,7 @@ BOOST_AUTO_TEST_CASE(AddNoBroadcastReshapeLayerTest)
input1->GetOutputSlot().Connect(mul->GetInputSlot(1));
mul->GetOutputSlot().Connect(output->GetInputSlot(0));
- BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(),
+ CHECK(CheckSequence(graph.cbegin(), graph.cend(),
&IsLayerOfType<InputLayer>,
&IsLayerOfType<InputLayer>,
&IsLayerOfType<MultiplicationLayer>,
@@ -275,17 +276,17 @@ BOOST_AUTO_TEST_CASE(AddNoBroadcastReshapeLayerTest)
armnn::Optimizer::Pass(graph, MakeOptimizations(AddBroadcastReshapeLayer()));
// Broadcast reshape layer has not been added to the graph
- BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(),
+ CHECK(CheckSequence(graph.cbegin(), graph.cend(),
&IsLayerOfType<InputLayer>,
&IsLayerOfType<InputLayer>,
&IsLayerOfType<MultiplicationLayer>,
&IsLayerOfType<OutputLayer>));
Layer* const reshapeLayer = GetFirstLayerWithName(graph, "Reshape_for:mul-0");
- BOOST_TEST(!reshapeLayer);
+ CHECK(!reshapeLayer);
}
-BOOST_AUTO_TEST_CASE(ReshapeParentConstLayerTest)
+TEST_CASE("ReshapeParentConstLayerTest")
{
Graph graph;
const TensorInfo info0({ 1, 2, 3, 5 }, DataType::QAsymmU8);
@@ -309,7 +310,7 @@ BOOST_AUTO_TEST_CASE(ReshapeParentConstLayerTest)
constant->GetOutputSlot().Connect(mul->GetInputSlot(1));
mul->GetOutputSlot().Connect(output->GetInputSlot(0));
- BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(),
+ CHECK(CheckSequence(graph.cbegin(), graph.cend(),
&IsLayerOfType<InputLayer>,
&IsLayerOfType<ConstantLayer>,
&IsLayerOfType<MultiplicationLayer>,
@@ -319,22 +320,22 @@ BOOST_AUTO_TEST_CASE(ReshapeParentConstLayerTest)
armnn::Optimizer::Pass(graph, MakeOptimizations(AddBroadcastReshapeLayer()));
// Broadcast reshape layer has not been added to the graph
- BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(),
+ CHECK(CheckSequence(graph.cbegin(), graph.cend(),
&IsLayerOfType<InputLayer>,
&IsLayerOfType<ConstantLayer>,
&IsLayerOfType<MultiplicationLayer>,
&IsLayerOfType<OutputLayer>));
TensorShape expectedShape = TensorShape{ 1, 1, 1, 5 };
- BOOST_TEST(constant->m_LayerOutput.get()->GetTensorInfo().GetShape() == expectedShape);
+ CHECK(constant->m_LayerOutput.get()->GetTensorInfo().GetShape() == expectedShape);
- BOOST_TEST(constant->m_LayerOutput.get()->GetTensorInfo().GetNumDimensions() == info0.GetNumDimensions());
+ CHECK(constant->m_LayerOutput.get()->GetTensorInfo().GetNumDimensions() == info0.GetNumDimensions());
Layer* const reshapeLayer = GetFirstLayerWithName(graph, "Reshape_for:mul-0");
- BOOST_TEST(!reshapeLayer);
+ CHECK(!reshapeLayer);
}
-BOOST_AUTO_TEST_CASE(ReshapeParentConstAddLayerMultipleConnectionsTest)
+TEST_CASE("ReshapeParentConstAddLayerMultipleConnectionsTest")
{
// In this test case we recreate the situation where an Addition layer has
// a constant second term, e.g. [1,512] + [1]. The AddBroadcastReshapeLayer
@@ -367,7 +368,7 @@ BOOST_AUTO_TEST_CASE(ReshapeParentConstAddLayerMultipleConnectionsTest)
// This second connection should prevent the modification of the const output tensor.
constant->GetOutputSlot().Connect(add2->GetInputSlot(1));
- BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(),
+ CHECK(CheckSequence(graph.cbegin(), graph.cend(),
&IsLayerOfType<InputLayer>,
&IsLayerOfType<ConstantLayer>,
&IsLayerOfType<AdditionLayer>,
@@ -378,7 +379,7 @@ BOOST_AUTO_TEST_CASE(ReshapeParentConstAddLayerMultipleConnectionsTest)
armnn::Optimizer::Pass(graph, MakeOptimizations(AddBroadcastReshapeLayer()));
// Broadcast reshape should have been added before each addition layer.
- BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(),
+ CHECK(CheckSequence(graph.cbegin(), graph.cend(),
&IsLayerOfType<InputLayer>,
&IsLayerOfType<ConstantLayer>,
&IsLayerOfType<ReshapeLayer>,
@@ -388,14 +389,14 @@ BOOST_AUTO_TEST_CASE(ReshapeParentConstAddLayerMultipleConnectionsTest)
&IsLayerOfType<OutputLayer>));
// Ensure the output shape of the constant hasn't changed.
- BOOST_TEST(constant->m_LayerOutput.get()->GetTensorInfo().GetShape() == constantTermInfo.GetShape());
+ CHECK(constant->m_LayerOutput.get()->GetTensorInfo().GetShape() == constantTermInfo.GetShape());
// There should be two extra reshape layers with appropriate names.
Layer* const reshapeLayer1 = GetFirstLayerWithName(graph, "Reshape_for:add1-1");
Layer* const reshapeLayer2 = GetFirstLayerWithName(graph, "Reshape_for:add2-1");
- BOOST_TEST(reshapeLayer1);
- BOOST_TEST(reshapeLayer2);
+ CHECK(reshapeLayer1);
+ CHECK(reshapeLayer2);
}
-BOOST_AUTO_TEST_SUITE_END() \ No newline at end of file
+} \ No newline at end of file
diff --git a/src/armnn/test/optimizations/ConvertConstantsBFloatTests.cpp b/src/armnn/test/optimizations/ConvertConstantsBFloatTests.cpp
index e4c1f2f413..b78a1bf207 100644
--- a/src/armnn/test/optimizations/ConvertConstantsBFloatTests.cpp
+++ b/src/armnn/test/optimizations/ConvertConstantsBFloatTests.cpp
@@ -8,14 +8,15 @@
#include <BFloat16.hpp>
#include <Optimizer.hpp>
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
using namespace armnn;
-BOOST_AUTO_TEST_SUITE(Optimizer)
+TEST_SUITE("Optimizer")
+{
using namespace armnn::optimizations;
-BOOST_AUTO_TEST_CASE(ConvertConstantsFloatToBFloatTest)
+TEST_CASE("ConvertConstantsFloatToBFloatTest")
{
armnn::Graph graph;
@@ -48,27 +49,27 @@ BOOST_AUTO_TEST_CASE(ConvertConstantsFloatToBFloatTest)
fc->GetOutputSlot().Connect(output->GetInputSlot(0));
// Check tensor data type before conversion
- BOOST_CHECK(fc->m_Weight->GetTensorInfo().GetDataType() == armnn::DataType::Float32);
+ CHECK(fc->m_Weight->GetTensorInfo().GetDataType() == armnn::DataType::Float32);
// Run the optimizer
armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(ConvertConstantsFloatToBFloat()));
// Check tensor data type after conversion
- BOOST_CHECK(fc->m_Weight->GetTensorInfo().GetDataType() == armnn::DataType::BFloat16);
+ CHECK(fc->m_Weight->GetTensorInfo().GetDataType() == armnn::DataType::BFloat16);
// Check whether data matches expected Bf16 data
const BFloat16* data = fc->m_Weight->GetConstTensor<BFloat16>();
- BOOST_CHECK(data[0] == BFloat16(0.0f));
- BOOST_CHECK(data[1] == BFloat16(-1.0f));
- BOOST_CHECK(data[2] == BFloat16(3.796875f)); // 0x4073
- BOOST_CHECK(data[3] == BFloat16(3.1072295E29f)); // 0x707B
- BOOST_CHECK(data[4] == BFloat16(9.131327E-10f)); // 0x307B
- BOOST_CHECK(data[5] == BFloat16(-3.796875f)); // 0xC073
- BOOST_CHECK(data[6] == BFloat16(-3.1072295E29f)); // 0xF07B
- BOOST_CHECK(data[7] == BFloat16(-9.131327E-10f)); // 0xB07B
+ CHECK(data[0] == BFloat16(0.0f));
+ CHECK(data[1] == BFloat16(-1.0f));
+ CHECK(data[2] == BFloat16(3.796875f)); // 0x4073
+ CHECK(data[3] == BFloat16(3.1072295E29f)); // 0x707B
+ CHECK(data[4] == BFloat16(9.131327E-10f)); // 0x307B
+ CHECK(data[5] == BFloat16(-3.796875f)); // 0xC073
+ CHECK(data[6] == BFloat16(-3.1072295E29f)); // 0xF07B
+ CHECK(data[7] == BFloat16(-9.131327E-10f)); // 0xB07B
}
-BOOST_AUTO_TEST_CASE(ConvertConstantsBFloatToFloatTest)
+TEST_CASE("ConvertConstantsBFloatToFloatTest")
{
armnn::Graph graph;
@@ -104,24 +105,24 @@ BOOST_AUTO_TEST_CASE(ConvertConstantsBFloatToFloatTest)
fc->GetOutputSlot().Connect(output->GetInputSlot(0));
//Test the tensor info is correct.
- BOOST_CHECK(fc->m_Weight->GetTensorInfo().GetDataType() == armnn::DataType::BFloat16);
+ CHECK(fc->m_Weight->GetTensorInfo().GetDataType() == armnn::DataType::BFloat16);
// Run the optimizer
armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(ConvertConstantsBFloatToFloat()));
//Test the tensor info is correct.
- BOOST_CHECK(fc->m_Weight->GetTensorInfo().GetDataType() == armnn::DataType::Float32);
+ CHECK(fc->m_Weight->GetTensorInfo().GetDataType() == armnn::DataType::Float32);
// Now test the data matches float32 data
const float* data = fc->m_Weight->GetConstTensor<float>();
- BOOST_CHECK(data[0] == 0.0f);
- BOOST_CHECK(data[1] == -1.0f);
- BOOST_CHECK(data[2] == 3.796875f);
- BOOST_CHECK(data[3] == 3.1072295E29f);
- BOOST_CHECK(data[4] == 9.131327E-10f);
- BOOST_CHECK(data[5] == -3.796875f);
- BOOST_CHECK(data[6] == -3.1072295E29f);
- BOOST_CHECK(data[7] == -9.131327E-10f);
+ CHECK(data[0] == 0.0f);
+ CHECK(data[1] == -1.0f);
+ CHECK(data[2] == 3.796875f);
+ CHECK(data[3] == 3.1072295E29f);
+ CHECK(data[4] == 9.131327E-10f);
+ CHECK(data[5] == -3.796875f);
+ CHECK(data[6] == -3.1072295E29f);
+ CHECK(data[7] == -9.131327E-10f);
}
-BOOST_AUTO_TEST_SUITE_END() \ No newline at end of file
+} \ No newline at end of file
diff --git a/src/armnn/test/optimizations/ConvertConstantsFloatToHalfTests.cpp b/src/armnn/test/optimizations/ConvertConstantsFloatToHalfTests.cpp
index 1dfe7f431c..e6cca4f7bf 100644
--- a/src/armnn/test/optimizations/ConvertConstantsFloatToHalfTests.cpp
+++ b/src/armnn/test/optimizations/ConvertConstantsFloatToHalfTests.cpp
@@ -8,14 +8,15 @@
#include <Optimizer.hpp>
#include <Half.hpp>
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
using namespace armnn;
-BOOST_AUTO_TEST_SUITE(Optimizer)
+TEST_SUITE("Optimizer")
+{
using namespace armnn::optimizations;
-BOOST_AUTO_TEST_CASE(ConvertConstantsFloatToHalfTest)
+TEST_CASE("ConvertConstantsFloatToHalfTest")
{
armnn::Graph graph;
@@ -41,20 +42,20 @@ BOOST_AUTO_TEST_CASE(ConvertConstantsFloatToHalfTest)
fc->GetOutputSlot().Connect(output->GetInputSlot(0));
// Check tensor data type before conversion
- BOOST_CHECK(fc->m_Weight->GetTensorInfo().GetDataType() == armnn::DataType::Float32);
+ CHECK(fc->m_Weight->GetTensorInfo().GetDataType() == armnn::DataType::Float32);
// Run the optimizer
armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(ConvertConstantsFloatToHalf()));
// Check tensor data type after conversion
- BOOST_CHECK(fc->m_Weight->GetTensorInfo().GetDataType() == armnn::DataType::Float16);
+ CHECK(fc->m_Weight->GetTensorInfo().GetDataType() == armnn::DataType::Float16);
// Check whether data matches expected fp16 data
const Half* data = fc->m_Weight->GetConstTensor<Half>();
- BOOST_CHECK(data[0] == Half(1.0f));
- BOOST_CHECK(data[1] == Half(2.0f));
- BOOST_CHECK(data[2] == Half(3.0f));
- BOOST_CHECK(data[3] == Half(4.0f));
+ CHECK(data[0] == Half(1.0f));
+ CHECK(data[1] == Half(2.0f));
+ CHECK(data[2] == Half(3.0f));
+ CHECK(data[3] == Half(4.0f));
}
-BOOST_AUTO_TEST_SUITE_END() \ No newline at end of file
+} \ No newline at end of file
diff --git a/src/armnn/test/optimizations/ConvertConstantsHalfToFloatTests.cpp b/src/armnn/test/optimizations/ConvertConstantsHalfToFloatTests.cpp
index 1ddf5262e8..2ec1279f33 100644
--- a/src/armnn/test/optimizations/ConvertConstantsHalfToFloatTests.cpp
+++ b/src/armnn/test/optimizations/ConvertConstantsHalfToFloatTests.cpp
@@ -7,12 +7,13 @@
#include <Optimizer.hpp>
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
-BOOST_AUTO_TEST_SUITE(Optimizer)
+TEST_SUITE("Optimizer")
+{
using namespace armnn::optimizations;
-BOOST_AUTO_TEST_CASE(ConvertConstantsHalfToFloatTest)
+TEST_CASE("ConvertConstantsHalfToFloatTest")
{
armnn::Graph graph;
@@ -41,20 +42,20 @@ BOOST_AUTO_TEST_CASE(ConvertConstantsHalfToFloatTest)
fc->GetOutputSlot().Connect(output->GetInputSlot(0));
//Test the tensor info is correct.
- BOOST_CHECK(fc->m_Weight->GetTensorInfo().GetDataType() == armnn::DataType::Float16);
+ CHECK(fc->m_Weight->GetTensorInfo().GetDataType() == armnn::DataType::Float16);
// Run the optimizer
armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(ConvertConstantsHalfToFloat()));
//Test the tensor info is correct.
- BOOST_CHECK(fc->m_Weight->GetTensorInfo().GetDataType() == armnn::DataType::Float32);
+ CHECK(fc->m_Weight->GetTensorInfo().GetDataType() == armnn::DataType::Float32);
// Now test the data matches float32 data
const float* data = fc->m_Weight->GetConstTensor<float>();
- BOOST_CHECK(1.0f == data[0]);
- BOOST_CHECK(2.0f == data[1]);
- BOOST_CHECK(3.0f == data[2]);
- BOOST_CHECK(4.0f == data[3]);
+ CHECK(1.0f == data[0]);
+ CHECK(2.0f == data[1]);
+ CHECK(3.0f == data[2]);
+ CHECK(4.0f == data[3]);
}
-BOOST_AUTO_TEST_SUITE_END() \ No newline at end of file
+} \ No newline at end of file
diff --git a/src/armnn/test/optimizations/FoldPadTests.cpp b/src/armnn/test/optimizations/FoldPadTests.cpp
index 20cfab1cb7..7b4ac4170f 100644
--- a/src/armnn/test/optimizations/FoldPadTests.cpp
+++ b/src/armnn/test/optimizations/FoldPadTests.cpp
@@ -6,15 +6,16 @@
#include "LayersFwd.hpp"
#include <Network.hpp>
#include <test/TestUtils.hpp>
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
#include <backendsCommon/TensorHandle.hpp>
#include <Optimizer.hpp>
-BOOST_AUTO_TEST_SUITE(Optimizer)
+TEST_SUITE("Optimizer")
+{
using namespace armnn;
using namespace armnn::optimizations;
-BOOST_AUTO_TEST_CASE(FoldPadLayerIntoConvolution2dLayer)
+TEST_CASE("FoldPadLayerIntoConvolution2dLayer")
{
Graph graph;
const unsigned int inputShape[] = {1, 2, 2, 3};
@@ -67,7 +68,7 @@ BOOST_AUTO_TEST_CASE(FoldPadLayerIntoConvolution2dLayer)
(conv2dLayerParams.m_BiasEnabled == false) && (conv2dLayerParams.m_DataLayout == DataLayout::NHWC);
};
- BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(),
+ CHECK(CheckSequence(graph.cbegin(), graph.cend(),
&IsLayerOfType<InputLayer>,
&IsLayerOfType<PadLayer>,
checkSimpleConv2d,
@@ -85,13 +86,13 @@ BOOST_AUTO_TEST_CASE(FoldPadLayerIntoConvolution2dLayer)
(conv2dLayerParams.m_BiasEnabled == false) && (conv2dLayerParams.m_DataLayout == DataLayout::NHWC);
};
- BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(),
+ CHECK(CheckSequence(graph.cbegin(), graph.cend(),
&IsLayerOfType<InputLayer>,
checkPadFoldedIntoConv2d,
&IsLayerOfType<OutputLayer>));
}
-BOOST_AUTO_TEST_CASE(FoldPadLayerIntoDepthwiseConvolution2dLayer)
+TEST_CASE("FoldPadLayerIntoDepthwiseConvolution2dLayer")
{
Graph graph;
const unsigned int inputShape[] = {1, 2, 2, 3};
@@ -146,7 +147,7 @@ BOOST_AUTO_TEST_CASE(FoldPadLayerIntoDepthwiseConvolution2dLayer)
(depthwiseConv2dLayerParams.m_DataLayout == DataLayout::NHWC);
};
- BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(),
+ CHECK(CheckSequence(graph.cbegin(), graph.cend(),
&IsLayerOfType<InputLayer>,
&IsLayerOfType<PadLayer>,
checkSimpleDepthwiseConv2d,
@@ -166,13 +167,13 @@ BOOST_AUTO_TEST_CASE(FoldPadLayerIntoDepthwiseConvolution2dLayer)
(depthwiseConv2dLayerParams.m_DataLayout == DataLayout::NHWC);
};
- BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(),
+ CHECK(CheckSequence(graph.cbegin(), graph.cend(),
&IsLayerOfType<InputLayer>,
checkPadFoldedIntoDepthwiseConv2d,
&IsLayerOfType<OutputLayer>));
}
-BOOST_AUTO_TEST_CASE(FoldPadLayerIntoPooling2dLayer)
+TEST_CASE("FoldPadLayerIntoPooling2dLayer")
{
Graph graph;
const unsigned int inputShape[] = {1, 2, 2, 3};
@@ -218,7 +219,7 @@ BOOST_AUTO_TEST_CASE(FoldPadLayerIntoPooling2dLayer)
(pool2dLayer->GetParameters() == pooling2dDescriptor);
};
- BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(),
+ CHECK(CheckSequence(graph.cbegin(), graph.cend(),
&IsLayerOfType<InputLayer>,
&IsLayerOfType<PadLayer>,
checkSimplePool2d,
@@ -248,13 +249,13 @@ BOOST_AUTO_TEST_CASE(FoldPadLayerIntoPooling2dLayer)
(pool2dLayerParams.m_PadBottom == 1) && (pool2dLayerParams.m_PaddingMethod == PaddingMethod::IgnoreValue);
};
- BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(),
+ CHECK(CheckSequence(graph.cbegin(), graph.cend(),
&IsLayerOfType<InputLayer>,
checkPadFoldedIntoPool2d,
&IsLayerOfType<OutputLayer>));
}
-BOOST_AUTO_TEST_CASE(FoldPadLayerIntoPooling2d_PadWithMultipleOutputsShouldNotBeOptimized)
+TEST_CASE("FoldPadLayerIntoPooling2d_PadWithMultipleOutputsShouldNotBeOptimized")
{
    // In this test case we'll set up a pad layer with two outputs. One goes to a pooling layer and the other
// goes to an output layer. FoldPadLayerIntoPooling2d should not optimize this graph as it uses the
@@ -308,7 +309,7 @@ BOOST_AUTO_TEST_CASE(FoldPadLayerIntoPooling2d_PadWithMultipleOutputsShouldNotBe
};
// Initial sequence.
- BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(),
+ CHECK(CheckSequence(graph.cbegin(), graph.cend(),
&IsLayerOfType<InputLayer>,
&IsLayerOfType<PadLayer>,
checkSimplePool2d,
@@ -318,7 +319,7 @@ BOOST_AUTO_TEST_CASE(FoldPadLayerIntoPooling2d_PadWithMultipleOutputsShouldNotBe
armnn::Optimizer::Pass(graph, MakeOptimizations(FoldPadIntoPooling2d()));
// The network should not change.
- BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(),
+ CHECK(CheckSequence(graph.cbegin(), graph.cend(),
&IsLayerOfType<InputLayer>,
&IsLayerOfType<PadLayer>,
checkSimplePool2d,
@@ -326,7 +327,7 @@ BOOST_AUTO_TEST_CASE(FoldPadLayerIntoPooling2d_PadWithMultipleOutputsShouldNotBe
&IsLayerOfType<OutputLayer>));
}
-BOOST_AUTO_TEST_CASE(FoldPadLayerIntoPooling2dLayer_PoolingLayerWithExcludePaddingShouldNotTakeMorePadding)
+TEST_CASE("FoldPadLayerIntoPooling2dLayer_PoolingLayerWithExcludePaddingShouldNotTakeMorePadding")
{
// In this test setup input, Pad layer, Pooling layer that includes padding, output layer. The optimization
    // should not work as the pooling layer already includes an existing pad and specifies PaddingMethod::Exclude.
@@ -380,7 +381,7 @@ BOOST_AUTO_TEST_CASE(FoldPadLayerIntoPooling2dLayer_PoolingLayerWithExcludePaddi
(pool2dLayer->GetParameters() == pooling2dDescriptor);
};
- BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(),
+ CHECK(CheckSequence(graph.cbegin(), graph.cend(),
&IsLayerOfType<InputLayer>,
&IsLayerOfType<PadLayer>,
checkSimplePool2d,
@@ -389,14 +390,14 @@ BOOST_AUTO_TEST_CASE(FoldPadLayerIntoPooling2dLayer_PoolingLayerWithExcludePaddi
armnn::Optimizer::Pass(graph, MakeOptimizations(FoldPadIntoPooling2d()));
// The optimization should not have modified the graph.
- BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(),
+ CHECK(CheckSequence(graph.cbegin(), graph.cend(),
&IsLayerOfType<InputLayer>,
&IsLayerOfType<PadLayer>,
checkSimplePool2d,
&IsLayerOfType<OutputLayer>));
}
-BOOST_AUTO_TEST_CASE(FoldPadLayerIntoPooling2dLayer_MaxPoolingLayerWithLargePadValueShouldNotBeFolded)
+TEST_CASE("FoldPadLayerIntoPooling2dLayer_MaxPoolingLayerWithLargePadValueShouldNotBeFolded")
{
// In this test setup input, Pad layer with a large pad value, Max Pooling layer, output layer. The optimization
// should not work as the pad value will modify the result of the max pooling layer.
@@ -447,7 +448,7 @@ BOOST_AUTO_TEST_CASE(FoldPadLayerIntoPooling2dLayer_MaxPoolingLayerWithLargePadV
(pool2dLayer->GetParameters() == pooling2dDescriptor);
};
- BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(),
+ CHECK(CheckSequence(graph.cbegin(), graph.cend(),
&IsLayerOfType<InputLayer>,
&IsLayerOfType<PadLayer>,
checkSimplePool2d,
@@ -456,7 +457,7 @@ BOOST_AUTO_TEST_CASE(FoldPadLayerIntoPooling2dLayer_MaxPoolingLayerWithLargePadV
armnn::Optimizer::Pass(graph, MakeOptimizations(FoldPadIntoPooling2d()));
// The optimization should not have modified the graph.
- BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(),
+ CHECK(CheckSequence(graph.cbegin(), graph.cend(),
&IsLayerOfType<InputLayer>,
&IsLayerOfType<PadLayer>,
checkSimplePool2d,
@@ -464,7 +465,7 @@ BOOST_AUTO_TEST_CASE(FoldPadLayerIntoPooling2dLayer_MaxPoolingLayerWithLargePadV
}
#if defined(ARMNNREF_ENABLED)
-BOOST_AUTO_TEST_CASE(FoldPadLayerIntoPooling2dLayer_ExecuteInferenceWithAndWithoutOptimization)
+TEST_CASE("FoldPadLayerIntoPooling2dLayer_ExecuteInferenceWithAndWithoutOptimization")
{
    // The idea of this test is to run a simple pad+pool2d network twice. Once
// with FoldPadLayerIntoPooling2dLayer enabled and a second time with it
@@ -523,7 +524,7 @@ BOOST_AUTO_TEST_CASE(FoldPadLayerIntoPooling2dLayer_ExecuteInferenceWithAndWitho
IOptimizedNetworkPtr optimizedNetwork = Optimize(*network, {Compute::CpuRef}, run->GetDeviceSpec());
// Load network into runtime
NetworkId networkIdentifier;
- BOOST_TEST(run->LoadNetwork(networkIdentifier, std::move(optimizedNetwork)) == Status::Success);
+ CHECK(run->LoadNetwork(networkIdentifier, std::move(optimizedNetwork)) == Status::Success);
InputTensors inputTensors{{0, ConstTensor(run->GetInputTensorInfo(networkIdentifier, 0), inputData.data())}};
@@ -544,7 +545,7 @@ BOOST_AUTO_TEST_CASE(FoldPadLayerIntoPooling2dLayer_ExecuteInferenceWithAndWitho
// Optimize and load and execute it a second time.
optimizedNetwork = Optimize(*network, {Compute::CpuRef}, run->GetDeviceSpec());
- BOOST_TEST(run->LoadNetwork(networkIdentifier, std::move(optimizedNetwork)) == Status::Success);
+ CHECK(run->LoadNetwork(networkIdentifier, std::move(optimizedNetwork)) == Status::Success);
std::vector<float> goldenData(32, 0.0f);
std::vector<float> padOutputData(72, 0.0f);
OutputTensors goldenTensors{{0, Tensor(outputInfo, goldenData.data())},
@@ -552,7 +553,7 @@ BOOST_AUTO_TEST_CASE(FoldPadLayerIntoPooling2dLayer_ExecuteInferenceWithAndWitho
run->EnqueueWorkload(networkIdentifier, inputTensors, goldenTensors);
// Now we can compare goldenData against optimizedData. They should be the same.
- BOOST_TEST(std::equal(goldenData.begin(), goldenData.end(), optimizedData.begin()));
+ CHECK(std::equal(goldenData.begin(), goldenData.end(), optimizedData.begin()));
}
catch (const std::exception& e)
{
@@ -561,7 +562,7 @@ BOOST_AUTO_TEST_CASE(FoldPadLayerIntoPooling2dLayer_ExecuteInferenceWithAndWitho
}
}
-BOOST_AUTO_TEST_CASE(FoldPadLayerIntoConv2dLayer_ExecuteInferenceWithAndWithoutOptimization)
+TEST_CASE("FoldPadLayerIntoConv2dLayer_ExecuteInferenceWithAndWithoutOptimization")
{
    // The idea of this test is to run a simple pad+conv2d network twice. Once
// with FoldPadLayerIntoConv2dLayer enabled and a second time with it
@@ -641,7 +642,7 @@ BOOST_AUTO_TEST_CASE(FoldPadLayerIntoConv2dLayer_ExecuteInferenceWithAndWithoutO
IOptimizedNetworkPtr optimizedNetwork = Optimize(*network, {Compute::CpuRef}, run->GetDeviceSpec());
// Load network into runtime
NetworkId networkIdentifier;
- BOOST_TEST(run->LoadNetwork(networkIdentifier, std::move(optimizedNetwork)) == Status::Success);
+ CHECK(run->LoadNetwork(networkIdentifier, std::move(optimizedNetwork)) == Status::Success);
InputTensors inputTensors{{0, ConstTensor(run->GetInputTensorInfo(networkIdentifier, 0), inputData.data())}};
@@ -662,7 +663,7 @@ BOOST_AUTO_TEST_CASE(FoldPadLayerIntoConv2dLayer_ExecuteInferenceWithAndWithoutO
// Optimize and load and execute it a second time.
optimizedNetwork = Optimize(*network, {Compute::CpuRef}, run->GetDeviceSpec());
- BOOST_TEST(run->LoadNetwork(networkIdentifier, std::move(optimizedNetwork)) == Status::Success);
+ CHECK(run->LoadNetwork(networkIdentifier, std::move(optimizedNetwork)) == Status::Success);
std::vector<float> goldenData(100, 0.0f);
std::vector<float> padOutputData(108, 0.0f);
OutputTensors goldenTensors{{0, Tensor(outputInfo, goldenData.data())},
@@ -670,7 +671,7 @@ BOOST_AUTO_TEST_CASE(FoldPadLayerIntoConv2dLayer_ExecuteInferenceWithAndWithoutO
run->EnqueueWorkload(networkIdentifier, inputTensors, goldenTensors);
// Now we can compare goldenData against optimizedData. They should be the same.
- BOOST_TEST(std::equal(goldenData.begin(), goldenData.end(), optimizedData.begin()));
+ CHECK(std::equal(goldenData.begin(), goldenData.end(), optimizedData.begin()));
}
catch (const std::exception& e)
{
@@ -679,7 +680,7 @@ BOOST_AUTO_TEST_CASE(FoldPadLayerIntoConv2dLayer_ExecuteInferenceWithAndWithoutO
}
}
-BOOST_AUTO_TEST_CASE(FoldPadLayerIntoDepthwiseConv2dLayer_ExecuteInferenceWithAndWithoutOptimization)
+TEST_CASE("FoldPadLayerIntoDepthwiseConv2dLayer_ExecuteInferenceWithAndWithoutOptimization")
{
// The idea of this test to run a simple pad+depthwiseconv2d network twice. Once
// with FoldPadLayerIntoDeptwiseConv2dLayer enabled and a second time with it
@@ -759,7 +760,7 @@ BOOST_AUTO_TEST_CASE(FoldPadLayerIntoDepthwiseConv2dLayer_ExecuteInferenceWithAn
IOptimizedNetworkPtr optimizedNetwork = Optimize(*network, {Compute::CpuRef}, run->GetDeviceSpec());
// Load network into runtime
NetworkId networkIdentifier;
- BOOST_TEST(run->LoadNetwork(networkIdentifier, std::move(optimizedNetwork)) == Status::Success);
+ CHECK(run->LoadNetwork(networkIdentifier, std::move(optimizedNetwork)) == Status::Success);
InputTensors inputTensors{{0, ConstTensor(run->GetInputTensorInfo(networkIdentifier, 0), inputData.data())}};
@@ -780,7 +781,7 @@ BOOST_AUTO_TEST_CASE(FoldPadLayerIntoDepthwiseConv2dLayer_ExecuteInferenceWithAn
// Optimize and load and execute it a second time.
optimizedNetwork = Optimize(*network, {Compute::CpuRef}, run->GetDeviceSpec());
- BOOST_TEST(run->LoadNetwork(networkIdentifier, std::move(optimizedNetwork)) == Status::Success);
+ CHECK(run->LoadNetwork(networkIdentifier, std::move(optimizedNetwork)) == Status::Success);
std::vector<float> goldenData(300, 0.0f);
std::vector<float> padOutputData(108, 0.0f);
OutputTensors goldenTensors{{0, Tensor(outputInfo, goldenData.data())},
@@ -788,7 +789,7 @@ BOOST_AUTO_TEST_CASE(FoldPadLayerIntoDepthwiseConv2dLayer_ExecuteInferenceWithAn
run->EnqueueWorkload(networkIdentifier, inputTensors, goldenTensors);
// Now we can compare goldenData against optimizedData. They should be the same.
- BOOST_TEST(std::equal(goldenData.begin(), goldenData.end(), optimizedData.begin()));
+ CHECK(std::equal(goldenData.begin(), goldenData.end(), optimizedData.begin()));
}
catch (const std::exception& e)
{
@@ -798,4 +799,4 @@ BOOST_AUTO_TEST_CASE(FoldPadLayerIntoDepthwiseConv2dLayer_ExecuteInferenceWithAn
}
#endif
-BOOST_AUTO_TEST_SUITE_END() \ No newline at end of file
+} \ No newline at end of file
diff --git a/src/armnn/test/optimizations/Fp32NetworkToBf16ConverterTests.cpp b/src/armnn/test/optimizations/Fp32NetworkToBf16ConverterTests.cpp
index f93fa77b0d..384b14c0cf 100644
--- a/src/armnn/test/optimizations/Fp32NetworkToBf16ConverterTests.cpp
+++ b/src/armnn/test/optimizations/Fp32NetworkToBf16ConverterTests.cpp
@@ -7,12 +7,13 @@
#include <Optimizer.hpp>
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
-BOOST_AUTO_TEST_SUITE(Optimizer)
+TEST_SUITE("Optimizer")
+{
using namespace armnn::optimizations;
-BOOST_AUTO_TEST_CASE(Fp32NetworkToBf16OptimizationNoConversionTest)
+TEST_CASE("Fp32NetworkToBf16OptimizationNoConversionTest")
{
armnn::Graph graph;
@@ -31,18 +32,18 @@ BOOST_AUTO_TEST_CASE(Fp32NetworkToBf16OptimizationNoConversionTest)
input->GetOutputSlot().Connect(floor->GetInputSlot(0));
floor->GetOutputSlot().Connect(output->GetInputSlot(0));
- BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
+ CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
&IsLayerOfType<armnn::FloorLayer>, &IsLayerOfType<armnn::OutputLayer>));
// Run the optimizer
armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(Fp32NetworkToBf16Converter()));
- BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
+ CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
&IsLayerOfType<armnn::FloorLayer>,
&IsLayerOfType<armnn::OutputLayer>));
}
-BOOST_AUTO_TEST_CASE(Fp32NetworkToBf16OptimizationConv2DTest)
+TEST_CASE("Fp32NetworkToBf16OptimizationConv2DTest")
{
armnn::Graph graph;
@@ -82,37 +83,37 @@ BOOST_AUTO_TEST_CASE(Fp32NetworkToBf16OptimizationConv2DTest)
input->GetOutputSlot().Connect(conv->GetInputSlot(0));
conv->GetOutputSlot().Connect(output->GetInputSlot(0));
- BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
+ CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
&IsLayerOfType<armnn::Convolution2dLayer>, &IsLayerOfType<armnn::OutputLayer>));
// Run the optimizer
armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(Fp32NetworkToBf16Converter()));
- BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
+ CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
&IsLayerOfType<armnn::ConvertFp32ToBf16Layer>, &IsLayerOfType<armnn::Convolution2dLayer>,
&IsLayerOfType<armnn::OutputLayer>));
armnn::TensorInfo inputTensor = conv->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo();
armnn::TensorInfo outputTensor = conv->GetOutputSlot(0).GetTensorInfo();
- BOOST_TEST((conv->GetDataType() == armnn::DataType::BFloat16));
- BOOST_TEST((conv->m_Weight->GetTensorInfo().GetDataType() == armnn::DataType::BFloat16));
- BOOST_TEST((conv->m_Bias->GetTensorInfo().GetDataType() == armnn::DataType::Float32));
- BOOST_TEST((inputTensor.GetDataType() == armnn::DataType::BFloat16));
- BOOST_TEST((outputTensor.GetDataType() == armnn::DataType::Float32));
+ CHECK((conv->GetDataType() == armnn::DataType::BFloat16));
+ CHECK((conv->m_Weight->GetTensorInfo().GetDataType() == armnn::DataType::BFloat16));
+ CHECK((conv->m_Bias->GetTensorInfo().GetDataType() == armnn::DataType::Float32));
+ CHECK((inputTensor.GetDataType() == armnn::DataType::BFloat16));
+ CHECK((outputTensor.GetDataType() == armnn::DataType::Float32));
// Check whether data matches expected Bf16 data
const armnn::BFloat16* data = conv->m_Weight->GetConstTensor<armnn::BFloat16>();
- BOOST_CHECK(data[0] == armnn::BFloat16(0.0f));
- BOOST_CHECK(data[1] == armnn::BFloat16(-1.0f));
- BOOST_CHECK(data[2] == armnn::BFloat16(3.796875f)); // 0x4073
- BOOST_CHECK(data[3] == armnn::BFloat16(3.1072295E29f)); // 0x707B
- BOOST_CHECK(data[4] == armnn::BFloat16(9.131327E-10f)); // 0x307B
- BOOST_CHECK(data[5] == armnn::BFloat16(-3.796875f)); // 0xC073
- BOOST_CHECK(data[6] == armnn::BFloat16(-3.1072295E29f)); // 0xF07B
- BOOST_CHECK(data[7] == armnn::BFloat16(-9.131327E-10f)); // 0xB07B
+ CHECK(data[0] == armnn::BFloat16(0.0f));
+ CHECK(data[1] == armnn::BFloat16(-1.0f));
+ CHECK(data[2] == armnn::BFloat16(3.796875f)); // 0x4073
+ CHECK(data[3] == armnn::BFloat16(3.1072295E29f)); // 0x707B
+ CHECK(data[4] == armnn::BFloat16(9.131327E-10f)); // 0x307B
+ CHECK(data[5] == armnn::BFloat16(-3.796875f)); // 0xC073
+ CHECK(data[6] == armnn::BFloat16(-3.1072295E29f)); // 0xF07B
+ CHECK(data[7] == armnn::BFloat16(-9.131327E-10f)); // 0xB07B
}
-BOOST_AUTO_TEST_CASE(Fp32NetworkToBf16OptimizationFullyConnectedTest)
+TEST_CASE("Fp32NetworkToBf16OptimizationFullyConnectedTest")
{
armnn::Graph graph;
@@ -152,35 +153,35 @@ BOOST_AUTO_TEST_CASE(Fp32NetworkToBf16OptimizationFullyConnectedTest)
input->GetOutputSlot().Connect(fc->GetInputSlot(0));
fc->GetOutputSlot().Connect(output->GetInputSlot(0));
- BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
+ CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
&IsLayerOfType<armnn::FullyConnectedLayer>, &IsLayerOfType<armnn::OutputLayer>));
// Run the optimizer
armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(Fp32NetworkToBf16Converter()));
- BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
+ CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
&IsLayerOfType<armnn::ConvertFp32ToBf16Layer>, &IsLayerOfType<armnn::FullyConnectedLayer>,
&IsLayerOfType<armnn::OutputLayer>));
armnn::TensorInfo inputTensor = fc->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo();
armnn::TensorInfo outputTensor = fc->GetOutputSlot(0).GetTensorInfo();
- BOOST_TEST((fc->GetDataType() == armnn::DataType::BFloat16));
- BOOST_TEST((fc->m_Weight->GetTensorInfo().GetDataType() == armnn::DataType::BFloat16));
- BOOST_TEST((fc->m_Bias->GetTensorInfo().GetDataType() == armnn::DataType::Float32));
- BOOST_TEST((inputTensor.GetDataType() == armnn::DataType::BFloat16));
- BOOST_TEST((outputTensor.GetDataType() == armnn::DataType::Float32));
+ CHECK((fc->GetDataType() == armnn::DataType::BFloat16));
+ CHECK((fc->m_Weight->GetTensorInfo().GetDataType() == armnn::DataType::BFloat16));
+ CHECK((fc->m_Bias->GetTensorInfo().GetDataType() == armnn::DataType::Float32));
+ CHECK((inputTensor.GetDataType() == armnn::DataType::BFloat16));
+ CHECK((outputTensor.GetDataType() == armnn::DataType::Float32));
// Check whether data matches expected Bf16 data
const armnn::BFloat16* data = fc->m_Weight->GetConstTensor<armnn::BFloat16>();
- BOOST_CHECK(data[0] == armnn::BFloat16(0.0f));
- BOOST_CHECK(data[1] == armnn::BFloat16(-1.0f));
- BOOST_CHECK(data[2] == armnn::BFloat16(3.796875f)); // 0x4073
- BOOST_CHECK(data[3] == armnn::BFloat16(3.1072295E29f)); // 0x707B
- BOOST_CHECK(data[4] == armnn::BFloat16(9.131327E-10f)); // 0x307B
- BOOST_CHECK(data[5] == armnn::BFloat16(-3.796875f)); // 0xC073
- BOOST_CHECK(data[6] == armnn::BFloat16(-3.1072295E29f)); // 0xF07B
- BOOST_CHECK(data[7] == armnn::BFloat16(-9.131327E-10f)); // 0xB07B
+ CHECK(data[0] == armnn::BFloat16(0.0f));
+ CHECK(data[1] == armnn::BFloat16(-1.0f));
+ CHECK(data[2] == armnn::BFloat16(3.796875f)); // 0x4073
+ CHECK(data[3] == armnn::BFloat16(3.1072295E29f)); // 0x707B
+ CHECK(data[4] == armnn::BFloat16(9.131327E-10f)); // 0x307B
+ CHECK(data[5] == armnn::BFloat16(-3.796875f)); // 0xC073
+ CHECK(data[6] == armnn::BFloat16(-3.1072295E29f)); // 0xF07B
+ CHECK(data[7] == armnn::BFloat16(-9.131327E-10f)); // 0xB07B
}
-BOOST_AUTO_TEST_SUITE_END() \ No newline at end of file
+} \ No newline at end of file
diff --git a/src/armnn/test/optimizations/Fp32NetworkToFp16ConverterTests.cpp b/src/armnn/test/optimizations/Fp32NetworkToFp16ConverterTests.cpp
index 16037a8c0f..e2ac1bd69e 100644
--- a/src/armnn/test/optimizations/Fp32NetworkToFp16ConverterTests.cpp
+++ b/src/armnn/test/optimizations/Fp32NetworkToFp16ConverterTests.cpp
@@ -7,12 +7,13 @@
#include <Optimizer.hpp>
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
-BOOST_AUTO_TEST_SUITE(Optimizer)
+TEST_SUITE("Optimizer")
+{
using namespace armnn::optimizations;
-BOOST_AUTO_TEST_CASE(Fp32NetworkToFp16OptimizationTest)
+TEST_CASE("Fp32NetworkToFp16OptimizationTest")
{
armnn::Graph graph;
@@ -31,15 +32,15 @@ BOOST_AUTO_TEST_CASE(Fp32NetworkToFp16OptimizationTest)
input->GetOutputSlot().Connect(floor->GetInputSlot(0));
floor->GetOutputSlot().Connect(output->GetInputSlot(0));
- BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
+ CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
&IsLayerOfType<armnn::FloorLayer>, &IsLayerOfType<armnn::OutputLayer>));
// Run the optimizer
armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(Fp32NetworkToFp16Converter()));
- BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
+ CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
&IsLayerOfType<armnn::ConvertFp32ToFp16Layer>, &IsLayerOfType<armnn::FloorLayer>,
&IsLayerOfType<armnn::ConvertFp16ToFp32Layer>, &IsLayerOfType<armnn::OutputLayer>));
}
-BOOST_AUTO_TEST_SUITE_END() \ No newline at end of file
+} \ No newline at end of file
diff --git a/src/armnn/test/optimizations/FuseActivationTests.cpp b/src/armnn/test/optimizations/FuseActivationTests.cpp
index 71a554b567..9e332136f6 100644
--- a/src/armnn/test/optimizations/FuseActivationTests.cpp
+++ b/src/armnn/test/optimizations/FuseActivationTests.cpp
@@ -10,15 +10,15 @@
#include <armnn/INetwork.hpp>
#include <test/TestUtils.hpp>
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
#include <QuantizeHelper.hpp>
#include <string>
using namespace armnn;
-BOOST_AUTO_TEST_SUITE(Optimizer)
-
+TEST_SUITE("Optimizer")
+{
namespace armnn
{
@@ -352,8 +352,8 @@ void FuseActivationIntoPreviousLayerTest(ActivationDescriptor activationDescript
(layer->GetNameStr() == "fused-activation-into-receiverLayer");
};
- BOOST_CHECK(3 == graphFused.GetNumLayers());
- BOOST_TEST(CheckSequence(graphFused.cbegin(),
+ CHECK(3 == graphFused.GetNumLayers());
+ CHECK(CheckSequence(graphFused.cbegin(),
graphFused.cend(),
&IsLayerOfType<InputLayer>,
checkFusedConv2d,
@@ -361,7 +361,7 @@ void FuseActivationIntoPreviousLayerTest(ActivationDescriptor activationDescript
// Load network into runtime
NetworkId networkIdentifier;
- BOOST_TEST(run->LoadNetwork(networkIdentifier, std::move(optNetFused)) == Status::Success);
+ CHECK(run->LoadNetwork(networkIdentifier, std::move(optNetFused)) == Status::Success);
//Creates structures for inputs and outputs.
std::vector<float> data = GetVector<float>(LayerTest::inputSize, 1.0f, 0.1f);
@@ -374,7 +374,7 @@ void FuseActivationIntoPreviousLayerTest(ActivationDescriptor activationDescript
{0, Tensor(run->GetOutputTensorInfo(networkIdentifier, 0), outputDataFused.data())}};
// Execute network
- BOOST_TEST(run->EnqueueWorkload(networkIdentifier, inputTensorsFused, outputTensorsFused) == Status::Success);
+ CHECK(run->EnqueueWorkload(networkIdentifier, inputTensorsFused, outputTensorsFused) == Status::Success);
// SECOND NETWORK: NotFused
// Construct ArmNN network
@@ -388,8 +388,8 @@ void FuseActivationIntoPreviousLayerTest(ActivationDescriptor activationDescript
Graph& graphNotFused = GetGraphForTesting(optNetNotFused.get());
- BOOST_CHECK(5 == graphNotFused.GetNumLayers());
- BOOST_TEST(CheckSequence(graphNotFused.cbegin(),
+ CHECK(5 == graphNotFused.GetNumLayers());
+ CHECK(CheckSequence(graphNotFused.cbegin(),
graphNotFused.cend(),
&IsLayerOfType<InputLayer>,
&IsLayerOfType<LayerType>,
@@ -399,7 +399,7 @@ void FuseActivationIntoPreviousLayerTest(ActivationDescriptor activationDescript
// Load network into runtime
NetworkId networkIdentifierNotFused;
- BOOST_TEST(runNotFused->LoadNetwork(networkIdentifierNotFused, std::move(optNetNotFused)) == Status::Success);
+ CHECK(runNotFused->LoadNetwork(networkIdentifierNotFused, std::move(optNetNotFused)) == Status::Success);
//Creates structures for inputs and outputs.
std::vector<T> inputDataNotFused = armnnUtils::QuantizedVector<T>(data, scale, offset);
@@ -413,14 +413,14 @@ void FuseActivationIntoPreviousLayerTest(ActivationDescriptor activationDescript
{1, Tensor(runNotFused->GetOutputTensorInfo(networkIdentifierNotFused, 1), outputData2NotFused.data())}};
// Execute network
- BOOST_TEST(runNotFused->EnqueueWorkload(networkIdentifierNotFused, inputTensorsNotFused, outputTensorsNotFused)
+ CHECK(runNotFused->EnqueueWorkload(networkIdentifierNotFused, inputTensorsNotFused, outputTensorsNotFused)
== Status::Success);
// Check the output of the fused-activation matches with the output of the activation in the "NotFused" network
for (unsigned int n = 0; n < outputDataFused.size(); ++n)
{
- BOOST_CHECK_CLOSE(static_cast<float>(outputDataFused[n]), static_cast<float>(outputDataNotFused[n]),
- T(tolerance));
+ auto outputNotFused = static_cast<float>(outputDataNotFused[n]);
+ CHECK(static_cast<float>(outputDataFused[n]) == doctest::Approx(outputNotFused).epsilon(tolerance));
}
}
@@ -445,7 +445,7 @@ bool FuseActivationSimpleTest(ActivationDescriptor activationDescriptor, Compute
// Load network into runtime
NetworkId networkIdentifier;
- BOOST_TEST(run->LoadNetwork(networkIdentifier, std::move(optNetFused)) == Status::Success);
+ CHECK(run->LoadNetwork(networkIdentifier, std::move(optNetFused)) == Status::Success);
//Creates structures for inputs and outputs.
std::vector<float> data = GetVector<float>(LayerTest::inputSize, 1.0f, 0.1f);
@@ -476,7 +476,7 @@ bool FuseActivationSimpleTest(ActivationDescriptor activationDescriptor, Compute
using namespace armnn;
#if defined(ARMCOMPUTENEON_ENABLED)
// ReLu fused into Receiver Layers Float32
-BOOST_AUTO_TEST_CASE(FuseReLUIntoConvFloat32CpuAccTest)
+TEST_CASE("FuseReLUIntoConvFloat32CpuAccTest")
{
ActivationDescriptor activationDescriptor;
activationDescriptor.m_Function = ActivationFunction::ReLu;
@@ -484,7 +484,7 @@ BOOST_AUTO_TEST_CASE(FuseReLUIntoConvFloat32CpuAccTest)
FuseActivationIntoPreviousLayerTest<Convolution2dTest<DataType::Float32>, DataType::Float32>
(activationDescriptor, 0.0001f, Compute::CpuAcc);
}
-BOOST_AUTO_TEST_CASE(FuseReLUIntoDWConvFloat32CpuAccTest)
+TEST_CASE("FuseReLUIntoDWConvFloat32CpuAccTest")
{
ActivationDescriptor activationDescriptor;
activationDescriptor.m_Function = ActivationFunction::ReLu;
@@ -492,7 +492,7 @@ BOOST_AUTO_TEST_CASE(FuseReLUIntoDWConvFloat32CpuAccTest)
FuseActivationIntoPreviousLayerTest<DWConvolution2dTest<DataType::Float32>, DataType::Float32>
(activationDescriptor, 0.0001f, Compute::CpuAcc);
}
-BOOST_AUTO_TEST_CASE(FuseReLUIntoFullyConnectedFloat32CpuAccTest)
+TEST_CASE("FuseReLUIntoFullyConnectedFloat32CpuAccTest")
{
ActivationDescriptor activationDescriptor;
activationDescriptor.m_Function = ActivationFunction::ReLu;
@@ -500,7 +500,7 @@ BOOST_AUTO_TEST_CASE(FuseReLUIntoFullyConnectedFloat32CpuAccTest)
FuseActivationIntoPreviousLayerTest<FullyConnectedTest<DataType::Float32>, DataType::Float32>
(activationDescriptor, 0.0001f, Compute::CpuAcc);
}
-BOOST_AUTO_TEST_CASE(FuseReLUIntoBatchNormFloat32CpuAccTest)
+TEST_CASE("FuseReLUIntoBatchNormFloat32CpuAccTest")
{
ActivationDescriptor activationDescriptor;
activationDescriptor.m_Function = ActivationFunction::ReLu;
@@ -510,7 +510,7 @@ BOOST_AUTO_TEST_CASE(FuseReLUIntoBatchNormFloat32CpuAccTest)
}
// BoundedReLu fused into Receiver Layers Float32
-BOOST_AUTO_TEST_CASE(FuseBoundedReLUIntoConvFloat32CpuAccTest)
+TEST_CASE("FuseBoundedReLUIntoConvFloat32CpuAccTest")
{
ActivationDescriptor activationDescriptor;
activationDescriptor.m_Function = ActivationFunction::BoundedReLu;
@@ -520,7 +520,7 @@ BOOST_AUTO_TEST_CASE(FuseBoundedReLUIntoConvFloat32CpuAccTest)
FuseActivationIntoPreviousLayerTest<Convolution2dTest<DataType::Float32>, DataType::Float32>
(activationDescriptor, 0.0001f, Compute::CpuAcc);
}
-BOOST_AUTO_TEST_CASE(FuseBoundedReLUIntoDWConvFloat32CpuAccTest)
+TEST_CASE("FuseBoundedReLUIntoDWConvFloat32CpuAccTest")
{
ActivationDescriptor activationDescriptor;
activationDescriptor.m_Function = ActivationFunction::BoundedReLu;
@@ -530,7 +530,7 @@ BOOST_AUTO_TEST_CASE(FuseBoundedReLUIntoDWConvFloat32CpuAccTest)
FuseActivationIntoPreviousLayerTest < DWConvolution2dTest < DataType::Float32 > , DataType::Float32 >
(activationDescriptor, 0.0001f, Compute::CpuAcc);
}
-BOOST_AUTO_TEST_CASE(FuseBoundedReLUIntoFullyConnectedFloat32CpuAccTest)
+TEST_CASE("FuseBoundedReLUIntoFullyConnectedFloat32CpuAccTest")
{
ActivationDescriptor activationDescriptor;
activationDescriptor.m_Function = ActivationFunction::BoundedReLu;
@@ -540,7 +540,7 @@ BOOST_AUTO_TEST_CASE(FuseBoundedReLUIntoFullyConnectedFloat32CpuAccTest)
FuseActivationIntoPreviousLayerTest<FullyConnectedTest<DataType::Float32>, DataType::Float32>
(activationDescriptor, 0.0001f, Compute::CpuAcc);
}
-BOOST_AUTO_TEST_CASE(FuseBoundedReLUIntoBatchNormFloat32CpuAccTest)
+TEST_CASE("FuseBoundedReLUIntoBatchNormFloat32CpuAccTest")
{
ActivationDescriptor activationDescriptor;
activationDescriptor.m_Function = ActivationFunction::BoundedReLu;
@@ -552,7 +552,7 @@ BOOST_AUTO_TEST_CASE(FuseBoundedReLUIntoBatchNormFloat32CpuAccTest)
}
// ReLU fused into Receiver Layers QAsymmU8
-BOOST_AUTO_TEST_CASE(FuseReLUIntoConvQAsymmU8CpuAccTest)
+TEST_CASE("FuseReLUIntoConvQAsymmU8CpuAccTest")
{
ActivationDescriptor activationDescriptor;
activationDescriptor.m_Function = ActivationFunction::ReLu;
@@ -560,7 +560,7 @@ BOOST_AUTO_TEST_CASE(FuseReLUIntoConvQAsymmU8CpuAccTest)
FuseActivationIntoPreviousLayerTest<Convolution2dTest<DataType::QAsymmU8>, DataType::QAsymmU8>
(activationDescriptor, 0.0001f, Compute::CpuAcc);
}
-BOOST_AUTO_TEST_CASE(FuseReLUIntoDWConvQAsymmU8CpuAccTest)
+TEST_CASE("FuseReLUIntoDWConvQAsymmU8CpuAccTest")
{
ActivationDescriptor activationDescriptor;
activationDescriptor.m_Function = ActivationFunction::ReLu;
@@ -568,7 +568,7 @@ BOOST_AUTO_TEST_CASE(FuseReLUIntoDWConvQAsymmU8CpuAccTest)
FuseActivationIntoPreviousLayerTest<DWConvolution2dTest<DataType::QAsymmU8>, DataType::QAsymmU8>
(activationDescriptor, 0.0001f, Compute::CpuAcc);
}
-BOOST_AUTO_TEST_CASE(FuseReLUIntoFullyConnectedQAsymmU8CpuAccTest)
+TEST_CASE("FuseReLUIntoFullyConnectedQAsymmU8CpuAccTest")
{
ActivationDescriptor activationDescriptor;
activationDescriptor.m_Function = ActivationFunction::ReLu;
@@ -578,7 +578,7 @@ BOOST_AUTO_TEST_CASE(FuseReLUIntoFullyConnectedQAsymmU8CpuAccTest)
}
// BoundedReLu fused into Receiver Layers QAsymmS8
-BOOST_AUTO_TEST_CASE(FuseBoundedReLUIntoConvQASymmS8CpuAccTest)
+TEST_CASE("FuseBoundedReLUIntoConvQASymmS8CpuAccTest")
{
ActivationDescriptor activationDescriptor;
activationDescriptor.m_Function = ActivationFunction::BoundedReLu;
@@ -588,7 +588,7 @@ BOOST_AUTO_TEST_CASE(FuseBoundedReLUIntoConvQASymmS8CpuAccTest)
FuseActivationIntoPreviousLayerTest<Convolution2dTest<DataType::QAsymmS8>, DataType::QAsymmS8>
(activationDescriptor, 0.0001f, Compute::CpuAcc);
}
-BOOST_AUTO_TEST_CASE(FuseBoundedReLUIntoDWConvQASymmS8CpuAccTest)
+TEST_CASE("FuseBoundedReLUIntoDWConvQASymmS8CpuAccTest")
{
ActivationDescriptor activationDescriptor;
activationDescriptor.m_Function = ActivationFunction::BoundedReLu;
@@ -598,7 +598,7 @@ BOOST_AUTO_TEST_CASE(FuseBoundedReLUIntoDWConvQASymmS8CpuAccTest)
FuseActivationIntoPreviousLayerTest < DWConvolution2dTest < DataType::QAsymmS8 > , DataType::QAsymmS8 >
(activationDescriptor, 0.0001f, Compute::CpuAcc);
}
-BOOST_AUTO_TEST_CASE(FuseBoundedReLUIntoFullyConnectedQASymmS8CpuAccTest)
+TEST_CASE("FuseBoundedReLUIntoFullyConnectedQASymmS8CpuAccTest")
{
ActivationDescriptor activationDescriptor;
activationDescriptor.m_Function = ActivationFunction::BoundedReLu;
@@ -610,7 +610,7 @@ BOOST_AUTO_TEST_CASE(FuseBoundedReLUIntoFullyConnectedQASymmS8CpuAccTest)
}
// TanH fused into Receiver Layers Float32
-BOOST_AUTO_TEST_CASE(FuseTanHIntoConvFloat32CpuAccTest)
+TEST_CASE("FuseTanHIntoConvFloat32CpuAccTest")
{
ActivationDescriptor activationDescriptor;
activationDescriptor.m_Function = ActivationFunction::TanH;
@@ -620,7 +620,7 @@ BOOST_AUTO_TEST_CASE(FuseTanHIntoConvFloat32CpuAccTest)
}
// HardSwish fused into Receiver Layers Float32
-BOOST_AUTO_TEST_CASE(FuseHardSwishIntoConvFloat32CpuAccTest)
+TEST_CASE("FuseHardSwishIntoConvFloat32CpuAccTest")
{
ActivationDescriptor activationDescriptor;
activationDescriptor.m_Function = ActivationFunction::HardSwish;
@@ -630,7 +630,7 @@ BOOST_AUTO_TEST_CASE(FuseHardSwishIntoConvFloat32CpuAccTest)
}
// Test that all receiver layers follow by all activation layers work, either fused or not fused
-BOOST_AUTO_TEST_CASE(LayerFollowedByActivationFloat32CpuAccTest)
+TEST_CASE("LayerFollowedByActivationFloat32CpuAccTest")
{
ActivationDescriptor activationDescriptor;
for (int i = 0; i != 12; ++i)
@@ -638,17 +638,17 @@ BOOST_AUTO_TEST_CASE(LayerFollowedByActivationFloat32CpuAccTest)
activationDescriptor.m_Function = static_cast<ActivationFunction>(i);
activationDescriptor.m_A = 1.0f;
activationDescriptor.m_B = -1.0f;
- BOOST_CHECK_MESSAGE((FuseActivationSimpleTest<Convolution2dTest<DataType::Float32>, DataType::Float32>
+ CHECK_MESSAGE((FuseActivationSimpleTest<Convolution2dTest<DataType::Float32>, DataType::Float32>
(activationDescriptor, Compute::CpuAcc)), "Convolution + Activation function " << i);
- BOOST_CHECK_MESSAGE((FuseActivationSimpleTest<DWConvolution2dTest<DataType::Float32>, DataType::Float32>
+ CHECK_MESSAGE((FuseActivationSimpleTest<DWConvolution2dTest<DataType::Float32>, DataType::Float32>
(activationDescriptor, Compute::CpuAcc)), "DepthwiseConvolution + Activation function " << i);
- BOOST_CHECK_MESSAGE((FuseActivationSimpleTest<FullyConnectedTest<DataType::Float32>, DataType::Float32>
+ CHECK_MESSAGE((FuseActivationSimpleTest<FullyConnectedTest<DataType::Float32>, DataType::Float32>
(activationDescriptor, Compute::CpuAcc)), "FullyConnected + Activation function " << i);
- BOOST_CHECK_MESSAGE((FuseActivationSimpleTest<BatchNormTest<DataType::Float32>, DataType::Float32>
+ CHECK_MESSAGE((FuseActivationSimpleTest<BatchNormTest<DataType::Float32>, DataType::Float32>
(activationDescriptor, Compute::CpuAcc)), "BatchNorm + Activation function " << i);
}
}
-BOOST_AUTO_TEST_CASE(LayerFollowedByActivationFloat16CpuAccTest)
+TEST_CASE("LayerFollowedByActivationFloat16CpuAccTest")
{
ActivationDescriptor activationDescriptor;
for (int i = 0; i != 12; ++i)
@@ -656,59 +656,59 @@ BOOST_AUTO_TEST_CASE(LayerFollowedByActivationFloat16CpuAccTest)
activationDescriptor.m_Function = static_cast<ActivationFunction>(i);
activationDescriptor.m_A = 1.0f;
activationDescriptor.m_B = -1.0f;
- BOOST_CHECK_MESSAGE((FuseActivationSimpleTest<Convolution2dTest<DataType::Float16>, DataType::Float16>
+ CHECK_MESSAGE((FuseActivationSimpleTest<Convolution2dTest<DataType::Float16>, DataType::Float16>
(activationDescriptor, Compute::CpuAcc)), "Convolution + Activation function " << i);
- BOOST_CHECK_MESSAGE((FuseActivationSimpleTest<DWConvolution2dTest<DataType::Float16>, DataType::Float16>
+ CHECK_MESSAGE((FuseActivationSimpleTest<DWConvolution2dTest<DataType::Float16>, DataType::Float16>
(activationDescriptor, Compute::CpuAcc)), "DepthwiseConvolution + Activation function " << i);
- BOOST_CHECK_MESSAGE((FuseActivationSimpleTest<FullyConnectedTest<DataType::Float16>, DataType::Float16>
+ CHECK_MESSAGE((FuseActivationSimpleTest<FullyConnectedTest<DataType::Float16>, DataType::Float16>
(activationDescriptor, Compute::CpuAcc)), "FullyConnected + Activation function " << i);
- BOOST_CHECK_MESSAGE((FuseActivationSimpleTest<BatchNormTest<DataType::Float16>, DataType::Float16>
+ CHECK_MESSAGE((FuseActivationSimpleTest<BatchNormTest<DataType::Float16>, DataType::Float16>
(activationDescriptor, Compute::CpuAcc)), "BatchNorm + Activation function " << i);
}
}
-BOOST_AUTO_TEST_CASE(LayerFollowedByActivationQAsymmU8CpuAccTest)
+TEST_CASE("LayerFollowedByActivationQAsymmU8CpuAccTest")
{
ActivationDescriptor activationDescriptor;
activationDescriptor.m_Function = ActivationFunction::Sigmoid;
- BOOST_CHECK_MESSAGE((FuseActivationSimpleTest<Convolution2dTest<DataType::QAsymmU8>, DataType::QAsymmU8>
+ CHECK_MESSAGE((FuseActivationSimpleTest<Convolution2dTest<DataType::QAsymmU8>, DataType::QAsymmU8>
(activationDescriptor, Compute::CpuAcc, 1.f / 256.f, 0)), "Convolution + Activation function " <<
static_cast<int>(activationDescriptor.m_Function));
- BOOST_CHECK_MESSAGE((FuseActivationSimpleTest<FullyConnectedTest<DataType::QAsymmU8>, DataType::QAsymmU8>
+ CHECK_MESSAGE((FuseActivationSimpleTest<FullyConnectedTest<DataType::QAsymmU8>, DataType::QAsymmU8>
(activationDescriptor, Compute::CpuAcc, 1.f / 256.f, 0)), "FullyConnected + Activation function " <<
static_cast<int>(activationDescriptor.m_Function));
activationDescriptor.m_Function = ActivationFunction::TanH;
- BOOST_CHECK_MESSAGE((FuseActivationSimpleTest<Convolution2dTest<DataType::QAsymmU8>, DataType::QAsymmU8>
+ CHECK_MESSAGE((FuseActivationSimpleTest<Convolution2dTest<DataType::QAsymmU8>, DataType::QAsymmU8>
(activationDescriptor, Compute::CpuAcc, 1.f / 128.f, 128)), "Convolution + Activation function " <<
static_cast<int>(activationDescriptor.m_Function));
- BOOST_CHECK_MESSAGE((FuseActivationSimpleTest<FullyConnectedTest<DataType::QAsymmU8>, DataType::QAsymmU8>
+ CHECK_MESSAGE((FuseActivationSimpleTest<FullyConnectedTest<DataType::QAsymmU8>, DataType::QAsymmU8>
(activationDescriptor, Compute::CpuAcc, 1.f / 128.f, 128)), "FullyConnected + Activation function " <<
static_cast<int>(activationDescriptor.m_Function));
activationDescriptor.m_Function = ActivationFunction::ReLu;
- BOOST_CHECK_MESSAGE((FuseActivationSimpleTest<Convolution2dTest<DataType::QAsymmU8>, DataType::QAsymmU8>
+ CHECK_MESSAGE((FuseActivationSimpleTest<Convolution2dTest<DataType::QAsymmU8>, DataType::QAsymmU8>
(activationDescriptor, Compute::CpuAcc)), "Convolution + Activation function " <<
static_cast<int>(activationDescriptor.m_Function));
- BOOST_CHECK_MESSAGE((FuseActivationSimpleTest<FullyConnectedTest<DataType::QAsymmU8>, DataType::QAsymmU8>
+ CHECK_MESSAGE((FuseActivationSimpleTest<FullyConnectedTest<DataType::QAsymmU8>, DataType::QAsymmU8>
(activationDescriptor, Compute::CpuAcc)), "FullyConnected + Activation function " <<
static_cast<int>(activationDescriptor.m_Function));
activationDescriptor.m_Function = ActivationFunction::BoundedReLu;
activationDescriptor.m_A = 1.0f;
activationDescriptor.m_B = -1.0f;
- BOOST_CHECK_MESSAGE((FuseActivationSimpleTest<Convolution2dTest<DataType::QAsymmU8>, DataType::QAsymmU8>
+ CHECK_MESSAGE((FuseActivationSimpleTest<Convolution2dTest<DataType::QAsymmU8>, DataType::QAsymmU8>
(activationDescriptor, Compute::CpuAcc)), "Convolution + Activation function " <<
static_cast<int>(activationDescriptor.m_Function));
- BOOST_CHECK_MESSAGE((FuseActivationSimpleTest<FullyConnectedTest<DataType::QAsymmU8>, DataType::QAsymmU8>
+ CHECK_MESSAGE((FuseActivationSimpleTest<FullyConnectedTest<DataType::QAsymmU8>, DataType::QAsymmU8>
(activationDescriptor, Compute::CpuAcc)), "FullyConnected + Activation function " <<
static_cast<int>(activationDescriptor.m_Function));
activationDescriptor.m_Function = ActivationFunction::HardSwish;
- BOOST_CHECK_MESSAGE((FuseActivationSimpleTest<Convolution2dTest<DataType::QAsymmU8>, DataType::QAsymmU8>
+ CHECK_MESSAGE((FuseActivationSimpleTest<Convolution2dTest<DataType::QAsymmU8>, DataType::QAsymmU8>
(activationDescriptor, Compute::CpuAcc)), "Convolution + Activation function " <<
static_cast<int>(activationDescriptor.m_Function));
- BOOST_CHECK_MESSAGE((FuseActivationSimpleTest<FullyConnectedTest<DataType::QAsymmU8>, DataType::QAsymmU8>
+ CHECK_MESSAGE((FuseActivationSimpleTest<FullyConnectedTest<DataType::QAsymmU8>, DataType::QAsymmU8>
(activationDescriptor, Compute::CpuAcc)), "FullyConnected + Activation function " <<
static_cast<int>(activationDescriptor.m_Function));
}
@@ -716,7 +716,7 @@ BOOST_AUTO_TEST_CASE(LayerFollowedByActivationQAsymmU8CpuAccTest)
#if defined(ARMCOMPUTECL_ENABLED)
// ReLu fused into Receiver Layers Float32
-BOOST_AUTO_TEST_CASE(FuseReLUIntoConvFloat32GpuAccTest)
+TEST_CASE("FuseReLUIntoConvFloat32GpuAccTest")
{
ActivationDescriptor activationDescriptor;
activationDescriptor.m_Function = ActivationFunction::ReLu;
@@ -724,7 +724,7 @@ BOOST_AUTO_TEST_CASE(FuseReLUIntoConvFloat32GpuAccTest)
FuseActivationIntoPreviousLayerTest<Convolution2dTest<DataType::Float32>, DataType::Float32>
(activationDescriptor, 0.0001f, Compute::GpuAcc);
}
-BOOST_AUTO_TEST_CASE(FuseReLUIntoDWConvFloat32GpuAccTest)
+TEST_CASE("FuseReLUIntoDWConvFloat32GpuAccTest")
{
ActivationDescriptor activationDescriptor;
activationDescriptor.m_Function = ActivationFunction::ReLu;
@@ -732,7 +732,7 @@ BOOST_AUTO_TEST_CASE(FuseReLUIntoDWConvFloat32GpuAccTest)
FuseActivationIntoPreviousLayerTest<DWConvolution2dTest<DataType::Float32>, DataType::Float32>
(activationDescriptor, 0.0001f, Compute::GpuAcc);
}
-BOOST_AUTO_TEST_CASE(FuseReLUIntoFullyConnectedFloat32GpuAccTest)
+TEST_CASE("FuseReLUIntoFullyConnectedFloat32GpuAccTest")
{
ActivationDescriptor activationDescriptor;
activationDescriptor.m_Function = ActivationFunction::ReLu;
@@ -740,7 +740,7 @@ BOOST_AUTO_TEST_CASE(FuseReLUIntoFullyConnectedFloat32GpuAccTest)
FuseActivationIntoPreviousLayerTest<FullyConnectedTest<DataType::Float32>, DataType::Float32>
(activationDescriptor, 0.0001f, Compute::GpuAcc);
}
-BOOST_AUTO_TEST_CASE(FuseReLUIntoBatchNormFloat32GpuAccTest)
+TEST_CASE("FuseReLUIntoBatchNormFloat32GpuAccTest")
{
ActivationDescriptor activationDescriptor;
activationDescriptor.m_Function = ActivationFunction::ReLu;
@@ -748,7 +748,7 @@ BOOST_AUTO_TEST_CASE(FuseReLUIntoBatchNormFloat32GpuAccTest)
FuseActivationIntoPreviousLayerTest<BatchNormTest<DataType::Float32>, DataType::Float32>
(activationDescriptor, 0.0001f, Compute::GpuAcc);
}
-BOOST_AUTO_TEST_CASE(FuseReLUIntoMulFloat32GpuAccTest)
+TEST_CASE("FuseReLUIntoMulFloat32GpuAccTest")
{
ActivationDescriptor activationDescriptor;
activationDescriptor.m_Function = ActivationFunction::ReLu;
@@ -756,7 +756,7 @@ BOOST_AUTO_TEST_CASE(FuseReLUIntoMulFloat32GpuAccTest)
FuseActivationIntoPreviousLayerTest<MultiplicationTest<DataType::Float32>, DataType::Float32>
(activationDescriptor, 0.0001f, Compute::GpuAcc);
}
-BOOST_AUTO_TEST_CASE(FuseReLUIntoAddFloat32GpuAccTest)
+TEST_CASE("FuseReLUIntoAddFloat32GpuAccTest")
{
ActivationDescriptor activationDescriptor;
activationDescriptor.m_Function = ActivationFunction::ReLu;
@@ -764,7 +764,7 @@ BOOST_AUTO_TEST_CASE(FuseReLUIntoAddFloat32GpuAccTest)
FuseActivationIntoPreviousLayerTest<AdditionTest<DataType::Float32>, DataType::Float32>
(activationDescriptor, 0.0001f, Compute::GpuAcc);
}
-BOOST_AUTO_TEST_CASE(FuseReLUIntoSubFloat32GpuAccTest)
+TEST_CASE("FuseReLUIntoSubFloat32GpuAccTest")
{
ActivationDescriptor activationDescriptor;
activationDescriptor.m_Function = ActivationFunction::ReLu;
@@ -772,7 +772,7 @@ BOOST_AUTO_TEST_CASE(FuseReLUIntoSubFloat32GpuAccTest)
FuseActivationIntoPreviousLayerTest<SubtractionTest<DataType::Float32>, DataType::Float32>
(activationDescriptor, 0.0001f, Compute::GpuAcc);
}
-BOOST_AUTO_TEST_CASE(FuseReLUIntoDivFloat32GpuAccTest)
+TEST_CASE("FuseReLUIntoDivFloat32GpuAccTest")
{
ActivationDescriptor activationDescriptor;
activationDescriptor.m_Function = ActivationFunction::ReLu;
@@ -782,7 +782,7 @@ BOOST_AUTO_TEST_CASE(FuseReLUIntoDivFloat32GpuAccTest)
}
// BoundedReLu fused into Receiver Layers Float32
-BOOST_AUTO_TEST_CASE(FuseBoundedReLUIntoConvFloat32GpuAccTest)
+TEST_CASE("FuseBoundedReLUIntoConvFloat32GpuAccTest")
{
ActivationDescriptor activationDescriptor;
activationDescriptor.m_Function = ActivationFunction::BoundedReLu;
@@ -792,7 +792,7 @@ BOOST_AUTO_TEST_CASE(FuseBoundedReLUIntoConvFloat32GpuAccTest)
FuseActivationIntoPreviousLayerTest<Convolution2dTest<DataType::Float32>, DataType::Float32>
(activationDescriptor, 0.0001f, Compute::GpuAcc);
}
-BOOST_AUTO_TEST_CASE(FuseBoundedReLUIntoDWConvFloat32GpuAccTest)
+TEST_CASE("FuseBoundedReLUIntoDWConvFloat32GpuAccTest")
{
ActivationDescriptor activationDescriptor;
activationDescriptor.m_Function = ActivationFunction::BoundedReLu;
@@ -802,7 +802,7 @@ BOOST_AUTO_TEST_CASE(FuseBoundedReLUIntoDWConvFloat32GpuAccTest)
FuseActivationIntoPreviousLayerTest<DWConvolution2dTest<DataType::Float32>, DataType::Float32>
(activationDescriptor, 0.0001f, Compute::GpuAcc);
}
-BOOST_AUTO_TEST_CASE(FuseBoundedReLUIntoFullyConnectedFloat32GpuAccTest)
+TEST_CASE("FuseBoundedReLUIntoFullyConnectedFloat32GpuAccTest")
{
ActivationDescriptor activationDescriptor;
activationDescriptor.m_Function = ActivationFunction::BoundedReLu;
@@ -812,7 +812,7 @@ BOOST_AUTO_TEST_CASE(FuseBoundedReLUIntoFullyConnectedFloat32GpuAccTest)
FuseActivationIntoPreviousLayerTest<FullyConnectedTest<DataType::Float32>, DataType::Float32>
(activationDescriptor, 0.0001f, Compute::GpuAcc);
}
-BOOST_AUTO_TEST_CASE(FuseBoundedReLUIntoBatchNormFloat32GpuAccTest)
+TEST_CASE("FuseBoundedReLUIntoBatchNormFloat32GpuAccTest")
{
ActivationDescriptor activationDescriptor;
activationDescriptor.m_Function = ActivationFunction::BoundedReLu;
@@ -822,7 +822,7 @@ BOOST_AUTO_TEST_CASE(FuseBoundedReLUIntoBatchNormFloat32GpuAccTest)
FuseActivationIntoPreviousLayerTest<BatchNormTest<DataType::Float32>, DataType::Float32>
(activationDescriptor, 0.0001f, Compute::GpuAcc);
}
-BOOST_AUTO_TEST_CASE(FuseBoundedReLUIntoMulFloat32GpuAccTest)
+TEST_CASE("FuseBoundedReLUIntoMulFloat32GpuAccTest")
{
ActivationDescriptor activationDescriptor;
activationDescriptor.m_Function = ActivationFunction::BoundedReLu;
@@ -832,7 +832,7 @@ BOOST_AUTO_TEST_CASE(FuseBoundedReLUIntoMulFloat32GpuAccTest)
FuseActivationIntoPreviousLayerTest<MultiplicationTest<DataType::Float32>, DataType::Float32>
(activationDescriptor, 0.0001f, Compute::GpuAcc);
}
-BOOST_AUTO_TEST_CASE(FuseBoundedReLUIntoAddFloat32GpuAccTest)
+TEST_CASE("FuseBoundedReLUIntoAddFloat32GpuAccTest")
{
ActivationDescriptor activationDescriptor;
activationDescriptor.m_Function = ActivationFunction::BoundedReLu;
@@ -842,7 +842,7 @@ BOOST_AUTO_TEST_CASE(FuseBoundedReLUIntoAddFloat32GpuAccTest)
FuseActivationIntoPreviousLayerTest<AdditionTest<DataType::Float32>, DataType::Float32>
(activationDescriptor, 0.0001f, Compute::GpuAcc);
}
-BOOST_AUTO_TEST_CASE(FuseBoundedReLUIntoSubFloat32GpuAccTest)
+TEST_CASE("FuseBoundedReLUIntoSubFloat32GpuAccTest")
{
ActivationDescriptor activationDescriptor;
activationDescriptor.m_Function = ActivationFunction::BoundedReLu;
@@ -852,7 +852,7 @@ BOOST_AUTO_TEST_CASE(FuseBoundedReLUIntoSubFloat32GpuAccTest)
FuseActivationIntoPreviousLayerTest<SubtractionTest<DataType::Float32>, DataType::Float32>
(activationDescriptor, 0.0001f, Compute::GpuAcc);
}
-BOOST_AUTO_TEST_CASE(FuseBoundedReLUIntoDivFloat32GpuAccTest)
+TEST_CASE("FuseBoundedReLUIntoDivFloat32GpuAccTest")
{
ActivationDescriptor activationDescriptor;
activationDescriptor.m_Function = ActivationFunction::BoundedReLu;
@@ -864,7 +864,7 @@ BOOST_AUTO_TEST_CASE(FuseBoundedReLUIntoDivFloat32GpuAccTest)
}
// ReLu fused into Receiver Layers Float16
-BOOST_AUTO_TEST_CASE(FuseReLUIntoConvFloat16GpuAccTest)
+TEST_CASE("FuseReLUIntoConvFloat16GpuAccTest")
{
ActivationDescriptor activationDescriptor;
activationDescriptor.m_Function = ActivationFunction::ReLu;
@@ -872,7 +872,7 @@ BOOST_AUTO_TEST_CASE(FuseReLUIntoConvFloat16GpuAccTest)
FuseActivationIntoPreviousLayerTest<Convolution2dTest<DataType::Float16>, DataType::Float16>
(activationDescriptor, 0.0001f, Compute::GpuAcc);
}
-BOOST_AUTO_TEST_CASE(FuseReLUIntoDWConvFloat16GpuAccTest)
+TEST_CASE("FuseReLUIntoDWConvFloat16GpuAccTest")
{
ActivationDescriptor activationDescriptor;
activationDescriptor.m_Function = ActivationFunction::ReLu;
@@ -880,7 +880,7 @@ BOOST_AUTO_TEST_CASE(FuseReLUIntoDWConvFloat16GpuAccTest)
FuseActivationIntoPreviousLayerTest<DWConvolution2dTest<DataType::Float16>, DataType::Float16>
(activationDescriptor, 0.0001f, Compute::GpuAcc);
}
-BOOST_AUTO_TEST_CASE(FuseReLUIntoFullyConnectedFloat16GpuAccTest)
+TEST_CASE("FuseReLUIntoFullyConnectedFloat16GpuAccTest")
{
ActivationDescriptor activationDescriptor;
activationDescriptor.m_Function = ActivationFunction::ReLu;
@@ -888,7 +888,7 @@ BOOST_AUTO_TEST_CASE(FuseReLUIntoFullyConnectedFloat16GpuAccTest)
FuseActivationIntoPreviousLayerTest<FullyConnectedTest<DataType::Float16>, DataType::Float16>
(activationDescriptor, 0.0001f, Compute::GpuAcc);
}
-BOOST_AUTO_TEST_CASE(FuseReLUIntoBatchNormFloat16GpuAccTest)
+TEST_CASE("FuseReLUIntoBatchNormFloat16GpuAccTest")
{
ActivationDescriptor activationDescriptor;
activationDescriptor.m_Function = ActivationFunction::ReLu;
@@ -896,7 +896,7 @@ BOOST_AUTO_TEST_CASE(FuseReLUIntoBatchNormFloat16GpuAccTest)
FuseActivationIntoPreviousLayerTest<BatchNormTest<DataType::Float16>, DataType::Float16>
(activationDescriptor, 0.0001f, Compute::GpuAcc);
}
-BOOST_AUTO_TEST_CASE(FuseReLUIntoMulFloat16GpuAccTest)
+TEST_CASE("FuseReLUIntoMulFloat16GpuAccTest")
{
ActivationDescriptor activationDescriptor;
activationDescriptor.m_Function = ActivationFunction::ReLu;
@@ -904,7 +904,7 @@ BOOST_AUTO_TEST_CASE(FuseReLUIntoMulFloat16GpuAccTest)
FuseActivationIntoPreviousLayerTest<MultiplicationTest<DataType::Float16>, DataType::Float16>
(activationDescriptor, 0.0001f, Compute::GpuAcc);
}
-BOOST_AUTO_TEST_CASE(FuseReLUIntoAddFloat16GpuAccTest)
+TEST_CASE("FuseReLUIntoAddFloat16GpuAccTest")
{
ActivationDescriptor activationDescriptor;
activationDescriptor.m_Function = ActivationFunction::ReLu;
@@ -912,7 +912,7 @@ BOOST_AUTO_TEST_CASE(FuseReLUIntoAddFloat16GpuAccTest)
FuseActivationIntoPreviousLayerTest<AdditionTest<DataType::Float16>, DataType::Float16>
(activationDescriptor, 0.0001f, Compute::GpuAcc);
}
-BOOST_AUTO_TEST_CASE(FuseReLUIntoSubFloat16GpuAccTest)
+TEST_CASE("FuseReLUIntoSubFloat16GpuAccTest")
{
ActivationDescriptor activationDescriptor;
activationDescriptor.m_Function = ActivationFunction::ReLu;
@@ -920,7 +920,7 @@ BOOST_AUTO_TEST_CASE(FuseReLUIntoSubFloat16GpuAccTest)
FuseActivationIntoPreviousLayerTest<SubtractionTest<DataType::Float16>, DataType::Float16>
(activationDescriptor, 0.0001f, Compute::GpuAcc);
}
-BOOST_AUTO_TEST_CASE(FuseReLUIntoDivFloat16GpuAccTest)
+TEST_CASE("FuseReLUIntoDivFloat16GpuAccTest")
{
ActivationDescriptor activationDescriptor;
activationDescriptor.m_Function = ActivationFunction::ReLu;
@@ -930,7 +930,7 @@ BOOST_AUTO_TEST_CASE(FuseReLUIntoDivFloat16GpuAccTest)
}
// ReLU fused into Receiver Layers QAsymmU8
-BOOST_AUTO_TEST_CASE(FuseReLUQIntoConvAsymmU8GpuAccTest)
+TEST_CASE("FuseReLUQIntoConvAsymmU8GpuAccTest")
{
ActivationDescriptor activationDescriptor;
activationDescriptor.m_Function = ActivationFunction::ReLu;
@@ -938,7 +938,7 @@ BOOST_AUTO_TEST_CASE(FuseReLUQIntoConvAsymmU8GpuAccTest)
FuseActivationIntoPreviousLayerTest<Convolution2dTest<DataType::QAsymmU8>, DataType::QAsymmU8>
(activationDescriptor, 0.0001f, Compute::GpuAcc);
}
-BOOST_AUTO_TEST_CASE(FuseReLUQIntoDWConvAsymmU8GpuAccTest)
+TEST_CASE("FuseReLUQIntoDWConvAsymmU8GpuAccTest")
{
ActivationDescriptor activationDescriptor;
activationDescriptor.m_Function = ActivationFunction::ReLu;
@@ -946,7 +946,7 @@ BOOST_AUTO_TEST_CASE(FuseReLUQIntoDWConvAsymmU8GpuAccTest)
FuseActivationIntoPreviousLayerTest<DWConvolution2dTest<DataType::QAsymmU8>, DataType::QAsymmU8>
(activationDescriptor, 0.0001f, Compute::GpuAcc);
}
-BOOST_AUTO_TEST_CASE(FuseReLUQIntoFullyConnectedAsymmU8GpuAccTest)
+TEST_CASE("FuseReLUQIntoFullyConnectedAsymmU8GpuAccTest")
{
ActivationDescriptor activationDescriptor;
activationDescriptor.m_Function = ActivationFunction::ReLu;
@@ -956,7 +956,7 @@ BOOST_AUTO_TEST_CASE(FuseReLUQIntoFullyConnectedAsymmU8GpuAccTest)
}
// BoundedReLu fused into Receiver Layers QAsymmS8
-BOOST_AUTO_TEST_CASE(FuseBoundedReLUIntoConvQASymmS8GpuAccTest)
+TEST_CASE("FuseBoundedReLUIntoConvQASymmS8GpuAccTest")
{
ActivationDescriptor activationDescriptor;
activationDescriptor.m_Function = ActivationFunction::BoundedReLu;
@@ -966,7 +966,7 @@ BOOST_AUTO_TEST_CASE(FuseBoundedReLUIntoConvQASymmS8GpuAccTest)
FuseActivationIntoPreviousLayerTest<Convolution2dTest<DataType::QAsymmS8>, DataType::QAsymmS8>
(activationDescriptor, 0.0001f, Compute::GpuAcc);
}
-BOOST_AUTO_TEST_CASE(FuseBoundedReLUIntoDWConvQASymmS8GpuAccTest)
+TEST_CASE("FuseBoundedReLUIntoDWConvQASymmS8GpuAccTest")
{
ActivationDescriptor activationDescriptor;
activationDescriptor.m_Function = ActivationFunction::BoundedReLu;
@@ -976,7 +976,7 @@ BOOST_AUTO_TEST_CASE(FuseBoundedReLUIntoDWConvQASymmS8GpuAccTest)
FuseActivationIntoPreviousLayerTest < DWConvolution2dTest < DataType::QAsymmS8 > , DataType::QAsymmS8 >
(activationDescriptor, 0.0001f, Compute::GpuAcc);
}
-BOOST_AUTO_TEST_CASE(FuseBoundedReLUIntoFullyConnectedQASymmS8GpuAccTest)
+TEST_CASE("FuseBoundedReLUIntoFullyConnectedQASymmS8GpuAccTest")
{
ActivationDescriptor activationDescriptor;
activationDescriptor.m_Function = ActivationFunction::BoundedReLu;
@@ -988,7 +988,7 @@ BOOST_AUTO_TEST_CASE(FuseBoundedReLUIntoFullyConnectedQASymmS8GpuAccTest)
}
// TanH fused into Receiver Layers Float32
-BOOST_AUTO_TEST_CASE(FuseTanHIntoConvFloat32GpuAccTest)
+TEST_CASE("FuseTanHIntoConvFloat32GpuAccTest")
{
ActivationDescriptor activationDescriptor;
activationDescriptor.m_Function = ActivationFunction::TanH;
@@ -996,7 +996,7 @@ BOOST_AUTO_TEST_CASE(FuseTanHIntoConvFloat32GpuAccTest)
FuseActivationIntoPreviousLayerTest<Convolution2dTest<DataType::Float32>, DataType::Float32>
(activationDescriptor, 0.0001f, Compute::GpuAcc);
}
-BOOST_AUTO_TEST_CASE(FuseTanHIntoMulFloat32GpuAccTest)
+TEST_CASE("FuseTanHIntoMulFloat32GpuAccTest")
{
ActivationDescriptor activationDescriptor;
activationDescriptor.m_Function = ActivationFunction::TanH;
@@ -1004,7 +1004,7 @@ BOOST_AUTO_TEST_CASE(FuseTanHIntoMulFloat32GpuAccTest)
FuseActivationIntoPreviousLayerTest<MultiplicationTest<DataType::Float32>, DataType::Float32>
(activationDescriptor, 0.0001f, Compute::GpuAcc);
}
-BOOST_AUTO_TEST_CASE(FuseTanHIntoAddFloat32GpuAccTest)
+TEST_CASE("FuseTanHIntoAddFloat32GpuAccTest")
{
ActivationDescriptor activationDescriptor;
activationDescriptor.m_Function = ActivationFunction::TanH;
@@ -1012,7 +1012,7 @@ BOOST_AUTO_TEST_CASE(FuseTanHIntoAddFloat32GpuAccTest)
FuseActivationIntoPreviousLayerTest<AdditionTest<DataType::Float32>, DataType::Float32>
(activationDescriptor, 0.0001f, Compute::GpuAcc);
}
-BOOST_AUTO_TEST_CASE(FuseTanHIntoSubFloat32GpuAccTest)
+TEST_CASE("FuseTanHIntoSubFloat32GpuAccTest")
{
ActivationDescriptor activationDescriptor;
activationDescriptor.m_Function = ActivationFunction::TanH;
@@ -1020,7 +1020,7 @@ BOOST_AUTO_TEST_CASE(FuseTanHIntoSubFloat32GpuAccTest)
FuseActivationIntoPreviousLayerTest<SubtractionTest<DataType::Float32>, DataType::Float32>
(activationDescriptor, 0.0001f, Compute::GpuAcc);
}
-BOOST_AUTO_TEST_CASE(FuseTanHIntoDivFloat32GpuAccTest)
+TEST_CASE("FuseTanHIntoDivFloat32GpuAccTest")
{
ActivationDescriptor activationDescriptor;
activationDescriptor.m_Function = ActivationFunction::TanH;
@@ -1030,7 +1030,7 @@ BOOST_AUTO_TEST_CASE(FuseTanHIntoDivFloat32GpuAccTest)
}
// HardSwish fused into Receiver Layers Float32
-BOOST_AUTO_TEST_CASE(FuseHardSwishIntoConvFloat32GpuAccTest)
+TEST_CASE("FuseHardSwishIntoConvFloat32GpuAccTest")
{
ActivationDescriptor activationDescriptor;
activationDescriptor.m_Function = ActivationFunction::HardSwish;
@@ -1038,7 +1038,7 @@ BOOST_AUTO_TEST_CASE(FuseHardSwishIntoConvFloat32GpuAccTest)
FuseActivationIntoPreviousLayerTest<Convolution2dTest<DataType::Float32>, DataType::Float32>
(activationDescriptor, 0.0001f, Compute::GpuAcc);
}
-BOOST_AUTO_TEST_CASE(FuseHardSwishIntoMulFloat32GpuAccTest)
+TEST_CASE("FuseHardSwishIntoMulFloat32GpuAccTest")
{
ActivationDescriptor activationDescriptor;
activationDescriptor.m_Function = ActivationFunction::HardSwish;
@@ -1046,7 +1046,7 @@ BOOST_AUTO_TEST_CASE(FuseHardSwishIntoMulFloat32GpuAccTest)
FuseActivationIntoPreviousLayerTest<MultiplicationTest<DataType::Float32>, DataType::Float32>
(activationDescriptor, 0.0001f, Compute::GpuAcc);
}
-BOOST_AUTO_TEST_CASE(FuseHardSwishIntoAddFloat32GpuAccTest)
+TEST_CASE("FuseHardSwishIntoAddFloat32GpuAccTest")
{
ActivationDescriptor activationDescriptor;
activationDescriptor.m_Function = ActivationFunction::HardSwish;
@@ -1054,7 +1054,7 @@ BOOST_AUTO_TEST_CASE(FuseHardSwishIntoAddFloat32GpuAccTest)
FuseActivationIntoPreviousLayerTest<AdditionTest<DataType::Float32>, DataType::Float32>
(activationDescriptor, 0.0001f, Compute::GpuAcc);
}
-BOOST_AUTO_TEST_CASE(FuseHardSwishIntoSubFloat32GpuAccTest)
+TEST_CASE("FuseHardSwishIntoSubFloat32GpuAccTest")
{
ActivationDescriptor activationDescriptor;
activationDescriptor.m_Function = ActivationFunction::HardSwish;
@@ -1062,7 +1062,7 @@ BOOST_AUTO_TEST_CASE(FuseHardSwishIntoSubFloat32GpuAccTest)
FuseActivationIntoPreviousLayerTest<SubtractionTest<DataType::Float32>, DataType::Float32>
(activationDescriptor, 0.0001f, Compute::GpuAcc);
}
-BOOST_AUTO_TEST_CASE(FuseHardSwishIntoDivFloat32GpuAccTest)
+TEST_CASE("FuseHardSwishIntoDivFloat32GpuAccTest")
{
ActivationDescriptor activationDescriptor;
activationDescriptor.m_Function = ActivationFunction::HardSwish;
@@ -1072,7 +1072,7 @@ BOOST_AUTO_TEST_CASE(FuseHardSwishIntoDivFloat32GpuAccTest)
}
// Test that all receiver layers follow by all activation layers work, either fused or not fused
-BOOST_AUTO_TEST_CASE(LayerFollowedByActivationFloat32GpuAccTest)
+TEST_CASE("LayerFollowedByActivationFloat32GpuAccTest")
{
ActivationDescriptor activationDescriptor;
for (int i = 0; i != 12; ++i)
@@ -1082,26 +1082,26 @@ BOOST_AUTO_TEST_CASE(LayerFollowedByActivationFloat32GpuAccTest)
activationDescriptor.m_B = -1.0f;
if (activationDescriptor.m_Function != ActivationFunction::Elu)
{
- BOOST_CHECK_MESSAGE((FuseActivationSimpleTest<Convolution2dTest<DataType::Float32>, DataType::Float32>
+ CHECK_MESSAGE((FuseActivationSimpleTest<Convolution2dTest<DataType::Float32>, DataType::Float32>
(activationDescriptor, Compute::GpuAcc)), "Convolution + Activation function " << i);
- BOOST_CHECK_MESSAGE((FuseActivationSimpleTest<DWConvolution2dTest<DataType::Float32>, DataType::Float32>
+ CHECK_MESSAGE((FuseActivationSimpleTest<DWConvolution2dTest<DataType::Float32>, DataType::Float32>
(activationDescriptor, Compute::GpuAcc)), "DepthwiseConvolution + Activation function " << i);
- BOOST_CHECK_MESSAGE((FuseActivationSimpleTest<FullyConnectedTest<DataType::Float32>, DataType::Float32>
+ CHECK_MESSAGE((FuseActivationSimpleTest<FullyConnectedTest<DataType::Float32>, DataType::Float32>
(activationDescriptor, Compute::GpuAcc)), "FullyConnected + Activation function " << i);
- BOOST_CHECK_MESSAGE((FuseActivationSimpleTest<BatchNormTest<DataType::Float32>, DataType::Float32>
+ CHECK_MESSAGE((FuseActivationSimpleTest<BatchNormTest<DataType::Float32>, DataType::Float32>
(activationDescriptor, Compute::GpuAcc)), "BatchNorm + Activation function " << i);
- BOOST_CHECK_MESSAGE((FuseActivationSimpleTest<MultiplicationTest<DataType::Float32>, DataType::Float32>
+ CHECK_MESSAGE((FuseActivationSimpleTest<MultiplicationTest<DataType::Float32>, DataType::Float32>
(activationDescriptor, Compute::GpuAcc)), "Multiplication + Activation function " << i);
- BOOST_CHECK_MESSAGE((FuseActivationSimpleTest<AdditionTest<DataType::Float32>, DataType::Float32>
+ CHECK_MESSAGE((FuseActivationSimpleTest<AdditionTest<DataType::Float32>, DataType::Float32>
(activationDescriptor, Compute::GpuAcc)), "Addition + Activation function " << i);
- BOOST_CHECK_MESSAGE((FuseActivationSimpleTest<SubtractionTest<DataType::Float32>, DataType::Float32>
+ CHECK_MESSAGE((FuseActivationSimpleTest<SubtractionTest<DataType::Float32>, DataType::Float32>
(activationDescriptor, Compute::GpuAcc)), "Subtraction + Activation function " << i);
- BOOST_CHECK_MESSAGE((FuseActivationSimpleTest<DivisionTest<DataType::Float32>, DataType::Float32>
+ CHECK_MESSAGE((FuseActivationSimpleTest<DivisionTest<DataType::Float32>, DataType::Float32>
(activationDescriptor, Compute::GpuAcc)), "Division + Activation function " << i);
}
}
}
-BOOST_AUTO_TEST_CASE(LayerFollowedByActivationFloat16GpuAccTest)
+TEST_CASE("LayerFollowedByActivationFloat16GpuAccTest")
{
ActivationDescriptor activationDescriptor;
for (int i = 0; i != 12; ++i)
@@ -1111,71 +1111,71 @@ BOOST_AUTO_TEST_CASE(LayerFollowedByActivationFloat16GpuAccTest)
activationDescriptor.m_B = -1.0f;
if (activationDescriptor.m_Function != ActivationFunction::Elu)
{
- BOOST_CHECK_MESSAGE((FuseActivationSimpleTest<Convolution2dTest<DataType::Float16>, DataType::Float16>
+ CHECK_MESSAGE((FuseActivationSimpleTest<Convolution2dTest<DataType::Float16>, DataType::Float16>
(activationDescriptor, Compute::GpuAcc)), "Convolution + Activation function " << i);
- BOOST_CHECK_MESSAGE((FuseActivationSimpleTest<DWConvolution2dTest<DataType::Float16>, DataType::Float16>
+ CHECK_MESSAGE((FuseActivationSimpleTest<DWConvolution2dTest<DataType::Float16>, DataType::Float16>
(activationDescriptor, Compute::GpuAcc)), "Depthwise + Activation function " << i);
- BOOST_CHECK_MESSAGE((FuseActivationSimpleTest<FullyConnectedTest<DataType::Float16>, DataType::Float16>
+ CHECK_MESSAGE((FuseActivationSimpleTest<FullyConnectedTest<DataType::Float16>, DataType::Float16>
(activationDescriptor, Compute::GpuAcc)), "FullyConnected + Activation function " << i);
- BOOST_CHECK_MESSAGE((FuseActivationSimpleTest<BatchNormTest<DataType::Float16>, DataType::Float16>
+ CHECK_MESSAGE((FuseActivationSimpleTest<BatchNormTest<DataType::Float16>, DataType::Float16>
(activationDescriptor, Compute::GpuAcc)), "BatchNorm + Activation function " << i);
- BOOST_CHECK_MESSAGE((FuseActivationSimpleTest<MultiplicationTest<DataType::Float16>, DataType::Float16>
+ CHECK_MESSAGE((FuseActivationSimpleTest<MultiplicationTest<DataType::Float16>, DataType::Float16>
(activationDescriptor, Compute::GpuAcc)), "Multiplication + Activation function " << i);
- BOOST_CHECK_MESSAGE((FuseActivationSimpleTest<AdditionTest<DataType::Float16>, DataType::Float16>
+ CHECK_MESSAGE((FuseActivationSimpleTest<AdditionTest<DataType::Float16>, DataType::Float16>
(activationDescriptor, Compute::GpuAcc)), "Addition + Activation function " << i);
- BOOST_CHECK_MESSAGE((FuseActivationSimpleTest<SubtractionTest<DataType::Float16>, DataType::Float16>
+ CHECK_MESSAGE((FuseActivationSimpleTest<SubtractionTest<DataType::Float16>, DataType::Float16>
(activationDescriptor, Compute::GpuAcc)), "Subtraction + Activation function " << i);
- BOOST_CHECK_MESSAGE((FuseActivationSimpleTest<DivisionTest<DataType::Float16>, DataType::Float16>
+ CHECK_MESSAGE((FuseActivationSimpleTest<DivisionTest<DataType::Float16>, DataType::Float16>
(activationDescriptor, Compute::GpuAcc)), "Division + Activation function " << i);
}
}
}
-BOOST_AUTO_TEST_CASE(LayerFollowedByActivationQAsymmU8GpuAccTest)
+TEST_CASE("LayerFollowedByActivationQAsymmU8GpuAccTest")
{
ActivationDescriptor activationDescriptor;
activationDescriptor.m_Function = ActivationFunction::Sigmoid;
- BOOST_CHECK_MESSAGE((FuseActivationSimpleTest<Convolution2dTest<DataType::QAsymmU8>, DataType::QAsymmU8>
+ CHECK_MESSAGE((FuseActivationSimpleTest<Convolution2dTest<DataType::QAsymmU8>, DataType::QAsymmU8>
(activationDescriptor, Compute::GpuAcc, 1.f / 256.f, 0)), "Convolution + Activation function " <<
static_cast<int>(activationDescriptor.m_Function));
- BOOST_CHECK_MESSAGE((FuseActivationSimpleTest<FullyConnectedTest<DataType::QAsymmU8>, DataType::QAsymmU8>
+ CHECK_MESSAGE((FuseActivationSimpleTest<FullyConnectedTest<DataType::QAsymmU8>, DataType::QAsymmU8>
(activationDescriptor, Compute::GpuAcc, 1.f / 256.f, 0)), "FullyConnected + Activation function " <<
static_cast<int>(activationDescriptor.m_Function));
activationDescriptor.m_Function = ActivationFunction::TanH;
- BOOST_CHECK_MESSAGE((FuseActivationSimpleTest<Convolution2dTest<DataType::QAsymmU8>, DataType::QAsymmU8>
+ CHECK_MESSAGE((FuseActivationSimpleTest<Convolution2dTest<DataType::QAsymmU8>, DataType::QAsymmU8>
(activationDescriptor, Compute::GpuAcc, 1.f / 128.f, 128)), "Convolution + Activation function " <<
static_cast<int>(activationDescriptor.m_Function));
- BOOST_CHECK_MESSAGE((FuseActivationSimpleTest<FullyConnectedTest<DataType::QAsymmU8>, DataType::QAsymmU8>
+ CHECK_MESSAGE((FuseActivationSimpleTest<FullyConnectedTest<DataType::QAsymmU8>, DataType::QAsymmU8>
(activationDescriptor, Compute::GpuAcc, 1.f / 128.f, 128)), "FullyConnected + Activation function " <<
static_cast<int>(activationDescriptor.m_Function));
activationDescriptor.m_Function = ActivationFunction::ReLu;
- BOOST_CHECK_MESSAGE((FuseActivationSimpleTest<Convolution2dTest<DataType::QAsymmU8>, DataType::QAsymmU8>
+ CHECK_MESSAGE((FuseActivationSimpleTest<Convolution2dTest<DataType::QAsymmU8>, DataType::QAsymmU8>
(activationDescriptor, Compute::GpuAcc)), "Convolution + Activation function " <<
static_cast<int>(activationDescriptor.m_Function));
- BOOST_CHECK_MESSAGE((FuseActivationSimpleTest<FullyConnectedTest<DataType::QAsymmU8>, DataType::QAsymmU8>
+ CHECK_MESSAGE((FuseActivationSimpleTest<FullyConnectedTest<DataType::QAsymmU8>, DataType::QAsymmU8>
(activationDescriptor, Compute::GpuAcc)), "FullyConnected + Activation function " <<
static_cast<int>(activationDescriptor.m_Function));
activationDescriptor.m_Function = ActivationFunction::BoundedReLu;
activationDescriptor.m_A = 1.0f;
activationDescriptor.m_B = -1.0f;
- BOOST_CHECK_MESSAGE((FuseActivationSimpleTest<Convolution2dTest<DataType::QAsymmU8>, DataType::QAsymmU8>
+ CHECK_MESSAGE((FuseActivationSimpleTest<Convolution2dTest<DataType::QAsymmU8>, DataType::QAsymmU8>
(activationDescriptor, Compute::GpuAcc)), "Convolution + Activation function " <<
static_cast<int>(activationDescriptor.m_Function));
- BOOST_CHECK_MESSAGE((FuseActivationSimpleTest<FullyConnectedTest<DataType::QAsymmU8>, DataType::QAsymmU8>
+ CHECK_MESSAGE((FuseActivationSimpleTest<FullyConnectedTest<DataType::QAsymmU8>, DataType::QAsymmU8>
(activationDescriptor, Compute::GpuAcc)), "FullyConnected + Activation function " <<
static_cast<int>(activationDescriptor.m_Function));
activationDescriptor.m_Function = ActivationFunction::HardSwish;
- BOOST_CHECK_MESSAGE((FuseActivationSimpleTest<Convolution2dTest<DataType::QAsymmU8>, DataType::QAsymmU8>
+ CHECK_MESSAGE((FuseActivationSimpleTest<Convolution2dTest<DataType::QAsymmU8>, DataType::QAsymmU8>
(activationDescriptor, Compute::GpuAcc)), "Convolution + Activation function " <<
static_cast<int>(activationDescriptor.m_Function));
- BOOST_CHECK_MESSAGE((FuseActivationSimpleTest<FullyConnectedTest<DataType::QAsymmU8>, DataType::QAsymmU8>
+ CHECK_MESSAGE((FuseActivationSimpleTest<FullyConnectedTest<DataType::QAsymmU8>, DataType::QAsymmU8>
(activationDescriptor, Compute::GpuAcc)), "FullyConnected + Activation function " <<
static_cast<int>(activationDescriptor.m_Function));
}
#endif
-BOOST_AUTO_TEST_SUITE_END() \ No newline at end of file
+} \ No newline at end of file
diff --git a/src/armnn/test/optimizations/FuseBatchNormTests.cpp b/src/armnn/test/optimizations/FuseBatchNormTests.cpp
index be66c5e4af..671f565054 100644
--- a/src/armnn/test/optimizations/FuseBatchNormTests.cpp
+++ b/src/armnn/test/optimizations/FuseBatchNormTests.cpp
@@ -10,12 +10,12 @@
#include <armnn/INetwork.hpp>
#include <test/TestUtils.hpp>
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
using namespace armnn;
-BOOST_AUTO_TEST_SUITE(Optimizer)
-
+TEST_SUITE("Optimizer")
+{
namespace
{
@@ -194,8 +194,8 @@ void FuseBatchNormIntoConvTest(bool depthwise, float tolerance, armnn::Compute b
(layer->GetNameStr() == "fused-batchNorm-into-convolution");
};
- BOOST_CHECK(3 == graphFused.GetNumLayers());
- BOOST_TEST(CheckSequence(graphFused.cbegin(),
+ CHECK(3 == graphFused.GetNumLayers());
+ CHECK(CheckSequence(graphFused.cbegin(),
graphFused.cend(),
&IsLayerOfType<InputLayer>,
checkFusedConv2d,
@@ -203,7 +203,7 @@ void FuseBatchNormIntoConvTest(bool depthwise, float tolerance, armnn::Compute b
// Load network into runtime
NetworkId networkIdentifier;
- BOOST_TEST(run->LoadNetwork(networkIdentifier, std::move(optNetFused)) == Status::Success);
+ CHECK(run->LoadNetwork(networkIdentifier, std::move(optNetFused)) == Status::Success);
//Creates structures for inputs and outputs.
std::vector<T> inputDataFused = GetVector<T>(48, 1.0f, 0.1f);
@@ -235,8 +235,8 @@ void FuseBatchNormIntoConvTest(bool depthwise, float tolerance, armnn::Compute b
Graph& graphNotFused = GetGraphForTesting(optNetNotFused.get());
- BOOST_CHECK(5 == graphNotFused.GetNumLayers());
- BOOST_TEST(CheckSequence(graphNotFused.cbegin(),
+ CHECK(5 == graphNotFused.GetNumLayers());
+ CHECK(CheckSequence(graphNotFused.cbegin(),
graphNotFused.cend(),
&IsLayerOfType<armnn::InputLayer>,
&IsLayerOfType<ConvLayerType>,
@@ -246,7 +246,7 @@ void FuseBatchNormIntoConvTest(bool depthwise, float tolerance, armnn::Compute b
// Load network into runtime
NetworkId networkIdentifierNotFused;
- BOOST_TEST(runNotFused->LoadNetwork(networkIdentifierNotFused, std::move(optNetNotFused)) == Status::Success);
+ CHECK(runNotFused->LoadNetwork(networkIdentifierNotFused, std::move(optNetNotFused)) == Status::Success);
//Creates structures for inputs and outputs.
std::vector<T> inputDataNotFused = GetVector<T>(48, 1.0f, 0.1f);
@@ -269,33 +269,34 @@ void FuseBatchNormIntoConvTest(bool depthwise, float tolerance, armnn::Compute b
runNotFused->EnqueueWorkload(networkIdentifierNotFused, inputTensorsNotFused, outputTensorsNotFused);
// Check the output of the fused-convolution matches with the output of the batchNormm in the "NotFused" network
+ auto epsilon = T(tolerance);
for (unsigned int n = 0; n < outputDataFused.size(); ++n)
{
- BOOST_CHECK_CLOSE(outputDataFused[n], outputDataNotFused[n], T(tolerance));
+ CHECK_EQ(outputDataFused[n], doctest::Approx(outputDataNotFused[n]).epsilon(epsilon));
}
}
// This unit test needs the reference backend, it's not available if the reference backend is not built
#if defined(ARMNNREF_ENABLED)
-BOOST_AUTO_TEST_CASE(FuseBatchNormIntoConv2DFloat32Test)
+TEST_CASE("FuseBatchNormIntoConv2DFloat32Test")
{
FuseBatchNormIntoConvTest<Conv2dTest, DataType::Float32>(false, 0.0001f, armnn::Compute::CpuRef);
}
-BOOST_AUTO_TEST_CASE(FuseBatchNormIntoConv2DFloat16Test)
+TEST_CASE("FuseBatchNormIntoConv2DFloat16Test")
{
FuseBatchNormIntoConvTest<Conv2dTest, DataType::Float16>(false, 0.1f, armnn::Compute::CpuRef);
}
-BOOST_AUTO_TEST_CASE(FuseBatchNormIntoDepthwiseConv2DFloat32Test)
+TEST_CASE("FuseBatchNormIntoDepthwiseConv2DFloat32Test")
{
FuseBatchNormIntoConvTest<DepthwiseConv2dTest, DataType::Float32>(true, 0.0001f,armnn::Compute::CpuRef);
}
-BOOST_AUTO_TEST_CASE(FuseBatchNormIntoDepthwiseConv2DFloat16Test)
+TEST_CASE("FuseBatchNormIntoDepthwiseConv2DFloat16Test")
{
FuseBatchNormIntoConvTest<DepthwiseConv2dTest, DataType::Float16>(true, 0.1f,armnn::Compute::CpuRef);
}
#endif
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnn/test/optimizations/InsertDebugLayerTests.cpp b/src/armnn/test/optimizations/InsertDebugLayerTests.cpp
index 38b6397142..03d0d22f95 100644
--- a/src/armnn/test/optimizations/InsertDebugLayerTests.cpp
+++ b/src/armnn/test/optimizations/InsertDebugLayerTests.cpp
@@ -7,12 +7,13 @@
#include <Optimizer.hpp>
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
-BOOST_AUTO_TEST_SUITE(Optimizer)
+TEST_SUITE("Optimizer")
+{
using namespace armnn::optimizations;
-BOOST_AUTO_TEST_CASE(InsertDebugOptimizationTest)
+TEST_CASE("InsertDebugOptimizationTest")
{
armnn::Graph graph;
@@ -31,15 +32,15 @@ BOOST_AUTO_TEST_CASE(InsertDebugOptimizationTest)
input->GetOutputSlot().Connect(floor->GetInputSlot(0));
floor->GetOutputSlot().Connect(output->GetInputSlot(0));
- BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
+ CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
&IsLayerOfType<armnn::FloorLayer>, &IsLayerOfType<armnn::OutputLayer>));
// Run the optimizer
armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(InsertDebugLayer()));
- BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
+ CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
&IsLayerOfType<armnn::DebugLayer>, &IsLayerOfType<armnn::FloorLayer>,
&IsLayerOfType<armnn::DebugLayer>, &IsLayerOfType<armnn::OutputLayer>));
}
-BOOST_AUTO_TEST_SUITE_END() \ No newline at end of file
+} \ No newline at end of file
diff --git a/src/armnn/test/optimizations/MovePermuteUpTests.cpp b/src/armnn/test/optimizations/MovePermuteUpTests.cpp
index 13c692670a..38a65a6173 100644
--- a/src/armnn/test/optimizations/MovePermuteUpTests.cpp
+++ b/src/armnn/test/optimizations/MovePermuteUpTests.cpp
@@ -7,12 +7,13 @@
#include <Optimizer.hpp>
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
-BOOST_AUTO_TEST_SUITE(Optimizer)
+TEST_SUITE("Optimizer")
+{
using namespace armnn::optimizations;
-BOOST_AUTO_TEST_CASE(MovePermuteUpTest)
+TEST_CASE("MovePermuteUpTest")
{
const armnn::TensorInfo info({ 1, 5, 2, 3 }, armnn::DataType::Float32);
const armnn::TensorInfo permuted({ 1, 3, 5, 2 }, armnn::DataType::Float32);
@@ -66,7 +67,7 @@ BOOST_AUTO_TEST_CASE(MovePermuteUpTest)
->GetOutputHandler()
.SetTensorInfo(info);
- BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
+ CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
&IsLayerOfType<armnn::InputLayer>, &IsLayerOfType<armnn::InputLayer>,
&IsLayerOfType<armnn::MultiplicationLayer>, &IsLayerOfType<armnn::MemCopyLayer>,
&IsLayerOfType<armnn::FloorLayer>, &IsLayerOfType<armnn::FakeQuantizationLayer>,
@@ -76,7 +77,7 @@ BOOST_AUTO_TEST_CASE(MovePermuteUpTest)
armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(MovePermuteUp()));
// The permute is moved to the top. New permutes for layers with multiple inputs.
- BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
+ CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
&IsLayerOfType<armnn::InputLayer>, &IsLayerOfType<armnn::InputLayer>,
&IsLayerOfType<armnn::PermuteLayer>, &IsLayerOfType<armnn::PermuteLayer>,
&IsLayerOfType<armnn::PermuteLayer>, &IsLayerOfType<armnn::MultiplicationLayer>,
@@ -86,7 +87,7 @@ BOOST_AUTO_TEST_CASE(MovePermuteUpTest)
std::list<std::string> testRelatedLayers = { permuteLayerName };
- BOOST_TEST(CheckRelatedLayers<armnn::PermuteLayer>(graph, testRelatedLayers));
+ CHECK(CheckRelatedLayers<armnn::PermuteLayer>(graph, testRelatedLayers));
}
-BOOST_AUTO_TEST_SUITE_END() \ No newline at end of file
+} \ No newline at end of file
diff --git a/src/armnn/test/optimizations/MoveTransposeUpTests.cpp b/src/armnn/test/optimizations/MoveTransposeUpTests.cpp
index cb41ff0dc1..68d277a4bd 100644
--- a/src/armnn/test/optimizations/MoveTransposeUpTests.cpp
+++ b/src/armnn/test/optimizations/MoveTransposeUpTests.cpp
@@ -7,12 +7,13 @@
#include <Optimizer.hpp>
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
-BOOST_AUTO_TEST_SUITE(Optimizer)
+TEST_SUITE("Optimizer")
+{
using namespace armnn::optimizations;
-BOOST_AUTO_TEST_CASE(MoveTransposeUpTest)
+TEST_CASE("MoveTransposeUpTest")
{
const armnn::TensorInfo info({ 1, 5, 2, 3 }, armnn::DataType::Float32);
const armnn::TensorInfo transposed({ 1, 3, 5, 2 }, armnn::DataType::Float32);
@@ -67,7 +68,7 @@ BOOST_AUTO_TEST_CASE(MoveTransposeUpTest)
->GetOutputHandler()
.SetTensorInfo(info);
- BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
+ CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
&IsLayerOfType<armnn::InputLayer>, &IsLayerOfType<armnn::InputLayer>,
&IsLayerOfType<armnn::MultiplicationLayer>, &IsLayerOfType<armnn::MemCopyLayer>,
&IsLayerOfType<armnn::FloorLayer>, &IsLayerOfType<armnn::FakeQuantizationLayer>,
@@ -77,7 +78,7 @@ BOOST_AUTO_TEST_CASE(MoveTransposeUpTest)
armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(MoveTransposeUp()));
// The transpose is moved to the top. New transposes for layers with multiple inputs.
- BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
+ CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
&IsLayerOfType<armnn::InputLayer>, &IsLayerOfType<armnn::InputLayer>,
&IsLayerOfType<armnn::TransposeLayer>, &IsLayerOfType<armnn::TransposeLayer>,
&IsLayerOfType<armnn::TransposeLayer>, &IsLayerOfType<armnn::MultiplicationLayer>,
@@ -87,7 +88,7 @@ BOOST_AUTO_TEST_CASE(MoveTransposeUpTest)
std::list<std::string> testRelatedLayers = { transposeLayerName };
- BOOST_TEST(CheckRelatedLayers<armnn::TransposeLayer>(graph, testRelatedLayers));
+ CHECK(CheckRelatedLayers<armnn::TransposeLayer>(graph, testRelatedLayers));
}
-BOOST_AUTO_TEST_SUITE_END() \ No newline at end of file
+} \ No newline at end of file
diff --git a/src/armnn/test/optimizations/OptimizeConsecutiveReshapesTests.cpp b/src/armnn/test/optimizations/OptimizeConsecutiveReshapesTests.cpp
index 8c3c435265..694b103091 100644
--- a/src/armnn/test/optimizations/OptimizeConsecutiveReshapesTests.cpp
+++ b/src/armnn/test/optimizations/OptimizeConsecutiveReshapesTests.cpp
@@ -7,12 +7,13 @@
#include <Optimizer.hpp>
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
-BOOST_AUTO_TEST_SUITE(Optimizer)
+TEST_SUITE("Optimizer")
+{
using namespace armnn::optimizations;
-BOOST_AUTO_TEST_CASE(OptimizeConsecutiveReshapesTest)
+TEST_CASE("OptimizeConsecutiveReshapesTest")
{
armnn::Graph graph;
@@ -39,7 +40,7 @@ BOOST_AUTO_TEST_CASE(OptimizeConsecutiveReshapesTest)
reshape1->GetOutputHandler().SetTensorInfo(info1);
reshape2->GetOutputHandler().SetTensorInfo(info2);
- BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
+ CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
&IsLayerOfType<armnn::ReshapeLayer>, &IsLayerOfType<armnn::ReshapeLayer>,
&IsLayerOfType<armnn::OutputLayer>));
@@ -53,13 +54,13 @@ BOOST_AUTO_TEST_CASE(OptimizeConsecutiveReshapesTest)
};
// The two reshapes are replaced by a single equivalent reshape.
- BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>, checkReshape,
+ CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>, checkReshape,
&IsLayerOfType<armnn::OutputLayer>));
// Check the new reshape layer has the other two reshapes as related layers
std::list<std::string> testRelatedLayers = { reshape2Name, reshape1Name };
- BOOST_TEST(CheckRelatedLayers<armnn::ReshapeLayer>(graph, testRelatedLayers));
+ CHECK(CheckRelatedLayers<armnn::ReshapeLayer>(graph, testRelatedLayers));
}
{
@@ -72,9 +73,9 @@ BOOST_AUTO_TEST_CASE(OptimizeConsecutiveReshapesTest)
armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(OptimizeConsecutiveReshapes()));
// The two reshapes are removed.
- BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
+ CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
&IsLayerOfType<armnn::OutputLayer>));
}
}
-BOOST_AUTO_TEST_SUITE_END() \ No newline at end of file
+} \ No newline at end of file
diff --git a/src/armnn/test/optimizations/OptimizeInverseConversionsTests.cpp b/src/armnn/test/optimizations/OptimizeInverseConversionsTests.cpp
index d87113c209..4b6dfe582b 100644
--- a/src/armnn/test/optimizations/OptimizeInverseConversionsTests.cpp
+++ b/src/armnn/test/optimizations/OptimizeInverseConversionsTests.cpp
@@ -7,14 +7,15 @@
#include <Optimizer.hpp>
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
using namespace armnn;
-BOOST_AUTO_TEST_SUITE(Optimizer)
+TEST_SUITE("Optimizer")
+{
using namespace armnn::optimizations;
-BOOST_AUTO_TEST_CASE(OptimizeInverseConversionsTest)
+TEST_CASE("OptimizeInverseConversionsTest")
{
armnn::Graph graph;
@@ -32,7 +33,7 @@ BOOST_AUTO_TEST_CASE(OptimizeInverseConversionsTest)
graph.InsertNewLayer<armnn::ConvertFp16ToFp32Layer>(output->GetInputSlot(0), "convert3");
graph.InsertNewLayer<armnn::ConvertFp32ToFp16Layer>(output->GetInputSlot(0), "convert4");
- BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
+ CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
&IsLayerOfType<armnn::ConvertFp32ToFp16Layer>,
&IsLayerOfType<armnn::ConvertFp16ToFp32Layer>, &IsLayerOfType<armnn::Convolution2dLayer>,
&IsLayerOfType<armnn::ConvertFp16ToFp32Layer>,
@@ -42,8 +43,8 @@ BOOST_AUTO_TEST_CASE(OptimizeInverseConversionsTest)
graph, armnn::MakeOptimizations(OptimizeInverseConversionsFp16(), OptimizeInverseConversionsFp32()));
// Check that all consecutive inverse conversions are removed
- BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
+ CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
&IsLayerOfType<armnn::Convolution2dLayer>, &IsLayerOfType<armnn::OutputLayer>));
}
-BOOST_AUTO_TEST_SUITE_END() \ No newline at end of file
+} \ No newline at end of file
diff --git a/src/armnn/test/optimizations/OptimizeInversePermutesTests.cpp b/src/armnn/test/optimizations/OptimizeInversePermutesTests.cpp
index 0664ef73b8..98c84d4fc2 100644
--- a/src/armnn/test/optimizations/OptimizeInversePermutesTests.cpp
+++ b/src/armnn/test/optimizations/OptimizeInversePermutesTests.cpp
@@ -7,14 +7,15 @@
#include <Optimizer.hpp>
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
using namespace armnn;
-BOOST_AUTO_TEST_SUITE(Optimizer)
+TEST_SUITE("Optimizer")
+{
using namespace armnn::optimizations;
-BOOST_AUTO_TEST_CASE(OptimizeInversePermutesTest)
+TEST_CASE("OptimizeInversePermutesTest")
{
armnn::Graph graph;
@@ -28,18 +29,18 @@ BOOST_AUTO_TEST_CASE(OptimizeInversePermutesTest)
graph.InsertNewLayer<armnn::PermuteLayer>(output->GetInputSlot(0), armnn::PermuteDescriptor({ 0, 3, 1, 2 }),
"perm0312");
- BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
+ CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
&IsLayerOfType<armnn::PermuteLayer>, &IsLayerOfType<armnn::PermuteLayer>,
&IsLayerOfType<armnn::OutputLayer>));
armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(OptimizeInversePermutes()));
// The permutes are removed.
- BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
+ CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
&IsLayerOfType<armnn::OutputLayer>));
}
-BOOST_AUTO_TEST_CASE(OptimizeInverseTransposesTest)
+TEST_CASE("OptimizeInverseTransposesTest")
{
armnn::Graph graph;
@@ -55,15 +56,15 @@ BOOST_AUTO_TEST_CASE(OptimizeInverseTransposesTest)
armnn::TransposeDescriptor({ 0, 2, 3, 1 }),
"transpose0231");
- BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
+ CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
&IsLayerOfType<armnn::TransposeLayer>, &IsLayerOfType<armnn::TransposeLayer>,
&IsLayerOfType<armnn::OutputLayer>));
armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(OptimizeInverseTransposes()));
// The permutes are removed.
- BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
+ CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
&IsLayerOfType<armnn::OutputLayer>));
}
-BOOST_AUTO_TEST_SUITE_END() \ No newline at end of file
+} \ No newline at end of file
diff --git a/src/armnn/test/optimizations/PermuteAndBatchToSpaceAsDepthToSpaceTests.cpp b/src/armnn/test/optimizations/PermuteAndBatchToSpaceAsDepthToSpaceTests.cpp
index ab990e7c82..e91e16f132 100644
--- a/src/armnn/test/optimizations/PermuteAndBatchToSpaceAsDepthToSpaceTests.cpp
+++ b/src/armnn/test/optimizations/PermuteAndBatchToSpaceAsDepthToSpaceTests.cpp
@@ -8,11 +8,12 @@
#include <Network.hpp>
#include <Optimizer.hpp>
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
using namespace armnn;
-BOOST_AUTO_TEST_SUITE(Optimizer)
+TEST_SUITE("Optimizer")
+{
using namespace armnn::optimizations;
namespace
@@ -83,13 +84,13 @@ std::unique_ptr<NetworkImpl> CreateTransposeTestNetworkImpl()
/// Tests that the optimization performed by PermuteAndBatchToSpaceAsDepthToSpace is as expected.
/// Note this does not ensure the correctness of the optimization - that is done in the below test.
-BOOST_AUTO_TEST_CASE(PermuteAndBatchToSpaceAsDepthToSpaceOptimizerTest)
+TEST_CASE("PermuteAndBatchToSpaceAsDepthToSpaceOptimizerTest")
{
std::unique_ptr<NetworkImpl> network = CreateTestNetworkImpl();
Graph graph = network.get()->GetGraph();
// Confirm initial graph is as we expect
- BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<InputLayer>, &IsLayerOfType<PermuteLayer>,
+ CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<InputLayer>, &IsLayerOfType<PermuteLayer>,
&IsLayerOfType<BatchToSpaceNdLayer>, &IsLayerOfType<OutputLayer>));
// Perform the optimization which should merge the two layers into a DepthToSpace
@@ -103,23 +104,23 @@ BOOST_AUTO_TEST_CASE(PermuteAndBatchToSpaceAsDepthToSpaceOptimizerTest)
layer->GetOutputHandler().GetTensorInfo() == TensorInfo({ 1, 4, 6, 1 }, DataType::Float32);
};
- BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<InputLayer>, checkDepthToSpace,
+ CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<InputLayer>, checkDepthToSpace,
&IsLayerOfType<OutputLayer>));
// Check the new layer has the two merged layers listed as related layers
std::list<std::string> testRelatedLayers = { "batchToSpace", "permute" };
- BOOST_TEST(CheckRelatedLayers<DepthToSpaceLayer>(graph, testRelatedLayers));
+ CHECK(CheckRelatedLayers<DepthToSpaceLayer>(graph, testRelatedLayers));
}
/// Tests that the optimization performed by PermuteAndBatchToSpaceAsDepthToSpace is as expected.
/// Note this does not ensure the correctness of the optimization - that is done in the below test.
-BOOST_AUTO_TEST_CASE(TransposeAndBatchToSpaceAsDepthToSpaceOptimizerTest)
+TEST_CASE("TransposeAndBatchToSpaceAsDepthToSpaceOptimizerTest")
{
std::unique_ptr<NetworkImpl> network = CreateTransposeTestNetworkImpl();
Graph graph = network.get()->GetGraph();
// Confirm initial graph is as we expect
- BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<InputLayer>, &IsLayerOfType<TransposeLayer>,
+ CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<InputLayer>, &IsLayerOfType<TransposeLayer>,
&IsLayerOfType<BatchToSpaceNdLayer>, &IsLayerOfType<OutputLayer>));
// Perform the optimization which should merge the two layers into a DepthToSpace
@@ -133,12 +134,12 @@ BOOST_AUTO_TEST_CASE(TransposeAndBatchToSpaceAsDepthToSpaceOptimizerTest)
layer->GetOutputHandler().GetTensorInfo() == TensorInfo({ 1, 4, 6, 1 }, DataType::Float32);
};
- BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<InputLayer>, checkDepthToSpace,
+ CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<InputLayer>, checkDepthToSpace,
&IsLayerOfType<OutputLayer>));
// Check the new layer has the two merged layers listed as related layers
std::list<std::string> testRelatedLayers = { "batchToSpace", "permute" };
- BOOST_TEST(CheckRelatedLayers<DepthToSpaceLayer>(graph, testRelatedLayers));
+ CHECK(CheckRelatedLayers<DepthToSpaceLayer>(graph, testRelatedLayers));
}
// This unit test needs the reference backend, it's not available if the reference backend is not built
@@ -208,7 +209,7 @@ INetworkPtr CreateTransposeTestNetwork()
/// Tests that a optimization performed by PermuteAndBatchToSpaceAsDepthToSpace does not change the behaviour
/// of the network (i.e. it still produces the correct output).
-BOOST_AUTO_TEST_CASE(PermuteAndBatchToSpaceAsDepthToSpaceCorrectnessTest)
+TEST_CASE("PermuteAndBatchToSpaceAsDepthToSpaceCorrectnessTest")
{
INetworkPtr network = CreateTestNetwork();
@@ -217,7 +218,7 @@ BOOST_AUTO_TEST_CASE(PermuteAndBatchToSpaceAsDepthToSpaceCorrectnessTest)
// Confirm that the optimization has actually taken place
const Graph& optGraph = GetGraphForTesting(optimizedNetwork.get());
- BOOST_TEST(CheckSequence(optGraph.cbegin(), optGraph.cend(), &IsLayerOfType<InputLayer>,
+ CHECK(CheckSequence(optGraph.cbegin(), optGraph.cend(), &IsLayerOfType<InputLayer>,
&IsLayerOfType<DepthToSpaceLayer>, &IsLayerOfType<OutputLayer>));
// Load the graph into a runtime so we can check it produces the correct output
@@ -250,12 +251,12 @@ BOOST_AUTO_TEST_CASE(PermuteAndBatchToSpaceAsDepthToSpaceCorrectnessTest)
-3.0f, -4.0f, -30.0f, -40.0f, -300.0f, -400.0f,
// clang-format on
};
- BOOST_TEST(outputData == expectedOutput);
+ CHECK(outputData == expectedOutput);
}
/// Tests that a optimization performed by PermuteAndBatchToSpaceAsDepthToSpace does not change the behaviour
/// of the network (i.e. it still produces the correct output).
-BOOST_AUTO_TEST_CASE(TransposeAndBatchToSpaceAsDepthToSpaceCorrectnessTest)
+TEST_CASE("TransposeAndBatchToSpaceAsDepthToSpaceCorrectnessTest")
{
INetworkPtr network = CreateTransposeTestNetwork();
@@ -264,7 +265,7 @@ BOOST_AUTO_TEST_CASE(TransposeAndBatchToSpaceAsDepthToSpaceCorrectnessTest)
// Confirm that the optimization has actually taken place
const Graph& optGraph = GetGraphForTesting(optimizedNetwork.get());
- BOOST_TEST(CheckSequence(optGraph.cbegin(), optGraph.cend(), &IsLayerOfType<InputLayer>,
+ CHECK(CheckSequence(optGraph.cbegin(), optGraph.cend(), &IsLayerOfType<InputLayer>,
&IsLayerOfType<DepthToSpaceLayer>, &IsLayerOfType<OutputLayer>));
// Load the graph into a runtime so we can check it produces the correct output
@@ -297,8 +298,8 @@ BOOST_AUTO_TEST_CASE(TransposeAndBatchToSpaceAsDepthToSpaceCorrectnessTest)
-3.0f, -4.0f, -30.0f, -40.0f, -300.0f, -400.0f,
// clang-format on
};
- BOOST_TEST(outputData == expectedOutput);
+ CHECK(outputData == expectedOutput);
}
#endif
-BOOST_AUTO_TEST_SUITE_END() \ No newline at end of file
+} \ No newline at end of file
diff --git a/src/armnn/test/optimizations/PermuteAsReshapeTests.cpp b/src/armnn/test/optimizations/PermuteAsReshapeTests.cpp
index 3f3c254d9a..fdd0a6ddd3 100644
--- a/src/armnn/test/optimizations/PermuteAsReshapeTests.cpp
+++ b/src/armnn/test/optimizations/PermuteAsReshapeTests.cpp
@@ -7,14 +7,15 @@
#include <Optimizer.hpp>
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
using namespace armnn;
-BOOST_AUTO_TEST_SUITE(Optimizer)
+TEST_SUITE("Optimizer")
+{
using namespace armnn::optimizations;
-BOOST_AUTO_TEST_CASE(PermuteAsReshapeTest)
+TEST_CASE("PermuteAsReshapeTest")
{
armnn::Graph graph;
@@ -36,7 +37,7 @@ BOOST_AUTO_TEST_CASE(PermuteAsReshapeTest)
->GetOutputHandler()
.SetTensorInfo(infoOut);
- BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
+ CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
&IsLayerOfType<armnn::PermuteLayer>, &IsLayerOfType<armnn::OutputLayer>));
armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(PermuteAsReshape()));
@@ -50,11 +51,11 @@ BOOST_AUTO_TEST_CASE(PermuteAsReshapeTest)
(reshapeLayer->GetOutputHandler().GetTensorInfo().GetShape() == infoOut.GetShape());
};
- BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>, checkReshape,
+ CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>, checkReshape,
&IsLayerOfType<armnn::OutputLayer>));
std::list<std::string> testRelatedLayers = { permuteLayerName };
- BOOST_TEST(CheckRelatedLayers<armnn::ReshapeLayer>(graph, testRelatedLayers));
+ CHECK(CheckRelatedLayers<armnn::ReshapeLayer>(graph, testRelatedLayers));
}
-BOOST_AUTO_TEST_SUITE_END() \ No newline at end of file
+} \ No newline at end of file
diff --git a/src/armnn/test/optimizations/ReduceMultipleAxesTests.cpp b/src/armnn/test/optimizations/ReduceMultipleAxesTests.cpp
index b42c0a2cfb..df9a0dbc39 100644
--- a/src/armnn/test/optimizations/ReduceMultipleAxesTests.cpp
+++ b/src/armnn/test/optimizations/ReduceMultipleAxesTests.cpp
@@ -8,12 +8,12 @@
#include <armnn/INetwork.hpp>
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
using namespace armnn;
-BOOST_AUTO_TEST_SUITE(Optimizer)
-
+TEST_SUITE("Optimizer_ReduceMultipleAxes")
+{
INetworkPtr CreateSimpleReduceNetwork(ReduceDescriptor reduceDescriptor,
TensorShape& inputShape,
TensorShape& outputShape)
@@ -22,10 +22,10 @@ INetworkPtr CreateSimpleReduceNetwork(ReduceDescriptor reduceDescriptor,
INetworkPtr network = INetwork::Create();
const std::string layerName("reduce_layer");
- const TensorInfo inputInfo (inputShape, DataType::Float32);
+ const TensorInfo inputInfo(inputShape, DataType::Float32);
const TensorInfo outputInfo(outputShape, DataType::Float32);
- IConnectableLayer* const inputLayer = network->AddInputLayer(0);
+ IConnectableLayer* const inputLayer = network->AddInputLayer(0);
IConnectableLayer* const reduceLayer = network->AddReduceLayer(reduceDescriptor, layerName.c_str());
IConnectableLayer* const outputLayer1 = network->AddOutputLayer(0);
IConnectableLayer* const outputLayer2 = network->AddOutputLayer(1);
@@ -56,37 +56,36 @@ void ReduceWithMultipleAxesTest(INetworkPtr& network,
Graph& graph = GetGraphForTesting(optNet.get());
if (numOfAxes == 2)
{
- BOOST_CHECK(graph.GetNumLayers() == 5);
- BOOST_TEST(CheckSequence(graph.cbegin(),
- graph.cend(),
- &IsLayerOfType<InputLayer>,
- &IsLayerOfType<ReduceLayer>,
- &IsLayerOfType<ReduceLayer>,
- &IsLayerOfType<OutputLayer>,
- &IsLayerOfType<OutputLayer>));
- }
- else
+ CHECK(graph.GetNumLayers() == 5);
+ CHECK(CheckSequence(graph.cbegin(),
+ graph.cend(),
+ &IsLayerOfType<InputLayer>,
+ &IsLayerOfType<ReduceLayer>,
+ &IsLayerOfType<ReduceLayer>,
+ &IsLayerOfType<OutputLayer>,
+ &IsLayerOfType<OutputLayer>));
+ } else
{
- BOOST_CHECK(graph.GetNumLayers() == 6);
- BOOST_TEST(CheckSequence(graph.cbegin(),
- graph.cend(),
- &IsLayerOfType<InputLayer>,
- &IsLayerOfType<ReduceLayer>,
- &IsLayerOfType<ReduceLayer>,
- &IsLayerOfType<ReduceLayer>,
- &IsLayerOfType<OutputLayer>,
- &IsLayerOfType<OutputLayer>));
+ CHECK(graph.GetNumLayers() == 6);
+ CHECK(CheckSequence(graph.cbegin(),
+ graph.cend(),
+ &IsLayerOfType<InputLayer>,
+ &IsLayerOfType<ReduceLayer>,
+ &IsLayerOfType<ReduceLayer>,
+ &IsLayerOfType<ReduceLayer>,
+ &IsLayerOfType<OutputLayer>,
+ &IsLayerOfType<OutputLayer>));
}
// Get last layer in new chain, layers name follow 0, 1, 2 pattern
std::string layerName = "reduce_layer_" + std::to_string(numOfAxes - 1);
Layer* const reduceLayer = GetFirstLayerWithName(graph, layerName);
- BOOST_TEST(reduceLayer);
+ CHECK(reduceLayer);
auto reduceTensorInfo = reduceLayer->GetOutputSlot().GetTensorInfo();
// Tensorshape and the data type are correct
- BOOST_TEST((reduceTensorInfo.GetShape() == outputShape));
- BOOST_TEST((reduceTensorInfo.GetDataType() == DataType::Float32));
+ CHECK((reduceTensorInfo.GetShape() == outputShape));
+ CHECK((reduceTensorInfo.GetDataType() == DataType::Float32));
// Load network into runtime
NetworkId networkIdentifier;
@@ -95,45 +94,45 @@ void ReduceWithMultipleAxesTest(INetworkPtr& network,
// Create input and output tensors
std::vector<float> outputData(expectedOutput.size());
InputTensors inputTensors
- {
- { 0, armnn::ConstTensor(run->GetInputTensorInfo(networkIdentifier, 0), inputData.data()) }
- };
+ {
+ {0, armnn::ConstTensor(run->GetInputTensorInfo(networkIdentifier, 0), inputData.data())}
+ };
OutputTensors outputTensors
- {
- { 0, armnn::Tensor(run->GetOutputTensorInfo(networkIdentifier, 0), outputData.data()) },
- { 1, armnn::Tensor(run->GetOutputTensorInfo(networkIdentifier, 1), outputData.data()) }
- };
+ {
+ {0, armnn::Tensor(run->GetOutputTensorInfo(networkIdentifier, 0), outputData.data())},
+ {1, armnn::Tensor(run->GetOutputTensorInfo(networkIdentifier, 1), outputData.data())}
+ };
// Run inference
run->EnqueueWorkload(networkIdentifier, inputTensors, outputTensors);
// Checks the results
- BOOST_TEST(outputData == expectedOutput);
+ CHECK(outputData == expectedOutput);
}
void ReduceSumWithTwoAxesKeepDimsTest(Compute backendId)
{
armnn::ReduceDescriptor reduceDescriptor;
- reduceDescriptor.m_vAxis = { 1, 2 };
+ reduceDescriptor.m_vAxis = {1, 2};
reduceDescriptor.m_KeepDims = true;
reduceDescriptor.m_ReduceOperation = armnn::ReduceOperation::Sum;
- TensorShape inputShape = { 1, 3, 2, 4 };
- TensorShape outputShape = { 1, 1, 1, 4 };
+ TensorShape inputShape = {1, 3, 2, 4};
+ TensorShape outputShape = {1, 1, 1, 4};
// Construct ArmNN network
INetworkPtr network = CreateSimpleReduceNetwork(reduceDescriptor, inputShape, outputShape);
// Creates structures for input & output.
- const std::vector<float> inputData({ 1.0f, 2.0f, 3.0f, 4.0f,
- 5.0f, 6.0f, 7.0f, 8.0f,
+ const std::vector<float> inputData({1.0f, 2.0f, 3.0f, 4.0f,
+ 5.0f, 6.0f, 7.0f, 8.0f,
- 10.0f, 20.0f, 30.0f, 40.0f,
- 50.0f, 60.0f, 70.0f, 80.0f,
+ 10.0f, 20.0f, 30.0f, 40.0f,
+ 50.0f, 60.0f, 70.0f, 80.0f,
- 100.0f, 200.0f, 300.0f, 400.0f,
- 500.0f, 600.0f, 700.0f, 800.0f });
- const std::vector<float> expectedOutput({ 666.0f, 888.0f, 1110.0f, 1332.0f });
+ 100.0f, 200.0f, 300.0f, 400.0f,
+ 500.0f, 600.0f, 700.0f, 800.0f});
+ const std::vector<float> expectedOutput({666.0f, 888.0f, 1110.0f, 1332.0f});
ReduceWithMultipleAxesTest(network,
outputShape,
@@ -146,26 +145,26 @@ void ReduceSumWithTwoAxesKeepDimsTest(Compute backendId)
void ReduceSumWithTwoAxesTest(Compute backendId)
{
armnn::ReduceDescriptor reduceDescriptor;
- reduceDescriptor.m_vAxis = { 1, 2 };
+ reduceDescriptor.m_vAxis = {1, 2};
reduceDescriptor.m_KeepDims = false;
reduceDescriptor.m_ReduceOperation = armnn::ReduceOperation::Sum;
- TensorShape inputShape = { 1, 3, 2, 4 };
- TensorShape outputShape = { 1, 4 };
+ TensorShape inputShape = {1, 3, 2, 4};
+ TensorShape outputShape = {1, 4};
// Construct ArmNN network
INetworkPtr network = CreateSimpleReduceNetwork(reduceDescriptor, inputShape, outputShape);
// Creates structures for input & output.
- const std::vector<float> inputData({ 1.0f, 2.0f, 3.0f, 4.0f,
- 5.0f, 6.0f, 7.0f, 8.0f,
+ const std::vector<float> inputData({1.0f, 2.0f, 3.0f, 4.0f,
+ 5.0f, 6.0f, 7.0f, 8.0f,
- 10.0f, 20.0f, 30.0f, 40.0f,
- 50.0f, 60.0f, 70.0f, 80.0f,
+ 10.0f, 20.0f, 30.0f, 40.0f,
+ 50.0f, 60.0f, 70.0f, 80.0f,
- 100.0f, 200.0f, 300.0f, 400.0f,
- 500.0f, 600.0f, 700.0f, 800.0f });
- const std::vector<float> expectedOutput({ 666.0f, 888.0f, 1110.0f, 1332.0f });
+ 100.0f, 200.0f, 300.0f, 400.0f,
+ 500.0f, 600.0f, 700.0f, 800.0f});
+ const std::vector<float> expectedOutput({666.0f, 888.0f, 1110.0f, 1332.0f});
ReduceWithMultipleAxesTest(network,
outputShape,
@@ -178,29 +177,29 @@ void ReduceSumWithTwoAxesTest(Compute backendId)
void ReduceSumWithThreeAxesKeepDimsTest(Compute backendId)
{
armnn::ReduceDescriptor reduceDescriptor;
- reduceDescriptor.m_vAxis = { 0, 2, 3 };
+ reduceDescriptor.m_vAxis = {0, 2, 3};
reduceDescriptor.m_KeepDims = true;
reduceDescriptor.m_ReduceOperation = armnn::ReduceOperation::Sum;
- TensorShape inputShape = { 2, 2, 2, 2 };
- TensorShape outputShape = { 1, 2, 1, 1 };
+ TensorShape inputShape = {2, 2, 2, 2};
+ TensorShape outputShape = {1, 2, 1, 1};
// Construct ArmNN network
INetworkPtr network = CreateSimpleReduceNetwork(reduceDescriptor, inputShape, outputShape);
// Creates structures for input & output.
- const std::vector<float> inputData({ 1.0f, 2.0f,
- 3.0f, 4.0f,
+ const std::vector<float> inputData({1.0f, 2.0f,
+ 3.0f, 4.0f,
- 5.0f, 6.0f,
- 7.0f, 8.0f,
+ 5.0f, 6.0f,
+ 7.0f, 8.0f,
- 10.0f, 20.0f,
- 30.0f, 40.0f,
+ 10.0f, 20.0f,
+ 30.0f, 40.0f,
- 50.0f, 60.0f,
- 70.0f, 80.0f });
- const std::vector<float> expectedOutput({ 110.0f, 286.0f });
+ 50.0f, 60.0f,
+ 70.0f, 80.0f});
+ const std::vector<float> expectedOutput({110.0f, 286.0f});
ReduceWithMultipleAxesTest(network,
outputShape,
@@ -213,29 +212,29 @@ void ReduceSumWithThreeAxesKeepDimsTest(Compute backendId)
void ReduceSumWithThreeAxesTest(Compute backendId)
{
armnn::ReduceDescriptor reduceDescriptor;
- reduceDescriptor.m_vAxis = { 0, 2, 3 };
+ reduceDescriptor.m_vAxis = {0, 2, 3};
reduceDescriptor.m_KeepDims = false;
reduceDescriptor.m_ReduceOperation = armnn::ReduceOperation::Sum;
- TensorShape inputShape = { 2, 2, 2, 2 };
- TensorShape outputShape = { 2 };
+ TensorShape inputShape = {2, 2, 2, 2};
+ TensorShape outputShape = {2};
// Construct ArmNN network
INetworkPtr network = CreateSimpleReduceNetwork(reduceDescriptor, inputShape, outputShape);
// Creates structures for input & output.
- const std::vector<float> inputData({ 1.0f, 2.0f,
- 3.0f, 4.0f,
+ const std::vector<float> inputData({1.0f, 2.0f,
+ 3.0f, 4.0f,
- 5.0f, 6.0f,
- 7.0f, 8.0f,
+ 5.0f, 6.0f,
+ 7.0f, 8.0f,
- 10.0f, 20.0f,
- 30.0f, 40.0f,
+ 10.0f, 20.0f,
+ 30.0f, 40.0f,
- 50.0f, 60.0f,
- 70.0f, 80.0f });
- const std::vector<float> expectedOutput({ 110.0f, 286.0f });
+ 50.0f, 60.0f,
+ 70.0f, 80.0f});
+ const std::vector<float> expectedOutput({110.0f, 286.0f});
ReduceWithMultipleAxesTest(network,
outputShape,
@@ -247,47 +246,47 @@ void ReduceSumWithThreeAxesTest(Compute backendId)
using namespace armnn;
#if defined(ARMCOMPUTENEON_ENABLED)
-BOOST_AUTO_TEST_CASE(ReduceSumWithTwoAxesKeepDimsCpuAccTest)
+TEST_CASE("ReduceSumWithTwoAxesKeepDimsCpuAccTest")
{
ReduceSumWithTwoAxesKeepDimsTest(Compute::CpuAcc);
}
-BOOST_AUTO_TEST_CASE(ReduceSumWithTwoAxesCpuAccTest)
+TEST_CASE("ReduceSumWithTwoAxesCpuAccTest")
{
ReduceSumWithTwoAxesTest(Compute::CpuAcc);
}
-BOOST_AUTO_TEST_CASE(ReduceSumWithThreeAxesKeepDimsCpuAccTest)
+TEST_CASE("ReduceSumWithThreeAxesKeepDimsCpuAccTest")
{
ReduceSumWithThreeAxesKeepDimsTest(Compute::CpuAcc);
}
-BOOST_AUTO_TEST_CASE(ReduceSumWithThreeAxesCpuAccTest)
+TEST_CASE("ReduceSumWithThreeAxesCpuAccTest")
{
ReduceSumWithThreeAxesTest(Compute::CpuAcc);
}
#endif
#if defined(ARMCOMPUTECL_ENABLED)
-BOOST_AUTO_TEST_CASE(ReduceSumWithTwoAxesKeepDimsGpuAccTest)
+TEST_CASE("ReduceSumWithTwoAxesKeepDimsGpuAccTest")
{
ReduceSumWithTwoAxesKeepDimsTest(Compute::GpuAcc);
}
-BOOST_AUTO_TEST_CASE(ReduceSumWithTwoAxesGpuAccTest)
+TEST_CASE("ReduceSumWithTwoAxesGpuAccTest")
{
ReduceSumWithTwoAxesTest(Compute::GpuAcc);
}
-BOOST_AUTO_TEST_CASE(ReduceSumWithThreeAxesKeepDimsGpuAccTest)
+TEST_CASE("ReduceSumWithThreeAxesKeepDimsGpuAccTest")
{
ReduceSumWithThreeAxesKeepDimsTest(Compute::GpuAcc);
}
-BOOST_AUTO_TEST_CASE(ReduceSumWithThreeAxesGpuAccTest)
+TEST_CASE("ReduceSumWithThreeAxesGpuAccTest")
{
ReduceSumWithThreeAxesTest(Compute::GpuAcc);
}
#endif
-BOOST_AUTO_TEST_SUITE_END() \ No newline at end of file
+} \ No newline at end of file
diff --git a/src/armnn/test/optimizations/SquashEqualSiblingsTests.cpp b/src/armnn/test/optimizations/SquashEqualSiblingsTests.cpp
index 1c97267d89..069d28457e 100644
--- a/src/armnn/test/optimizations/SquashEqualSiblingsTests.cpp
+++ b/src/armnn/test/optimizations/SquashEqualSiblingsTests.cpp
@@ -7,14 +7,15 @@
#include <Optimizer.hpp>
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
using namespace armnn;
-BOOST_AUTO_TEST_SUITE(Optimizer)
+TEST_SUITE("Optimizer")
+{
using namespace armnn::optimizations;
-BOOST_AUTO_TEST_CASE(SquashEqualSiblingsTest)
+TEST_CASE("SquashEqualSiblingsTest")
{
armnn::Graph graph;
@@ -54,7 +55,7 @@ BOOST_AUTO_TEST_CASE(SquashEqualSiblingsTest)
layer->GetOutputSlot().Connect(graph.AddLayer<armnn::OutputLayer>(outputId++, "")->GetInputSlot(0));
input->GetOutputSlot().Connect(layer->GetInputSlot(0));
- BOOST_TEST(CheckSequence(
+ CHECK(CheckSequence(
graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>, &IsLayerOfType<armnn::PermuteLayer>,
&IsLayerOfType<armnn::ReshapeLayer>, &IsLayerOfType<armnn::FloorLayer>, &IsLayerOfType<armnn::ReshapeLayer>,
&IsLayerOfType<armnn::PermuteLayer>, &IsLayerOfType<armnn::OutputLayer>, &IsLayerOfType<armnn::OutputLayer>,
@@ -64,11 +65,11 @@ BOOST_AUTO_TEST_CASE(SquashEqualSiblingsTest)
// The permutes and reshapes are squashed.
- BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
+ CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
&IsLayerOfType<armnn::PermuteLayer>, &IsLayerOfType<armnn::ReshapeLayer>,
&IsLayerOfType<armnn::FloorLayer>, &IsLayerOfType<armnn::OutputLayer>,
&IsLayerOfType<armnn::OutputLayer>, &IsLayerOfType<armnn::OutputLayer>,
&IsLayerOfType<armnn::OutputLayer>, &IsLayerOfType<armnn::OutputLayer>));
}
-BOOST_AUTO_TEST_SUITE_END() \ No newline at end of file
+} \ No newline at end of file
diff --git a/src/armnn/test/optimizations/TransposeAsReshapeTests.cpp b/src/armnn/test/optimizations/TransposeAsReshapeTests.cpp
index 1c9f15ce8d..5d1d950573 100644
--- a/src/armnn/test/optimizations/TransposeAsReshapeTests.cpp
+++ b/src/armnn/test/optimizations/TransposeAsReshapeTests.cpp
@@ -7,14 +7,15 @@
#include <Optimizer.hpp>
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
using namespace armnn;
-BOOST_AUTO_TEST_SUITE(Optimizer)
+TEST_SUITE("Optimizer")
+{
using namespace armnn::optimizations;
-BOOST_AUTO_TEST_CASE(TransposeAsReshapeTest)
+TEST_CASE("TransposeAsReshapeTest")
{
armnn::Graph graph;
@@ -36,7 +37,7 @@ BOOST_AUTO_TEST_CASE(TransposeAsReshapeTest)
->GetOutputHandler()
.SetTensorInfo(infoOut);
- BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
+ CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
&IsLayerOfType<armnn::TransposeLayer>, &IsLayerOfType<armnn::OutputLayer>));
armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(TransposeAsReshape()));
@@ -50,11 +51,11 @@ BOOST_AUTO_TEST_CASE(TransposeAsReshapeTest)
(reshapeLayer->GetOutputHandler().GetTensorInfo().GetShape() == infoOut.GetShape());
};
- BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>, checkReshape,
+ CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>, checkReshape,
&IsLayerOfType<armnn::OutputLayer>));
std::list<std::string> testRelatedLayers = { transposeLayerName };
- BOOST_TEST(CheckRelatedLayers<armnn::ReshapeLayer>(graph, testRelatedLayers));
+ CHECK(CheckRelatedLayers<armnn::ReshapeLayer>(graph, testRelatedLayers));
}
-BOOST_AUTO_TEST_SUITE_END() \ No newline at end of file
+} \ No newline at end of file