Diffstat (limited to 'src/backends/neon')
-rw-r--r--  src/backends/neon/test/NeonCreateWorkloadTests.cpp    | 302
-rw-r--r--  src/backends/neon/test/NeonEndToEndTests.cpp          | 711
-rw-r--r--  src/backends/neon/test/NeonFallbackTests.cpp          | 260
-rw-r--r--  src/backends/neon/test/NeonJsonPrinterTests.cpp       |  10
-rw-r--r--  src/backends/neon/test/NeonLayerSupportTests.cpp      |  56
-rw-r--r--  src/backends/neon/test/NeonLayerTests.cpp             |  36
-rw-r--r--  src/backends/neon/test/NeonLayerTests_NDK_Bug.cpp     |   6
-rw-r--r--  src/backends/neon/test/NeonMemCopyTests.cpp           |  24
-rw-r--r--  src/backends/neon/test/NeonOptimizedNetworkTests.cpp  |  42
-rw-r--r--  src/backends/neon/test/NeonRuntimeTests.cpp           |  28
-rw-r--r--  src/backends/neon/test/NeonTensorHandleTests.cpp      |  75
-rw-r--r--  src/backends/neon/test/NeonTimerTest.cpp              |  26
12 files changed, 795 insertions(+), 781 deletions(-)
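The patch below is a mechanical migration of the Neon backend tests from Boost.Test to doctest. As a hedged summary of the macro mapping actually applied in the hunks (doctest names as documented upstream), the conversion amounts to:

// Boost.Test                                      doctest
BOOST_AUTO_TEST_SUITE(CreateWorkloadNeon)      ->  TEST_SUITE("CreateWorkloadNeon") {
BOOST_AUTO_TEST_CASE(SomeTestName)             ->  TEST_CASE("SomeTestName")
BOOST_TEST(expr)                               ->  CHECK(expr)
BOOST_TEST(cond, message)                      ->  CHECK_MESSAGE(cond, message)
BOOST_AUTO_TEST_SUITE_END()                    ->  }   // brace closing the TEST_SUITE block

Note that doctest suites take a string name and wrap their cases in braces, which is why the first hunk adds an opening brace after TEST_SUITE("CreateWorkloadNeon") and the last hunk of each file replaces BOOST_AUTO_TEST_SUITE_END() with a closing brace.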
diff --git a/src/backends/neon/test/NeonCreateWorkloadTests.cpp b/src/backends/neon/test/NeonCreateWorkloadTests.cpp
index a8c0c8aca0..e3d73be9d1 100644
--- a/src/backends/neon/test/NeonCreateWorkloadTests.cpp
+++ b/src/backends/neon/test/NeonCreateWorkloadTests.cpp
@@ -18,8 +18,10 @@
#include <neon/workloads/NeonWorkloadUtils.hpp>
#include <neon/workloads/NeonWorkloads.hpp>
-BOOST_AUTO_TEST_SUITE(CreateWorkloadNeon)
+#include <doctest/doctest.h>
+TEST_SUITE("CreateWorkloadNeon")
+{
namespace
{
@@ -77,18 +79,18 @@ static void NeonCreateActivationWorkloadTest()
ActivationQueueDescriptor queueDescriptor = workload->GetData();
auto inputHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Inputs[0]);
auto outputHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Outputs[0]);
- BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, TensorInfo({1, 1}, DataType)));
- BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo({1, 1}, DataType)));
+ CHECK(TestNeonTensorHandleInfo(inputHandle, TensorInfo({1, 1}, DataType)));
+ CHECK(TestNeonTensorHandleInfo(outputHandle, TensorInfo({1, 1}, DataType)));
}
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
-BOOST_AUTO_TEST_CASE(CreateActivationFloat16Workload)
+TEST_CASE("CreateActivationFloat16Workload")
{
NeonCreateActivationWorkloadTest<DataType::Float16>();
}
#endif
-BOOST_AUTO_TEST_CASE(CreateActivationFloatWorkload)
+TEST_CASE("CreateActivationFloatWorkload")
{
NeonCreateActivationWorkloadTest<DataType::Float32>();
}
@@ -109,13 +111,13 @@ static void NeonCreateElementwiseWorkloadTest()
auto inputHandle1 = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Inputs[0]);
auto inputHandle2 = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Inputs[1]);
auto outputHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Outputs[0]);
- BOOST_TEST(TestNeonTensorHandleInfo(inputHandle1, TensorInfo({2, 3}, DataType)));
- BOOST_TEST(TestNeonTensorHandleInfo(inputHandle2, TensorInfo({2, 3}, DataType)));
- BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo({2, 3}, DataType)));
+ CHECK(TestNeonTensorHandleInfo(inputHandle1, TensorInfo({2, 3}, DataType)));
+ CHECK(TestNeonTensorHandleInfo(inputHandle2, TensorInfo({2, 3}, DataType)));
+ CHECK(TestNeonTensorHandleInfo(outputHandle, TensorInfo({2, 3}, DataType)));
}
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
-BOOST_AUTO_TEST_CASE(CreateAdditionFloat16Workload)
+TEST_CASE("CreateAdditionFloat16Workload")
{
NeonCreateElementwiseWorkloadTest<NeonAdditionWorkload,
AdditionQueueDescriptor,
@@ -124,7 +126,7 @@ BOOST_AUTO_TEST_CASE(CreateAdditionFloat16Workload)
}
#endif
-BOOST_AUTO_TEST_CASE(CreateAdditionFloatWorkload)
+TEST_CASE("CreateAdditionFloatWorkload")
{
NeonCreateElementwiseWorkloadTest<NeonAdditionWorkload,
AdditionQueueDescriptor,
@@ -133,7 +135,7 @@ BOOST_AUTO_TEST_CASE(CreateAdditionFloatWorkload)
}
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
-BOOST_AUTO_TEST_CASE(CreateSubtractionFloat16Workload)
+TEST_CASE("CreateSubtractionFloat16Workload")
{
NeonCreateElementwiseWorkloadTest<NeonSubtractionWorkload,
SubtractionQueueDescriptor,
@@ -142,7 +144,7 @@ BOOST_AUTO_TEST_CASE(CreateSubtractionFloat16Workload)
}
#endif
-BOOST_AUTO_TEST_CASE(CreateSubtractionFloatWorkload)
+TEST_CASE("CreateSubtractionFloatWorkload")
{
NeonCreateElementwiseWorkloadTest<NeonSubtractionWorkload,
SubtractionQueueDescriptor,
@@ -150,7 +152,7 @@ BOOST_AUTO_TEST_CASE(CreateSubtractionFloatWorkload)
DataType::Float32>();
}
-BOOST_AUTO_TEST_CASE(CreateSubtractionUint8Workload)
+TEST_CASE("CreateSubtractionUint8Workload")
{
NeonCreateElementwiseWorkloadTest<NeonSubtractionWorkload,
SubtractionQueueDescriptor,
@@ -159,7 +161,7 @@ BOOST_AUTO_TEST_CASE(CreateSubtractionUint8Workload)
}
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
-BOOST_AUTO_TEST_CASE(CreateMultiplicationFloat16Workload)
+TEST_CASE("CreateMultiplicationFloat16Workload")
{
NeonCreateElementwiseWorkloadTest<NeonMultiplicationWorkload,
MultiplicationQueueDescriptor,
@@ -168,7 +170,7 @@ BOOST_AUTO_TEST_CASE(CreateMultiplicationFloat16Workload)
}
#endif
-BOOST_AUTO_TEST_CASE(CreateMultiplicationFloatWorkload)
+TEST_CASE("CreateMultiplicationFloatWorkload")
{
NeonCreateElementwiseWorkloadTest<NeonMultiplicationWorkload,
MultiplicationQueueDescriptor,
@@ -176,7 +178,7 @@ BOOST_AUTO_TEST_CASE(CreateMultiplicationFloatWorkload)
DataType::Float32>();
}
-BOOST_AUTO_TEST_CASE(CreateMultiplicationUint8Workload)
+TEST_CASE("CreateMultiplicationUint8Workload")
{
NeonCreateElementwiseWorkloadTest<NeonMultiplicationWorkload,
MultiplicationQueueDescriptor,
@@ -184,7 +186,7 @@ BOOST_AUTO_TEST_CASE(CreateMultiplicationUint8Workload)
DataType::QAsymmU8>();
}
-BOOST_AUTO_TEST_CASE(CreateDivisionFloatWorkloadTest)
+TEST_CASE("CreateDivisionFloatWorkloadTest")
{
NeonCreateElementwiseWorkloadTest<NeonDivisionWorkload,
DivisionQueueDescriptor,
@@ -210,28 +212,28 @@ static void NeonCreateBatchNormalizationWorkloadTest(DataLayout dataLayout)
TensorShape inputShape = (dataLayout == DataLayout::NCHW) ? TensorShape{2, 3, 4, 4} : TensorShape{2, 4, 4, 3};
TensorShape outputShape = (dataLayout == DataLayout::NCHW) ? TensorShape{2, 3, 4, 4} : TensorShape{2, 4, 4, 3};
- BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, TensorInfo(inputShape, DataType)));
- BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo(outputShape, DataType)));
+ CHECK(TestNeonTensorHandleInfo(inputHandle, TensorInfo(inputShape, DataType)));
+ CHECK(TestNeonTensorHandleInfo(outputHandle, TensorInfo(outputShape, DataType)));
}
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
-BOOST_AUTO_TEST_CASE(CreateBatchNormalizationFloat16NchwWorkload)
+TEST_CASE("CreateBatchNormalizationFloat16NchwWorkload")
{
NeonCreateBatchNormalizationWorkloadTest<NeonBatchNormalizationWorkload, DataType::Float16>(DataLayout::NCHW);
}
-BOOST_AUTO_TEST_CASE(CreateBatchNormalizationFloat16NhwcWorkload)
+TEST_CASE("CreateBatchNormalizationFloat16NhwcWorkload")
{
NeonCreateBatchNormalizationWorkloadTest<NeonBatchNormalizationWorkload, DataType::Float16>(DataLayout::NHWC);
}
#endif
-BOOST_AUTO_TEST_CASE(CreateBatchNormalizationFloatNchwWorkload)
+TEST_CASE("CreateBatchNormalizationFloatNchwWorkload")
{
NeonCreateBatchNormalizationWorkloadTest<NeonBatchNormalizationWorkload, DataType::Float32>(DataLayout::NCHW);
}
-BOOST_AUTO_TEST_CASE(CreateBatchNormalizationFloatNhwcWorkload)
+TEST_CASE("CreateBatchNormalizationFloatNhwcWorkload")
{
NeonCreateBatchNormalizationWorkloadTest<NeonBatchNormalizationWorkload, DataType::Float32>(DataLayout::NHWC);
}
@@ -252,33 +254,33 @@ static void NeonCreateConvolution2dWorkloadTest(DataLayout dataLayout = DataLayo
Convolution2dQueueDescriptor queueDescriptor = workload->GetData();
auto inputHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Inputs[0]);
auto outputHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Outputs[0]);
- BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, TensorInfo(inputShape, DataType)));
- BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo(outputShape, DataType)));
+ CHECK(TestNeonTensorHandleInfo(inputHandle, TensorInfo(inputShape, DataType)));
+ CHECK(TestNeonTensorHandleInfo(outputHandle, TensorInfo(outputShape, DataType)));
}
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
-BOOST_AUTO_TEST_CASE(CreateConvolution2dFloat16NchwWorkload)
+TEST_CASE("CreateConvolution2dFloat16NchwWorkload")
{
NeonCreateConvolution2dWorkloadTest<DataType::Float16>();
}
-BOOST_AUTO_TEST_CASE(CreateConvolution2dFloat16NhwcWorkload)
+TEST_CASE("CreateConvolution2dFloat16NhwcWorkload")
{
NeonCreateConvolution2dWorkloadTest<DataType::Float16>(DataLayout::NHWC);
}
#endif
-BOOST_AUTO_TEST_CASE(CreateConvolution2dFloatNchwWorkload)
+TEST_CASE("CreateConvolution2dFloatNchwWorkload")
{
NeonCreateConvolution2dWorkloadTest<DataType::Float32>();
}
-BOOST_AUTO_TEST_CASE(CreateConvolution2dFloatNhwcWorkload)
+TEST_CASE("CreateConvolution2dFloatNhwcWorkload")
{
NeonCreateConvolution2dWorkloadTest<DataType::Float32>(DataLayout::NHWC);
}
-BOOST_AUTO_TEST_CASE(CreateConvolution2dFastMathEnabledWorkload)
+TEST_CASE("CreateConvolution2dFastMathEnabledWorkload")
{
Graph graph;
using ModelOptions = std::vector<BackendOptions>;
@@ -324,17 +326,17 @@ static void NeonCreateDepthWiseConvolutionWorkloadTest(DataLayout dataLayout)
TensorShape outputShape = (dataLayout == DataLayout::NCHW) ? std::initializer_list<unsigned int>({ 2, 2, 5, 5 })
: std::initializer_list<unsigned int>({ 2, 5, 5, 2 });
- BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, TensorInfo(inputShape, DataType)));
- BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo(outputShape, DataType)));
+ CHECK(TestNeonTensorHandleInfo(inputHandle, TensorInfo(inputShape, DataType)));
+ CHECK(TestNeonTensorHandleInfo(outputHandle, TensorInfo(outputShape, DataType)));
}
-BOOST_AUTO_TEST_CASE(CreateDepthWiseConvolution2dFloat32NhwcWorkload)
+TEST_CASE("CreateDepthWiseConvolution2dFloat32NhwcWorkload")
{
NeonCreateDepthWiseConvolutionWorkloadTest<DataType::Float32>(DataLayout::NHWC);
}
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
-BOOST_AUTO_TEST_CASE(CreateDepthWiseConvolution2dFloat16NhwcWorkload)
+TEST_CASE("CreateDepthWiseConvolution2dFloat16NhwcWorkload")
{
NeonCreateDepthWiseConvolutionWorkloadTest<DataType::Float16>(DataLayout::NHWC);
}
@@ -357,28 +359,28 @@ static void NeonCreateFullyConnectedWorkloadTest()
// Checks that outputs and inputs are as we expect them (see definition of CreateFullyConnectedWorkloadTest).
float inputsQScale = DataType == armnn::DataType::QAsymmU8 ? 1.0f : 0.0;
float outputQScale = DataType == armnn::DataType::QAsymmU8 ? 2.0f : 0.0;
- BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, TensorInfo({3, 1, 4, 5}, DataType, inputsQScale)));
- BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo({3, 7}, DataType, outputQScale)));
+ CHECK(TestNeonTensorHandleInfo(inputHandle, TensorInfo({3, 1, 4, 5}, DataType, inputsQScale)));
+ CHECK(TestNeonTensorHandleInfo(outputHandle, TensorInfo({3, 7}, DataType, outputQScale)));
}
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
-BOOST_AUTO_TEST_CASE(CreateFullyConnectedFloat16Workload)
+TEST_CASE("CreateFullyConnectedFloat16Workload")
{
NeonCreateFullyConnectedWorkloadTest<NeonFullyConnectedWorkload, DataType::Float16>();
}
#endif
-BOOST_AUTO_TEST_CASE(CreateFullyConnectedFloatWorkload)
+TEST_CASE("CreateFullyConnectedFloatWorkload")
{
NeonCreateFullyConnectedWorkloadTest<NeonFullyConnectedWorkload, DataType::Float32>();
}
-BOOST_AUTO_TEST_CASE(CreateFullyConnectedQAsymmU8Workload)
+TEST_CASE("CreateFullyConnectedQAsymmU8Workload")
{
NeonCreateFullyConnectedWorkloadTest<NeonFullyConnectedWorkload, DataType::QAsymmU8>();
}
-BOOST_AUTO_TEST_CASE(CreateFullyConnectedQAsymmS8Workload)
+TEST_CASE("CreateFullyConnectedQAsymmS8Workload")
{
NeonCreateFullyConnectedWorkloadTest<NeonFullyConnectedWorkload, DataType::QAsymmS8>();
}
@@ -400,28 +402,28 @@ static void NeonCreateNormalizationWorkloadTest(DataLayout dataLayout)
TensorShape inputShape = (dataLayout == DataLayout::NCHW) ? TensorShape{3, 5, 5, 1} : TensorShape{3, 1, 5, 5};
TensorShape outputShape = (dataLayout == DataLayout::NCHW) ? TensorShape{3, 5, 5, 1} : TensorShape{3, 1, 5, 5};
- BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, TensorInfo(inputShape, DataType)));
- BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo(outputShape, DataType)));
+ CHECK(TestNeonTensorHandleInfo(inputHandle, TensorInfo(inputShape, DataType)));
+ CHECK(TestNeonTensorHandleInfo(outputHandle, TensorInfo(outputShape, DataType)));
}
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
-BOOST_AUTO_TEST_CASE(CreateNormalizationFloat16NchwWorkload)
+TEST_CASE("CreateNormalizationFloat16NchwWorkload")
{
NeonCreateNormalizationWorkloadTest<NeonNormalizationFloatWorkload, DataType::Float16>(DataLayout::NCHW);
}
-BOOST_AUTO_TEST_CASE(CreateNormalizationFloat16NhwcWorkload)
+TEST_CASE("CreateNormalizationFloat16NhwcWorkload")
{
NeonCreateNormalizationWorkloadTest<NeonNormalizationFloatWorkload, DataType::Float16>(DataLayout::NHWC);
}
#endif
-BOOST_AUTO_TEST_CASE(CreateNormalizationFloatNchwWorkload)
+TEST_CASE("CreateNormalizationFloatNchwWorkload")
{
NeonCreateNormalizationWorkloadTest<NeonNormalizationFloatWorkload, DataType::Float32>(DataLayout::NCHW);
}
-BOOST_AUTO_TEST_CASE(CreateNormalizationFloatNhwcWorkload)
+TEST_CASE("CreateNormalizationFloatNhwcWorkload")
{
NeonCreateNormalizationWorkloadTest<NeonNormalizationFloatWorkload, DataType::Float32>(DataLayout::NHWC);
}
@@ -443,33 +445,33 @@ static void NeonCreatePooling2dWorkloadTest(DataLayout dataLayout = DataLayout::
Pooling2dQueueDescriptor queueDescriptor = workload->GetData();
auto inputHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Inputs[0]);
auto outputHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Outputs[0]);
- BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, TensorInfo(inputShape, DataType)));
- BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo(outputShape, DataType)));
+ CHECK(TestNeonTensorHandleInfo(inputHandle, TensorInfo(inputShape, DataType)));
+ CHECK(TestNeonTensorHandleInfo(outputHandle, TensorInfo(outputShape, DataType)));
}
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
-BOOST_AUTO_TEST_CASE(CreatePooling2dFloat16Workload)
+TEST_CASE("CreatePooling2dFloat16Workload")
{
NeonCreatePooling2dWorkloadTest<DataType::Float16>();
}
#endif
-BOOST_AUTO_TEST_CASE(CreatePooling2dFloatNchwWorkload)
+TEST_CASE("CreatePooling2dFloatNchwWorkload")
{
NeonCreatePooling2dWorkloadTest<DataType::Float32>(DataLayout::NCHW);
}
-BOOST_AUTO_TEST_CASE(CreatePooling2dFloatNhwcWorkload)
+TEST_CASE("CreatePooling2dFloatNhwcWorkload")
{
NeonCreatePooling2dWorkloadTest<DataType::Float32>(DataLayout::NHWC);
}
-BOOST_AUTO_TEST_CASE(CreatePooling2dUint8NchwWorkload)
+TEST_CASE("CreatePooling2dUint8NchwWorkload")
{
NeonCreatePooling2dWorkloadTest<DataType::QAsymmU8>(DataLayout::NCHW);
}
-BOOST_AUTO_TEST_CASE(CreatePooling2dUint8NhwcWorkload)
+TEST_CASE("CreatePooling2dUint8NhwcWorkload")
{
NeonCreatePooling2dWorkloadTest<DataType::QAsymmU8>(DataLayout::NHWC);
}
@@ -495,24 +497,24 @@ static void NeonCreatePreluWorkloadTest(const armnn::TensorShape& inputShape,
auto inputHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Inputs[0]);
auto alphaHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Inputs[1]);
auto outputHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Outputs[0]);
- BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, TensorInfo(inputShape, dataType)));
- BOOST_TEST(TestNeonTensorHandleInfo(alphaHandle, TensorInfo(alphaShape, dataType)));
- BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo(outputShape, dataType)));
+ CHECK(TestNeonTensorHandleInfo(inputHandle, TensorInfo(inputShape, dataType)));
+ CHECK(TestNeonTensorHandleInfo(alphaHandle, TensorInfo(alphaShape, dataType)));
+ CHECK(TestNeonTensorHandleInfo(outputHandle, TensorInfo(outputShape, dataType)));
}
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
- BOOST_AUTO_TEST_CASE(CreatePreluFloat16Workload)
+TEST_CASE("CreatePreluFloat16Workload")
{
NeonCreatePreluWorkloadTest({ 1, 4, 1, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 }, DataType::Float16);
}
#endif
-BOOST_AUTO_TEST_CASE(CreatePreluFloatWorkload)
+TEST_CASE("CreatePreluFloatWorkload")
{
NeonCreatePreluWorkloadTest({ 1, 4, 1, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 }, DataType::Float32);
}
-BOOST_AUTO_TEST_CASE(CreatePreluUint8Workload)
+TEST_CASE("CreatePreluUint8Workload")
{
NeonCreatePreluWorkloadTest({ 1, 4, 1, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 }, DataType::QAsymmU8);
}
@@ -530,23 +532,23 @@ static void NeonCreateReshapeWorkloadTest()
ReshapeQueueDescriptor queueDescriptor = workload->GetData();
auto inputHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Inputs[0]);
auto outputHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Outputs[0]);
- BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, TensorInfo({4, 1}, DataType)));
- BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo({1, 4}, DataType)));
+ CHECK(TestNeonTensorHandleInfo(inputHandle, TensorInfo({4, 1}, DataType)));
+ CHECK(TestNeonTensorHandleInfo(outputHandle, TensorInfo({1, 4}, DataType)));
}
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
-BOOST_AUTO_TEST_CASE(CreateReshapeFloat16Workload)
+TEST_CASE("CreateReshapeFloat16Workload")
{
NeonCreateReshapeWorkloadTest<DataType::Float16>();
}
#endif
-BOOST_AUTO_TEST_CASE(CreateReshapeFloatWorkload)
+TEST_CASE("CreateReshapeFloatWorkload")
{
NeonCreateReshapeWorkloadTest<DataType::Float32>();
}
-BOOST_AUTO_TEST_CASE(CreateReshapeUint8Workload)
+TEST_CASE("CreateReshapeUint8Workload")
{
NeonCreateReshapeWorkloadTest<DataType::QAsymmU8>();
}
@@ -569,34 +571,34 @@ static void NeonCreateResizeWorkloadTest(DataLayout dataLayout)
{
case DataLayout::NHWC:
predResult = CompareIAclTensorHandleShape(inputHandle, { 2, 4, 4, 3 });
- BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+ CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
predResult = CompareIAclTensorHandleShape(outputHandle, { 2, 2, 2, 3 });
- BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+ CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
break;
default: // DataLayout::NCHW
predResult = CompareIAclTensorHandleShape(inputHandle, { 2, 3, 4, 4 });
- BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+ CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
predResult = CompareIAclTensorHandleShape(outputHandle, { 2, 3, 2, 2 });
- BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+ CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
}
}
-BOOST_AUTO_TEST_CASE(CreateResizeFloat32NchwWorkload)
+TEST_CASE("CreateResizeFloat32NchwWorkload")
{
NeonCreateResizeWorkloadTest<NeonResizeWorkload, armnn::DataType::Float32>(DataLayout::NCHW);
}
-BOOST_AUTO_TEST_CASE(CreateResizeUint8NchwWorkload)
+TEST_CASE("CreateResizeUint8NchwWorkload")
{
NeonCreateResizeWorkloadTest<NeonResizeWorkload, armnn::DataType::QAsymmU8>(DataLayout::NCHW);
}
-BOOST_AUTO_TEST_CASE(CreateResizeFloat32NhwcWorkload)
+TEST_CASE("CreateResizeFloat32NhwcWorkload")
{
NeonCreateResizeWorkloadTest<NeonResizeWorkload, armnn::DataType::Float32>(DataLayout::NHWC);
}
-BOOST_AUTO_TEST_CASE(CreateResizeUint8NhwcWorkload)
+TEST_CASE("CreateResizeUint8NhwcWorkload")
{
NeonCreateResizeWorkloadTest<NeonResizeWorkload, armnn::DataType::QAsymmU8>(DataLayout::NHWC);
}
@@ -625,28 +627,28 @@ static void NeonCreateSoftmaxWorkloadTest()
tensorInfo.SetQuantizationOffset(-128);
tensorInfo.SetQuantizationScale(1.f / 256);
}
- BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, tensorInfo));
- BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, tensorInfo));
+ CHECK(TestNeonTensorHandleInfo(inputHandle, tensorInfo));
+ CHECK(TestNeonTensorHandleInfo(outputHandle, tensorInfo));
}
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
-BOOST_AUTO_TEST_CASE(CreateSoftmaxFloat16Workload)
+TEST_CASE("CreateSoftmaxFloat16Workload")
{
NeonCreateSoftmaxWorkloadTest<NeonSoftmaxWorkload, DataType::Float16>();
}
#endif
-BOOST_AUTO_TEST_CASE(CreateSoftmaxFloatWorkload)
+TEST_CASE("CreateSoftmaxFloatWorkload")
{
NeonCreateSoftmaxWorkloadTest<NeonSoftmaxWorkload, DataType::Float32>();
}
-BOOST_AUTO_TEST_CASE(CreateSoftmaxQAsymmU8Workload)
+TEST_CASE("CreateSoftmaxQAsymmU8Workload")
{
NeonCreateSoftmaxWorkloadTest<NeonSoftmaxWorkload, DataType::QAsymmU8>();
}
-BOOST_AUTO_TEST_CASE(CreateSoftmaxQAsymmS8Workload)
+TEST_CASE("CreateSoftmaxQAsymmS8Workload")
{
NeonCreateSoftmaxWorkloadTest<NeonSoftmaxWorkload, DataType::QAsymmS8>();
}
@@ -664,31 +666,31 @@ static void NeonSpaceToDepthWorkloadTest()
auto inputHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Inputs[0]);
auto outputHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Outputs[0]);
- BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, TensorInfo({ 1, 2, 2, 1 }, DataType)));
- BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo({ 1, 1, 1, 4 }, DataType)));
+ CHECK(TestNeonTensorHandleInfo(inputHandle, TensorInfo({ 1, 2, 2, 1 }, DataType)));
+ CHECK(TestNeonTensorHandleInfo(outputHandle, TensorInfo({ 1, 1, 1, 4 }, DataType)));
}
-BOOST_AUTO_TEST_CASE(CreateSpaceToDepthFloat32Workload)
+TEST_CASE("CreateSpaceToDepthFloat32Workload")
{
NeonSpaceToDepthWorkloadTest<NeonSpaceToDepthWorkload, armnn::DataType::Float32>();
}
-BOOST_AUTO_TEST_CASE(CreateSpaceToDepthFloat16Workload)
+TEST_CASE("CreateSpaceToDepthFloat16Workload")
{
NeonSpaceToDepthWorkloadTest<NeonSpaceToDepthWorkload, armnn::DataType::Float16>();
}
-BOOST_AUTO_TEST_CASE(CreateSpaceToDepthQAsymm8Workload)
+TEST_CASE("CreateSpaceToDepthQAsymm8Workload")
{
NeonSpaceToDepthWorkloadTest<NeonSpaceToDepthWorkload, armnn::DataType::QAsymmU8>();
}
-BOOST_AUTO_TEST_CASE(CreateSpaceToDepthQSymm16Workload)
+TEST_CASE("CreateSpaceToDepthQSymm16Workload")
{
NeonSpaceToDepthWorkloadTest<NeonSpaceToDepthWorkload, armnn::DataType::QSymmS16>();
}
-BOOST_AUTO_TEST_CASE(CreateSplitterWorkload)
+TEST_CASE("CreateSplitterWorkload")
{
Graph graph;
NeonWorkloadFactory factory =
@@ -699,19 +701,19 @@ BOOST_AUTO_TEST_CASE(CreateSplitterWorkload)
// Checks that outputs are as we expect them (see definition of CreateSplitterWorkloadTest).
SplitterQueueDescriptor queueDescriptor = workload->GetData();
auto inputHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Inputs[0]);
- BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, TensorInfo({5, 7, 7}, DataType::Float32)));
+ CHECK(TestNeonTensorHandleInfo(inputHandle, TensorInfo({5, 7, 7}, DataType::Float32)));
auto outputHandle0 = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Outputs[0]);
- BOOST_TEST(TestNeonTensorHandleInfo(outputHandle0, TensorInfo({1, 7, 7}, DataType::Float32)));
+ CHECK(TestNeonTensorHandleInfo(outputHandle0, TensorInfo({1, 7, 7}, DataType::Float32)));
auto outputHandle1 = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Outputs[1]);
- BOOST_TEST(TestNeonTensorHandleInfo(outputHandle1, TensorInfo({2, 7, 7}, DataType::Float32)));
+ CHECK(TestNeonTensorHandleInfo(outputHandle1, TensorInfo({2, 7, 7}, DataType::Float32)));
auto outputHandle2 = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Outputs[2]);
- BOOST_TEST(TestNeonTensorHandleInfo(outputHandle2, TensorInfo({2, 7, 7}, DataType::Float32)));
+ CHECK(TestNeonTensorHandleInfo(outputHandle2, TensorInfo({2, 7, 7}, DataType::Float32)));
}
-BOOST_AUTO_TEST_CASE(CreateSplitterConcat)
+TEST_CASE("CreateSplitterConcat")
{
// Tests that it is possible to decide which output of the splitter layer
// should be linked to which input of the concat layer.
@@ -736,17 +738,17 @@ BOOST_AUTO_TEST_CASE(CreateSplitterConcat)
armnn::IAclTensorHandle* mIn0 = dynamic_cast<armnn::IAclTensorHandle*>(wlConcat->GetData().m_Inputs[0]);
armnn::IAclTensorHandle* mIn1 = dynamic_cast<armnn::IAclTensorHandle*>(wlConcat->GetData().m_Inputs[1]);
- BOOST_TEST(sOut0);
- BOOST_TEST(sOut1);
- BOOST_TEST(mIn0);
- BOOST_TEST(mIn1);
+ CHECK(sOut0);
+ CHECK(sOut1);
+ CHECK(mIn0);
+ CHECK(mIn1);
bool validDataPointers = (sOut0 == mIn1) && (sOut1 == mIn0);
- BOOST_TEST(validDataPointers);
+ CHECK(validDataPointers);
}
-BOOST_AUTO_TEST_CASE(CreateSingleOutputMultipleInputs)
+TEST_CASE("CreateSingleOutputMultipleInputs")
{
// Tests that it is possible to assign multiple (two) different layers to each of the outputs of a splitter layer.
// We create a splitter with two outputs, and each of those outputs is used by two different activation layers
@@ -773,24 +775,24 @@ BOOST_AUTO_TEST_CASE(CreateSingleOutputMultipleInputs)
armnn::IAclTensorHandle* activ1_1Im = dynamic_cast<armnn::IAclTensorHandle*>(wlActiv1_1->GetData().m_Inputs[0]);
- BOOST_TEST(sOut0);
- BOOST_TEST(sOut1);
- BOOST_TEST(activ0_0Im);
- BOOST_TEST(activ0_1Im);
- BOOST_TEST(activ1_0Im);
- BOOST_TEST(activ1_1Im);
+ CHECK(sOut0);
+ CHECK(sOut1);
+ CHECK(activ0_0Im);
+ CHECK(activ0_1Im);
+ CHECK(activ1_0Im);
+ CHECK(activ1_1Im);
bool validDataPointers = (sOut0 == activ0_0Im) && (sOut0 == activ0_1Im) &&
(sOut1 == activ1_0Im) && (sOut1 == activ1_1Im);
- BOOST_TEST(validDataPointers);
+ CHECK(validDataPointers);
}
#if defined(ARMNNREF_ENABLED)
// This test unit needs the reference backend; it's not available if the reference backend is not built
-BOOST_AUTO_TEST_CASE(CreateMemCopyWorkloadsNeon)
+TEST_CASE("CreateMemCopyWorkloadsNeon")
{
NeonWorkloadFactory factory =
NeonWorkloadFactoryHelper::GetFactory(NeonWorkloadFactoryHelper::GetMemoryManager());
@@ -819,28 +821,28 @@ static void NeonCreateL2NormalizationWorkloadTest(DataLayout dataLayout)
TensorShape outputShape = (dataLayout == DataLayout::NCHW) ?
TensorShape{ 5, 20, 50, 67 } : TensorShape{ 5, 50, 67, 20 };
- BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, TensorInfo(inputShape, DataType)));
- BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo(outputShape, DataType)));
+ CHECK(TestNeonTensorHandleInfo(inputHandle, TensorInfo(inputShape, DataType)));
+ CHECK(TestNeonTensorHandleInfo(outputHandle, TensorInfo(outputShape, DataType)));
}
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
-BOOST_AUTO_TEST_CASE(CreateL2NormalizationFloat16NchwWorkload)
+TEST_CASE("CreateL2NormalizationFloat16NchwWorkload")
{
NeonCreateL2NormalizationWorkloadTest<NeonL2NormalizationFloatWorkload, DataType::Float16>(DataLayout::NCHW);
}
-BOOST_AUTO_TEST_CASE(CreateL2NormalizationFloat16NhwcWorkload)
+TEST_CASE("CreateL2NormalizationFloat16NhwcWorkload")
{
NeonCreateL2NormalizationWorkloadTest<NeonL2NormalizationFloatWorkload, DataType::Float16>(DataLayout::NHWC);
}
#endif
-BOOST_AUTO_TEST_CASE(CreateL2NormalizationNchwWorkload)
+TEST_CASE("CreateL2NormalizationNchwWorkload")
{
NeonCreateL2NormalizationWorkloadTest<NeonL2NormalizationFloatWorkload, DataType::Float32>(DataLayout::NCHW);
}
-BOOST_AUTO_TEST_CASE(CreateL2NormalizationNhwcWorkload)
+TEST_CASE("CreateL2NormalizationNhwcWorkload")
{
NeonCreateL2NormalizationWorkloadTest<NeonL2NormalizationFloatWorkload, DataType::Float32>(DataLayout::NHWC);
}
@@ -860,18 +862,18 @@ static void NeonCreateLogSoftmaxWorkloadTest()
auto outputHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Outputs[0]);
armnn::TensorInfo tensorInfo({4, 1}, DataType);
- BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, tensorInfo));
- BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, tensorInfo));
+ CHECK(TestNeonTensorHandleInfo(inputHandle, tensorInfo));
+ CHECK(TestNeonTensorHandleInfo(outputHandle, tensorInfo));
}
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
-BOOST_AUTO_TEST_CASE(CreateLogSoftmaxFloat16Workload)
+TEST_CASE("CreateLogSoftmaxFloat16Workload")
{
NeonCreateLogSoftmaxWorkloadTest<NeonLogSoftmaxWorkload, DataType::Float16>();
}
#endif
-BOOST_AUTO_TEST_CASE(CreateLogSoftmaxFloatWorkload)
+TEST_CASE("CreateLogSoftmaxFloatWorkload")
{
NeonCreateLogSoftmaxWorkloadTest<NeonLogSoftmaxWorkload, DataType::Float32>();
}
@@ -890,11 +892,11 @@ static void NeonCreateLstmWorkloadTest()
auto inputHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Inputs[0]);
auto outputHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Outputs[1]);
- BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, TensorInfo({ 2, 2 }, DataType::Float32)));
- BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo({ 2, 4 }, DataType::Float32)));
+ CHECK(TestNeonTensorHandleInfo(inputHandle, TensorInfo({ 2, 2 }, DataType::Float32)));
+ CHECK(TestNeonTensorHandleInfo(outputHandle, TensorInfo({ 2, 4 }, DataType::Float32)));
}
-BOOST_AUTO_TEST_CASE(CreateLSTMWorkloadFloatWorkload)
+TEST_CASE("CreateLSTMWorkloadFloatWorkload")
{
NeonCreateLstmWorkloadTest<NeonLstmFloatWorkload>();
}
@@ -914,37 +916,37 @@ static void NeonCreateConcatWorkloadTest(std::initializer_list<unsigned int> out
auto inputHandle1 = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Inputs[1]);
auto outputHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Outputs[0]);
- BOOST_TEST(TestNeonTensorHandleInfo(inputHandle0, TensorInfo({ 2, 3, 2, 5 }, DataType)));
- BOOST_TEST(TestNeonTensorHandleInfo(inputHandle1, TensorInfo({ 2, 3, 2, 5 }, DataType)));
- BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo(outputShape, DataType)));
+ CHECK(TestNeonTensorHandleInfo(inputHandle0, TensorInfo({ 2, 3, 2, 5 }, DataType)));
+ CHECK(TestNeonTensorHandleInfo(inputHandle1, TensorInfo({ 2, 3, 2, 5 }, DataType)));
+ CHECK(TestNeonTensorHandleInfo(outputHandle, TensorInfo(outputShape, DataType)));
}
-BOOST_AUTO_TEST_CASE(CreateConcatDim0Float32Workload)
+TEST_CASE("CreateConcatDim0Float32Workload")
{
NeonCreateConcatWorkloadTest<NeonConcatWorkload, armnn::DataType::Float32>({ 4, 3, 2, 5 }, 0);
}
-BOOST_AUTO_TEST_CASE(CreateConcatDim1Float32Workload)
+TEST_CASE("CreateConcatDim1Float32Workload")
{
NeonCreateConcatWorkloadTest<NeonConcatWorkload, armnn::DataType::Float32>({ 2, 6, 2, 5 }, 1);
}
-BOOST_AUTO_TEST_CASE(CreateConcatDim3Float32Workload)
+TEST_CASE("CreateConcatDim3Float32Workload")
{
NeonCreateConcatWorkloadTest<NeonConcatWorkload, armnn::DataType::Float32>({ 2, 3, 2, 10 }, 3);
}
-BOOST_AUTO_TEST_CASE(CreateConcatDim0Uint8Workload)
+TEST_CASE("CreateConcatDim0Uint8Workload")
{
NeonCreateConcatWorkloadTest<NeonConcatWorkload, armnn::DataType::QAsymmU8>({ 4, 3, 2, 5 }, 0);
}
-BOOST_AUTO_TEST_CASE(CreateConcatDim1Uint8Workload)
+TEST_CASE("CreateConcatDim1Uint8Workload")
{
NeonCreateConcatWorkloadTest<NeonConcatWorkload, armnn::DataType::QAsymmU8>({ 2, 6, 2, 5 }, 1);
}
-BOOST_AUTO_TEST_CASE(CreateConcatDim3Uint8Workload)
+TEST_CASE("CreateConcatDim3Uint8Workload")
{
NeonCreateConcatWorkloadTest<NeonConcatWorkload, armnn::DataType::QAsymmU8>({ 2, 3, 2, 10 }, 3);
}
@@ -971,25 +973,25 @@ static void NeonCreateStackWorkloadTest(const std::initializer_list<unsigned int
for (unsigned int i = 0; i < numInputs; ++i)
{
auto inputHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Inputs[i]);
- BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, TensorInfo(inputShape, DataType)));
+ CHECK(TestNeonTensorHandleInfo(inputHandle, TensorInfo(inputShape, DataType)));
}
auto outputHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Outputs[0]);
- BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo(outputShape, DataType)));
+ CHECK(TestNeonTensorHandleInfo(outputHandle, TensorInfo(outputShape, DataType)));
}
-BOOST_AUTO_TEST_CASE(CreateStackFloat32Workload)
+TEST_CASE("CreateStackFloat32Workload")
{
NeonCreateStackWorkloadTest<armnn::DataType::Float32>({ 3, 4, 5 }, { 3, 4, 2, 5 }, 2, 2);
}
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
-BOOST_AUTO_TEST_CASE(CreateStackFloat16Workload)
+TEST_CASE("CreateStackFloat16Workload")
{
NeonCreateStackWorkloadTest<armnn::DataType::Float16>({ 3, 4, 5 }, { 3, 4, 2, 5 }, 2, 2);
}
#endif
-BOOST_AUTO_TEST_CASE(CreateStackUint8Workload)
+TEST_CASE("CreateStackUint8Workload")
{
NeonCreateStackWorkloadTest<armnn::DataType::QAsymmU8>({ 3, 4, 5 }, { 3, 4, 2, 5 }, 2, 2);
}
@@ -1005,27 +1007,27 @@ static void NeonCreateQuantizedLstmWorkloadTest()
QuantizedLstmQueueDescriptor queueDescriptor = workload->GetData();
IAclTensorHandle* inputHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Inputs[0]);
- BOOST_TEST((inputHandle->GetShape() == TensorShape({2, 2})));
- BOOST_TEST((inputHandle->GetDataType() == arm_compute::DataType::QASYMM8));
+ CHECK((inputHandle->GetShape() == TensorShape({2, 2})));
+ CHECK((inputHandle->GetDataType() == arm_compute::DataType::QASYMM8));
IAclTensorHandle* cellStateInHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Inputs[1]);
- BOOST_TEST((cellStateInHandle->GetShape() == TensorShape({2, 4})));
- BOOST_TEST((cellStateInHandle->GetDataType() == arm_compute::DataType::QSYMM16));
+ CHECK((cellStateInHandle->GetShape() == TensorShape({2, 4})));
+ CHECK((cellStateInHandle->GetDataType() == arm_compute::DataType::QSYMM16));
IAclTensorHandle* outputStateInHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Inputs[2]);
- BOOST_TEST((outputStateInHandle->GetShape() == TensorShape({2, 4})));
- BOOST_TEST((outputStateInHandle->GetDataType() == arm_compute::DataType::QASYMM8));
+ CHECK((outputStateInHandle->GetShape() == TensorShape({2, 4})));
+ CHECK((outputStateInHandle->GetDataType() == arm_compute::DataType::QASYMM8));
IAclTensorHandle* cellStateOutHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Outputs[0]);
- BOOST_TEST((cellStateOutHandle->GetShape() == TensorShape({2, 4})));
- BOOST_TEST((cellStateOutHandle->GetDataType() == arm_compute::DataType::QSYMM16));
+ CHECK((cellStateOutHandle->GetShape() == TensorShape({2, 4})));
+ CHECK((cellStateOutHandle->GetDataType() == arm_compute::DataType::QSYMM16));
IAclTensorHandle* outputStateOutHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Outputs[1]);
- BOOST_TEST((outputStateOutHandle->GetShape() == TensorShape({2, 4})));
- BOOST_TEST((outputStateOutHandle->GetDataType() == arm_compute::DataType::QASYMM8));
+ CHECK((outputStateOutHandle->GetShape() == TensorShape({2, 4})));
+ CHECK((outputStateOutHandle->GetDataType() == arm_compute::DataType::QASYMM8));
}
-BOOST_AUTO_TEST_CASE(CreateQuantizedLstmWorkload)
+TEST_CASE("CreateQuantizedLstmWorkload")
{
NeonCreateQuantizedLstmWorkloadTest<NeonQuantizedLstmWorkload>();
}
@@ -1040,21 +1042,21 @@ static void NeonCreateQLstmWorkloadTest()
QLstmQueueDescriptor queueDescriptor = workload->GetData();
IAclTensorHandle* inputHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Inputs[0]);
- BOOST_TEST((inputHandle->GetShape() == TensorShape({2, 4})));
- BOOST_TEST((inputHandle->GetDataType() == arm_compute::DataType::QASYMM8_SIGNED));
+ CHECK((inputHandle->GetShape() == TensorShape({2, 4})));
+ CHECK((inputHandle->GetDataType() == arm_compute::DataType::QASYMM8_SIGNED));
IAclTensorHandle* cellStateOutHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Outputs[1]);
- BOOST_TEST((cellStateOutHandle->GetShape() == TensorShape({2, 4})));
- BOOST_TEST((cellStateOutHandle->GetDataType() == arm_compute::DataType::QSYMM16));
+ CHECK((cellStateOutHandle->GetShape() == TensorShape({2, 4})));
+ CHECK((cellStateOutHandle->GetDataType() == arm_compute::DataType::QSYMM16));
IAclTensorHandle* outputHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Outputs[2]);
- BOOST_TEST((outputHandle->GetShape() == TensorShape({2, 4})));
- BOOST_TEST((outputHandle->GetDataType() == arm_compute::DataType::QASYMM8_SIGNED));
+ CHECK((outputHandle->GetShape() == TensorShape({2, 4})));
+ CHECK((outputHandle->GetDataType() == arm_compute::DataType::QASYMM8_SIGNED));
}
-BOOST_AUTO_TEST_CASE(CreateQLstmWorkloadTest)
+TEST_CASE("CreateQLstmWorkloadTest")
{
NeonCreateQLstmWorkloadTest<NeonQLstmWorkload>();
}
-BOOST_AUTO_TEST_SUITE_END()
+}
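The next file, NeonEndToEndTests.cpp, follows the same macro conversion and additionally renames the file-scope defaultBackends vector to neonDefaultBackends, plausibly to avoid symbol clashes once the per-backend test files link into a single doctest binary (an assumption; the patch itself does not state the motivation). A minimal sketch of the converted file layout, using only names visible in the hunks below:

#include <doctest/doctest.h>

TEST_SUITE("NeonEndToEnd")
{
std::vector<armnn::BackendId> neonDefaultBackends = {armnn::Compute::CpuAcc};

TEST_CASE("NeonAbsEndToEndTestFloat32")
{
    // ... test body as in the hunk below ...
}
}   // closing brace replaces BOOST_AUTO_TEST_SUITE_END()

The truncated final hunk also removes a test declared with * boost::unit_test::disabled(); doctest's analogue would be the doctest::skip() decorator, i.e. TEST_CASE("Name" * doctest::skip()), though the replacement side is not visible in this excerpt.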
diff --git a/src/backends/neon/test/NeonEndToEndTests.cpp b/src/backends/neon/test/NeonEndToEndTests.cpp
index dc0a609ff7..5190e2ff61 100644
--- a/src/backends/neon/test/NeonEndToEndTests.cpp
+++ b/src/backends/neon/test/NeonEndToEndTests.cpp
@@ -22,14 +22,14 @@
#include <backendsCommon/test/SplitterEndToEndTestImpl.hpp>
#include <backendsCommon/test/TransposeConvolution2dEndToEndTestImpl.hpp>
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
-BOOST_AUTO_TEST_SUITE(NeonEndToEnd)
-
-std::vector<armnn::BackendId> defaultBackends = {armnn::Compute::CpuAcc};
+TEST_SUITE("NeonEndToEnd")
+{
+std::vector<armnn::BackendId> neonDefaultBackends = {armnn::Compute::CpuAcc};
// Abs
-BOOST_AUTO_TEST_CASE(NeonAbsEndToEndTestFloat32)
+TEST_CASE("NeonAbsEndToEndTestFloat32")
{
std::vector<float> expectedOutput =
{
@@ -37,22 +37,22 @@ BOOST_AUTO_TEST_CASE(NeonAbsEndToEndTestFloat32)
3.f, 3.f, 3.f, 3.f, 4.f, 4.f, 4.f, 4.f
};
- ElementwiseUnarySimpleEndToEnd<armnn::DataType::Float32>(defaultBackends,
+ ElementwiseUnarySimpleEndToEnd<armnn::DataType::Float32>(neonDefaultBackends,
UnaryOperation::Abs,
expectedOutput);
}
// Constant
-BOOST_AUTO_TEST_CASE(ConstantUsage_Neon_Float32)
+TEST_CASE("ConstantUsage_Neon_Float32")
{
- BOOST_TEST(ConstantUsageFloat32Test(defaultBackends));
+ CHECK(ConstantUsageFloat32Test(neonDefaultBackends));
}
#if defined(ARMNNREF_ENABLED)
// This test unit needs the reference backend; it's not available if the reference backend is not built
-BOOST_AUTO_TEST_CASE(FallbackToCpuRef)
+TEST_CASE("FallbackToCpuRef")
{
using namespace armnn;
@@ -83,519 +83,523 @@ BOOST_AUTO_TEST_CASE(FallbackToCpuRef)
// Load it into the runtime. It should pass.
NetworkId netId;
- BOOST_TEST(runtime->LoadNetwork(netId, std::move(optNet)) == Status::Success);
+ CHECK(runtime->LoadNetwork(netId, std::move(optNet)) == Status::Success);
}
#endif
-BOOST_AUTO_TEST_CASE(NeonGreaterSimpleEndToEndTest)
+TEST_CASE("NeonGreaterSimpleEndToEndTest")
{
const std::vector<uint8_t> expectedOutput({ 0, 0, 0, 0, 1, 1, 1, 1,
0, 0, 0, 0, 0, 0, 0, 0 });
- ComparisonSimpleEndToEnd<armnn::DataType::Float32>(defaultBackends,
+ ComparisonSimpleEndToEnd<armnn::DataType::Float32>(neonDefaultBackends,
ComparisonOperation::Greater,
expectedOutput);
}
-BOOST_AUTO_TEST_CASE(NeonGreaterSimpleEndToEndUint8Test)
+TEST_CASE("NeonGreaterSimpleEndToEndUint8Test")
{
const std::vector<uint8_t> expectedOutput({ 0, 0, 0, 0, 1, 1, 1, 1,
0, 0, 0, 0, 0, 0, 0, 0 });
- ComparisonSimpleEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends,
+ ComparisonSimpleEndToEnd<armnn::DataType::QAsymmU8>(neonDefaultBackends,
ComparisonOperation::Greater,
expectedOutput);
}
-BOOST_AUTO_TEST_CASE(NeonGreaterBroadcastEndToEndTest)
+TEST_CASE("NeonGreaterBroadcastEndToEndTest")
{
const std::vector<uint8_t> expectedOutput({ 0, 1, 0, 0, 0, 1,
1, 1, 1, 1, 1, 1 });
- ComparisonBroadcastEndToEnd<armnn::DataType::Float32>(defaultBackends,
+ ComparisonBroadcastEndToEnd<armnn::DataType::Float32>(neonDefaultBackends,
ComparisonOperation::Greater,
expectedOutput);
}
-BOOST_AUTO_TEST_CASE(NeonGreaterBroadcastEndToEndUint8Test)
+TEST_CASE("NeonGreaterBroadcastEndToEndUint8Test")
{
const std::vector<uint8_t> expectedOutput({ 0, 1, 0, 0, 0, 1,
1, 1, 1, 1, 1, 1 });
- ComparisonBroadcastEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends,
+ ComparisonBroadcastEndToEnd<armnn::DataType::QAsymmU8>(neonDefaultBackends,
ComparisonOperation::Greater,
expectedOutput);
}
-BOOST_AUTO_TEST_CASE(NeonConcatEndToEndDim0Test)
+TEST_CASE("NeonConcatEndToEndDim0Test")
{
- ConcatDim0EndToEnd<armnn::DataType::Float32>(defaultBackends);
+ ConcatDim0EndToEnd<armnn::DataType::Float32>(neonDefaultBackends);
}
-BOOST_AUTO_TEST_CASE(NeonConcatEndToEndDim0Uint8Test)
+TEST_CASE("NeonConcatEndToEndDim0Uint8Test")
{
- ConcatDim0EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
+ ConcatDim0EndToEnd<armnn::DataType::QAsymmU8>(neonDefaultBackends);
}
-BOOST_AUTO_TEST_CASE(NeonConcatEndToEndDim1Test)
+TEST_CASE("NeonConcatEndToEndDim1Test")
{
- ConcatDim1EndToEnd<armnn::DataType::Float32>(defaultBackends);
+ ConcatDim1EndToEnd<armnn::DataType::Float32>(neonDefaultBackends);
}
-BOOST_AUTO_TEST_CASE(NeonConcatEndToEndDim1Uint8Test)
+TEST_CASE("NeonConcatEndToEndDim1Uint8Test")
{
- ConcatDim1EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
+ ConcatDim1EndToEnd<armnn::DataType::QAsymmU8>(neonDefaultBackends);
}
-BOOST_AUTO_TEST_CASE(NeonConcatEndToEndDim3Test)
+TEST_CASE("NeonConcatEndToEndDim3Test")
{
- ConcatDim3EndToEnd<armnn::DataType::Float32>(defaultBackends);
+ ConcatDim3EndToEnd<armnn::DataType::Float32>(neonDefaultBackends);
}
-BOOST_AUTO_TEST_CASE(NeonConcatEndToEndDim3Uint8Test)
+TEST_CASE("NeonConcatEndToEndDim3Uint8Test")
{
- ConcatDim3EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
+ ConcatDim3EndToEnd<armnn::DataType::QAsymmU8>(neonDefaultBackends);
}
// DepthToSpace
-BOOST_AUTO_TEST_CASE(DephtToSpaceEndToEndNchwFloat32)
+TEST_CASE("DephtToSpaceEndToEndNchwFloat32")
{
- DepthToSpaceEndToEnd<armnn::DataType::Float32>(defaultBackends, armnn::DataLayout::NCHW);
+ DepthToSpaceEndToEnd<armnn::DataType::Float32>(neonDefaultBackends, armnn::DataLayout::NCHW);
}
-BOOST_AUTO_TEST_CASE(DephtToSpaceEndToEndNchwFloat16)
+TEST_CASE("DephtToSpaceEndToEndNchwFloat16")
{
- DepthToSpaceEndToEnd<armnn::DataType::Float16>(defaultBackends, armnn::DataLayout::NCHW);
+ DepthToSpaceEndToEnd<armnn::DataType::Float16>(neonDefaultBackends, armnn::DataLayout::NCHW);
}
-BOOST_AUTO_TEST_CASE(DephtToSpaceEndToEndNchwUint8)
+TEST_CASE("DephtToSpaceEndToEndNchwUint8")
{
- DepthToSpaceEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends, armnn::DataLayout::NCHW);
+ DepthToSpaceEndToEnd<armnn::DataType::QAsymmU8>(neonDefaultBackends, armnn::DataLayout::NCHW);
}
-BOOST_AUTO_TEST_CASE(DephtToSpaceEndToEndNchwInt16)
+TEST_CASE("DephtToSpaceEndToEndNchwInt16")
{
- DepthToSpaceEndToEnd<armnn::DataType::QSymmS16>(defaultBackends, armnn::DataLayout::NCHW);
+ DepthToSpaceEndToEnd<armnn::DataType::QSymmS16>(neonDefaultBackends, armnn::DataLayout::NCHW);
}
-BOOST_AUTO_TEST_CASE(DephtToSpaceEndToEndNhwcFloat32)
+TEST_CASE("DephtToSpaceEndToEndNhwcFloat32")
{
- DepthToSpaceEndToEnd<armnn::DataType::Float32>(defaultBackends, armnn::DataLayout::NHWC);
+ DepthToSpaceEndToEnd<armnn::DataType::Float32>(neonDefaultBackends, armnn::DataLayout::NHWC);
}
-BOOST_AUTO_TEST_CASE(DephtToSpaceEndToEndNhwcFloat16)
+TEST_CASE("DephtToSpaceEndToEndNhwcFloat16")
{
- DepthToSpaceEndToEnd<armnn::DataType::Float16>(defaultBackends, armnn::DataLayout::NHWC);
+ DepthToSpaceEndToEnd<armnn::DataType::Float16>(neonDefaultBackends, armnn::DataLayout::NHWC);
}
-BOOST_AUTO_TEST_CASE(DephtToSpaceEndToEndNhwcUint8)
+TEST_CASE("DephtToSpaceEndToEndNhwcUint8")
{
- DepthToSpaceEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends, armnn::DataLayout::NHWC);
+ DepthToSpaceEndToEnd<armnn::DataType::QAsymmU8>(neonDefaultBackends, armnn::DataLayout::NHWC);
}
-BOOST_AUTO_TEST_CASE(DephtToSpaceEndToEndNhwcInt16)
+TEST_CASE("DephtToSpaceEndToEndNhwcInt16")
{
- DepthToSpaceEndToEnd<armnn::DataType::QSymmS16>(defaultBackends, armnn::DataLayout::NHWC);
+ DepthToSpaceEndToEnd<armnn::DataType::QSymmS16>(neonDefaultBackends, armnn::DataLayout::NHWC);
}
// Dequantize
-BOOST_AUTO_TEST_CASE(DequantizeEndToEndSimpleTest)
+TEST_CASE("DequantizeEndToEndSimpleTest")
{
- DequantizeEndToEndSimple<armnn::DataType::QAsymmU8>(defaultBackends);
+ DequantizeEndToEndSimple<armnn::DataType::QAsymmU8>(neonDefaultBackends);
}
-BOOST_AUTO_TEST_CASE(DequantizeEndToEndOffsetTest)
+TEST_CASE("DequantizeEndToEndOffsetTest")
{
- DequantizeEndToEndOffset<armnn::DataType::QAsymmU8>(defaultBackends);
+ DequantizeEndToEndOffset<armnn::DataType::QAsymmU8>(neonDefaultBackends);
}
-BOOST_AUTO_TEST_CASE(NeonEluEndToEndTestFloat32)
+TEST_CASE("NeonEluEndToEndTestFloat32")
{
- EluEndToEndTest<armnn::DataType::Float32>(defaultBackends);
+ EluEndToEndTest<armnn::DataType::Float32>(neonDefaultBackends);
}
-BOOST_AUTO_TEST_CASE(NeonEluEndToEndTestFloat16)
+TEST_CASE("NeonEluEndToEndTestFloat16")
{
- EluEndToEndTest<armnn::DataType::Float16>(defaultBackends);
+ EluEndToEndTest<armnn::DataType::Float16>(neonDefaultBackends);
}
// HardSwish
-BOOST_AUTO_TEST_CASE(NeonHardSwishEndToEndTestFloat32)
+TEST_CASE("NeonHardSwishEndToEndTestFloat32")
{
- HardSwishEndToEndTest<armnn::DataType::Float32>(defaultBackends);
+ HardSwishEndToEndTest<armnn::DataType::Float32>(neonDefaultBackends);
}
-BOOST_AUTO_TEST_CASE(NeonHardSwishEndToEndTestFloat16)
+TEST_CASE("NeonHardSwishEndToEndTestFloat16")
{
- HardSwishEndToEndTest<armnn::DataType::Float16>(defaultBackends);
+ HardSwishEndToEndTest<armnn::DataType::Float16>(neonDefaultBackends);
}
-BOOST_AUTO_TEST_CASE(NeonHardSwishEndToEndTestQAsymmS8)
+TEST_CASE("NeonHardSwishEndToEndTestQAsymmS8")
{
- HardSwishEndToEndTest<armnn::DataType::QAsymmS8>(defaultBackends);
+ HardSwishEndToEndTest<armnn::DataType::QAsymmS8>(neonDefaultBackends);
}
-BOOST_AUTO_TEST_CASE(NeonHardSwishEndToEndTestQAsymmU8)
+TEST_CASE("NeonHardSwishEndToEndTestQAsymmU8")
{
- HardSwishEndToEndTest<armnn::DataType::QAsymmU8>(defaultBackends);
+ HardSwishEndToEndTest<armnn::DataType::QAsymmU8>(neonDefaultBackends);
}
-BOOST_AUTO_TEST_CASE(NeonPreluEndToEndFloat32Test)
+TEST_CASE("NeonPreluEndToEndFloat32Test")
{
- PreluEndToEndNegativeTest<armnn::DataType::Float32>(defaultBackends);
+ PreluEndToEndNegativeTest<armnn::DataType::Float32>(neonDefaultBackends);
}
-BOOST_AUTO_TEST_CASE(NeonPreluEndToEndTestUint8Test)
+TEST_CASE("NeonPreluEndToEndTestUint8Test")
{
- PreluEndToEndPositiveTest<armnn::DataType::QAsymmU8>(defaultBackends);
+ PreluEndToEndPositiveTest<armnn::DataType::QAsymmU8>(neonDefaultBackends);
}
-BOOST_AUTO_TEST_CASE(NeonSpaceToDepthNhwcEndToEndTest1)
+TEST_CASE("NeonSpaceToDepthNhwcEndToEndTest1")
{
- SpaceToDepthNhwcEndToEndTest1(defaultBackends);
+ SpaceToDepthNhwcEndToEndTest1(neonDefaultBackends);
}
-BOOST_AUTO_TEST_CASE(NeonSpaceToDepthNchwEndToEndTest1)
+TEST_CASE("NeonSpaceToDepthNchwEndToEndTest1")
{
- SpaceToDepthNchwEndToEndTest1(defaultBackends);
+ SpaceToDepthNchwEndToEndTest1(neonDefaultBackends);
}
-BOOST_AUTO_TEST_CASE(NeonSpaceToDepthNhwcEndToEndTest2)
+TEST_CASE("NeonSpaceToDepthNhwcEndToEndTest2")
{
- SpaceToDepthNhwcEndToEndTest2(defaultBackends);
+ SpaceToDepthNhwcEndToEndTest2(neonDefaultBackends);
}
-BOOST_AUTO_TEST_CASE(NeonSpaceToDepthNchwEndToEndTest2)
+TEST_CASE("NeonSpaceToDepthNchwEndToEndTest2")
{
- SpaceToDepthNchwEndToEndTest2(defaultBackends);
+ SpaceToDepthNchwEndToEndTest2(neonDefaultBackends);
}
-BOOST_AUTO_TEST_CASE(NeonSplitter1dEndToEndTest)
+TEST_CASE("NeonSplitter1dEndToEndTest")
{
- Splitter1dEndToEnd<armnn::DataType::Float32>(defaultBackends);
+ Splitter1dEndToEnd<armnn::DataType::Float32>(neonDefaultBackends);
}
-BOOST_AUTO_TEST_CASE(NeonSplitter1dEndToEndUint8Test)
+TEST_CASE("NeonSplitter1dEndToEndUint8Test")
{
- Splitter1dEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
+ Splitter1dEndToEnd<armnn::DataType::QAsymmU8>(neonDefaultBackends);
}
-BOOST_AUTO_TEST_CASE(NeonSplitter2dDim0EndToEndTest)
+TEST_CASE("NeonSplitter2dDim0EndToEndTest")
{
- Splitter2dDim0EndToEnd<armnn::DataType::Float32>(defaultBackends);
+ Splitter2dDim0EndToEnd<armnn::DataType::Float32>(neonDefaultBackends);
}
-BOOST_AUTO_TEST_CASE(NeonSplitter2dDim1EndToEndTest)
+TEST_CASE("NeonSplitter2dDim1EndToEndTest")
{
- Splitter2dDim1EndToEnd<armnn::DataType::Float32>(defaultBackends);
+ Splitter2dDim1EndToEnd<armnn::DataType::Float32>(neonDefaultBackends);
}
-BOOST_AUTO_TEST_CASE(NeonSplitter2dDim0EndToEndUint8Test)
+TEST_CASE("NeonSplitter2dDim0EndToEndUint8Test")
{
- Splitter2dDim0EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
+ Splitter2dDim0EndToEnd<armnn::DataType::QAsymmU8>(neonDefaultBackends);
}
-BOOST_AUTO_TEST_CASE(NeonSplitter2dDim1EndToEndUint8Test)
+TEST_CASE("NeonSplitter2dDim1EndToEndUint8Test")
{
- Splitter2dDim1EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
+ Splitter2dDim1EndToEnd<armnn::DataType::QAsymmU8>(neonDefaultBackends);
}
-BOOST_AUTO_TEST_CASE(NeonSplitter3dDim0EndToEndTest)
+TEST_CASE("NeonSplitter3dDim0EndToEndTest")
{
- Splitter3dDim0EndToEnd<armnn::DataType::Float32>(defaultBackends);
+ Splitter3dDim0EndToEnd<armnn::DataType::Float32>(neonDefaultBackends);
}
-BOOST_AUTO_TEST_CASE(NeonSplitter3dDim1EndToEndTest)
+TEST_CASE("NeonSplitter3dDim1EndToEndTest")
{
- Splitter3dDim1EndToEnd<armnn::DataType::Float32>(defaultBackends);
+ Splitter3dDim1EndToEnd<armnn::DataType::Float32>(neonDefaultBackends);
}
-BOOST_AUTO_TEST_CASE(NeonSplitter3dDim2EndToEndTest)
+TEST_CASE("NeonSplitter3dDim2EndToEndTest")
{
- Splitter3dDim2EndToEnd<armnn::DataType::Float32>(defaultBackends);
+ Splitter3dDim2EndToEnd<armnn::DataType::Float32>(neonDefaultBackends);
}
-BOOST_AUTO_TEST_CASE(NeonSplitter3dDim0EndToEndUint8Test)
+TEST_CASE("NeonSplitter3dDim0EndToEndUint8Test")
{
- Splitter3dDim0EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
+ Splitter3dDim0EndToEnd<armnn::DataType::QAsymmU8>(neonDefaultBackends);
}
-BOOST_AUTO_TEST_CASE(NeonSplitter3dDim1EndToEndUint8Test)
+TEST_CASE("NeonSplitter3dDim1EndToEndUint8Test")
{
- Splitter3dDim1EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
+ Splitter3dDim1EndToEnd<armnn::DataType::QAsymmU8>(neonDefaultBackends);
}
-BOOST_AUTO_TEST_CASE(NeonSplitter3dDim2EndToEndUint8Test)
+TEST_CASE("NeonSplitter3dDim2EndToEndUint8Test")
{
- Splitter3dDim2EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
+ Splitter3dDim2EndToEnd<armnn::DataType::QAsymmU8>(neonDefaultBackends);
}
-BOOST_AUTO_TEST_CASE(NeonSplitter4dDim0EndToEndTest)
+TEST_CASE("NeonSplitter4dDim0EndToEndTest")
{
- Splitter4dDim0EndToEnd<armnn::DataType::Float32>(defaultBackends);
+ Splitter4dDim0EndToEnd<armnn::DataType::Float32>(neonDefaultBackends);
}
-BOOST_AUTO_TEST_CASE(NeonSplitter4dDim1EndToEndTest)
+TEST_CASE("NeonSplitter4dDim1EndToEndTest")
{
- Splitter4dDim1EndToEnd<armnn::DataType::Float32>(defaultBackends);
+ Splitter4dDim1EndToEnd<armnn::DataType::Float32>(neonDefaultBackends);
}
-BOOST_AUTO_TEST_CASE(NeonSplitter4dDim2EndToEndTest)
+TEST_CASE("NeonSplitter4dDim2EndToEndTest")
{
- Splitter4dDim2EndToEnd<armnn::DataType::Float32>(defaultBackends);
+ Splitter4dDim2EndToEnd<armnn::DataType::Float32>(neonDefaultBackends);
}
-BOOST_AUTO_TEST_CASE(NeonSplitter4dDim3EndToEndTest)
+TEST_CASE("NeonSplitter4dDim3EndToEndTest")
{
- Splitter4dDim3EndToEnd<armnn::DataType::Float32>(defaultBackends);
+ Splitter4dDim3EndToEnd<armnn::DataType::Float32>(neonDefaultBackends);
}
-BOOST_AUTO_TEST_CASE(NeonSplitter4dDim0EndToEndUint8Test)
+TEST_CASE("NeonSplitter4dDim0EndToEndUint8Test")
{
- Splitter4dDim0EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
+ Splitter4dDim0EndToEnd<armnn::DataType::QAsymmU8>(neonDefaultBackends);
}
-BOOST_AUTO_TEST_CASE(NeonSplitter4dDim1EndToEndUint8Test)
+TEST_CASE("NeonSplitter4dDim1EndToEndUint8Test")
{
- Splitter4dDim1EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
+ Splitter4dDim1EndToEnd<armnn::DataType::QAsymmU8>(neonDefaultBackends);
}
-BOOST_AUTO_TEST_CASE(NeonSplitter4dDim2EndToEndUint8Test)
+TEST_CASE("NeonSplitter4dDim2EndToEndUint8Test")
{
- Splitter4dDim2EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
+ Splitter4dDim2EndToEnd<armnn::DataType::QAsymmU8>(neonDefaultBackends);
}
-BOOST_AUTO_TEST_CASE(NeonSplitter4dDim3EndToEndUint8Test)
+TEST_CASE("NeonSplitter4dDim3EndToEndUint8Test")
{
- Splitter4dDim3EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
+ Splitter4dDim3EndToEnd<armnn::DataType::QAsymmU8>(neonDefaultBackends);
}
-BOOST_AUTO_TEST_CASE(NeonQuantizedLstmEndToEndTest)
+TEST_CASE("NeonQuantizedLstmEndToEndTest")
{
- QuantizedLstmEndToEnd(defaultBackends);
+ QuantizedLstmEndToEnd(neonDefaultBackends);
}
-BOOST_AUTO_TEST_CASE(NeonTransposeConvolution2dEndToEndFloatNchwTest)
+TEST_CASE("NeonTransposeConvolution2dEndToEndFloatNchwTest")
{
TransposeConvolution2dEndToEnd<armnn::DataType::Float32, armnn::DataType::Float32>(
- defaultBackends, armnn::DataLayout::NCHW);
+ neonDefaultBackends, armnn::DataLayout::NCHW);
}
-BOOST_AUTO_TEST_CASE(NeonTransposeConvolution2dEndToEndUint8NchwTest)
+TEST_CASE("NeonTransposeConvolution2dEndToEndUint8NchwTest")
{
TransposeConvolution2dEndToEnd<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
- defaultBackends, armnn::DataLayout::NCHW);
+ neonDefaultBackends, armnn::DataLayout::NCHW);
}
-BOOST_AUTO_TEST_CASE(NeonTransposeConvolution2dEndToEndFloatNhwcTest)
+TEST_CASE("NeonTransposeConvolution2dEndToEndFloatNhwcTest")
{
TransposeConvolution2dEndToEnd<armnn::DataType::Float32, armnn::DataType::Float32>(
- defaultBackends, armnn::DataLayout::NHWC);
+ neonDefaultBackends, armnn::DataLayout::NHWC);
}
-BOOST_AUTO_TEST_CASE(NeonTransposeConvolution2dEndToEndUint8NhwcTest)
+TEST_CASE("NeonTransposeConvolution2dEndToEndUint8NhwcTest")
{
TransposeConvolution2dEndToEnd<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
- defaultBackends, armnn::DataLayout::NHWC);
+ neonDefaultBackends, armnn::DataLayout::NHWC);
}
-BOOST_AUTO_TEST_CASE(NeonImportNonAlignedInputPointerTest)
+TEST_CASE("NeonImportNonAlignedInputPointerTest")
{
- ImportNonAlignedInputPointerTest(defaultBackends);
+ ImportNonAlignedInputPointerTest(neonDefaultBackends);
}
-BOOST_AUTO_TEST_CASE(NeonExportNonAlignedOutputPointerTest)
+TEST_CASE("NeonExportNonAlignedOutputPointerTest")
{
- ExportNonAlignedOutputPointerTest(defaultBackends);
+ ExportNonAlignedOutputPointerTest(neonDefaultBackends);
}
-BOOST_AUTO_TEST_CASE(NeonImportAlignedPointerTest)
+TEST_CASE("NeonImportAlignedPointerTest")
{
- ImportAlignedPointerTest(defaultBackends);
+ ImportAlignedPointerTest(neonDefaultBackends);
}
-BOOST_AUTO_TEST_CASE(NeonImportOnlyWorkload)
+TEST_CASE("NeonImportOnlyWorkload")
{
- ImportOnlyWorkload(defaultBackends);
+ ImportOnlyWorkload(neonDefaultBackends);
}
-BOOST_AUTO_TEST_CASE(NeonExportOnlyWorkload)
+TEST_CASE("NeonExportOnlyWorkload")
{
- ExportOnlyWorkload(defaultBackends);
+ ExportOnlyWorkload(neonDefaultBackends);
}
-BOOST_AUTO_TEST_CASE(NeonImportAndExportWorkload)
+TEST_CASE("NeonImportAndExportWorkload")
{
- ImportAndExportWorkload(defaultBackends);
+ ImportAndExportWorkload(neonDefaultBackends);
}
-BOOST_AUTO_TEST_CASE(NeonExportOutputWithSeveralOutputSlotConnectionsTest)
+TEST_CASE("NeonExportOutputWithSeveralOutputSlotConnectionsTest")
{
- ExportOutputWithSeveralOutputSlotConnectionsTest(defaultBackends);
+ ExportOutputWithSeveralOutputSlotConnectionsTest(neonDefaultBackends);
}
// InstanceNormalization
-BOOST_AUTO_TEST_CASE(NeonInstanceNormalizationNchwEndToEndTest1)
+TEST_CASE("NeonInstanceNormalizationNchwEndToEndTest1")
{
- InstanceNormalizationNchwEndToEndTest1(defaultBackends);
+ InstanceNormalizationNchwEndToEndTest1(neonDefaultBackends);
}
-BOOST_AUTO_TEST_CASE(NeonInstanceNormalizationNchwEndToEndTest2)
+TEST_CASE("NeonInstanceNormalizationNchwEndToEndTest2")
{
- InstanceNormalizationNchwEndToEndTest2(defaultBackends);
+ InstanceNormalizationNchwEndToEndTest2(neonDefaultBackends);
}
// Fill
-BOOST_AUTO_TEST_CASE(NeonFillEndToEndTest)
+TEST_CASE("NeonFillEndToEndTest")
{
- FillEndToEnd<armnn::DataType::Float32>(defaultBackends);
+ FillEndToEnd<armnn::DataType::Float32>(neonDefaultBackends);
}
-BOOST_AUTO_TEST_CASE(RefFillEndToEndTestFloat16)
+TEST_CASE("RefFillEndToEndTestFloat16")
{
- FillEndToEnd<armnn::DataType::Float16>(defaultBackends);
+ FillEndToEnd<armnn::DataType::Float16>(neonDefaultBackends);
}
-BOOST_AUTO_TEST_CASE(NeonFillEndToEndTestInt32)
+TEST_CASE("NeonFillEndToEndTestInt32")
{
- FillEndToEnd<armnn::DataType::Signed32>(defaultBackends);
+ FillEndToEnd<armnn::DataType::Signed32>(neonDefaultBackends);
}
// ArgMinMax
-BOOST_AUTO_TEST_CASE(NeonArgMaxSimpleTest)
+TEST_CASE("NeonArgMaxSimpleTest")
{
- ArgMaxEndToEndSimple<armnn::DataType::Float32>(defaultBackends);
+ ArgMaxEndToEndSimple<armnn::DataType::Float32>(neonDefaultBackends);
}
-BOOST_AUTO_TEST_CASE(NeonArgMinSimpleTest)
+TEST_CASE("NeonArgMinSimpleTest")
{
- ArgMinEndToEndSimple<armnn::DataType::Float32>(defaultBackends);
+ ArgMinEndToEndSimple<armnn::DataType::Float32>(neonDefaultBackends);
}
-BOOST_AUTO_TEST_CASE(NeonArgMaxAxis0Test)
+TEST_CASE("NeonArgMaxAxis0Test")
{
- ArgMaxAxis0EndToEnd<armnn::DataType::Float32>(defaultBackends);
+ ArgMaxAxis0EndToEnd<armnn::DataType::Float32>(neonDefaultBackends);
}
-BOOST_AUTO_TEST_CASE(NeonArgMinAxis0Test)
+TEST_CASE("NeonArgMinAxis0Test")
{
- ArgMinAxis0EndToEnd<armnn::DataType::Float32>(defaultBackends);
+ ArgMinAxis0EndToEnd<armnn::DataType::Float32>(neonDefaultBackends);
}
-BOOST_AUTO_TEST_CASE(NeonArgMaxAxis1Test)
+TEST_CASE("NeonArgMaxAxis1Test")
{
- ArgMaxAxis1EndToEnd<armnn::DataType::Float32>(defaultBackends);
+ ArgMaxAxis1EndToEnd<armnn::DataType::Float32>(neonDefaultBackends);
}
-BOOST_AUTO_TEST_CASE(NeonArgMinAxis1Test)
+TEST_CASE("NeonArgMinAxis1Test")
{
- ArgMinAxis1EndToEnd<armnn::DataType::Float32>(defaultBackends);
+ ArgMinAxis1EndToEnd<armnn::DataType::Float32>(neonDefaultBackends);
}
-BOOST_AUTO_TEST_CASE(NeonArgMaxAxis2Test)
+TEST_CASE("NeonArgMaxAxis2Test")
{
- ArgMaxAxis2EndToEnd<armnn::DataType::Float32>(defaultBackends);
+ ArgMaxAxis2EndToEnd<armnn::DataType::Float32>(neonDefaultBackends);
}
-BOOST_AUTO_TEST_CASE(NeonArgMinAxis2Test)
+TEST_CASE("NeonArgMinAxis2Test")
{
- ArgMinAxis2EndToEnd<armnn::DataType::Float32>(defaultBackends);
+ ArgMinAxis2EndToEnd<armnn::DataType::Float32>(neonDefaultBackends);
}
-BOOST_AUTO_TEST_CASE(NeonArgMaxAxis3Test)
+TEST_CASE("NeonArgMaxAxis3Test")
{
- ArgMaxAxis3EndToEnd<armnn::DataType::Float32>(defaultBackends);
+ ArgMaxAxis3EndToEnd<armnn::DataType::Float32>(neonDefaultBackends);
}
-BOOST_AUTO_TEST_CASE(NeonArgMinAxis3Test)
+TEST_CASE("NeonArgMinAxis3Test")
{
- ArgMinAxis3EndToEnd<armnn::DataType::Float32>(defaultBackends);
+ ArgMinAxis3EndToEnd<armnn::DataType::Float32>(neonDefaultBackends);
}
-BOOST_AUTO_TEST_CASE(NeonArgMaxSimpleTestQuantisedAsymm8)
+TEST_CASE("NeonArgMaxSimpleTestQuantisedAsymm8")
{
- ArgMaxEndToEndSimple<armnn::DataType::QAsymmU8>(defaultBackends);
+ ArgMaxEndToEndSimple<armnn::DataType::QAsymmU8>(neonDefaultBackends);
}
-BOOST_AUTO_TEST_CASE(NeonArgMinSimpleTestQuantisedAsymm8)
+TEST_CASE("NeonArgMinSimpleTestQuantisedAsymm8")
{
- ArgMinEndToEndSimple<armnn::DataType::QAsymmU8>(defaultBackends);
+ ArgMinEndToEndSimple<armnn::DataType::QAsymmU8>(neonDefaultBackends);
}
-BOOST_AUTO_TEST_CASE(NeonArgMaxAxis0TestQuantisedAsymm8)
+TEST_CASE("NeonArgMaxAxis0TestQuantisedAsymm8")
{
- ArgMaxAxis0EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
+ ArgMaxAxis0EndToEnd<armnn::DataType::QAsymmU8>(neonDefaultBackends);
}
-BOOST_AUTO_TEST_CASE(NeonArgMinAxis0TestQuantisedAsymm8)
+TEST_CASE("NeonArgMinAxis0TestQuantisedAsymm8")
{
- ArgMinAxis0EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
+ ArgMinAxis0EndToEnd<armnn::DataType::QAsymmU8>(neonDefaultBackends);
}
-BOOST_AUTO_TEST_CASE(NeonArgMaxAxis1TestQuantisedAsymm8)
+TEST_CASE("NeonArgMaxAxis1TestQuantisedAsymm8")
{
- ArgMaxAxis1EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
+ ArgMaxAxis1EndToEnd<armnn::DataType::QAsymmU8>(neonDefaultBackends);
}
-BOOST_AUTO_TEST_CASE(NeonArgMinAxis1TestQuantisedAsymm8)
+TEST_CASE("NeonArgMinAxis1TestQuantisedAsymm8")
{
- ArgMinAxis1EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
+ ArgMinAxis1EndToEnd<armnn::DataType::QAsymmU8>(neonDefaultBackends);
}
-BOOST_AUTO_TEST_CASE(NeonArgMaxAxis2TestQuantisedAsymm8)
+TEST_CASE("NeonArgMaxAxis2TestQuantisedAsymm8")
{
- ArgMaxAxis2EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
+ ArgMaxAxis2EndToEnd<armnn::DataType::QAsymmU8>(neonDefaultBackends);
}
-BOOST_AUTO_TEST_CASE(NeonArgMinAxis2TestQuantisedAsymm8)
+TEST_CASE("NeonArgMinAxis2TestQuantisedAsymm8")
{
- ArgMinAxis2EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
+ ArgMinAxis2EndToEnd<armnn::DataType::QAsymmU8>(neonDefaultBackends);
}
-BOOST_AUTO_TEST_CASE(NeonArgMaxAxis3TestQuantisedAsymm8)
+TEST_CASE("NeonArgMaxAxis3TestQuantisedAsymm8")
{
- ArgMaxAxis3EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
+ ArgMaxAxis3EndToEnd<armnn::DataType::QAsymmU8>(neonDefaultBackends);
}
-BOOST_AUTO_TEST_CASE(NeonArgMinAxis3TestQuantisedAsymm8)
+TEST_CASE("NeonArgMinAxis3TestQuantisedAsymm8")
{
- ArgMinAxis3EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
+ ArgMinAxis3EndToEnd<armnn::DataType::QAsymmU8>(neonDefaultBackends);
}
-BOOST_AUTO_TEST_CASE(NeonStridedSliceInvalidSliceEndToEndTest)
+TEST_CASE("NeonStridedSliceInvalidSliceEndToEndTest")
{
- StridedSliceInvalidSliceEndToEndTest(defaultBackends);
+ StridedSliceInvalidSliceEndToEndTest(neonDefaultBackends);
}
-BOOST_AUTO_TEST_CASE(NeonDetectionPostProcessRegularNmsTest, * boost::unit_test::disabled())
-{
- std::vector<float> boxEncodings({
- 0.0f, 0.0f, 0.0f, 0.0f,
- 0.0f, 1.0f, 0.0f, 0.0f,
- 0.0f, -1.0f, 0.0f, 0.0f,
- 0.0f, 0.0f, 0.0f, 0.0f,
- 0.0f, 1.0f, 0.0f, 0.0f,
- 0.0f, 0.0f, 0.0f, 0.0f
- });
- std::vector<float> scores({
- 0.0f, 0.9f, 0.8f,
- 0.0f, 0.75f, 0.72f,
- 0.0f, 0.6f, 0.5f,
- 0.0f, 0.93f, 0.95f,
- 0.0f, 0.5f, 0.4f,
- 0.0f, 0.3f, 0.2f
- });
- std::vector<float> anchors({
- 0.5f, 0.5f, 1.0f, 1.0f,
- 0.5f, 0.5f, 1.0f, 1.0f,
- 0.5f, 0.5f, 1.0f, 1.0f,
- 0.5f, 10.5f, 1.0f, 1.0f,
- 0.5f, 10.5f, 1.0f, 1.0f,
- 0.5f, 100.5f, 1.0f, 1.0f
- });
- DetectionPostProcessRegularNmsEndToEnd<armnn::DataType::Float32>(defaultBackends, boxEncodings, scores, anchors);
-}
+// DISABLED
+//TEST_CASE("NeonDetectionPostProcessRegularNmsTest")
+//{
+// std::vector<float> boxEncodings({
+// 0.0f, 0.0f, 0.0f, 0.0f,
+// 0.0f, 1.0f, 0.0f, 0.0f,
+// 0.0f, -1.0f, 0.0f, 0.0f,
+// 0.0f, 0.0f, 0.0f, 0.0f,
+// 0.0f, 1.0f, 0.0f, 0.0f,
+// 0.0f, 0.0f, 0.0f, 0.0f
+// });
+// std::vector<float> scores({
+// 0.0f, 0.9f, 0.8f,
+// 0.0f, 0.75f, 0.72f,
+// 0.0f, 0.6f, 0.5f,
+// 0.0f, 0.93f, 0.95f,
+// 0.0f, 0.5f, 0.4f,
+// 0.0f, 0.3f, 0.2f
+// });
+// std::vector<float> anchors({
+// 0.5f, 0.5f, 1.0f, 1.0f,
+// 0.5f, 0.5f, 1.0f, 1.0f,
+// 0.5f, 0.5f, 1.0f, 1.0f,
+// 0.5f, 10.5f, 1.0f, 1.0f,
+// 0.5f, 10.5f, 1.0f, 1.0f,
+// 0.5f, 100.5f, 1.0f, 1.0f
+// });
+// DetectionPostProcessRegularNmsEndToEnd<armnn::DataType::Float32>(neonDefaultBackends,
+// boxEncodings,
+// scores,
+// anchors);
+//}
inline void QuantizeData(uint8_t* quant, const float* dequant, const TensorInfo& info)
{
@@ -605,136 +609,141 @@ inline void QuantizeData(uint8_t* quant, const float* dequant, const TensorInfo&
}
}
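// For context: QuantizeData's body (elided in this hunk) applies ArmNN's
// usual per-element affine quantisation. Conceptually, for a uint8 target:
//   q = clamp(round(x / info.GetQuantizationScale())
//             + info.GetQuantizationOffset(), 0, 255)
// e.g. with the 0.01f scale and 0 offset used for scoresInfo below, a score
// of 0.75f quantises to 75. (Illustrative sketch only.)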
-BOOST_AUTO_TEST_CASE(NeonDetectionPostProcessRegularNmsUint8Test, * boost::unit_test::disabled())
-{
- armnn::TensorInfo boxEncodingsInfo({ 1, 6, 4 }, armnn::DataType::Float32);
- armnn::TensorInfo scoresInfo({ 1, 6, 3 }, armnn::DataType::Float32);
- armnn::TensorInfo anchorsInfo({ 6, 4 }, armnn::DataType::Float32);
-
- boxEncodingsInfo.SetQuantizationScale(1.0f);
- boxEncodingsInfo.SetQuantizationOffset(1);
- scoresInfo.SetQuantizationScale(0.01f);
- scoresInfo.SetQuantizationOffset(0);
- anchorsInfo.SetQuantizationScale(0.5f);
- anchorsInfo.SetQuantizationOffset(0);
-
- std::vector<float> boxEncodings({
- 0.0f, 0.0f, 0.0f, 0.0f,
- 0.0f, 1.0f, 0.0f, 0.0f,
- 0.0f, -1.0f, 0.0f, 0.0f,
- 0.0f, 0.0f, 0.0f, 0.0f,
- 0.0f, 1.0f, 0.0f, 0.0f,
- 0.0f, 0.0f, 0.0f, 0.0f
- });
- std::vector<float> scores({
- 0.0f, 0.9f, 0.8f,
- 0.0f, 0.75f, 0.72f,
- 0.0f, 0.6f, 0.5f,
- 0.0f, 0.93f, 0.95f,
- 0.0f, 0.5f, 0.4f,
- 0.0f, 0.3f, 0.2f
- });
- std::vector<float> anchors({
- 0.5f, 0.5f, 1.0f, 1.0f,
- 0.5f, 0.5f, 1.0f, 1.0f,
- 0.5f, 0.5f, 1.0f, 1.0f,
- 0.5f, 10.5f, 1.0f, 1.0f,
- 0.5f, 10.5f, 1.0f, 1.0f,
- 0.5f, 100.5f, 1.0f, 1.0f
- });
-
- std::vector<uint8_t> qBoxEncodings(boxEncodings.size(), 0);
- std::vector<uint8_t> qScores(scores.size(), 0);
- std::vector<uint8_t> qAnchors(anchors.size(), 0);
- QuantizeData(qBoxEncodings.data(), boxEncodings.data(), boxEncodingsInfo);
- QuantizeData(qScores.data(), scores.data(), scoresInfo);
- QuantizeData(qAnchors.data(), anchors.data(), anchorsInfo);
- DetectionPostProcessRegularNmsEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends, qBoxEncodings,
- qScores, qAnchors,
- 1.0f, 1, 0.01f, 0, 0.5f, 0);
-}
-
-BOOST_AUTO_TEST_CASE(NeonDetectionPostProcessFastNmsTest, * boost::unit_test::disabled())
-{
- std::vector<float> boxEncodings({
- 0.0f, 0.0f, 0.0f, 0.0f,
- 0.0f, 1.0f, 0.0f, 0.0f,
- 0.0f, -1.0f, 0.0f, 0.0f,
- 0.0f, 0.0f, 0.0f, 0.0f,
- 0.0f, 1.0f, 0.0f, 0.0f,
- 0.0f, 0.0f, 0.0f, 0.0f
- });
- std::vector<float> scores({
- 0.0f, 0.9f, 0.8f,
- 0.0f, 0.75f, 0.72f,
- 0.0f, 0.6f, 0.5f,
- 0.0f, 0.93f, 0.95f,
- 0.0f, 0.5f, 0.4f,
- 0.0f, 0.3f, 0.2f
- });
- std::vector<float> anchors({
- 0.5f, 0.5f, 1.0f, 1.0f,
- 0.5f, 0.5f, 1.0f, 1.0f,
- 0.5f, 0.5f, 1.0f, 1.0f,
- 0.5f, 10.5f, 1.0f, 1.0f,
- 0.5f, 10.5f, 1.0f, 1.0f,
- 0.5f, 100.5f, 1.0f, 1.0f
- });
- DetectionPostProcessFastNmsEndToEnd<armnn::DataType::Float32>(defaultBackends, boxEncodings, scores, anchors);
-}
-
-BOOST_AUTO_TEST_CASE(NeonDetectionPostProcessFastNmsUint8Test, * boost::unit_test::disabled())
-{
- armnn::TensorInfo boxEncodingsInfo({ 1, 6, 4 }, armnn::DataType::Float32);
- armnn::TensorInfo scoresInfo({ 1, 6, 3 }, armnn::DataType::Float32);
- armnn::TensorInfo anchorsInfo({ 6, 4 }, armnn::DataType::Float32);
-
- boxEncodingsInfo.SetQuantizationScale(1.0f);
- boxEncodingsInfo.SetQuantizationOffset(1);
- scoresInfo.SetQuantizationScale(0.01f);
- scoresInfo.SetQuantizationOffset(0);
- anchorsInfo.SetQuantizationScale(0.5f);
- anchorsInfo.SetQuantizationOffset(0);
-
- std::vector<float> boxEncodings({
- 0.0f, 0.0f, 0.0f, 0.0f,
- 0.0f, 1.0f, 0.0f, 0.0f,
- 0.0f, -1.0f, 0.0f, 0.0f,
- 0.0f, 0.0f, 0.0f, 0.0f,
- 0.0f, 1.0f, 0.0f, 0.0f,
- 0.0f, 0.0f, 0.0f, 0.0f
- });
- std::vector<float> scores({
- 0.0f, 0.9f, 0.8f,
- 0.0f, 0.75f, 0.72f,
- 0.0f, 0.6f, 0.5f,
- 0.0f, 0.93f, 0.95f,
- 0.0f, 0.5f, 0.4f,
- 0.0f, 0.3f, 0.2f
- });
- std::vector<float> anchors({
- 0.5f, 0.5f, 1.0f, 1.0f,
- 0.5f, 0.5f, 1.0f, 1.0f,
- 0.5f, 0.5f, 1.0f, 1.0f,
- 0.5f, 10.5f, 1.0f, 1.0f,
- 0.5f, 10.5f, 1.0f, 1.0f,
- 0.5f, 100.5f, 1.0f, 1.0f
- });
-
- std::vector<uint8_t> qBoxEncodings(boxEncodings.size(), 0);
- std::vector<uint8_t> qScores(scores.size(), 0);
- std::vector<uint8_t> qAnchors(anchors.size(), 0);
- QuantizeData(qBoxEncodings.data(), boxEncodings.data(), boxEncodingsInfo);
- QuantizeData(qScores.data(), scores.data(), scoresInfo);
- QuantizeData(qAnchors.data(), anchors.data(), anchorsInfo);
- DetectionPostProcessFastNmsEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends, qBoxEncodings,
- qScores, qAnchors,
- 1.0f, 1, 0.01f, 0, 0.5f, 0);
-}
-
-BOOST_AUTO_TEST_CASE(NeonQLstmEndToEndTest)
-{
- QLstmEndToEnd(defaultBackends);
-}
-
-BOOST_AUTO_TEST_SUITE_END()
+// DISABLED
+//TEST_CASE("NeonDetectionPostProcessRegularNmsUint8Test")
+//{
+// armnn::TensorInfo boxEncodingsInfo({ 1, 6, 4 }, armnn::DataType::Float32);
+// armnn::TensorInfo scoresInfo({ 1, 6, 3 }, armnn::DataType::Float32);
+// armnn::TensorInfo anchorsInfo({ 6, 4 }, armnn::DataType::Float32);
+//
+// boxEncodingsInfo.SetQuantizationScale(1.0f);
+// boxEncodingsInfo.SetQuantizationOffset(1);
+// scoresInfo.SetQuantizationScale(0.01f);
+// scoresInfo.SetQuantizationOffset(0);
+// anchorsInfo.SetQuantizationScale(0.5f);
+// anchorsInfo.SetQuantizationOffset(0);
+//
+// std::vector<float> boxEncodings({
+// 0.0f, 0.0f, 0.0f, 0.0f,
+// 0.0f, 1.0f, 0.0f, 0.0f,
+// 0.0f, -1.0f, 0.0f, 0.0f,
+// 0.0f, 0.0f, 0.0f, 0.0f,
+// 0.0f, 1.0f, 0.0f, 0.0f,
+// 0.0f, 0.0f, 0.0f, 0.0f
+// });
+// std::vector<float> scores({
+// 0.0f, 0.9f, 0.8f,
+// 0.0f, 0.75f, 0.72f,
+// 0.0f, 0.6f, 0.5f,
+// 0.0f, 0.93f, 0.95f,
+// 0.0f, 0.5f, 0.4f,
+// 0.0f, 0.3f, 0.2f
+// });
+// std::vector<float> anchors({
+// 0.5f, 0.5f, 1.0f, 1.0f,
+// 0.5f, 0.5f, 1.0f, 1.0f,
+// 0.5f, 0.5f, 1.0f, 1.0f,
+// 0.5f, 10.5f, 1.0f, 1.0f,
+// 0.5f, 10.5f, 1.0f, 1.0f,
+// 0.5f, 100.5f, 1.0f, 1.0f
+// });
+//
+// std::vector<uint8_t> qBoxEncodings(boxEncodings.size(), 0);
+// std::vector<uint8_t> qScores(scores.size(), 0);
+// std::vector<uint8_t> qAnchors(anchors.size(), 0);
+// QuantizeData(qBoxEncodings.data(), boxEncodings.data(), boxEncodingsInfo);
+// QuantizeData(qScores.data(), scores.data(), scoresInfo);
+// QuantizeData(qAnchors.data(), anchors.data(), anchorsInfo);
+// DetectionPostProcessRegularNmsEndToEnd<armnn::DataType::QAsymmU8>(neonDefaultBackends, qBoxEncodings,
+// qScores, qAnchors,
+// 1.0f, 1, 0.01f, 0, 0.5f, 0);
+//}
+//
+// DISABLED
+//TEST_CASE("NeonDetectionPostProcessFastNmsTest")
+//{
+// std::vector<float> boxEncodings({
+// 0.0f, 0.0f, 0.0f, 0.0f,
+// 0.0f, 1.0f, 0.0f, 0.0f,
+// 0.0f, -1.0f, 0.0f, 0.0f,
+// 0.0f, 0.0f, 0.0f, 0.0f,
+// 0.0f, 1.0f, 0.0f, 0.0f,
+// 0.0f, 0.0f, 0.0f, 0.0f
+// });
+// std::vector<float> scores({
+// 0.0f, 0.9f, 0.8f,
+// 0.0f, 0.75f, 0.72f,
+// 0.0f, 0.6f, 0.5f,
+// 0.0f, 0.93f, 0.95f,
+// 0.0f, 0.5f, 0.4f,
+// 0.0f, 0.3f, 0.2f
+// });
+// std::vector<float> anchors({
+// 0.5f, 0.5f, 1.0f, 1.0f,
+// 0.5f, 0.5f, 1.0f, 1.0f,
+// 0.5f, 0.5f, 1.0f, 1.0f,
+// 0.5f, 10.5f, 1.0f, 1.0f,
+// 0.5f, 10.5f, 1.0f, 1.0f,
+// 0.5f, 100.5f, 1.0f, 1.0f
+// });
+// DetectionPostProcessFastNmsEndToEnd<armnn::DataType::Float32>(neonDefaultBackends,
+// boxEncodings,
+// scores,
+// anchors);
+//}
+//
+// DISABLED
+//TEST_CASE("NeonDetectionPostProcessFastNmsUint8Test")
+//{
+// armnn::TensorInfo boxEncodingsInfo({ 1, 6, 4 }, armnn::DataType::Float32);
+// armnn::TensorInfo scoresInfo({ 1, 6, 3 }, armnn::DataType::Float32);
+// armnn::TensorInfo anchorsInfo({ 6, 4 }, armnn::DataType::Float32);
+//
+// boxEncodingsInfo.SetQuantizationScale(1.0f);
+// boxEncodingsInfo.SetQuantizationOffset(1);
+// scoresInfo.SetQuantizationScale(0.01f);
+// scoresInfo.SetQuantizationOffset(0);
+// anchorsInfo.SetQuantizationScale(0.5f);
+// anchorsInfo.SetQuantizationOffset(0);
+//
+// std::vector<float> boxEncodings({
+// 0.0f, 0.0f, 0.0f, 0.0f,
+// 0.0f, 1.0f, 0.0f, 0.0f,
+// 0.0f, -1.0f, 0.0f, 0.0f,
+// 0.0f, 0.0f, 0.0f, 0.0f,
+// 0.0f, 1.0f, 0.0f, 0.0f,
+// 0.0f, 0.0f, 0.0f, 0.0f
+// });
+// std::vector<float> scores({
+// 0.0f, 0.9f, 0.8f,
+// 0.0f, 0.75f, 0.72f,
+// 0.0f, 0.6f, 0.5f,
+// 0.0f, 0.93f, 0.95f,
+// 0.0f, 0.5f, 0.4f,
+// 0.0f, 0.3f, 0.2f
+// });
+// std::vector<float> anchors({
+// 0.5f, 0.5f, 1.0f, 1.0f,
+// 0.5f, 0.5f, 1.0f, 1.0f,
+// 0.5f, 0.5f, 1.0f, 1.0f,
+// 0.5f, 10.5f, 1.0f, 1.0f,
+// 0.5f, 10.5f, 1.0f, 1.0f,
+// 0.5f, 100.5f, 1.0f, 1.0f
+// });
+//
+// std::vector<uint8_t> qBoxEncodings(boxEncodings.size(), 0);
+// std::vector<uint8_t> qScores(scores.size(), 0);
+// std::vector<uint8_t> qAnchors(anchors.size(), 0);
+// QuantizeData(qBoxEncodings.data(), boxEncodings.data(), boxEncodingsInfo);
+// QuantizeData(qScores.data(), scores.data(), scoresInfo);
+// QuantizeData(qAnchors.data(), anchors.data(), anchorsInfo);
+// DetectionPostProcessFastNmsEndToEnd<armnn::DataType::QAsymmU8>(neonDefaultBackends, qBoxEncodings,
+// qScores, qAnchors,
+// 1.0f, 1, 0.01f, 0, 0.5f, 0);
+//}
+
+TEST_CASE("NeonQLstmEndToEndTest")
+{
+ QLstmEndToEnd(neonDefaultBackends);
+}
+
+}
diff --git a/src/backends/neon/test/NeonFallbackTests.cpp b/src/backends/neon/test/NeonFallbackTests.cpp
index 383a5f654c..e7a56a4848 100644
--- a/src/backends/neon/test/NeonFallbackTests.cpp
+++ b/src/backends/neon/test/NeonFallbackTests.cpp
@@ -8,24 +8,24 @@
#include <test/GraphUtils.hpp>
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
-BOOST_AUTO_TEST_SUITE(NeonFallback)
-
-BOOST_AUTO_TEST_CASE(FallbackImportToCpuAcc)
+TEST_SUITE("NeonFallback")
+{
+TEST_CASE("FallbackImportToCpuAcc")
{
using namespace armnn;
// Create a mock backend object
MockImportBackendInitialiser initialiser; // Register the Mock Backend
auto backendObjPtr = CreateBackendObject(MockImportBackendId());
- BOOST_TEST((backendObjPtr != nullptr));
+ CHECK((backendObjPtr != nullptr));
BackendIdSet backendIds = BackendRegistryInstance().GetBackendIds();
if (backendIds.find("MockRef") == backendIds.end())
{
std::string message = "Cannot load MockRef";
- BOOST_FAIL(message);
+ FAIL(message);
}
// Create runtime in which test will run and allow fallback to CpuRef.
@@ -73,12 +73,12 @@ BOOST_AUTO_TEST_CASE(FallbackImportToCpuAcc)
armnn::Layer* const layer6 = GetFirstLayerWithName(graph, "output");
// Checks order is valid.
- BOOST_TEST(CheckOrder(graph, layer0, layer1));
- BOOST_TEST(CheckOrder(graph, layer1, layer2));
- BOOST_TEST(CheckOrder(graph, layer2, layer3));
- BOOST_TEST(CheckOrder(graph, layer3, layer4));
- BOOST_TEST(CheckOrder(graph, layer4, layer5));
- BOOST_TEST(CheckOrder(graph, layer5, layer6));
+ CHECK(CheckOrder(graph, layer0, layer1));
+ CHECK(CheckOrder(graph, layer1, layer2));
+ CHECK(CheckOrder(graph, layer2, layer3));
+ CHECK(CheckOrder(graph, layer3, layer4));
+ CHECK(CheckOrder(graph, layer4, layer5));
+ CHECK(CheckOrder(graph, layer5, layer6));
// Load it into the runtime. It should pass.
NetworkId netId;
@@ -131,37 +131,37 @@ BOOST_AUTO_TEST_CASE(FallbackImportToCpuAcc)
// Contains ImportMemGeneric
std::size_t found = dump.find("ImportMemGeneric");
- BOOST_TEST(found != std::string::npos);
+ CHECK(found != std::string::npos);
// Contains SyncMemGeneric
found = dump.find("SyncMemGeneric");
- BOOST_TEST(found != std::string::npos);
+ CHECK(found != std::string::npos);
// Does not contain CopyMemGeneric
found = dump.find("CopyMemGeneric");
- BOOST_TEST(found == std::string::npos);
+ CHECK(found == std::string::npos);
// Use memory import between backends
- BOOST_TEST((layer4->GetType() == LayerType::MemImport));
+ CHECK((layer4->GetType() == LayerType::MemImport));
// Check output is as expected
- BOOST_TEST(outputData == expectedOutput);
+ CHECK(outputData == expectedOutput);
}
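// A note on the technique above: 'dump' holds profiler output captured
// earlier in the test (elided from this hunk); it names the workloads that
// actually executed, so substring checks such as
// dump.find("ImportMemGeneric") are a simple, if string-brittle, way to
// assert which memory strategy the runtime chose.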
-BOOST_AUTO_TEST_CASE(FallbackPaddingCopyToCpuAcc)
+TEST_CASE("FallbackPaddingCopyToCpuAcc")
{
using namespace armnn;
// Create a mock backend object
MockImportBackendInitialiser initialiser; // Register the Mock Backend
auto backendObjPtr = CreateBackendObject(MockImportBackendId());
- BOOST_TEST((backendObjPtr != nullptr));
+ CHECK((backendObjPtr != nullptr));
BackendIdSet backendIds = BackendRegistryInstance().GetBackendIds();
if (backendIds.find("MockRef") == backendIds.end())
{
std::string message = "Cannot load MockRef";
- BOOST_FAIL(message);
+ FAIL(message);
}
// Create runtime in which test will run and allow fallback to CpuRef.
@@ -208,11 +208,11 @@ BOOST_AUTO_TEST_CASE(FallbackPaddingCopyToCpuAcc)
armnn::Layer* const layer5 = GetFirstLayerWithName(graph, "output");
// Checks order is valid.
- BOOST_TEST(CheckOrder(graph, layer0, layer1));
- BOOST_TEST(CheckOrder(graph, layer1, layer2));
- BOOST_TEST(CheckOrder(graph, layer2, layer3));
- BOOST_TEST(CheckOrder(graph, layer3, layer4));
- BOOST_TEST(CheckOrder(graph, layer4, layer5));
+ CHECK(CheckOrder(graph, layer0, layer1));
+ CHECK(CheckOrder(graph, layer1, layer2));
+ CHECK(CheckOrder(graph, layer2, layer3));
+ CHECK(CheckOrder(graph, layer3, layer4));
+ CHECK(CheckOrder(graph, layer4, layer5));
// Load it into the runtime. It should pass.
NetworkId netId;
@@ -261,37 +261,37 @@ BOOST_AUTO_TEST_CASE(FallbackPaddingCopyToCpuAcc)
// Contains CopyMemGeneric between the backends
std::size_t found = dump.find("CopyMemGeneric");
- BOOST_TEST(found != std::string::npos);
+ CHECK(found != std::string::npos);
// Contains SyncMemGeneric for the output
found = dump.find("SyncMemGeneric");
- BOOST_TEST(found != std::string::npos);
+ CHECK(found != std::string::npos);
// Does not contain ImportMemGeneric
found = dump.find("ImportMemGeneric");
- BOOST_TEST(found == std::string::npos);
+ CHECK(found == std::string::npos);
// Use memory copy between backends
- BOOST_TEST((layer3->GetType() == LayerType::MemCopy));
+ CHECK((layer3->GetType() == LayerType::MemCopy));
// Check output is as expected
- BOOST_TEST(outputData == expectedOutput);
+ CHECK(outputData == expectedOutput);
}
-BOOST_AUTO_TEST_CASE(FallbackImportFromCpuAcc)
+TEST_CASE("FallbackImportFromCpuAcc")
{
using namespace armnn;
// Create a mock backend object
MockImportBackendInitialiser initialiser; // Register the Mock Backend
auto backendObjPtr = CreateBackendObject(MockImportBackendId());
- BOOST_TEST((backendObjPtr != nullptr));
+ CHECK((backendObjPtr != nullptr));
BackendIdSet backendIds = BackendRegistryInstance().GetBackendIds();
if (backendIds.find("MockRef") == backendIds.end())
{
std::string message = "Cannot load MockRef";
- BOOST_FAIL(message);
+ FAIL(message);
}
// Create runtime in which test will run and allow fallback to CpuRef.
@@ -339,12 +339,12 @@ BOOST_AUTO_TEST_CASE(FallbackImportFromCpuAcc)
armnn::Layer* const layer6 = GetFirstLayerWithName(graph, "output");
// Checks order is valid.
- BOOST_TEST(CheckOrder(graph, layer0, layer1));
- BOOST_TEST(CheckOrder(graph, layer1, layer2));
- BOOST_TEST(CheckOrder(graph, layer2, layer3));
- BOOST_TEST(CheckOrder(graph, layer3, layer4));
- BOOST_TEST(CheckOrder(graph, layer4, layer5));
- BOOST_TEST(CheckOrder(graph, layer5, layer6));
+ CHECK(CheckOrder(graph, layer0, layer1));
+ CHECK(CheckOrder(graph, layer1, layer2));
+ CHECK(CheckOrder(graph, layer2, layer3));
+ CHECK(CheckOrder(graph, layer3, layer4));
+ CHECK(CheckOrder(graph, layer4, layer5));
+ CHECK(CheckOrder(graph, layer5, layer6));
// Load it into the runtime. It should pass.
NetworkId netId;
@@ -398,37 +398,37 @@ BOOST_AUTO_TEST_CASE(FallbackImportFromCpuAcc)
// Contains ImportMemGeneric
std::size_t found = dump.find("ImportMemGeneric");
- BOOST_TEST(found != std::string::npos);
+ CHECK(found != std::string::npos);
// Contains SyncMemGeneric
found = dump.find("SyncMemGeneric");
- BOOST_TEST(found != std::string::npos);
+ CHECK(found != std::string::npos);
// Does not contain CopyMemGeneric
found = dump.find("CopyMemGeneric");
- BOOST_TEST(found == std::string::npos);
+ CHECK(found == std::string::npos);
// Use memory import between backends
- BOOST_TEST((layer4->GetType() == LayerType::MemImport));
+ CHECK((layer4->GetType() == LayerType::MemImport));
// Check output is as expected
- BOOST_TEST(outputData == expectedOutput);
+ CHECK(outputData == expectedOutput);
}
-BOOST_AUTO_TEST_CASE(FallbackPaddingCopyFromCpuAcc)
+TEST_CASE("FallbackPaddingCopyFromCpuAcc")
{
using namespace armnn;
// Create a mock backend object
MockImportBackendInitialiser initialiser; // Register the Mock Backend
auto backendObjPtr = CreateBackendObject(MockImportBackendId());
- BOOST_TEST((backendObjPtr != nullptr));
+ CHECK((backendObjPtr != nullptr));
BackendIdSet backendIds = BackendRegistryInstance().GetBackendIds();
if (backendIds.find("MockRef") == backendIds.end())
{
std::string message = "Cannot load MockRef";
- BOOST_FAIL(message);
+ FAIL(message);
}
// Create runtime in which test will run and allow fallback to CpuRef.
@@ -475,11 +475,11 @@ BOOST_AUTO_TEST_CASE(FallbackPaddingCopyFromCpuAcc)
armnn::Layer* const layer5 = GetFirstLayerWithName(graph, "output");
// Checks order is valid.
- BOOST_TEST(CheckOrder(graph, layer0, layer1));
- BOOST_TEST(CheckOrder(graph, layer1, layer2));
- BOOST_TEST(CheckOrder(graph, layer2, layer3));
- BOOST_TEST(CheckOrder(graph, layer3, layer4));
- BOOST_TEST(CheckOrder(graph, layer4, layer5));
+ CHECK(CheckOrder(graph, layer0, layer1));
+ CHECK(CheckOrder(graph, layer1, layer2));
+ CHECK(CheckOrder(graph, layer2, layer3));
+ CHECK(CheckOrder(graph, layer3, layer4));
+ CHECK(CheckOrder(graph, layer4, layer5));
// Load it into the runtime. It should pass.
NetworkId netId;
@@ -528,37 +528,37 @@ BOOST_AUTO_TEST_CASE(FallbackPaddingCopyFromCpuAcc)
// Contains CopyMemGeneric between the backends
std::size_t found = dump.find("CopyMemGeneric");
- BOOST_TEST(found != std::string::npos);
+ CHECK(found != std::string::npos);
// Contains SyncMemGeneric for the output
found = dump.find("SyncMemGeneric");
- BOOST_TEST(found != std::string::npos);
+ CHECK(found != std::string::npos);
// Does not contain ImportMemGeneric
found = dump.find("ImportMemGeneric");
- BOOST_TEST(found == std::string::npos);
+ CHECK(found == std::string::npos);
// Use memory copy between backends
- BOOST_TEST((layer3->GetType() == LayerType::MemCopy));
+ CHECK((layer3->GetType() == LayerType::MemCopy));
// Check output is as expected
- BOOST_TEST(outputData == expectedOutput);
+ CHECK(outputData == expectedOutput);
}
-BOOST_AUTO_TEST_CASE(FallbackDisableImportFromCpuAcc)
+TEST_CASE("FallbackDisableImportFromCpuAcc")
{
using namespace armnn;
// Create a mock backend object
MockImportBackendInitialiser initialiser; // Register the Mock Backend
auto backendObjPtr = CreateBackendObject(MockImportBackendId());
- BOOST_TEST((backendObjPtr != nullptr));
+ CHECK((backendObjPtr != nullptr));
BackendIdSet backendIds = BackendRegistryInstance().GetBackendIds();
if (backendIds.find("MockRef") == backendIds.end())
{
std::string message = "Cannot load MockRef";
- BOOST_FAIL(message);
+ FAIL(message);
}
// Create runtime in which test will run and allow fallback to CpuRef.
@@ -604,12 +604,12 @@ BOOST_AUTO_TEST_CASE(FallbackDisableImportFromCpuAcc)
armnn::Layer* const layer6 = GetFirstLayerWithName(graph, "output");
// Checks order is valid.
- BOOST_TEST(CheckOrder(graph, layer0, layer1));
- BOOST_TEST(CheckOrder(graph, layer1, layer2));
- BOOST_TEST(CheckOrder(graph, layer2, layer3));
- BOOST_TEST(CheckOrder(graph, layer3, layer4));
- BOOST_TEST(CheckOrder(graph, layer4, layer5));
- BOOST_TEST(CheckOrder(graph, layer5, layer6));
+ CHECK(CheckOrder(graph, layer0, layer1));
+ CHECK(CheckOrder(graph, layer1, layer2));
+ CHECK(CheckOrder(graph, layer2, layer3));
+ CHECK(CheckOrder(graph, layer3, layer4));
+ CHECK(CheckOrder(graph, layer4, layer5));
+ CHECK(CheckOrder(graph, layer5, layer6));
// Load it into the runtime. It should pass.
NetworkId netId;
@@ -663,21 +663,21 @@ BOOST_AUTO_TEST_CASE(FallbackDisableImportFromCpuAcc)
// Contains CopyMemGeneric between the backends
std::size_t found = dump.find("CopyMemGeneric");
- BOOST_TEST(found != std::string::npos);
+ CHECK(found != std::string::npos);
// Does not contain ImportMemGeneric
found = dump.find("ImportMemGeneric");
- BOOST_TEST(found == std::string::npos);
+ CHECK(found == std::string::npos);
// Use memory copy between backends
- BOOST_TEST((layer4->GetType() == LayerType::MemCopy));
+ CHECK((layer4->GetType() == LayerType::MemCopy));
// Check output is as expected
- BOOST_TEST(outputData == expectedOutput);
+ CHECK(outputData == expectedOutput);
}
#if defined(ARMCOMPUTECL_ENABLED)
-BOOST_AUTO_TEST_CASE(NeonImportEnabledFallbackToCl)
+TEST_CASE("NeonImportEnabledFallbackToCl")
{
using namespace armnn;
@@ -728,18 +728,18 @@ BOOST_AUTO_TEST_CASE(NeonImportEnabledFallbackToCl)
armnn::Layer* const layer6 = GetFirstLayerWithName(graph, "output");
// Checks order is valid.
- BOOST_TEST(CheckOrder(graph, layer0, layer1));
- BOOST_TEST(CheckOrder(graph, layer1, layer2));
- BOOST_TEST(CheckOrder(graph, layer2, layer3));
- BOOST_TEST(CheckOrder(graph, layer3, layer4));
- BOOST_TEST(CheckOrder(graph, layer4, layer5));
- BOOST_TEST(CheckOrder(graph, layer5, layer6));
+ CHECK(CheckOrder(graph, layer0, layer1));
+ CHECK(CheckOrder(graph, layer1, layer2));
+ CHECK(CheckOrder(graph, layer2, layer3));
+ CHECK(CheckOrder(graph, layer3, layer4));
+ CHECK(CheckOrder(graph, layer4, layer5));
+ CHECK(CheckOrder(graph, layer5, layer6));
// Use memory copy between backends
- BOOST_TEST((layer4->GetType() == LayerType::MemCopy));
+ CHECK((layer4->GetType() == LayerType::MemCopy));
// Correctly use backend hint
- BOOST_TEST((layer5->GetBackendId() == Compute::GpuAcc ));
+ CHECK((layer5->GetBackendId() == Compute::GpuAcc));
// Load it into the runtime. It should pass.
NetworkId netId;
@@ -779,7 +779,7 @@ BOOST_AUTO_TEST_CASE(NeonImportEnabledFallbackToCl)
size_t space = totalBytes + alignment + alignment;
auto inputData = std::make_unique<uint8_t[]>(space);
void* alignedInputPtr = inputData.get();
- BOOST_CHECK(std::align(alignment, totalBytes, alignedInputPtr, space));
+ CHECK(std::align(alignment, totalBytes, alignedInputPtr, space));
auto* inputPtr = reinterpret_cast<float*>(alignedInputPtr);
std::copy(inputData2.begin(), inputData2.end(), inputPtr);
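// For context: std::align(alignment, totalBytes, alignedInputPtr, space)
// advances alignedInputPtr to the next 'alignment' boundary inside the
// buffer that was over-allocated by two alignments above, shrinking 'space'
// to match, and returns nullptr (caught by the CHECK) if the region cannot
// fit. The aligned buffer is what allows zero-copy import by the backend.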
@@ -808,21 +808,21 @@ BOOST_AUTO_TEST_CASE(NeonImportEnabledFallbackToCl)
// Executed Subtraction using GpuAcc
std::size_t found = dump.find("ClSubtractionWorkload_Execute");
- BOOST_TEST(found != std::string::npos);
+ CHECK(found != std::string::npos);
// Contains CopyMemGeneric
found = dump.find("CopyMemGeneric");
- BOOST_TEST(found != std::string::npos);
+ CHECK(found != std::string::npos);
// Check output is as expected
for(unsigned int i = 0; i < numElements; ++i)
{
- BOOST_TEST(outputData[i] == expectedOutput[i]);
+ CHECK(outputData[i] == expectedOutput[i]);
}
runtime->UnloadNetwork(netId);
}
-BOOST_AUTO_TEST_CASE(NeonImportDisabledFallbackToCl)
+TEST_CASE("NeonImportDisabledFallbackToCl")
{
using namespace armnn;
@@ -872,18 +872,18 @@ BOOST_AUTO_TEST_CASE(NeonImportDisabledFallbackToCl)
armnn::Layer* const layer6 = GetFirstLayerWithName(graph, "output");
// Checks order is valid.
- BOOST_TEST(CheckOrder(graph, layer0, layer1));
- BOOST_TEST(CheckOrder(graph, layer1, layer2));
- BOOST_TEST(CheckOrder(graph, layer2, layer3));
- BOOST_TEST(CheckOrder(graph, layer3, layer4));
- BOOST_TEST(CheckOrder(graph, layer4, layer5));
- BOOST_TEST(CheckOrder(graph, layer5, layer6));
+ CHECK(CheckOrder(graph, layer0, layer1));
+ CHECK(CheckOrder(graph, layer1, layer2));
+ CHECK(CheckOrder(graph, layer2, layer3));
+ CHECK(CheckOrder(graph, layer3, layer4));
+ CHECK(CheckOrder(graph, layer4, layer5));
+ CHECK(CheckOrder(graph, layer5, layer6));
// Use memory copy between backends
- BOOST_TEST((layer4->GetType() == LayerType::MemCopy));
+ CHECK((layer4->GetType() == LayerType::MemCopy));
// Correctly use backend hint
- BOOST_TEST((layer5->GetBackendId() == Compute::GpuAcc ));
+ CHECK((layer5->GetBackendId() == Compute::GpuAcc));
// Load it into the runtime. It should pass.
NetworkId netId;
@@ -934,17 +934,17 @@ BOOST_AUTO_TEST_CASE(NeonImportDisabledFallbackToCl)
// Executed Subtraction using GpuAcc
std::size_t found = dump.find("ClSubtractionWorkload_Execute");
- BOOST_TEST(found != std::string::npos);
+ CHECK(found != std::string::npos);
// Contains CopyMemGeneric
found = dump.find("CopyMemGeneric");
- BOOST_TEST(found != std::string::npos);
+ CHECK(found != std::string::npos);
// Check output is as expected
- BOOST_TEST(outputData == expectedOutput);
+ CHECK(outputData == expectedOutput);
}
-BOOST_AUTO_TEST_CASE(NeonImportEnabledFallbackSubgraphToCl)
+TEST_CASE("NeonImportEnabledFallbackSubgraphToCl")
{
using namespace armnn;
@@ -1007,21 +1007,21 @@ BOOST_AUTO_TEST_CASE(NeonImportEnabledFallbackSubgraphToCl)
armnn::Layer* const layer8 = GetFirstLayerWithName(graph, "output");
// Checks order is valid.
- BOOST_TEST(CheckOrder(graph, layer0, layer1));
- BOOST_TEST(CheckOrder(graph, layer1, layer2));
- BOOST_TEST(CheckOrder(graph, layer2, layer3));
- BOOST_TEST(CheckOrder(graph, layer3, layer4));
- BOOST_TEST(CheckOrder(graph, layer4, layer5));
- BOOST_TEST(CheckOrder(graph, layer5, layer6));
- BOOST_TEST(CheckOrder(graph, layer6, layer7));
- BOOST_TEST(CheckOrder(graph, layer7, layer8));
+ CHECK(CheckOrder(graph, layer0, layer1));
+ CHECK(CheckOrder(graph, layer1, layer2));
+ CHECK(CheckOrder(graph, layer2, layer3));
+ CHECK(CheckOrder(graph, layer3, layer4));
+ CHECK(CheckOrder(graph, layer4, layer5));
+ CHECK(CheckOrder(graph, layer5, layer6));
+ CHECK(CheckOrder(graph, layer6, layer7));
+ CHECK(CheckOrder(graph, layer7, layer8));
// Use memory copy between backends
- BOOST_TEST((layer4->GetType() == LayerType::MemCopy));
- BOOST_TEST((layer6->GetType() == LayerType::MemCopy));
+ CHECK((layer4->GetType() == LayerType::MemCopy));
+ CHECK((layer6->GetType() == LayerType::MemCopy));
// Correctly use backend hint
- BOOST_TEST((layer5->GetBackendId() == Compute::GpuAcc ));
+ CHECK((layer5->GetBackendId() == Compute::GpuAcc));
// Load it into the runtime. It should pass.
NetworkId netId;
@@ -1056,7 +1056,7 @@ BOOST_AUTO_TEST_CASE(NeonImportEnabledFallbackSubgraphToCl)
size_t space = totalBytes + alignment + alignment;
auto inputData = std::make_unique<uint8_t[]>(space);
void* alignedInputPtr = inputData.get();
- BOOST_CHECK(std::align(alignment, totalBytes, alignedInputPtr, space));
+ CHECK(std::align(alignment, totalBytes, alignedInputPtr, space));
auto* inputPtr = reinterpret_cast<float*>(alignedInputPtr);
std::copy(inputData2.begin(), inputData2.end(), inputPtr);
@@ -1085,26 +1085,26 @@ BOOST_AUTO_TEST_CASE(NeonImportEnabledFallbackSubgraphToCl)
// Executed Subtraction using GpuAcc
std::size_t found = dump.find("ClSubtractionWorkload_Execute");
- BOOST_TEST(found != std::string::npos);
+ CHECK(found != std::string::npos);
// Correctly switch back to CpuAcc
found = dump.find("NeonPooling2dWorkload_Execute");
- BOOST_TEST(found != std::string::npos);
+ CHECK(found != std::string::npos);
// Contains CopyMemGeneric
found = dump.find("CopyMemGeneric");
- BOOST_TEST(found != std::string::npos);
+ CHECK(found != std::string::npos);
// Contains SyncMemGeneric for output
found = dump.find("SyncMemGeneric");
- BOOST_TEST(found != std::string::npos);
+ CHECK(found != std::string::npos);
// Check output is as expected
- BOOST_TEST(outputData == expectedOutput);
+ CHECK(outputData == expectedOutput);
runtime->UnloadNetwork(netId);
}
-BOOST_AUTO_TEST_CASE(NeonImportDisableFallbackSubgraphToCl)
+TEST_CASE("NeonImportDisableFallbackSubgraphToCl")
{
using namespace armnn;
@@ -1162,21 +1162,21 @@ BOOST_AUTO_TEST_CASE(NeonImportDisableFallbackSubgraphToCl)
armnn::Layer* const layer8 = GetFirstLayerWithName(graph, "output");
// Checks order is valid.
- BOOST_TEST(CheckOrder(graph, layer0, layer1));
- BOOST_TEST(CheckOrder(graph, layer1, layer2));
- BOOST_TEST(CheckOrder(graph, layer2, layer3));
- BOOST_TEST(CheckOrder(graph, layer3, layer4));
- BOOST_TEST(CheckOrder(graph, layer4, layer5));
- BOOST_TEST(CheckOrder(graph, layer5, layer6));
- BOOST_TEST(CheckOrder(graph, layer6, layer7));
- BOOST_TEST(CheckOrder(graph, layer7, layer8));
+ CHECK(CheckOrder(graph, layer0, layer1));
+ CHECK(CheckOrder(graph, layer1, layer2));
+ CHECK(CheckOrder(graph, layer2, layer3));
+ CHECK(CheckOrder(graph, layer3, layer4));
+ CHECK(CheckOrder(graph, layer4, layer5));
+ CHECK(CheckOrder(graph, layer5, layer6));
+ CHECK(CheckOrder(graph, layer6, layer7));
+ CHECK(CheckOrder(graph, layer7, layer8));
// Use memory copy between backends
- BOOST_TEST((layer4->GetType() == LayerType::MemCopy));
- BOOST_TEST((layer6->GetType() == LayerType::MemCopy));
+ CHECK((layer4->GetType() == LayerType::MemCopy));
+ CHECK((layer6->GetType() == LayerType::MemCopy));
// Correctly use backend hint
- BOOST_TEST((layer5->GetBackendId() == Compute::GpuAcc ));
+ CHECK((layer5->GetBackendId() == Compute::GpuAcc));
// Load it into the runtime. It should pass.
NetworkId netId;
@@ -1224,19 +1224,19 @@ BOOST_AUTO_TEST_CASE(NeonImportDisableFallbackSubgraphToCl)
// Executed Subtraction using GpuAcc
std::size_t found = dump.find("ClSubtractionWorkload_Execute");
- BOOST_TEST(found != std::string::npos);
+ CHECK(found != std::string::npos);
// Correctly switch back to CpuAcc
found = dump.find("NeonPooling2dWorkload_Execute");
- BOOST_TEST(found != std::string::npos);
+ CHECK(found != std::string::npos);
// Contains CopyMemGeneric
found = dump.find("CopyMemGeneric");
- BOOST_TEST(found != std::string::npos);
+ CHECK(found != std::string::npos);
// Check output is as expected
- BOOST_TEST(outputData == expectedOutput);
+ CHECK(outputData == expectedOutput);
}
#endif
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/backends/neon/test/NeonJsonPrinterTests.cpp b/src/backends/neon/test/NeonJsonPrinterTests.cpp
index a8d90fd509..6139c75ce2 100644
--- a/src/backends/neon/test/NeonJsonPrinterTests.cpp
+++ b/src/backends/neon/test/NeonJsonPrinterTests.cpp
@@ -7,16 +7,16 @@
#include <backendsCommon/test/JsonPrinterTestImpl.hpp>
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
#include <vector>
-BOOST_AUTO_TEST_SUITE(NeonJsonPrinter)
-
-BOOST_AUTO_TEST_CASE(SoftmaxProfilerJsonPrinterCpuAccTest)
+TEST_SUITE("NeonJsonPrinter")
+{
+TEST_CASE("SoftmaxProfilerJsonPrinterCpuAccTest")
{
std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
RunSoftmaxProfilerJsonPrinterTest(backends);
}
-BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
+}
\ No newline at end of file
diff --git a/src/backends/neon/test/NeonLayerSupportTests.cpp b/src/backends/neon/test/NeonLayerSupportTests.cpp
index 13a4c732a4..494c8f927f 100644
--- a/src/backends/neon/test/NeonLayerSupportTests.cpp
+++ b/src/backends/neon/test/NeonLayerSupportTests.cpp
@@ -14,132 +14,132 @@
#include <backendsCommon/test/IsLayerSupportedTestImpl.hpp>
#include <backendsCommon/test/LayerTests.hpp>
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
#include <string>
-BOOST_AUTO_TEST_SUITE(NeonLayerSupport)
-
-BOOST_AUTO_TEST_CASE(IsLayerSupportedFloat16Neon)
+TEST_SUITE("NeonLayerSupport")
+{
+TEST_CASE("IsLayerSupportedFloat16Neon")
{
armnn::NeonWorkloadFactory factory =
NeonWorkloadFactoryHelper::GetFactory(NeonWorkloadFactoryHelper::GetMemoryManager());
IsLayerSupportedTests<armnn::NeonWorkloadFactory, armnn::DataType::Float16>(&factory);
}
-BOOST_AUTO_TEST_CASE(IsLayerSupportedFloat32Neon)
+TEST_CASE("IsLayerSupportedFloat32Neon")
{
armnn::NeonWorkloadFactory factory =
NeonWorkloadFactoryHelper::GetFactory(NeonWorkloadFactoryHelper::GetMemoryManager());
IsLayerSupportedTests<armnn::NeonWorkloadFactory, armnn::DataType::Float32>(&factory);
}
-BOOST_AUTO_TEST_CASE(IsLayerSupportedQAsymmU8Neon)
+TEST_CASE("IsLayerSupportedQAsymmU8Neon")
{
armnn::NeonWorkloadFactory factory =
NeonWorkloadFactoryHelper::GetFactory(NeonWorkloadFactoryHelper::GetMemoryManager());
IsLayerSupportedTests<armnn::NeonWorkloadFactory, armnn::DataType::QAsymmU8>(&factory);
}
-BOOST_AUTO_TEST_CASE(IsLayerSupportedQAsymmS8Neon)
+TEST_CASE("IsLayerSupportedQAsymmS8Neon")
{
armnn::NeonWorkloadFactory factory =
NeonWorkloadFactoryHelper::GetFactory(NeonWorkloadFactoryHelper::GetMemoryManager());
IsLayerSupportedTests<armnn::NeonWorkloadFactory, armnn::DataType::QAsymmS8>(&factory);
}
-BOOST_AUTO_TEST_CASE(IsLayerSupportedQSymmS8Neon)
+TEST_CASE("IsLayerSupportedQSymmS8Neon")
{
armnn::NeonWorkloadFactory factory =
NeonWorkloadFactoryHelper::GetFactory(NeonWorkloadFactoryHelper::GetMemoryManager());
IsLayerSupportedTests<armnn::NeonWorkloadFactory, armnn::DataType::QSymmS8>(&factory);
}
-BOOST_AUTO_TEST_CASE(IsConvertFp16ToFp32SupportedNeon)
+TEST_CASE("IsConvertFp16ToFp32SupportedNeon")
{
std::string reasonIfUnsupported;
bool result = IsConvertLayerSupportedTests<armnn::NeonWorkloadFactory, armnn::ConvertFp16ToFp32Layer,
armnn::DataType::Float16, armnn::DataType::Float32>(reasonIfUnsupported);
- BOOST_CHECK(result);
+ CHECK(result);
}
-BOOST_AUTO_TEST_CASE(IsConvertFp32ToFp16SupportedNeon)
+TEST_CASE("IsConvertFp32ToFp16SupportedNeon")
{
std::string reasonIfUnsupported;
bool result = IsConvertLayerSupportedTests<armnn::NeonWorkloadFactory, armnn::ConvertFp32ToFp16Layer,
armnn::DataType::Float32, armnn::DataType::Float16>(reasonIfUnsupported);
- BOOST_CHECK(result);
+ CHECK(result);
}
-BOOST_AUTO_TEST_CASE(IsLogicalBinarySupportedNeon)
+TEST_CASE("IsLogicalBinarySupportedNeon")
{
std::string reasonIfUnsupported;
bool result = IsLogicalBinaryLayerSupportedTests<armnn::NeonWorkloadFactory,
armnn::DataType::Boolean, armnn::DataType::Boolean>(reasonIfUnsupported);
- BOOST_CHECK(result);
+ CHECK(result);
}
-BOOST_AUTO_TEST_CASE(IsLogicalBinaryBroadcastSupportedNeon)
+TEST_CASE("IsLogicalBinaryBroadcastSupportedNeon")
{
std::string reasonIfUnsupported;
bool result = IsLogicalBinaryLayerBroadcastSupportedTests<armnn::NeonWorkloadFactory,
armnn::DataType::Boolean, armnn::DataType::Boolean>(reasonIfUnsupported);
- BOOST_CHECK(result);
+ CHECK(result);
}
-BOOST_AUTO_TEST_CASE(IsMeanSupportedNeon)
+TEST_CASE("IsMeanSupportedNeon")
{
std::string reasonIfUnsupported;
bool result = IsMeanLayerSupportedTests<armnn::NeonWorkloadFactory,
armnn::DataType::Float32, armnn::DataType::Float32>(reasonIfUnsupported);
- BOOST_CHECK(result);
+ CHECK(result);
}
-BOOST_AUTO_TEST_CASE(IsConstantSupportedNeon)
+TEST_CASE("IsConstantSupportedNeon")
{
std::string reasonIfUnsupported;
bool result = IsConstantLayerSupportedTests<armnn::NeonWorkloadFactory,
armnn::DataType::Float16>(reasonIfUnsupported);
- BOOST_CHECK(result);
+ CHECK(result);
result = IsConstantLayerSupportedTests<armnn::NeonWorkloadFactory,
armnn::DataType::Float32>(reasonIfUnsupported);
- BOOST_CHECK(result);
+ CHECK(result);
result = IsConstantLayerSupportedTests<armnn::NeonWorkloadFactory,
armnn::DataType::QAsymmU8>(reasonIfUnsupported);
- BOOST_CHECK(result);
+ CHECK(result);
result = IsConstantLayerSupportedTests<armnn::NeonWorkloadFactory,
armnn::DataType::Boolean>(reasonIfUnsupported);
- BOOST_CHECK(!result);
+ CHECK(!result);
result = IsConstantLayerSupportedTests<armnn::NeonWorkloadFactory,
armnn::DataType::QSymmS16>(reasonIfUnsupported);
- BOOST_CHECK(result);
+ CHECK(result);
result = IsConstantLayerSupportedTests<armnn::NeonWorkloadFactory,
armnn::DataType::QSymmS8>(reasonIfUnsupported);
- BOOST_CHECK(result);
+ CHECK(result);
result = IsConstantLayerSupportedTests<armnn::NeonWorkloadFactory,
armnn::DataType::QAsymmS8>(reasonIfUnsupported);
- BOOST_CHECK(result);
+ CHECK(result);
result = IsConstantLayerSupportedTests<armnn::NeonWorkloadFactory,
armnn::DataType::BFloat16>(reasonIfUnsupported);
- BOOST_CHECK(result);
+ CHECK(result);
}
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/backends/neon/test/NeonLayerTests.cpp b/src/backends/neon/test/NeonLayerTests.cpp
index d12817e159..edc8cb995c 100644
--- a/src/backends/neon/test/NeonLayerTests.cpp
+++ b/src/backends/neon/test/NeonLayerTests.cpp
@@ -14,10 +14,10 @@
#include <backendsCommon/test/ActivationFixture.hpp>
#include <backendsCommon/test/LayerTests.hpp>
-#include <boost/test/unit_test.hpp>
-
-BOOST_AUTO_TEST_SUITE(Compute_ArmComputeNeon)
+#include <doctest/doctest.h>
+TEST_SUITE("Compute_ArmComputeNeon")
+{
using namespace armnn;
using FactoryType = NeonWorkloadFactory;
@@ -308,7 +308,7 @@ TensorInfo CreateOutputTensorInfo(const TensorInfo& inputInfo,
}
}
-BOOST_AUTO_TEST_CASE(DepthwiseConv2dUtils)
+TEST_CASE("DepthwiseConv2dUtils")
{
const DataType dataType = DataType::Float32;
@@ -323,73 +323,73 @@ BOOST_AUTO_TEST_CASE(DepthwiseConv2dUtils)
// Strides supported: 1,2,3
descriptor = MakeDepthwiseConv2dDesc(1, 1);
outputInfo = CreateOutputTensorInfo(inputInfo, weightsInfo3x3, descriptor, dataType);
- BOOST_TEST(layerSupport.IsDepthwiseConvolutionSupported(inputInfo, outputInfo, descriptor,
+ CHECK(layerSupport.IsDepthwiseConvolutionSupported(inputInfo, outputInfo, descriptor,
weightsInfo3x3, biasesInfo));
descriptor = MakeDepthwiseConv2dDesc(1, 2);
outputInfo = CreateOutputTensorInfo(inputInfo, weightsInfo3x3, descriptor, dataType);
- BOOST_TEST(layerSupport.IsDepthwiseConvolutionSupported(inputInfo, outputInfo, descriptor,
+ CHECK(layerSupport.IsDepthwiseConvolutionSupported(inputInfo, outputInfo, descriptor,
weightsInfo3x3, biasesInfo));
descriptor = MakeDepthwiseConv2dDesc(1, 3);
outputInfo = CreateOutputTensorInfo(inputInfo, weightsInfo3x3, descriptor, dataType);
- BOOST_TEST(layerSupport.IsDepthwiseConvolutionSupported(inputInfo, outputInfo, descriptor,
+ CHECK(layerSupport.IsDepthwiseConvolutionSupported(inputInfo, outputInfo, descriptor,
weightsInfo3x3, biasesInfo));
descriptor = MakeDepthwiseConv2dDesc(2, 1);
outputInfo = CreateOutputTensorInfo(inputInfo, weightsInfo3x3, descriptor, dataType);
- BOOST_TEST(layerSupport.IsDepthwiseConvolutionSupported(inputInfo, outputInfo, descriptor,
+ CHECK(layerSupport.IsDepthwiseConvolutionSupported(inputInfo, outputInfo, descriptor,
weightsInfo3x3, biasesInfo));
descriptor = MakeDepthwiseConv2dDesc(2, 2);
outputInfo = CreateOutputTensorInfo(inputInfo, weightsInfo3x3, descriptor, dataType);
- BOOST_TEST(layerSupport.IsDepthwiseConvolutionSupported(inputInfo, outputInfo, descriptor,
+ CHECK(layerSupport.IsDepthwiseConvolutionSupported(inputInfo, outputInfo, descriptor,
weightsInfo3x3, biasesInfo));
descriptor = MakeDepthwiseConv2dDesc(2, 3);
outputInfo = CreateOutputTensorInfo(inputInfo, weightsInfo3x3, descriptor, dataType);
- BOOST_TEST(layerSupport.IsDepthwiseConvolutionSupported(inputInfo, outputInfo, descriptor,
+ CHECK(layerSupport.IsDepthwiseConvolutionSupported(inputInfo, outputInfo, descriptor,
weightsInfo3x3, biasesInfo));
descriptor = MakeDepthwiseConv2dDesc(3, 1);
outputInfo = CreateOutputTensorInfo(inputInfo, weightsInfo3x3, descriptor, dataType);
- BOOST_TEST(layerSupport.IsDepthwiseConvolutionSupported(inputInfo, outputInfo, descriptor,
+ CHECK(layerSupport.IsDepthwiseConvolutionSupported(inputInfo, outputInfo, descriptor,
weightsInfo3x3, biasesInfo));
descriptor = MakeDepthwiseConv2dDesc(3, 2);
outputInfo = CreateOutputTensorInfo(inputInfo, weightsInfo3x3, descriptor, dataType);
- BOOST_TEST(layerSupport.IsDepthwiseConvolutionSupported(inputInfo, outputInfo, descriptor,
+ CHECK(layerSupport.IsDepthwiseConvolutionSupported(inputInfo, outputInfo, descriptor,
weightsInfo3x3, biasesInfo));
descriptor = MakeDepthwiseConv2dDesc(3, 3);
outputInfo = CreateOutputTensorInfo(inputInfo, weightsInfo3x3, descriptor, dataType);
- BOOST_TEST(layerSupport.IsDepthwiseConvolutionSupported(inputInfo, outputInfo, descriptor,
+ CHECK(layerSupport.IsDepthwiseConvolutionSupported(inputInfo, outputInfo, descriptor,
weightsInfo3x3, biasesInfo));
// Supported stride 4
descriptor = MakeDepthwiseConv2dDesc(4, 1);
outputInfo = CreateOutputTensorInfo(inputInfo, weightsInfo3x3, descriptor, dataType);
- BOOST_TEST(layerSupport.IsDepthwiseConvolutionSupported(inputInfo, outputInfo, descriptor,
+ CHECK(layerSupport.IsDepthwiseConvolutionSupported(inputInfo, outputInfo, descriptor,
weightsInfo3x3, biasesInfo));
// Supported weights shape 1x1
TensorInfo weightsInfo1x1({ 1, 1, 1, 1 }, DataType::Float32);
descriptor = MakeDepthwiseConv2dDesc(1, 1);
outputInfo = CreateOutputTensorInfo(inputInfo, weightsInfo1x1, descriptor, dataType);
- BOOST_TEST(layerSupport.IsDepthwiseConvolutionSupported(inputInfo, outputInfo, descriptor,
+ CHECK(layerSupport.IsDepthwiseConvolutionSupported(inputInfo, outputInfo, descriptor,
weightsInfo1x1, biasesInfo));
// Supported shape 2x2
TensorInfo weightsInfo2x2({ 1, 1, 2, 2 }, DataType::Float32);
descriptor = MakeDepthwiseConv2dDesc(1, 1);
outputInfo = CreateOutputTensorInfo(inputInfo, weightsInfo2x2, descriptor, dataType);
- BOOST_TEST(layerSupport.IsDepthwiseConvolutionSupported(inputInfo, outputInfo, descriptor,
+ CHECK(layerSupport.IsDepthwiseConvolutionSupported(inputInfo, outputInfo, descriptor,
weightsInfo2x2, biasesInfo));
// Asymmetric padding
descriptor = MakeDepthwiseConv2dDesc(1, 1, 1, 1, 2, 1, 2);
outputInfo = CreateOutputTensorInfo(inputInfo, weightsInfo3x3, descriptor, dataType);
- BOOST_TEST(layerSupport.IsDepthwiseConvolutionSupported(inputInfo, outputInfo, descriptor,
+ CHECK(layerSupport.IsDepthwiseConvolutionSupported(inputInfo, outputInfo, descriptor,
weightsInfo3x3, biasesInfo));
}
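// For illustration, the outputInfo shapes above follow the usual
// convolution arithmetic (CreateOutputTensorInfo is assumed to implement
// it): per spatial dimension,
//   out = (in + padLeft + padRight - kernel) / stride + 1
// e.g. a 3x3 kernel at stride 1 with no padding over a 10-element dimension
// gives (10 + 0 + 0 - 3) / 1 + 1 = 8 outputs.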
@@ -1498,4 +1498,4 @@ ARMNN_COMPARE_REF_FIXTURE_TEST_CASE_WITH_THF(CompareEluActivationWithReference,
#endif
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/backends/neon/test/NeonLayerTests_NDK_Bug.cpp b/src/backends/neon/test/NeonLayerTests_NDK_Bug.cpp
index 1fef43957d..5a65b155ef 100644
--- a/src/backends/neon/test/NeonLayerTests_NDK_Bug.cpp
+++ b/src/backends/neon/test/NeonLayerTests_NDK_Bug.cpp
@@ -8,8 +8,10 @@
#include <neon/NeonWorkloadFactory.hpp>
#include <test/UnitTests.hpp>
-BOOST_AUTO_TEST_SUITE(Compute_ArmComputeNeon)
+#include <doctest/doctest.h>
+TEST_SUITE("Compute_ArmComputeNeon")
+{
using namespace armnn;
using FactoryType = NeonWorkloadFactory;
@@ -44,4 +46,4 @@ ARMNN_COMPARE_REF_AUTO_TEST_CASE_WITH_THF(CompareSoftmaxBeta2WithReference, Comp
#endif
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/backends/neon/test/NeonMemCopyTests.cpp b/src/backends/neon/test/NeonMemCopyTests.cpp
index 2bb9e3d431..048509224b 100644
--- a/src/backends/neon/test/NeonMemCopyTests.cpp
+++ b/src/backends/neon/test/NeonMemCopyTests.cpp
@@ -12,44 +12,44 @@
#include <reference/RefWorkloadFactory.hpp>
#include <reference/test/RefWorkloadFactoryHelper.hpp>
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
-BOOST_AUTO_TEST_SUITE(NeonMemCopy)
-
-BOOST_AUTO_TEST_CASE(CopyBetweenCpuAndNeon)
+TEST_SUITE("NeonMemCopy")
+{
+TEST_CASE("CopyBetweenCpuAndNeon")
{
LayerTestResult<float, 4> result =
MemCopyTest<armnn::RefWorkloadFactory, armnn::NeonWorkloadFactory, armnn::DataType::Float32>(false);
auto predResult = CompareTensors(result.m_ActualData, result.m_ExpectedData,
result.m_ActualShape, result.m_ExpectedShape);
- BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+ CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
}
-BOOST_AUTO_TEST_CASE(CopyBetweenNeonAndCpu)
+TEST_CASE("CopyBetweenNeonAndCpu")
{
LayerTestResult<float, 4> result =
MemCopyTest<armnn::NeonWorkloadFactory, armnn::RefWorkloadFactory, armnn::DataType::Float32>(false);
auto predResult = CompareTensors(result.m_ActualData, result.m_ExpectedData,
result.m_ActualShape, result.m_ExpectedShape);
- BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+ CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
}
-BOOST_AUTO_TEST_CASE(CopyBetweenCpuAndNeonWithSubtensors)
+TEST_CASE("CopyBetweenCpuAndNeonWithSubtensors")
{
LayerTestResult<float, 4> result =
MemCopyTest<armnn::RefWorkloadFactory, armnn::NeonWorkloadFactory, armnn::DataType::Float32>(true);
auto predResult = CompareTensors(result.m_ActualData, result.m_ExpectedData,
result.m_ActualShape, result.m_ExpectedShape);
- BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+ CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
}
-BOOST_AUTO_TEST_CASE(CopyBetweenNeonAndCpuWithSubtensors)
+TEST_CASE("CopyBetweenNeonAndCpuWithSubtensors")
{
LayerTestResult<float, 4> result =
MemCopyTest<armnn::NeonWorkloadFactory, armnn::RefWorkloadFactory, armnn::DataType::Float32>(true);
auto predResult = CompareTensors(result.m_ActualData, result.m_ExpectedData,
result.m_ActualShape, result.m_ExpectedShape);
- BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+ CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
}
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/backends/neon/test/NeonOptimizedNetworkTests.cpp b/src/backends/neon/test/NeonOptimizedNetworkTests.cpp
index 4944c31d71..9b448b270d 100644
--- a/src/backends/neon/test/NeonOptimizedNetworkTests.cpp
+++ b/src/backends/neon/test/NeonOptimizedNetworkTests.cpp
@@ -10,11 +10,11 @@
#include <neon/NeonWorkloadFactory.hpp>
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
-BOOST_AUTO_TEST_SUITE(NeonOptimizedNetwork)
-
-BOOST_AUTO_TEST_CASE(OptimizeValidateCpuAccDeviceSupportLayerNoFallback)
+TEST_SUITE("NeonOptimizedNetwork")
+{
+TEST_CASE("OptimizeValidateCpuAccDeviceSupportLayerNoFallback")
{
// build up the structure of the network
armnn::INetworkPtr net(armnn::INetwork::Create());
@@ -30,7 +30,7 @@ BOOST_AUTO_TEST_CASE(OptimizeValidateCpuAccDeviceSupportLayerNoFallback)
std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec());
- BOOST_CHECK(optNet);
+ CHECK(optNet);
// validate workloads
armnn::NeonWorkloadFactory fact =
NeonWorkloadFactoryHelper::GetFactory(NeonWorkloadFactoryHelper::GetMemoryManager());
@@ -38,13 +38,13 @@ BOOST_AUTO_TEST_CASE(OptimizeValidateCpuAccDeviceSupportLayerNoFallback)
armnn::Graph& graph = GetGraphForTesting(optNet.get());
for (auto&& layer : graph)
{
- BOOST_CHECK(layer->GetBackendId() == armnn::Compute::CpuAcc);
- BOOST_CHECK_NO_THROW(
+ CHECK(layer->GetBackendId() == armnn::Compute::CpuAcc);
+ CHECK_NOTHROW(
layer->CreateWorkload(fact));
}
}
-BOOST_AUTO_TEST_CASE(OptimizeValidateDeviceNonSupportLayerNoFallback)
+TEST_CASE("OptimizeValidateDeviceNonSupportLayerNoFallback")
{
// build up the structure of the network
armnn::INetworkPtr net(armnn::INetwork::Create());
@@ -72,16 +72,16 @@ BOOST_AUTO_TEST_CASE(OptimizeValidateDeviceNonSupportLayerNoFallback)
try
{
Optimize(*net, backends, runtime->GetDeviceSpec(), armnn::OptimizerOptions(), errMessages);
- BOOST_FAIL("Should have thrown an exception.");
+ FAIL("Should have thrown an exception.");
}
catch (const armnn::InvalidArgumentException& e)
{
// Different exceptions are thrown on different backends
}
- BOOST_CHECK(errMessages.size() > 0);
+ CHECK(errMessages.size() > 0);
}
-BOOST_AUTO_TEST_CASE(FastMathEnabledTestOnCpuAcc)
+TEST_CASE("FastMathEnabledTestOnCpuAcc")
{
armnn::INetworkPtr net(armnn::INetwork::Create());
@@ -102,16 +102,16 @@ BOOST_AUTO_TEST_CASE(FastMathEnabledTestOnCpuAcc)
armnn::IOptimizedNetworkPtr optimizedNet = armnn::Optimize(
*net, backends, runtime->GetDeviceSpec(), optimizerOptions);
- BOOST_CHECK(optimizedNet);
+ CHECK(optimizedNet);
auto modelOptionsOut = GetModelOptionsForTesting(optimizedNet.get());
- BOOST_TEST(modelOptionsOut.size() == 1);
- BOOST_TEST(modelOptionsOut[0].GetOption(0).GetName() == "FastMathEnabled");
- BOOST_TEST(modelOptionsOut[0].GetOption(0).GetValue().AsBool() == true);
+ CHECK(modelOptionsOut.size() == 1);
+ CHECK(modelOptionsOut[0].GetOption(0).GetName() == "FastMathEnabled");
+ CHECK(modelOptionsOut[0].GetOption(0).GetValue().AsBool() == true);
}
-BOOST_AUTO_TEST_CASE(NumberOfThreadsTestOnCpuAcc)
+TEST_CASE("NumberOfThreadsTestOnCpuAcc")
{
armnn::INetworkPtr net(armnn::INetwork::Create());
@@ -134,15 +134,15 @@ BOOST_AUTO_TEST_CASE(NumberOfThreadsTestOnCpuAcc)
armnn::IOptimizedNetworkPtr optimizedNet = armnn::Optimize(
*net, backends, runtime->GetDeviceSpec(), optimizerOptions);
- BOOST_CHECK(optimizedNet);
+ CHECK(optimizedNet);
std::unique_ptr<armnn::Graph> graphPtr;
armnn::OptimizedNetworkImpl impl(std::move(graphPtr), optimizerOptions.m_ModelOptions);
auto modelOptionsOut = impl.GetModelOptions();
- BOOST_TEST(modelOptionsOut.size() == 1);
- BOOST_TEST(modelOptionsOut[0].GetOption(0).GetName() == "NumberOfThreads");
- BOOST_TEST(modelOptionsOut[0].GetOption(0).GetValue().AsUnsignedInt() == numberOfThreads);
+ CHECK(modelOptionsOut.size() == 1);
+ CHECK(modelOptionsOut[0].GetOption(0).GetName() == "NumberOfThreads");
+ CHECK(modelOptionsOut[0].GetOption(0).GetValue().AsUnsignedInt() == numberOfThreads);
}
-BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
+}
\ No newline at end of file
diff --git a/src/backends/neon/test/NeonRuntimeTests.cpp b/src/backends/neon/test/NeonRuntimeTests.cpp
index 27361dd43d..ee5666d5cd 100644
--- a/src/backends/neon/test/NeonRuntimeTests.cpp
+++ b/src/backends/neon/test/NeonRuntimeTests.cpp
@@ -10,11 +10,11 @@
#include <backendsCommon/test/RuntimeTestImpl.hpp>
#include <test/ProfilingTestUtils.hpp>
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
-BOOST_AUTO_TEST_SUITE(NeonRuntime)
-
-BOOST_AUTO_TEST_CASE(RuntimeValidateCpuAccDeviceSupportLayerNoFallback)
+TEST_SUITE("NeonRuntime")
+{
+TEST_CASE("RuntimeValidateCpuAccDeviceSupportLayerNoFallback")
{
// build up the structure of the network
armnn::INetworkPtr net(armnn::INetwork::Create());
@@ -30,17 +30,17 @@ BOOST_AUTO_TEST_CASE(RuntimeValidateCpuAccDeviceSupportLayerNoFallback)
std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec());
- BOOST_CHECK(optNet);
+ CHECK(optNet);
// Load it into the runtime. It should succeed.
armnn::NetworkId netId;
- BOOST_TEST(runtime->LoadNetwork(netId, std::move(optNet)) == armnn::Status::Success);
+ CHECK(runtime->LoadNetwork(netId, std::move(optNet)) == armnn::Status::Success);
}
#ifdef ARMNN_LEAK_CHECKING_ENABLED
-BOOST_AUTO_TEST_CASE(RuntimeMemoryLeaksCpuAcc)
+TEST_CASE("RuntimeMemoryLeaksCpuAcc")
{
- BOOST_TEST(ARMNN_LEAK_CHECKER_IS_ACTIVE());
+ CHECK(ARMNN_LEAK_CHECKER_IS_ACTIVE());
armnn::IRuntime::CreationOptions options;
armnn::RuntimeImpl runtime(options);
armnn::RuntimeLoadedNetworksReserve(&runtime);
@@ -54,21 +54,21 @@ BOOST_AUTO_TEST_CASE(RuntimeMemoryLeaksCpuAcc)
{
ARMNN_SCOPED_LEAK_CHECKER("LoadAndUnloadNetworkCpuAcc");
- BOOST_TEST(ARMNN_NO_LEAKS_IN_SCOPE());
+ CHECK(ARMNN_NO_LEAKS_IN_SCOPE());
// In the second run we check for all remaining memory
// in use after the network was unloaded. If there is any
// then it will be treated as a memory leak.
CreateAndDropDummyNetwork(backends, runtime);
- BOOST_TEST(ARMNN_NO_LEAKS_IN_SCOPE());
- BOOST_TEST(ARMNN_BYTES_LEAKED_IN_SCOPE() == 0);
- BOOST_TEST(ARMNN_OBJECTS_LEAKED_IN_SCOPE() == 0);
+ CHECK(ARMNN_NO_LEAKS_IN_SCOPE());
+ CHECK(ARMNN_BYTES_LEAKED_IN_SCOPE() == 0);
+ CHECK(ARMNN_OBJECTS_LEAKED_IN_SCOPE() == 0);
}
}
#endif
-BOOST_AUTO_TEST_CASE(ProfilingPostOptimisationStructureCpuAcc)
+TEST_CASE("ProfilingPostOptimisationStructureCpuAcc")
{
VerifyPostOptimisationStructureTestImpl(armnn::Compute::CpuAcc);
}
-BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
+}
\ No newline at end of file
diff --git a/src/backends/neon/test/NeonTensorHandleTests.cpp b/src/backends/neon/test/NeonTensorHandleTests.cpp
index 0e24e9505b..eabf3c8e9a 100644
--- a/src/backends/neon/test/NeonTensorHandleTests.cpp
+++ b/src/backends/neon/test/NeonTensorHandleTests.cpp
@@ -15,13 +15,14 @@
#include <arm_compute/runtime/Allocator.h>
#include <backendsCommon/test/CommonTestUtils.hpp>
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
#include <armnn/utility/Assert.hpp>
-BOOST_AUTO_TEST_SUITE(NeonTensorHandleTests)
+TEST_SUITE("NeonTensorHandleTests")
+{
using namespace armnn;
-BOOST_AUTO_TEST_CASE(NeonTensorHandleGetCapabilitiesNoPadding)
+TEST_CASE("NeonTensorHandleGetCapabilitiesNoPadding")
{
std::shared_ptr<NeonMemoryManager> memoryManager = std::make_shared<NeonMemoryManager>();
NeonTensorHandleFactory handleFactory(memoryManager);
@@ -43,18 +44,18 @@ BOOST_AUTO_TEST_CASE(NeonTensorHandleGetCapabilitiesNoPadding)
std::vector<Capability> capabilities = handleFactory.GetCapabilities(input,
softmax,
CapabilityClass::PaddingRequired);
- BOOST_TEST(capabilities.empty());
+ CHECK(capabilities.empty());
// No padding required for Softmax
capabilities = handleFactory.GetCapabilities(softmax, output, CapabilityClass::PaddingRequired);
- BOOST_TEST(capabilities.empty());
+ CHECK(capabilities.empty());
// No padding required for output
capabilities = handleFactory.GetCapabilities(output, nullptr, CapabilityClass::PaddingRequired);
- BOOST_TEST(capabilities.empty());
+ CHECK(capabilities.empty());
}
-BOOST_AUTO_TEST_CASE(NeonTensorHandleGetCapabilitiesPadding)
+TEST_CASE("NeonTensorHandleGetCapabilitiesPadding")
{
std::shared_ptr<NeonMemoryManager> memoryManager = std::make_shared<NeonMemoryManager>();
NeonTensorHandleFactory handleFactory(memoryManager);
@@ -75,20 +76,20 @@ BOOST_AUTO_TEST_CASE(NeonTensorHandleGetCapabilitiesPadding)
std::vector<Capability> capabilities = handleFactory.GetCapabilities(input,
pooling,
CapabilityClass::PaddingRequired);
- BOOST_TEST(capabilities.empty());
+ CHECK(capabilities.empty());
// No padding required for output
capabilities = handleFactory.GetCapabilities(output, nullptr, CapabilityClass::PaddingRequired);
- BOOST_TEST(capabilities.empty());
+ CHECK(capabilities.empty());
// Padding required for Pooling2d
capabilities = handleFactory.GetCapabilities(pooling, output, CapabilityClass::PaddingRequired);
- BOOST_TEST(capabilities.size() == 1);
- BOOST_TEST((capabilities[0].m_CapabilityClass == CapabilityClass::PaddingRequired));
- BOOST_TEST(capabilities[0].m_Value);
+ CHECK(capabilities.size() == 1);
+ CHECK((capabilities[0].m_CapabilityClass == CapabilityClass::PaddingRequired));
+ CHECK(capabilities[0].m_Value);
}
-BOOST_AUTO_TEST_CASE(ConcatOnXorYSubTensorsNoPaddingRequiredTest)
+TEST_CASE("ConcatOnXorYSubTensorsNoPaddingRequiredTest")
{
armnn::INetworkPtr net(armnn::INetwork::Create());
@@ -163,7 +164,7 @@ BOOST_AUTO_TEST_CASE(ConcatOnXorYSubTensorsNoPaddingRequiredTest)
}
}
-BOOST_AUTO_TEST_CASE(ConcatonXorYPaddingRequiredTest)
+TEST_CASE("ConcatonXorYPaddingRequiredTest")
{
armnn::INetworkPtr net(armnn::INetwork::Create());
@@ -246,7 +247,7 @@ BOOST_AUTO_TEST_CASE(ConcatonXorYPaddingRequiredTest)
ARMNN_ASSERT(numberOfSubTensors == 0);
}
-BOOST_AUTO_TEST_CASE(SplitteronXorYNoPaddingRequiredTest)
+TEST_CASE("SplitteronXorYNoPaddingRequiredTest")
{
using namespace armnn;
@@ -443,14 +444,14 @@ BOOST_AUTO_TEST_CASE(SplitteronXorYNoPaddingRequiredTest)
std::vector<float> out = outputStorage.at(it.first);
for (unsigned int i = 0; i < out.size(); ++i)
{
- BOOST_CHECK_MESSAGE(Compare<armnn::DataType::Float32>(it.second[i], out[i], tolerance) == true,
+ CHECK_MESSAGE(Compare<armnn::DataType::Float32>(it.second[i], out[i], tolerance) == true,
"Actual output: " << out[i] << ". Expected output:" << it.second[i]);
}
}
}
-BOOST_AUTO_TEST_CASE(SplitteronXorYPaddingRequiredTest)
+TEST_CASE("SplitteronXorYPaddingRequiredTest")
{
using namespace armnn;
@@ -618,14 +619,14 @@ BOOST_AUTO_TEST_CASE(SplitteronXorYPaddingRequiredTest)
std::vector<float> out = outputStorage.at(it.first);
for (unsigned int i = 0; i < out.size(); ++i)
{
- BOOST_CHECK_MESSAGE(Compare<armnn::DataType::Float32>(it.second[i], out[i], tolerance) == true,
+ CHECK_MESSAGE(Compare<armnn::DataType::Float32>(it.second[i], out[i], tolerance) == true,
"Actual output: " << out[i] << ". Expected output:" << it.second[i]);
}
}
}
-BOOST_AUTO_TEST_CASE(NeonTensorHandleFactoryMemoryManaged)
+TEST_CASE("NeonTensorHandleFactoryMemoryManaged")
{
std::shared_ptr<NeonMemoryManager> memoryManager = std::make_shared<NeonMemoryManager>(
std::make_unique<arm_compute::Allocator>(),
@@ -641,31 +642,31 @@ BOOST_AUTO_TEST_CASE(NeonTensorHandleFactoryMemoryManaged)
memoryManager->Acquire();
{
float* buffer = reinterpret_cast<float*>(handle->Map());
- BOOST_CHECK(buffer != nullptr); // Yields a valid pointer
+ CHECK(buffer != nullptr); // Yields a valid pointer
buffer[0] = 1.5f;
buffer[1] = 2.5f;
- BOOST_CHECK(buffer[0] == 1.5f); // Memory is writable and readable
- BOOST_CHECK(buffer[1] == 2.5f); // Memory is writable and readable
+ CHECK(buffer[0] == 1.5f); // Memory is writable and readable
+ CHECK(buffer[1] == 2.5f); // Memory is writable and readable
}
memoryManager->Release();
memoryManager->Acquire();
{
float* buffer = reinterpret_cast<float*>(handle->Map());
- BOOST_CHECK(buffer != nullptr); // Yields a valid pointer
+ CHECK(buffer != nullptr); // Yields a valid pointer
buffer[0] = 3.5f;
buffer[1] = 4.5f;
- BOOST_CHECK(buffer[0] == 3.5f); // Memory is writable and readable
- BOOST_CHECK(buffer[1] == 4.5f); // Memory is writable and readable
+ CHECK(buffer[0] == 3.5f); // Memory is writable and readable
+ CHECK(buffer[1] == 4.5f); // Memory is writable and readable
}
memoryManager->Release();
float testPtr[2] = { 2.5f, 5.5f };
// Cannot import as import is disabled
- BOOST_CHECK_THROW(handle->Import(static_cast<void*>(testPtr), MemorySource::Malloc), MemoryImportException);
+ CHECK_THROWS_AS(handle->Import(static_cast<void*>(testPtr), MemorySource::Malloc), MemoryImportException);
}
-BOOST_AUTO_TEST_CASE(NeonTensorHandleFactoryImport)
+TEST_CASE("NeonTensorHandleFactoryImport")
{
std::shared_ptr<NeonMemoryManager> memoryManager = std::make_shared<NeonMemoryManager>(
std::make_unique<arm_compute::Allocator>(),
@@ -680,25 +681,25 @@ BOOST_AUTO_TEST_CASE(NeonTensorHandleFactoryImport)
memoryManager->Acquire();
// No buffer allocated when import is enabled
- BOOST_CHECK((PolymorphicDowncast<NeonTensorHandle*>(handle.get()))->GetTensor().buffer() == nullptr);
+ CHECK((PolymorphicDowncast<NeonTensorHandle*>(handle.get()))->GetTensor().buffer() == nullptr);
float testPtr[2] = { 2.5f, 5.5f };
// Import should succeed
- BOOST_CHECK(handle->Import(static_cast<void*>(testPtr), MemorySource::Malloc));
+ CHECK(handle->Import(static_cast<void*>(testPtr), MemorySource::Malloc));
float* buffer = reinterpret_cast<float*>(handle->Map());
- BOOST_CHECK(buffer != nullptr); // Yields a valid pointer after import
- BOOST_CHECK(buffer == testPtr); // buffer is pointing to testPtr
+ CHECK(buffer != nullptr); // Yields a valid pointer after import
+ CHECK(buffer == testPtr); // buffer is pointing to testPtr
// Memory is writable and readable with the correct values
- BOOST_CHECK(buffer[0] == 2.5f);
- BOOST_CHECK(buffer[1] == 5.5f);
+ CHECK(buffer[0] == 2.5f);
+ CHECK(buffer[1] == 5.5f);
buffer[0] = 3.5f;
buffer[1] = 10.0f;
- BOOST_CHECK(buffer[0] == 3.5f);
- BOOST_CHECK(buffer[1] == 10.0f);
+ CHECK(buffer[0] == 3.5f);
+ CHECK(buffer[1] == 10.0f);
memoryManager->Release();
}
-BOOST_AUTO_TEST_CASE(NeonTensorHandleSupportsInPlaceComputation)
+TEST_CASE("NeonTensorHandleSupportsInPlaceComputation")
{
std::shared_ptr<NeonMemoryManager> memoryManager = std::make_shared<NeonMemoryManager>();
NeonTensorHandleFactory handleFactory(memoryManager);
@@ -707,4 +708,4 @@ BOOST_AUTO_TEST_CASE(NeonTensorHandleSupportsInPlaceComputation)
ARMNN_ASSERT(handleFactory.SupportsInPlaceComputation());
}
-BOOST_AUTO_TEST_SUITE_END()
+}
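Two conversion details in the file above are easy to miss. BOOST_CHECK_THROW maps to CHECK_THROWS_AS, and doctest's expression decomposition handles only a single top-level binary operator, so conditions built with || or && must be wrapped in an extra pair of parentheses (doctest rejects them at compile time otherwise); the same wrapping is applied to the enum comparison on m_CapabilityClass, presumably to reduce it to a plain bool before doctest attempts stringification. A self-contained sketch of these patterns; the Colour enum and Boom() helper are illustrative stand-ins, not ArmNN code.

#define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN
#include <doctest/doctest.h>

#include <stdexcept>

enum class Colour { Red, Green };

static void Boom() { throw std::runtime_error("boom"); }

TEST_CASE("AssertionConversionPatterns")
{
    // BOOST_CHECK_THROW(expr, Ex) becomes CHECK_THROWS_AS(expr, Ex).
    CHECK_THROWS_AS(Boom(), std::runtime_error);

    // Compound conditions need the extra pair of parentheses so the
    // expression collapses to a bool before doctest decomposes it.
    Colour c = Colour::Red;
    CHECK((c == Colour::Red || c == Colour::Green));

    // BOOST_CHECK_MESSAGE becomes CHECK_MESSAGE; the message argument
    // still accepts a stream-style chain of values.
    const int actual = 2;
    const int expected = 2;
    CHECK_MESSAGE(actual == expected,
                  "Actual output: " << actual << ". Expected output: " << expected);
}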
diff --git a/src/backends/neon/test/NeonTimerTest.cpp b/src/backends/neon/test/NeonTimerTest.cpp
index df014d5a9b..d2bb97c056 100644
--- a/src/backends/neon/test/NeonTimerTest.cpp
+++ b/src/backends/neon/test/NeonTimerTest.cpp
@@ -18,23 +18,23 @@
#include <backendsCommon/test/TensorCopyUtils.hpp>
#include <backendsCommon/test/WorkloadTestUtils.hpp>
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
#include <cstdlib>
#include <algorithm>
using namespace armnn;
-BOOST_AUTO_TEST_SUITE(NeonTimerInstrument)
-
+TEST_SUITE("NeonTimerInstrument")
+{
-BOOST_AUTO_TEST_CASE(NeonTimerGetName)
+TEST_CASE("NeonTimerGetName")
{
NeonTimer neonTimer;
- BOOST_CHECK_EQUAL(neonTimer.GetName(), "NeonKernelTimer");
+ CHECK_EQ(std::string(neonTimer.GetName()), "NeonKernelTimer");
}
-BOOST_AUTO_TEST_CASE(NeonTimerMeasure)
+TEST_CASE("NeonTimerMeasure")
{
NeonWorkloadFactory workloadFactory =
NeonWorkloadFactoryHelper::GetFactory(NeonWorkloadFactoryHelper::GetMemoryManager());
@@ -95,19 +95,19 @@ BOOST_AUTO_TEST_CASE(NeonTimerMeasure)
std::vector<Measurement> measurements = neonTimer.GetMeasurements();
- BOOST_CHECK(measurements.size() <= 2);
+ CHECK(measurements.size() <= 2);
if (measurements.size() > 1)
{
- BOOST_CHECK_EQUAL(measurements[0].m_Name, "NeonKernelTimer/0: NEFillBorderKernel");
- BOOST_CHECK(measurements[0].m_Value > 0.0);
+ CHECK_EQ(measurements[0].m_Name, "NeonKernelTimer/0: NEFillBorderKernel");
+ CHECK(measurements[0].m_Value > 0.0);
}
std::ostringstream oss_neon;
std::ostringstream oss_cpu;
oss_neon << "NeonKernelTimer/" << measurements.size()-1 << ": NEActivationLayerKernel";
oss_cpu << "NeonKernelTimer/" << measurements.size()-1 << ": CpuActivationKernel";
- BOOST_CHECK(measurements[measurements.size()-1].m_Name == oss_neon.str() ||
- measurements[measurements.size()-1].m_Name == oss_cpu.str());
- BOOST_CHECK(measurements[measurements.size()-1].m_Value > 0.0);
+ CHECK((measurements[measurements.size()-1].m_Name == oss_neon.str() ||
+ measurements[measurements.size()-1].m_Name == oss_cpu.str()));
+ CHECK(measurements[measurements.size()-1].m_Value > 0.0);
}
-BOOST_AUTO_TEST_SUITE_END()
+}
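One subtlety in the NeonTimerGetName conversion: BOOST_CHECK_EQUAL treats const char* operands as C strings, but doctest's CHECK_EQ compares raw char pointers by address unless DOCTEST_CONFIG_TREAT_CHAR_STAR_AS_STRING is defined, so the patch wraps GetName() in std::string to force a comparison by value. A minimal sketch of the pitfall; the free GetName() function is a stand-in for the NeonTimer member, not ArmNN code.

#define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN
#include <doctest/doctest.h>

#include <string>

static const char* GetName() { return "NeonKernelTimer"; }

TEST_CASE("CharPointerComparison")
{
    // Comparing two char pointers directly would test address equality,
    // which may pass or fail depending on literal pooling. Wrapping one
    // side in std::string compares the characters instead.
    CHECK_EQ(std::string(GetName()), "NeonKernelTimer");
}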