From 1625efc870f1a8b7c6e6382277ddbb245f91a294 Mon Sep 17 00:00:00 2001 From: Sadik Armagan Date: Thu, 10 Jun 2021 18:24:34 +0100 Subject: IVGCVSW-5963 'Move unit tests to new framework' * Used doctest in ArmNN unit tests Signed-off-by: Sadik Armagan Change-Id: Ia9cf5fc72775878885c5f864abf2c56b3a935f1a --- src/backends/reference/test/ArgMinMaxTests.cpp | 22 +- src/backends/reference/test/CMakeLists.txt | 1 + .../reference/test/RefCreateWorkloadTests.cpp | 300 ++++++++-------- .../test/RefDetectionPostProcessTests.cpp | 80 ++--- src/backends/reference/test/RefEndToEndTests.cpp | 395 ++++++++++----------- .../reference/test/RefJsonPrinterTests.cpp | 10 +- .../reference/test/RefLayerSupportTests.cpp | 119 ++++--- src/backends/reference/test/RefLayerTests.cpp | 49 ++- .../reference/test/RefMemoryManagerTests.cpp | 25 +- .../reference/test/RefOptimizedNetworkTests.cpp | 36 +- src/backends/reference/test/RefRuntimeTests.cpp | 19 +- .../reference/test/RefTensorHandleTests.cpp | 109 +++--- 12 files changed, 576 insertions(+), 589 deletions(-) (limited to 'src/backends/reference/test') diff --git a/src/backends/reference/test/ArgMinMaxTests.cpp b/src/backends/reference/test/ArgMinMaxTests.cpp index dce15b29ef..b79a108129 100644 --- a/src/backends/reference/test/ArgMinMaxTests.cpp +++ b/src/backends/reference/test/ArgMinMaxTests.cpp @@ -5,11 +5,11 @@ #include -#include +#include -BOOST_AUTO_TEST_SUITE(RefArgMinMax) - -BOOST_AUTO_TEST_CASE(ArgMinTest) +TEST_SUITE("RefArgMinMax") +{ +TEST_CASE("ArgMinTest") { const armnn::TensorInfo inputInfo({ 1, 2, 3 } , armnn::DataType::Float32); const armnn::TensorInfo outputInfo({ 1, 3 }, armnn::DataType::Signed64); @@ -25,14 +25,11 @@ BOOST_AUTO_TEST_CASE(ArgMinTest) armnn::ArgMinMaxFunction::Min, -2); - BOOST_CHECK_EQUAL_COLLECTIONS(outputValues.begin(), - outputValues.end(), - expectedValues.begin(), - expectedValues.end()); + CHECK(std::equal(outputValues.begin(), outputValues.end(), expectedValues.begin(), expectedValues.end())); } -BOOST_AUTO_TEST_CASE(ArgMaxTest) +TEST_CASE("ArgMaxTest") { const armnn::TensorInfo inputInfo({ 1, 2, 3 } , armnn::DataType::Float32); const armnn::TensorInfo outputInfo({ 1, 3 }, armnn::DataType::Signed64); @@ -48,11 +45,8 @@ BOOST_AUTO_TEST_CASE(ArgMaxTest) armnn::ArgMinMaxFunction::Max, -2); - BOOST_CHECK_EQUAL_COLLECTIONS(outputValues.begin(), - outputValues.end(), - expectedValues.begin(), - expectedValues.end()); + CHECK(std::equal(outputValues.begin(), outputValues.end(), expectedValues.begin(), expectedValues.end())); } -BOOST_AUTO_TEST_SUITE_END() \ No newline at end of file +} \ No newline at end of file diff --git a/src/backends/reference/test/CMakeLists.txt b/src/backends/reference/test/CMakeLists.txt index c71c9d70be..76541cfdaa 100644 --- a/src/backends/reference/test/CMakeLists.txt +++ b/src/backends/reference/test/CMakeLists.txt @@ -24,3 +24,4 @@ target_include_directories(armnnRefBackendUnitTests PRIVATE ${PROJECT_SOURCE_DIR target_include_directories(armnnRefBackendUnitTests PRIVATE ${PROJECT_SOURCE_DIR}/src/backends) target_include_directories(armnnRefBackendUnitTests PRIVATE ${PROJECT_SOURCE_DIR}/src/profiling) target_include_directories(armnnRefBackendUnitTests PRIVATE ${PROJECT_SOURCE_DIR}/profiling/common/include) +target_include_directories(armnnRefBackendUnitTests PRIVATE ${PROJECT_SOURCE_DIR}/third-party/doctest) diff --git a/src/backends/reference/test/RefCreateWorkloadTests.cpp b/src/backends/reference/test/RefCreateWorkloadTests.cpp index 0f86e7eeff..4293ef54f3 100644 --- 
a/src/backends/reference/test/RefCreateWorkloadTests.cpp +++ b/src/backends/reference/test/RefCreateWorkloadTests.cpp @@ -10,6 +10,8 @@ #include #include +#include + namespace { @@ -19,8 +21,8 @@ void CheckInputOutput(std::unique_ptr workload, const TensorInfo& inpu auto queueDescriptor = workload->GetData(); auto inputHandle = PolymorphicDowncast(queueDescriptor.m_Inputs[0]); auto outputHandle = PolymorphicDowncast(queueDescriptor.m_Outputs[0]); - BOOST_TEST((inputHandle->GetTensorInfo() == inputInfo)); - BOOST_TEST((outputHandle->GetTensorInfo() == outputInfo)); + CHECK((inputHandle->GetTensorInfo() == inputInfo)); + CHECK((outputHandle->GetTensorInfo() == outputInfo)); } template @@ -33,9 +35,9 @@ void CheckInputsOutput(std::unique_ptr workload, auto inputHandle0 = PolymorphicDowncast(queueDescriptor.m_Inputs[0]); auto inputHandle1 = PolymorphicDowncast(queueDescriptor.m_Inputs[1]); auto outputHandle = PolymorphicDowncast(queueDescriptor.m_Outputs[0]); - BOOST_TEST((inputHandle0->GetTensorInfo() == inputInfo0)); - BOOST_TEST((inputHandle1->GetTensorInfo() == inputInfo1)); - BOOST_TEST((outputHandle->GetTensorInfo() == outputInfo)); + CHECK((inputHandle0->GetTensorInfo() == inputInfo0)); + CHECK((inputHandle1->GetTensorInfo() == inputInfo1)); + CHECK((outputHandle->GetTensorInfo() == outputInfo)); } armnn::RefWorkloadFactory GetFactory() @@ -47,8 +49,8 @@ armnn::RefWorkloadFactory GetFactory() } -BOOST_AUTO_TEST_SUITE(CreateWorkloadRef) - +TEST_SUITE("CreateWorkloadRef") +{ template static void RefCreateActivationWorkloadTest() { @@ -62,12 +64,12 @@ static void RefCreateActivationWorkloadTest() TensorInfo({ 1, 1 }, DataType)); } -BOOST_AUTO_TEST_CASE(CreateActivationFloat32Workload) +TEST_CASE("CreateActivationFloat32Workload") { RefCreateActivationWorkloadTest(); } -BOOST_AUTO_TEST_CASE(CreateActivationUint8Workload) +TEST_CASE("CreateActivationUint8Workload") { RefCreateActivationWorkloadTest(); } @@ -89,7 +91,7 @@ static void RefCreateElementwiseWorkloadTest() TensorInfo({ 2, 3 }, DataType)); } -BOOST_AUTO_TEST_CASE(CreateSubtractionWorkloadWithBlobTest) +TEST_CASE("CreateSubtractionWorkloadWithBlobTest") { Graph graph; RefWorkloadFactory factory = GetFactory(); @@ -106,7 +108,7 @@ BOOST_AUTO_TEST_CASE(CreateSubtractionWorkloadWithBlobTest) TensorInfo({ 2, 3 }, DataType)); } -BOOST_AUTO_TEST_CASE(CreateAdditionWorkloadWithBlobTest) +TEST_CASE("CreateAdditionWorkloadWithBlobTest") { Graph graph; RefWorkloadFactory factory = GetFactory(); @@ -122,7 +124,7 @@ BOOST_AUTO_TEST_CASE(CreateAdditionWorkloadWithBlobTest) TensorInfo({ 2, 3 }, DataType)); } -BOOST_AUTO_TEST_CASE(CreateMultiplicationWorkloadWithBlobTest) +TEST_CASE("CreateMultiplicationWorkloadWithBlobTest") { Graph graph; RefWorkloadFactory factory = GetFactory(); @@ -138,7 +140,7 @@ BOOST_AUTO_TEST_CASE(CreateMultiplicationWorkloadWithBlobTest) TensorInfo({2, 3}, DataType)); } -BOOST_AUTO_TEST_CASE(CreateAdditionFloatWorkload) +TEST_CASE("CreateAdditionFloatWorkload") { RefCreateElementwiseWorkloadTest, AdditionQueueDescriptor, @@ -146,7 +148,7 @@ BOOST_AUTO_TEST_CASE(CreateAdditionFloatWorkload) armnn::DataType::Float32>(); } -BOOST_AUTO_TEST_CASE(CreateAdditionUint8Workload) +TEST_CASE("CreateAdditionUint8Workload") { RefCreateElementwiseWorkloadTest, AdditionQueueDescriptor, @@ -154,7 +156,7 @@ BOOST_AUTO_TEST_CASE(CreateAdditionUint8Workload) armnn::DataType::QAsymmU8>(); } -BOOST_AUTO_TEST_CASE(CreateAdditionInt16Workload) +TEST_CASE("CreateAdditionInt16Workload") { RefCreateElementwiseWorkloadTest, 
AdditionQueueDescriptor, @@ -162,7 +164,7 @@ BOOST_AUTO_TEST_CASE(CreateAdditionInt16Workload) armnn::DataType::QSymmS16>(); } -BOOST_AUTO_TEST_CASE(CreateAdditionInt32Workload) +TEST_CASE("CreateAdditionInt32Workload") { RefCreateElementwiseWorkloadTest, AdditionQueueDescriptor, @@ -170,7 +172,7 @@ BOOST_AUTO_TEST_CASE(CreateAdditionInt32Workload) armnn::DataType::Signed32>(); } -BOOST_AUTO_TEST_CASE(CreateSubtractionFloat32Workload) +TEST_CASE("CreateSubtractionFloat32Workload") { RefCreateElementwiseWorkloadTest, SubtractionQueueDescriptor, @@ -178,7 +180,7 @@ BOOST_AUTO_TEST_CASE(CreateSubtractionFloat32Workload) armnn::DataType::Float32>(); } -BOOST_AUTO_TEST_CASE(CreateSubtractionFloat16Workload) +TEST_CASE("CreateSubtractionFloat16Workload") { RefCreateElementwiseWorkloadTest, SubtractionQueueDescriptor, @@ -186,7 +188,7 @@ BOOST_AUTO_TEST_CASE(CreateSubtractionFloat16Workload) armnn::DataType::Float16>(); } -BOOST_AUTO_TEST_CASE(CreateSubtractionUint8Workload) +TEST_CASE("CreateSubtractionUint8Workload") { RefCreateElementwiseWorkloadTest, SubtractionQueueDescriptor, @@ -194,7 +196,7 @@ BOOST_AUTO_TEST_CASE(CreateSubtractionUint8Workload) armnn::DataType::QAsymmU8>(); } -BOOST_AUTO_TEST_CASE(CreateSubtractionInt16Workload) +TEST_CASE("CreateSubtractionInt16Workload") { RefCreateElementwiseWorkloadTest, SubtractionQueueDescriptor, @@ -202,7 +204,7 @@ BOOST_AUTO_TEST_CASE(CreateSubtractionInt16Workload) armnn::DataType::QSymmS16>(); } -BOOST_AUTO_TEST_CASE(CreateSubtractionInt32Workload) +TEST_CASE("CreateSubtractionInt32Workload") { RefCreateElementwiseWorkloadTest, SubtractionQueueDescriptor, @@ -210,7 +212,7 @@ BOOST_AUTO_TEST_CASE(CreateSubtractionInt32Workload) armnn::DataType::Signed32>(); } -BOOST_AUTO_TEST_CASE(CreateMultiplicationFloatWorkload) +TEST_CASE("CreateMultiplicationFloatWorkload") { RefCreateElementwiseWorkloadTest, MultiplicationQueueDescriptor, @@ -218,7 +220,7 @@ BOOST_AUTO_TEST_CASE(CreateMultiplicationFloatWorkload) armnn::DataType::Float32>(); } -BOOST_AUTO_TEST_CASE(CreateMultiplicationUint8Workload) +TEST_CASE("CreateMultiplicationUint8Workload") { RefCreateElementwiseWorkloadTest, MultiplicationQueueDescriptor, @@ -226,7 +228,7 @@ BOOST_AUTO_TEST_CASE(CreateMultiplicationUint8Workload) armnn::DataType::QAsymmU8>(); } -BOOST_AUTO_TEST_CASE(CreateMultiplicationInt16Workload) +TEST_CASE("CreateMultiplicationInt16Workload") { RefCreateElementwiseWorkloadTest, MultiplicationQueueDescriptor, @@ -234,7 +236,7 @@ BOOST_AUTO_TEST_CASE(CreateMultiplicationInt16Workload) armnn::DataType::QSymmS16>(); } -BOOST_AUTO_TEST_CASE(CreateMultiplicationInt32Workload) +TEST_CASE("CreateMultiplicationInt32Workload") { RefCreateElementwiseWorkloadTest, MultiplicationQueueDescriptor, @@ -242,7 +244,7 @@ BOOST_AUTO_TEST_CASE(CreateMultiplicationInt32Workload) armnn::DataType::Signed32>(); } -BOOST_AUTO_TEST_CASE(CreateDivisionFloat32Workload) +TEST_CASE("CreateDivisionFloat32Workload") { RefCreateElementwiseWorkloadTest, DivisionQueueDescriptor, @@ -250,7 +252,7 @@ BOOST_AUTO_TEST_CASE(CreateDivisionFloat32Workload) armnn::DataType::Float32>(); } -BOOST_AUTO_TEST_CASE(CreateDivisionFloat16Workload) +TEST_CASE("CreateDivisionFloat16Workload") { RefCreateElementwiseWorkloadTest, DivisionQueueDescriptor, @@ -258,7 +260,7 @@ BOOST_AUTO_TEST_CASE(CreateDivisionFloat16Workload) armnn::DataType::Float16>(); } -BOOST_AUTO_TEST_CASE(CreateDivisionUint8Workload) +TEST_CASE("CreateDivisionUint8Workload") { RefCreateElementwiseWorkloadTest, DivisionQueueDescriptor, @@ -266,7 +268,7 @@ 
BOOST_AUTO_TEST_CASE(CreateDivisionUint8Workload) armnn::DataType::QAsymmU8>(); } -BOOST_AUTO_TEST_CASE(CreateDivisionInt16Workload) +TEST_CASE("CreateDivisionInt16Workload") { RefCreateElementwiseWorkloadTest, DivisionQueueDescriptor, @@ -274,7 +276,7 @@ BOOST_AUTO_TEST_CASE(CreateDivisionInt16Workload) armnn::DataType::QSymmS16>(); } -BOOST_AUTO_TEST_CASE(CreateDivisionInt32Workload) +TEST_CASE("CreateDivisionInt32Workload") { RefCreateElementwiseWorkloadTest, DivisionQueueDescriptor, @@ -311,7 +313,7 @@ static void RefCreateBatchNormalizationWorkloadTest(DataLayout dataLayout) CheckInputOutput(std::move(workload), TensorInfo(inputShape, DataType), TensorInfo(outputShape, DataType)); } -BOOST_AUTO_TEST_CASE(CreateBatchNormalizationWithBlobFloat32Workload) +TEST_CASE("CreateBatchNormalizationWithBlobFloat32Workload") { Graph graph; RefWorkloadFactory factory = GetFactory(); @@ -329,55 +331,55 @@ BOOST_AUTO_TEST_CASE(CreateBatchNormalizationWithBlobFloat32Workload) CheckInputOutput(std::move(workload), TensorInfo(inputShape, dataType), TensorInfo(outputShape, dataType)); } -BOOST_AUTO_TEST_CASE(CreateBatchNormalizationFloat32Workload) +TEST_CASE("CreateBatchNormalizationFloat32Workload") { RefCreateBatchNormalizationWorkloadTest (DataLayout::NCHW); } -BOOST_AUTO_TEST_CASE(CreateBatchNormalizationFloat32WorkloadNhwc) +TEST_CASE("CreateBatchNormalizationFloat32WorkloadNhwc") { RefCreateBatchNormalizationWorkloadTest (DataLayout::NHWC); } -BOOST_AUTO_TEST_CASE(CreateBatchNormalizationFloat16Workload) +TEST_CASE("CreateBatchNormalizationFloat16Workload") { RefCreateBatchNormalizationWorkloadTest (DataLayout::NCHW); } -BOOST_AUTO_TEST_CASE(CreateBatchNormalizationFloat16WorkloadNhwc) +TEST_CASE("CreateBatchNormalizationFloat16WorkloadNhwc") { RefCreateBatchNormalizationWorkloadTest (DataLayout::NHWC); } -BOOST_AUTO_TEST_CASE(CreateBatchNormalizationUint8Workload) +TEST_CASE("CreateBatchNormalizationUint8Workload") { RefCreateBatchNormalizationWorkloadTest (DataLayout::NCHW); } -BOOST_AUTO_TEST_CASE(CreateBatchNormalizationUint8WorkloadNhwc) +TEST_CASE("CreateBatchNormalizationUint8WorkloadNhwc") { RefCreateBatchNormalizationWorkloadTest (DataLayout::NHWC); } -BOOST_AUTO_TEST_CASE(CreateBatchNormalizationInt16Workload) +TEST_CASE("CreateBatchNormalizationInt16Workload") { RefCreateBatchNormalizationWorkloadTest (DataLayout::NCHW); } -BOOST_AUTO_TEST_CASE(CreateBatchNormalizationInt16WorkloadNhwc) +TEST_CASE("CreateBatchNormalizationInt16WorkloadNhwc") { RefCreateBatchNormalizationWorkloadTest (DataLayout::NHWC); } -BOOST_AUTO_TEST_CASE(CreateConvertFp16ToFp32Float32Workload) +TEST_CASE("CreateConvertFp16ToFp32Float32Workload") { Graph graph; RefWorkloadFactory factory = GetFactory(); @@ -388,7 +390,7 @@ BOOST_AUTO_TEST_CASE(CreateConvertFp16ToFp32Float32Workload) std::move(workload), TensorInfo({1, 3, 2, 3}, DataType::Float16), TensorInfo({1, 3, 2, 3}, DataType::Float32)); } -BOOST_AUTO_TEST_CASE(CreateConvertFp32ToFp16Float16Workload) +TEST_CASE("CreateConvertFp32ToFp16Float16Workload") { Graph graph; RefWorkloadFactory factory = GetFactory(); @@ -417,17 +419,17 @@ static void RefCreateConvolution2dWorkloadTest(DataLayout dataLayout = DataLayou TensorInfo(outputShape, DataType::Float32)); } -BOOST_AUTO_TEST_CASE(CreateConvolution2dFloatNchwWorkload) +TEST_CASE("CreateConvolution2dFloatNchwWorkload") { RefCreateConvolution2dWorkloadTest(DataLayout::NCHW); } -BOOST_AUTO_TEST_CASE(CreateConvolution2dFloatNhwcWorkload) +TEST_CASE("CreateConvolution2dFloatNhwcWorkload") { 
RefCreateConvolution2dWorkloadTest(DataLayout::NHWC); } -BOOST_AUTO_TEST_CASE(CreateConvolution2dWithBlobWorkload) +TEST_CASE("CreateConvolution2dWithBlobWorkload") { DataLayout dataLayout = DataLayout::NHWC; Graph graph; @@ -464,12 +466,12 @@ static void RefCreateDepthwiseConvolutionWorkloadTest(DataLayout dataLayout) TensorInfo(outputShape, DataType::Float32)); } -BOOST_AUTO_TEST_CASE(CreateDepthwiseConvolutionFloat32NhwcWorkload) +TEST_CASE("CreateDepthwiseConvolutionFloat32NhwcWorkload") { RefCreateDepthwiseConvolutionWorkloadTest(DataLayout::NHWC); } -BOOST_AUTO_TEST_CASE(RefCreateFullyConnectedWithBlobWorkloadTest) +TEST_CASE("RefCreateFullyConnectedWithBlobWorkloadTest") { Graph graph; RefWorkloadFactory factory = GetFactory(); @@ -499,17 +501,17 @@ static void RefCreateFullyConnectedWorkloadTest() TensorInfo({ 3, 7 }, DataType, outputQScale)); } -BOOST_AUTO_TEST_CASE(CreateFullyConnectedWorkloadFloat32) +TEST_CASE("CreateFullyConnectedWorkloadFloat32") { RefCreateFullyConnectedWorkloadTest(); } -BOOST_AUTO_TEST_CASE(CreateFullyConnectedWorkloadQuantisedAsymm8) +TEST_CASE("CreateFullyConnectedWorkloadQuantisedAsymm8") { RefCreateFullyConnectedWorkloadTest(); } -BOOST_AUTO_TEST_CASE(CreateFullyConnectedWorkloadQuantisedSymm16) +TEST_CASE("CreateFullyConnectedWorkloadQuantisedSymm16") { RefCreateFullyConnectedWorkloadTest(); } @@ -541,32 +543,32 @@ static void RefCreateNormalizationWorkloadTest(DataLayout dataLayout) CheckInputOutput(std::move(workload), TensorInfo(inputShape, DataType), TensorInfo(outputShape, DataType)); } -BOOST_AUTO_TEST_CASE(CreateRefNormalizationFloat32NchwWorkload) +TEST_CASE("CreateRefNormalizationFloat32NchwWorkload") { RefCreateNormalizationWorkloadTest(DataLayout::NCHW); } -BOOST_AUTO_TEST_CASE(CreateRefNormalizationFloat32NhwcWorkload) +TEST_CASE("CreateRefNormalizationFloat32NhwcWorkload") { RefCreateNormalizationWorkloadTest(DataLayout::NHWC); } -BOOST_AUTO_TEST_CASE(CreateRefNormalizationUint8NchwWorkload) +TEST_CASE("CreateRefNormalizationUint8NchwWorkload") { RefCreateNormalizationWorkloadTest(DataLayout::NCHW); } -BOOST_AUTO_TEST_CASE(CreateRefNormalizationUint8NhwcWorkload) +TEST_CASE("CreateRefNormalizationUint8NhwcWorkload") { RefCreateNormalizationWorkloadTest(DataLayout::NHWC); } -BOOST_AUTO_TEST_CASE(CreateRefNormalizationInt16NchwWorkload) +TEST_CASE("CreateRefNormalizationInt16NchwWorkload") { RefCreateNormalizationWorkloadTest(DataLayout::NCHW); } -BOOST_AUTO_TEST_CASE(CreateRefNormalizationInt16NhwcWorkload) +TEST_CASE("CreateRefNormalizationInt16NhwcWorkload") { RefCreateNormalizationWorkloadTest(DataLayout::NHWC); } @@ -599,32 +601,32 @@ static void RefCreatePooling2dWorkloadTest(DataLayout dataLayout) TensorInfo(outputShape, DataType)); } -BOOST_AUTO_TEST_CASE(CreatePooling2dFloat32Workload) +TEST_CASE("CreatePooling2dFloat32Workload") { RefCreatePooling2dWorkloadTest(DataLayout::NCHW); } -BOOST_AUTO_TEST_CASE(CreatePooling2dFloat32NhwcWorkload) +TEST_CASE("CreatePooling2dFloat32NhwcWorkload") { RefCreatePooling2dWorkloadTest(DataLayout::NHWC); } -BOOST_AUTO_TEST_CASE(CreatePooling2dUint8Workload) +TEST_CASE("CreatePooling2dUint8Workload") { RefCreatePooling2dWorkloadTest(DataLayout::NCHW); } -BOOST_AUTO_TEST_CASE(CreatePooling2dUint8NhwcWorkload) +TEST_CASE("CreatePooling2dUint8NhwcWorkload") { RefCreatePooling2dWorkloadTest(DataLayout::NHWC); } -BOOST_AUTO_TEST_CASE(CreatePooling2dInt16Workload) +TEST_CASE("CreatePooling2dInt16Workload") { RefCreatePooling2dWorkloadTest(DataLayout::NCHW); } 
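For reference, a minimal sketch of the Boost.Test -> doctest mapping this patch applies. Test names and data are illustrative, not taken from the ArmNN sources, and the <doctest/doctest.h> path is assumed from the third-party/doctest include directory added to CMakeLists.txt.

    // Minimal sketch of the macro mapping applied throughout this patch.
    #define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN   // standalone main, only for this sketch
    #include <doctest/doctest.h>

    #include <algorithm>
    #include <vector>

    TEST_SUITE("MappingSketch")            // was: BOOST_AUTO_TEST_SUITE(MappingSketch)
    {
    TEST_CASE("SimpleChecks")              // was: BOOST_AUTO_TEST_CASE(SimpleChecks)
    {
        std::vector<int> actual   = { 1, 2, 3 };
        std::vector<int> expected = { 1, 2, 3 };

        CHECK(actual.size() == 3);         // was: BOOST_TEST(actual.size() == 3)

        // was: BOOST_CHECK_EQUAL_COLLECTIONS(actual.begin(), actual.end(),
        //                                    expected.begin(), expected.end());
        CHECK(std::equal(actual.begin(), actual.end(), expected.begin(), expected.end()));
    }
    }                                      // was: BOOST_AUTO_TEST_SUITE_END()

Note that the four-iterator std::equal overload does account for ranges of different length, but it only yields pass/fail, whereas BOOST_CHECK_EQUAL_COLLECTIONS reported the first mismatching elements.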
-BOOST_AUTO_TEST_CASE(CreatePooling2dInt16NhwcWorkload) +TEST_CASE("CreatePooling2dInt16NhwcWorkload") { RefCreatePooling2dWorkloadTest(DataLayout::NHWC); } @@ -655,22 +657,22 @@ static void RefCreateSoftmaxWorkloadTest() tensorInfo); } -BOOST_AUTO_TEST_CASE(CreateSoftmaxFloat32Workload) +TEST_CASE("CreateSoftmaxFloat32Workload") { RefCreateSoftmaxWorkloadTest(); } -BOOST_AUTO_TEST_CASE(CreateSoftmaxFloat16Workload) +TEST_CASE("CreateSoftmaxFloat16Workload") { RefCreateSoftmaxWorkloadTest(); } -BOOST_AUTO_TEST_CASE(CreateSoftmaxQuantisedAsymm8Workload) +TEST_CASE("CreateSoftmaxQuantisedAsymm8Workload") { RefCreateSoftmaxWorkloadTest(); } -BOOST_AUTO_TEST_CASE(CreateSoftmaxQuantisedSymm16Workload) +TEST_CASE("CreateSoftmaxQuantisedSymm16Workload") { RefCreateSoftmaxWorkloadTest(); } @@ -685,29 +687,29 @@ static void RefCreateSplitterWorkloadTest() // Checks that outputs are as we expect them (see definition of CreateSplitterWorkloadTest). SplitterQueueDescriptor queueDescriptor = workload->GetData(); auto inputHandle = PolymorphicDowncast(queueDescriptor.m_Inputs[0]); - BOOST_TEST((inputHandle->GetTensorInfo() == TensorInfo({ 5, 7, 7 }, DataType))); + CHECK((inputHandle->GetTensorInfo() == TensorInfo({ 5, 7, 7 }, DataType))); auto outputHandle0 = PolymorphicDowncast(queueDescriptor.m_Outputs[0]); - BOOST_TEST((outputHandle0->GetTensorInfo() == TensorInfo({ 1, 7, 7 }, DataType))); + CHECK((outputHandle0->GetTensorInfo() == TensorInfo({ 1, 7, 7 }, DataType))); auto outputHandle1 = PolymorphicDowncast(queueDescriptor.m_Outputs[1]); - BOOST_TEST((outputHandle1->GetTensorInfo() == TensorInfo({ 2, 7, 7 }, DataType))); + CHECK((outputHandle1->GetTensorInfo() == TensorInfo({ 2, 7, 7 }, DataType))); auto outputHandle2 = PolymorphicDowncast(queueDescriptor.m_Outputs[2]); - BOOST_TEST((outputHandle2->GetTensorInfo() == TensorInfo({ 2, 7, 7 }, DataType))); + CHECK((outputHandle2->GetTensorInfo() == TensorInfo({ 2, 7, 7 }, DataType))); } -BOOST_AUTO_TEST_CASE(CreateSplitterFloat32Workload) +TEST_CASE("CreateSplitterFloat32Workload") { RefCreateSplitterWorkloadTest(); } -BOOST_AUTO_TEST_CASE(CreateSplitterFloat16Workload) +TEST_CASE("CreateSplitterFloat16Workload") { RefCreateSplitterWorkloadTest(); } -BOOST_AUTO_TEST_CASE(CreateSplitterUint8Workload) +TEST_CASE("CreateSplitterUint8Workload") { RefCreateSplitterWorkloadTest(); } @@ -735,27 +737,27 @@ static void RefCreateSplitterConcatWorkloadTest() armnn::RefTensorHandle* mIn0 = dynamic_cast(wlConcat->GetData().m_Inputs[0]); armnn::RefTensorHandle* mIn1 = dynamic_cast(wlConcat->GetData().m_Inputs[1]); - BOOST_TEST(sOut0); - BOOST_TEST(sOut1); - BOOST_TEST(mIn0); - BOOST_TEST(mIn1); + CHECK(sOut0); + CHECK(sOut1); + CHECK(mIn0); + CHECK(mIn1); bool validDataPointers = (sOut0 == mIn1) && (sOut1 == mIn0); - BOOST_TEST(validDataPointers); + CHECK(validDataPointers); } -BOOST_AUTO_TEST_CASE(CreateSplitterConcatFloat32) +TEST_CASE("CreateSplitterConcatFloat32") { RefCreateSplitterConcatWorkloadTest(); } -BOOST_AUTO_TEST_CASE(CreateSplitterConcatFloat16) +TEST_CASE("CreateSplitterConcatFloat16") { RefCreateSplitterConcatWorkloadTest(); } -BOOST_AUTO_TEST_CASE(CreateSplitterConcatUint8) +TEST_CASE("CreateSplitterConcatUint8") { RefCreateSplitterConcatWorkloadTest(); } @@ -785,26 +787,26 @@ static void RefCreateSingleOutputMultipleInputsTest() armnn::RefTensorHandle* activ1_1Im = dynamic_cast(wlActiv1_1->GetData().m_Inputs[0]); - BOOST_TEST(sOut0); - BOOST_TEST(sOut1); - BOOST_TEST(activ0_0Im); - BOOST_TEST(activ0_1Im); - BOOST_TEST(activ1_0Im); - 
BOOST_TEST(activ1_1Im); + CHECK(sOut0); + CHECK(sOut1); + CHECK(activ0_0Im); + CHECK(activ0_1Im); + CHECK(activ1_0Im); + CHECK(activ1_1Im); bool validDataPointers = (sOut0 == activ0_0Im) && (sOut0 == activ0_1Im) && (sOut1 == activ1_0Im) && (sOut1 == activ1_1Im); - BOOST_TEST(validDataPointers); + CHECK(validDataPointers); } -BOOST_AUTO_TEST_CASE(CreateSingleOutputMultipleInputsFloat32) +TEST_CASE("CreateSingleOutputMultipleInputsFloat32") { RefCreateSingleOutputMultipleInputsTest(); } -BOOST_AUTO_TEST_CASE(CreateSingleOutputMultipleInputsUint8) +TEST_CASE("CreateSingleOutputMultipleInputsUint8") { RefCreateSingleOutputMultipleInputsTest(); @@ -838,27 +840,27 @@ static void RefCreateResizeBilinearTest(DataLayout dataLayout) TensorInfo(outputShape, DataType)); } -BOOST_AUTO_TEST_CASE(CreateResizeBilinearFloat32) +TEST_CASE("CreateResizeBilinearFloat32") { RefCreateResizeBilinearTest(DataLayout::NCHW); } -BOOST_AUTO_TEST_CASE(CreateResizeBilinearFloat16) +TEST_CASE("CreateResizeBilinearFloat16") { RefCreateResizeBilinearTest(DataLayout::NCHW); } -BOOST_AUTO_TEST_CASE(CreateResizeBilinearUint8) +TEST_CASE("CreateResizeBilinearUint8") { RefCreateResizeBilinearTest(DataLayout::NCHW); } -BOOST_AUTO_TEST_CASE(CreateResizeBilinearQuantisedAsymm16) +TEST_CASE("CreateResizeBilinearQuantisedAsymm16") { RefCreateResizeBilinearTest(DataLayout::NCHW); } -BOOST_AUTO_TEST_CASE(CreateResizeBilinearFloat32Nhwc) +TEST_CASE("CreateResizeBilinearFloat32Nhwc") { RefCreateResizeBilinearTest(DataLayout::NHWC); } @@ -876,22 +878,22 @@ static void RefCreateBatchToSpaceNdTest() TensorInfo({ 1, 1, 1, 1 }, DataType)); } -BOOST_AUTO_TEST_CASE(CreateBatchToSpaceNdFloat32) +TEST_CASE("CreateBatchToSpaceNdFloat32") { RefCreateBatchToSpaceNdTest(); } -BOOST_AUTO_TEST_CASE(CreateBatchToSpaceNdFloat16) +TEST_CASE("CreateBatchToSpaceNdFloat16") { RefCreateBatchToSpaceNdTest(); } -BOOST_AUTO_TEST_CASE(CreateBatchToSpaceNdUint8) +TEST_CASE("CreateBatchToSpaceNdUint8") { RefCreateBatchToSpaceNdTest(); } -BOOST_AUTO_TEST_CASE(CreateBatchToSpaceNdQSymm16) +TEST_CASE("CreateBatchToSpaceNdQSymm16") { RefCreateBatchToSpaceNdTest(); } @@ -924,32 +926,32 @@ static void RefCreateL2NormalizationTest(DataLayout dataLayout) CheckInputOutput(std::move(workload), TensorInfo(inputShape, DataType), TensorInfo(outputShape, DataType)); } -BOOST_AUTO_TEST_CASE(CreateL2NormalizationFloat32) +TEST_CASE("CreateL2NormalizationFloat32") { RefCreateL2NormalizationTest(DataLayout::NCHW); } -BOOST_AUTO_TEST_CASE(CreateL2NormalizationFloat32Nhwc) +TEST_CASE("CreateL2NormalizationFloat32Nhwc") { RefCreateL2NormalizationTest(DataLayout::NHWC); } -BOOST_AUTO_TEST_CASE(CreateL2NormalizationInt16) +TEST_CASE("CreateL2NormalizationInt16") { RefCreateL2NormalizationTest(DataLayout::NCHW); } -BOOST_AUTO_TEST_CASE(CreateL2NormalizationInt16Nhwc) +TEST_CASE("CreateL2NormalizationInt16Nhwc") { RefCreateL2NormalizationTest(DataLayout::NHWC); } -BOOST_AUTO_TEST_CASE(CreateL2NormalizationUint8) +TEST_CASE("CreateL2NormalizationUint8") { RefCreateL2NormalizationTest(DataLayout::NCHW); } -BOOST_AUTO_TEST_CASE(CreateL2NormalizationUint8Nhwc) +TEST_CASE("CreateL2NormalizationUint8Nhwc") { RefCreateL2NormalizationTest(DataLayout::NHWC); } @@ -968,17 +970,17 @@ static void RefCreateReshapeWorkloadTest() TensorInfo({ 1, 4 }, DataType)); } -BOOST_AUTO_TEST_CASE(CreateReshapeWorkloadFloat32) +TEST_CASE("CreateReshapeWorkloadFloat32") { RefCreateReshapeWorkloadTest(); } -BOOST_AUTO_TEST_CASE(CreateReshapeWorkloadQuantisedAsymm8) +TEST_CASE("CreateReshapeWorkloadQuantisedAsymm8") 
{ RefCreateReshapeWorkloadTest(); } -BOOST_AUTO_TEST_CASE(CreateReshapeWorkloadQuantisedSymm16) +TEST_CASE("CreateReshapeWorkloadQuantisedSymm16") { RefCreateReshapeWorkloadTest(); } @@ -997,52 +999,52 @@ static void RefCreateConcatWorkloadTest(const armnn::TensorShape& outputShape, TensorInfo(outputShape, DataType)); } -BOOST_AUTO_TEST_CASE(CreateConcatDim0Float32Workload) +TEST_CASE("CreateConcatDim0Float32Workload") { RefCreateConcatWorkloadTest({ 4, 3, 2, 5 }, 0); } -BOOST_AUTO_TEST_CASE(CreateConcatDim0Float16Workload) +TEST_CASE("CreateConcatDim0Float16Workload") { RefCreateConcatWorkloadTest({ 4, 3, 2, 5 }, 0); } -BOOST_AUTO_TEST_CASE(CreateConcatDim0Uint8Workload) +TEST_CASE("CreateConcatDim0Uint8Workload") { RefCreateConcatWorkloadTest({ 4, 3, 2, 5 }, 0); } -BOOST_AUTO_TEST_CASE(CreateConcatDim0Uint16Workload) +TEST_CASE("CreateConcatDim0Uint16Workload") { RefCreateConcatWorkloadTest({ 4, 3, 2, 5 }, 0); } -BOOST_AUTO_TEST_CASE(CreateConcatDim1Float32Workload) +TEST_CASE("CreateConcatDim1Float32Workload") { RefCreateConcatWorkloadTest({ 2, 6, 2, 5 }, 1); } -BOOST_AUTO_TEST_CASE(CreateConcatDim1Uint8Workload) +TEST_CASE("CreateConcatDim1Uint8Workload") { RefCreateConcatWorkloadTest({ 2, 6, 2, 5 }, 1); } -BOOST_AUTO_TEST_CASE(CreateConcatDim2Float32Workload) +TEST_CASE("CreateConcatDim2Float32Workload") { RefCreateConcatWorkloadTest({ 2, 3, 4, 5 }, 2); } -BOOST_AUTO_TEST_CASE(CreateConcatDim2Uint8Workload) +TEST_CASE("CreateConcatDim2Uint8Workload") { RefCreateConcatWorkloadTest({ 2, 3, 4, 5 }, 2); } -BOOST_AUTO_TEST_CASE(CreateConcatDim3Float32Workload) +TEST_CASE("CreateConcatDim3Float32Workload") { RefCreateConcatWorkloadTest({ 2, 3, 2, 10 }, 3); } -BOOST_AUTO_TEST_CASE(CreateConcatDim3Uint8Workload) +TEST_CASE("CreateConcatDim3Uint8Workload") { RefCreateConcatWorkloadTest({ 2, 3, 2, 10 }, 3); } @@ -1057,25 +1059,25 @@ static void RefCreateConstantWorkloadTest(const armnn::TensorShape& outputShape) // Check output is as expected auto queueDescriptor = workload->GetData(); auto outputHandle = PolymorphicDowncast(queueDescriptor.m_Outputs[0]); - BOOST_TEST((outputHandle->GetTensorInfo() == TensorInfo(outputShape, DataType))); + CHECK((outputHandle->GetTensorInfo() == TensorInfo(outputShape, DataType))); } -BOOST_AUTO_TEST_CASE(CreateConstantUint8Workload) +TEST_CASE("CreateConstantUint8Workload") { RefCreateConstantWorkloadTest({ 2, 3, 2, 10 }); } -BOOST_AUTO_TEST_CASE(CreateConstantInt16Workload) +TEST_CASE("CreateConstantInt16Workload") { RefCreateConstantWorkloadTest({ 2, 3, 2, 10 }); } -BOOST_AUTO_TEST_CASE(CreateConstantFloat32Workload) +TEST_CASE("CreateConstantFloat32Workload") { RefCreateConstantWorkloadTest({ 2, 3, 2, 10 }); } -BOOST_AUTO_TEST_CASE(CreateConstantSigned32Workload) +TEST_CASE("CreateConstantSigned32Workload") { RefCreateConstantWorkloadTest({ 2, 3, 2, 10 }); } @@ -1097,53 +1099,53 @@ static void RefCreatePreluWorkloadTest(const armnn::TensorShape& inputShape, // Check output is as expected auto queueDescriptor = workload->GetData(); auto outputHandle = PolymorphicDowncast(queueDescriptor.m_Outputs[0]); - BOOST_TEST((outputHandle->GetTensorInfo() == TensorInfo(outputShape, dataType))); + CHECK((outputHandle->GetTensorInfo() == TensorInfo(outputShape, dataType))); } -BOOST_AUTO_TEST_CASE(CreatePreluFloat32Workload) +TEST_CASE("CreatePreluFloat32Workload") { RefCreatePreluWorkloadTest({ 1, 4, 1, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 }, armnn::DataType::Float32); } -BOOST_AUTO_TEST_CASE(CreatePreluFloat16Workload) +TEST_CASE("CreatePreluFloat16Workload") { 
RefCreatePreluWorkloadTest({ 1, 4, 1, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 }, armnn::DataType::Float16); } -BOOST_AUTO_TEST_CASE(CreatePreluUint8Workload) +TEST_CASE("CreatePreluUint8Workload") { RefCreatePreluWorkloadTest({ 1, 4, 1, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 }, armnn::DataType::QAsymmU8); } -BOOST_AUTO_TEST_CASE(CreatePreluInt16Workload) +TEST_CASE("CreatePreluInt16Workload") { RefCreatePreluWorkloadTest({ 1, 4, 1, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 }, armnn::DataType::QSymmS16); } -BOOST_AUTO_TEST_CASE(CreatePreluFloat32NoBroadcastWorkload) +TEST_CASE("CreatePreluFloat32NoBroadcastWorkload") { - BOOST_CHECK_THROW(RefCreatePreluWorkloadTest({ 1, 4, 7, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 }, + CHECK_THROWS_AS(RefCreatePreluWorkloadTest({ 1, 4, 7, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 }, armnn::DataType::Float32), armnn::InvalidArgumentException); } -BOOST_AUTO_TEST_CASE(CreatePreluFloat16NoBroadcastWorkload) +TEST_CASE("CreatePreluFloat16NoBroadcastWorkload") { - BOOST_CHECK_THROW(RefCreatePreluWorkloadTest({ 1, 4, 7, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 }, + CHECK_THROWS_AS(RefCreatePreluWorkloadTest({ 1, 4, 7, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 }, armnn::DataType::Float16), armnn::InvalidArgumentException); } -BOOST_AUTO_TEST_CASE(CreatePreluUint8NoBroadcastWorkload) +TEST_CASE("CreatePreluUint8NoBroadcastWorkload") { - BOOST_CHECK_THROW(RefCreatePreluWorkloadTest({ 1, 4, 7, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 }, + CHECK_THROWS_AS(RefCreatePreluWorkloadTest({ 1, 4, 7, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 }, armnn::DataType::QAsymmU8), armnn::InvalidArgumentException); } -BOOST_AUTO_TEST_CASE(CreatePreluInt16NoBroadcastWorkload) +TEST_CASE("CreatePreluInt16NoBroadcastWorkload") { - BOOST_CHECK_THROW(RefCreatePreluWorkloadTest({ 1, 4, 7, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 }, + CHECK_THROWS_AS(RefCreatePreluWorkloadTest({ 1, 4, 7, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 }, armnn::DataType::QSymmS16), armnn::InvalidArgumentException); } @@ -1161,22 +1163,22 @@ static void RefCreateSpaceToDepthWorkloadTest() TensorInfo({ 1, 1, 1, 4 }, DataType)); } -BOOST_AUTO_TEST_CASE(CreateSpaceToDepthWorkloadFloat32) +TEST_CASE("CreateSpaceToDepthWorkloadFloat32") { RefCreateSpaceToDepthWorkloadTest(); } -BOOST_AUTO_TEST_CASE(CreateSpaceToDepthWorkloadFloat16) +TEST_CASE("CreateSpaceToDepthWorkloadFloat16") { RefCreateSpaceToDepthWorkloadTest(); } -BOOST_AUTO_TEST_CASE(CreateSpaceToDepthWorkloadQASymm8) +TEST_CASE("CreateSpaceToDepthWorkloadQASymm8") { RefCreateSpaceToDepthWorkloadTest(); } -BOOST_AUTO_TEST_CASE(CreateSpaceToDepthWorkloadQSymm16) +TEST_CASE("CreateSpaceToDepthWorkloadQSymm16") { RefCreateSpaceToDepthWorkloadTest(); } @@ -1201,23 +1203,23 @@ static void RefCreateStackWorkloadTest(const armnn::TensorShape& inputShape, for (unsigned int i = 0; i < numInputs; ++i) { auto inputHandle = PolymorphicDowncast(queueDescriptor.m_Inputs[i]); - BOOST_TEST((inputHandle->GetTensorInfo() == TensorInfo(inputShape, DataType))); + CHECK((inputHandle->GetTensorInfo() == TensorInfo(inputShape, DataType))); } auto outputHandle = PolymorphicDowncast(queueDescriptor.m_Outputs[0]); - BOOST_TEST((outputHandle->GetTensorInfo() == TensorInfo(outputShape, DataType))); + CHECK((outputHandle->GetTensorInfo() == TensorInfo(outputShape, DataType))); } -BOOST_AUTO_TEST_CASE(CreateStackFloat32Workload) +TEST_CASE("CreateStackFloat32Workload") { RefCreateStackWorkloadTest({ 3, 4, 5 }, { 3, 4, 2, 5 }, 2, 2); } -BOOST_AUTO_TEST_CASE(CreateStackUint8Workload) +TEST_CASE("CreateStackUint8Workload") { RefCreateStackWorkloadTest({ 3, 4, 5 }, { 3, 
4, 2, 5 }, 2, 2); } -BOOST_AUTO_TEST_CASE(CreateStackUint16Workload) +TEST_CASE("CreateStackUint16Workload") { RefCreateStackWorkloadTest({ 3, 4, 5 }, { 3, 4, 2, 5 }, 2, 2); } @@ -1241,14 +1243,14 @@ static void RefCreateQLstmWorkloadTest() auto cellStateOutHandle = PolymorphicDowncast(queueDescriptor.m_Outputs[1]); auto outputHandle = PolymorphicDowncast(queueDescriptor.m_Outputs[2]); - BOOST_TEST((inputHandle->GetTensorInfo() == inputInfo)); - BOOST_TEST((cellStateOutHandle->GetTensorInfo() == cellStateInfo)); - BOOST_TEST((outputHandle->GetTensorInfo() == outputInfo)); + CHECK((inputHandle->GetTensorInfo() == inputInfo)); + CHECK((cellStateOutHandle->GetTensorInfo() == cellStateInfo)); + CHECK((outputHandle->GetTensorInfo() == outputInfo)); } -BOOST_AUTO_TEST_CASE(CreateQLstmWorkload) +TEST_CASE("CreateQLstmWorkload") { RefCreateQLstmWorkloadTest(); } -BOOST_AUTO_TEST_SUITE_END() +} diff --git a/src/backends/reference/test/RefDetectionPostProcessTests.cpp b/src/backends/reference/test/RefDetectionPostProcessTests.cpp index fab6e00bad..763578be3c 100644 --- a/src/backends/reference/test/RefDetectionPostProcessTests.cpp +++ b/src/backends/reference/test/RefDetectionPostProcessTests.cpp @@ -8,46 +8,46 @@ #include #include -#include +#include -BOOST_AUTO_TEST_SUITE(RefDetectionPostProcess) - -BOOST_AUTO_TEST_CASE(TopKSortTest) +TEST_SUITE("RefDetectionPostProcess") +{ +TEST_CASE("TopKSortTest") { unsigned int k = 3; unsigned int indices[8] = { 0, 1, 2, 3, 4, 5, 6, 7 }; float values[8] = { 0, 7, 6, 5, 4, 3, 2, 500 }; armnn::TopKSort(k, indices, values, 8); - BOOST_TEST(indices[0] == 7); - BOOST_TEST(indices[1] == 1); - BOOST_TEST(indices[2] == 2); + CHECK(indices[0] == 7); + CHECK(indices[1] == 1); + CHECK(indices[2] == 2); } -BOOST_AUTO_TEST_CASE(FullTopKSortTest) +TEST_CASE("FullTopKSortTest") { unsigned int k = 8; unsigned int indices[8] = { 0, 1, 2, 3, 4, 5, 6, 7 }; float values[8] = { 0, 7, 6, 5, 4, 3, 2, 500 }; armnn::TopKSort(k, indices, values, 8); - BOOST_TEST(indices[0] == 7); - BOOST_TEST(indices[1] == 1); - BOOST_TEST(indices[2] == 2); - BOOST_TEST(indices[3] == 3); - BOOST_TEST(indices[4] == 4); - BOOST_TEST(indices[5] == 5); - BOOST_TEST(indices[6] == 6); - BOOST_TEST(indices[7] == 0); + CHECK(indices[0] == 7); + CHECK(indices[1] == 1); + CHECK(indices[2] == 2); + CHECK(indices[3] == 3); + CHECK(indices[4] == 4); + CHECK(indices[5] == 5); + CHECK(indices[6] == 6); + CHECK(indices[7] == 0); } -BOOST_AUTO_TEST_CASE(IouTest) +TEST_CASE("IouTest") { float boxI[4] = { 0.0f, 0.0f, 10.0f, 10.0f }; float boxJ[4] = { 1.0f, 1.0f, 11.0f, 11.0f }; float iou = armnn::IntersectionOverUnion(boxI, boxJ); - BOOST_TEST(iou == 0.68, boost::test_tools::tolerance(0.001)); + CHECK(iou == doctest::Approx(0.68).epsilon(0.001f)); } -BOOST_AUTO_TEST_CASE(NmsFunction) +TEST_CASE("NmsFunction") { std::vector boxCorners({ 0.0f, 0.0f, 1.0f, 1.0f, @@ -63,10 +63,10 @@ BOOST_AUTO_TEST_CASE(NmsFunction) std::vector result = armnn::NonMaxSuppression(6, boxCorners, scores, 0.0, 3, 0.5); - BOOST_TEST(result.size() == 3); - BOOST_TEST(result[0] == 3); - BOOST_TEST(result[1] == 0); - BOOST_TEST(result[2] == 5); + CHECK(result.size() == 3); + CHECK(result[0] == 3); + CHECK(result[1] == 0); + CHECK(result[2] == 5); } void DetectionPostProcessTestImpl(bool useRegularNms, @@ -149,28 +149,22 @@ void DetectionPostProcessTestImpl(bool useRegularNms, detectionScores.data(), numDetections.data()); - BOOST_CHECK_EQUAL_COLLECTIONS(detectionBoxes.begin(), + CHECK(std::equal(detectionBoxes.begin(), detectionBoxes.end(), 
expectedDetectionBoxes.begin(), - expectedDetectionBoxes.end()); - - BOOST_CHECK_EQUAL_COLLECTIONS(detectionScores.begin(), - detectionScores.end(), - expectedDetectionScores.begin(), - expectedDetectionScores.end()); - - BOOST_CHECK_EQUAL_COLLECTIONS(detectionClasses.begin(), - detectionClasses.end(), - expectedDetectionClasses.begin(), - expectedDetectionClasses.end()); - - BOOST_CHECK_EQUAL_COLLECTIONS(numDetections.begin(), - numDetections.end(), - expectedNumDetections.begin(), - expectedNumDetections.end()); + expectedDetectionBoxes.end())); + + CHECK(std::equal(detectionScores.begin(), detectionScores.end(), + expectedDetectionScores.begin(), expectedDetectionScores.end())); + + CHECK(std::equal(detectionClasses.begin(), detectionClasses.end(), + expectedDetectionClasses.begin(), expectedDetectionClasses.end())); + + CHECK(std::equal(numDetections.begin(), numDetections.end(), + expectedNumDetections.begin(), expectedNumDetections.end())); } -BOOST_AUTO_TEST_CASE(RegularNmsDetectionPostProcess) +TEST_CASE("RegularNmsDetectionPostProcess") { std::vector expectedDetectionBoxes({ 0.0f, 10.0f, 1.0f, 11.0f, @@ -186,7 +180,7 @@ BOOST_AUTO_TEST_CASE(RegularNmsDetectionPostProcess) expectedDetectionScores, expectedNumDetections); } -BOOST_AUTO_TEST_CASE(FastNmsDetectionPostProcess) +TEST_CASE("FastNmsDetectionPostProcess") { std::vector expectedDetectionBoxes({ 0.0f, 10.0f, 1.0f, 11.0f, @@ -201,4 +195,4 @@ BOOST_AUTO_TEST_CASE(FastNmsDetectionPostProcess) expectedDetectionScores, expectedNumDetections); } -BOOST_AUTO_TEST_SUITE_END() \ No newline at end of file +} \ No newline at end of file diff --git a/src/backends/reference/test/RefEndToEndTests.cpp b/src/backends/reference/test/RefEndToEndTests.cpp index 910df292a9..69a2048078 100644 --- a/src/backends/reference/test/RefEndToEndTests.cpp +++ b/src/backends/reference/test/RefEndToEndTests.cpp @@ -28,14 +28,14 @@ #include #include -#include - -BOOST_AUTO_TEST_SUITE(RefEndToEnd) +#include +TEST_SUITE("RefEndToEnd") +{ std::vector defaultBackends = {armnn::Compute::CpuRef}; // Abs -BOOST_AUTO_TEST_CASE(RefAbsEndToEndTestFloat32) +TEST_CASE("RefAbsEndToEndTestFloat32") { std::vector expectedOutput = { @@ -48,7 +48,7 @@ BOOST_AUTO_TEST_CASE(RefAbsEndToEndTestFloat32) expectedOutput); } -BOOST_AUTO_TEST_CASE(RefAbsEndToEndTestUint8) +TEST_CASE("RefAbsEndToEndTestUint8") { // Note the expected output will be implicitly quantized by the below test function std::vector expectedOutput = @@ -62,7 +62,7 @@ BOOST_AUTO_TEST_CASE(RefAbsEndToEndTestUint8) expectedOutput); } -BOOST_AUTO_TEST_CASE(RefAbsEndToEndTestInt16) +TEST_CASE("RefAbsEndToEndTestInt16") { // Note the expected output will be implicitly quantized by the below test function std::vector expectedOutput = @@ -77,17 +77,17 @@ BOOST_AUTO_TEST_CASE(RefAbsEndToEndTestInt16) } // Constant -BOOST_AUTO_TEST_CASE(ConstantUsage_Ref_Float32) +TEST_CASE("ConstantUsage_Ref_Float32") { - BOOST_TEST(ConstantUsageFloat32Test(defaultBackends)); + CHECK(ConstantUsageFloat32Test(defaultBackends)); } -BOOST_AUTO_TEST_CASE(ConstantUsage_Ref_Uint8) +TEST_CASE("ConstantUsage_Ref_Uint8") { - BOOST_TEST(ConstantUsageUint8Test(defaultBackends)); + CHECK(ConstantUsageUint8Test(defaultBackends)); } -BOOST_AUTO_TEST_CASE(Unsigned8) +TEST_CASE("Unsigned8") { using namespace armnn; @@ -122,7 +122,7 @@ BOOST_AUTO_TEST_CASE(Unsigned8) // Loads it into the runtime. 
NetworkId netId; auto error = runtime->LoadNetwork(netId, std::move(optNet)); - BOOST_TEST(error == Status::Success); + CHECK(error == Status::Success); // Creates structures for input & output. std::vector inputData @@ -144,14 +144,14 @@ BOOST_AUTO_TEST_CASE(Unsigned8) runtime->EnqueueWorkload(netId, inputTensors, outputTensors); // Checks the results. - BOOST_TEST(outputData[0] == 0); - BOOST_TEST(outputData[1] == 0); - BOOST_TEST(outputData[2] == 0); - BOOST_TEST(outputData[3] == 255); // softmax has been saturated. - BOOST_TEST(outputData[4] == 0); + CHECK(outputData[0] == 0); + CHECK(outputData[1] == 0); + CHECK(outputData[2] == 0); + CHECK(outputData[3] == 255); // softmax has been saturated. + CHECK(outputData[4] == 0); } -BOOST_AUTO_TEST_CASE(TrivialAdd) +TEST_CASE("TrivialAdd") { // This test was designed to match "AddTwo" in android nn/runtime/test/TestTrivialModel.cpp. @@ -211,21 +211,21 @@ BOOST_AUTO_TEST_CASE(TrivialAdd) runtime->EnqueueWorkload(netId, inputTensors, outputTensors); // Checks the results - BOOST_TEST(outputData[0] == 101); - BOOST_TEST(outputData[1] == 202); - BOOST_TEST(outputData[2] == 303); - BOOST_TEST(outputData[3] == 404); - BOOST_TEST(outputData[4] == 505); - BOOST_TEST(outputData[5] == 606); - BOOST_TEST(outputData[6] == 707); - BOOST_TEST(outputData[7] == 808); - BOOST_TEST(outputData[8] == 909); - BOOST_TEST(outputData[9] == 1010); - BOOST_TEST(outputData[10] == 1111); - BOOST_TEST(outputData[11] == 1212); -} - -BOOST_AUTO_TEST_CASE(MultipleOutputs) + CHECK(outputData[0] == 101); + CHECK(outputData[1] == 202); + CHECK(outputData[2] == 303); + CHECK(outputData[3] == 404); + CHECK(outputData[4] == 505); + CHECK(outputData[5] == 606); + CHECK(outputData[6] == 707); + CHECK(outputData[7] == 808); + CHECK(outputData[8] == 909); + CHECK(outputData[9] == 1010); + CHECK(outputData[10] == 1111); + CHECK(outputData[11] == 1212); +} + +TEST_CASE("MultipleOutputs") { using namespace armnn; @@ -306,12 +306,12 @@ BOOST_AUTO_TEST_CASE(MultipleOutputs) runtime->EnqueueWorkload(netId, inputTensors, outputTensors); // Checks the results. 
- BOOST_TEST(output1Data == std::vector({ 1.f, 1.f, 1.f, 1.f, 1.f, 0.f, -1.f, -1.f, 1.f, 1.f })); // ReLu1 - BOOST_TEST(output2Data == std::vector({ 3.f, 5.f, 2.f, 3.f, 6.f, 0.f, 0.f, 0.f, 3.f, 3.f })); // ReLu6 - BOOST_TEST(output3Data == std::vector({ 3.f, 5.f, 2.f, 3.f, 5.f, 2.f, 2.f, 2.f, 3.f, 3.f })); // [2, 5] + CHECK(output1Data == std::vector({ 1.f, 1.f, 1.f, 1.f, 1.f, 0.f, -1.f, -1.f, 1.f, 1.f })); // ReLu1 + CHECK(output2Data == std::vector({ 3.f, 5.f, 2.f, 3.f, 6.f, 0.f, 0.f, 0.f, 3.f, 3.f })); // ReLu6 + CHECK(output3Data == std::vector({ 3.f, 5.f, 2.f, 3.f, 5.f, 2.f, 2.f, 2.f, 3.f, 3.f })); // [2, 5] } -BOOST_AUTO_TEST_CASE(TrivialMin) +TEST_CASE("TrivialMin") { using namespace armnn; @@ -369,13 +369,13 @@ BOOST_AUTO_TEST_CASE(TrivialMin) runtime->EnqueueWorkload(netId, inputTensors, outputTensors); // Checks the results - BOOST_TEST(outputData[0] == 1); - BOOST_TEST(outputData[1] == 1); - BOOST_TEST(outputData[2] == 3); - BOOST_TEST(outputData[3] == 2); + CHECK(outputData[0] == 1); + CHECK(outputData[1] == 1); + CHECK(outputData[2] == 3); + CHECK(outputData[3] == 2); } -BOOST_AUTO_TEST_CASE(RefEqualSimpleEndToEndTest) +TEST_CASE("RefEqualSimpleEndToEndTest") { const std::vector expectedOutput({ 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1 }); @@ -385,7 +385,7 @@ BOOST_AUTO_TEST_CASE(RefEqualSimpleEndToEndTest) expectedOutput); } -BOOST_AUTO_TEST_CASE(RefGreaterSimpleEndToEndTest) +TEST_CASE("RefGreaterSimpleEndToEndTest") { const std::vector expectedOutput({ 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0 }); @@ -395,7 +395,7 @@ BOOST_AUTO_TEST_CASE(RefGreaterSimpleEndToEndTest) expectedOutput); } -BOOST_AUTO_TEST_CASE(RefEqualSimpleEndToEndUint8Test) +TEST_CASE("RefEqualSimpleEndToEndUint8Test") { const std::vector expectedOutput({ 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1 }); @@ -405,7 +405,7 @@ BOOST_AUTO_TEST_CASE(RefEqualSimpleEndToEndUint8Test) expectedOutput); } -BOOST_AUTO_TEST_CASE(RefGreaterSimpleEndToEndUint8Test) +TEST_CASE("RefGreaterSimpleEndToEndUint8Test") { const std::vector expectedOutput({ 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0 }); @@ -415,7 +415,7 @@ BOOST_AUTO_TEST_CASE(RefGreaterSimpleEndToEndUint8Test) expectedOutput); } -BOOST_AUTO_TEST_CASE(RefEqualBroadcastEndToEndTest) +TEST_CASE("RefEqualBroadcastEndToEndTest") { const std::vector expectedOutput({ 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0 }); @@ -425,7 +425,7 @@ BOOST_AUTO_TEST_CASE(RefEqualBroadcastEndToEndTest) expectedOutput); } -BOOST_AUTO_TEST_CASE(RefGreaterBroadcastEndToEndTest) +TEST_CASE("RefGreaterBroadcastEndToEndTest") { const std::vector expectedOutput({ 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1 }); @@ -435,7 +435,7 @@ BOOST_AUTO_TEST_CASE(RefGreaterBroadcastEndToEndTest) expectedOutput); } -BOOST_AUTO_TEST_CASE(RefEqualBroadcastEndToEndUint8Test) +TEST_CASE("RefEqualBroadcastEndToEndUint8Test") { const std::vector expectedOutput({ 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0 }); @@ -445,7 +445,7 @@ BOOST_AUTO_TEST_CASE(RefEqualBroadcastEndToEndUint8Test) expectedOutput); } -BOOST_AUTO_TEST_CASE(RefGreaterBroadcastEndToEndUint8Test) +TEST_CASE("RefGreaterBroadcastEndToEndUint8Test") { const std::vector expectedOutput({ 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1 }); @@ -455,249 +455,249 @@ BOOST_AUTO_TEST_CASE(RefGreaterBroadcastEndToEndUint8Test) expectedOutput); } -BOOST_AUTO_TEST_CASE(RefBatchToSpaceNdEndToEndFloat32NHWCTest) +TEST_CASE("RefBatchToSpaceNdEndToEndFloat32NHWCTest") { BatchToSpaceNdEndToEnd(defaultBackends, armnn::DataLayout::NHWC); } 
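The BOOST_CHECK_THROW assertions seen earlier in this patch (for example the CreatePrelu*NoBroadcastWorkload cases) map to CHECK_THROWS_AS. A small sketch with an illustrative throwing function rather than the real workload factory; it assumes a doctest main is linked in, as in the first sketch above.

    #include <stdexcept>
    #include <doctest/doctest.h>

    // Illustrative helper that throws on invalid input.
    static void RejectNegative(int value)
    {
        if (value < 0)
        {
            throw std::invalid_argument("negative value");
        }
    }

    TEST_CASE("ThrowsSketch")
    {
        // was: BOOST_CHECK_THROW(RejectNegative(-1), std::invalid_argument);
        CHECK_THROWS_AS(RejectNegative(-1), std::invalid_argument);

        // Complementary doctest assertion for the non-throwing path.
        CHECK_NOTHROW(RejectNegative(1));
    }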
-BOOST_AUTO_TEST_CASE(RefBatchToSpaceNdEndToEndUint8NHWCTest) +TEST_CASE("RefBatchToSpaceNdEndToEndUint8NHWCTest") { BatchToSpaceNdEndToEnd(defaultBackends, armnn::DataLayout::NHWC); } -BOOST_AUTO_TEST_CASE(RefBatchToSpaceNdEndToEndQSymm16NHWCTest) +TEST_CASE("RefBatchToSpaceNdEndToEndQSymm16NHWCTest") { BatchToSpaceNdEndToEnd(defaultBackends, armnn::DataLayout::NHWC); } -BOOST_AUTO_TEST_CASE(RefBatchToSpaceNdEndToEndFloat32NCHWTest) +TEST_CASE("RefBatchToSpaceNdEndToEndFloat32NCHWTest") { BatchToSpaceNdEndToEnd(defaultBackends, armnn::DataLayout::NCHW); } -BOOST_AUTO_TEST_CASE(RefBatchToSpaceNdEndToEndUint8NCHWTest) +TEST_CASE("RefBatchToSpaceNdEndToEndUint8NCHWTest") { BatchToSpaceNdEndToEnd(defaultBackends, armnn::DataLayout::NCHW); } -BOOST_AUTO_TEST_CASE(RefBatchToSpaceNdEndToEndQSymm16NCHWTest) +TEST_CASE("RefBatchToSpaceNdEndToEndQSymm16NCHWTest") { BatchToSpaceNdEndToEnd(defaultBackends, armnn::DataLayout::NCHW); } -BOOST_AUTO_TEST_CASE(RefBatchToSpaceNdEndToEndComplexFloat32NHWCTest) +TEST_CASE("RefBatchToSpaceNdEndToEndComplexFloat32NHWCTest") { BatchToSpaceNdComplexEndToEnd(defaultBackends, armnn::DataLayout::NHWC); } -BOOST_AUTO_TEST_CASE(RefBatchToSpaceNdEndToEndComplexUint8NHWCTest) +TEST_CASE("RefBatchToSpaceNdEndToEndComplexUint8NHWCTest") { BatchToSpaceNdComplexEndToEnd(defaultBackends, armnn::DataLayout::NHWC); } -BOOST_AUTO_TEST_CASE(RefBatchToSpaceNdEndToEndComplexQSymm16NHWCTest) +TEST_CASE("RefBatchToSpaceNdEndToEndComplexQSymm16NHWCTest") { BatchToSpaceNdComplexEndToEnd(defaultBackends, armnn::DataLayout::NHWC); } -BOOST_AUTO_TEST_CASE(RefBatchToSpaceNdEndToEndComplexFloat32NCHWTest) +TEST_CASE("RefBatchToSpaceNdEndToEndComplexFloat32NCHWTest") { BatchToSpaceNdComplexEndToEnd(defaultBackends, armnn::DataLayout::NCHW); } -BOOST_AUTO_TEST_CASE(RefBatchToSpaceNdEndToEndComplexUint8NCHWTest) +TEST_CASE("RefBatchToSpaceNdEndToEndComplexUint8NCHWTest") { BatchToSpaceNdComplexEndToEnd(defaultBackends, armnn::DataLayout::NCHW); } -BOOST_AUTO_TEST_CASE(RefBatchToSpaceNdEndToEndComplexQSymm16NCHWTest) +TEST_CASE("RefBatchToSpaceNdEndToEndComplexQSymm16NCHWTest") { BatchToSpaceNdComplexEndToEnd(defaultBackends, armnn::DataLayout::NCHW); } -BOOST_AUTO_TEST_CASE(RefConcatEndToEndDim0Test) +TEST_CASE("RefConcatEndToEndDim0Test") { ConcatDim0EndToEnd(defaultBackends); } -BOOST_AUTO_TEST_CASE(RefConcatEndToEndDim0Uint8Test) +TEST_CASE("RefConcatEndToEndDim0Uint8Test") { ConcatDim0EndToEnd(defaultBackends); } -BOOST_AUTO_TEST_CASE(RefConcatEndToEndDim1Test) +TEST_CASE("RefConcatEndToEndDim1Test") { ConcatDim1EndToEnd(defaultBackends); } -BOOST_AUTO_TEST_CASE(RefConcatEndToEndDim1Uint8Test) +TEST_CASE("RefConcatEndToEndDim1Uint8Test") { ConcatDim1EndToEnd(defaultBackends); } -BOOST_AUTO_TEST_CASE(RefConcatEndToEndDim2Test) +TEST_CASE("RefConcatEndToEndDim2Test") { ConcatDim2EndToEnd(defaultBackends); } -BOOST_AUTO_TEST_CASE(RefConcatEndToEndDim2Uint8Test) +TEST_CASE("RefConcatEndToEndDim2Uint8Test") { ConcatDim2EndToEnd(defaultBackends); } -BOOST_AUTO_TEST_CASE(RefConcatEndToEndDim3Test) +TEST_CASE("RefConcatEndToEndDim3Test") { ConcatDim3EndToEnd(defaultBackends); } -BOOST_AUTO_TEST_CASE(RefConcatEndToEndDim3Uint8Test) +TEST_CASE("RefConcatEndToEndDim3Uint8Test") { ConcatDim3EndToEnd(defaultBackends); } -BOOST_AUTO_TEST_CASE(RefEluEndToEndTestFloat32) +TEST_CASE("RefEluEndToEndTestFloat32") { EluEndToEndTest(defaultBackends); } -BOOST_AUTO_TEST_CASE(RefEluEndToEndTestFloat16) +TEST_CASE("RefEluEndToEndTestFloat16") { EluEndToEndTest(defaultBackends); } 
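For floating-point comparisons, the tolerance-based BOOST_TEST in RefDetectionPostProcessTests (IouTest) becomes doctest::Approx with an epsilon. A sketch with an illustrative value; again a doctest main is assumed to be provided elsewhere.

    #include <doctest/doctest.h>

    TEST_CASE("ApproxSketch")
    {
        float ratio = 2.0f / 3.0f;

        // was: BOOST_TEST(ratio == 0.667, boost::test_tools::tolerance(0.001));
        // doctest's Approx uses a relative epsilon by default.
        CHECK(ratio == doctest::Approx(0.667).epsilon(0.001));
    }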
-BOOST_AUTO_TEST_CASE(RefEluEndToEndTestBFloat16) +TEST_CASE("RefEluEndToEndTestBFloat16") { EluEndToEndTest(defaultBackends); } -BOOST_AUTO_TEST_CASE(RefEluEndToEndTestQAsymmS8) +TEST_CASE("RefEluEndToEndTestQAsymmS8") { EluEndToEndTest(defaultBackends); } -BOOST_AUTO_TEST_CASE(RefEluEndToEndTestQAsymmU8) +TEST_CASE("RefEluEndToEndTestQAsymmU8") { EluEndToEndTest(defaultBackends); } -BOOST_AUTO_TEST_CASE(RefEluEndToEndTestQSymmS16) +TEST_CASE("RefEluEndToEndTestQSymmS16") { EluEndToEndTest(defaultBackends); } -BOOST_AUTO_TEST_CASE(RefFillEndToEndTest) +TEST_CASE("RefFillEndToEndTest") { FillEndToEnd(defaultBackends); } -BOOST_AUTO_TEST_CASE(RefFillEndToEndTestFloat16) +TEST_CASE("RefFillEndToEndTestFloat16") { FillEndToEnd(defaultBackends); } -BOOST_AUTO_TEST_CASE(RefFillEndToEndTestInt32) +TEST_CASE("RefFillEndToEndTestInt32") { FillEndToEnd(defaultBackends); } -BOOST_AUTO_TEST_CASE(RefFullyConnectedEndToEndTestInt32) +TEST_CASE("RefFullyConnectedEndToEndTestInt32") { FullyConnectedWithDynamicWeightsEndToEnd(defaultBackends); } -BOOST_AUTO_TEST_CASE(RefGatherFloatTest) +TEST_CASE("RefGatherFloatTest") { GatherEndToEnd(defaultBackends); } -BOOST_AUTO_TEST_CASE(RefGatherUint8Test) +TEST_CASE("RefGatherUint8Test") { GatherEndToEnd(defaultBackends); } -BOOST_AUTO_TEST_CASE(RefGatherInt16Test) +TEST_CASE("RefGatherInt16Test") { GatherEndToEnd(defaultBackends); } -BOOST_AUTO_TEST_CASE(RefGatherMultiDimFloatTest) +TEST_CASE("RefGatherMultiDimFloatTest") { GatherMultiDimEndToEnd(defaultBackends); } -BOOST_AUTO_TEST_CASE(RefGatherMultiDimUint8Test) +TEST_CASE("RefGatherMultiDimUint8Test") { GatherMultiDimEndToEnd(defaultBackends); } -BOOST_AUTO_TEST_CASE(RefGatherMultiDimInt16Test) +TEST_CASE("RefGatherMultiDimInt16Test") { GatherMultiDimEndToEnd(defaultBackends); } // DepthToSpace -BOOST_AUTO_TEST_CASE(DephtToSpaceEndToEndNchwFloat32) +TEST_CASE("DephtToSpaceEndToEndNchwFloat32") { DepthToSpaceEndToEnd(defaultBackends, armnn::DataLayout::NCHW); } -BOOST_AUTO_TEST_CASE(DephtToSpaceEndToEndNchwFloat16) +TEST_CASE("DephtToSpaceEndToEndNchwFloat16") { DepthToSpaceEndToEnd(defaultBackends, armnn::DataLayout::NCHW); } -BOOST_AUTO_TEST_CASE(DephtToSpaceEndToEndNchwUint8) +TEST_CASE("DephtToSpaceEndToEndNchwUint8") { DepthToSpaceEndToEnd(defaultBackends, armnn::DataLayout::NCHW); } -BOOST_AUTO_TEST_CASE(DephtToSpaceEndToEndNchwInt16) +TEST_CASE("DephtToSpaceEndToEndNchwInt16") { DepthToSpaceEndToEnd(defaultBackends, armnn::DataLayout::NCHW); } -BOOST_AUTO_TEST_CASE(DephtToSpaceEndToEndNhwcFloat32) +TEST_CASE("DephtToSpaceEndToEndNhwcFloat32") { DepthToSpaceEndToEnd(defaultBackends, armnn::DataLayout::NHWC); } -BOOST_AUTO_TEST_CASE(DephtToSpaceEndToEndNhwcFloat16) +TEST_CASE("DephtToSpaceEndToEndNhwcFloat16") { DepthToSpaceEndToEnd(defaultBackends, armnn::DataLayout::NHWC); } -BOOST_AUTO_TEST_CASE(DephtToSpaceEndToEndNhwcUint8) +TEST_CASE("DephtToSpaceEndToEndNhwcUint8") { DepthToSpaceEndToEnd(defaultBackends, armnn::DataLayout::NHWC); } -BOOST_AUTO_TEST_CASE(DephtToSpaceEndToEndNhwcInt16) +TEST_CASE("DephtToSpaceEndToEndNhwcInt16") { DepthToSpaceEndToEnd(defaultBackends, armnn::DataLayout::NHWC); } // Dequantize -BOOST_AUTO_TEST_CASE(DequantizeEndToEndSimpleTest) +TEST_CASE("DequantizeEndToEndSimpleTest") { DequantizeEndToEndSimple(defaultBackends); } -BOOST_AUTO_TEST_CASE(DequantizeEndToEndOffsetTest) +TEST_CASE("DequantizeEndToEndOffsetTest") { DequantizeEndToEndOffset(defaultBackends); } -BOOST_AUTO_TEST_CASE(DequantizeEndToEndSimpleInt16Test) +TEST_CASE("DequantizeEndToEndSimpleInt16Test") { 
DequantizeEndToEndSimple(defaultBackends); } -BOOST_AUTO_TEST_CASE(DequantizeEndToEndOffsetInt16Test) +TEST_CASE("DequantizeEndToEndOffsetInt16Test") { DequantizeEndToEndOffset(defaultBackends); } -BOOST_AUTO_TEST_CASE(RefDetectionPostProcessRegularNmsTest) +TEST_CASE("RefDetectionPostProcessRegularNmsTest") { std::vector boxEncodings({ 0.0f, 0.0f, 0.0f, 0.0f, @@ -734,7 +734,7 @@ inline void QuantizeData(uint8_t* quant, const float* dequant, const TensorInfo& } } -BOOST_AUTO_TEST_CASE(RefDetectionPostProcessRegularNmsUint8Test) +TEST_CASE("RefDetectionPostProcessRegularNmsUint8Test") { armnn::TensorInfo boxEncodingsInfo({ 1, 6, 4 }, armnn::DataType::Float32); armnn::TensorInfo scoresInfo({ 1, 6, 3 }, armnn::DataType::Float32); @@ -783,7 +783,7 @@ BOOST_AUTO_TEST_CASE(RefDetectionPostProcessRegularNmsUint8Test) 1.0f, 1, 0.01f, 0, 0.5f, 0); } -BOOST_AUTO_TEST_CASE(RefDetectionPostProcessFastNmsTest) +TEST_CASE("RefDetectionPostProcessFastNmsTest") { std::vector boxEncodings({ 0.0f, 0.0f, 0.0f, 0.0f, @@ -812,7 +812,7 @@ BOOST_AUTO_TEST_CASE(RefDetectionPostProcessFastNmsTest) DetectionPostProcessFastNmsEndToEnd(defaultBackends, boxEncodings, scores, anchors); } -BOOST_AUTO_TEST_CASE(RefDetectionPostProcessFastNmsUint8Test) +TEST_CASE("RefDetectionPostProcessFastNmsUint8Test") { armnn::TensorInfo boxEncodingsInfo({ 1, 6, 4 }, armnn::DataType::Float32); armnn::TensorInfo scoresInfo({ 1, 6, 3 }, armnn::DataType::Float32); @@ -862,494 +862,493 @@ BOOST_AUTO_TEST_CASE(RefDetectionPostProcessFastNmsUint8Test) } // HardSwish -BOOST_AUTO_TEST_CASE(RefHardSwishEndToEndTestFloat32) +TEST_CASE("RefHardSwishEndToEndTestFloat32") { HardSwishEndToEndTest(defaultBackends); } -BOOST_AUTO_TEST_CASE(RefHardSwishEndToEndTestFloat16) +TEST_CASE("RefHardSwishEndToEndTestFloat16") { HardSwishEndToEndTest(defaultBackends); } -BOOST_AUTO_TEST_CASE(RefHardSwishEndToEndTestBFloat16) +TEST_CASE("RefHardSwishEndToEndTestBFloat16") { -HardSwishEndToEndTest(defaultBackends); + HardSwishEndToEndTest(defaultBackends); } -BOOST_AUTO_TEST_CASE(RefHardSwishEndToEndTestQAsymmS8) +TEST_CASE("RefHardSwishEndToEndTestQAsymmS8") { HardSwishEndToEndTest(defaultBackends); } -BOOST_AUTO_TEST_CASE(RefHardSwishEndToEndTestQAsymmU8) +TEST_CASE("RefHardSwishEndToEndTestQAsymmU8") { HardSwishEndToEndTest(defaultBackends); } -BOOST_AUTO_TEST_CASE(RefHardSwishEndToEndTestQSymmS16) +TEST_CASE("RefHardSwishEndToEndTestQSymmS16") { HardSwishEndToEndTest(defaultBackends); } // LogSoftmax -BOOST_AUTO_TEST_CASE(RefLogSoftmaxEndToEndTest) +TEST_CASE("RefLogSoftmaxEndToEndTest") { LogSoftmaxEndToEndTest(defaultBackends); } -BOOST_AUTO_TEST_CASE(RefPreluEndToEndTestFloat32) +TEST_CASE("RefPreluEndToEndTestFloat32") { PreluEndToEndNegativeTest(defaultBackends); } -BOOST_AUTO_TEST_CASE(RefPreluEndToEndTestUint8) +TEST_CASE("RefPreluEndToEndTestUint8") { PreluEndToEndPositiveTest(defaultBackends); } -BOOST_AUTO_TEST_CASE(RefPreluEndToEndTestQSymm16) +TEST_CASE("RefPreluEndToEndTestQSymm16") { PreluEndToEndPositiveTest(defaultBackends); } -BOOST_AUTO_TEST_CASE(RefSpaceToDepthNhwcEndToEndTest1) +TEST_CASE("RefSpaceToDepthNhwcEndToEndTest1") { SpaceToDepthNhwcEndToEndTest1(defaultBackends); } -BOOST_AUTO_TEST_CASE(RefSpaceToDepthNchwEndToEndTest1) +TEST_CASE("RefSpaceToDepthNchwEndToEndTest1") { SpaceToDepthNchwEndToEndTest1(defaultBackends); - } -BOOST_AUTO_TEST_CASE(RefSpaceToDepthNhwcEndToEndTest2) +TEST_CASE("RefSpaceToDepthNhwcEndToEndTest2") { SpaceToDepthNhwcEndToEndTest2(defaultBackends); } -BOOST_AUTO_TEST_CASE(RefSpaceToDepthNchwEndToEndTest2) 
+TEST_CASE("RefSpaceToDepthNchwEndToEndTest2") { SpaceToDepthNchwEndToEndTest2(defaultBackends); } -BOOST_AUTO_TEST_CASE(RefSplitter1dEndToEndTest) +TEST_CASE("RefSplitter1dEndToEndTest") { Splitter1dEndToEnd(defaultBackends); } -BOOST_AUTO_TEST_CASE(RefSplitter1dEndToEndUint8Test) +TEST_CASE("RefSplitter1dEndToEndUint8Test") { Splitter1dEndToEnd(defaultBackends); } -BOOST_AUTO_TEST_CASE(RefSplitter2dDim0EndToEndTest) +TEST_CASE("RefSplitter2dDim0EndToEndTest") { Splitter2dDim0EndToEnd(defaultBackends); } -BOOST_AUTO_TEST_CASE(RefSplitter2dDim1EndToEndTest) +TEST_CASE("RefSplitter2dDim1EndToEndTest") { Splitter2dDim1EndToEnd(defaultBackends); } -BOOST_AUTO_TEST_CASE(RefSplitter2dDim0EndToEndUint8Test) +TEST_CASE("RefSplitter2dDim0EndToEndUint8Test") { Splitter2dDim0EndToEnd(defaultBackends); } -BOOST_AUTO_TEST_CASE(RefSplitter2dDim1EndToEndUint8Test) +TEST_CASE("RefSplitter2dDim1EndToEndUint8Test") { Splitter2dDim1EndToEnd(defaultBackends); } -BOOST_AUTO_TEST_CASE(RefSplitter3dDim0EndToEndTest) +TEST_CASE("RefSplitter3dDim0EndToEndTest") { Splitter3dDim0EndToEnd(defaultBackends); } -BOOST_AUTO_TEST_CASE(RefSplitter3dDim1EndToEndTest) +TEST_CASE("RefSplitter3dDim1EndToEndTest") { Splitter3dDim1EndToEnd(defaultBackends); } -BOOST_AUTO_TEST_CASE(RefSplitter3dDim2EndToEndTest) +TEST_CASE("RefSplitter3dDim2EndToEndTest") { Splitter3dDim2EndToEnd(defaultBackends); } -BOOST_AUTO_TEST_CASE(RefSplitter3dDim0EndToEndUint8Test) +TEST_CASE("RefSplitter3dDim0EndToEndUint8Test") { Splitter3dDim0EndToEnd(defaultBackends); } -BOOST_AUTO_TEST_CASE(RefSplitter3dDim1EndToEndUint8Test) +TEST_CASE("RefSplitter3dDim1EndToEndUint8Test") { Splitter3dDim1EndToEnd(defaultBackends); } -BOOST_AUTO_TEST_CASE(RefSplitter3dDim2EndToEndUint8Test) +TEST_CASE("RefSplitter3dDim2EndToEndUint8Test") { Splitter3dDim2EndToEnd(defaultBackends); } -BOOST_AUTO_TEST_CASE(RefSplitter4dDim0EndToEndTest) +TEST_CASE("RefSplitter4dDim0EndToEndTest") { Splitter4dDim0EndToEnd(defaultBackends); } -BOOST_AUTO_TEST_CASE(RefSplitter4dDim1EndToEndTest) +TEST_CASE("RefSplitter4dDim1EndToEndTest") { Splitter4dDim1EndToEnd(defaultBackends); } -BOOST_AUTO_TEST_CASE(RefSplitter4dDim2EndToEndTest) +TEST_CASE("RefSplitter4dDim2EndToEndTest") { Splitter4dDim2EndToEnd(defaultBackends); } -BOOST_AUTO_TEST_CASE(RefSplitter4dDim3EndToEndTest) +TEST_CASE("RefSplitter4dDim3EndToEndTest") { Splitter4dDim3EndToEnd(defaultBackends); } -BOOST_AUTO_TEST_CASE(RefSplitter4dDim0EndToEndUint8Test) +TEST_CASE("RefSplitter4dDim0EndToEndUint8Test") { Splitter4dDim0EndToEnd(defaultBackends); } -BOOST_AUTO_TEST_CASE(RefSplitter4dDim1EndToEndUint8Test) +TEST_CASE("RefSplitter4dDim1EndToEndUint8Test") { Splitter4dDim1EndToEnd(defaultBackends); } -BOOST_AUTO_TEST_CASE(RefSplitter4dDim2EndToEndUint8Test) +TEST_CASE("RefSplitter4dDim2EndToEndUint8Test") { Splitter4dDim2EndToEnd(defaultBackends); } -BOOST_AUTO_TEST_CASE(RefSplitter4dDim3EndToEndUint8Test) +TEST_CASE("RefSplitter4dDim3EndToEndUint8Test") { Splitter4dDim3EndToEnd(defaultBackends); } // TransposeConvolution2d -BOOST_AUTO_TEST_CASE(RefTransposeConvolution2dEndToEndFloatNchwTest) +TEST_CASE("RefTransposeConvolution2dEndToEndFloatNchwTest") { TransposeConvolution2dEndToEnd( defaultBackends, armnn::DataLayout::NCHW); } -BOOST_AUTO_TEST_CASE(RefTransposeConvolution2dEndToEndUint8NchwTest) +TEST_CASE("RefTransposeConvolution2dEndToEndUint8NchwTest") { TransposeConvolution2dEndToEnd( defaultBackends, armnn::DataLayout::NCHW); } -BOOST_AUTO_TEST_CASE(RefTransposeConvolution2dEndToEndInt16NchwTest) 
+TEST_CASE("RefTransposeConvolution2dEndToEndInt16NchwTest") { TransposeConvolution2dEndToEnd( defaultBackends, armnn::DataLayout::NCHW); } -BOOST_AUTO_TEST_CASE(RefTransposeConvolution2dEndToEndFloatNhwcTest) +TEST_CASE("RefTransposeConvolution2dEndToEndFloatNhwcTest") { TransposeConvolution2dEndToEnd( defaultBackends, armnn::DataLayout::NHWC); } -BOOST_AUTO_TEST_CASE(RefTransposeConvolution2dEndToEndUint8NhwcTest) +TEST_CASE("RefTransposeConvolution2dEndToEndUint8NhwcTest") { TransposeConvolution2dEndToEnd( defaultBackends, armnn::DataLayout::NHWC); } -BOOST_AUTO_TEST_CASE(RefTransposeConvolution2dEndToEndInt16NhwcTest) +TEST_CASE("RefTransposeConvolution2dEndToEndInt16NhwcTest") { TransposeConvolution2dEndToEnd( defaultBackends, armnn::DataLayout::NHWC); } // Resize Bilinear -BOOST_AUTO_TEST_CASE(RefResizeBilinearEndToEndFloatNchwTest) +TEST_CASE("RefResizeBilinearEndToEndFloatNchwTest") { ResizeBilinearEndToEnd(defaultBackends, armnn::DataLayout::NCHW); } -BOOST_AUTO_TEST_CASE(RefResizeBilinearEndToEndUint8NchwTest) +TEST_CASE("RefResizeBilinearEndToEndUint8NchwTest") { ResizeBilinearEndToEnd(defaultBackends, armnn::DataLayout::NCHW); } -BOOST_AUTO_TEST_CASE(RefResizeBilinearEndToEndInt16NchwTest) +TEST_CASE("RefResizeBilinearEndToEndInt16NchwTest") { ResizeBilinearEndToEnd(defaultBackends, armnn::DataLayout::NCHW); } -BOOST_AUTO_TEST_CASE(RefResizeBilinearEndToEndFloatNhwcTest) +TEST_CASE("RefResizeBilinearEndToEndFloatNhwcTest") { ResizeBilinearEndToEnd(defaultBackends, armnn::DataLayout::NHWC); } -BOOST_AUTO_TEST_CASE(RefResizeBilinearEndToEndUint8NhwcTest) +TEST_CASE("RefResizeBilinearEndToEndUint8NhwcTest") { ResizeBilinearEndToEnd(defaultBackends, armnn::DataLayout::NHWC); } -BOOST_AUTO_TEST_CASE(RefResizeBilinearEndToEndInt16NhwcTest) +TEST_CASE("RefResizeBilinearEndToEndInt16NhwcTest") { ResizeBilinearEndToEnd(defaultBackends, armnn::DataLayout::NHWC); } // Resize NearestNeighbor -BOOST_AUTO_TEST_CASE(RefResizeNearestNeighborEndToEndFloatNchwTest) +TEST_CASE("RefResizeNearestNeighborEndToEndFloatNchwTest") { ResizeNearestNeighborEndToEnd(defaultBackends, armnn::DataLayout::NCHW); } -BOOST_AUTO_TEST_CASE(RefResizeNearestNeighborEndToEndUint8NchwTest) +TEST_CASE("RefResizeNearestNeighborEndToEndUint8NchwTest") { ResizeNearestNeighborEndToEnd(defaultBackends, armnn::DataLayout::NCHW); } -BOOST_AUTO_TEST_CASE(RefResizeNearestNeighborEndToEndInt16NchwTest) +TEST_CASE("RefResizeNearestNeighborEndToEndInt16NchwTest") { ResizeNearestNeighborEndToEnd(defaultBackends, armnn::DataLayout::NCHW); } -BOOST_AUTO_TEST_CASE(RefResizeNearestNeighborEndToEndFloatNhwcTest) +TEST_CASE("RefResizeNearestNeighborEndToEndFloatNhwcTest") { ResizeNearestNeighborEndToEnd(defaultBackends, armnn::DataLayout::NHWC); } -BOOST_AUTO_TEST_CASE(RefResizeNearestNeighborEndToEndUint8NhwcTest) +TEST_CASE("RefResizeNearestNeighborEndToEndUint8NhwcTest") { ResizeNearestNeighborEndToEnd(defaultBackends, armnn::DataLayout::NHWC); } -BOOST_AUTO_TEST_CASE(RefResizeNearestNeighborEndToEndInt16NhwcTest) +TEST_CASE("RefResizeNearestNeighborEndToEndInt16NhwcTest") { ResizeNearestNeighborEndToEnd(defaultBackends, armnn::DataLayout::NHWC); } // InstanceNormalization -BOOST_AUTO_TEST_CASE(RefInstanceNormalizationNhwcEndToEndTest1) +TEST_CASE("RefInstanceNormalizationNhwcEndToEndTest1") { InstanceNormalizationNhwcEndToEndTest1(defaultBackends); } -BOOST_AUTO_TEST_CASE(RefInstanceNormalizationNchwEndToEndTest1) +TEST_CASE("RefInstanceNormalizationNchwEndToEndTest1") { InstanceNormalizationNchwEndToEndTest1(defaultBackends); } 
-BOOST_AUTO_TEST_CASE(RefInstanceNormalizationNhwcEndToEndTest2) +TEST_CASE("RefInstanceNormalizationNhwcEndToEndTest2") { InstanceNormalizationNhwcEndToEndTest2(defaultBackends); } -BOOST_AUTO_TEST_CASE(RefInstanceNormalizationNchwEndToEndTest2) +TEST_CASE("RefInstanceNormalizationNchwEndToEndTest2") { InstanceNormalizationNchwEndToEndTest2(defaultBackends); } // ArgMinMax -BOOST_AUTO_TEST_CASE(RefArgMaxSimpleTest) +TEST_CASE("RefArgMaxSimpleTest") { ArgMaxEndToEndSimple(defaultBackends); } -BOOST_AUTO_TEST_CASE(RefArgMaxSimpleUint8Test) +TEST_CASE("RefArgMaxSimpleUint8Test") { ArgMaxEndToEndSimple(defaultBackends); } -BOOST_AUTO_TEST_CASE(RefArgMinSimpleTest) +TEST_CASE("RefArgMinSimpleTest") { ArgMinEndToEndSimple(defaultBackends); } -BOOST_AUTO_TEST_CASE(RefArgMinSimpleUint8Test) +TEST_CASE("RefArgMinSimpleUint8Test") { ArgMinEndToEndSimple(defaultBackends); } -BOOST_AUTO_TEST_CASE(RefArgMaxAxis0Test) +TEST_CASE("RefArgMaxAxis0Test") { ArgMaxAxis0EndToEnd(defaultBackends); } -BOOST_AUTO_TEST_CASE(RefArgMaxAxis0Uint8Test) +TEST_CASE("RefArgMaxAxis0Uint8Test") { ArgMaxAxis0EndToEnd(defaultBackends); } -BOOST_AUTO_TEST_CASE(RefArgMinAxis0Test) +TEST_CASE("RefArgMinAxis0Test") { ArgMinAxis0EndToEnd(defaultBackends); } -BOOST_AUTO_TEST_CASE(RefArgMinAxis0Uint8Test) +TEST_CASE("RefArgMinAxis0Uint8Test") { ArgMinAxis0EndToEnd(defaultBackends); } -BOOST_AUTO_TEST_CASE(RefArgMaxAxis1Test) +TEST_CASE("RefArgMaxAxis1Test") { ArgMaxAxis1EndToEnd(defaultBackends); } -BOOST_AUTO_TEST_CASE(RefArgMaxAxis1Uint8Test) +TEST_CASE("RefArgMaxAxis1Uint8Test") { ArgMaxAxis1EndToEnd(defaultBackends); } -BOOST_AUTO_TEST_CASE(RefArgMinAxis1Test) +TEST_CASE("RefArgMinAxis1Test") { ArgMinAxis1EndToEnd(defaultBackends); } -BOOST_AUTO_TEST_CASE(RefArgMinAxis1Uint8Test) +TEST_CASE("RefArgMinAxis1Uint8Test") { ArgMinAxis1EndToEnd(defaultBackends); } -BOOST_AUTO_TEST_CASE(RefArgMaxAxis2Test) +TEST_CASE("RefArgMaxAxis2Test") { ArgMaxAxis2EndToEnd(defaultBackends); } -BOOST_AUTO_TEST_CASE(RefArgMaxAxis2Uint8Test) +TEST_CASE("RefArgMaxAxis2Uint8Test") { ArgMaxAxis2EndToEnd(defaultBackends); } -BOOST_AUTO_TEST_CASE(RefArgMinAxis2Test) +TEST_CASE("RefArgMinAxis2Test") { ArgMinAxis2EndToEnd(defaultBackends); } -BOOST_AUTO_TEST_CASE(RefArgMinAxis2Uint8Test) +TEST_CASE("RefArgMinAxis2Uint8Test") { ArgMinAxis2EndToEnd(defaultBackends); } -BOOST_AUTO_TEST_CASE(RefArgMaxAxis3Test) +TEST_CASE("RefArgMaxAxis3Test") { ArgMaxAxis3EndToEnd(defaultBackends); } -BOOST_AUTO_TEST_CASE(RefArgMaxAxis3Uint8Test) +TEST_CASE("RefArgMaxAxis3Uint8Test") { ArgMaxAxis3EndToEnd(defaultBackends); } -BOOST_AUTO_TEST_CASE(RefArgMinAxis3Test) +TEST_CASE("RefArgMinAxis3Test") { ArgMinAxis3EndToEnd(defaultBackends); } -BOOST_AUTO_TEST_CASE(RefArgMinAxis3Uint8Test) +TEST_CASE("RefArgMinAxis3Uint8Test") { ArgMinAxis3EndToEnd(defaultBackends); } -BOOST_AUTO_TEST_CASE(RefQLstmEndToEndTest) +TEST_CASE("RefQLstmEndToEndTest") { QLstmEndToEnd(defaultBackends); } -BOOST_AUTO_TEST_CASE(RefRankEndToEndTest) +TEST_CASE("RefRankEndToEndTest") { RankEndToEnd(defaultBackends); } -BOOST_AUTO_TEST_CASE(RefRankEndToEndTestFloat16) +TEST_CASE("RefRankEndToEndTestFloat16") { RankEndToEnd(defaultBackends); } -BOOST_AUTO_TEST_CASE(RefRankEndToEndTestInt32) +TEST_CASE("RefRankEndToEndTestInt32") { RankEndToEnd(defaultBackends); } -BOOST_AUTO_TEST_CASE(RefRankEndToEndTestQAsymmS8) +TEST_CASE("RefRankEndToEndTestQAsymmS8") { RankEndToEnd(defaultBackends); } -BOOST_AUTO_TEST_CASE(RefRankEndToEndTestQSymmS16) +TEST_CASE("RefRankEndToEndTestQSymmS16") { 
RankEndToEnd(defaultBackends); } -BOOST_AUTO_TEST_CASE(RefRankEndToEndTestQSymmS8) +TEST_CASE("RefRankEndToEndTestQSymmS8") { RankEndToEnd(defaultBackends); } #if !defined(__ANDROID__) // Only run these tests on non Android platforms -BOOST_AUTO_TEST_CASE(RefImportNonAlignedPointerTest) +TEST_CASE("RefImportNonAlignedPointerTest") { ImportNonAlignedInputPointerTest(defaultBackends); } -BOOST_AUTO_TEST_CASE(RefExportNonAlignedPointerTest) +TEST_CASE("RefExportNonAlignedPointerTest") { ExportNonAlignedOutputPointerTest(defaultBackends); } -BOOST_AUTO_TEST_CASE(RefImportAlignedPointerTest) +TEST_CASE("RefImportAlignedPointerTest") { ImportAlignedPointerTest(defaultBackends); } -BOOST_AUTO_TEST_CASE(RefImportOnlyWorkload) +TEST_CASE("RefImportOnlyWorkload") { ImportOnlyWorkload(defaultBackends); } -BOOST_AUTO_TEST_CASE(RefExportOnlyWorkload) +TEST_CASE("RefExportOnlyWorkload") { ExportOnlyWorkload(defaultBackends); } -BOOST_AUTO_TEST_CASE(RefImportAndExportWorkload) +TEST_CASE("RefImportAndExportWorkload") { ImportAndExportWorkload(defaultBackends); } -BOOST_AUTO_TEST_CASE(RefExportOutputWithSeveralOutputSlotConnectionsTest) +TEST_CASE("RefExportOutputWithSeveralOutputSlotConnectionsTest") { ExportOutputWithSeveralOutputSlotConnectionsTest(defaultBackends); } -BOOST_AUTO_TEST_CASE(RefStridedSliceInvalidSliceEndToEndTest) +TEST_CASE("RefStridedSliceInvalidSliceEndToEndTest") { StridedSliceInvalidSliceEndToEndTest(defaultBackends); } -BOOST_AUTO_TEST_CASE(RefThreadSafeFP32StridedSlicedEndToEndTest) +TEST_CASE("RefThreadSafeFP32StridedSlicedEndToEndTest") { armnn::experimental::StridedSlicedEndToEndTest(defaultBackends, 1); } -BOOST_AUTO_TEST_CASE(RefAsyncFP32StridedSlicedMultiThreadedEndToEndTest) +TEST_CASE("RefAsyncFP32StridedSlicedMultiThreadedEndToEndTest") { armnn::experimental::StridedSlicedMultiThreadedEndToEndTest(defaultBackends); } -BOOST_AUTO_TEST_CASE(RefAsyncFP32StridedSlicedScheduledMultiThreadedEndToEndTest) +TEST_CASE("RefAsyncFP32StridedSlicedScheduledMultiThreadedEndToEndTest") { armnn::experimental::StridedSlicedEndToEndTest(defaultBackends, 3); } #endif -BOOST_AUTO_TEST_SUITE_END() +} diff --git a/src/backends/reference/test/RefJsonPrinterTests.cpp b/src/backends/reference/test/RefJsonPrinterTests.cpp index ff604a7ade..15b591a242 100644 --- a/src/backends/reference/test/RefJsonPrinterTests.cpp +++ b/src/backends/reference/test/RefJsonPrinterTests.cpp @@ -7,16 +7,16 @@ #include -#include +#include #include -BOOST_AUTO_TEST_SUITE(RefJsonPrinter) - -BOOST_AUTO_TEST_CASE(SoftmaxProfilerJsonPrinterCpuRefTest) +TEST_SUITE("RefJsonPrinter") +{ +TEST_CASE("SoftmaxProfilerJsonPrinterCpuRefTest") { std::vector backends = {armnn::Compute::CpuRef}; RunSoftmaxProfilerJsonPrinterTest(backends); } -BOOST_AUTO_TEST_SUITE_END() \ No newline at end of file +} \ No newline at end of file diff --git a/src/backends/reference/test/RefLayerSupportTests.cpp b/src/backends/reference/test/RefLayerSupportTests.cpp index a1487061b6..1adc54e990 100644 --- a/src/backends/reference/test/RefLayerSupportTests.cpp +++ b/src/backends/reference/test/RefLayerSupportTests.cpp @@ -13,7 +13,7 @@ #include #include -#include +#include #include @@ -27,13 +27,14 @@ bool LayerTypeMatchesTest() } // anonymous namespace -BOOST_AUTO_TEST_SUITE(RefLayerSupported) - -BOOST_AUTO_TEST_CASE(IsLayerSupportedLayerTypeMatches) +TEST_SUITE("RefLayerSupported") +{ +TEST_CASE("IsLayerSupportedLayerTypeMatches") { LayerTypeMatchesTest(); } -BOOST_AUTO_TEST_CASE(IsLayerSupportedReferenceAddition) + 
+TEST_CASE("IsLayerSupportedReferenceAddition") { armnn::TensorShape shape0 = {1,1,3,4}; armnn::TensorShape shape1 = {4}; @@ -44,232 +45,232 @@ BOOST_AUTO_TEST_CASE(IsLayerSupportedReferenceAddition) armnn::RefLayerSupport supportChecker; std::string reasonNotSupported; - BOOST_CHECK(supportChecker.IsAdditionSupported(in0, in1, out, reasonNotSupported)); + CHECK(supportChecker.IsAdditionSupported(in0, in1, out, reasonNotSupported)); } -BOOST_AUTO_TEST_CASE(IsLayerSupportedBFloat16Reference) +TEST_CASE("IsLayerSupportedBFloat16Reference") { armnn::RefWorkloadFactory factory; IsLayerSupportedTests(&factory); } -BOOST_AUTO_TEST_CASE(IsLayerSupportedFloat16Reference) +TEST_CASE("IsLayerSupportedFloat16Reference") { armnn::RefWorkloadFactory factory; IsLayerSupportedTests(&factory); } -BOOST_AUTO_TEST_CASE(IsLayerSupportedFloat32Reference) +TEST_CASE("IsLayerSupportedFloat32Reference") { armnn::RefWorkloadFactory factory; IsLayerSupportedTests(&factory); } -BOOST_AUTO_TEST_CASE(IsLayerSupportedUint8Reference) +TEST_CASE("IsLayerSupportedUint8Reference") { armnn::RefWorkloadFactory factory; IsLayerSupportedTests(&factory); } -BOOST_AUTO_TEST_CASE(IsLayerSupportedInt8Reference) +TEST_CASE("IsLayerSupportedInt8Reference") { armnn::RefWorkloadFactory factory; IsLayerSupportedTests(&factory); } -BOOST_AUTO_TEST_CASE(IsLayerSupportedInt16Reference) +TEST_CASE("IsLayerSupportedInt16Reference") { armnn::RefWorkloadFactory factory; IsLayerSupportedTests(&factory); } -BOOST_AUTO_TEST_CASE(IsConvertFp16ToFp32SupportedReference) +TEST_CASE("IsConvertFp16ToFp32SupportedReference") { std::string reasonIfUnsupported; bool result = IsConvertLayerSupportedTests(reasonIfUnsupported); - BOOST_CHECK(result); + CHECK(result); } -BOOST_AUTO_TEST_CASE(IsConvertFp16ToFp32SupportedFp32InputReference) +TEST_CASE("IsConvertFp16ToFp32SupportedFp32InputReference") { std::string reasonIfUnsupported; bool result = IsConvertLayerSupportedTests(reasonIfUnsupported); - BOOST_CHECK(!result); - BOOST_CHECK_EQUAL(reasonIfUnsupported, "Layer is not supported with float32 data type input"); + CHECK(!result); + CHECK_EQ(reasonIfUnsupported, "Layer is not supported with float32 data type input"); } -BOOST_AUTO_TEST_CASE(IsConvertFp16ToFp32SupportedFp16OutputReference) +TEST_CASE("IsConvertFp16ToFp32SupportedFp16OutputReference") { std::string reasonIfUnsupported; bool result = IsConvertLayerSupportedTests(reasonIfUnsupported); - BOOST_CHECK(!result); - BOOST_CHECK_EQUAL(reasonIfUnsupported, "Layer is not supported with float16 data type output"); + CHECK(!result); + CHECK_EQ(reasonIfUnsupported, "Layer is not supported with float16 data type output"); } -BOOST_AUTO_TEST_CASE(IsConvertBf16ToFp32SupportedReference) +TEST_CASE("IsConvertBf16ToFp32SupportedReference") { std::string reasonIfUnsupported; bool result = IsConvertLayerSupportedTests(reasonIfUnsupported); - BOOST_CHECK(result); + CHECK(result); } -BOOST_AUTO_TEST_CASE(IsConvertBf16ToFp32SupportedFp32InputReference) +TEST_CASE("IsConvertBf16ToFp32SupportedFp32InputReference") { std::string reasonIfUnsupported; bool result = IsConvertLayerSupportedTests(reasonIfUnsupported); - BOOST_CHECK(!result); - BOOST_CHECK_EQUAL(reasonIfUnsupported, "Reference for ConvertBf16ToFp32 layer: input type not supported\n"); + CHECK(!result); + CHECK_EQ(reasonIfUnsupported, "Reference for ConvertBf16ToFp32 layer: input type not supported\n"); } -BOOST_AUTO_TEST_CASE(IsConvertBf16ToFp32SupportedBf16OutputReference) +TEST_CASE("IsConvertBf16ToFp32SupportedBf16OutputReference") { std::string 
reasonIfUnsupported; bool result = IsConvertLayerSupportedTests(reasonIfUnsupported); - BOOST_CHECK(!result); - BOOST_CHECK_EQUAL(reasonIfUnsupported, "Reference for ConvertBf16ToFp32 layer: output type not supported\n"); + CHECK(!result); + CHECK_EQ(reasonIfUnsupported, "Reference for ConvertBf16ToFp32 layer: output type not supported\n"); } -BOOST_AUTO_TEST_CASE(IsConvertFp32ToBf16SupportedReference) +TEST_CASE("IsConvertFp32ToBf16SupportedReference") { std::string reasonIfUnsupported; bool result = IsConvertLayerSupportedTests(reasonIfUnsupported); - BOOST_CHECK(result); + CHECK(result); } -BOOST_AUTO_TEST_CASE(IsConvertFp32ToBf16SupportedBf16InputReference) +TEST_CASE("IsConvertFp32ToBf16SupportedBf16InputReference") { std::string reasonIfUnsupported; bool result = IsConvertLayerSupportedTests(reasonIfUnsupported); - BOOST_CHECK(!result); - BOOST_CHECK_EQUAL(reasonIfUnsupported, "Reference for ConvertFp32ToBf16 layer: input type not supported\n"); + CHECK(!result); + CHECK_EQ(reasonIfUnsupported, "Reference for ConvertFp32ToBf16 layer: input type not supported\n"); } -BOOST_AUTO_TEST_CASE(IsConvertFp32ToBf16SupportedFp32OutputReference) +TEST_CASE("IsConvertFp32ToBf16SupportedFp32OutputReference") { std::string reasonIfUnsupported; bool result = IsConvertLayerSupportedTests(reasonIfUnsupported); - BOOST_CHECK(!result); - BOOST_CHECK_EQUAL(reasonIfUnsupported, "Reference for ConvertFp32ToBf16 layer: output type not supported\n"); + CHECK(!result); + CHECK_EQ(reasonIfUnsupported, "Reference for ConvertFp32ToBf16 layer: output type not supported\n"); } -BOOST_AUTO_TEST_CASE(IsConvertFp32ToFp16SupportedReference) +TEST_CASE("IsConvertFp32ToFp16SupportedReference") { std::string reasonIfUnsupported; bool result = IsConvertLayerSupportedTests(reasonIfUnsupported); - BOOST_CHECK(result); + CHECK(result); } -BOOST_AUTO_TEST_CASE(IsConvertFp32ToFp16SupportedFp16InputReference) +TEST_CASE("IsConvertFp32ToFp16SupportedFp16InputReference") { std::string reasonIfUnsupported; bool result = IsConvertLayerSupportedTests(reasonIfUnsupported); - BOOST_CHECK(!result); - BOOST_CHECK_EQUAL(reasonIfUnsupported, "Layer is not supported with float16 data type input"); + CHECK(!result); + CHECK_EQ(reasonIfUnsupported, "Layer is not supported with float16 data type input"); } -BOOST_AUTO_TEST_CASE(IsConvertFp32ToFp16SupportedFp32OutputReference) +TEST_CASE("IsConvertFp32ToFp16SupportedFp32OutputReference") { std::string reasonIfUnsupported; bool result = IsConvertLayerSupportedTests(reasonIfUnsupported); - BOOST_CHECK(!result); - BOOST_CHECK_EQUAL(reasonIfUnsupported, "Layer is not supported with float32 data type output"); + CHECK(!result); + CHECK_EQ(reasonIfUnsupported, "Layer is not supported with float32 data type output"); } -BOOST_AUTO_TEST_CASE(IsLayerSupportedMeanDimensionsReference) +TEST_CASE("IsLayerSupportedMeanDimensionsReference") { std::string reasonIfUnsupported; bool result = IsMeanLayerSupportedTests(reasonIfUnsupported); - BOOST_CHECK(result); + CHECK(result); } -BOOST_AUTO_TEST_CASE(IsLayerNotSupportedMeanDimensionsReference) +TEST_CASE("IsLayerNotSupportedMeanDimensionsReference") { std::string reasonIfUnsupported; bool result = IsMeanLayerNotSupportedTests(reasonIfUnsupported); - BOOST_CHECK(!result); + CHECK(!result); - BOOST_CHECK(reasonIfUnsupported.find( + CHECK(reasonIfUnsupported.find( "Reference Mean: Expected 4 dimensions but got 2 dimensions instead, for the 'output' tensor.") != std::string::npos); } -BOOST_AUTO_TEST_CASE(IsConstantSupportedRef) 
+TEST_CASE("IsConstantSupportedRef") { std::string reasonIfUnsupported; bool result = IsConstantLayerSupportedTests(reasonIfUnsupported); - BOOST_CHECK(result); + CHECK(result); result = IsConstantLayerSupportedTests(reasonIfUnsupported); - BOOST_CHECK(result); + CHECK(result); result = IsConstantLayerSupportedTests(reasonIfUnsupported); - BOOST_CHECK(result); + CHECK(result); result = IsConstantLayerSupportedTests(reasonIfUnsupported); - BOOST_CHECK(!result); + CHECK(!result); result = IsConstantLayerSupportedTests(reasonIfUnsupported); - BOOST_CHECK(result); + CHECK(result); result = IsConstantLayerSupportedTests(reasonIfUnsupported); - BOOST_CHECK(result); + CHECK(result); result = IsConstantLayerSupportedTests(reasonIfUnsupported); - BOOST_CHECK(result); + CHECK(result); result = IsConstantLayerSupportedTests(reasonIfUnsupported); - BOOST_CHECK(result); + CHECK(result); } -BOOST_AUTO_TEST_SUITE_END() +} diff --git a/src/backends/reference/test/RefLayerTests.cpp b/src/backends/reference/test/RefLayerTests.cpp index df48877108..6bc6f8aa05 100644 --- a/src/backends/reference/test/RefLayerTests.cpp +++ b/src/backends/reference/test/RefLayerTests.cpp @@ -11,9 +11,8 @@ #include -#include - -BOOST_AUTO_TEST_SUITE(Compute_Reference) +TEST_SUITE("Compute_Reference") +{ using namespace armnn; @@ -1437,15 +1436,15 @@ ARMNN_AUTO_TEST_CASE_WITH_THF(PermuteQSymm16ValueSet2Test, PermuteValueSet2Test< ARMNN_AUTO_TEST_CASE_WITH_THF(PermuteQSymm16ValueSet3Test, PermuteValueSet3Test) // Lstm -BOOST_AUTO_TEST_CASE(LstmUtilsZeroVector) { +TEST_CASE("LstmUtilsZeroVector") { LstmUtilsZeroVectorTest(); } -BOOST_AUTO_TEST_CASE(LstmUtilsMeanStddevNormalization) { +TEST_CASE("LstmUtilsMeanStddevNormalization") { LstmUtilsMeanStddevNormalizationNoneZeroInputTest(); LstmUtilsMeanStddevNormalizationAllZeroInputTest(); LstmUtilsMeanStddevNormalizationMixedZeroInputTest(); } -BOOST_AUTO_TEST_CASE(LstmUtilsVectorBatchVectorCwiseProduct) { +TEST_CASE("LstmUtilsVectorBatchVectorCwiseProduct") { LstmUtilsVectorBatchVectorCwiseProductTest(); } -BOOST_AUTO_TEST_CASE(LstmUtilsVectorBatchVectorAdd) { +TEST_CASE("LstmUtilsVectorBatchVectorAdd") { LstmUtilsVectorBatchVectorAddTest(); } ARMNN_AUTO_TEST_CASE_WITH_THF(LstmLayerFloat32WithCifgWithPeepholeNoProjection, @@ -1873,43 +1872,37 @@ ARMNN_AUTO_TEST_CASE_WITH_THF(Abs2dQuantisedSymm16, Abs2dTest) // Detection PostProcess -BOOST_AUTO_TEST_CASE(DetectionPostProcessRegularNmsFloat) +TEST_CASE("DetectionPostProcessRegularNmsFloat") { DetectionPostProcessRegularNmsFloatTest(); } -BOOST_AUTO_TEST_CASE(DetectionPostProcessFastNmsFloat) +TEST_CASE("DetectionPostProcessFastNmsFloat") { DetectionPostProcessFastNmsFloatTest(); } -BOOST_AUTO_TEST_CASE(DetectionPostProcessRegularNmsInt8) +TEST_CASE("DetectionPostProcessRegularNmsInt8") { - DetectionPostProcessRegularNmsQuantizedTest< - RefWorkloadFactory, DataType::QAsymmS8>(); + DetectionPostProcessRegularNmsQuantizedTest(); } -BOOST_AUTO_TEST_CASE(DetectionPostProcessFastNmsInt8) +TEST_CASE("DetectionPostProcessFastNmsInt8") { - DetectionPostProcessRegularNmsQuantizedTest< - RefWorkloadFactory, DataType::QAsymmS8>(); + DetectionPostProcessRegularNmsQuantizedTest(); } -BOOST_AUTO_TEST_CASE(DetectionPostProcessRegularNmsUint8) +TEST_CASE("DetectionPostProcessRegularNmsUint8") { - DetectionPostProcessRegularNmsQuantizedTest< - RefWorkloadFactory, DataType::QAsymmU8>(); + DetectionPostProcessRegularNmsQuantizedTest(); } -BOOST_AUTO_TEST_CASE(DetectionPostProcessFastNmsUint8) +TEST_CASE("DetectionPostProcessFastNmsUint8") { - 
DetectionPostProcessRegularNmsQuantizedTest< - RefWorkloadFactory, DataType::QAsymmU8>(); + DetectionPostProcessRegularNmsQuantizedTest(); } -BOOST_AUTO_TEST_CASE(DetectionPostProcessRegularNmsInt16) +TEST_CASE("DetectionPostProcessRegularNmsInt16") { - DetectionPostProcessRegularNmsQuantizedTest< - RefWorkloadFactory, DataType::QSymmS16>(); + DetectionPostProcessRegularNmsQuantizedTest(); } -BOOST_AUTO_TEST_CASE(DetectionPostProcessFastNmsInt16) +TEST_CASE("DetectionPostProcessFastNmsInt16") { - DetectionPostProcessFastNmsQuantizedTest< - RefWorkloadFactory, DataType::QSymmS16>(); + DetectionPostProcessFastNmsQuantizedTest(); } // Dequantize @@ -2271,4 +2264,4 @@ ARMNN_AUTO_TEST_CASE_WITH_THF(ReduceMax2Float32, ReduceMaxSimpleTest2) ARMNN_AUTO_TEST_CASE_WITH_THF(ReduceMinNegativeAxisFloat32, ReduceMinNegativeAxisTest) -BOOST_AUTO_TEST_SUITE_END() +} \ No newline at end of file diff --git a/src/backends/reference/test/RefMemoryManagerTests.cpp b/src/backends/reference/test/RefMemoryManagerTests.cpp index 15b7c2af4f..960e7ccad5 100644 --- a/src/backends/reference/test/RefMemoryManagerTests.cpp +++ b/src/backends/reference/test/RefMemoryManagerTests.cpp @@ -5,47 +5,48 @@ #include -#include +#include -BOOST_AUTO_TEST_SUITE(RefMemoryManagerTests) +TEST_SUITE("RefMemoryManagerTests") +{ using namespace armnn; using Pool = RefMemoryManager::Pool; -BOOST_AUTO_TEST_CASE(ManageOneThing) +TEST_CASE("ManageOneThing") { RefMemoryManager memoryManager; Pool* pool = memoryManager.Manage(10); - BOOST_CHECK(pool); + CHECK(pool); memoryManager.Acquire(); - BOOST_CHECK(memoryManager.GetPointer(pool) != nullptr); // Yields a valid pointer + CHECK(memoryManager.GetPointer(pool) != nullptr); // Yields a valid pointer memoryManager.Release(); } -BOOST_AUTO_TEST_CASE(ManageTwoThings) +TEST_CASE("ManageTwoThings") { RefMemoryManager memoryManager; Pool* pool1 = memoryManager.Manage(10); Pool* pool2 = memoryManager.Manage(5); - BOOST_CHECK(pool1); - BOOST_CHECK(pool2); + CHECK(pool1); + CHECK(pool2); memoryManager.Acquire(); void *p1 = memoryManager.GetPointer(pool1); void *p2 = memoryManager.GetPointer(pool2); - BOOST_CHECK(p1); - BOOST_CHECK(p2); - BOOST_CHECK(p1 != p2); + CHECK(p1); + CHECK(p2); + CHECK(p1 != p2); memoryManager.Release(); } -BOOST_AUTO_TEST_SUITE_END() +} diff --git a/src/backends/reference/test/RefOptimizedNetworkTests.cpp b/src/backends/reference/test/RefOptimizedNetworkTests.cpp index 2f25b6cd4d..578d667983 100644 --- a/src/backends/reference/test/RefOptimizedNetworkTests.cpp +++ b/src/backends/reference/test/RefOptimizedNetworkTests.cpp @@ -7,13 +7,13 @@ #include #include - -#include #include -BOOST_AUTO_TEST_SUITE(RefOptimizedNetwork) +#include -BOOST_AUTO_TEST_CASE(OptimizeValidateCpuRefWorkloads) +TEST_SUITE("RefOptimizedNetwork") +{ +TEST_CASE("OptimizeValidateCpuRefWorkloads") { const armnn::TensorInfo desc({3, 5}, armnn::DataType::Float32); @@ -73,17 +73,17 @@ BOOST_AUTO_TEST_CASE(OptimizeValidateCpuRefWorkloads) armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec()); armnn::Graph& graph = GetGraphForTesting(optNet.get()); graph.AllocateDynamicBuffers(); - BOOST_CHECK(optNet); + CHECK(optNet); // Validates workloads. 
armnn::RefWorkloadFactory fact; for (auto&& layer : graph) { - BOOST_CHECK_NO_THROW(layer->CreateWorkload(fact)); + CHECK_NOTHROW(layer->CreateWorkload(fact)); } } -BOOST_AUTO_TEST_CASE(OptimizeValidateWorkloadsCpuRefPermuteLayer) +TEST_CASE("OptimizeValidateWorkloadsCpuRefPermuteLayer") { // Create runtime in which test will run armnn::IRuntime::CreationOptions options; @@ -115,11 +115,11 @@ BOOST_AUTO_TEST_CASE(OptimizeValidateWorkloadsCpuRefPermuteLayer) for (auto&& layer : graph) { - BOOST_CHECK(layer->GetBackendId() == armnn::Compute::CpuRef); + CHECK(layer->GetBackendId() == armnn::Compute::CpuRef); } } -BOOST_AUTO_TEST_CASE(OptimizeValidateWorkloadsCpuRefMeanLayer) +TEST_CASE("OptimizeValidateWorkloadsCpuRefMeanLayer") { // Create runtime in which test will run armnn::IRuntime::CreationOptions options; @@ -149,11 +149,11 @@ BOOST_AUTO_TEST_CASE(OptimizeValidateWorkloadsCpuRefMeanLayer) graph.AllocateDynamicBuffers(); for (auto&& layer : graph) { - BOOST_CHECK(layer->GetBackendId() == armnn::Compute::CpuRef); + CHECK(layer->GetBackendId() == armnn::Compute::CpuRef); } } -BOOST_AUTO_TEST_CASE(DebugTestOnCpuRef) +TEST_CASE("DebugTestOnCpuRef") { // build up the structure of the network armnn::INetworkPtr net(armnn::INetwork::Create()); @@ -192,14 +192,14 @@ BOOST_AUTO_TEST_CASE(DebugTestOnCpuRef) graph.AllocateDynamicBuffers(); // Tests that all layers are present in the graph. - BOOST_TEST(graph.GetNumLayers() == 5); + CHECK(graph.GetNumLayers() == 5); // Tests that the vertices exist and have correct names. - BOOST_TEST(GraphHasNamedLayer(graph, "InputLayer")); - BOOST_TEST(GraphHasNamedLayer(graph, "DebugLayerAfterInputLayer_0")); - BOOST_TEST(GraphHasNamedLayer(graph, "ActivationLayer")); - BOOST_TEST(GraphHasNamedLayer(graph, "DebugLayerAfterActivationLayer_0")); - BOOST_TEST(GraphHasNamedLayer(graph, "OutputLayer")); + CHECK(GraphHasNamedLayer(graph, "InputLayer")); + CHECK(GraphHasNamedLayer(graph, "DebugLayerAfterInputLayer_0")); + CHECK(GraphHasNamedLayer(graph, "ActivationLayer")); + CHECK(GraphHasNamedLayer(graph, "DebugLayerAfterActivationLayer_0")); + CHECK(GraphHasNamedLayer(graph, "OutputLayer")); } -BOOST_AUTO_TEST_SUITE_END() +} diff --git a/src/backends/reference/test/RefRuntimeTests.cpp b/src/backends/reference/test/RefRuntimeTests.cpp index 17d5816b9b..6fd4910d24 100644 --- a/src/backends/reference/test/RefRuntimeTests.cpp +++ b/src/backends/reference/test/RefRuntimeTests.cpp @@ -9,14 +9,15 @@ #include -#include +#include -BOOST_AUTO_TEST_SUITE(RefRuntime) #ifdef ARMNN_LEAK_CHECKING_ENABLED -BOOST_AUTO_TEST_CASE(RuntimeMemoryLeaksCpuRef) +TEST_SUITE("RefRuntime") { - BOOST_TEST(ARMNN_LEAK_CHECKER_IS_ACTIVE()); +TEST_CASE("RuntimeMemoryLeaksCpuRef") +{ + CHECK(ARMNN_LEAK_CHECKER_IS_ACTIVE()); armnn::IRuntime::CreationOptions options; armnn::RuntimeImpl runtime(options); @@ -31,16 +32,16 @@ BOOST_AUTO_TEST_CASE(RuntimeMemoryLeaksCpuRef) { ARMNN_SCOPED_LEAK_CHECKER("LoadAndUnloadNetworkCpuRef"); - BOOST_TEST(ARMNN_NO_LEAKS_IN_SCOPE()); + CHECK(ARMNN_NO_LEAKS_IN_SCOPE()); // In the second run we check for all remaining memory // in use after the network was unloaded. If there is any // then it will be treated as a memory leak. 
CreateAndDropDummyNetwork(backends, runtime); - BOOST_TEST(ARMNN_NO_LEAKS_IN_SCOPE()); - BOOST_TEST(ARMNN_BYTES_LEAKED_IN_SCOPE() == 0); - BOOST_TEST(ARMNN_OBJECTS_LEAKED_IN_SCOPE() == 0); + CHECK(ARMNN_NO_LEAKS_IN_SCOPE()); + CHECK(ARMNN_BYTES_LEAKED_IN_SCOPE() == 0); + CHECK(ARMNN_OBJECTS_LEAKED_IN_SCOPE() == 0); } } +} #endif -BOOST_AUTO_TEST_SUITE_END() \ No newline at end of file diff --git a/src/backends/reference/test/RefTensorHandleTests.cpp b/src/backends/reference/test/RefTensorHandleTests.cpp index dadd1de1f2..39f5a2aeed 100644 --- a/src/backends/reference/test/RefTensorHandleTests.cpp +++ b/src/backends/reference/test/RefTensorHandleTests.cpp @@ -5,12 +5,13 @@ #include #include -#include +#include -BOOST_AUTO_TEST_SUITE(RefTensorHandleTests) +TEST_SUITE("RefTensorHandleTests") +{ using namespace armnn; -BOOST_AUTO_TEST_CASE(AcquireAndRelease) +TEST_CASE("AcquireAndRelease") { std::shared_ptr memoryManager = std::make_shared(); @@ -24,11 +25,11 @@ BOOST_AUTO_TEST_CASE(AcquireAndRelease) { float* buffer = reinterpret_cast(handle.Map()); - BOOST_CHECK(buffer != nullptr); // Yields a valid pointer + CHECK(buffer != nullptr); // Yields a valid pointer buffer[0] = 2.5f; - BOOST_CHECK(buffer[0] == 2.5f); // Memory is writable and readable + CHECK(buffer[0] == 2.5f); // Memory is writable and readable } memoryManager->Release(); @@ -37,16 +38,16 @@ BOOST_AUTO_TEST_CASE(AcquireAndRelease) { float* buffer = reinterpret_cast(handle.Map()); - BOOST_CHECK(buffer != nullptr); // Yields a valid pointer + CHECK(buffer != nullptr); // Yields a valid pointer buffer[0] = 3.5f; - BOOST_CHECK(buffer[0] == 3.5f); // Memory is writable and readable + CHECK(buffer[0] == 3.5f); // Memory is writable and readable } memoryManager->Release(); } -BOOST_AUTO_TEST_CASE(RefTensorHandleFactoryMemoryManaged) +TEST_CASE("RefTensorHandleFactoryMemoryManaged") { std::shared_ptr memoryManager = std::make_shared(); RefTensorHandleFactory handleFactory(memoryManager); @@ -60,31 +61,31 @@ BOOST_AUTO_TEST_CASE(RefTensorHandleFactoryMemoryManaged) memoryManager->Acquire(); { float* buffer = reinterpret_cast(handle->Map()); - BOOST_CHECK(buffer != nullptr); // Yields a valid pointer + CHECK(buffer != nullptr); // Yields a valid pointer buffer[0] = 1.5f; buffer[1] = 2.5f; - BOOST_CHECK(buffer[0] == 1.5f); // Memory is writable and readable - BOOST_CHECK(buffer[1] == 2.5f); // Memory is writable and readable + CHECK(buffer[0] == 1.5f); // Memory is writable and readable + CHECK(buffer[1] == 2.5f); // Memory is writable and readable } memoryManager->Release(); memoryManager->Acquire(); { float* buffer = reinterpret_cast(handle->Map()); - BOOST_CHECK(buffer != nullptr); // Yields a valid pointer + CHECK(buffer != nullptr); // Yields a valid pointer buffer[0] = 3.5f; buffer[1] = 4.5f; - BOOST_CHECK(buffer[0] == 3.5f); // Memory is writable and readable - BOOST_CHECK(buffer[1] == 4.5f); // Memory is writable and readable + CHECK(buffer[0] == 3.5f); // Memory is writable and readable + CHECK(buffer[1] == 4.5f); // Memory is writable and readable } memoryManager->Release(); float testPtr[2] = { 2.5f, 5.5f }; // Cannot import as import is disabled - BOOST_CHECK(!handle->Import(static_cast(testPtr), MemorySource::Malloc)); + CHECK(!handle->Import(static_cast(testPtr), MemorySource::Malloc)); } -BOOST_AUTO_TEST_CASE(RefTensorHandleFactoryImport) +TEST_CASE("RefTensorHandleFactoryImport") { std::shared_ptr memoryManager = std::make_shared(); RefTensorHandleFactory handleFactory(memoryManager); @@ -97,25 +98,25 @@ 
BOOST_AUTO_TEST_CASE(RefTensorHandleFactoryImport) memoryManager->Acquire(); // No buffer allocated when import is enabled - BOOST_CHECK_THROW(handle->Map(), armnn::NullPointerException); + CHECK_THROWS_AS(handle->Map(), armnn::NullPointerException); float testPtr[2] = { 2.5f, 5.5f }; // Correctly import - BOOST_CHECK(handle->Import(static_cast(testPtr), MemorySource::Malloc)); + CHECK(handle->Import(static_cast(testPtr), MemorySource::Malloc)); float* buffer = reinterpret_cast(handle->Map()); - BOOST_CHECK(buffer != nullptr); // Yields a valid pointer after import - BOOST_CHECK(buffer == testPtr); // buffer is pointing to testPtr + CHECK(buffer != nullptr); // Yields a valid pointer after import + CHECK(buffer == testPtr); // buffer is pointing to testPtr // Memory is writable and readable with correct value - BOOST_CHECK(buffer[0] == 2.5f); - BOOST_CHECK(buffer[1] == 5.5f); + CHECK(buffer[0] == 2.5f); + CHECK(buffer[1] == 5.5f); buffer[0] = 3.5f; buffer[1] = 10.0f; - BOOST_CHECK(buffer[0] == 3.5f); - BOOST_CHECK(buffer[1] == 10.0f); + CHECK(buffer[0] == 3.5f); + CHECK(buffer[1] == 10.0f); memoryManager->Release(); } -BOOST_AUTO_TEST_CASE(RefTensorHandleImport) +TEST_CASE("RefTensorHandleImport") { TensorInfo info({ 1, 1, 2, 1 }, DataType::Float32); RefTensorHandle handle(info, static_cast(MemorySource::Malloc)); @@ -124,24 +125,24 @@ BOOST_AUTO_TEST_CASE(RefTensorHandleImport) handle.Allocate(); // No buffer allocated when import is enabled - BOOST_CHECK_THROW(handle.Map(), armnn::NullPointerException); + CHECK_THROWS_AS(handle.Map(), armnn::NullPointerException); float testPtr[2] = { 2.5f, 5.5f }; // Correctly import - BOOST_CHECK(handle.Import(static_cast(testPtr), MemorySource::Malloc)); + CHECK(handle.Import(static_cast(testPtr), MemorySource::Malloc)); float* buffer = reinterpret_cast(handle.Map()); - BOOST_CHECK(buffer != nullptr); // Yields a valid pointer after import - BOOST_CHECK(buffer == testPtr); // buffer is pointing to testPtr + CHECK(buffer != nullptr); // Yields a valid pointer after import + CHECK(buffer == testPtr); // buffer is pointing to testPtr // Memory is writable and readable with correct value - BOOST_CHECK(buffer[0] == 2.5f); - BOOST_CHECK(buffer[1] == 5.5f); + CHECK(buffer[0] == 2.5f); + CHECK(buffer[1] == 5.5f); buffer[0] = 3.5f; buffer[1] = 10.0f; - BOOST_CHECK(buffer[0] == 3.5f); - BOOST_CHECK(buffer[1] == 10.0f); + CHECK(buffer[0] == 3.5f); + CHECK(buffer[1] == 10.0f); } -BOOST_AUTO_TEST_CASE(RefTensorHandleGetCapabilities) +TEST_CASE("RefTensorHandleGetCapabilities") { std::shared_ptr memoryManager = std::make_shared(); RefTensorHandleFactory handleFactory(memoryManager); @@ -155,10 +156,10 @@ BOOST_AUTO_TEST_CASE(RefTensorHandleGetCapabilities) std::vector capabilities = handleFactory.GetCapabilities(input, output, CapabilityClass::PaddingRequired); - BOOST_CHECK(capabilities.empty()); + CHECK(capabilities.empty()); } -BOOST_AUTO_TEST_CASE(RefTensorHandleSupportsInPlaceComputation) +TEST_CASE("RefTensorHandleSupportsInPlaceComputation") { std::shared_ptr memoryManager = std::make_shared(); RefTensorHandleFactory handleFactory(memoryManager); @@ -167,7 +168,7 @@ BOOST_AUTO_TEST_CASE(RefTensorHandleSupportsInPlaceComputation) ARMNN_ASSERT(!(handleFactory.SupportsInPlaceComputation())); } -BOOST_AUTO_TEST_CASE(TestManagedConstTensorHandle) +TEST_CASE("TestManagedConstTensorHandle") { // Initialize arguments void* mem = nullptr; @@ -178,31 +179,31 @@ BOOST_AUTO_TEST_CASE(TestManagedConstTensorHandle) // Test managed handle is initialized with m_Mapped unset 
and once Map() called its set
     ManagedConstTensorHandle managedHandle(passThroughHandle);
-    BOOST_CHECK(!managedHandle.IsMapped());
+    CHECK(!managedHandle.IsMapped());
     managedHandle.Map();
-    BOOST_CHECK(managedHandle.IsMapped());
+    CHECK(managedHandle.IsMapped());
     // Test it can then be unmapped
     managedHandle.Unmap();
-    BOOST_CHECK(!managedHandle.IsMapped());
+    CHECK(!managedHandle.IsMapped());
     // Test member function
-    BOOST_CHECK(managedHandle.GetTensorInfo() == info);
+    CHECK(managedHandle.GetTensorInfo() == info);
     // Test that nullptr tensor handle doesn't get mapped
     ManagedConstTensorHandle managedHandleNull(nullptr);
-    BOOST_CHECK(!managedHandleNull.IsMapped());
-    BOOST_CHECK_THROW(managedHandleNull.Map(), armnn::Exception);
-    BOOST_CHECK(!managedHandleNull.IsMapped());
+    CHECK(!managedHandleNull.IsMapped());
+    CHECK_THROWS_AS(managedHandleNull.Map(), armnn::Exception);
+    CHECK(!managedHandleNull.IsMapped());
     // Check Unmap() when m_Mapped already false
     managedHandleNull.Unmap();
-    BOOST_CHECK(!managedHandleNull.IsMapped());
+    CHECK(!managedHandleNull.IsMapped());
 }
 #if !defined(__ANDROID__)
 // Only run these tests on non Android platforms
-BOOST_AUTO_TEST_CASE(CheckSourceType)
+TEST_CASE("CheckSourceType")
 {
     TensorInfo info({1}, DataType::Float32);
     RefTensorHandle handle(info, static_cast<unsigned int>(MemorySource::Malloc));
@@ -210,18 +211,18 @@ BOOST_AUTO_TEST_CASE(CheckSourceType)
     int* testPtr = new int(4);
     // Not supported
-    BOOST_CHECK(!handle.Import(static_cast<void*>(testPtr), MemorySource::DmaBuf));
+    CHECK(!handle.Import(static_cast<void*>(testPtr), MemorySource::DmaBuf));
     // Not supported
-    BOOST_CHECK(!handle.Import(static_cast<void*>(testPtr), MemorySource::DmaBufProtected));
+    CHECK(!handle.Import(static_cast<void*>(testPtr), MemorySource::DmaBufProtected));
     // Supported
-    BOOST_CHECK(handle.Import(static_cast<void*>(testPtr), MemorySource::Malloc));
+    CHECK(handle.Import(static_cast<void*>(testPtr), MemorySource::Malloc));
     delete testPtr;
 }
-BOOST_AUTO_TEST_CASE(ReusePointer)
+TEST_CASE("ReusePointer")
 {
     TensorInfo info({1}, DataType::Float32);
     RefTensorHandle handle(info, static_cast<unsigned int>(MemorySource::Malloc));
@@ -231,12 +232,12 @@ BOOST_AUTO_TEST_CASE(ReusePointer)
     handle.Import(static_cast<void*>(testPtr), MemorySource::Malloc);
     // Reusing previously Imported pointer
-    BOOST_CHECK(handle.Import(static_cast<void*>(testPtr), MemorySource::Malloc));
+    CHECK(handle.Import(static_cast<void*>(testPtr), MemorySource::Malloc));
     delete testPtr;
 }
-BOOST_AUTO_TEST_CASE(MisalignedPointer)
+TEST_CASE("MisalignedPointer")
 {
     TensorInfo info({2}, DataType::Float32);
     RefTensorHandle handle(info, static_cast<unsigned int>(MemorySource::Malloc));
@@ -247,11 +248,11 @@ BOOST_AUTO_TEST_CASE(MisalignedPointer)
     // Increment pointer by 1 byte
     void* misalignedPtr = static_cast<void*>(reinterpret_cast<char*>(testPtr) + 1);
-    BOOST_CHECK(!handle.Import(misalignedPtr, MemorySource::Malloc));
+    CHECK(!handle.Import(misalignedPtr, MemorySource::Malloc));
     delete[] testPtr;
 }
 #endif
-BOOST_AUTO_TEST_SUITE_END()
+}
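
For reference, a minimal self-contained sketch of the doctest idiom adopted throughout this patch; the suite name, case name, and values below are illustrative only and do not appear in the ArmNN sources.

// Minimal sketch of the doctest pattern used above (illustrative names only).
// DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN generates a main() so this file builds
// into a runnable test binary on its own.
#define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN
#include <doctest/doctest.h>

#include <stdexcept>

TEST_SUITE("ExampleSuite")                  // was BOOST_AUTO_TEST_SUITE(ExampleSuite)
{
TEST_CASE("ExampleCase")                    // was BOOST_AUTO_TEST_CASE(ExampleCase)
{
    int value = 2 + 2;

    CHECK(value == 4);                      // was BOOST_CHECK / BOOST_TEST
    CHECK_EQ(value, 4);                     // was BOOST_CHECK_EQUAL
    CHECK_NOTHROW(value += 1);              // was BOOST_CHECK_NO_THROW

    const auto thrower = []() { throw std::runtime_error("boom"); };
    CHECK_THROWS_AS(thrower(), std::runtime_error);  // was BOOST_CHECK_THROW
}
}                                           // closing brace replaces BOOST_AUTO_TEST_SUITE_END()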