From 1625efc870f1a8b7c6e6382277ddbb245f91a294 Mon Sep 17 00:00:00 2001
From: Sadik Armagan
Date: Thu, 10 Jun 2021 18:24:34 +0100
Subject: IVGCVSW-5963 'Move unit tests to new framework'

* Used doctest in ArmNN unit tests

Signed-off-by: Sadik Armagan
Change-Id: Ia9cf5fc72775878885c5f864abf2c56b3a935f1a
---
 .../reference/test/RefCreateWorkloadTests.cpp | 300 +++++++++++----------
 1 file changed, 151 insertions(+), 149 deletions(-)

(limited to 'src/backends/reference/test/RefCreateWorkloadTests.cpp')

diff --git a/src/backends/reference/test/RefCreateWorkloadTests.cpp b/src/backends/reference/test/RefCreateWorkloadTests.cpp
index 0f86e7eeff..4293ef54f3 100644
--- a/src/backends/reference/test/RefCreateWorkloadTests.cpp
+++ b/src/backends/reference/test/RefCreateWorkloadTests.cpp
@@ -10,6 +10,8 @@
 #include
 #include

+#include
+
 namespace
 {

@@ -19,8 +21,8 @@ void CheckInputOutput(std::unique_ptr workload, const TensorInfo& inpu
     auto queueDescriptor = workload->GetData();
     auto inputHandle = PolymorphicDowncast(queueDescriptor.m_Inputs[0]);
     auto outputHandle = PolymorphicDowncast(queueDescriptor.m_Outputs[0]);
-    BOOST_TEST((inputHandle->GetTensorInfo() == inputInfo));
-    BOOST_TEST((outputHandle->GetTensorInfo() == outputInfo));
+    CHECK((inputHandle->GetTensorInfo() == inputInfo));
+    CHECK((outputHandle->GetTensorInfo() == outputInfo));
 }

 template
@@ -33,9 +35,9 @@ void CheckInputsOutput(std::unique_ptr workload,
     auto inputHandle0 = PolymorphicDowncast(queueDescriptor.m_Inputs[0]);
     auto inputHandle1 = PolymorphicDowncast(queueDescriptor.m_Inputs[1]);
     auto outputHandle = PolymorphicDowncast(queueDescriptor.m_Outputs[0]);
-    BOOST_TEST((inputHandle0->GetTensorInfo() == inputInfo0));
-    BOOST_TEST((inputHandle1->GetTensorInfo() == inputInfo1));
-    BOOST_TEST((outputHandle->GetTensorInfo() == outputInfo));
+    CHECK((inputHandle0->GetTensorInfo() == inputInfo0));
+    CHECK((inputHandle1->GetTensorInfo() == inputInfo1));
+    CHECK((outputHandle->GetTensorInfo() == outputInfo));
 }

 armnn::RefWorkloadFactory GetFactory()
@@ -47,8 +49,8 @@ armnn::RefWorkloadFactory GetFactory()
 }


-BOOST_AUTO_TEST_SUITE(CreateWorkloadRef)
-
+TEST_SUITE("CreateWorkloadRef")
+{
 template
 static void RefCreateActivationWorkloadTest()
 {
@@ -62,12 +64,12 @@ static void RefCreateActivationWorkloadTest()
                      TensorInfo({ 1, 1 }, DataType));
 }

-BOOST_AUTO_TEST_CASE(CreateActivationFloat32Workload)
+TEST_CASE("CreateActivationFloat32Workload")
 {
     RefCreateActivationWorkloadTest();
 }

-BOOST_AUTO_TEST_CASE(CreateActivationUint8Workload)
+TEST_CASE("CreateActivationUint8Workload")
 {
     RefCreateActivationWorkloadTest();
 }
@@ -89,7 +91,7 @@ static void RefCreateElementwiseWorkloadTest()
                       TensorInfo({ 2, 3 }, DataType));
 }

-BOOST_AUTO_TEST_CASE(CreateSubtractionWorkloadWithBlobTest)
+TEST_CASE("CreateSubtractionWorkloadWithBlobTest")
 {
     Graph graph;
     RefWorkloadFactory factory = GetFactory();
@@ -106,7 +108,7 @@ BOOST_AUTO_TEST_CASE(CreateSubtractionWorkloadWithBlobTest)
                       TensorInfo({ 2, 3 }, DataType));
 }

-BOOST_AUTO_TEST_CASE(CreateAdditionWorkloadWithBlobTest)
+TEST_CASE("CreateAdditionWorkloadWithBlobTest")
 {
     Graph graph;
     RefWorkloadFactory factory = GetFactory();
@@ -122,7 +124,7 @@ BOOST_AUTO_TEST_CASE(CreateAdditionWorkloadWithBlobTest)
                       TensorInfo({ 2, 3 }, DataType));
 }

-BOOST_AUTO_TEST_CASE(CreateMultiplicationWorkloadWithBlobTest)
+TEST_CASE("CreateMultiplicationWorkloadWithBlobTest")
 {
     Graph graph;
     RefWorkloadFactory factory = GetFactory();
@@ -138,7 +140,7 @@ BOOST_AUTO_TEST_CASE(CreateMultiplicationWorkloadWithBlobTest)
                       TensorInfo({2, 3}, DataType));
 }

-BOOST_AUTO_TEST_CASE(CreateAdditionFloatWorkload)
+TEST_CASE("CreateAdditionFloatWorkload")
 {
     RefCreateElementwiseWorkloadTest,
         AdditionQueueDescriptor,
@@ -146,7 +148,7 @@ BOOST_AUTO_TEST_CASE(CreateAdditionFloatWorkload)
         armnn::DataType::Float32>();
 }

-BOOST_AUTO_TEST_CASE(CreateAdditionUint8Workload)
+TEST_CASE("CreateAdditionUint8Workload")
 {
     RefCreateElementwiseWorkloadTest,
         AdditionQueueDescriptor,
@@ -154,7 +156,7 @@ BOOST_AUTO_TEST_CASE(CreateAdditionUint8Workload)
         armnn::DataType::QAsymmU8>();
 }

-BOOST_AUTO_TEST_CASE(CreateAdditionInt16Workload)
+TEST_CASE("CreateAdditionInt16Workload")
 {
     RefCreateElementwiseWorkloadTest,
         AdditionQueueDescriptor,
@@ -162,7 +164,7 @@ BOOST_AUTO_TEST_CASE(CreateAdditionInt16Workload)
         armnn::DataType::QSymmS16>();
 }

-BOOST_AUTO_TEST_CASE(CreateAdditionInt32Workload)
+TEST_CASE("CreateAdditionInt32Workload")
 {
     RefCreateElementwiseWorkloadTest,
         AdditionQueueDescriptor,
@@ -170,7 +172,7 @@ BOOST_AUTO_TEST_CASE(CreateAdditionInt32Workload)
         armnn::DataType::Signed32>();
 }

-BOOST_AUTO_TEST_CASE(CreateSubtractionFloat32Workload)
+TEST_CASE("CreateSubtractionFloat32Workload")
 {
     RefCreateElementwiseWorkloadTest,
         SubtractionQueueDescriptor,
@@ -178,7 +180,7 @@ BOOST_AUTO_TEST_CASE(CreateSubtractionFloat32Workload)
         armnn::DataType::Float32>();
 }

-BOOST_AUTO_TEST_CASE(CreateSubtractionFloat16Workload)
+TEST_CASE("CreateSubtractionFloat16Workload")
 {
     RefCreateElementwiseWorkloadTest,
         SubtractionQueueDescriptor,
@@ -186,7 +188,7 @@ BOOST_AUTO_TEST_CASE(CreateSubtractionFloat16Workload)
         armnn::DataType::Float16>();
 }

-BOOST_AUTO_TEST_CASE(CreateSubtractionUint8Workload)
+TEST_CASE("CreateSubtractionUint8Workload")
 {
     RefCreateElementwiseWorkloadTest,
         SubtractionQueueDescriptor,
@@ -194,7 +196,7 @@ BOOST_AUTO_TEST_CASE(CreateSubtractionUint8Workload)
         armnn::DataType::QAsymmU8>();
 }

-BOOST_AUTO_TEST_CASE(CreateSubtractionInt16Workload)
+TEST_CASE("CreateSubtractionInt16Workload")
 {
     RefCreateElementwiseWorkloadTest,
         SubtractionQueueDescriptor,
@@ -202,7 +204,7 @@ BOOST_AUTO_TEST_CASE(CreateSubtractionInt16Workload)
         armnn::DataType::QSymmS16>();
 }

-BOOST_AUTO_TEST_CASE(CreateSubtractionInt32Workload)
+TEST_CASE("CreateSubtractionInt32Workload")
 {
     RefCreateElementwiseWorkloadTest,
         SubtractionQueueDescriptor,
@@ -210,7 +212,7 @@ BOOST_AUTO_TEST_CASE(CreateSubtractionInt32Workload)
         armnn::DataType::Signed32>();
 }

-BOOST_AUTO_TEST_CASE(CreateMultiplicationFloatWorkload)
+TEST_CASE("CreateMultiplicationFloatWorkload")
 {
     RefCreateElementwiseWorkloadTest,
         MultiplicationQueueDescriptor,
@@ -218,7 +220,7 @@ BOOST_AUTO_TEST_CASE(CreateMultiplicationFloatWorkload)
         armnn::DataType::Float32>();
 }

-BOOST_AUTO_TEST_CASE(CreateMultiplicationUint8Workload)
+TEST_CASE("CreateMultiplicationUint8Workload")
 {
     RefCreateElementwiseWorkloadTest,
         MultiplicationQueueDescriptor,
@@ -226,7 +228,7 @@ BOOST_AUTO_TEST_CASE(CreateMultiplicationUint8Workload)
         armnn::DataType::QAsymmU8>();
 }

-BOOST_AUTO_TEST_CASE(CreateMultiplicationInt16Workload)
+TEST_CASE("CreateMultiplicationInt16Workload")
 {
     RefCreateElementwiseWorkloadTest,
         MultiplicationQueueDescriptor,
@@ -234,7 +236,7 @@ BOOST_AUTO_TEST_CASE(CreateMultiplicationInt16Workload)
         armnn::DataType::QSymmS16>();
 }

-BOOST_AUTO_TEST_CASE(CreateMultiplicationInt32Workload)
+TEST_CASE("CreateMultiplicationInt32Workload")
 {
     RefCreateElementwiseWorkloadTest,
         MultiplicationQueueDescriptor,
@@ -242,7 +244,7 @@ BOOST_AUTO_TEST_CASE(CreateMultiplicationInt32Workload)
         armnn::DataType::Signed32>();
 }

-BOOST_AUTO_TEST_CASE(CreateDivisionFloat32Workload)
+TEST_CASE("CreateDivisionFloat32Workload")
 {
     RefCreateElementwiseWorkloadTest,
         DivisionQueueDescriptor,
@@ -250,7 +252,7 @@ BOOST_AUTO_TEST_CASE(CreateDivisionFloat32Workload)
         armnn::DataType::Float32>();
 }

-BOOST_AUTO_TEST_CASE(CreateDivisionFloat16Workload)
+TEST_CASE("CreateDivisionFloat16Workload")
 {
     RefCreateElementwiseWorkloadTest,
         DivisionQueueDescriptor,
@@ -258,7 +260,7 @@ BOOST_AUTO_TEST_CASE(CreateDivisionFloat16Workload)
         armnn::DataType::Float16>();
 }

-BOOST_AUTO_TEST_CASE(CreateDivisionUint8Workload)
+TEST_CASE("CreateDivisionUint8Workload")
 {
     RefCreateElementwiseWorkloadTest,
         DivisionQueueDescriptor,
@@ -266,7 +268,7 @@ BOOST_AUTO_TEST_CASE(CreateDivisionUint8Workload)
         armnn::DataType::QAsymmU8>();
 }

-BOOST_AUTO_TEST_CASE(CreateDivisionInt16Workload)
+TEST_CASE("CreateDivisionInt16Workload")
 {
     RefCreateElementwiseWorkloadTest,
         DivisionQueueDescriptor,
@@ -274,7 +276,7 @@ BOOST_AUTO_TEST_CASE(CreateDivisionInt16Workload)
         armnn::DataType::QSymmS16>();
 }

-BOOST_AUTO_TEST_CASE(CreateDivisionInt32Workload)
+TEST_CASE("CreateDivisionInt32Workload")
 {
     RefCreateElementwiseWorkloadTest,
         DivisionQueueDescriptor,
@@ -311,7 +313,7 @@ static void RefCreateBatchNormalizationWorkloadTest(DataLayout dataLayout)
     CheckInputOutput(std::move(workload), TensorInfo(inputShape, DataType), TensorInfo(outputShape, DataType));
 }

-BOOST_AUTO_TEST_CASE(CreateBatchNormalizationWithBlobFloat32Workload)
+TEST_CASE("CreateBatchNormalizationWithBlobFloat32Workload")
 {
     Graph graph;
     RefWorkloadFactory factory = GetFactory();
@@ -329,55 +331,55 @@ BOOST_AUTO_TEST_CASE(CreateBatchNormalizationWithBlobFloat32Workload)
     CheckInputOutput(std::move(workload), TensorInfo(inputShape, dataType), TensorInfo(outputShape, dataType));
 }

-BOOST_AUTO_TEST_CASE(CreateBatchNormalizationFloat32Workload)
+TEST_CASE("CreateBatchNormalizationFloat32Workload")
 {
     RefCreateBatchNormalizationWorkloadTest
         (DataLayout::NCHW);
 }

-BOOST_AUTO_TEST_CASE(CreateBatchNormalizationFloat32WorkloadNhwc)
+TEST_CASE("CreateBatchNormalizationFloat32WorkloadNhwc")
 {
     RefCreateBatchNormalizationWorkloadTest
         (DataLayout::NHWC);
 }

-BOOST_AUTO_TEST_CASE(CreateBatchNormalizationFloat16Workload)
+TEST_CASE("CreateBatchNormalizationFloat16Workload")
 {
     RefCreateBatchNormalizationWorkloadTest
         (DataLayout::NCHW);
 }

-BOOST_AUTO_TEST_CASE(CreateBatchNormalizationFloat16WorkloadNhwc)
+TEST_CASE("CreateBatchNormalizationFloat16WorkloadNhwc")
 {
     RefCreateBatchNormalizationWorkloadTest
         (DataLayout::NHWC);
 }

-BOOST_AUTO_TEST_CASE(CreateBatchNormalizationUint8Workload)
+TEST_CASE("CreateBatchNormalizationUint8Workload")
 {
     RefCreateBatchNormalizationWorkloadTest
         (DataLayout::NCHW);
 }

-BOOST_AUTO_TEST_CASE(CreateBatchNormalizationUint8WorkloadNhwc)
+TEST_CASE("CreateBatchNormalizationUint8WorkloadNhwc")
 {
     RefCreateBatchNormalizationWorkloadTest
         (DataLayout::NHWC);
 }

-BOOST_AUTO_TEST_CASE(CreateBatchNormalizationInt16Workload)
+TEST_CASE("CreateBatchNormalizationInt16Workload")
 {
     RefCreateBatchNormalizationWorkloadTest
         (DataLayout::NCHW);
 }

-BOOST_AUTO_TEST_CASE(CreateBatchNormalizationInt16WorkloadNhwc)
+TEST_CASE("CreateBatchNormalizationInt16WorkloadNhwc")
 {
     RefCreateBatchNormalizationWorkloadTest
         (DataLayout::NHWC);
 }

-BOOST_AUTO_TEST_CASE(CreateConvertFp16ToFp32Float32Workload)
+TEST_CASE("CreateConvertFp16ToFp32Float32Workload")
 {
     Graph graph;
     RefWorkloadFactory factory = GetFactory();
@@ -388,7 +390,7 @@ BOOST_AUTO_TEST_CASE(CreateConvertFp16ToFp32Float32Workload)
         std::move(workload), TensorInfo({1, 3, 2, 3}, DataType::Float16), TensorInfo({1, 3, 2, 3}, DataType::Float32));
 }

-BOOST_AUTO_TEST_CASE(CreateConvertFp32ToFp16Float16Workload)
+TEST_CASE("CreateConvertFp32ToFp16Float16Workload")
 {
     Graph graph;
     RefWorkloadFactory factory = GetFactory();
@@ -417,17 +419,17 @@ static void RefCreateConvolution2dWorkloadTest(DataLayout dataLayout = DataLayou
                      TensorInfo(outputShape, DataType::Float32));
 }

-BOOST_AUTO_TEST_CASE(CreateConvolution2dFloatNchwWorkload)
+TEST_CASE("CreateConvolution2dFloatNchwWorkload")
 {
     RefCreateConvolution2dWorkloadTest(DataLayout::NCHW);
 }

-BOOST_AUTO_TEST_CASE(CreateConvolution2dFloatNhwcWorkload)
+TEST_CASE("CreateConvolution2dFloatNhwcWorkload")
 {
     RefCreateConvolution2dWorkloadTest(DataLayout::NHWC);
 }

-BOOST_AUTO_TEST_CASE(CreateConvolution2dWithBlobWorkload)
+TEST_CASE("CreateConvolution2dWithBlobWorkload")
 {
     DataLayout dataLayout = DataLayout::NHWC;
     Graph graph;
@@ -464,12 +466,12 @@ static void RefCreateDepthwiseConvolutionWorkloadTest(DataLayout dataLayout)
                      TensorInfo(outputShape, DataType::Float32));
 }

-BOOST_AUTO_TEST_CASE(CreateDepthwiseConvolutionFloat32NhwcWorkload)
+TEST_CASE("CreateDepthwiseConvolutionFloat32NhwcWorkload")
 {
     RefCreateDepthwiseConvolutionWorkloadTest(DataLayout::NHWC);
 }

-BOOST_AUTO_TEST_CASE(RefCreateFullyConnectedWithBlobWorkloadTest)
+TEST_CASE("RefCreateFullyConnectedWithBlobWorkloadTest")
 {
     Graph graph;
     RefWorkloadFactory factory = GetFactory();
@@ -499,17 +501,17 @@ static void RefCreateFullyConnectedWorkloadTest()
                       TensorInfo({ 3, 7 }, DataType, outputQScale));
 }

-BOOST_AUTO_TEST_CASE(CreateFullyConnectedWorkloadFloat32)
+TEST_CASE("CreateFullyConnectedWorkloadFloat32")
 {
     RefCreateFullyConnectedWorkloadTest();
 }

-BOOST_AUTO_TEST_CASE(CreateFullyConnectedWorkloadQuantisedAsymm8)
+TEST_CASE("CreateFullyConnectedWorkloadQuantisedAsymm8")
 {
     RefCreateFullyConnectedWorkloadTest();
 }

-BOOST_AUTO_TEST_CASE(CreateFullyConnectedWorkloadQuantisedSymm16)
+TEST_CASE("CreateFullyConnectedWorkloadQuantisedSymm16")
 {
     RefCreateFullyConnectedWorkloadTest();
 }
@@ -541,32 +543,32 @@ static void RefCreateNormalizationWorkloadTest(DataLayout dataLayout)
     CheckInputOutput(std::move(workload), TensorInfo(inputShape, DataType), TensorInfo(outputShape, DataType));
 }

-BOOST_AUTO_TEST_CASE(CreateRefNormalizationFloat32NchwWorkload)
+TEST_CASE("CreateRefNormalizationFloat32NchwWorkload")
 {
     RefCreateNormalizationWorkloadTest(DataLayout::NCHW);
 }

-BOOST_AUTO_TEST_CASE(CreateRefNormalizationFloat32NhwcWorkload)
+TEST_CASE("CreateRefNormalizationFloat32NhwcWorkload")
 {
     RefCreateNormalizationWorkloadTest(DataLayout::NHWC);
 }

-BOOST_AUTO_TEST_CASE(CreateRefNormalizationUint8NchwWorkload)
+TEST_CASE("CreateRefNormalizationUint8NchwWorkload")
 {
     RefCreateNormalizationWorkloadTest(DataLayout::NCHW);
 }

-BOOST_AUTO_TEST_CASE(CreateRefNormalizationUint8NhwcWorkload)
+TEST_CASE("CreateRefNormalizationUint8NhwcWorkload")
 {
     RefCreateNormalizationWorkloadTest(DataLayout::NHWC);
 }

-BOOST_AUTO_TEST_CASE(CreateRefNormalizationInt16NchwWorkload)
+TEST_CASE("CreateRefNormalizationInt16NchwWorkload")
 {
     RefCreateNormalizationWorkloadTest(DataLayout::NCHW);
 }

-BOOST_AUTO_TEST_CASE(CreateRefNormalizationInt16NhwcWorkload)
+TEST_CASE("CreateRefNormalizationInt16NhwcWorkload")
 {
     RefCreateNormalizationWorkloadTest(DataLayout::NHWC);
 }
@@ -599,32 +601,32 @@ static void RefCreatePooling2dWorkloadTest(DataLayout dataLayout)
                      TensorInfo(outputShape, DataType));
 }

-BOOST_AUTO_TEST_CASE(CreatePooling2dFloat32Workload)
+TEST_CASE("CreatePooling2dFloat32Workload")
 {
     RefCreatePooling2dWorkloadTest(DataLayout::NCHW);
 }

-BOOST_AUTO_TEST_CASE(CreatePooling2dFloat32NhwcWorkload)
+TEST_CASE("CreatePooling2dFloat32NhwcWorkload")
 {
     RefCreatePooling2dWorkloadTest(DataLayout::NHWC);
 }

-BOOST_AUTO_TEST_CASE(CreatePooling2dUint8Workload)
+TEST_CASE("CreatePooling2dUint8Workload")
 {
     RefCreatePooling2dWorkloadTest(DataLayout::NCHW);
 }

-BOOST_AUTO_TEST_CASE(CreatePooling2dUint8NhwcWorkload)
+TEST_CASE("CreatePooling2dUint8NhwcWorkload")
 {
     RefCreatePooling2dWorkloadTest(DataLayout::NHWC);
 }

-BOOST_AUTO_TEST_CASE(CreatePooling2dInt16Workload)
+TEST_CASE("CreatePooling2dInt16Workload")
 {
     RefCreatePooling2dWorkloadTest(DataLayout::NCHW);
 }

-BOOST_AUTO_TEST_CASE(CreatePooling2dInt16NhwcWorkload)
+TEST_CASE("CreatePooling2dInt16NhwcWorkload")
 {
     RefCreatePooling2dWorkloadTest(DataLayout::NHWC);
 }
@@ -655,22 +657,22 @@ static void RefCreateSoftmaxWorkloadTest()
         tensorInfo);
 }

-BOOST_AUTO_TEST_CASE(CreateSoftmaxFloat32Workload)
+TEST_CASE("CreateSoftmaxFloat32Workload")
 {
     RefCreateSoftmaxWorkloadTest();
 }

-BOOST_AUTO_TEST_CASE(CreateSoftmaxFloat16Workload)
+TEST_CASE("CreateSoftmaxFloat16Workload")
 {
     RefCreateSoftmaxWorkloadTest();
 }

-BOOST_AUTO_TEST_CASE(CreateSoftmaxQuantisedAsymm8Workload)
+TEST_CASE("CreateSoftmaxQuantisedAsymm8Workload")
 {
     RefCreateSoftmaxWorkloadTest();
 }

-BOOST_AUTO_TEST_CASE(CreateSoftmaxQuantisedSymm16Workload)
+TEST_CASE("CreateSoftmaxQuantisedSymm16Workload")
 {
     RefCreateSoftmaxWorkloadTest();
 }
@@ -685,29 +687,29 @@ static void RefCreateSplitterWorkloadTest()
     // Checks that outputs are as we expect them (see definition of CreateSplitterWorkloadTest).
     SplitterQueueDescriptor queueDescriptor = workload->GetData();
     auto inputHandle = PolymorphicDowncast(queueDescriptor.m_Inputs[0]);
-    BOOST_TEST((inputHandle->GetTensorInfo() == TensorInfo({ 5, 7, 7 }, DataType)));
+    CHECK((inputHandle->GetTensorInfo() == TensorInfo({ 5, 7, 7 }, DataType)));

     auto outputHandle0 = PolymorphicDowncast(queueDescriptor.m_Outputs[0]);
-    BOOST_TEST((outputHandle0->GetTensorInfo() == TensorInfo({ 1, 7, 7 }, DataType)));
+    CHECK((outputHandle0->GetTensorInfo() == TensorInfo({ 1, 7, 7 }, DataType)));

     auto outputHandle1 = PolymorphicDowncast(queueDescriptor.m_Outputs[1]);
-    BOOST_TEST((outputHandle1->GetTensorInfo() == TensorInfo({ 2, 7, 7 }, DataType)));
+    CHECK((outputHandle1->GetTensorInfo() == TensorInfo({ 2, 7, 7 }, DataType)));

     auto outputHandle2 = PolymorphicDowncast(queueDescriptor.m_Outputs[2]);
-    BOOST_TEST((outputHandle2->GetTensorInfo() == TensorInfo({ 2, 7, 7 }, DataType)));
+    CHECK((outputHandle2->GetTensorInfo() == TensorInfo({ 2, 7, 7 }, DataType)));
 }

-BOOST_AUTO_TEST_CASE(CreateSplitterFloat32Workload)
+TEST_CASE("CreateSplitterFloat32Workload")
 {
     RefCreateSplitterWorkloadTest();
 }

-BOOST_AUTO_TEST_CASE(CreateSplitterFloat16Workload)
+TEST_CASE("CreateSplitterFloat16Workload")
 {
     RefCreateSplitterWorkloadTest();
 }

-BOOST_AUTO_TEST_CASE(CreateSplitterUint8Workload)
+TEST_CASE("CreateSplitterUint8Workload")
 {
     RefCreateSplitterWorkloadTest();
 }
@@ -735,27 +737,27 @@ static void RefCreateSplitterConcatWorkloadTest()
     armnn::RefTensorHandle* mIn0 = dynamic_cast(wlConcat->GetData().m_Inputs[0]);
     armnn::RefTensorHandle* mIn1 = dynamic_cast(wlConcat->GetData().m_Inputs[1]);

-    BOOST_TEST(sOut0);
-    BOOST_TEST(sOut1);
-    BOOST_TEST(mIn0);
-    BOOST_TEST(mIn1);
+    CHECK(sOut0);
+    CHECK(sOut1);
+    CHECK(mIn0);
+    CHECK(mIn1);

     bool validDataPointers = (sOut0 == mIn1) && (sOut1 == mIn0);

-    BOOST_TEST(validDataPointers);
+    CHECK(validDataPointers);
 }

-BOOST_AUTO_TEST_CASE(CreateSplitterConcatFloat32)
+TEST_CASE("CreateSplitterConcatFloat32")
 {
     RefCreateSplitterConcatWorkloadTest();
 }

-BOOST_AUTO_TEST_CASE(CreateSplitterConcatFloat16)
+TEST_CASE("CreateSplitterConcatFloat16")
 {
     RefCreateSplitterConcatWorkloadTest();
 }

-BOOST_AUTO_TEST_CASE(CreateSplitterConcatUint8)
+TEST_CASE("CreateSplitterConcatUint8")
 {
     RefCreateSplitterConcatWorkloadTest();
 }
@@ -785,26 +787,26 @@ static void RefCreateSingleOutputMultipleInputsTest()
     armnn::RefTensorHandle* activ1_1Im = dynamic_cast(wlActiv1_1->GetData().m_Inputs[0]);

-    BOOST_TEST(sOut0);
-    BOOST_TEST(sOut1);
-    BOOST_TEST(activ0_0Im);
-    BOOST_TEST(activ0_1Im);
-    BOOST_TEST(activ1_0Im);
-    BOOST_TEST(activ1_1Im);
+    CHECK(sOut0);
+    CHECK(sOut1);
+    CHECK(activ0_0Im);
+    CHECK(activ0_1Im);
+    CHECK(activ1_0Im);
+    CHECK(activ1_1Im);

     bool validDataPointers = (sOut0 == activ0_0Im) && (sOut0 == activ0_1Im) &&
                              (sOut1 == activ1_0Im) && (sOut1 == activ1_1Im);

-    BOOST_TEST(validDataPointers);
+    CHECK(validDataPointers);
 }

-BOOST_AUTO_TEST_CASE(CreateSingleOutputMultipleInputsFloat32)
+TEST_CASE("CreateSingleOutputMultipleInputsFloat32")
 {
     RefCreateSingleOutputMultipleInputsTest();
 }

-BOOST_AUTO_TEST_CASE(CreateSingleOutputMultipleInputsUint8)
+TEST_CASE("CreateSingleOutputMultipleInputsUint8")
 {
     RefCreateSingleOutputMultipleInputsTest();
@@ -838,27 +840,27 @@ static void RefCreateResizeBilinearTest(DataLayout dataLayout)
                      TensorInfo(outputShape, DataType));
 }

-BOOST_AUTO_TEST_CASE(CreateResizeBilinearFloat32)
+TEST_CASE("CreateResizeBilinearFloat32")
 {
     RefCreateResizeBilinearTest(DataLayout::NCHW);
 }

-BOOST_AUTO_TEST_CASE(CreateResizeBilinearFloat16)
+TEST_CASE("CreateResizeBilinearFloat16")
 {
     RefCreateResizeBilinearTest(DataLayout::NCHW);
 }

-BOOST_AUTO_TEST_CASE(CreateResizeBilinearUint8)
+TEST_CASE("CreateResizeBilinearUint8")
 {
     RefCreateResizeBilinearTest(DataLayout::NCHW);
 }

-BOOST_AUTO_TEST_CASE(CreateResizeBilinearQuantisedAsymm16)
+TEST_CASE("CreateResizeBilinearQuantisedAsymm16")
 {
     RefCreateResizeBilinearTest(DataLayout::NCHW);
 }

-BOOST_AUTO_TEST_CASE(CreateResizeBilinearFloat32Nhwc)
+TEST_CASE("CreateResizeBilinearFloat32Nhwc")
 {
     RefCreateResizeBilinearTest(DataLayout::NHWC);
 }
@@ -876,22 +878,22 @@ static void RefCreateBatchToSpaceNdTest()
                      TensorInfo({ 1, 1, 1, 1 }, DataType));
 }

-BOOST_AUTO_TEST_CASE(CreateBatchToSpaceNdFloat32)
+TEST_CASE("CreateBatchToSpaceNdFloat32")
 {
     RefCreateBatchToSpaceNdTest();
 }

-BOOST_AUTO_TEST_CASE(CreateBatchToSpaceNdFloat16)
+TEST_CASE("CreateBatchToSpaceNdFloat16")
 {
     RefCreateBatchToSpaceNdTest();
 }

-BOOST_AUTO_TEST_CASE(CreateBatchToSpaceNdUint8)
+TEST_CASE("CreateBatchToSpaceNdUint8")
 {
     RefCreateBatchToSpaceNdTest();
 }

-BOOST_AUTO_TEST_CASE(CreateBatchToSpaceNdQSymm16)
+TEST_CASE("CreateBatchToSpaceNdQSymm16")
 {
     RefCreateBatchToSpaceNdTest();
 }
@@ -924,32 +926,32 @@ static void RefCreateL2NormalizationTest(DataLayout dataLayout)
     CheckInputOutput(std::move(workload), TensorInfo(inputShape, DataType), TensorInfo(outputShape, DataType));
 }

-BOOST_AUTO_TEST_CASE(CreateL2NormalizationFloat32)
+TEST_CASE("CreateL2NormalizationFloat32")
 {
     RefCreateL2NormalizationTest(DataLayout::NCHW);
 }

-BOOST_AUTO_TEST_CASE(CreateL2NormalizationFloat32Nhwc)
+TEST_CASE("CreateL2NormalizationFloat32Nhwc")
 {
     RefCreateL2NormalizationTest(DataLayout::NHWC);
 }

-BOOST_AUTO_TEST_CASE(CreateL2NormalizationInt16)
+TEST_CASE("CreateL2NormalizationInt16")
 {
     RefCreateL2NormalizationTest(DataLayout::NCHW);
 }

-BOOST_AUTO_TEST_CASE(CreateL2NormalizationInt16Nhwc)
+TEST_CASE("CreateL2NormalizationInt16Nhwc")
 {
     RefCreateL2NormalizationTest(DataLayout::NHWC);
 }

-BOOST_AUTO_TEST_CASE(CreateL2NormalizationUint8)
+TEST_CASE("CreateL2NormalizationUint8")
 {
     RefCreateL2NormalizationTest(DataLayout::NCHW);
 }

-BOOST_AUTO_TEST_CASE(CreateL2NormalizationUint8Nhwc)
+TEST_CASE("CreateL2NormalizationUint8Nhwc")
 {
     RefCreateL2NormalizationTest(DataLayout::NHWC);
 }
@@ -968,17 +970,17 @@ static void RefCreateReshapeWorkloadTest()
                      TensorInfo({ 1, 4 }, DataType));
 }

-BOOST_AUTO_TEST_CASE(CreateReshapeWorkloadFloat32)
+TEST_CASE("CreateReshapeWorkloadFloat32")
 {
     RefCreateReshapeWorkloadTest();
 }

-BOOST_AUTO_TEST_CASE(CreateReshapeWorkloadQuantisedAsymm8)
+TEST_CASE("CreateReshapeWorkloadQuantisedAsymm8")
 {
     RefCreateReshapeWorkloadTest();
 }

-BOOST_AUTO_TEST_CASE(CreateReshapeWorkloadQuantisedSymm16)
+TEST_CASE("CreateReshapeWorkloadQuantisedSymm16")
 {
     RefCreateReshapeWorkloadTest();
 }
@@ -997,52 +999,52 @@ static void RefCreateConcatWorkloadTest(const armnn::TensorShape& outputShape,
                      TensorInfo(outputShape, DataType));
 }

-BOOST_AUTO_TEST_CASE(CreateConcatDim0Float32Workload)
+TEST_CASE("CreateConcatDim0Float32Workload")
 {
     RefCreateConcatWorkloadTest({ 4, 3, 2, 5 }, 0);
 }

-BOOST_AUTO_TEST_CASE(CreateConcatDim0Float16Workload)
+TEST_CASE("CreateConcatDim0Float16Workload")
 {
     RefCreateConcatWorkloadTest({ 4, 3, 2, 5 }, 0);
 }

-BOOST_AUTO_TEST_CASE(CreateConcatDim0Uint8Workload)
+TEST_CASE("CreateConcatDim0Uint8Workload")
 {
     RefCreateConcatWorkloadTest({ 4, 3, 2, 5 }, 0);
 }

-BOOST_AUTO_TEST_CASE(CreateConcatDim0Uint16Workload)
+TEST_CASE("CreateConcatDim0Uint16Workload")
 {
     RefCreateConcatWorkloadTest({ 4, 3, 2, 5 }, 0);
 }

-BOOST_AUTO_TEST_CASE(CreateConcatDim1Float32Workload)
+TEST_CASE("CreateConcatDim1Float32Workload")
 {
     RefCreateConcatWorkloadTest({ 2, 6, 2, 5 }, 1);
 }

-BOOST_AUTO_TEST_CASE(CreateConcatDim1Uint8Workload)
+TEST_CASE("CreateConcatDim1Uint8Workload")
 {
     RefCreateConcatWorkloadTest({ 2, 6, 2, 5 }, 1);
 }

-BOOST_AUTO_TEST_CASE(CreateConcatDim2Float32Workload)
+TEST_CASE("CreateConcatDim2Float32Workload")
 {
     RefCreateConcatWorkloadTest({ 2, 3, 4, 5 }, 2);
 }

-BOOST_AUTO_TEST_CASE(CreateConcatDim2Uint8Workload)
+TEST_CASE("CreateConcatDim2Uint8Workload")
 {
     RefCreateConcatWorkloadTest({ 2, 3, 4, 5 }, 2);
 }

-BOOST_AUTO_TEST_CASE(CreateConcatDim3Float32Workload)
+TEST_CASE("CreateConcatDim3Float32Workload")
 {
     RefCreateConcatWorkloadTest({ 2, 3, 2, 10 }, 3);
 }

-BOOST_AUTO_TEST_CASE(CreateConcatDim3Uint8Workload)
+TEST_CASE("CreateConcatDim3Uint8Workload")
 {
     RefCreateConcatWorkloadTest({ 2, 3, 2, 10 }, 3);
 }
@@ -1057,25 +1059,25 @@ static void RefCreateConstantWorkloadTest(const armnn::TensorShape& outputShape)
     // Check output is as expected
     auto queueDescriptor = workload->GetData();
     auto outputHandle = PolymorphicDowncast(queueDescriptor.m_Outputs[0]);
-    BOOST_TEST((outputHandle->GetTensorInfo() == TensorInfo(outputShape, DataType)));
+    CHECK((outputHandle->GetTensorInfo() == TensorInfo(outputShape, DataType)));
 }

-BOOST_AUTO_TEST_CASE(CreateConstantUint8Workload)
+TEST_CASE("CreateConstantUint8Workload")
 {
     RefCreateConstantWorkloadTest({ 2, 3, 2, 10 });
 }

-BOOST_AUTO_TEST_CASE(CreateConstantInt16Workload)
+TEST_CASE("CreateConstantInt16Workload")
 {
     RefCreateConstantWorkloadTest({ 2, 3, 2, 10 });
 }

-BOOST_AUTO_TEST_CASE(CreateConstantFloat32Workload)
+TEST_CASE("CreateConstantFloat32Workload")
 {
     RefCreateConstantWorkloadTest({ 2, 3, 2, 10 });
 }

-BOOST_AUTO_TEST_CASE(CreateConstantSigned32Workload)
+TEST_CASE("CreateConstantSigned32Workload")
 {
     RefCreateConstantWorkloadTest({ 2, 3, 2, 10 });
 }
@@ -1097,53 +1099,53 @@ static void RefCreatePreluWorkloadTest(const armnn::TensorShape& inputShape,
     // Check output is as expected
     auto queueDescriptor = workload->GetData();
     auto outputHandle = PolymorphicDowncast(queueDescriptor.m_Outputs[0]);
-    BOOST_TEST((outputHandle->GetTensorInfo() == TensorInfo(outputShape, dataType)));
+    CHECK((outputHandle->GetTensorInfo() == TensorInfo(outputShape, dataType)));
 }

-BOOST_AUTO_TEST_CASE(CreatePreluFloat32Workload)
+TEST_CASE("CreatePreluFloat32Workload")
 {
     RefCreatePreluWorkloadTest({ 1, 4, 1, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 }, armnn::DataType::Float32);
 }

-BOOST_AUTO_TEST_CASE(CreatePreluFloat16Workload)
+TEST_CASE("CreatePreluFloat16Workload")
 {
     RefCreatePreluWorkloadTest({ 1, 4, 1, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 }, armnn::DataType::Float16);
 }

-BOOST_AUTO_TEST_CASE(CreatePreluUint8Workload)
+TEST_CASE("CreatePreluUint8Workload")
 {
     RefCreatePreluWorkloadTest({ 1, 4, 1, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 }, armnn::DataType::QAsymmU8);
 }

-BOOST_AUTO_TEST_CASE(CreatePreluInt16Workload)
+TEST_CASE("CreatePreluInt16Workload")
 {
     RefCreatePreluWorkloadTest({ 1, 4, 1, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 }, armnn::DataType::QSymmS16);
 }

-BOOST_AUTO_TEST_CASE(CreatePreluFloat32NoBroadcastWorkload)
+TEST_CASE("CreatePreluFloat32NoBroadcastWorkload")
 {
-    BOOST_CHECK_THROW(RefCreatePreluWorkloadTest({ 1, 4, 7, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 },
+    CHECK_THROWS_AS(RefCreatePreluWorkloadTest({ 1, 4, 7, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 },
                                                  armnn::DataType::Float32),
                       armnn::InvalidArgumentException);
 }

-BOOST_AUTO_TEST_CASE(CreatePreluFloat16NoBroadcastWorkload)
+TEST_CASE("CreatePreluFloat16NoBroadcastWorkload")
 {
-    BOOST_CHECK_THROW(RefCreatePreluWorkloadTest({ 1, 4, 7, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 },
+    CHECK_THROWS_AS(RefCreatePreluWorkloadTest({ 1, 4, 7, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 },
                                                  armnn::DataType::Float16),
                       armnn::InvalidArgumentException);
 }

-BOOST_AUTO_TEST_CASE(CreatePreluUint8NoBroadcastWorkload)
+TEST_CASE("CreatePreluUint8NoBroadcastWorkload")
 {
-    BOOST_CHECK_THROW(RefCreatePreluWorkloadTest({ 1, 4, 7, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 },
+    CHECK_THROWS_AS(RefCreatePreluWorkloadTest({ 1, 4, 7, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 },
                                                  armnn::DataType::QAsymmU8),
                       armnn::InvalidArgumentException);
 }

-BOOST_AUTO_TEST_CASE(CreatePreluInt16NoBroadcastWorkload)
+TEST_CASE("CreatePreluInt16NoBroadcastWorkload")
 {
-    BOOST_CHECK_THROW(RefCreatePreluWorkloadTest({ 1, 4, 7, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 },
+    CHECK_THROWS_AS(RefCreatePreluWorkloadTest({ 1, 4, 7, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 },
                                                  armnn::DataType::QSymmS16),
                       armnn::InvalidArgumentException);
 }
@@ -1161,22 +1163,22 @@ static void RefCreateSpaceToDepthWorkloadTest()
                      TensorInfo({ 1, 1, 1, 4 }, DataType));
 }

-BOOST_AUTO_TEST_CASE(CreateSpaceToDepthWorkloadFloat32)
+TEST_CASE("CreateSpaceToDepthWorkloadFloat32")
 {
     RefCreateSpaceToDepthWorkloadTest();
 }

-BOOST_AUTO_TEST_CASE(CreateSpaceToDepthWorkloadFloat16)
+TEST_CASE("CreateSpaceToDepthWorkloadFloat16")
 {
     RefCreateSpaceToDepthWorkloadTest();
 }

-BOOST_AUTO_TEST_CASE(CreateSpaceToDepthWorkloadQASymm8)
+TEST_CASE("CreateSpaceToDepthWorkloadQASymm8")
 {
     RefCreateSpaceToDepthWorkloadTest();
 }

-BOOST_AUTO_TEST_CASE(CreateSpaceToDepthWorkloadQSymm16)
+TEST_CASE("CreateSpaceToDepthWorkloadQSymm16")
 {
     RefCreateSpaceToDepthWorkloadTest();
 }
@@ -1201,23 +1203,23 @@ static void RefCreateStackWorkloadTest(const armnn::TensorShape& inputShape,
     for (unsigned int i = 0; i < numInputs; ++i)
     {
         auto inputHandle = PolymorphicDowncast(queueDescriptor.m_Inputs[i]);
-        BOOST_TEST((inputHandle->GetTensorInfo() == TensorInfo(inputShape, DataType)));
+        CHECK((inputHandle->GetTensorInfo() == TensorInfo(inputShape, DataType)));
     }
     auto outputHandle = PolymorphicDowncast(queueDescriptor.m_Outputs[0]);
-    BOOST_TEST((outputHandle->GetTensorInfo() == TensorInfo(outputShape, DataType)));
+    CHECK((outputHandle->GetTensorInfo() == TensorInfo(outputShape, DataType)));
 }

-BOOST_AUTO_TEST_CASE(CreateStackFloat32Workload)
+TEST_CASE("CreateStackFloat32Workload")
 {
     RefCreateStackWorkloadTest({ 3, 4, 5 }, { 3, 4, 2, 5 }, 2, 2);
 }

-BOOST_AUTO_TEST_CASE(CreateStackUint8Workload)
+TEST_CASE("CreateStackUint8Workload")
 {
     RefCreateStackWorkloadTest({ 3, 4, 5 }, { 3, 4, 2, 5 }, 2, 2);
 }

-BOOST_AUTO_TEST_CASE(CreateStackUint16Workload)
+TEST_CASE("CreateStackUint16Workload")
 {
     RefCreateStackWorkloadTest({ 3, 4, 5 }, { 3, 4, 2, 5 }, 2, 2);
 }
@@ -1241,14 +1243,14 @@ static void RefCreateQLstmWorkloadTest()
     auto cellStateOutHandle = PolymorphicDowncast(queueDescriptor.m_Outputs[1]);
     auto outputHandle = PolymorphicDowncast(queueDescriptor.m_Outputs[2]);

-    BOOST_TEST((inputHandle->GetTensorInfo() == inputInfo));
-    BOOST_TEST((cellStateOutHandle->GetTensorInfo() == cellStateInfo));
-    BOOST_TEST((outputHandle->GetTensorInfo() == outputInfo));
+    CHECK((inputHandle->GetTensorInfo() == inputInfo));
+    CHECK((cellStateOutHandle->GetTensorInfo() == cellStateInfo));
+    CHECK((outputHandle->GetTensorInfo() == outputInfo));
 }

-BOOST_AUTO_TEST_CASE(CreateQLstmWorkload)
+TEST_CASE("CreateQLstmWorkload")
 {
     RefCreateQLstmWorkloadTest();
 }

-BOOST_AUTO_TEST_SUITE_END()
+}
--
cgit v1.2.1
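
For reference, the migration in this patch is a mechanical mapping from Boost.Test macros to their doctest equivalents: BOOST_AUTO_TEST_SUITE / BOOST_AUTO_TEST_SUITE_END become a braced TEST_SUITE("...") { ... } block, BOOST_AUTO_TEST_CASE becomes TEST_CASE with a string name, BOOST_TEST becomes CHECK, and BOOST_CHECK_THROW becomes CHECK_THROWS_AS. A minimal standalone sketch of those doctest idioms is shown below; the Widget type and MakeWidget helper are hypothetical stand-ins used only for illustration, not ArmNN code, and the only real API assumed is doctest's single-header interface.

// Standalone sketch of the doctest idioms adopted by this patch.
// Widget and MakeWidget are hypothetical; only doctest's API is real here.
#define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN
#include <doctest/doctest.h>

#include <cstddef>
#include <stdexcept>
#include <vector>

struct Widget
{
    std::vector<int> data;
};

Widget MakeWidget(std::size_t size)
{
    if (size == 0)
    {
        // Gives CHECK_THROWS_AS something to verify below.
        throw std::invalid_argument("size must be non-zero");
    }
    return Widget{ std::vector<int>(size, 0) };
}

// BOOST_AUTO_TEST_SUITE(X) ... BOOST_AUTO_TEST_SUITE_END()
// becomes a braced TEST_SUITE("X") { ... } block.
TEST_SUITE("WidgetTests")
{

// BOOST_AUTO_TEST_CASE(X) becomes TEST_CASE("X").
TEST_CASE("CreateWidget")
{
    Widget w = MakeWidget(4);

    // BOOST_TEST(expr) becomes CHECK(expr).
    CHECK((w.data.size() == 4));
}

// BOOST_CHECK_THROW(expr, Exception) becomes CHECK_THROWS_AS(expr, Exception).
TEST_CASE("CreateWidgetZeroSizeThrows")
{
    CHECK_THROWS_AS(MakeWidget(0), std::invalid_argument);
}

}

Because TEST_SUITE introduces an ordinary braced scope in doctest, the patch replaces BOOST_AUTO_TEST_SUITE(CreateWorkloadRef) with TEST_SUITE("CreateWorkloadRef") followed by an opening brace, and BOOST_AUTO_TEST_SUITE_END() with the matching closing brace.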