From 25ab3a8326a9e2c52c84b2747fa72907109a695d Mon Sep 17 00:00:00 2001 From: Colm Donelan Date: Mon, 17 May 2021 13:01:52 +0100 Subject: IVGCVSW-5964 Removing some remaining boost utility usages from tests. * Adding a basic PredicateResult class to replace boost::test_tools::predicate_result * Replacing all uses of boost::test_tools::predicate_result with the new armnn::PredicateResult class * Replacing use of boost::test_tools::output_test_stream output with std::ostringstream in ProfilerTests.cpp Signed-off-by: Colm Donelan Change-Id: I75cdbbff98d984e26e4a50c125386b2988516fad --- src/armnn/test/PredicateResult.hpp | 48 +++++++ src/armnn/test/ProfilerTests.cpp | 5 +- src/armnn/test/TensorHelpers.hpp | 15 ++- src/armnn/test/UnitTests.hpp | 6 +- .../test/ParserFlatbuffersSerializeFixture.hpp | 3 +- .../test/ParserFlatbuffersFixture.hpp | 6 +- src/armnnUtils/ParserPrototxtFixture.hpp | 6 +- .../aclCommon/test/CreateWorkloadClNeon.hpp | 22 ++-- src/backends/aclCommon/test/MemCopyTests.cpp | 12 +- .../layerTests/DetectionPostProcessTestImpl.hpp | 12 +- .../test/layerTests/LstmTestImpl.cpp | 12 +- src/backends/cl/test/ClCreateWorkloadTests.cpp | 146 ++++++++++++++------- src/backends/cl/test/ClMemCopyTests.cpp | 12 +- src/backends/neon/test/NeonCreateWorkloadTests.cpp | 20 +-- src/backends/neon/test/NeonMemCopyTests.cpp | 12 +- 15 files changed, 232 insertions(+), 105 deletions(-) create mode 100644 src/armnn/test/PredicateResult.hpp (limited to 'src') diff --git a/src/armnn/test/PredicateResult.hpp b/src/armnn/test/PredicateResult.hpp new file mode 100644 index 0000000000..a344c8e3ad --- /dev/null +++ b/src/armnn/test/PredicateResult.hpp @@ -0,0 +1,48 @@ +// +// Copyright © 2021 Arm Ltd and Contributors. All rights reserved. +// SPDX-License-Identifier: MIT +// +#pragma once + +#include + +namespace armnn +{ + +class PredicateResult +{ +public: + explicit PredicateResult(bool result) + : m_Result(result) + {} + + PredicateResult(const PredicateResult& predicateResult) + : m_Result(predicateResult.m_Result) + , m_Message(predicateResult.m_Message.str()) + {} + + void SetResult(bool newResult) + { + m_Result = newResult; + } + + std::stringstream& Message() + { + return m_Message; + } + + bool operator!() const + { + return !m_Result; + } + + void operator=(PredicateResult otherPredicateResult) + { + otherPredicateResult.m_Result = m_Result; + } + + bool m_Result; + std::stringstream m_Message; +}; + +} // namespace armnn \ No newline at end of file diff --git a/src/armnn/test/ProfilerTests.cpp b/src/armnn/test/ProfilerTests.cpp index a0df3b6b62..21900ffb9a 100644 --- a/src/armnn/test/ProfilerTests.cpp +++ b/src/armnn/test/ProfilerTests.cpp @@ -8,7 +8,6 @@ #include #include -#include #include #include @@ -225,9 +224,9 @@ BOOST_AUTO_TEST_CASE(WriteEventResults) size_t eventSequenceSizeAfter = armnn::GetProfilerEventSequenceSize(profiler.get()); BOOST_TEST(eventSequenceSizeAfter == eventSequenceSizeBefore + 1); - boost::test_tools::output_test_stream output; + std::ostringstream output; profiler->AnalyzeEventsAndWriteResults(output); - BOOST_TEST(!output.is_empty(false)); + BOOST_TEST(!output.str().empty()); // output should contain event name 'test' BOOST_CHECK(output.str().find("test") != std::string::npos); diff --git a/src/armnn/test/TensorHelpers.hpp b/src/armnn/test/TensorHelpers.hpp index fd9dd83770..ceb6d0f9d7 100644 --- a/src/armnn/test/TensorHelpers.hpp +++ b/src/armnn/test/TensorHelpers.hpp @@ -4,6 +4,7 @@ // #pragma once +#include "PredicateResult.hpp" #include #include #include @@ 
-70,7 +71,7 @@ bool SelectiveCompareBoolean(T a, T b) }; template -boost::test_tools::predicate_result CompareTensors(const boost::multi_array& a, +armnn::PredicateResult CompareTensors(const boost::multi_array& a, const boost::multi_array& b, bool compareBoolean = false, bool isDynamic = false) @@ -84,8 +85,8 @@ boost::test_tools::predicate_result CompareTensors(const boost::multi_array maxReportedDifferences) { errorString << ", ... (and " << (numFailedElements - maxReportedDifferences) << " other differences)"; } - comparisonResult.message() << errorString.str(); + comparisonResult.Message() << errorString.str(); } return comparisonResult; diff --git a/src/armnn/test/UnitTests.hpp b/src/armnn/test/UnitTests.hpp index c15477bf19..b55b13d4c8 100644 --- a/src/armnn/test/UnitTests.hpp +++ b/src/armnn/test/UnitTests.hpp @@ -42,7 +42,8 @@ void CompareTestResultIfSupported(const std::string& testName, const LayerTestRe "The test name does not match the supportedness it is reporting"); if (testResult.supported) { - BOOST_TEST(CompareTensors(testResult.output, testResult.outputExpected, testResult.compareBoolean)); + auto result = CompareTensors(testResult.output, testResult.outputExpected, testResult.compareBoolean); + BOOST_TEST(result.m_Result, result.m_Message.str()); } } @@ -56,7 +57,8 @@ void CompareTestResultIfSupported(const std::string& testName, const std::vector "The test name does not match the supportedness it is reporting"); if (testResult[i].supported) { - BOOST_TEST(CompareTensors(testResult[i].output, testResult[i].outputExpected)); + auto result = CompareTensors(testResult[i].output, testResult[i].outputExpected); + BOOST_TEST(result.m_Result, result.m_Message.str()); } } } diff --git a/src/armnnDeserializer/test/ParserFlatbuffersSerializeFixture.hpp b/src/armnnDeserializer/test/ParserFlatbuffersSerializeFixture.hpp index cea6a43454..5f5ec1c5f4 100644 --- a/src/armnnDeserializer/test/ParserFlatbuffersSerializeFixture.hpp +++ b/src/armnnDeserializer/test/ParserFlatbuffersSerializeFixture.hpp @@ -238,6 +238,7 @@ void ParserFlatbuffersSerializeFixture::RunTest( armnn::BindingPointInfo bindingInfo = ConvertBindingInfo( m_Parser->GetNetworkOutputBindingInfo(layersId, it.first)); auto outputExpected = MakeTensor(bindingInfo.second, it.second); - BOOST_TEST(CompareTensors(outputExpected, outputStorage[it.first])); + auto result = CompareTensors(outputExpected, outputStorage[it.first]); + BOOST_TEST(result.m_Result, result.m_Message.str()); } } diff --git a/src/armnnTfLiteParser/test/ParserFlatbuffersFixture.hpp b/src/armnnTfLiteParser/test/ParserFlatbuffersFixture.hpp index fc1d94e21f..f333ac0d40 100644 --- a/src/armnnTfLiteParser/test/ParserFlatbuffersFixture.hpp +++ b/src/armnnTfLiteParser/test/ParserFlatbuffersFixture.hpp @@ -321,7 +321,8 @@ void ParserFlatbuffersFixture::RunTest(size_t subgraphId, { armnn::BindingPointInfo bindingInfo = m_Parser->GetNetworkOutputBindingInfo(subgraphId, it.first); auto outputExpected = MakeTensor(bindingInfo.second, it.second, isDynamic); - BOOST_TEST(CompareTensors(outputExpected, outputStorage[it.first], false, isDynamic)); + auto result = CompareTensors(outputExpected, outputStorage[it.first], false, isDynamic); + BOOST_TEST(result.m_Result, result.m_Message.str()); } } @@ -420,6 +421,7 @@ void ParserFlatbuffersFixture::RunTest(size_t subgraphId, { armnn::BindingPointInfo bindingInfo = m_Parser->GetNetworkOutputBindingInfo(subgraphId, it.first); auto outputExpected = MakeTensor(bindingInfo.second, it.second); - 
BOOST_TEST(CompareTensors(outputExpected, outputStorage[it.first], false)); + auto result = CompareTensors(outputExpected, outputStorage[it.first], false); + BOOST_TEST(result.m_Result, result.m_Message.str()); } } \ No newline at end of file diff --git a/src/armnnUtils/ParserPrototxtFixture.hpp b/src/armnnUtils/ParserPrototxtFixture.hpp index 782c181982..ad991efa36 100644 --- a/src/armnnUtils/ParserPrototxtFixture.hpp +++ b/src/armnnUtils/ParserPrototxtFixture.hpp @@ -255,11 +255,13 @@ void ParserPrototxtFixture::RunTest(const std::map(bindingInfo.second, it.second); if (std::is_same::value) { - BOOST_TEST(CompareTensors(outputExpected, outputStorage[it.first], true)); + auto result = CompareTensors(outputExpected, outputStorage[it.first], true); + BOOST_TEST(result.m_Result, result.m_Message.str()); } else { - BOOST_TEST(CompareTensors(outputExpected, outputStorage[it.first])); + auto result = CompareTensors(outputExpected, outputStorage[it.first]); + BOOST_TEST(result.m_Result, result.m_Message.str()); } } } diff --git a/src/backends/aclCommon/test/CreateWorkloadClNeon.hpp b/src/backends/aclCommon/test/CreateWorkloadClNeon.hpp index b14e148287..0a30907f55 100644 --- a/src/backends/aclCommon/test/CreateWorkloadClNeon.hpp +++ b/src/backends/aclCommon/test/CreateWorkloadClNeon.hpp @@ -5,7 +5,7 @@ #pragma once #include - +#include #include #include #include @@ -27,8 +27,8 @@ namespace using namespace std; template -boost::test_tools::predicate_result CompareTensorHandleShape(IComputeTensorHandle* tensorHandle, - std::initializer_list expectedDimensions) +PredicateResult CompareTensorHandleShape(IComputeTensorHandle* tensorHandle, + std::initializer_list expectedDimensions) { arm_compute::ITensorInfo* info = tensorHandle->GetTensor().info(); @@ -36,8 +36,8 @@ boost::test_tools::predicate_result CompareTensorHandleShape(IComputeTensorHandl auto numExpectedDims = expectedDimensions.size(); if (infoNumDims != numExpectedDims) { - boost::test_tools::predicate_result res(false); - res.message() << "Different number of dimensions [" << info->num_dimensions() + PredicateResult res(false); + res.Message() << "Different number of dimensions [" << info->num_dimensions() << "!=" << expectedDimensions.size() << "]"; return res; } @@ -48,8 +48,8 @@ boost::test_tools::predicate_result CompareTensorHandleShape(IComputeTensorHandl { if (info->dimension(i) != expectedDimension) { - boost::test_tools::predicate_result res(false); - res.message() << "For dimension " << i << + PredicateResult res(false); + res.Message() << "For dimension " << i << " expected size " << expectedDimension << " got " << info->dimension(i); return res; @@ -58,7 +58,7 @@ boost::test_tools::predicate_result CompareTensorHandleShape(IComputeTensorHandl i--; } - return true; + return PredicateResult(true); } template @@ -97,7 +97,8 @@ void CreateMemCopyWorkloads(IWorkloadFactory& factory) auto inputHandle1 = PolymorphicDowncast(queueDescriptor1.m_Inputs[0]); auto outputHandle1 = PolymorphicDowncast(queueDescriptor1.m_Outputs[0]); BOOST_TEST((inputHandle1->GetTensorInfo() == TensorInfo({2, 3}, DataType::Float32))); - BOOST_TEST(CompareTensorHandleShape(outputHandle1, {2, 3})); + auto result = CompareTensorHandleShape(outputHandle1, {2, 3}); + BOOST_TEST(result.m_Result, result.m_Message.str()); MemCopyQueueDescriptor queueDescriptor2 = workload2->GetData(); @@ -105,7 +106,8 @@ void CreateMemCopyWorkloads(IWorkloadFactory& factory) BOOST_TEST(queueDescriptor2.m_Outputs.size() == 1); auto inputHandle2 = 
PolymorphicDowncast(queueDescriptor2.m_Inputs[0]); auto outputHandle2 = PolymorphicDowncast(queueDescriptor2.m_Outputs[0]); - BOOST_TEST(CompareTensorHandleShape(inputHandle2, {2, 3})); + result = CompareTensorHandleShape(inputHandle2, {2, 3}); + BOOST_TEST(result.m_Result, result.m_Message.str()); BOOST_TEST((outputHandle2->GetTensorInfo() == TensorInfo({2, 3}, DataType::Float32))); } diff --git a/src/backends/aclCommon/test/MemCopyTests.cpp b/src/backends/aclCommon/test/MemCopyTests.cpp index 3e26364354..ffba19323a 100644 --- a/src/backends/aclCommon/test/MemCopyTests.cpp +++ b/src/backends/aclCommon/test/MemCopyTests.cpp @@ -48,28 +48,32 @@ BOOST_AUTO_TEST_CASE(CopyBetweenNeonAndGpu) { LayerTestResult result = MemCopyTest(false); - BOOST_TEST(CompareTensors(result.output, result.outputExpected)); + auto predResult = CompareTensors(result.output, result.outputExpected); + BOOST_TEST(predResult.m_Result, predResult.m_Message.str()); } BOOST_AUTO_TEST_CASE(CopyBetweenGpuAndNeon) { LayerTestResult result = MemCopyTest(false); - BOOST_TEST(CompareTensors(result.output, result.outputExpected)); + auto predResult = CompareTensors(result.output, result.outputExpected); + BOOST_TEST(predResult.m_Result, predResult.m_Message.str()); } BOOST_AUTO_TEST_CASE(CopyBetweenNeonAndGpuWithSubtensors) { LayerTestResult result = MemCopyTest(true); - BOOST_TEST(CompareTensors(result.output, result.outputExpected)); + auto predResult = CompareTensors(result.output, result.outputExpected); + BOOST_TEST(predResult.m_Result, predResult.m_Message.str()); } BOOST_AUTO_TEST_CASE(CopyBetweenGpuAndNeonWithSubtensors) { LayerTestResult result = MemCopyTest(true); - BOOST_TEST(CompareTensors(result.output, result.outputExpected)); + auto predResult = CompareTensors(result.output, result.outputExpected); + BOOST_TEST(predResult.m_Result, predResult.m_Message.str()); } BOOST_AUTO_TEST_SUITE_END() diff --git a/src/backends/backendsCommon/test/layerTests/DetectionPostProcessTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/DetectionPostProcessTestImpl.hpp index f68082762c..c6636554ea 100644 --- a/src/backends/backendsCommon/test/layerTests/DetectionPostProcessTestImpl.hpp +++ b/src/backends/backendsCommon/test/layerTests/DetectionPostProcessTestImpl.hpp @@ -225,10 +225,14 @@ void DetectionPostProcessImpl(const armnn::TensorInfo& boxEncodingsInfo, CopyDataFromITensorHandle(detectionScoresResult.output.origin(), outputScoresHandle.get()); CopyDataFromITensorHandle(numDetectionsResult.output.origin(), numDetectionHandle.get()); - BOOST_TEST(CompareTensors(detectionBoxesResult.output, detectionBoxesResult.outputExpected)); - BOOST_TEST(CompareTensors(detectionClassesResult.output, detectionClassesResult.outputExpected)); - BOOST_TEST(CompareTensors(detectionScoresResult.output, detectionScoresResult.outputExpected)); - BOOST_TEST(CompareTensors(numDetectionsResult.output, numDetectionsResult.outputExpected)); + auto result = CompareTensors(detectionBoxesResult.output, detectionBoxesResult.outputExpected); + BOOST_TEST(result.m_Result, result.m_Message.str()); + result = CompareTensors(detectionClassesResult.output, detectionClassesResult.outputExpected); + BOOST_TEST(result.m_Result, result.m_Message.str()); + result = CompareTensors(detectionScoresResult.output, detectionScoresResult.outputExpected); + BOOST_TEST(result.m_Result, result.m_Message.str()); + result = CompareTensors(numDetectionsResult.output, numDetectionsResult.outputExpected); + BOOST_TEST(result.m_Result, result.m_Message.str()); } template> diff 
--git a/src/backends/backendsCommon/test/layerTests/LstmTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/LstmTestImpl.cpp index 7a9652a8ea..1c63542dcb 100644 --- a/src/backends/backendsCommon/test/layerTests/LstmTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/LstmTestImpl.cpp @@ -45,7 +45,8 @@ void LstmUtilsVectorBatchVectorAddTestImpl( VectorBatchVectorAdd(*vecDecoder, vSize, *batchVecDecoder, nBatch, *batchVecEncoder); // check shape and compare values - BOOST_TEST(CompareTensors(batchVec, expectedOutput)); + auto result = CompareTensors(batchVec, expectedOutput); + BOOST_TEST(result.m_Result, result.m_Message.str()); // check if iterator is back at start position batchVecEncoder->Set(1.0f); @@ -70,7 +71,8 @@ void LstmUtilsZeroVectorTestImpl( ZeroVector(*outputEncoder, vSize); // check shape and compare values - BOOST_TEST(CompareTensors(input, expectedOutput)); + auto result = CompareTensors(input, expectedOutput); + BOOST_TEST(result.m_Result, result.m_Message.str()); // check if iterator is back at start position outputEncoder->Set(1.0f); @@ -96,7 +98,8 @@ void LstmUtilsMeanStddevNormalizationTestImpl( MeanStddevNormalization(*inputDecoder, *outputEncoder, vSize, nBatch, 1e-8f); // check shape and compare values - BOOST_TEST(CompareTensors(input, expectedOutput)); + auto result = CompareTensors(input, expectedOutput); + BOOST_TEST(result.m_Result, result.m_Message.str()); // check if iterator is back at start position outputEncoder->Set(1.0f); @@ -123,7 +126,8 @@ void LstmUtilsVectorBatchVectorCwiseProductTestImpl( VectorBatchVectorCwiseProduct(*vecDecoder, vSize, *batchVecDecoder, nBatch, *batchVecEncoder); // check shape and compare values - BOOST_TEST(CompareTensors(batchVec, expectedOutput)); + auto result = CompareTensors(batchVec, expectedOutput); + BOOST_TEST(result.m_Result, result.m_Message.str()); // check if iterator is back at start position batchVecEncoder->Set(1.0f); diff --git a/src/backends/cl/test/ClCreateWorkloadTests.cpp b/src/backends/cl/test/ClCreateWorkloadTests.cpp index 47e2f4e8d7..7602cbbc0b 100644 --- a/src/backends/cl/test/ClCreateWorkloadTests.cpp +++ b/src/backends/cl/test/ClCreateWorkloadTests.cpp @@ -21,8 +21,8 @@ #include #include -boost::test_tools::predicate_result CompareIClTensorHandleShape(IClTensorHandle* tensorHandle, - std::initializer_list expectedDimensions) +armnn::PredicateResult CompareIClTensorHandleShape(IClTensorHandle* tensorHandle, + std::initializer_list expectedDimensions) { return CompareTensorHandleShape(tensorHandle, expectedDimensions); } @@ -43,8 +43,11 @@ static void ClCreateActivationWorkloadTest() auto inputHandle = PolymorphicDowncast(queueDescriptor.m_Inputs[0]); auto outputHandle = PolymorphicDowncast(queueDescriptor.m_Outputs[0]); - BOOST_TEST(CompareIClTensorHandleShape(inputHandle, {1, 1})); - BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {1, 1})); + auto predResult = CompareIClTensorHandleShape(inputHandle, {1, 1}); + BOOST_TEST(predResult.m_Result, predResult.m_Message.str()); + + predResult = CompareIClTensorHandleShape(outputHandle, {1, 1}); + BOOST_TEST(predResult.m_Result, predResult.m_Message.str()); } BOOST_AUTO_TEST_CASE(CreateActivationFloatWorkload) @@ -74,9 +77,12 @@ static void ClCreateElementwiseWorkloadTest() auto inputHandle1 = PolymorphicDowncast(queueDescriptor.m_Inputs[0]); auto inputHandle2 = PolymorphicDowncast(queueDescriptor.m_Inputs[1]); auto outputHandle = PolymorphicDowncast(queueDescriptor.m_Outputs[0]); - BOOST_TEST(CompareIClTensorHandleShape(inputHandle1, {2, 
3})); - BOOST_TEST(CompareIClTensorHandleShape(inputHandle2, {2, 3})); - BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {2, 3})); + auto predResult = CompareIClTensorHandleShape(inputHandle1, {2, 3}); + BOOST_TEST(predResult.m_Result, predResult.m_Message.str()); + predResult = CompareIClTensorHandleShape(inputHandle2, {2, 3}); + BOOST_TEST(predResult.m_Result, predResult.m_Message.str()); + predResult = CompareIClTensorHandleShape(outputHandle, {2, 3}); + BOOST_TEST(predResult.m_Result, predResult.m_Message.str()); } BOOST_AUTO_TEST_CASE(CreateAdditionFloatWorkload) @@ -167,8 +173,11 @@ static void ClCreateElementwiseUnaryWorkloadTest(armnn::UnaryOperation op) auto inputHandle = PolymorphicDowncast(queueDescriptor.m_Inputs[0]); auto outputHandle = PolymorphicDowncast(queueDescriptor.m_Outputs[0]); - BOOST_TEST(CompareIClTensorHandleShape(inputHandle, {2, 3})); - BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {2, 3})); + auto predResult = CompareIClTensorHandleShape(inputHandle, {2, 3}); + BOOST_TEST(predResult.m_Result, predResult.m_Message.str()); + + predResult = CompareIClTensorHandleShape(outputHandle, {2, 3}); + BOOST_TEST(predResult.m_Result, predResult.m_Message.str()); } BOOST_AUTO_TEST_CASE(CreateRsqrtFloat32WorkloadTest) @@ -192,15 +201,20 @@ static void ClCreateBatchNormalizationWorkloadTest(DataLayout dataLayout) auto inputHandle = PolymorphicDowncast(queueDescriptor.m_Inputs[0]); auto outputHandle = PolymorphicDowncast(queueDescriptor.m_Outputs[0]); - switch (dataLayout) + armnn::PredicateResult predResult(true); + switch (dataLayout) { case DataLayout::NHWC: - BOOST_TEST(CompareIClTensorHandleShape(inputHandle, { 2, 4, 4, 3 })); - BOOST_TEST(CompareIClTensorHandleShape(outputHandle, { 2, 4, 4, 3 })); + predResult = CompareIClTensorHandleShape(inputHandle, { 2, 4, 4, 3 }); + BOOST_TEST(predResult.m_Result, predResult.m_Message.str()); + predResult = CompareIClTensorHandleShape(outputHandle, { 2, 4, 4, 3 }); + BOOST_TEST(predResult.m_Result, predResult.m_Message.str()); break; default: // NCHW - BOOST_TEST(CompareIClTensorHandleShape(inputHandle, { 2, 3, 4, 4 })); - BOOST_TEST(CompareIClTensorHandleShape(outputHandle, { 2, 3, 4, 4 })); + predResult = CompareIClTensorHandleShape(inputHandle, { 2, 3, 4, 4 }); + BOOST_TEST(predResult.m_Result, predResult.m_Message.str()); + predResult = CompareIClTensorHandleShape(outputHandle, { 2, 3, 4, 4 }); + BOOST_TEST(predResult.m_Result, predResult.m_Message.str()); } } @@ -239,9 +253,10 @@ BOOST_AUTO_TEST_CASE(CreateConvertFp16ToFp32Workload) ConvertFp16ToFp32QueueDescriptor queueDescriptor = workload->GetData(); auto inputHandle = PolymorphicDowncast(queueDescriptor.m_Inputs[0]); auto outputHandle = PolymorphicDowncast(queueDescriptor.m_Outputs[0]); - - BOOST_TEST(CompareIClTensorHandleShape(inputHandle, {1, 3, 2, 3})); - BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {1, 3, 2, 3})); + auto predResult = CompareIClTensorHandleShape(inputHandle, {1, 3, 2, 3}); + BOOST_TEST(predResult.m_Result, predResult.m_Message.str()); + predResult = CompareIClTensorHandleShape(outputHandle, {1, 3, 2, 3}); + BOOST_TEST(predResult.m_Result, predResult.m_Message.str()); BOOST_TEST((inputHandle->GetTensor().info()->data_type() == arm_compute::DataType::F16)); BOOST_TEST((outputHandle->GetTensor().info()->data_type() == arm_compute::DataType::F32)); } @@ -258,8 +273,10 @@ BOOST_AUTO_TEST_CASE(CreateConvertFp32ToFp16Workload) auto inputHandle = PolymorphicDowncast(queueDescriptor.m_Inputs[0]); auto outputHandle = 
PolymorphicDowncast(queueDescriptor.m_Outputs[0]); - BOOST_TEST(CompareIClTensorHandleShape(inputHandle, {1, 3, 2, 3})); - BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {1, 3, 2, 3})); + auto predResult = CompareIClTensorHandleShape(inputHandle, {1, 3, 2, 3}); + BOOST_TEST(predResult.m_Result, predResult.m_Message.str()); + predResult = CompareIClTensorHandleShape(outputHandle, {1, 3, 2, 3}); + BOOST_TEST(predResult.m_Result, predResult.m_Message.str()); BOOST_TEST((inputHandle->GetTensor().info()->data_type() == arm_compute::DataType::F32)); BOOST_TEST((outputHandle->GetTensor().info()->data_type() == arm_compute::DataType::F16)); } @@ -470,8 +487,10 @@ static void ClDirectConvolution2dWorkloadTest() Convolution2dQueueDescriptor queueDescriptor = workload->GetData(); auto inputHandle = PolymorphicDowncast(queueDescriptor.m_Inputs[0]); auto outputHandle = PolymorphicDowncast(queueDescriptor.m_Outputs[0]); - BOOST_TEST(CompareIClTensorHandleShape(inputHandle, {2, 3, 6, 6})); - BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {2, 2, 6, 6})); + auto predResult = CompareIClTensorHandleShape(inputHandle, {2, 3, 6, 6}); + BOOST_TEST(predResult.m_Result, predResult.m_Message.str()); + predResult = CompareIClTensorHandleShape(outputHandle, {2, 2, 6, 6}); + BOOST_TEST(predResult.m_Result, predResult.m_Message.str()); } BOOST_AUTO_TEST_CASE(CreateDirectConvolution2dFloatWorkload) @@ -503,8 +522,10 @@ static void ClCreateFullyConnectedWorkloadTest() FullyConnectedQueueDescriptor queueDescriptor = workload->GetData(); auto inputHandle = PolymorphicDowncast(queueDescriptor.m_Inputs[0]); auto outputHandle = PolymorphicDowncast(queueDescriptor.m_Outputs[0]); - BOOST_TEST(CompareIClTensorHandleShape(inputHandle, {3, 1, 4, 5})); - BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {3, 7})); + auto predResult = CompareIClTensorHandleShape(inputHandle, {3, 1, 4, 5}); + BOOST_TEST(predResult.m_Result, predResult.m_Message.str()); + predResult = CompareIClTensorHandleShape(outputHandle, {3, 7}); + BOOST_TEST(predResult.m_Result, predResult.m_Message.str()); } @@ -660,8 +681,10 @@ static void ClCreateReshapeWorkloadTest() auto inputHandle = PolymorphicDowncast(queueDescriptor.m_Inputs[0]); auto outputHandle = PolymorphicDowncast(queueDescriptor.m_Outputs[0]); - BOOST_TEST(CompareIClTensorHandleShape(inputHandle, {4, 1})); - BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {1, 4})); + auto predResult = CompareIClTensorHandleShape(inputHandle, {4, 1}); + BOOST_TEST(predResult.m_Result, predResult.m_Message.str()); + predResult = CompareIClTensorHandleShape(outputHandle, {1, 4}); + BOOST_TEST(predResult.m_Result, predResult.m_Message.str()); } BOOST_AUTO_TEST_CASE(CreateReshapeFloatWorkload) @@ -705,8 +728,10 @@ static void ClSoftmaxWorkloadTest() tensorInfo.SetQuantizationScale(1.f / 256); } - BOOST_TEST(CompareIClTensorHandleShape(inputHandle, {4, 1})); - BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {4, 1})); + auto predResult = CompareIClTensorHandleShape(inputHandle, {4, 1}); + BOOST_TEST(predResult.m_Result, predResult.m_Message.str()); + predResult = CompareIClTensorHandleShape(outputHandle, {4, 1}); + BOOST_TEST(predResult.m_Result, predResult.m_Message.str()); } @@ -742,16 +767,20 @@ static void ClSplitterWorkloadTest() // Checks that outputs are as we expect them (see definition of CreateSplitterWorkloadTest). 
SplitterQueueDescriptor queueDescriptor = workload->GetData(); auto inputHandle = PolymorphicDowncast(queueDescriptor.m_Inputs[0]); - BOOST_TEST(CompareIClTensorHandleShape(inputHandle, {5, 7, 7})); + auto predResult = CompareIClTensorHandleShape(inputHandle, {5, 7, 7}); + BOOST_TEST(predResult.m_Result, predResult.m_Message.str()); auto outputHandle1 = PolymorphicDowncast(queueDescriptor.m_Outputs[1]); - BOOST_TEST(CompareIClTensorHandleShape(outputHandle1, {2, 7, 7})); + predResult = CompareIClTensorHandleShape(outputHandle1, {2, 7, 7}); + BOOST_TEST(predResult.m_Result, predResult.m_Message.str()); auto outputHandle2 = PolymorphicDowncast(queueDescriptor.m_Outputs[2]); - BOOST_TEST(CompareIClTensorHandleShape(outputHandle2, {2, 7, 7})); + predResult = CompareIClTensorHandleShape(outputHandle2, {2, 7, 7}); + BOOST_TEST(predResult.m_Result, predResult.m_Message.str()); auto outputHandle0 = PolymorphicDowncast(queueDescriptor.m_Outputs[0]); - BOOST_TEST(CompareIClTensorHandleShape(outputHandle0, {1, 7, 7})); + predResult = CompareIClTensorHandleShape(outputHandle0, {1, 7, 7}); + BOOST_TEST(predResult.m_Result, predResult.m_Message.str()); } BOOST_AUTO_TEST_CASE(CreateSplitterFloatWorkload) @@ -931,8 +960,10 @@ static void ClCreateLogSoftmaxWorkloadTest() auto inputHandle = PolymorphicDowncast(queueDescriptor.m_Inputs[0]); auto outputHandle = PolymorphicDowncast(queueDescriptor.m_Outputs[0]); - BOOST_TEST(CompareIClTensorHandleShape(inputHandle, {4, 1})); - BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {4, 1})); + auto predResult = CompareIClTensorHandleShape(inputHandle, {4, 1}); + BOOST_TEST(predResult.m_Result, predResult.m_Message.str()); + predResult = CompareIClTensorHandleShape(outputHandle, {4, 1}); + BOOST_TEST(predResult.m_Result, predResult.m_Message.str()); } BOOST_AUTO_TEST_CASE(CreateLogSoftmaxFloat32WorkloadTest) @@ -952,8 +983,10 @@ static void ClCreateLstmWorkloadTest() LstmQueueDescriptor queueDescriptor = workload->GetData(); auto inputHandle = PolymorphicDowncast(queueDescriptor.m_Inputs[0]); auto outputHandle = PolymorphicDowncast(queueDescriptor.m_Outputs[1]); - BOOST_TEST(CompareIClTensorHandleShape(inputHandle, { 2, 2 })); - BOOST_TEST(CompareIClTensorHandleShape(outputHandle, { 2, 4 })); + auto predResult = CompareIClTensorHandleShape(inputHandle, {2, 2}); + BOOST_TEST(predResult.m_Result, predResult.m_Message.str()); + predResult = CompareIClTensorHandleShape(outputHandle, {2, 4}); + BOOST_TEST(predResult.m_Result, predResult.m_Message.str()); } BOOST_AUTO_TEST_CASE(CreateLSTMWorkloadFloatWorkload) @@ -975,16 +1008,20 @@ static void ClResizeWorkloadTest(DataLayout dataLayout) auto inputHandle = PolymorphicDowncast(queueDescriptor.m_Inputs[0]); auto outputHandle = PolymorphicDowncast(queueDescriptor.m_Outputs[0]); + armnn::PredicateResult predResult(true); switch (dataLayout) { case DataLayout::NHWC: - BOOST_TEST(CompareIClTensorHandleShape(inputHandle, { 2, 4, 4, 3 })); - BOOST_TEST(CompareIClTensorHandleShape(outputHandle, { 2, 2, 2, 3 })); + predResult = CompareIClTensorHandleShape(inputHandle, { 2, 4, 4, 3 }); + BOOST_TEST(predResult.m_Result, predResult.m_Message.str()); + predResult = CompareIClTensorHandleShape(outputHandle, { 2, 2, 2, 3 }); + BOOST_TEST(predResult.m_Result, predResult.m_Message.str()); break; - case DataLayout::NCHW: - default: - BOOST_TEST(CompareIClTensorHandleShape(inputHandle, { 2, 3, 4, 4 })); - BOOST_TEST(CompareIClTensorHandleShape(outputHandle, { 2, 3, 2, 2 })); + default: // DataLayout::NCHW + predResult = 
CompareIClTensorHandleShape(inputHandle, { 2, 3, 4, 4 }); + BOOST_TEST(predResult.m_Result, predResult.m_Message.str()); + predResult = CompareIClTensorHandleShape(outputHandle, { 2, 3, 2, 2 }); + BOOST_TEST(predResult.m_Result, predResult.m_Message.str()); } } @@ -1033,8 +1070,10 @@ static void ClMeanWorkloadTest() auto outputHandle = PolymorphicDowncast(queueDescriptor.m_Outputs[0]); // The first dimension (batch size) in both input and output is singular thus it has been reduced by ACL. - BOOST_TEST(CompareIClTensorHandleShape(inputHandle, { 1, 3, 7, 4 })); - BOOST_TEST(CompareIClTensorHandleShape(outputHandle, { 1, 4 })); + auto predResult = CompareIClTensorHandleShape(inputHandle, { 1, 3, 7, 4 }); + BOOST_TEST(predResult.m_Result, predResult.m_Message.str()); + predResult = CompareIClTensorHandleShape(outputHandle, { 1, 4 }); + BOOST_TEST(predResult.m_Result, predResult.m_Message.str()); } BOOST_AUTO_TEST_CASE(CreateMeanFloat32Workload) @@ -1067,9 +1106,12 @@ static void ClCreateConcatWorkloadTest(std::initializer_list outpu auto inputHandle1 = PolymorphicDowncast(queueDescriptor.m_Inputs[1]); auto outputHandle = PolymorphicDowncast(queueDescriptor.m_Outputs[0]); - BOOST_TEST(CompareIClTensorHandleShape(inputHandle0, { 2, 3, 2, 5 })); - BOOST_TEST(CompareIClTensorHandleShape(inputHandle1, { 2, 3, 2, 5 })); - BOOST_TEST(CompareIClTensorHandleShape(outputHandle, outputShape)); + auto predResult = CompareIClTensorHandleShape(inputHandle0, { 2, 3, 2, 5 }); + BOOST_TEST(predResult.m_Result, predResult.m_Message.str()); + predResult = CompareIClTensorHandleShape(inputHandle1, { 2, 3, 2, 5 }); + BOOST_TEST(predResult.m_Result, predResult.m_Message.str()); + predResult = CompareIClTensorHandleShape(outputHandle, outputShape); + BOOST_TEST(predResult.m_Result, predResult.m_Message.str()); } BOOST_AUTO_TEST_CASE(CreateConcatDim0Float32Workload) @@ -1115,8 +1157,10 @@ static void ClSpaceToDepthWorkloadTest() auto inputHandle = PolymorphicDowncast(queueDescriptor.m_Inputs[0]); auto outputHandle = PolymorphicDowncast(queueDescriptor.m_Outputs[0]); - BOOST_TEST(CompareIClTensorHandleShape(inputHandle, { 1, 2, 2, 1 })); - BOOST_TEST(CompareIClTensorHandleShape(outputHandle, { 1, 1, 1, 4 })); + auto predResult = CompareIClTensorHandleShape(inputHandle, { 1, 2, 2, 1 }); + BOOST_TEST(predResult.m_Result, predResult.m_Message.str()); + predResult = CompareIClTensorHandleShape(outputHandle, { 1, 1, 1, 4 }); + BOOST_TEST(predResult.m_Result, predResult.m_Message.str()); } BOOST_AUTO_TEST_CASE(CreateSpaceToDepthFloat32Workload) @@ -1161,10 +1205,12 @@ static void ClCreateStackWorkloadTest(const std::initializer_list& for (unsigned int i = 0; i < numInputs; ++i) { auto inputHandle = PolymorphicDowncast(queueDescriptor.m_Inputs[i]); - BOOST_TEST(CompareIClTensorHandleShape(inputHandle, inputShape)); + auto predResult1 = CompareIClTensorHandleShape(inputHandle, inputShape); + BOOST_TEST(predResult1.m_Result, predResult1.m_Message.str()); } auto outputHandle = PolymorphicDowncast(queueDescriptor.m_Outputs[0]); - BOOST_TEST(CompareIClTensorHandleShape(outputHandle, outputShape)); + auto predResult2 = CompareIClTensorHandleShape(outputHandle, outputShape); + BOOST_TEST(predResult2.m_Result, predResult2.m_Message.str()); } BOOST_AUTO_TEST_CASE(CreateStackFloat32Workload) diff --git a/src/backends/cl/test/ClMemCopyTests.cpp b/src/backends/cl/test/ClMemCopyTests.cpp index 3cd9af7910..c26f7bdae8 100644 --- a/src/backends/cl/test/ClMemCopyTests.cpp +++ b/src/backends/cl/test/ClMemCopyTests.cpp @@ -19,28 +19,32 @@ 
BOOST_AUTO_TEST_CASE(CopyBetweenCpuAndGpu) { LayerTestResult result = MemCopyTest(false); - BOOST_TEST(CompareTensors(result.output, result.outputExpected)); + auto predResult = CompareTensors(result.output, result.outputExpected); + BOOST_TEST(predResult.m_Result, predResult.m_Message.str()); } BOOST_AUTO_TEST_CASE(CopyBetweenGpuAndCpu) { LayerTestResult result = MemCopyTest(false); - BOOST_TEST(CompareTensors(result.output, result.outputExpected)); + auto predResult = CompareTensors(result.output, result.outputExpected); + BOOST_TEST(predResult.m_Result, predResult.m_Message.str()); } BOOST_AUTO_TEST_CASE(CopyBetweenCpuAndGpuWithSubtensors) { LayerTestResult result = MemCopyTest(true); - BOOST_TEST(CompareTensors(result.output, result.outputExpected)); + auto predResult = CompareTensors(result.output, result.outputExpected); + BOOST_TEST(predResult.m_Result, predResult.m_Message.str()); } BOOST_AUTO_TEST_CASE(CopyBetweenGpuAndCpuWithSubtensors) { LayerTestResult result = MemCopyTest(true); - BOOST_TEST(CompareTensors(result.output, result.outputExpected)); + auto predResult = CompareTensors(result.output, result.outputExpected); + BOOST_TEST(predResult.m_Result, predResult.m_Message.str()); } BOOST_AUTO_TEST_SUITE_END() diff --git a/src/backends/neon/test/NeonCreateWorkloadTests.cpp b/src/backends/neon/test/NeonCreateWorkloadTests.cpp index c994bfe55a..a8c0c8aca0 100644 --- a/src/backends/neon/test/NeonCreateWorkloadTests.cpp +++ b/src/backends/neon/test/NeonCreateWorkloadTests.cpp @@ -23,8 +23,8 @@ BOOST_AUTO_TEST_SUITE(CreateWorkloadNeon) namespace { -boost::test_tools::predicate_result CompareIAclTensorHandleShape(IAclTensorHandle* tensorHandle, - std::initializer_list expectedDimensions) +armnn::PredicateResult CompareIAclTensorHandleShape(IAclTensorHandle* tensorHandle, + std::initializer_list expectedDimensions) { return CompareTensorHandleShape(tensorHandle, expectedDimensions); } @@ -564,16 +564,20 @@ static void NeonCreateResizeWorkloadTest(DataLayout dataLayout) auto inputHandle = PolymorphicDowncast(queueDescriptor.m_Inputs[0]); auto outputHandle = PolymorphicDowncast(queueDescriptor.m_Outputs[0]); + armnn::PredicateResult predResult(true); switch (dataLayout) { case DataLayout::NHWC: - BOOST_TEST(CompareIAclTensorHandleShape(inputHandle, { 2, 4, 4, 3 })); - BOOST_TEST(CompareIAclTensorHandleShape(outputHandle, { 2, 2, 2, 3 })); + predResult = CompareIAclTensorHandleShape(inputHandle, { 2, 4, 4, 3 }); + BOOST_TEST(predResult.m_Result, predResult.m_Message.str()); + predResult = CompareIAclTensorHandleShape(outputHandle, { 2, 2, 2, 3 }); + BOOST_TEST(predResult.m_Result, predResult.m_Message.str()); break; - case DataLayout::NCHW: - default: - BOOST_TEST(CompareIAclTensorHandleShape(inputHandle, { 2, 3, 4, 4 })); - BOOST_TEST(CompareIAclTensorHandleShape(outputHandle, { 2, 3, 2, 2 })); + default: // DataLayout::NCHW + predResult = CompareIAclTensorHandleShape(inputHandle, { 2, 3, 4, 4 }); + BOOST_TEST(predResult.m_Result, predResult.m_Message.str()); + predResult = CompareIAclTensorHandleShape(outputHandle, { 2, 3, 2, 2 }); + BOOST_TEST(predResult.m_Result, predResult.m_Message.str()); } } diff --git a/src/backends/neon/test/NeonMemCopyTests.cpp b/src/backends/neon/test/NeonMemCopyTests.cpp index dbe1f8da3f..6a3d05d000 100644 --- a/src/backends/neon/test/NeonMemCopyTests.cpp +++ b/src/backends/neon/test/NeonMemCopyTests.cpp @@ -20,28 +20,32 @@ BOOST_AUTO_TEST_CASE(CopyBetweenCpuAndNeon) { LayerTestResult result = MemCopyTest(false); - BOOST_TEST(CompareTensors(result.output, 
result.outputExpected)); + auto predResult = CompareTensors(result.output, result.outputExpected); + BOOST_TEST(predResult.m_Result, predResult.m_Message.str()); } BOOST_AUTO_TEST_CASE(CopyBetweenNeonAndCpu) { LayerTestResult result = MemCopyTest(false); - BOOST_TEST(CompareTensors(result.output, result.outputExpected)); + auto predResult = CompareTensors(result.output, result.outputExpected); + BOOST_TEST(predResult.m_Result, predResult.m_Message.str()); } BOOST_AUTO_TEST_CASE(CopyBetweenCpuAndNeonWithSubtensors) { LayerTestResult result = MemCopyTest(true); - BOOST_TEST(CompareTensors(result.output, result.outputExpected)); + auto predResult = CompareTensors(result.output, result.outputExpected); + BOOST_TEST(predResult.m_Result, predResult.m_Message.str()); } BOOST_AUTO_TEST_CASE(CopyBetweenNeonAndCpuWithSubtensors) { LayerTestResult result = MemCopyTest(true); - BOOST_TEST(CompareTensors(result.output, result.outputExpected)); + auto predResult = CompareTensors(result.output, result.outputExpected); + BOOST_TEST(predResult.m_Result, predResult.m_Message.str()); } BOOST_AUTO_TEST_SUITE_END() -- cgit v1.2.1
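Usage note, not part of the patch itself: every converted test above follows the same pattern — a comparison helper returns an armnn::PredicateResult instead of boost::test_tools::predicate_result, and the caller asserts on m_Result while passing the accumulated m_Message as the failure description to BOOST_TEST. Below is a minimal, self-contained sketch of that pattern. The trimmed PredicateResult copy, the ComparePositive helper and the test module name are illustrative stand-ins introduced here for the example, not code from this change.

// Minimal sketch, assuming Boost.Test's header-only variant; helper and
// test names are illustrative, not taken from the ArmNN tree.
#define BOOST_TEST_MODULE PredicateResultUsageSketch
#include <boost/test/included/unit_test.hpp>

#include <sstream>

// Trimmed stand-in for armnn::PredicateResult (the class added by this patch
// also provides a copy constructor, SetResult(), operator! and operator=).
class PredicateResult
{
public:
    explicit PredicateResult(bool result) : m_Result(result) {}

    std::stringstream& Message() { return m_Message; }

    bool m_Result;
    std::stringstream m_Message;
};

// Hypothetical comparison helper mirroring CompareTensors /
// CompareTensorHandleShape: on failure, the reason is streamed into Message().
PredicateResult ComparePositive(int value)
{
    if (value > 0)
    {
        return PredicateResult(true);
    }
    PredicateResult res(false);
    res.Message() << "Expected a positive value but got " << value;
    return res;
}

BOOST_AUTO_TEST_CASE(PredicateResultUsage)
{
    auto result = ComparePositive(42);
    // Same check style as the converted tests above: the bool result goes to
    // BOOST_TEST and the streamed message becomes the failure description.
    BOOST_TEST(result.m_Result, result.m_Message.str());
}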