aboutsummaryrefslogtreecommitdiff
path: root/src/backends
diff options
context:
space:
mode:
authorColm Donelan <Colm.Donelan@arm.com>2021-05-17 13:01:52 +0100
committerColm Donelan <Colm.Donelan@arm.com>2021-05-18 11:25:13 +0100
commit25ab3a8326a9e2c52c84b2747fa72907109a695d (patch)
tree1d4eaaf5b41c68a4e3b3ce2cc400c3ffd76d510c /src/backends
parent1d239f5717e6e4adc47683e30a48b05e7511c734 (diff)
downloadarmnn-25ab3a8326a9e2c52c84b2747fa72907109a695d.tar.gz
IVGCVSW-5964 Removing some remaining boost utility usages from tests.
* Adding a basic PredicateResult class to replace boost::test_tools::predicate_result
* Replacing all uses of boost::test_tools::predicate_result with the new armnn::PredicateResult class
* Replacing use of boost::test_tools::output_test_stream output with std::ostringstream in ProfilerTests.cpp

Signed-off-by: Colm Donelan <Colm.Donelan@arm.com>
Change-Id: I75cdbbff98d984e26e4a50c125386b2988516fad
Diffstat (limited to 'src/backends')
-rw-r--r--src/backends/aclCommon/test/CreateWorkloadClNeon.hpp22
-rw-r--r--src/backends/aclCommon/test/MemCopyTests.cpp12
-rw-r--r--src/backends/backendsCommon/test/layerTests/DetectionPostProcessTestImpl.hpp12
-rw-r--r--src/backends/backendsCommon/test/layerTests/LstmTestImpl.cpp12
-rw-r--r--src/backends/cl/test/ClCreateWorkloadTests.cpp146
-rw-r--r--src/backends/cl/test/ClMemCopyTests.cpp12
-rw-r--r--src/backends/neon/test/NeonCreateWorkloadTests.cpp20
-rw-r--r--src/backends/neon/test/NeonMemCopyTests.cpp12
8 files changed, 160 insertions, 88 deletions
diff --git a/src/backends/aclCommon/test/CreateWorkloadClNeon.hpp b/src/backends/aclCommon/test/CreateWorkloadClNeon.hpp
index b14e148287..0a30907f55 100644
--- a/src/backends/aclCommon/test/CreateWorkloadClNeon.hpp
+++ b/src/backends/aclCommon/test/CreateWorkloadClNeon.hpp
@@ -5,7 +5,7 @@
#pragma once
#include <test/CreateWorkload.hpp>
-
+#include <test/PredicateResult.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>
#include <backendsCommon/MemCopyWorkload.hpp>
#include <reference/RefWorkloadFactory.hpp>
@@ -27,8 +27,8 @@ namespace
using namespace std;
template<typename IComputeTensorHandle>
-boost::test_tools::predicate_result CompareTensorHandleShape(IComputeTensorHandle* tensorHandle,
- std::initializer_list<unsigned int> expectedDimensions)
+PredicateResult CompareTensorHandleShape(IComputeTensorHandle* tensorHandle,
+ std::initializer_list<unsigned int> expectedDimensions)
{
arm_compute::ITensorInfo* info = tensorHandle->GetTensor().info();
@@ -36,8 +36,8 @@ boost::test_tools::predicate_result CompareTensorHandleShape(IComputeTensorHandle* tensorHandle,
auto numExpectedDims = expectedDimensions.size();
if (infoNumDims != numExpectedDims)
{
- boost::test_tools::predicate_result res(false);
- res.message() << "Different number of dimensions [" << info->num_dimensions()
+ PredicateResult res(false);
+ res.Message() << "Different number of dimensions [" << info->num_dimensions()
<< "!=" << expectedDimensions.size() << "]";
return res;
}
@@ -48,8 +48,8 @@ boost::test_tools::predicate_result CompareTensorHandleShape(IComputeTensorHandle* tensorHandle,
{
if (info->dimension(i) != expectedDimension)
{
- boost::test_tools::predicate_result res(false);
- res.message() << "For dimension " << i <<
+ PredicateResult res(false);
+ res.Message() << "For dimension " << i <<
" expected size " << expectedDimension <<
" got " << info->dimension(i);
return res;
@@ -58,7 +58,7 @@ boost::test_tools::predicate_result CompareTensorHandleShape(IComputeTensorHandle* tensorHandle,
i--;
}
- return true;
+ return PredicateResult(true);
}
template<typename IComputeTensorHandle>
@@ -97,7 +97,8 @@ void CreateMemCopyWorkloads(IWorkloadFactory& factory)
auto inputHandle1 = PolymorphicDowncast<RefTensorHandle*>(queueDescriptor1.m_Inputs[0]);
auto outputHandle1 = PolymorphicDowncast<IComputeTensorHandle*>(queueDescriptor1.m_Outputs[0]);
BOOST_TEST((inputHandle1->GetTensorInfo() == TensorInfo({2, 3}, DataType::Float32)));
- BOOST_TEST(CompareTensorHandleShape<IComputeTensorHandle>(outputHandle1, {2, 3}));
+ auto result = CompareTensorHandleShape<IComputeTensorHandle>(outputHandle1, {2, 3});
+ BOOST_TEST(result.m_Result, result.m_Message.str());
MemCopyQueueDescriptor queueDescriptor2 = workload2->GetData();
@@ -105,7 +106,8 @@ void CreateMemCopyWorkloads(IWorkloadFactory& factory)
BOOST_TEST(queueDescriptor2.m_Outputs.size() == 1);
auto inputHandle2 = PolymorphicDowncast<IComputeTensorHandle*>(queueDescriptor2.m_Inputs[0]);
auto outputHandle2 = PolymorphicDowncast<RefTensorHandle*>(queueDescriptor2.m_Outputs[0]);
- BOOST_TEST(CompareTensorHandleShape<IComputeTensorHandle>(inputHandle2, {2, 3}));
+ result = CompareTensorHandleShape<IComputeTensorHandle>(inputHandle2, {2, 3});
+ BOOST_TEST(result.m_Result, result.m_Message.str());
BOOST_TEST((outputHandle2->GetTensorInfo() == TensorInfo({2, 3}, DataType::Float32)));
}
diff --git a/src/backends/aclCommon/test/MemCopyTests.cpp b/src/backends/aclCommon/test/MemCopyTests.cpp
index 3e26364354..ffba19323a 100644
--- a/src/backends/aclCommon/test/MemCopyTests.cpp
+++ b/src/backends/aclCommon/test/MemCopyTests.cpp
@@ -48,28 +48,32 @@ BOOST_AUTO_TEST_CASE(CopyBetweenNeonAndGpu)
{
LayerTestResult<float, 4> result =
MemCopyTest<armnn::NeonWorkloadFactory, armnn::ClWorkloadFactory, armnn::DataType::Float32>(false);
- BOOST_TEST(CompareTensors(result.output, result.outputExpected));
+ auto predResult = CompareTensors(result.output, result.outputExpected);
+ BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
}
BOOST_AUTO_TEST_CASE(CopyBetweenGpuAndNeon)
{
LayerTestResult<float, 4> result =
MemCopyTest<armnn::ClWorkloadFactory, armnn::NeonWorkloadFactory, armnn::DataType::Float32>(false);
- BOOST_TEST(CompareTensors(result.output, result.outputExpected));
+ auto predResult = CompareTensors(result.output, result.outputExpected);
+ BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
}
BOOST_AUTO_TEST_CASE(CopyBetweenNeonAndGpuWithSubtensors)
{
LayerTestResult<float, 4> result =
MemCopyTest<armnn::NeonWorkloadFactory, armnn::ClWorkloadFactory, armnn::DataType::Float32>(true);
- BOOST_TEST(CompareTensors(result.output, result.outputExpected));
+ auto predResult = CompareTensors(result.output, result.outputExpected);
+ BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
}
BOOST_AUTO_TEST_CASE(CopyBetweenGpuAndNeonWithSubtensors)
{
LayerTestResult<float, 4> result =
MemCopyTest<armnn::ClWorkloadFactory, armnn::NeonWorkloadFactory, armnn::DataType::Float32>(true);
- BOOST_TEST(CompareTensors(result.output, result.outputExpected));
+ auto predResult = CompareTensors(result.output, result.outputExpected);
+ BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
}
BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/backends/backendsCommon/test/layerTests/DetectionPostProcessTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/DetectionPostProcessTestImpl.hpp
index f68082762c..c6636554ea 100644
--- a/src/backends/backendsCommon/test/layerTests/DetectionPostProcessTestImpl.hpp
+++ b/src/backends/backendsCommon/test/layerTests/DetectionPostProcessTestImpl.hpp
@@ -225,10 +225,14 @@ void DetectionPostProcessImpl(const armnn::TensorInfo& boxEncodingsInfo,
CopyDataFromITensorHandle(detectionScoresResult.output.origin(), outputScoresHandle.get());
CopyDataFromITensorHandle(numDetectionsResult.output.origin(), numDetectionHandle.get());
- BOOST_TEST(CompareTensors(detectionBoxesResult.output, detectionBoxesResult.outputExpected));
- BOOST_TEST(CompareTensors(detectionClassesResult.output, detectionClassesResult.outputExpected));
- BOOST_TEST(CompareTensors(detectionScoresResult.output, detectionScoresResult.outputExpected));
- BOOST_TEST(CompareTensors(numDetectionsResult.output, numDetectionsResult.outputExpected));
+ auto result = CompareTensors(detectionBoxesResult.output, detectionBoxesResult.outputExpected);
+ BOOST_TEST(result.m_Result, result.m_Message.str());
+ result = CompareTensors(detectionClassesResult.output, detectionClassesResult.outputExpected);
+ BOOST_TEST(result.m_Result, result.m_Message.str());
+ result = CompareTensors(detectionScoresResult.output, detectionScoresResult.outputExpected);
+ BOOST_TEST(result.m_Result, result.m_Message.str());
+ result = CompareTensors(numDetectionsResult.output, numDetectionsResult.outputExpected);
+ BOOST_TEST(result.m_Result, result.m_Message.str());
}
template<armnn::DataType QuantizedType, typename RawType = armnn::ResolveType<QuantizedType>>
diff --git a/src/backends/backendsCommon/test/layerTests/LstmTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/LstmTestImpl.cpp
index 7a9652a8ea..1c63542dcb 100644
--- a/src/backends/backendsCommon/test/layerTests/LstmTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/LstmTestImpl.cpp
@@ -45,7 +45,8 @@ void LstmUtilsVectorBatchVectorAddTestImpl(
VectorBatchVectorAdd(*vecDecoder, vSize, *batchVecDecoder, nBatch, *batchVecEncoder);
// check shape and compare values
- BOOST_TEST(CompareTensors(batchVec, expectedOutput));
+ auto result = CompareTensors(batchVec, expectedOutput);
+ BOOST_TEST(result.m_Result, result.m_Message.str());
// check if iterator is back at start position
batchVecEncoder->Set(1.0f);
@@ -70,7 +71,8 @@ void LstmUtilsZeroVectorTestImpl(
ZeroVector(*outputEncoder, vSize);
// check shape and compare values
- BOOST_TEST(CompareTensors(input, expectedOutput));
+ auto result = CompareTensors(input, expectedOutput);
+ BOOST_TEST(result.m_Result, result.m_Message.str());
// check if iterator is back at start position
outputEncoder->Set(1.0f);
@@ -96,7 +98,8 @@ void LstmUtilsMeanStddevNormalizationTestImpl(
MeanStddevNormalization(*inputDecoder, *outputEncoder, vSize, nBatch, 1e-8f);
// check shape and compare values
- BOOST_TEST(CompareTensors(input, expectedOutput));
+ auto result = CompareTensors(input, expectedOutput);
+ BOOST_TEST(result.m_Result, result.m_Message.str());
// check if iterator is back at start position
outputEncoder->Set(1.0f);
@@ -123,7 +126,8 @@ void LstmUtilsVectorBatchVectorCwiseProductTestImpl(
VectorBatchVectorCwiseProduct(*vecDecoder, vSize, *batchVecDecoder, nBatch, *batchVecEncoder);
// check shape and compare values
- BOOST_TEST(CompareTensors(batchVec, expectedOutput));
+ auto result = CompareTensors(batchVec, expectedOutput);
+ BOOST_TEST(result.m_Result, result.m_Message.str());
// check if iterator is back at start position
batchVecEncoder->Set(1.0f);
diff --git a/src/backends/cl/test/ClCreateWorkloadTests.cpp b/src/backends/cl/test/ClCreateWorkloadTests.cpp
index 47e2f4e8d7..7602cbbc0b 100644
--- a/src/backends/cl/test/ClCreateWorkloadTests.cpp
+++ b/src/backends/cl/test/ClCreateWorkloadTests.cpp
@@ -21,8 +21,8 @@
#include <cl/workloads/ClWorkloads.hpp>
#include <cl/workloads/ClWorkloadUtils.hpp>
-boost::test_tools::predicate_result CompareIClTensorHandleShape(IClTensorHandle* tensorHandle,
- std::initializer_list<unsigned int> expectedDimensions)
+armnn::PredicateResult CompareIClTensorHandleShape(IClTensorHandle* tensorHandle,
+ std::initializer_list<unsigned int> expectedDimensions)
{
return CompareTensorHandleShape<IClTensorHandle>(tensorHandle, expectedDimensions);
}
@@ -43,8 +43,11 @@ static void ClCreateActivationWorkloadTest()
auto inputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
auto outputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
- BOOST_TEST(CompareIClTensorHandleShape(inputHandle, {1, 1}));
- BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {1, 1}));
+ auto predResult = CompareIClTensorHandleShape(inputHandle, {1, 1});
+ BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+
+ predResult = CompareIClTensorHandleShape(outputHandle, {1, 1});
+ BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
}
BOOST_AUTO_TEST_CASE(CreateActivationFloatWorkload)
@@ -74,9 +77,12 @@ static void ClCreateElementwiseWorkloadTest()
auto inputHandle1 = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
auto inputHandle2 = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Inputs[1]);
auto outputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
- BOOST_TEST(CompareIClTensorHandleShape(inputHandle1, {2, 3}));
- BOOST_TEST(CompareIClTensorHandleShape(inputHandle2, {2, 3}));
- BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {2, 3}));
+ auto predResult = CompareIClTensorHandleShape(inputHandle1, {2, 3});
+ BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+ predResult = CompareIClTensorHandleShape(inputHandle2, {2, 3});
+ BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+ predResult = CompareIClTensorHandleShape(outputHandle, {2, 3});
+ BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
}
BOOST_AUTO_TEST_CASE(CreateAdditionFloatWorkload)
@@ -167,8 +173,11 @@ static void ClCreateElementwiseUnaryWorkloadTest(armnn::UnaryOperation op)
auto inputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
auto outputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
- BOOST_TEST(CompareIClTensorHandleShape(inputHandle, {2, 3}));
- BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {2, 3}));
+ auto predResult = CompareIClTensorHandleShape(inputHandle, {2, 3});
+ BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+
+ predResult = CompareIClTensorHandleShape(outputHandle, {2, 3});
+ BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
}
BOOST_AUTO_TEST_CASE(CreateRsqrtFloat32WorkloadTest)
@@ -192,15 +201,20 @@ static void ClCreateBatchNormalizationWorkloadTest(DataLayout dataLayout)
auto inputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
auto outputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
- switch (dataLayout)
+ armnn::PredicateResult predResult(true);
+ switch (dataLayout)
{
case DataLayout::NHWC:
- BOOST_TEST(CompareIClTensorHandleShape(inputHandle, { 2, 4, 4, 3 }));
- BOOST_TEST(CompareIClTensorHandleShape(outputHandle, { 2, 4, 4, 3 }));
+ predResult = CompareIClTensorHandleShape(inputHandle, { 2, 4, 4, 3 });
+ BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+ predResult = CompareIClTensorHandleShape(outputHandle, { 2, 4, 4, 3 });
+ BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
break;
default: // NCHW
- BOOST_TEST(CompareIClTensorHandleShape(inputHandle, { 2, 3, 4, 4 }));
- BOOST_TEST(CompareIClTensorHandleShape(outputHandle, { 2, 3, 4, 4 }));
+ predResult = CompareIClTensorHandleShape(inputHandle, { 2, 3, 4, 4 });
+ BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+ predResult = CompareIClTensorHandleShape(outputHandle, { 2, 3, 4, 4 });
+ BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
}
}
@@ -239,9 +253,10 @@ BOOST_AUTO_TEST_CASE(CreateConvertFp16ToFp32Workload)
ConvertFp16ToFp32QueueDescriptor queueDescriptor = workload->GetData();
auto inputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
auto outputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
-
- BOOST_TEST(CompareIClTensorHandleShape(inputHandle, {1, 3, 2, 3}));
- BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {1, 3, 2, 3}));
+ auto predResult = CompareIClTensorHandleShape(inputHandle, {1, 3, 2, 3});
+ BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+ predResult = CompareIClTensorHandleShape(outputHandle, {1, 3, 2, 3});
+ BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
BOOST_TEST((inputHandle->GetTensor().info()->data_type() == arm_compute::DataType::F16));
BOOST_TEST((outputHandle->GetTensor().info()->data_type() == arm_compute::DataType::F32));
}
@@ -258,8 +273,10 @@ BOOST_AUTO_TEST_CASE(CreateConvertFp32ToFp16Workload)
auto inputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
auto outputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
- BOOST_TEST(CompareIClTensorHandleShape(inputHandle, {1, 3, 2, 3}));
- BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {1, 3, 2, 3}));
+ auto predResult = CompareIClTensorHandleShape(inputHandle, {1, 3, 2, 3});
+ BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+ predResult = CompareIClTensorHandleShape(outputHandle, {1, 3, 2, 3});
+ BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
BOOST_TEST((inputHandle->GetTensor().info()->data_type() == arm_compute::DataType::F32));
BOOST_TEST((outputHandle->GetTensor().info()->data_type() == arm_compute::DataType::F16));
}
@@ -470,8 +487,10 @@ static void ClDirectConvolution2dWorkloadTest()
Convolution2dQueueDescriptor queueDescriptor = workload->GetData();
auto inputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
auto outputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
- BOOST_TEST(CompareIClTensorHandleShape(inputHandle, {2, 3, 6, 6}));
- BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {2, 2, 6, 6}));
+ auto predResult = CompareIClTensorHandleShape(inputHandle, {2, 3, 6, 6});
+ BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+ predResult = CompareIClTensorHandleShape(outputHandle, {2, 2, 6, 6});
+ BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
}
BOOST_AUTO_TEST_CASE(CreateDirectConvolution2dFloatWorkload)
@@ -503,8 +522,10 @@ static void ClCreateFullyConnectedWorkloadTest()
FullyConnectedQueueDescriptor queueDescriptor = workload->GetData();
auto inputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
auto outputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
- BOOST_TEST(CompareIClTensorHandleShape(inputHandle, {3, 1, 4, 5}));
- BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {3, 7}));
+ auto predResult = CompareIClTensorHandleShape(inputHandle, {3, 1, 4, 5});
+ BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+ predResult = CompareIClTensorHandleShape(outputHandle, {3, 7});
+ BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
}
@@ -660,8 +681,10 @@ static void ClCreateReshapeWorkloadTest()
auto inputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
auto outputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
- BOOST_TEST(CompareIClTensorHandleShape(inputHandle, {4, 1}));
- BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {1, 4}));
+ auto predResult = CompareIClTensorHandleShape(inputHandle, {4, 1});
+ BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+ predResult = CompareIClTensorHandleShape(outputHandle, {1, 4});
+ BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
}
BOOST_AUTO_TEST_CASE(CreateReshapeFloatWorkload)
@@ -705,8 +728,10 @@ static void ClSoftmaxWorkloadTest()
tensorInfo.SetQuantizationScale(1.f / 256);
}
- BOOST_TEST(CompareIClTensorHandleShape(inputHandle, {4, 1}));
- BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {4, 1}));
+ auto predResult = CompareIClTensorHandleShape(inputHandle, {4, 1});
+ BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+ predResult = CompareIClTensorHandleShape(outputHandle, {4, 1});
+ BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
}
@@ -742,16 +767,20 @@ static void ClSplitterWorkloadTest()
// Checks that outputs are as we expect them (see definition of CreateSplitterWorkloadTest).
SplitterQueueDescriptor queueDescriptor = workload->GetData();
auto inputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
- BOOST_TEST(CompareIClTensorHandleShape(inputHandle, {5, 7, 7}));
+ auto predResult = CompareIClTensorHandleShape(inputHandle, {5, 7, 7});
+ BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
auto outputHandle1 = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[1]);
- BOOST_TEST(CompareIClTensorHandleShape(outputHandle1, {2, 7, 7}));
+ predResult = CompareIClTensorHandleShape(outputHandle1, {2, 7, 7});
+ BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
auto outputHandle2 = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[2]);
- BOOST_TEST(CompareIClTensorHandleShape(outputHandle2, {2, 7, 7}));
+ predResult = CompareIClTensorHandleShape(outputHandle2, {2, 7, 7});
+ BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
auto outputHandle0 = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
- BOOST_TEST(CompareIClTensorHandleShape(outputHandle0, {1, 7, 7}));
+ predResult = CompareIClTensorHandleShape(outputHandle0, {1, 7, 7});
+ BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
}
BOOST_AUTO_TEST_CASE(CreateSplitterFloatWorkload)
@@ -931,8 +960,10 @@ static void ClCreateLogSoftmaxWorkloadTest()
auto inputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
auto outputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
- BOOST_TEST(CompareIClTensorHandleShape(inputHandle, {4, 1}));
- BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {4, 1}));
+ auto predResult = CompareIClTensorHandleShape(inputHandle, {4, 1});
+ BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+ predResult = CompareIClTensorHandleShape(outputHandle, {4, 1});
+ BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
}
BOOST_AUTO_TEST_CASE(CreateLogSoftmaxFloat32WorkloadTest)
@@ -952,8 +983,10 @@ static void ClCreateLstmWorkloadTest()
LstmQueueDescriptor queueDescriptor = workload->GetData();
auto inputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
auto outputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[1]);
- BOOST_TEST(CompareIClTensorHandleShape(inputHandle, { 2, 2 }));
- BOOST_TEST(CompareIClTensorHandleShape(outputHandle, { 2, 4 }));
+ auto predResult = CompareIClTensorHandleShape(inputHandle, {2, 2});
+ BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+ predResult = CompareIClTensorHandleShape(outputHandle, {2, 4});
+ BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
}
BOOST_AUTO_TEST_CASE(CreateLSTMWorkloadFloatWorkload)
@@ -975,16 +1008,20 @@ static void ClResizeWorkloadTest(DataLayout dataLayout)
auto inputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
auto outputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
+ armnn::PredicateResult predResult(true);
switch (dataLayout)
{
case DataLayout::NHWC:
- BOOST_TEST(CompareIClTensorHandleShape(inputHandle, { 2, 4, 4, 3 }));
- BOOST_TEST(CompareIClTensorHandleShape(outputHandle, { 2, 2, 2, 3 }));
+ predResult = CompareIClTensorHandleShape(inputHandle, { 2, 4, 4, 3 });
+ BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+ predResult = CompareIClTensorHandleShape(outputHandle, { 2, 2, 2, 3 });
+ BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
break;
- case DataLayout::NCHW:
- default:
- BOOST_TEST(CompareIClTensorHandleShape(inputHandle, { 2, 3, 4, 4 }));
- BOOST_TEST(CompareIClTensorHandleShape(outputHandle, { 2, 3, 2, 2 }));
+ default: // DataLayout::NCHW
+ predResult = CompareIClTensorHandleShape(inputHandle, { 2, 3, 4, 4 });
+ BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+ predResult = CompareIClTensorHandleShape(outputHandle, { 2, 3, 2, 2 });
+ BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
}
}
@@ -1033,8 +1070,10 @@ static void ClMeanWorkloadTest()
auto outputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
// The first dimension (batch size) in both input and output is singular thus it has been reduced by ACL.
- BOOST_TEST(CompareIClTensorHandleShape(inputHandle, { 1, 3, 7, 4 }));
- BOOST_TEST(CompareIClTensorHandleShape(outputHandle, { 1, 4 }));
+ auto predResult = CompareIClTensorHandleShape(inputHandle, { 1, 3, 7, 4 });
+ BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+ predResult = CompareIClTensorHandleShape(outputHandle, { 1, 4 });
+ BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
}
BOOST_AUTO_TEST_CASE(CreateMeanFloat32Workload)
@@ -1067,9 +1106,12 @@ static void ClCreateConcatWorkloadTest(std::initializer_list<unsigned int> outpu
auto inputHandle1 = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Inputs[1]);
auto outputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
- BOOST_TEST(CompareIClTensorHandleShape(inputHandle0, { 2, 3, 2, 5 }));
- BOOST_TEST(CompareIClTensorHandleShape(inputHandle1, { 2, 3, 2, 5 }));
- BOOST_TEST(CompareIClTensorHandleShape(outputHandle, outputShape));
+ auto predResult = CompareIClTensorHandleShape(inputHandle0, { 2, 3, 2, 5 });
+ BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+ predResult = CompareIClTensorHandleShape(inputHandle1, { 2, 3, 2, 5 });
+ BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+ predResult = CompareIClTensorHandleShape(outputHandle, outputShape);
+ BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
}
BOOST_AUTO_TEST_CASE(CreateConcatDim0Float32Workload)
@@ -1115,8 +1157,10 @@ static void ClSpaceToDepthWorkloadTest()
auto inputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
auto outputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
- BOOST_TEST(CompareIClTensorHandleShape(inputHandle, { 1, 2, 2, 1 }));
- BOOST_TEST(CompareIClTensorHandleShape(outputHandle, { 1, 1, 1, 4 }));
+ auto predResult = CompareIClTensorHandleShape(inputHandle, { 1, 2, 2, 1 });
+ BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+ predResult = CompareIClTensorHandleShape(outputHandle, { 1, 1, 1, 4 });
+ BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
}
BOOST_AUTO_TEST_CASE(CreateSpaceToDepthFloat32Workload)
@@ -1161,10 +1205,12 @@ static void ClCreateStackWorkloadTest(const std::initializer_list<unsigned int>&
for (unsigned int i = 0; i < numInputs; ++i)
{
auto inputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Inputs[i]);
- BOOST_TEST(CompareIClTensorHandleShape(inputHandle, inputShape));
+ auto predResult1 = CompareIClTensorHandleShape(inputHandle, inputShape);
+ BOOST_TEST(predResult1.m_Result, predResult1.m_Message.str());
}
auto outputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
- BOOST_TEST(CompareIClTensorHandleShape(outputHandle, outputShape));
+ auto predResult2 = CompareIClTensorHandleShape(outputHandle, outputShape);
+ BOOST_TEST(predResult2.m_Result, predResult2.m_Message.str());
}
BOOST_AUTO_TEST_CASE(CreateStackFloat32Workload)
diff --git a/src/backends/cl/test/ClMemCopyTests.cpp b/src/backends/cl/test/ClMemCopyTests.cpp
index 3cd9af7910..c26f7bdae8 100644
--- a/src/backends/cl/test/ClMemCopyTests.cpp
+++ b/src/backends/cl/test/ClMemCopyTests.cpp
@@ -19,28 +19,32 @@ BOOST_AUTO_TEST_CASE(CopyBetweenCpuAndGpu)
{
LayerTestResult<float, 4> result =
MemCopyTest<armnn::RefWorkloadFactory, armnn::ClWorkloadFactory, armnn::DataType::Float32>(false);
- BOOST_TEST(CompareTensors(result.output, result.outputExpected));
+ auto predResult = CompareTensors(result.output, result.outputExpected);
+ BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
}
BOOST_AUTO_TEST_CASE(CopyBetweenGpuAndCpu)
{
LayerTestResult<float, 4> result =
MemCopyTest<armnn::ClWorkloadFactory, armnn::RefWorkloadFactory, armnn::DataType::Float32>(false);
- BOOST_TEST(CompareTensors(result.output, result.outputExpected));
+ auto predResult = CompareTensors(result.output, result.outputExpected);
+ BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
}
BOOST_AUTO_TEST_CASE(CopyBetweenCpuAndGpuWithSubtensors)
{
LayerTestResult<float, 4> result =
MemCopyTest<armnn::RefWorkloadFactory, armnn::ClWorkloadFactory, armnn::DataType::Float32>(true);
- BOOST_TEST(CompareTensors(result.output, result.outputExpected));
+ auto predResult = CompareTensors(result.output, result.outputExpected);
+ BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
}
BOOST_AUTO_TEST_CASE(CopyBetweenGpuAndCpuWithSubtensors)
{
LayerTestResult<float, 4> result =
MemCopyTest<armnn::ClWorkloadFactory, armnn::RefWorkloadFactory, armnn::DataType::Float32>(true);
- BOOST_TEST(CompareTensors(result.output, result.outputExpected));
+ auto predResult = CompareTensors(result.output, result.outputExpected);
+ BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
}
BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/backends/neon/test/NeonCreateWorkloadTests.cpp b/src/backends/neon/test/NeonCreateWorkloadTests.cpp
index c994bfe55a..a8c0c8aca0 100644
--- a/src/backends/neon/test/NeonCreateWorkloadTests.cpp
+++ b/src/backends/neon/test/NeonCreateWorkloadTests.cpp
@@ -23,8 +23,8 @@ BOOST_AUTO_TEST_SUITE(CreateWorkloadNeon)
namespace
{
-boost::test_tools::predicate_result CompareIAclTensorHandleShape(IAclTensorHandle* tensorHandle,
- std::initializer_list<unsigned int> expectedDimensions)
+armnn::PredicateResult CompareIAclTensorHandleShape(IAclTensorHandle* tensorHandle,
+ std::initializer_list<unsigned int> expectedDimensions)
{
return CompareTensorHandleShape<IAclTensorHandle>(tensorHandle, expectedDimensions);
}
@@ -564,16 +564,20 @@ static void NeonCreateResizeWorkloadTest(DataLayout dataLayout)
auto inputHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Inputs[0]);
auto outputHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Outputs[0]);
+ armnn::PredicateResult predResult(true);
switch (dataLayout)
{
case DataLayout::NHWC:
- BOOST_TEST(CompareIAclTensorHandleShape(inputHandle, { 2, 4, 4, 3 }));
- BOOST_TEST(CompareIAclTensorHandleShape(outputHandle, { 2, 2, 2, 3 }));
+ predResult = CompareIAclTensorHandleShape(inputHandle, { 2, 4, 4, 3 });
+ BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+ predResult = CompareIAclTensorHandleShape(outputHandle, { 2, 2, 2, 3 });
+ BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
break;
- case DataLayout::NCHW:
- default:
- BOOST_TEST(CompareIAclTensorHandleShape(inputHandle, { 2, 3, 4, 4 }));
- BOOST_TEST(CompareIAclTensorHandleShape(outputHandle, { 2, 3, 2, 2 }));
+ default: // DataLayout::NCHW
+ predResult = CompareIAclTensorHandleShape(inputHandle, { 2, 3, 4, 4 });
+ BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+ predResult = CompareIAclTensorHandleShape(outputHandle, { 2, 3, 2, 2 });
+ BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
}
}
diff --git a/src/backends/neon/test/NeonMemCopyTests.cpp b/src/backends/neon/test/NeonMemCopyTests.cpp
index dbe1f8da3f..6a3d05d000 100644
--- a/src/backends/neon/test/NeonMemCopyTests.cpp
+++ b/src/backends/neon/test/NeonMemCopyTests.cpp
@@ -20,28 +20,32 @@ BOOST_AUTO_TEST_CASE(CopyBetweenCpuAndNeon)
{
LayerTestResult<float, 4> result =
MemCopyTest<armnn::RefWorkloadFactory, armnn::NeonWorkloadFactory, armnn::DataType::Float32>(false);
- BOOST_TEST(CompareTensors(result.output, result.outputExpected));
+ auto predResult = CompareTensors(result.output, result.outputExpected);
+ BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
}
BOOST_AUTO_TEST_CASE(CopyBetweenNeonAndCpu)
{
LayerTestResult<float, 4> result =
MemCopyTest<armnn::NeonWorkloadFactory, armnn::RefWorkloadFactory, armnn::DataType::Float32>(false);
- BOOST_TEST(CompareTensors(result.output, result.outputExpected));
+ auto predResult = CompareTensors(result.output, result.outputExpected);
+ BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
}
BOOST_AUTO_TEST_CASE(CopyBetweenCpuAndNeonWithSubtensors)
{
LayerTestResult<float, 4> result =
MemCopyTest<armnn::RefWorkloadFactory, armnn::NeonWorkloadFactory, armnn::DataType::Float32>(true);
- BOOST_TEST(CompareTensors(result.output, result.outputExpected));
+ auto predResult = CompareTensors(result.output, result.outputExpected);
+ BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
}
BOOST_AUTO_TEST_CASE(CopyBetweenNeonAndCpuWithSubtensors)
{
LayerTestResult<float, 4> result =
MemCopyTest<armnn::NeonWorkloadFactory, armnn::RefWorkloadFactory, armnn::DataType::Float32>(true);
- BOOST_TEST(CompareTensors(result.output, result.outputExpected));
+ auto predResult = CompareTensors(result.output, result.outputExpected);
+ BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
}
BOOST_AUTO_TEST_SUITE_END()