aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorColm Donelan <Colm.Donelan@arm.com>2021-05-17 13:01:52 +0100
committerColm Donelan <Colm.Donelan@arm.com>2021-05-18 11:25:13 +0100
commit25ab3a8326a9e2c52c84b2747fa72907109a695d (patch)
tree1d4eaaf5b41c68a4e3b3ce2cc400c3ffd76d510c
parent1d239f5717e6e4adc47683e30a48b05e7511c734 (diff)
downloadarmnn-25ab3a8326a9e2c52c84b2747fa72907109a695d.tar.gz
IVGCVSW-5964 Removing some remaining boost utility usages from tests.
* Adding a basic PredicateResult class to replace boost::test_tools::predicate_result * Replacing all uses of boost::test_tools::predicate_result with the new armnn::PredicateResult class * Replacing use of boost::test_tools::output_test_stream output with std::ostringstream in ProfilerTests.cpp Signed-off-by: Colm Donelan <Colm.Donelan@arm.com> Change-Id: I75cdbbff98d984e26e4a50c125386b2988516fad
-rw-r--r--CMakeLists.txt1
-rw-r--r--src/armnn/test/PredicateResult.hpp48
-rw-r--r--src/armnn/test/ProfilerTests.cpp5
-rw-r--r--src/armnn/test/TensorHelpers.hpp15
-rw-r--r--src/armnn/test/UnitTests.hpp6
-rw-r--r--src/armnnDeserializer/test/ParserFlatbuffersSerializeFixture.hpp3
-rw-r--r--src/armnnTfLiteParser/test/ParserFlatbuffersFixture.hpp6
-rw-r--r--src/armnnUtils/ParserPrototxtFixture.hpp6
-rw-r--r--src/backends/aclCommon/test/CreateWorkloadClNeon.hpp22
-rw-r--r--src/backends/aclCommon/test/MemCopyTests.cpp12
-rw-r--r--src/backends/backendsCommon/test/layerTests/DetectionPostProcessTestImpl.hpp12
-rw-r--r--src/backends/backendsCommon/test/layerTests/LstmTestImpl.cpp12
-rw-r--r--src/backends/cl/test/ClCreateWorkloadTests.cpp146
-rw-r--r--src/backends/cl/test/ClMemCopyTests.cpp12
-rw-r--r--src/backends/neon/test/NeonCreateWorkloadTests.cpp20
-rw-r--r--src/backends/neon/test/NeonMemCopyTests.cpp12
16 files changed, 233 insertions, 105 deletions
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 50bc1c796f..dfdff89bbe 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -569,6 +569,7 @@ if(BUILD_UNIT_TESTS)
src/armnn/test/optimizations/SquashEqualSiblingsTests.cpp
src/armnn/test/optimizations/TransposeAsReshapeTests.cpp
src/armnn/test/OptionalTest.cpp
+ src/armnn/test/PredicateResult.hpp
src/armnn/test/ProfilerTests.cpp
src/armnn/test/ProfilingEventTest.cpp
src/armnn/test/ShapeInferenceTests.cpp
diff --git a/src/armnn/test/PredicateResult.hpp b/src/armnn/test/PredicateResult.hpp
new file mode 100644
index 0000000000..a344c8e3ad
--- /dev/null
+++ b/src/armnn/test/PredicateResult.hpp
@@ -0,0 +1,48 @@
+//
+// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include <sstream>
+
namespace armnn
{

/// Minimal stand-in for boost::test_tools::predicate_result: carries a
/// pass/fail flag (m_Result) plus a stream (m_Message) used to build a
/// human-readable failure description for BOOST_TEST reporting.
class PredicateResult
{
public:
    explicit PredicateResult(bool result)
        : m_Result(result)
    {}

    /// Copy constructor. std::stringstream itself is not copyable, so the
    /// accumulated message text is transferred via str().
    PredicateResult(const PredicateResult& predicateResult)
        : m_Result(predicateResult.m_Result)
        , m_Message(predicateResult.m_Message.str())
    {}

    /// Overwrite the pass/fail flag without touching the message.
    void SetResult(bool newResult)
    {
        m_Result = newResult;
    }

    /// Stream used to append to the failure description.
    std::stringstream& Message()
    {
        return m_Message;
    }

    /// True when the predicate failed (mirrors boost's predicate_result).
    bool operator!() const
    {
        return !m_Result;
    }

    /// Copy assignment. NOTE: the previous implementation assigned in the
    /// wrong direction (it wrote this->m_Result into the by-value parameter,
    /// a no-op on *this) and discarded the message. Assign both the result
    /// flag and the message text from the right-hand side instead.
    PredicateResult& operator=(const PredicateResult& otherPredicateResult)
    {
        m_Result = otherPredicateResult.m_Result;
        m_Message.str(otherPredicateResult.m_Message.str());
        return *this;
    }

    bool m_Result;
    std::stringstream m_Message;
};

} // namespace armnn
diff --git a/src/armnn/test/ProfilerTests.cpp b/src/armnn/test/ProfilerTests.cpp
index a0df3b6b62..21900ffb9a 100644
--- a/src/armnn/test/ProfilerTests.cpp
+++ b/src/armnn/test/ProfilerTests.cpp
@@ -8,7 +8,6 @@
#include <armnn/utility/IgnoreUnused.hpp>
#include <boost/test/unit_test.hpp>
-#include <boost/test/tools/output_test_stream.hpp>
#include <memory>
#include <thread>
@@ -225,9 +224,9 @@ BOOST_AUTO_TEST_CASE(WriteEventResults)
size_t eventSequenceSizeAfter = armnn::GetProfilerEventSequenceSize(profiler.get());
BOOST_TEST(eventSequenceSizeAfter == eventSequenceSizeBefore + 1);
- boost::test_tools::output_test_stream output;
+ std::ostringstream output;
profiler->AnalyzeEventsAndWriteResults(output);
- BOOST_TEST(!output.is_empty(false));
+ BOOST_TEST(!output.str().empty());
// output should contain event name 'test'
BOOST_CHECK(output.str().find("test") != std::string::npos);
diff --git a/src/armnn/test/TensorHelpers.hpp b/src/armnn/test/TensorHelpers.hpp
index fd9dd83770..ceb6d0f9d7 100644
--- a/src/armnn/test/TensorHelpers.hpp
+++ b/src/armnn/test/TensorHelpers.hpp
@@ -4,6 +4,7 @@
//
#pragma once
+#include "PredicateResult.hpp"
#include <armnn/Tensor.hpp>
#include <armnn/utility/Assert.hpp>
#include <armnnUtils/FloatingPointComparison.hpp>
@@ -70,7 +71,7 @@ bool SelectiveCompareBoolean(T a, T b)
};
template <typename T, std::size_t n>
-boost::test_tools::predicate_result CompareTensors(const boost::multi_array<T, n>& a,
+armnn::PredicateResult CompareTensors(const boost::multi_array<T, n>& a,
const boost::multi_array<T, n>& b,
bool compareBoolean = false,
bool isDynamic = false)
@@ -84,8 +85,8 @@ boost::test_tools::predicate_result CompareTensors(const boost::multi_array<T, n
{
if (a.shape()[i] != b.shape()[i])
{
- boost::test_tools::predicate_result res(false);
- res.message() << "Different shapes ["
+ armnn::PredicateResult res(false);
+ res.Message() << "Different shapes ["
<< a.shape()[i]
<< "!="
<< b.shape()[i]
@@ -162,16 +163,16 @@ boost::test_tools::predicate_result CompareTensors(const boost::multi_array<T, n
}
}
- boost::test_tools::predicate_result comparisonResult(true);
+ armnn::PredicateResult comparisonResult(true);
if (numFailedElements > 0)
{
- comparisonResult = false;
- comparisonResult.message() << numFailedElements << " different values at: ";
+ comparisonResult.SetResult(false);
+ comparisonResult.Message() << numFailedElements << " different values at: ";
if (numFailedElements > maxReportedDifferences)
{
errorString << ", ... (and " << (numFailedElements - maxReportedDifferences) << " other differences)";
}
- comparisonResult.message() << errorString.str();
+ comparisonResult.Message() << errorString.str();
}
return comparisonResult;
diff --git a/src/armnn/test/UnitTests.hpp b/src/armnn/test/UnitTests.hpp
index c15477bf19..b55b13d4c8 100644
--- a/src/armnn/test/UnitTests.hpp
+++ b/src/armnn/test/UnitTests.hpp
@@ -42,7 +42,8 @@ void CompareTestResultIfSupported(const std::string& testName, const LayerTestRe
"The test name does not match the supportedness it is reporting");
if (testResult.supported)
{
- BOOST_TEST(CompareTensors(testResult.output, testResult.outputExpected, testResult.compareBoolean));
+ auto result = CompareTensors(testResult.output, testResult.outputExpected, testResult.compareBoolean);
+ BOOST_TEST(result.m_Result, result.m_Message.str());
}
}
@@ -56,7 +57,8 @@ void CompareTestResultIfSupported(const std::string& testName, const std::vector
"The test name does not match the supportedness it is reporting");
if (testResult[i].supported)
{
- BOOST_TEST(CompareTensors(testResult[i].output, testResult[i].outputExpected));
+ auto result = CompareTensors(testResult[i].output, testResult[i].outputExpected);
+ BOOST_TEST(result.m_Result, result.m_Message.str());
}
}
}
diff --git a/src/armnnDeserializer/test/ParserFlatbuffersSerializeFixture.hpp b/src/armnnDeserializer/test/ParserFlatbuffersSerializeFixture.hpp
index cea6a43454..5f5ec1c5f4 100644
--- a/src/armnnDeserializer/test/ParserFlatbuffersSerializeFixture.hpp
+++ b/src/armnnDeserializer/test/ParserFlatbuffersSerializeFixture.hpp
@@ -238,6 +238,7 @@ void ParserFlatbuffersSerializeFixture::RunTest(
armnn::BindingPointInfo bindingInfo = ConvertBindingInfo(
m_Parser->GetNetworkOutputBindingInfo(layersId, it.first));
auto outputExpected = MakeTensor<OutputDataType, NumOutputDimensions>(bindingInfo.second, it.second);
- BOOST_TEST(CompareTensors(outputExpected, outputStorage[it.first]));
+ auto result = CompareTensors(outputExpected, outputStorage[it.first]);
+ BOOST_TEST(result.m_Result, result.m_Message.str());
}
}
diff --git a/src/armnnTfLiteParser/test/ParserFlatbuffersFixture.hpp b/src/armnnTfLiteParser/test/ParserFlatbuffersFixture.hpp
index fc1d94e21f..f333ac0d40 100644
--- a/src/armnnTfLiteParser/test/ParserFlatbuffersFixture.hpp
+++ b/src/armnnTfLiteParser/test/ParserFlatbuffersFixture.hpp
@@ -321,7 +321,8 @@ void ParserFlatbuffersFixture::RunTest(size_t subgraphId,
{
armnn::BindingPointInfo bindingInfo = m_Parser->GetNetworkOutputBindingInfo(subgraphId, it.first);
auto outputExpected = MakeTensor<DataType2, NumOutputDimensions>(bindingInfo.second, it.second, isDynamic);
- BOOST_TEST(CompareTensors(outputExpected, outputStorage[it.first], false, isDynamic));
+ auto result = CompareTensors(outputExpected, outputStorage[it.first], false, isDynamic);
+ BOOST_TEST(result.m_Result, result.m_Message.str());
}
}
@@ -420,6 +421,7 @@ void ParserFlatbuffersFixture::RunTest(size_t subgraphId,
{
armnn::BindingPointInfo bindingInfo = m_Parser->GetNetworkOutputBindingInfo(subgraphId, it.first);
auto outputExpected = MakeTensor<DataType2, NumOutputDimensions>(bindingInfo.second, it.second);
- BOOST_TEST(CompareTensors(outputExpected, outputStorage[it.first], false));
+ auto result = CompareTensors(outputExpected, outputStorage[it.first], false);
+ BOOST_TEST(result.m_Result, result.m_Message.str());
}
} \ No newline at end of file
diff --git a/src/armnnUtils/ParserPrototxtFixture.hpp b/src/armnnUtils/ParserPrototxtFixture.hpp
index 782c181982..ad991efa36 100644
--- a/src/armnnUtils/ParserPrototxtFixture.hpp
+++ b/src/armnnUtils/ParserPrototxtFixture.hpp
@@ -255,11 +255,13 @@ void ParserPrototxtFixture<TParser>::RunTest(const std::map<std::string, std::ve
auto outputExpected = MakeTensor<T, NumOutputDimensions>(bindingInfo.second, it.second);
if (std::is_same<T, uint8_t>::value)
{
- BOOST_TEST(CompareTensors(outputExpected, outputStorage[it.first], true));
+ auto result = CompareTensors(outputExpected, outputStorage[it.first], true);
+ BOOST_TEST(result.m_Result, result.m_Message.str());
}
else
{
- BOOST_TEST(CompareTensors(outputExpected, outputStorage[it.first]));
+ auto result = CompareTensors(outputExpected, outputStorage[it.first]);
+ BOOST_TEST(result.m_Result, result.m_Message.str());
}
}
}
diff --git a/src/backends/aclCommon/test/CreateWorkloadClNeon.hpp b/src/backends/aclCommon/test/CreateWorkloadClNeon.hpp
index b14e148287..0a30907f55 100644
--- a/src/backends/aclCommon/test/CreateWorkloadClNeon.hpp
+++ b/src/backends/aclCommon/test/CreateWorkloadClNeon.hpp
@@ -5,7 +5,7 @@
#pragma once
#include <test/CreateWorkload.hpp>
-
+#include <test/PredicateResult.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>
#include <backendsCommon/MemCopyWorkload.hpp>
#include <reference/RefWorkloadFactory.hpp>
@@ -27,8 +27,8 @@ namespace
using namespace std;
template<typename IComputeTensorHandle>
-boost::test_tools::predicate_result CompareTensorHandleShape(IComputeTensorHandle* tensorHandle,
- std::initializer_list<unsigned int> expectedDimensions)
+PredicateResult CompareTensorHandleShape(IComputeTensorHandle* tensorHandle,
+ std::initializer_list<unsigned int> expectedDimensions)
{
arm_compute::ITensorInfo* info = tensorHandle->GetTensor().info();
@@ -36,8 +36,8 @@ boost::test_tools::predicate_result CompareTensorHandleShape(IComputeTensorHandl
auto numExpectedDims = expectedDimensions.size();
if (infoNumDims != numExpectedDims)
{
- boost::test_tools::predicate_result res(false);
- res.message() << "Different number of dimensions [" << info->num_dimensions()
+ PredicateResult res(false);
+ res.Message() << "Different number of dimensions [" << info->num_dimensions()
<< "!=" << expectedDimensions.size() << "]";
return res;
}
@@ -48,8 +48,8 @@ boost::test_tools::predicate_result CompareTensorHandleShape(IComputeTensorHandl
{
if (info->dimension(i) != expectedDimension)
{
- boost::test_tools::predicate_result res(false);
- res.message() << "For dimension " << i <<
+ PredicateResult res(false);
+ res.Message() << "For dimension " << i <<
" expected size " << expectedDimension <<
" got " << info->dimension(i);
return res;
@@ -58,7 +58,7 @@ boost::test_tools::predicate_result CompareTensorHandleShape(IComputeTensorHandl
i--;
}
- return true;
+ return PredicateResult(true);
}
template<typename IComputeTensorHandle>
@@ -97,7 +97,8 @@ void CreateMemCopyWorkloads(IWorkloadFactory& factory)
auto inputHandle1 = PolymorphicDowncast<RefTensorHandle*>(queueDescriptor1.m_Inputs[0]);
auto outputHandle1 = PolymorphicDowncast<IComputeTensorHandle*>(queueDescriptor1.m_Outputs[0]);
BOOST_TEST((inputHandle1->GetTensorInfo() == TensorInfo({2, 3}, DataType::Float32)));
- BOOST_TEST(CompareTensorHandleShape<IComputeTensorHandle>(outputHandle1, {2, 3}));
+ auto result = CompareTensorHandleShape<IComputeTensorHandle>(outputHandle1, {2, 3});
+ BOOST_TEST(result.m_Result, result.m_Message.str());
MemCopyQueueDescriptor queueDescriptor2 = workload2->GetData();
@@ -105,7 +106,8 @@ void CreateMemCopyWorkloads(IWorkloadFactory& factory)
BOOST_TEST(queueDescriptor2.m_Outputs.size() == 1);
auto inputHandle2 = PolymorphicDowncast<IComputeTensorHandle*>(queueDescriptor2.m_Inputs[0]);
auto outputHandle2 = PolymorphicDowncast<RefTensorHandle*>(queueDescriptor2.m_Outputs[0]);
- BOOST_TEST(CompareTensorHandleShape<IComputeTensorHandle>(inputHandle2, {2, 3}));
+ result = CompareTensorHandleShape<IComputeTensorHandle>(inputHandle2, {2, 3});
+ BOOST_TEST(result.m_Result, result.m_Message.str());
BOOST_TEST((outputHandle2->GetTensorInfo() == TensorInfo({2, 3}, DataType::Float32)));
}
diff --git a/src/backends/aclCommon/test/MemCopyTests.cpp b/src/backends/aclCommon/test/MemCopyTests.cpp
index 3e26364354..ffba19323a 100644
--- a/src/backends/aclCommon/test/MemCopyTests.cpp
+++ b/src/backends/aclCommon/test/MemCopyTests.cpp
@@ -48,28 +48,32 @@ BOOST_AUTO_TEST_CASE(CopyBetweenNeonAndGpu)
{
LayerTestResult<float, 4> result =
MemCopyTest<armnn::NeonWorkloadFactory, armnn::ClWorkloadFactory, armnn::DataType::Float32>(false);
- BOOST_TEST(CompareTensors(result.output, result.outputExpected));
+ auto predResult = CompareTensors(result.output, result.outputExpected);
+ BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
}
BOOST_AUTO_TEST_CASE(CopyBetweenGpuAndNeon)
{
LayerTestResult<float, 4> result =
MemCopyTest<armnn::ClWorkloadFactory, armnn::NeonWorkloadFactory, armnn::DataType::Float32>(false);
- BOOST_TEST(CompareTensors(result.output, result.outputExpected));
+ auto predResult = CompareTensors(result.output, result.outputExpected);
+ BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
}
BOOST_AUTO_TEST_CASE(CopyBetweenNeonAndGpuWithSubtensors)
{
LayerTestResult<float, 4> result =
MemCopyTest<armnn::NeonWorkloadFactory, armnn::ClWorkloadFactory, armnn::DataType::Float32>(true);
- BOOST_TEST(CompareTensors(result.output, result.outputExpected));
+ auto predResult = CompareTensors(result.output, result.outputExpected);
+ BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
}
BOOST_AUTO_TEST_CASE(CopyBetweenGpuAndNeonWithSubtensors)
{
LayerTestResult<float, 4> result =
MemCopyTest<armnn::ClWorkloadFactory, armnn::NeonWorkloadFactory, armnn::DataType::Float32>(true);
- BOOST_TEST(CompareTensors(result.output, result.outputExpected));
+ auto predResult = CompareTensors(result.output, result.outputExpected);
+ BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
}
BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/backends/backendsCommon/test/layerTests/DetectionPostProcessTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/DetectionPostProcessTestImpl.hpp
index f68082762c..c6636554ea 100644
--- a/src/backends/backendsCommon/test/layerTests/DetectionPostProcessTestImpl.hpp
+++ b/src/backends/backendsCommon/test/layerTests/DetectionPostProcessTestImpl.hpp
@@ -225,10 +225,14 @@ void DetectionPostProcessImpl(const armnn::TensorInfo& boxEncodingsInfo,
CopyDataFromITensorHandle(detectionScoresResult.output.origin(), outputScoresHandle.get());
CopyDataFromITensorHandle(numDetectionsResult.output.origin(), numDetectionHandle.get());
- BOOST_TEST(CompareTensors(detectionBoxesResult.output, detectionBoxesResult.outputExpected));
- BOOST_TEST(CompareTensors(detectionClassesResult.output, detectionClassesResult.outputExpected));
- BOOST_TEST(CompareTensors(detectionScoresResult.output, detectionScoresResult.outputExpected));
- BOOST_TEST(CompareTensors(numDetectionsResult.output, numDetectionsResult.outputExpected));
+ auto result = CompareTensors(detectionBoxesResult.output, detectionBoxesResult.outputExpected);
+ BOOST_TEST(result.m_Result, result.m_Message.str());
+ result = CompareTensors(detectionClassesResult.output, detectionClassesResult.outputExpected);
+ BOOST_TEST(result.m_Result, result.m_Message.str());
+ result = CompareTensors(detectionScoresResult.output, detectionScoresResult.outputExpected);
+ BOOST_TEST(result.m_Result, result.m_Message.str());
+ result = CompareTensors(numDetectionsResult.output, numDetectionsResult.outputExpected);
+ BOOST_TEST(result.m_Result, result.m_Message.str());
}
template<armnn::DataType QuantizedType, typename RawType = armnn::ResolveType<QuantizedType>>
diff --git a/src/backends/backendsCommon/test/layerTests/LstmTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/LstmTestImpl.cpp
index 7a9652a8ea..1c63542dcb 100644
--- a/src/backends/backendsCommon/test/layerTests/LstmTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/LstmTestImpl.cpp
@@ -45,7 +45,8 @@ void LstmUtilsVectorBatchVectorAddTestImpl(
VectorBatchVectorAdd(*vecDecoder, vSize, *batchVecDecoder, nBatch, *batchVecEncoder);
// check shape and compare values
- BOOST_TEST(CompareTensors(batchVec, expectedOutput));
+ auto result = CompareTensors(batchVec, expectedOutput);
+ BOOST_TEST(result.m_Result, result.m_Message.str());
// check if iterator is back at start position
batchVecEncoder->Set(1.0f);
@@ -70,7 +71,8 @@ void LstmUtilsZeroVectorTestImpl(
ZeroVector(*outputEncoder, vSize);
// check shape and compare values
- BOOST_TEST(CompareTensors(input, expectedOutput));
+ auto result = CompareTensors(input, expectedOutput);
+ BOOST_TEST(result.m_Result, result.m_Message.str());
// check if iterator is back at start position
outputEncoder->Set(1.0f);
@@ -96,7 +98,8 @@ void LstmUtilsMeanStddevNormalizationTestImpl(
MeanStddevNormalization(*inputDecoder, *outputEncoder, vSize, nBatch, 1e-8f);
// check shape and compare values
- BOOST_TEST(CompareTensors(input, expectedOutput));
+ auto result = CompareTensors(input, expectedOutput);
+ BOOST_TEST(result.m_Result, result.m_Message.str());
// check if iterator is back at start position
outputEncoder->Set(1.0f);
@@ -123,7 +126,8 @@ void LstmUtilsVectorBatchVectorCwiseProductTestImpl(
VectorBatchVectorCwiseProduct(*vecDecoder, vSize, *batchVecDecoder, nBatch, *batchVecEncoder);
// check shape and compare values
- BOOST_TEST(CompareTensors(batchVec, expectedOutput));
+ auto result = CompareTensors(batchVec, expectedOutput);
+ BOOST_TEST(result.m_Result, result.m_Message.str());
// check if iterator is back at start position
batchVecEncoder->Set(1.0f);
diff --git a/src/backends/cl/test/ClCreateWorkloadTests.cpp b/src/backends/cl/test/ClCreateWorkloadTests.cpp
index 47e2f4e8d7..7602cbbc0b 100644
--- a/src/backends/cl/test/ClCreateWorkloadTests.cpp
+++ b/src/backends/cl/test/ClCreateWorkloadTests.cpp
@@ -21,8 +21,8 @@
#include <cl/workloads/ClWorkloads.hpp>
#include <cl/workloads/ClWorkloadUtils.hpp>
-boost::test_tools::predicate_result CompareIClTensorHandleShape(IClTensorHandle* tensorHandle,
- std::initializer_list<unsigned int> expectedDimensions)
+armnn::PredicateResult CompareIClTensorHandleShape(IClTensorHandle* tensorHandle,
+ std::initializer_list<unsigned int> expectedDimensions)
{
return CompareTensorHandleShape<IClTensorHandle>(tensorHandle, expectedDimensions);
}
@@ -43,8 +43,11 @@ static void ClCreateActivationWorkloadTest()
auto inputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
auto outputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
- BOOST_TEST(CompareIClTensorHandleShape(inputHandle, {1, 1}));
- BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {1, 1}));
+ auto predResult = CompareIClTensorHandleShape(inputHandle, {1, 1});
+ BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+
+ predResult = CompareIClTensorHandleShape(outputHandle, {1, 1});
+ BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
}
BOOST_AUTO_TEST_CASE(CreateActivationFloatWorkload)
@@ -74,9 +77,12 @@ static void ClCreateElementwiseWorkloadTest()
auto inputHandle1 = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
auto inputHandle2 = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Inputs[1]);
auto outputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
- BOOST_TEST(CompareIClTensorHandleShape(inputHandle1, {2, 3}));
- BOOST_TEST(CompareIClTensorHandleShape(inputHandle2, {2, 3}));
- BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {2, 3}));
+ auto predResult = CompareIClTensorHandleShape(inputHandle1, {2, 3});
+ BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+ predResult = CompareIClTensorHandleShape(inputHandle2, {2, 3});
+ BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+ predResult = CompareIClTensorHandleShape(outputHandle, {2, 3});
+ BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
}
BOOST_AUTO_TEST_CASE(CreateAdditionFloatWorkload)
@@ -167,8 +173,11 @@ static void ClCreateElementwiseUnaryWorkloadTest(armnn::UnaryOperation op)
auto inputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
auto outputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
- BOOST_TEST(CompareIClTensorHandleShape(inputHandle, {2, 3}));
- BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {2, 3}));
+ auto predResult = CompareIClTensorHandleShape(inputHandle, {2, 3});
+ BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+
+ predResult = CompareIClTensorHandleShape(outputHandle, {2, 3});
+ BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
}
BOOST_AUTO_TEST_CASE(CreateRsqrtFloat32WorkloadTest)
@@ -192,15 +201,20 @@ static void ClCreateBatchNormalizationWorkloadTest(DataLayout dataLayout)
auto inputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
auto outputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
- switch (dataLayout)
+ armnn::PredicateResult predResult(true);
+ switch (dataLayout)
{
case DataLayout::NHWC:
- BOOST_TEST(CompareIClTensorHandleShape(inputHandle, { 2, 4, 4, 3 }));
- BOOST_TEST(CompareIClTensorHandleShape(outputHandle, { 2, 4, 4, 3 }));
+ predResult = CompareIClTensorHandleShape(inputHandle, { 2, 4, 4, 3 });
+ BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+ predResult = CompareIClTensorHandleShape(outputHandle, { 2, 4, 4, 3 });
+ BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
break;
default: // NCHW
- BOOST_TEST(CompareIClTensorHandleShape(inputHandle, { 2, 3, 4, 4 }));
- BOOST_TEST(CompareIClTensorHandleShape(outputHandle, { 2, 3, 4, 4 }));
+ predResult = CompareIClTensorHandleShape(inputHandle, { 2, 3, 4, 4 });
+ BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+ predResult = CompareIClTensorHandleShape(outputHandle, { 2, 3, 4, 4 });
+ BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
}
}
@@ -239,9 +253,10 @@ BOOST_AUTO_TEST_CASE(CreateConvertFp16ToFp32Workload)
ConvertFp16ToFp32QueueDescriptor queueDescriptor = workload->GetData();
auto inputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
auto outputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
-
- BOOST_TEST(CompareIClTensorHandleShape(inputHandle, {1, 3, 2, 3}));
- BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {1, 3, 2, 3}));
+ auto predResult = CompareIClTensorHandleShape(inputHandle, {1, 3, 2, 3});
+ BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+ predResult = CompareIClTensorHandleShape(outputHandle, {1, 3, 2, 3});
+ BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
BOOST_TEST((inputHandle->GetTensor().info()->data_type() == arm_compute::DataType::F16));
BOOST_TEST((outputHandle->GetTensor().info()->data_type() == arm_compute::DataType::F32));
}
@@ -258,8 +273,10 @@ BOOST_AUTO_TEST_CASE(CreateConvertFp32ToFp16Workload)
auto inputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
auto outputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
- BOOST_TEST(CompareIClTensorHandleShape(inputHandle, {1, 3, 2, 3}));
- BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {1, 3, 2, 3}));
+ auto predResult = CompareIClTensorHandleShape(inputHandle, {1, 3, 2, 3});
+ BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+ predResult = CompareIClTensorHandleShape(outputHandle, {1, 3, 2, 3});
+ BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
BOOST_TEST((inputHandle->GetTensor().info()->data_type() == arm_compute::DataType::F32));
BOOST_TEST((outputHandle->GetTensor().info()->data_type() == arm_compute::DataType::F16));
}
@@ -470,8 +487,10 @@ static void ClDirectConvolution2dWorkloadTest()
Convolution2dQueueDescriptor queueDescriptor = workload->GetData();
auto inputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
auto outputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
- BOOST_TEST(CompareIClTensorHandleShape(inputHandle, {2, 3, 6, 6}));
- BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {2, 2, 6, 6}));
+ auto predResult = CompareIClTensorHandleShape(inputHandle, {2, 3, 6, 6});
+ BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+ predResult = CompareIClTensorHandleShape(outputHandle, {2, 2, 6, 6});
+ BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
}
BOOST_AUTO_TEST_CASE(CreateDirectConvolution2dFloatWorkload)
@@ -503,8 +522,10 @@ static void ClCreateFullyConnectedWorkloadTest()
FullyConnectedQueueDescriptor queueDescriptor = workload->GetData();
auto inputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
auto outputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
- BOOST_TEST(CompareIClTensorHandleShape(inputHandle, {3, 1, 4, 5}));
- BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {3, 7}));
+ auto predResult = CompareIClTensorHandleShape(inputHandle, {3, 1, 4, 5});
+ BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+ predResult = CompareIClTensorHandleShape(outputHandle, {3, 7});
+ BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
}
@@ -660,8 +681,10 @@ static void ClCreateReshapeWorkloadTest()
auto inputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
auto outputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
- BOOST_TEST(CompareIClTensorHandleShape(inputHandle, {4, 1}));
- BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {1, 4}));
+ auto predResult = CompareIClTensorHandleShape(inputHandle, {4, 1});
+ BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+ predResult = CompareIClTensorHandleShape(outputHandle, {1, 4});
+ BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
}
BOOST_AUTO_TEST_CASE(CreateReshapeFloatWorkload)
@@ -705,8 +728,10 @@ static void ClSoftmaxWorkloadTest()
tensorInfo.SetQuantizationScale(1.f / 256);
}
- BOOST_TEST(CompareIClTensorHandleShape(inputHandle, {4, 1}));
- BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {4, 1}));
+ auto predResult = CompareIClTensorHandleShape(inputHandle, {4, 1});
+ BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+ predResult = CompareIClTensorHandleShape(outputHandle, {4, 1});
+ BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
}
@@ -742,16 +767,20 @@ static void ClSplitterWorkloadTest()
// Checks that outputs are as we expect them (see definition of CreateSplitterWorkloadTest).
SplitterQueueDescriptor queueDescriptor = workload->GetData();
auto inputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
- BOOST_TEST(CompareIClTensorHandleShape(inputHandle, {5, 7, 7}));
+ auto predResult = CompareIClTensorHandleShape(inputHandle, {5, 7, 7});
+ BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
auto outputHandle1 = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[1]);
- BOOST_TEST(CompareIClTensorHandleShape(outputHandle1, {2, 7, 7}));
+ predResult = CompareIClTensorHandleShape(outputHandle1, {2, 7, 7});
+ BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
auto outputHandle2 = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[2]);
- BOOST_TEST(CompareIClTensorHandleShape(outputHandle2, {2, 7, 7}));
+ predResult = CompareIClTensorHandleShape(outputHandle2, {2, 7, 7});
+ BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
auto outputHandle0 = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
- BOOST_TEST(CompareIClTensorHandleShape(outputHandle0, {1, 7, 7}));
+ predResult = CompareIClTensorHandleShape(outputHandle0, {1, 7, 7});
+ BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
}
BOOST_AUTO_TEST_CASE(CreateSplitterFloatWorkload)
@@ -931,8 +960,10 @@ static void ClCreateLogSoftmaxWorkloadTest()
auto inputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
auto outputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
- BOOST_TEST(CompareIClTensorHandleShape(inputHandle, {4, 1}));
- BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {4, 1}));
+ auto predResult = CompareIClTensorHandleShape(inputHandle, {4, 1});
+ BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+ predResult = CompareIClTensorHandleShape(outputHandle, {4, 1});
+ BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
}
BOOST_AUTO_TEST_CASE(CreateLogSoftmaxFloat32WorkloadTest)
@@ -952,8 +983,10 @@ static void ClCreateLstmWorkloadTest()
LstmQueueDescriptor queueDescriptor = workload->GetData();
auto inputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
auto outputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[1]);
- BOOST_TEST(CompareIClTensorHandleShape(inputHandle, { 2, 2 }));
- BOOST_TEST(CompareIClTensorHandleShape(outputHandle, { 2, 4 }));
+ auto predResult = CompareIClTensorHandleShape(inputHandle, {2, 2});
+ BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+ predResult = CompareIClTensorHandleShape(outputHandle, {2, 4});
+ BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
}
BOOST_AUTO_TEST_CASE(CreateLSTMWorkloadFloatWorkload)
@@ -975,16 +1008,20 @@ static void ClResizeWorkloadTest(DataLayout dataLayout)
auto inputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
auto outputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
+ armnn::PredicateResult predResult(true);
switch (dataLayout)
{
case DataLayout::NHWC:
- BOOST_TEST(CompareIClTensorHandleShape(inputHandle, { 2, 4, 4, 3 }));
- BOOST_TEST(CompareIClTensorHandleShape(outputHandle, { 2, 2, 2, 3 }));
+ predResult = CompareIClTensorHandleShape(inputHandle, { 2, 4, 4, 3 });
+ BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+ predResult = CompareIClTensorHandleShape(outputHandle, { 2, 2, 2, 3 });
+ BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
break;
- case DataLayout::NCHW:
- default:
- BOOST_TEST(CompareIClTensorHandleShape(inputHandle, { 2, 3, 4, 4 }));
- BOOST_TEST(CompareIClTensorHandleShape(outputHandle, { 2, 3, 2, 2 }));
+ default: // DataLayout::NCHW
+ predResult = CompareIClTensorHandleShape(inputHandle, { 2, 3, 4, 4 });
+ BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+ predResult = CompareIClTensorHandleShape(outputHandle, { 2, 3, 2, 2 });
+ BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
}
}
@@ -1033,8 +1070,10 @@ static void ClMeanWorkloadTest()
auto outputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
// The first dimension (batch size) in both input and output is singular thus it has been reduced by ACL.
- BOOST_TEST(CompareIClTensorHandleShape(inputHandle, { 1, 3, 7, 4 }));
- BOOST_TEST(CompareIClTensorHandleShape(outputHandle, { 1, 4 }));
+ auto predResult = CompareIClTensorHandleShape(inputHandle, { 1, 3, 7, 4 });
+ BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+ predResult = CompareIClTensorHandleShape(outputHandle, { 1, 4 });
+ BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
}
BOOST_AUTO_TEST_CASE(CreateMeanFloat32Workload)
@@ -1067,9 +1106,12 @@ static void ClCreateConcatWorkloadTest(std::initializer_list<unsigned int> outpu
auto inputHandle1 = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Inputs[1]);
auto outputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
- BOOST_TEST(CompareIClTensorHandleShape(inputHandle0, { 2, 3, 2, 5 }));
- BOOST_TEST(CompareIClTensorHandleShape(inputHandle1, { 2, 3, 2, 5 }));
- BOOST_TEST(CompareIClTensorHandleShape(outputHandle, outputShape));
+ auto predResult = CompareIClTensorHandleShape(inputHandle0, { 2, 3, 2, 5 });
+ BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+ predResult = CompareIClTensorHandleShape(inputHandle1, { 2, 3, 2, 5 });
+ BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+ predResult = CompareIClTensorHandleShape(outputHandle, outputShape);
+ BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
}
BOOST_AUTO_TEST_CASE(CreateConcatDim0Float32Workload)
@@ -1115,8 +1157,10 @@ static void ClSpaceToDepthWorkloadTest()
auto inputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
auto outputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
- BOOST_TEST(CompareIClTensorHandleShape(inputHandle, { 1, 2, 2, 1 }));
- BOOST_TEST(CompareIClTensorHandleShape(outputHandle, { 1, 1, 1, 4 }));
+ auto predResult = CompareIClTensorHandleShape(inputHandle, { 1, 2, 2, 1 });
+ BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+ predResult = CompareIClTensorHandleShape(outputHandle, { 1, 1, 1, 4 });
+ BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
}
BOOST_AUTO_TEST_CASE(CreateSpaceToDepthFloat32Workload)
@@ -1161,10 +1205,12 @@ static void ClCreateStackWorkloadTest(const std::initializer_list<unsigned int>&
for (unsigned int i = 0; i < numInputs; ++i)
{
auto inputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Inputs[i]);
- BOOST_TEST(CompareIClTensorHandleShape(inputHandle, inputShape));
+ auto predResult1 = CompareIClTensorHandleShape(inputHandle, inputShape);
+ BOOST_TEST(predResult1.m_Result, predResult1.m_Message.str());
}
auto outputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
- BOOST_TEST(CompareIClTensorHandleShape(outputHandle, outputShape));
+ auto predResult2 = CompareIClTensorHandleShape(outputHandle, outputShape);
+ BOOST_TEST(predResult2.m_Result, predResult2.m_Message.str());
}
BOOST_AUTO_TEST_CASE(CreateStackFloat32Workload)
diff --git a/src/backends/cl/test/ClMemCopyTests.cpp b/src/backends/cl/test/ClMemCopyTests.cpp
index 3cd9af7910..c26f7bdae8 100644
--- a/src/backends/cl/test/ClMemCopyTests.cpp
+++ b/src/backends/cl/test/ClMemCopyTests.cpp
@@ -19,28 +19,32 @@ BOOST_AUTO_TEST_CASE(CopyBetweenCpuAndGpu)
{
LayerTestResult<float, 4> result =
MemCopyTest<armnn::RefWorkloadFactory, armnn::ClWorkloadFactory, armnn::DataType::Float32>(false);
- BOOST_TEST(CompareTensors(result.output, result.outputExpected));
+ auto predResult = CompareTensors(result.output, result.outputExpected);
+ BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
}
BOOST_AUTO_TEST_CASE(CopyBetweenGpuAndCpu)
{
LayerTestResult<float, 4> result =
MemCopyTest<armnn::ClWorkloadFactory, armnn::RefWorkloadFactory, armnn::DataType::Float32>(false);
- BOOST_TEST(CompareTensors(result.output, result.outputExpected));
+ auto predResult = CompareTensors(result.output, result.outputExpected);
+ BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
}
BOOST_AUTO_TEST_CASE(CopyBetweenCpuAndGpuWithSubtensors)
{
LayerTestResult<float, 4> result =
MemCopyTest<armnn::RefWorkloadFactory, armnn::ClWorkloadFactory, armnn::DataType::Float32>(true);
- BOOST_TEST(CompareTensors(result.output, result.outputExpected));
+ auto predResult = CompareTensors(result.output, result.outputExpected);
+ BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
}
BOOST_AUTO_TEST_CASE(CopyBetweenGpuAndCpuWithSubtensors)
{
LayerTestResult<float, 4> result =
MemCopyTest<armnn::ClWorkloadFactory, armnn::RefWorkloadFactory, armnn::DataType::Float32>(true);
- BOOST_TEST(CompareTensors(result.output, result.outputExpected));
+ auto predResult = CompareTensors(result.output, result.outputExpected);
+ BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
}
BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/backends/neon/test/NeonCreateWorkloadTests.cpp b/src/backends/neon/test/NeonCreateWorkloadTests.cpp
index c994bfe55a..a8c0c8aca0 100644
--- a/src/backends/neon/test/NeonCreateWorkloadTests.cpp
+++ b/src/backends/neon/test/NeonCreateWorkloadTests.cpp
@@ -23,8 +23,8 @@ BOOST_AUTO_TEST_SUITE(CreateWorkloadNeon)
namespace
{
-boost::test_tools::predicate_result CompareIAclTensorHandleShape(IAclTensorHandle* tensorHandle,
- std::initializer_list<unsigned int> expectedDimensions)
+armnn::PredicateResult CompareIAclTensorHandleShape(IAclTensorHandle* tensorHandle,
+ std::initializer_list<unsigned int> expectedDimensions)
{
return CompareTensorHandleShape<IAclTensorHandle>(tensorHandle, expectedDimensions);
}
@@ -564,16 +564,20 @@ static void NeonCreateResizeWorkloadTest(DataLayout dataLayout)
auto inputHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Inputs[0]);
auto outputHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Outputs[0]);
+ armnn::PredicateResult predResult(true);
switch (dataLayout)
{
case DataLayout::NHWC:
- BOOST_TEST(CompareIAclTensorHandleShape(inputHandle, { 2, 4, 4, 3 }));
- BOOST_TEST(CompareIAclTensorHandleShape(outputHandle, { 2, 2, 2, 3 }));
+ predResult = CompareIAclTensorHandleShape(inputHandle, { 2, 4, 4, 3 });
+ BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+ predResult = CompareIAclTensorHandleShape(outputHandle, { 2, 2, 2, 3 });
+ BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
break;
- case DataLayout::NCHW:
- default:
- BOOST_TEST(CompareIAclTensorHandleShape(inputHandle, { 2, 3, 4, 4 }));
- BOOST_TEST(CompareIAclTensorHandleShape(outputHandle, { 2, 3, 2, 2 }));
+ default: // DataLayout::NCHW
+ predResult = CompareIAclTensorHandleShape(inputHandle, { 2, 3, 4, 4 });
+ BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+ predResult = CompareIAclTensorHandleShape(outputHandle, { 2, 3, 2, 2 });
+ BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
}
}
diff --git a/src/backends/neon/test/NeonMemCopyTests.cpp b/src/backends/neon/test/NeonMemCopyTests.cpp
index dbe1f8da3f..6a3d05d000 100644
--- a/src/backends/neon/test/NeonMemCopyTests.cpp
+++ b/src/backends/neon/test/NeonMemCopyTests.cpp
@@ -20,28 +20,32 @@ BOOST_AUTO_TEST_CASE(CopyBetweenCpuAndNeon)
{
LayerTestResult<float, 4> result =
MemCopyTest<armnn::RefWorkloadFactory, armnn::NeonWorkloadFactory, armnn::DataType::Float32>(false);
- BOOST_TEST(CompareTensors(result.output, result.outputExpected));
+ auto predResult = CompareTensors(result.output, result.outputExpected);
+ BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
}
BOOST_AUTO_TEST_CASE(CopyBetweenNeonAndCpu)
{
LayerTestResult<float, 4> result =
MemCopyTest<armnn::NeonWorkloadFactory, armnn::RefWorkloadFactory, armnn::DataType::Float32>(false);
- BOOST_TEST(CompareTensors(result.output, result.outputExpected));
+ auto predResult = CompareTensors(result.output, result.outputExpected);
+ BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
}
BOOST_AUTO_TEST_CASE(CopyBetweenCpuAndNeonWithSubtensors)
{
LayerTestResult<float, 4> result =
MemCopyTest<armnn::RefWorkloadFactory, armnn::NeonWorkloadFactory, armnn::DataType::Float32>(true);
- BOOST_TEST(CompareTensors(result.output, result.outputExpected));
+ auto predResult = CompareTensors(result.output, result.outputExpected);
+ BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
}
BOOST_AUTO_TEST_CASE(CopyBetweenNeonAndCpuWithSubtensors)
{
LayerTestResult<float, 4> result =
MemCopyTest<armnn::NeonWorkloadFactory, armnn::RefWorkloadFactory, armnn::DataType::Float32>(true);
- BOOST_TEST(CompareTensors(result.output, result.outputExpected));
+ auto predResult = CompareTensors(result.output, result.outputExpected);
+ BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
}
BOOST_AUTO_TEST_SUITE_END()