author    Colm Donelan <colm.donelan@arm.com>    2024-02-21 15:58:35 +0000
committer Colm Donelan <colm.donelan@arm.com>    2024-02-22 10:06:50 +0000
commit    68c60e93d445cc51bd9f650aa3489f57d2227e13 (patch)
tree      b8f4a10432e07dd1bd6e39f0650f0c0bc6b77d6d /src/backends/cl
parent    b4ef16334900af33bf4321f28c90f62bf32238cd (diff)
download  armnn-68c60e93d445cc51bd9f650aa3489f57d2227e13.tar.gz
IVGCVSW-7854 Remove/rewrite asserts in the backends unit tests.
* Replace calls to ARMNN_ASSERT with DOCTEST CHECK.

Signed-off-by: Colm Donelan <colm.donelan@arm.com>
Change-Id: I8904d169b2099d57a344e319b2f14cf5d8392ae8
Diffstat (limited to 'src/backends/cl')
-rw-r--r--  src/backends/cl/test/ClCreateWorkloadTests.cpp            | 14
-rw-r--r--  src/backends/cl/test/ClDefaultAllocatorTests.cpp           |  4
-rw-r--r--  src/backends/cl/test/ClImportTensorHandleFactoryTests.cpp  | 28
-rw-r--r--  src/backends/cl/test/ClImportTensorHandleTests.cpp         | 26
4 files changed, 34 insertions(+), 38 deletions(-)
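For context, the pattern applied throughout this patch is to swap the aborting ARMNN_ASSERT macro for doctest's non-fatal CHECK macro, so a failed condition is reported by the test runner instead of terminating the process. Below is a minimal standalone sketch of that usage; the test name and the pointer being checked are illustrative only and are not taken from the patch.

// Minimal illustrative sketch (not part of the patch): doctest's CHECK records a
// failed condition and lets the test continue, whereas an assert-style macro
// aborts the whole run on failure.
#define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN
#include <doctest/doctest.h>

#include <memory>

TEST_CASE("CheckInsteadOfAssertSketch")
{
    // Stand-in for a workload pointer returned by a factory in the real tests.
    std::unique_ptr<int> workload = std::make_unique<int>(1);

    // Before: ARMNN_ASSERT(workload != nullptr);  // aborts the process on failure
    // After:
    CHECK(workload != nullptr);   // non-fatal: failure is reported by the runner
    CHECK(*workload == 1);
}

Note that CHECK keeps executing the test after a failure; doctest's REQUIRE would be the fatal alternative if later statements could not run safely after a failed condition.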
diff --git a/src/backends/cl/test/ClCreateWorkloadTests.cpp b/src/backends/cl/test/ClCreateWorkloadTests.cpp
index 51ea0dc5d3..09418c2422 100644
--- a/src/backends/cl/test/ClCreateWorkloadTests.cpp
+++ b/src/backends/cl/test/ClCreateWorkloadTests.cpp
@@ -1,13 +1,12 @@
//
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "ClContextControlFixture.hpp"
#include "ClWorkloadFactoryHelper.hpp"
-#include <armnn/utility/Assert.hpp>
-#include <armnn/utility/IgnoreUnused.hpp>
+#include <armnn/utility/PolymorphicDowncast.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>
#include <armnn/backends/MemCopyWorkload.hpp>
#include <armnnTestUtils/TensorCopyUtils.hpp>
@@ -331,11 +330,10 @@ TEST_CASE_FIXTURE(ClContextControlFixture, "CreateConvolution2dFastMathEnabledWo
DataLayout::NCHW,
modelOptions);
- ARMNN_ASSERT(workload != nullptr);
+ CHECK(workload != nullptr);
auto conv2dWorkload = PolymorphicDowncast<ClConvolution2dWorkload*>(workload.get());
- IgnoreUnused(conv2dWorkload);
- ARMNN_ASSERT(conv2dWorkload != nullptr);
- ARMNN_ASSERT(conv2dWorkload->GetConvolutionMethod() == arm_compute::ConvolutionMethod::WINOGRAD);
+ CHECK(conv2dWorkload != nullptr);
+ CHECK(conv2dWorkload->GetConvolutionMethod() == arm_compute::ConvolutionMethod::WINOGRAD);
}
TEST_CASE_FIXTURE(ClContextControlFixture, "ClReplaceInputOutputConvolution2dWorkload")
@@ -480,7 +478,7 @@ TEST_CASE_FIXTURE(ClContextControlFixture, "CreateConvolution2dClCompiledContext
workloadInfo,
clMemoryManager->GetIntraLayerManager(),
clCompileContext);
- ARMNN_ASSERT(workload != nullptr);
+ CHECK(workload != nullptr);
// Check built programs are not empty in context
CHECK(!clCompileContext.get_built_programs().empty());
}
diff --git a/src/backends/cl/test/ClDefaultAllocatorTests.cpp b/src/backends/cl/test/ClDefaultAllocatorTests.cpp
index 411a480815..24b8a09c9c 100644
--- a/src/backends/cl/test/ClDefaultAllocatorTests.cpp
+++ b/src/backends/cl/test/ClDefaultAllocatorTests.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021, 2023-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -97,7 +97,7 @@ TEST_CASE("DefaultAllocatorTestMock")
// Initialize Mock Backend
MockBackendInitialiser initialiser;
auto factoryFun = BackendRegistryInstance().GetFactory(MockBackend().GetIdStatic());
- ARMNN_ASSERT(factoryFun != nullptr);
+ CHECK(factoryFun != nullptr);
auto backend = factoryFun();
auto defaultAllocator = backend->GetDefaultAllocator();
diff --git a/src/backends/cl/test/ClImportTensorHandleFactoryTests.cpp b/src/backends/cl/test/ClImportTensorHandleFactoryTests.cpp
index fee40fd257..46be3a122d 100644
--- a/src/backends/cl/test/ClImportTensorHandleFactoryTests.cpp
+++ b/src/backends/cl/test/ClImportTensorHandleFactoryTests.cpp
@@ -1,10 +1,8 @@
//
-// Copyright © 2021 Arm Ltd. All rights reserved.
+// Copyright © 2021, 2024 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
-#include <armnn/utility/Assert.hpp>
-
#include <cl/ClImportTensorHandleFactory.hpp>
#include <doctest/doctest.h>
@@ -35,21 +33,21 @@ TEST_CASE("ImportTensorFactoryCreateMallocTensorHandle")
// Start with the TensorInfo factory method. Create an import tensor handle and verify the data is
// passed through correctly.
auto tensorHandle = factory.CreateTensorHandle(tensorInfo);
- ARMNN_ASSERT(tensorHandle);
- ARMNN_ASSERT(tensorHandle->GetImportFlags() == static_cast<MemorySourceFlags>(MemorySource::Malloc));
- ARMNN_ASSERT(tensorHandle->GetShape() == tensorShape);
+ CHECK(tensorHandle);
+ CHECK(tensorHandle->GetImportFlags() == static_cast<MemorySourceFlags>(MemorySource::Malloc));
+ CHECK(tensorHandle->GetShape() == tensorShape);
// Same method but explicitly specifying isManaged = false.
tensorHandle = factory.CreateTensorHandle(tensorInfo, false);
CHECK(tensorHandle);
- ARMNN_ASSERT(tensorHandle->GetImportFlags() == static_cast<MemorySourceFlags>(MemorySource::Malloc));
- ARMNN_ASSERT(tensorHandle->GetShape() == tensorShape);
+ CHECK(tensorHandle->GetImportFlags() == static_cast<MemorySourceFlags>(MemorySource::Malloc));
+ CHECK(tensorHandle->GetShape() == tensorShape);
// Now try TensorInfo and DataLayout factory method.
tensorHandle = factory.CreateTensorHandle(tensorInfo, DataLayout::NHWC);
CHECK(tensorHandle);
- ARMNN_ASSERT(tensorHandle->GetImportFlags() == static_cast<MemorySourceFlags>(MemorySource::Malloc));
- ARMNN_ASSERT(tensorHandle->GetShape() == tensorShape);
+ CHECK(tensorHandle->GetImportFlags() == static_cast<MemorySourceFlags>(MemorySource::Malloc));
+ CHECK(tensorHandle->GetShape() == tensorShape);
}
TEST_CASE("CreateSubtensorOfImportTensor")
@@ -67,8 +65,8 @@ TEST_CASE("CreateSubtensorOfImportTensor")
uint32_t origin[4] = { 1, 1, 0, 0 };
auto subTensor = factory.CreateSubTensorHandle(*tensorHandle, subTensorShape, origin);
CHECK(subTensor);
- ARMNN_ASSERT(subTensor->GetShape() == subTensorShape);
- ARMNN_ASSERT(subTensor->GetParent() == tensorHandle.get());
+ CHECK(subTensor->GetShape() == subTensorShape);
+ CHECK(subTensor->GetParent() == tensorHandle.get());
}
TEST_CASE("CreateSubtensorNonZeroXYIsInvalid")
@@ -87,7 +85,7 @@ TEST_CASE("CreateSubtensorNonZeroXYIsInvalid")
uint32_t origin[4] = { 0, 0, 1, 1 };
auto subTensor = factory.CreateSubTensorHandle(*tensorHandle, subTensorShape, origin);
// We expect a nullptr.
- ARMNN_ASSERT(subTensor == nullptr);
+ CHECK(subTensor == nullptr);
}
TEST_CASE("CreateSubtensorXYMustMatchParent")
@@ -105,7 +103,7 @@ TEST_CASE("CreateSubtensorXYMustMatchParent")
uint32_t origin[4] = { 1, 1, 0, 0 };
auto subTensor = factory.CreateSubTensorHandle(*tensorHandle, subTensorShape, origin);
// We expect a nullptr.
- ARMNN_ASSERT(subTensor == nullptr);
+ CHECK(subTensor == nullptr);
}
TEST_CASE("CreateSubtensorMustBeSmallerThanParent")
@@ -122,7 +120,7 @@ TEST_CASE("CreateSubtensorMustBeSmallerThanParent")
uint32_t origin[4] = { 1, 1, 0, 0 };
// This should result in a nullptr.
auto subTensor = factory.CreateSubTensorHandle(*tensorHandle, subTensorShape, origin);
- ARMNN_ASSERT(subTensor == nullptr);
+ CHECK(subTensor == nullptr);
}
}
diff --git a/src/backends/cl/test/ClImportTensorHandleTests.cpp b/src/backends/cl/test/ClImportTensorHandleTests.cpp
index 39619e6421..259c091586 100644
--- a/src/backends/cl/test/ClImportTensorHandleTests.cpp
+++ b/src/backends/cl/test/ClImportTensorHandleTests.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021, 2023-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -308,7 +308,7 @@ TEST_CASE_FIXTURE(ClContextControlFixture, "ClForceImportConv2dEndToEnd")
size_t totalBytes = numElements * sizeof(float);
IConnectableLayer* const inputLayer = network->AddInputLayer(0, "input");
- ARMNN_ASSERT(inputLayer);
+ CHECK(inputLayer);
armnn::ConstTensor weights(kernelInfo, kernel);
@@ -324,7 +324,7 @@ TEST_CASE_FIXTURE(ClContextControlFixture, "ClForceImportConv2dEndToEnd")
armnn::IConnectableLayer* const convLayer = network->AddConvolution2dLayer(convDesc2d, "conv");
armnn::IConnectableLayer* weightsLayer = network->AddConstantLayer(weights);
- ARMNN_ASSERT(convLayer);
+ CHECK(convLayer);
weightsLayer->GetOutputSlot(0).SetTensorInfo(weights.GetInfo());
weightsLayer->GetOutputSlot(0).Connect(convLayer->GetInputSlot(1u));
@@ -460,10 +460,10 @@ TEST_CASE_FIXTURE(ClContextControlFixture, "ClForceImportConvertFp16toFp32EndToE
size_t totalBytesOutput = numElements * sizeof(float);
IConnectableLayer* const inputLayer = network.AddInputLayer(0, "input");
- ARMNN_ASSERT(inputLayer);
+ CHECK(inputLayer);
armnn::IConnectableLayer* const convLayer = network.AddConvertFp16ToFp32Layer("convert");
- ARMNN_ASSERT(convLayer);
+ CHECK(convLayer);
inputLayer->GetOutputSlot(0).Connect(convLayer->GetInputSlot(0));
inputLayer->GetOutputSlot(0).SetTensorInfo(inputInfo);
@@ -608,10 +608,10 @@ TEST_CASE_FIXTURE(ClContextControlFixture, "ClForceImportConvertFp32toFp16EndToE
size_t totalBytesOutput = numElements * sizeof(Half);
IConnectableLayer* const inputLayer = network.AddInputLayer(0, "input");
- ARMNN_ASSERT(inputLayer);
+ CHECK(inputLayer);
armnn::IConnectableLayer* const convLayer = network.AddConvertFp32ToFp16Layer("convert");
- ARMNN_ASSERT(convLayer);
+ CHECK(convLayer);
inputLayer->GetOutputSlot(0).Connect(convLayer->GetInputSlot(0));
inputLayer->GetOutputSlot(0).SetTensorInfo(inputInfo);
@@ -747,10 +747,10 @@ TEST_CASE_FIXTURE(ClContextControlFixture, "ClForceImportSimpleConvertFp32toFp16
size_t totalBytesOutput = numElements * sizeof(Half);
IConnectableLayer* const inputLayer = network.AddInputLayer(0, "input");
- ARMNN_ASSERT(inputLayer);
+ CHECK(inputLayer);
armnn::IConnectableLayer* const convLayer = network.AddConvertFp32ToFp16Layer("convert");
- ARMNN_ASSERT(convLayer);
+ CHECK(convLayer);
inputLayer->GetOutputSlot(0).Connect(convLayer->GetInputSlot(0));
inputLayer->GetOutputSlot(0).SetTensorInfo(inputInfo);
@@ -884,7 +884,7 @@ TEST_CASE_FIXTURE(ClContextControlFixture, "ClForceImportRepeatedInferencesEndTo
size_t totalBytes = numElements * sizeof(float);
IConnectableLayer* const inputLayer = network->AddInputLayer(0, "input");
- ARMNN_ASSERT(inputLayer);
+ CHECK(inputLayer);
armnn::ConstTensor weights(kernelInfo, kernel);
@@ -897,7 +897,7 @@ TEST_CASE_FIXTURE(ClContextControlFixture, "ClForceImportRepeatedInferencesEndTo
convDesc2d.m_PadBottom = 1;
convDesc2d.m_DataLayout = DataLayout::NHWC;
armnn::IConnectableLayer* const convLayer = network->AddConvolution2dLayer(convDesc2d, "conv");
- ARMNN_ASSERT(convLayer);
+ CHECK(convLayer);
armnn::IConnectableLayer* weightsLayer = network->AddConstantLayer(weights);
@@ -1109,7 +1109,7 @@ TEST_CASE_FIXTURE(ClContextControlFixture, "ClForceImportRepeatedInferencesInver
size_t totalBytes = numElements * sizeof(float);
IConnectableLayer* const inputLayer = network->AddInputLayer(0, "input");
- ARMNN_ASSERT(inputLayer);
+ CHECK(inputLayer);
armnn::ConstTensor weights(kernelInfo, kernel);
@@ -1123,7 +1123,7 @@ TEST_CASE_FIXTURE(ClContextControlFixture, "ClForceImportRepeatedInferencesInver
convDesc2d.m_DataLayout = DataLayout::NHWC;
armnn::IConnectableLayer* const convLayer = network->AddConvolution2dLayer(convDesc2d, "conv");
- ARMNN_ASSERT(convLayer);
+ CHECK(convLayer);
armnn::IConnectableLayer* weightsLayer = network->AddConstantLayer(weights);