From 68c60e93d445cc51bd9f650aa3489f57d2227e13 Mon Sep 17 00:00:00 2001
From: Colm Donelan
Date: Wed, 21 Feb 2024 15:58:35 +0000
Subject: IVGCVSW-7854 Remove/rewrite asserts in the backends unit tests.

* Replace calls to ARMNN_ASSERT with DOCTEST CHECK.

Signed-off-by: Colm Donelan
Change-Id: I8904d169b2099d57a344e319b2f14cf5d8392ae8
---
 .../test/StridedSliceAsyncEndToEndTest.hpp          |  4 ++--
 .../test/layerTests/ActivationTestImpl.cpp          |  6 ++---
 .../test/layerTests/ComparisonTestImpl.cpp          |  8 +++----
 .../test/layerTests/ConcatTestImpl.cpp              | 20 ++++++++--------
 .../test/layerTests/Conv2dTestImpl.cpp              | 22 ++++++++---------
 .../test/layerTests/Conv3dTestImpl.cpp              |  8 +++----
 .../test/layerTests/LogicalTestImpl.cpp             | 13 +++++-----
 .../test/layerTests/SoftmaxTestImpl.cpp             |  4 ++--
 src/backends/cl/test/ClCreateWorkloadTests.cpp      | 14 +++++------
 src/backends/cl/test/ClDefaultAllocatorTests.cpp    |  4 ++--
 .../cl/test/ClImportTensorHandleFactoryTests.cpp    | 28 ++++++++++------------
 src/backends/cl/test/ClImportTensorHandleTests.cpp  | 26 ++++++++++----------
 .../reference/test/RefTensorHandleTests.cpp         |  4 ++--
 13 files changed, 78 insertions(+), 83 deletions(-)
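[Reviewer note, not part of the commit: the change swaps Arm NN's ARMNN_ASSERT / ARMNN_ASSERT_MSG macros for doctest's CHECK and CHECK_MESSAGE throughout the backend unit tests. The minimal standalone sketch below, assuming doctest is available on the include path, illustrates the behavioural difference: CHECK and CHECK_MESSAGE record a test failure and let the test case keep running, REQUIRE aborts only the current test case, whereas an assert would have aborted the whole test binary. The test name and values are invented purely for illustration.]

    // Review-only sketch of the doctest macros the patch switches to; not from the Arm NN suite.
    #define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN   // let doctest supply main() so this builds standalone
    #include <doctest/doctest.h>

    #include <memory>

    TEST_CASE("CheckVersusAssertSketch")   // hypothetical test case for illustration only
    {
        auto workload = std::make_unique<int>(42);

        // CHECK logs a failure and keeps running, so later checks in the test still execute.
        CHECK(workload != nullptr);

        // CHECK_MESSAGE attaches extra context to the failure report (the ARMNN_ASSERT_MSG replacement).
        CHECK_MESSAGE(*workload == 42, "workload should hold the expected value");

        // REQUIRE stops only this test case on failure, unlike an assert, which kills the process.
        REQUIRE(workload != nullptr);
    }

[The patch itself uses only CHECK and CHECK_MESSAGE, so a failing condition is reported without terminating the test run.]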
diff --git a/src/backends/backendsCommon/test/StridedSliceAsyncEndToEndTest.hpp b/src/backends/backendsCommon/test/StridedSliceAsyncEndToEndTest.hpp
index 9ba90578c8..84bf34dc60 100644
--- a/src/backends/backendsCommon/test/StridedSliceAsyncEndToEndTest.hpp
+++ b/src/backends/backendsCommon/test/StridedSliceAsyncEndToEndTest.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2021,2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021,2023-2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -127,7 +127,7 @@ void AsyncEndToEndTestImpl(INetworkPtr network,
                            float tolerance = 0.000001f,
                            size_t numThreads = 1)
 {
-    ARMNN_ASSERT(numThreads >= 1);
+    CHECK(numThreads >= 1);
     const unsigned int numberOfInferences = numThreads == 1 ? 1 : 1000;
 
     // Create Runtime in which test will run
diff --git a/src/backends/backendsCommon/test/layerTests/ActivationTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ActivationTestImpl.cpp
index b562a8af32..21b3951fe4 100644
--- a/src/backends/backendsCommon/test/layerTests/ActivationTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/ActivationTestImpl.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017, 2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017, 2023-2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -1350,10 +1350,10 @@ LayerTestResult CompareActivationTestImpl(
 
     std::unique_ptr workload = workloadFactory.CreateWorkload(armnn::LayerType::Activation, data, info);
-    ARMNN_ASSERT(workload != nullptr);
+    CHECK(workload != nullptr);
     std::unique_ptr workloadRef = refWorkloadFactory.CreateWorkload(armnn::LayerType::Activation, refData, refInfo);
-    ARMNN_ASSERT(workloadRef != nullptr);
+    CHECK(workloadRef != nullptr);
 
     inputHandle->Allocate();
     outputHandle->Allocate();
diff --git a/src/backends/backendsCommon/test/layerTests/ComparisonTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ComparisonTestImpl.cpp
index c5da07279d..c5366ba2ac 100644
--- a/src/backends/backendsCommon/test/layerTests/ComparisonTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/ComparisonTestImpl.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2019 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2019, 2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -43,13 +43,13 @@ LayerTestResult ComparisonTestImpl(
     int outQuantOffset)
 {
     IgnoreUnused(memoryManager);
-    ARMNN_ASSERT(shape0.GetNumDimensions() == NumDims);
+    CHECK(shape0.GetNumDimensions() == NumDims);
     armnn::TensorInfo inputTensorInfo0(shape0, ArmnnInType, quantScale0, quantOffset0);
 
-    ARMNN_ASSERT(shape1.GetNumDimensions() == NumDims);
+    CHECK(shape1.GetNumDimensions() == NumDims);
     armnn::TensorInfo inputTensorInfo1(shape1, ArmnnInType, quantScale1, quantOffset1);
 
-    ARMNN_ASSERT(outShape.GetNumDimensions() == NumDims);
+    CHECK(outShape.GetNumDimensions() == NumDims);
     armnn::TensorInfo outputTensorInfo(outShape, armnn::DataType::Boolean, outQuantScale, outQuantOffset);
 
     std::vector actualOutput(outputTensorInfo.GetNumElements());
diff --git a/src/backends/backendsCommon/test/layerTests/ConcatTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ConcatTestImpl.cpp
index a7a2364475..d3d9c882eb 100644
--- a/src/backends/backendsCommon/test/layerTests/ConcatTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/ConcatTestImpl.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017, 2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -61,7 +61,7 @@ bool NeedPermuteForConcat(
         }
         else
         {
-            ARMNN_ASSERT_MSG(nDimensions == tensorInfo.GetShape().GetNumDimensions(),
+            CHECK_MESSAGE(nDimensions == tensorInfo.GetShape().GetNumDimensions(),
                 "Input shapes must have the same number of dimensions");
         }
     }
@@ -92,7 +92,7 @@ void Generate3dPermuteVectorForConcat(
     unsigned int & concatDim,
     std::pair & permutations)
 {
-    ARMNN_ASSERT_MSG(numDimensions <= 3,
+    CHECK_MESSAGE(numDimensions <= 3,
        "Only dimensions 1,2 and 3 are supported by this helper");
     unsigned int expandedBy = 3 - numDimensions;
     unsigned int expandedConcatAxis = concatDim + expandedBy;
@@ -113,7 +113,7 @@ void Generate3dPermuteVectorForConcat(
     }
     else
     {
-        ARMNN_ASSERT(expandedConcatAxis == 0);
+        CHECK(expandedConcatAxis == 0);
         concatDim = 0;
     }
 }
@@ -128,7 +128,7 @@ template void PermuteTensorData(
     std::vector& outputData)
 {
     IgnoreUnused(memoryManager);
-    ARMNN_ASSERT_MSG(inputData != nullptr, "inputData must not be null");
+    CHECK_MESSAGE(inputData != nullptr, "inputData must not be null");
     if (inputData == nullptr)
     {
         // Nullptr is an error in the test. By returning without doing the concatenation
@@ -182,7 +182,7 @@ template void PermuteInputsForConcat(
     TensorInfo & outputTensorInfo)
 {
     IgnoreUnused(memoryManager);
-    ARMNN_ASSERT_MSG(inputTensorInfos.size() > 1,
+    CHECK_MESSAGE(inputTensorInfos.size() > 1,
         "Expecting more than one tensor to be concatenated here");
 
     unsigned int numDims = 0;
@@ -203,12 +203,12 @@ template void PermuteInputsForConcat(
             // Store the reverese permutation.
             permuteVector = permutations.second;
-            ARMNN_ASSERT_MSG(!permuteVector.IsEqual(identity),
+            CHECK_MESSAGE(!permuteVector.IsEqual(identity),
                 "Test logic error, we don't need permutation, so we shouldn't arrive here");
         }
         else
         {
-            ARMNN_ASSERT_MSG(numDims == tensorInfo.GetShape().GetNumDimensions(),
+            CHECK_MESSAGE(numDims == tensorInfo.GetShape().GetNumDimensions(),
                 "All inputs must have the same number of dimensions");
         }
 
@@ -249,7 +249,7 @@ template void PermuteOutputForConcat(
     std::unique_ptr && inputDataHandle,
     T * data)
 {
-    ARMNN_ASSERT_MSG(data != nullptr, "data must not be null");
+    CHECK_MESSAGE(data != nullptr, "data must not be null");
     if (data == nullptr)
     {
        // Nullptr is an error in the test. By returning without doing the permutation
@@ -286,7 +286,7 @@ template void Concatenate(
     unsigned int concatDim,
     bool useSubtensor)
 {
-    ARMNN_ASSERT_MSG(output != nullptr, "output must not be null");
+    CHECK_MESSAGE(output != nullptr, "output must not be null");
     if (output == nullptr)
     {
         // Nullptr is an error in the test. By returning without doing the permutation
diff --git a/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp
index 69a04df769..6fcb4d0c8b 100644
--- a/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017,2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017, 2022, 2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -162,9 +162,9 @@ template
 void ApplyBias(std::vector& v, float vScale, int32_t vOffset,
                const std::vector& bias, float bScale, int32_t bOffset,
                uint32_t w, uint32_t h)
 {
-    ARMNN_ASSERT_MSG((armnn::IsQuantizedType() && vScale != 0.0f) || (!armnn::IsQuantizedType()),
+    CHECK_MESSAGE(((armnn::IsQuantizedType() && vScale != 0.0f) || (!armnn::IsQuantizedType())),
                      "Invalid type and parameter combination.");
-    ARMNN_ASSERT_MSG((armnn::IsQuantizedType() && bScale != 0.0f) || (!armnn::IsQuantizedType()),
+    CHECK_MESSAGE(((armnn::IsQuantizedType() && bScale != 0.0f) || (!armnn::IsQuantizedType())),
                      "Invalid type and parameter combination.");
 
     // Note we need to dequantize and re-quantize the image value and the bias.
@@ -176,7 +176,7 @@ void ApplyBias(std::vector& v, float vScale, int32_t vOffset,
         for (uint32_t x = 0; x < w; ++x)
         {
             uint32_t offset = (i * h + y) * w + x;
-            ARMNN_ASSERT(offset < v.size());
+            CHECK(offset < v.size());
             T& outRef = v[offset];
             float dOutput = SelectiveDequantize(outRef, vScale, vOffset);
             outRef = SelectiveQuantize(dOutput + dBias, vScale, vOffset);
@@ -233,11 +233,11 @@ LayerTestResult SimpleConvolution2dTestImpl(
     bool biasEnabled = bias.size() > 0;
 
     // This function currently assumes 1 batch of input/output (and duplicates this into 2 batches).
-    ARMNN_ASSERT(inputNum == 1);
-    ARMNN_ASSERT(outputNum == 1);
+    CHECK(inputNum == 1);
+    CHECK(outputNum == 1);
 
     // If a bias is used, its size must equal the number of output channels.
-    ARMNN_ASSERT(!biasEnabled || bias.size() == outputChannels);
+    CHECK((!biasEnabled || (bias.size() == outputChannels)));
 
     // Note these tensors will use two (identical) batches.
     armnn::TensorInfo inputTensorInfo =
@@ -1719,7 +1719,7 @@ LayerTestResult DepthwiseConvolution2dAsymmetricTestImpl(
     // If a bias is used, its size must equal the number of output channels.
     bool biasEnabled = bias.size() > 0;
-    ARMNN_ASSERT(!biasEnabled || bias.size() == outputChannels);
+    CHECK((!biasEnabled || (bias.size() == outputChannels)));
 
     // Creates the tensors.
     armnn::TensorInfo inputTensorInfo =
@@ -2277,11 +2277,11 @@ LayerTestResult DepthwiseConvolution2dTestImpl(
     bool biasEnabled = bias.size() > 0;
 
     // This function currently assumes 1 batch of input/output (and duplicates this into 2 batches).
-    ARMNN_ASSERT(inputNum == 1);
-    ARMNN_ASSERT(outputNum == 1);
+    CHECK(inputNum == 1);
+    CHECK(outputNum == 1);
 
     // If a bias is used, its size must equal the number of output channels.
-    ARMNN_ASSERT(!biasEnabled || bias.size() == outputChannels);
+    CHECK((!biasEnabled || (bias.size() == outputChannels)));
 
     // Note these tensors will use two (identical) batches.
diff --git a/src/backends/backendsCommon/test/layerTests/Conv3dTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/Conv3dTestImpl.cpp
index d62ffedf3f..55e6dd05fd 100644
--- a/src/backends/backendsCommon/test/layerTests/Conv3dTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/Conv3dTestImpl.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021, 2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -57,9 +57,9 @@ void ApplyBiasToData(std::vector& v, const std::vector& bias,
                      float vScale, int32_t vOffset,
                      float bScale, int32_t bOffset)
 {
-    ARMNN_ASSERT_MSG((armnn::IsQuantizedType() && vScale != 0.0f) || (!armnn::IsQuantizedType()),
+    CHECK_MESSAGE(((armnn::IsQuantizedType() && vScale != 0.0f) || (!armnn::IsQuantizedType())),
                      "Invalid type and parameter combination.");
-    ARMNN_ASSERT_MSG((armnn::IsQuantizedType() && bScale != 0.0f) || (!armnn::IsQuantizedType()),
+    CHECK_MESSAGE(((armnn::IsQuantizedType() && bScale != 0.0f) || (!armnn::IsQuantizedType())),
                      "Invalid type and parameter combination.");
 
     for (uint32_t i = 0; i < bias.size(); ++i)
@@ -196,7 +196,7 @@ LayerTestResult SimpleConvolution3dTestImpl(
     bool biasEnabled = bias.size() > 0;
 
     // If a bias is used, its size must equal the number of output channels.
-    ARMNN_ASSERT(!biasEnabled || bias.size() == outputChannels);
+    CHECK((!biasEnabled || (bias.size() == outputChannels)));
 
     // Creates the tensors.
     armnn::TensorInfo inputTensorInfo({inputNum, inputDepth, inputHeight, inputWidth, inputChannels}, ArmnnType);
diff --git a/src/backends/backendsCommon/test/layerTests/LogicalTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/LogicalTestImpl.cpp
index 2bd9372fe9..691780a373 100644
--- a/src/backends/backendsCommon/test/layerTests/LogicalTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/LogicalTestImpl.cpp
@@ -1,11 +1,10 @@
 //
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020, 2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
 #include "LogicalTestImpl.hpp"
 
-#include
 #include
 #include
@@ -29,10 +28,10 @@ LayerTestResult LogicalUnaryTestHelper(
     std::vector expectedOutput,
     const armnn::ITensorHandleFactory& tensorHandleFactory)
 {
-    ARMNN_ASSERT(inputShape.GetNumDimensions() == NumDims);
+    CHECK(inputShape.GetNumDimensions() == NumDims);
     armnn::TensorInfo inputTensorInfo(inputShape, armnn::DataType::Boolean);
 
-    ARMNN_ASSERT(outputShape.GetNumDimensions() == NumDims);
+    CHECK(outputShape.GetNumDimensions() == NumDims);
     armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Boolean);
 
     std::vector actualOutput(outputTensorInfo.GetNumElements());
@@ -80,13 +79,13 @@ LayerTestResult LogicalBinaryTestHelper(
     std::vector expectedOutput,
     const armnn::ITensorHandleFactory& tensorHandleFactory)
 {
-    ARMNN_ASSERT(inputShape0.GetNumDimensions() == NumDims);
+    CHECK(inputShape0.GetNumDimensions() == NumDims);
     armnn::TensorInfo inputTensorInfo0(inputShape0, armnn::DataType::Boolean);
 
-    ARMNN_ASSERT(inputShape1.GetNumDimensions() == NumDims);
+    CHECK(inputShape1.GetNumDimensions() == NumDims);
     armnn::TensorInfo inputTensorInfo1(inputShape1, armnn::DataType::Boolean);
 
-    ARMNN_ASSERT(outputShape.GetNumDimensions() == NumDims);
+    CHECK(outputShape.GetNumDimensions() == NumDims);
     armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Boolean);
 
     std::vector actualOutput(outputTensorInfo.GetNumElements());
diff --git a/src/backends/backendsCommon/test/layerTests/SoftmaxTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/SoftmaxTestImpl.cpp
index 63375f0f2f..3f4453c61b 100644
--- a/src/backends/backendsCommon/test/layerTests/SoftmaxTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/SoftmaxTestImpl.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017, 2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -104,7 +104,7 @@ LayerTestResult SimpleSoftmaxBaseTestImpl(
     outputHandle->Allocate();
     CopyDataToITensorHandle(inputHandle.get(), input.data());
 
-    ARMNN_ASSERT(workload);
+    CHECK(workload);
 
     ExecuteWorkload(*workload, memoryManager);
 
diff --git a/src/backends/cl/test/ClCreateWorkloadTests.cpp b/src/backends/cl/test/ClCreateWorkloadTests.cpp
index 51ea0dc5d3..09418c2422 100644
--- a/src/backends/cl/test/ClCreateWorkloadTests.cpp
+++ b/src/backends/cl/test/ClCreateWorkloadTests.cpp
@@ -1,13 +1,12 @@
 //
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
 #include "ClContextControlFixture.hpp"
 #include "ClWorkloadFactoryHelper.hpp"
 
-#include
-#include
+#include
 #include
 #include
 #include
@@ -331,11 +330,10 @@ TEST_CASE_FIXTURE(ClContextControlFixture, "CreateConvolution2dFastMathEnabledWo
                                                       DataLayout::NCHW,
                                                       modelOptions);
-    ARMNN_ASSERT(workload != nullptr);
+    CHECK(workload != nullptr);
     auto conv2dWorkload = PolymorphicDowncast(workload.get());
-    IgnoreUnused(conv2dWorkload);
-    ARMNN_ASSERT(conv2dWorkload != nullptr);
-    ARMNN_ASSERT(conv2dWorkload->GetConvolutionMethod() == arm_compute::ConvolutionMethod::WINOGRAD);
+    CHECK(conv2dWorkload != nullptr);
+    CHECK(conv2dWorkload->GetConvolutionMethod() == arm_compute::ConvolutionMethod::WINOGRAD);
 }
 
 TEST_CASE_FIXTURE(ClContextControlFixture, "ClReplaceInputOutputConvolution2dWorkload")
@@ -480,7 +478,7 @@ TEST_CASE_FIXTURE(ClContextControlFixture, "CreateConvolution2dClCompiledContext
                                                                               workloadInfo,
                                                                               clMemoryManager->GetIntraLayerManager(),
                                                                               clCompileContext);
-    ARMNN_ASSERT(workload != nullptr);
+    CHECK(workload != nullptr);
     // Check built programs are not empty in context
     CHECK(!clCompileContext.get_built_programs().empty());
 }
diff --git a/src/backends/cl/test/ClDefaultAllocatorTests.cpp b/src/backends/cl/test/ClDefaultAllocatorTests.cpp
index 411a480815..24b8a09c9c 100644
--- a/src/backends/cl/test/ClDefaultAllocatorTests.cpp
+++ b/src/backends/cl/test/ClDefaultAllocatorTests.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021, 2023-2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -97,7 +97,7 @@ TEST_CASE("DefaultAllocatorTestMock")
     // Initialize Mock Backend
     MockBackendInitialiser initialiser;
     auto factoryFun = BackendRegistryInstance().GetFactory(MockBackend().GetIdStatic());
-    ARMNN_ASSERT(factoryFun != nullptr);
+    CHECK(factoryFun != nullptr);
     auto backend = factoryFun();
     auto defaultAllocator = backend->GetDefaultAllocator();
 
diff --git a/src/backends/cl/test/ClImportTensorHandleFactoryTests.cpp b/src/backends/cl/test/ClImportTensorHandleFactoryTests.cpp
index fee40fd257..46be3a122d 100644
--- a/src/backends/cl/test/ClImportTensorHandleFactoryTests.cpp
+++ b/src/backends/cl/test/ClImportTensorHandleFactoryTests.cpp
@@ -1,10 +1,8 @@
 //
-// Copyright © 2021 Arm Ltd. All rights reserved.
+// Copyright © 2021, 2024 Arm Ltd. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
-#include
-
 #include
 #include
@@ -35,21 +33,21 @@ TEST_CASE("ImportTensorFactoryCreateMallocTensorHandle")
     // Start with the TensorInfo factory method. Create an import tensor handle and verify the data is
     // passed through correctly.
     auto tensorHandle = factory.CreateTensorHandle(tensorInfo);
-    ARMNN_ASSERT(tensorHandle);
-    ARMNN_ASSERT(tensorHandle->GetImportFlags() == static_cast(MemorySource::Malloc));
-    ARMNN_ASSERT(tensorHandle->GetShape() == tensorShape);
+    CHECK(tensorHandle);
+    CHECK(tensorHandle->GetImportFlags() == static_cast(MemorySource::Malloc));
+    CHECK(tensorHandle->GetShape() == tensorShape);
 
     // Same method but explicitly specifying isManaged = false.
     tensorHandle = factory.CreateTensorHandle(tensorInfo, false);
     CHECK(tensorHandle);
-    ARMNN_ASSERT(tensorHandle->GetImportFlags() == static_cast(MemorySource::Malloc));
-    ARMNN_ASSERT(tensorHandle->GetShape() == tensorShape);
+    CHECK(tensorHandle->GetImportFlags() == static_cast(MemorySource::Malloc));
+    CHECK(tensorHandle->GetShape() == tensorShape);
 
     // Now try TensorInfo and DataLayout factory method.
     tensorHandle = factory.CreateTensorHandle(tensorInfo, DataLayout::NHWC);
     CHECK(tensorHandle);
-    ARMNN_ASSERT(tensorHandle->GetImportFlags() == static_cast(MemorySource::Malloc));
-    ARMNN_ASSERT(tensorHandle->GetShape() == tensorShape);
+    CHECK(tensorHandle->GetImportFlags() == static_cast(MemorySource::Malloc));
+    CHECK(tensorHandle->GetShape() == tensorShape);
 }
 
 TEST_CASE("CreateSubtensorOfImportTensor")
@@ -67,8 +65,8 @@ TEST_CASE("CreateSubtensorOfImportTensor")
     uint32_t origin[4] = { 1, 1, 0, 0 };
     auto subTensor = factory.CreateSubTensorHandle(*tensorHandle, subTensorShape, origin);
     CHECK(subTensor);
-    ARMNN_ASSERT(subTensor->GetShape() == subTensorShape);
-    ARMNN_ASSERT(subTensor->GetParent() == tensorHandle.get());
+    CHECK(subTensor->GetShape() == subTensorShape);
+    CHECK(subTensor->GetParent() == tensorHandle.get());
 }
 
 TEST_CASE("CreateSubtensorNonZeroXYIsInvalid")
@@ -87,7 +85,7 @@ TEST_CASE("CreateSubtensorNonZeroXYIsInvalid")
     uint32_t origin[4] = { 0, 0, 1, 1 };
     auto subTensor = factory.CreateSubTensorHandle(*tensorHandle, subTensorShape, origin);
     // We expect a nullptr.
-    ARMNN_ASSERT(subTensor == nullptr);
+    CHECK(subTensor == nullptr);
 }
 
 TEST_CASE("CreateSubtensorXYMustMatchParent")
@@ -105,7 +103,7 @@ TEST_CASE("CreateSubtensorXYMustMatchParent")
     uint32_t origin[4] = { 1, 1, 0, 0 };
     auto subTensor = factory.CreateSubTensorHandle(*tensorHandle, subTensorShape, origin);
     // We expect a nullptr.
-    ARMNN_ASSERT(subTensor == nullptr);
+    CHECK(subTensor == nullptr);
 }
 
 TEST_CASE("CreateSubtensorMustBeSmallerThanParent")
@@ -122,7 +120,7 @@ TEST_CASE("CreateSubtensorMustBeSmallerThanParent")
     uint32_t origin[4] = { 1, 1, 0, 0 };
     // This should result in a nullptr.
     auto subTensor = factory.CreateSubTensorHandle(*tensorHandle, subTensorShape, origin);
-    ARMNN_ASSERT(subTensor == nullptr);
+    CHECK(subTensor == nullptr);
 }
 
 }
diff --git a/src/backends/cl/test/ClImportTensorHandleTests.cpp b/src/backends/cl/test/ClImportTensorHandleTests.cpp
index 39619e6421..259c091586 100644
--- a/src/backends/cl/test/ClImportTensorHandleTests.cpp
+++ b/src/backends/cl/test/ClImportTensorHandleTests.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021, 2023-2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -308,7 +308,7 @@ TEST_CASE_FIXTURE(ClContextControlFixture, "ClForceImportConv2dEndToEnd")
     size_t totalBytes = numElements * sizeof(float);
 
     IConnectableLayer* const inputLayer = network->AddInputLayer(0, "input");
-    ARMNN_ASSERT(inputLayer);
+    CHECK(inputLayer);
 
     armnn::ConstTensor weights(kernelInfo, kernel);
 
@@ -324,7 +324,7 @@ TEST_CASE_FIXTURE(ClContextControlFixture, "ClForceImportConv2dEndToEnd")
     armnn::IConnectableLayer* const convLayer = network->AddConvolution2dLayer(convDesc2d, "conv");
     armnn::IConnectableLayer* weightsLayer = network->AddConstantLayer(weights);
-    ARMNN_ASSERT(convLayer);
+    CHECK(convLayer);
 
     weightsLayer->GetOutputSlot(0).SetTensorInfo(weights.GetInfo());
     weightsLayer->GetOutputSlot(0).Connect(convLayer->GetInputSlot(1u));
@@ -460,10 +460,10 @@ TEST_CASE_FIXTURE(ClContextControlFixture, "ClForceImportConvertFp16toFp32EndToE
     size_t totalBytesOutput = numElements * sizeof(float);
 
     IConnectableLayer* const inputLayer = network.AddInputLayer(0, "input");
-    ARMNN_ASSERT(inputLayer);
+    CHECK(inputLayer);
 
     armnn::IConnectableLayer* const convLayer = network.AddConvertFp16ToFp32Layer("convert");
-    ARMNN_ASSERT(convLayer);
+    CHECK(convLayer);
 
     inputLayer->GetOutputSlot(0).Connect(convLayer->GetInputSlot(0));
     inputLayer->GetOutputSlot(0).SetTensorInfo(inputInfo);
@@ -608,10 +608,10 @@ TEST_CASE_FIXTURE(ClContextControlFixture, "ClForceImportConvertFp32toFp16EndToE
     size_t totalBytesOutput = numElements * sizeof(Half);
 
     IConnectableLayer* const inputLayer = network.AddInputLayer(0, "input");
-    ARMNN_ASSERT(inputLayer);
+    CHECK(inputLayer);
 
     armnn::IConnectableLayer* const convLayer = network.AddConvertFp32ToFp16Layer("convert");
-    ARMNN_ASSERT(convLayer);
+    CHECK(convLayer);
 
     inputLayer->GetOutputSlot(0).Connect(convLayer->GetInputSlot(0));
     inputLayer->GetOutputSlot(0).SetTensorInfo(inputInfo);
@@ -747,10 +747,10 @@ TEST_CASE_FIXTURE(ClContextControlFixture, "ClForceImportSimpleConvertFp32toFp16
     size_t totalBytesOutput = numElements * sizeof(Half);
 
     IConnectableLayer* const inputLayer = network.AddInputLayer(0, "input");
-    ARMNN_ASSERT(inputLayer);
+    CHECK(inputLayer);
 
     armnn::IConnectableLayer* const convLayer = network.AddConvertFp32ToFp16Layer("convert");
-    ARMNN_ASSERT(convLayer);
+    CHECK(convLayer);
 
     inputLayer->GetOutputSlot(0).Connect(convLayer->GetInputSlot(0));
     inputLayer->GetOutputSlot(0).SetTensorInfo(inputInfo);
@@ -884,7 +884,7 @@ TEST_CASE_FIXTURE(ClContextControlFixture, "ClForceImportRepeatedInferencesEndTo
     size_t totalBytes = numElements * sizeof(float);
 
     IConnectableLayer* const inputLayer = network->AddInputLayer(0, "input");
-    ARMNN_ASSERT(inputLayer);
+    CHECK(inputLayer);
 
     armnn::ConstTensor weights(kernelInfo, kernel);
 
@@ -897,7 +897,7 @@ TEST_CASE_FIXTURE(ClContextControlFixture, "ClForceImportRepeatedInferencesEndTo
     convDesc2d.m_PadBottom = 1;
     convDesc2d.m_DataLayout = DataLayout::NHWC;
     armnn::IConnectableLayer* const convLayer = network->AddConvolution2dLayer(convDesc2d, "conv");
-    ARMNN_ASSERT(convLayer);
+    CHECK(convLayer);
 
     armnn::IConnectableLayer* weightsLayer = network->AddConstantLayer(weights);
 
@@ -1109,7 +1109,7 @@ TEST_CASE_FIXTURE(ClContextControlFixture, "ClForceImportRepeatedInferencesInver
     size_t totalBytes = numElements * sizeof(float);
 
     IConnectableLayer* const inputLayer = network->AddInputLayer(0, "input");
-    ARMNN_ASSERT(inputLayer);
+    CHECK(inputLayer);
 
     armnn::ConstTensor weights(kernelInfo, kernel);
 
@@ -1123,7 +1123,7 @@ TEST_CASE_FIXTURE(ClContextControlFixture, "ClForceImportRepeatedInferencesInver
     convDesc2d.m_DataLayout = DataLayout::NHWC;
     armnn::IConnectableLayer* const convLayer = network->AddConvolution2dLayer(convDesc2d, "conv");
-    ARMNN_ASSERT(convLayer);
+    CHECK(convLayer);
 
     armnn::IConnectableLayer* weightsLayer = network->AddConstantLayer(weights);
 
diff --git a/src/backends/reference/test/RefTensorHandleTests.cpp b/src/backends/reference/test/RefTensorHandleTests.cpp
index 883df6fe4d..792522851a 100644
--- a/src/backends/reference/test/RefTensorHandleTests.cpp
+++ b/src/backends/reference/test/RefTensorHandleTests.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017,2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017, 2022, 2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -190,7 +190,7 @@ TEST_CASE("RefTensorHandleSupportsInPlaceComputation")
     RefTensorHandleFactory handleFactory(memoryManager);
 
     // RefTensorHandleFactory does not support InPlaceComputation
-    ARMNN_ASSERT(!(handleFactory.SupportsInPlaceComputation()));
+    CHECK(!(handleFactory.SupportsInPlaceComputation()));
 }
 
 TEST_CASE("TestManagedConstTensorHandle")
-- 
cgit v1.2.1