aboutsummaryrefslogtreecommitdiff
path: root/src/backends/backendsCommon
diff options
context:
space:
mode:
Diffstat (limited to 'src/backends/backendsCommon')
-rw-r--r--  src/backends/backendsCommon/test/StridedSliceAsyncEndToEndTest.hpp      |  4
-rw-r--r--  src/backends/backendsCommon/test/layerTests/ActivationTestImpl.cpp      |  6
-rw-r--r--  src/backends/backendsCommon/test/layerTests/ComparisonTestImpl.cpp      |  8
-rw-r--r--  src/backends/backendsCommon/test/layerTests/ConcatTestImpl.cpp          | 20
-rw-r--r--  src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp          | 22
-rw-r--r--  src/backends/backendsCommon/test/layerTests/Conv3dTestImpl.cpp          |  8
-rw-r--r--  src/backends/backendsCommon/test/layerTests/LogicalTestImpl.cpp         | 13
-rw-r--r--  src/backends/backendsCommon/test/layerTests/SoftmaxTestImpl.cpp         |  4
8 files changed, 42 insertions, 43 deletions
diff --git a/src/backends/backendsCommon/test/StridedSliceAsyncEndToEndTest.hpp b/src/backends/backendsCommon/test/StridedSliceAsyncEndToEndTest.hpp
index 9ba90578c8..84bf34dc60 100644
--- a/src/backends/backendsCommon/test/StridedSliceAsyncEndToEndTest.hpp
+++ b/src/backends/backendsCommon/test/StridedSliceAsyncEndToEndTest.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2021,2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021,2023-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -127,7 +127,7 @@ void AsyncEndToEndTestImpl(INetworkPtr network,
float tolerance = 0.000001f,
size_t numThreads = 1)
{
- ARMNN_ASSERT(numThreads >= 1);
+ CHECK(numThreads >= 1);
const unsigned int numberOfInferences = numThreads == 1 ? 1 : 1000;
// Create Runtime in which test will run
diff --git a/src/backends/backendsCommon/test/layerTests/ActivationTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ActivationTestImpl.cpp
index b562a8af32..21b3951fe4 100644
--- a/src/backends/backendsCommon/test/layerTests/ActivationTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/ActivationTestImpl.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017, 2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017, 2023-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -1350,10 +1350,10 @@ LayerTestResult<T, 4> CompareActivationTestImpl(
std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Activation,
data, info);
- ARMNN_ASSERT(workload != nullptr);
+ CHECK(workload != nullptr);
std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateWorkload(armnn::LayerType::Activation,
refData, refInfo);
- ARMNN_ASSERT(workloadRef != nullptr);
+ CHECK(workloadRef != nullptr);
inputHandle->Allocate();
outputHandle->Allocate();
diff --git a/src/backends/backendsCommon/test/layerTests/ComparisonTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ComparisonTestImpl.cpp
index c5da07279d..c5366ba2ac 100644
--- a/src/backends/backendsCommon/test/layerTests/ComparisonTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/ComparisonTestImpl.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2019 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2019, 2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -43,13 +43,13 @@ LayerTestResult<uint8_t, NumDims> ComparisonTestImpl(
int outQuantOffset)
{
IgnoreUnused(memoryManager);
- ARMNN_ASSERT(shape0.GetNumDimensions() == NumDims);
+ CHECK(shape0.GetNumDimensions() == NumDims);
armnn::TensorInfo inputTensorInfo0(shape0, ArmnnInType, quantScale0, quantOffset0);
- ARMNN_ASSERT(shape1.GetNumDimensions() == NumDims);
+ CHECK(shape1.GetNumDimensions() == NumDims);
armnn::TensorInfo inputTensorInfo1(shape1, ArmnnInType, quantScale1, quantOffset1);
- ARMNN_ASSERT(outShape.GetNumDimensions() == NumDims);
+ CHECK(outShape.GetNumDimensions() == NumDims);
armnn::TensorInfo outputTensorInfo(outShape, armnn::DataType::Boolean, outQuantScale, outQuantOffset);
std::vector<uint8_t> actualOutput(outputTensorInfo.GetNumElements());
diff --git a/src/backends/backendsCommon/test/layerTests/ConcatTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ConcatTestImpl.cpp
index a7a2364475..d3d9c882eb 100644
--- a/src/backends/backendsCommon/test/layerTests/ConcatTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/ConcatTestImpl.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017, 2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -61,7 +61,7 @@ bool NeedPermuteForConcat(
}
else
{
- ARMNN_ASSERT_MSG(nDimensions == tensorInfo.GetShape().GetNumDimensions(),
+ CHECK_MESSAGE(nDimensions == tensorInfo.GetShape().GetNumDimensions(),
"Input shapes must have the same number of dimensions");
}
}
@@ -92,7 +92,7 @@ void Generate3dPermuteVectorForConcat(
unsigned int & concatDim,
std::pair<PermutationVector, PermutationVector> & permutations)
{
- ARMNN_ASSERT_MSG(numDimensions <= 3,
+ CHECK_MESSAGE(numDimensions <= 3,
"Only dimensions 1,2 and 3 are supported by this helper");
unsigned int expandedBy = 3 - numDimensions;
unsigned int expandedConcatAxis = concatDim + expandedBy;
@@ -113,7 +113,7 @@ void Generate3dPermuteVectorForConcat(
}
else
{
- ARMNN_ASSERT(expandedConcatAxis == 0);
+ CHECK(expandedConcatAxis == 0);
concatDim = 0;
}
}
@@ -128,7 +128,7 @@ template<typename T> void PermuteTensorData(
std::vector<T>& outputData)
{
IgnoreUnused(memoryManager);
- ARMNN_ASSERT_MSG(inputData != nullptr, "inputData must not be null");
+ CHECK_MESSAGE(inputData != nullptr, "inputData must not be null");
if (inputData == nullptr)
{
// Nullptr is an error in the test. By returning without doing the concatenation
@@ -182,7 +182,7 @@ template<typename T> void PermuteInputsForConcat(
TensorInfo & outputTensorInfo)
{
IgnoreUnused(memoryManager);
- ARMNN_ASSERT_MSG(inputTensorInfos.size() > 1,
+ CHECK_MESSAGE(inputTensorInfos.size() > 1,
"Expecting more than one tensor to be concatenated here");
unsigned int numDims = 0;
@@ -203,12 +203,12 @@ template<typename T> void PermuteInputsForConcat(
// Store the reverse permutation.
permuteVector = permutations.second;
- ARMNN_ASSERT_MSG(!permuteVector.IsEqual(identity),
+ CHECK_MESSAGE(!permuteVector.IsEqual(identity),
"Test logic error, we don't need permutation, so we shouldn't arrive here");
}
else
{
- ARMNN_ASSERT_MSG(numDims == tensorInfo.GetShape().GetNumDimensions(),
+ CHECK_MESSAGE(numDims == tensorInfo.GetShape().GetNumDimensions(),
"All inputs must have the same number of dimensions");
}
@@ -249,7 +249,7 @@ template <typename T> void PermuteOutputForConcat(
std::unique_ptr<ITensorHandle> && inputDataHandle,
T * data)
{
- ARMNN_ASSERT_MSG(data != nullptr, "data must not be null");
+ CHECK_MESSAGE(data != nullptr, "data must not be null");
if (data == nullptr)
{
// Nullptr is an error in the test. By returning without doing the permutation
@@ -286,7 +286,7 @@ template<typename T> void Concatenate(
unsigned int concatDim,
bool useSubtensor)
{
- ARMNN_ASSERT_MSG(output != nullptr, "output must not be null");
+ CHECK_MESSAGE(output != nullptr, "output must not be null");
if (output == nullptr)
{
// Nullptr is an error in the test. By returning without doing the permutation
diff --git a/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp
index 69a04df769..6fcb4d0c8b 100644
--- a/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017,2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017, 2022, 2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -162,9 +162,9 @@ template<typename T, typename B>
void ApplyBias(std::vector<T>& v, float vScale, int32_t vOffset,
const std::vector<B>& bias, float bScale, int32_t bOffset, uint32_t w, uint32_t h)
{
- ARMNN_ASSERT_MSG((armnn::IsQuantizedType<T>() && vScale != 0.0f) || (!armnn::IsQuantizedType<T>()),
+ CHECK_MESSAGE(((armnn::IsQuantizedType<T>() && vScale != 0.0f) || (!armnn::IsQuantizedType<T>())),
"Invalid type and parameter combination.");
- ARMNN_ASSERT_MSG((armnn::IsQuantizedType<B>() && bScale != 0.0f) || (!armnn::IsQuantizedType<B>()),
+ CHECK_MESSAGE(((armnn::IsQuantizedType<B>() && bScale != 0.0f) || (!armnn::IsQuantizedType<B>())),
"Invalid type and parameter combination.");
// Note we need to dequantize and re-quantize the image value and the bias.
@@ -176,7 +176,7 @@ void ApplyBias(std::vector<T>& v, float vScale, int32_t vOffset,
for (uint32_t x = 0; x < w; ++x)
{
uint32_t offset = (i * h + y) * w + x;
- ARMNN_ASSERT(offset < v.size());
+ CHECK(offset < v.size());
T& outRef = v[offset];
float dOutput = SelectiveDequantize(outRef, vScale, vOffset);
outRef = SelectiveQuantize<T>(dOutput + dBias, vScale, vOffset);
@@ -233,11 +233,11 @@ LayerTestResult<T, 4> SimpleConvolution2dTestImpl(
bool biasEnabled = bias.size() > 0;
// This function currently assumes 1 batch of input/output (and duplicates this into 2 batches).
- ARMNN_ASSERT(inputNum == 1);
- ARMNN_ASSERT(outputNum == 1);
+ CHECK(inputNum == 1);
+ CHECK(outputNum == 1);
// If a bias is used, its size must equal the number of output channels.
- ARMNN_ASSERT(!biasEnabled || bias.size() == outputChannels);
+ CHECK((!biasEnabled || (bias.size() == outputChannels)));
// Note these tensors will use two (identical) batches.
armnn::TensorInfo inputTensorInfo =
@@ -1719,7 +1719,7 @@ LayerTestResult<T, 4> DepthwiseConvolution2dAsymmetricTestImpl(
// If a bias is used, its size must equal the number of output channels.
bool biasEnabled = bias.size() > 0;
- ARMNN_ASSERT(!biasEnabled || bias.size() == outputChannels);
+ CHECK((!biasEnabled || (bias.size() == outputChannels)));
// Creates the tensors.
armnn::TensorInfo inputTensorInfo =
@@ -2277,11 +2277,11 @@ LayerTestResult<T, 4> DepthwiseConvolution2dTestImpl(
bool biasEnabled = bias.size() > 0;
// This function currently assumes 1 batch of input/output (and duplicates this into 2 batches).
- ARMNN_ASSERT(inputNum == 1);
- ARMNN_ASSERT(outputNum == 1);
+ CHECK(inputNum == 1);
+ CHECK(outputNum == 1);
// If a bias is used, its size must equal the number of output channels.
- ARMNN_ASSERT(!biasEnabled || bias.size() == outputChannels);
+ CHECK((!biasEnabled || (bias.size() == outputChannels)));
// Note these tensors will use two (identical) batches.
diff --git a/src/backends/backendsCommon/test/layerTests/Conv3dTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/Conv3dTestImpl.cpp
index d62ffedf3f..55e6dd05fd 100644
--- a/src/backends/backendsCommon/test/layerTests/Conv3dTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/Conv3dTestImpl.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021, 2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -57,9 +57,9 @@ void ApplyBiasToData(std::vector<T>& v, const std::vector<B>& bias,
float vScale, int32_t vOffset,
float bScale, int32_t bOffset)
{
- ARMNN_ASSERT_MSG((armnn::IsQuantizedType<T>() && vScale != 0.0f) || (!armnn::IsQuantizedType<T>()),
+ CHECK_MESSAGE(((armnn::IsQuantizedType<T>() && vScale != 0.0f) || (!armnn::IsQuantizedType<T>())),
"Invalid type and parameter combination.");
- ARMNN_ASSERT_MSG((armnn::IsQuantizedType<B>() && bScale != 0.0f) || (!armnn::IsQuantizedType<B>()),
+ CHECK_MESSAGE(((armnn::IsQuantizedType<B>() && bScale != 0.0f) || (!armnn::IsQuantizedType<B>())),
"Invalid type and parameter combination.");
for (uint32_t i = 0; i < bias.size(); ++i)
@@ -196,7 +196,7 @@ LayerTestResult<T, 5> SimpleConvolution3dTestImpl(
bool biasEnabled = bias.size() > 0;
// If a bias is used, its size must equal the number of output channels.
- ARMNN_ASSERT(!biasEnabled || bias.size() == outputChannels);
+ CHECK((!biasEnabled || (bias.size() == outputChannels)));
// Creates the tensors.
armnn::TensorInfo inputTensorInfo({inputNum, inputDepth, inputHeight, inputWidth, inputChannels}, ArmnnType);
diff --git a/src/backends/backendsCommon/test/layerTests/LogicalTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/LogicalTestImpl.cpp
index 2bd9372fe9..691780a373 100644
--- a/src/backends/backendsCommon/test/layerTests/LogicalTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/LogicalTestImpl.cpp
@@ -1,11 +1,10 @@
//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020, 2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "LogicalTestImpl.hpp"
-#include <armnn/utility/Assert.hpp>
#include <ResolveType.hpp>
#include <armnn/backends/Workload.hpp>
@@ -29,10 +28,10 @@ LayerTestResult<uint8_t, NumDims> LogicalUnaryTestHelper(
std::vector<uint8_t> expectedOutput,
const armnn::ITensorHandleFactory& tensorHandleFactory)
{
- ARMNN_ASSERT(inputShape.GetNumDimensions() == NumDims);
+ CHECK(inputShape.GetNumDimensions() == NumDims);
armnn::TensorInfo inputTensorInfo(inputShape, armnn::DataType::Boolean);
- ARMNN_ASSERT(outputShape.GetNumDimensions() == NumDims);
+ CHECK(outputShape.GetNumDimensions() == NumDims);
armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Boolean);
std::vector<uint8_t> actualOutput(outputTensorInfo.GetNumElements());
@@ -80,13 +79,13 @@ LayerTestResult<uint8_t, NumDims> LogicalBinaryTestHelper(
std::vector<uint8_t> expectedOutput,
const armnn::ITensorHandleFactory& tensorHandleFactory)
{
- ARMNN_ASSERT(inputShape0.GetNumDimensions() == NumDims);
+ CHECK(inputShape0.GetNumDimensions() == NumDims);
armnn::TensorInfo inputTensorInfo0(inputShape0, armnn::DataType::Boolean);
- ARMNN_ASSERT(inputShape1.GetNumDimensions() == NumDims);
+ CHECK(inputShape1.GetNumDimensions() == NumDims);
armnn::TensorInfo inputTensorInfo1(inputShape1, armnn::DataType::Boolean);
- ARMNN_ASSERT(outputShape.GetNumDimensions() == NumDims);
+ CHECK(outputShape.GetNumDimensions() == NumDims);
armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Boolean);
std::vector<uint8_t> actualOutput(outputTensorInfo.GetNumElements());
diff --git a/src/backends/backendsCommon/test/layerTests/SoftmaxTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/SoftmaxTestImpl.cpp
index 63375f0f2f..3f4453c61b 100644
--- a/src/backends/backendsCommon/test/layerTests/SoftmaxTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/SoftmaxTestImpl.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017, 2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -104,7 +104,7 @@ LayerTestResult<T, n> SimpleSoftmaxBaseTestImpl(
outputHandle->Allocate();
CopyDataToITensorHandle(inputHandle.get(), input.data());
- ARMNN_ASSERT(workload);
+ CHECK(workload);
ExecuteWorkload(*workload, memoryManager);