aboutsummaryrefslogtreecommitdiff
path: root/src/backends/neon
diff options
context:
space:
mode:
authorColm Donelan <colm.donelan@arm.com>2024-02-01 15:00:43 +0000
committerColm Donelan <colm.donelan@arm.com>2024-02-21 09:36:37 +0000
commitb4ef16334900af33bf4321f28c90f62bf32238cd (patch)
tree0d8299e44df109d95ce21bf56b9441019e6c7403 /src/backends/neon
parent04a0da655f89e1c024cf16f31ab30176364c9362 (diff)
downloadarmnn-b4ef16334900af33bf4321f28c90f62bf32238cd.tar.gz
IVGCVSW-7854 Remove/rewrite asserts in the backends.
* Identify usages of ARMNN_ASSERT that should be proper exceptions.
* Change ARMNN_ASSERT in Doctests to CHECK.
* Verify any remaining assertions are reasonable.

Signed-off-by: Colm Donelan <colm.donelan@arm.com>
Change-Id: Ifd1f2a5a4bb60135e8654305035ec70e09c4dc2d
Diffstat (limited to 'src/backends/neon')
-rw-r--r--src/backends/neon/NeonTensorHandle.hpp7
-rw-r--r--src/backends/neon/NeonTimer.cpp8
-rw-r--r--src/backends/neon/test/NeonCreateWorkloadTests.cpp25
-rw-r--r--src/backends/neon/test/NeonTensorHandleTests.cpp7
-rw-r--r--src/backends/neon/workloads/NeonChannelShuffleWorkload.cpp5
-rw-r--r--src/backends/neon/workloads/NeonConstantWorkload.cpp7
-rw-r--r--src/backends/neon/workloads/NeonConvolution2dWorkload.cpp6
-rw-r--r--src/backends/neon/workloads/NeonConvolution3dWorkload.cpp5
-rw-r--r--src/backends/neon/workloads/NeonDepthwiseConvolutionWorkload.cpp5
-rw-r--r--src/backends/neon/workloads/NeonFullyConnectedWorkload.cpp6
-rw-r--r--src/backends/neon/workloads/NeonGatherNdWorkload.cpp4
-rw-r--r--src/backends/neon/workloads/NeonSqrtWorkload.cpp7
-rw-r--r--src/backends/neon/workloads/NeonTransposeConvolution2dWorkload.cpp11
-rw-r--r--src/backends/neon/workloads/NeonWorkloadUtils.hpp9
14 files changed, 48 insertions, 64 deletions
diff --git a/src/backends/neon/NeonTensorHandle.hpp b/src/backends/neon/NeonTensorHandle.hpp
index e5f210773d..303676043e 100644
--- a/src/backends/neon/NeonTensorHandle.hpp
+++ b/src/backends/neon/NeonTensorHandle.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -8,10 +8,9 @@
#include <BFloat16.hpp>
#include <Half.hpp>
-#include <armnn/utility/Assert.hpp>
-
#include <aclCommon/ArmComputeTensorHandle.hpp>
#include <aclCommon/ArmComputeTensorUtils.hpp>
+#include <armnn/Exceptions.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>
#include <arm_compute/runtime/MemoryGroup.h>
@@ -68,7 +67,7 @@ public:
// If we have enabled Importing, don't manage the tensor
if (!m_IsImportEnabled)
{
- ARMNN_ASSERT(m_MemoryGroup != nullptr);
+ ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(m_MemoryGroup, "arm_compute::MemoryGroup is null.");
m_MemoryGroup->manage(&m_Tensor);
}
}
diff --git a/src/backends/neon/NeonTimer.cpp b/src/backends/neon/NeonTimer.cpp
index dbb1503d24..88d8cb02e9 100644
--- a/src/backends/neon/NeonTimer.cpp
+++ b/src/backends/neon/NeonTimer.cpp
@@ -1,12 +1,11 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017, 2024 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "NeonTimer.hpp"
#include "NeonInterceptorScheduler.hpp"
-#include <armnn/utility/Assert.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>
#include <memory>
@@ -21,7 +20,10 @@ static thread_local auto g_Interceptor = std::make_shared<NeonInterceptorSchedul
void NeonTimer::Start()
{
m_Kernels.clear();
- ARMNN_ASSERT(g_Interceptor->GetKernels() == nullptr);
+ if (g_Interceptor->GetKernels() != nullptr)
+ {
+ throw RuntimeException("This NeonTimer instance has already been started.");
+ }
g_Interceptor->SetKernels(&m_Kernels);
m_RealSchedulerType = arm_compute::Scheduler::get_type();
diff --git a/src/backends/neon/test/NeonCreateWorkloadTests.cpp b/src/backends/neon/test/NeonCreateWorkloadTests.cpp
index 96429a84e1..9c32e32375 100644
--- a/src/backends/neon/test/NeonCreateWorkloadTests.cpp
+++ b/src/backends/neon/test/NeonCreateWorkloadTests.cpp
@@ -1,13 +1,11 @@
//
-// Copyright © 2017, 2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017, 2023-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "NeonWorkloadFactoryHelper.hpp"
#include <aclCommon/ArmComputeTensorUtils.hpp>
-#include <armnn/utility/Assert.hpp>
-#include <armnn/utility/IgnoreUnused.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>
#include <armnn/backends/MemCopyWorkload.hpp>
@@ -283,27 +281,20 @@ TEST_CASE("CreateConvolution2dFloatNhwcWorkload")
TEST_CASE("CreateConvolution2dFastMathEnabledWorkload")
{
Graph graph;
- using ModelOptions = std::vector<BackendOptions>;
+ using ModelOptions = std::vector<BackendOptions>;
ModelOptions modelOptions = {};
- BackendOptions cpuAcc("CpuAcc",
- {
- { "FastMathEnabled", true }
- });
+ BackendOptions cpuAcc("CpuAcc", { { "FastMathEnabled", true } });
modelOptions.push_back(cpuAcc);
NeonWorkloadFactory factory =
NeonWorkloadFactoryHelper::GetFactory(NeonWorkloadFactoryHelper::GetMemoryManager(), modelOptions);
- auto workload =
- CreateConvolution2dWorkloadFastMathTest<NeonConvolution2dWorkload, armnn::DataType::Float32>(factory,
- graph,
- DataLayout::NCHW,
- modelOptions);
+ auto workload = CreateConvolution2dWorkloadFastMathTest<NeonConvolution2dWorkload, armnn::DataType::Float32>(
+ factory, graph, DataLayout::NCHW, modelOptions);
- ARMNN_ASSERT(workload != nullptr);
+ CHECK(workload != nullptr);
auto conv2dWorkload = PolymorphicDowncast<NeonConvolution2dWorkload*>(workload.get());
- IgnoreUnused(conv2dWorkload);
- ARMNN_ASSERT(conv2dWorkload != nullptr);
- ARMNN_ASSERT(conv2dWorkload->GetConvolutionMethod() == arm_compute::ConvolutionMethod::WINOGRAD);
+ CHECK(conv2dWorkload != nullptr);
+ CHECK(conv2dWorkload->GetConvolutionMethod() == arm_compute::ConvolutionMethod::WINOGRAD);
}
template <typename armnn::DataType DataType>
diff --git a/src/backends/neon/test/NeonTensorHandleTests.cpp b/src/backends/neon/test/NeonTensorHandleTests.cpp
index bc8ad5de5a..d6fd081c65 100644
--- a/src/backends/neon/test/NeonTensorHandleTests.cpp
+++ b/src/backends/neon/test/NeonTensorHandleTests.cpp
@@ -1,10 +1,12 @@
//
-// Copyright © 2020-2021,2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020-2021, 2023-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include <Graph.hpp>
#include <Network.hpp>
+#include <aclCommon/BaseMemoryManager.hpp>
+
#include <neon/NeonTensorHandle.hpp>
#include <neon/NeonTensorHandleFactory.hpp>
@@ -16,7 +18,6 @@
#include <CommonTestUtils.hpp>
#include <doctest/doctest.h>
-#include <armnn/utility/Assert.hpp>
TEST_SUITE("NeonTensorHandleTests")
{
@@ -190,7 +191,7 @@ TEST_CASE("NeonTensorHandleSupportsInPlaceComputation")
NeonTensorHandleFactory handleFactory(memoryManager);
// NeonTensorHandleFactory supports InPlaceComputation
- ARMNN_ASSERT(handleFactory.SupportsInPlaceComputation());
+ CHECK(handleFactory.SupportsInPlaceComputation() == true);
}
}
diff --git a/src/backends/neon/workloads/NeonChannelShuffleWorkload.cpp b/src/backends/neon/workloads/NeonChannelShuffleWorkload.cpp
index a44c9aa0d4..8cd36db355 100644
--- a/src/backends/neon/workloads/NeonChannelShuffleWorkload.cpp
+++ b/src/backends/neon/workloads/NeonChannelShuffleWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2021-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -76,7 +76,8 @@ NeonChannelShuffleWorkload::NeonChannelShuffleWorkload(const ChannelShuffleQueue
aclDataLayout = ConvertDataLayout(armnn::DataLayout::NHWC);
break;
default:
- ARMNN_ASSERT_MSG(false, "Unsupported axis");
+ throw InvalidArgumentException("Value for axis: " + std::to_string(descriptor.m_Parameters.m_Axis) +
+ " is not valid");
break;
}
input.info()->set_data_layout(aclDataLayout);
diff --git a/src/backends/neon/workloads/NeonConstantWorkload.cpp b/src/backends/neon/workloads/NeonConstantWorkload.cpp
index f5b0128dc7..270e3fad5c 100644
--- a/src/backends/neon/workloads/NeonConstantWorkload.cpp
+++ b/src/backends/neon/workloads/NeonConstantWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017-2018,2020-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2018,2020-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -65,7 +65,7 @@ void NeonConstantWorkload::Execute() const
{
const ConstantQueueDescriptor& data = this->m_Data;
- ARMNN_ASSERT(data.m_LayerOutput != nullptr);
+ ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(data.m_LayerOutput, "Output tensor handle is null.");
arm_compute::ITensor& output =
PolymorphicDowncast<NeonTensorHandle*>(data.m_Outputs[0])->GetTensor();
arm_compute::DataType computeDataType =
@@ -116,8 +116,7 @@ void NeonConstantWorkload::Execute() const
}
default:
{
- ARMNN_ASSERT_MSG(false, "Unknown data type");
- break;
+ throw InvalidArgumentException("Unknown data type.");
}
}
diff --git a/src/backends/neon/workloads/NeonConvolution2dWorkload.cpp b/src/backends/neon/workloads/NeonConvolution2dWorkload.cpp
index c81022b915..fdc52ef797 100644
--- a/src/backends/neon/workloads/NeonConvolution2dWorkload.cpp
+++ b/src/backends/neon/workloads/NeonConvolution2dWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -95,8 +95,6 @@ NeonConvolution2dWorkload::NeonConvolution2dWorkload(
m_BiasTensor = std::make_unique<arm_compute::Tensor>();
BuildArmComputeTensor(*m_BiasTensor, info.m_InputTensorInfos[2], m_Data.m_Parameters.m_DataLayout);
m_BiasTensor->info()->set_are_values_constant(info.m_InputTensorInfos[2].IsConstant());
- // We assume here that NeonConvolution2dWorkloadValidate has been called before the constructor.
- ARMNN_ASSERT(info.m_InputTensorInfos[2].IsConstant() == true);
}
arm_compute::PadStrideInfo padStrideInfo = BuildArmComputePadStrideInfo(m_Data.m_Parameters);
@@ -141,8 +139,6 @@ NeonConvolution2dWorkload::NeonConvolution2dWorkload(
GetGuid());
m_ConvolutionLayer.reset(convolutionLayer.release());
-
- ARMNN_ASSERT(m_ConvolutionLayer);
m_KernelTensorInfo = info.m_InputTensorInfos[1];
if (m_Data.m_Parameters.m_BiasEnabled)
diff --git a/src/backends/neon/workloads/NeonConvolution3dWorkload.cpp b/src/backends/neon/workloads/NeonConvolution3dWorkload.cpp
index 5bf6e100ed..ef03dde542 100644
--- a/src/backends/neon/workloads/NeonConvolution3dWorkload.cpp
+++ b/src/backends/neon/workloads/NeonConvolution3dWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2021-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -105,9 +105,6 @@ NeonConvolution3dWorkload::NeonConvolution3dWorkload(const Convolution3dQueueDes
this->GetGuid());
m_ConvolutionLayer.reset(convolutionLayer.release());
-
- ARMNN_ASSERT(m_ConvolutionLayer);
-
m_ConvolutionLayer->prepare();
}
diff --git a/src/backends/neon/workloads/NeonDepthwiseConvolutionWorkload.cpp b/src/backends/neon/workloads/NeonDepthwiseConvolutionWorkload.cpp
index b9e9ebb785..de6601f336 100644
--- a/src/backends/neon/workloads/NeonDepthwiseConvolutionWorkload.cpp
+++ b/src/backends/neon/workloads/NeonDepthwiseConvolutionWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -146,15 +146,12 @@ NeonDepthwiseConvolutionWorkload::NeonDepthwiseConvolutionWorkload(
detailsInfo,
GetGuid());
- ARMNN_ASSERT(m_pDepthwiseConvolutionLayer);
-
m_pDepthwiseConvolutionLayer->prepare();
}
void NeonDepthwiseConvolutionWorkload::Execute() const
{
ARMNN_SCOPED_PROFILING_EVENT_NEON_NAME_GUID("NeonDepthwiseConvolutionWorkload_Execute");
- ARMNN_ASSERT(m_pDepthwiseConvolutionLayer);
m_pDepthwiseConvolutionLayer->run();
}
diff --git a/src/backends/neon/workloads/NeonFullyConnectedWorkload.cpp b/src/backends/neon/workloads/NeonFullyConnectedWorkload.cpp
index 9503abdee8..d37279266a 100644
--- a/src/backends/neon/workloads/NeonFullyConnectedWorkload.cpp
+++ b/src/backends/neon/workloads/NeonFullyConnectedWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -37,7 +37,9 @@ arm_compute::Status NeonFullyConnectedWorkloadValidate(const TensorInfo& input,
arm_compute::TensorInfo* optionalAclBiases = nullptr;
if (descriptor.m_BiasEnabled)
{
- ARMNN_ASSERT(biases.has_value());
+ ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(
+ biases.has_value(),
+ "NeonFullyConnectedWorkload: Bias was enabled in the descriptor but no value was supplied.");
aclBiases = BuildArmComputeTensorInfo(biases.value());
aclBiases.set_are_values_constant(biases.value().IsConstant());
optionalAclBiases = &aclBiases;
diff --git a/src/backends/neon/workloads/NeonGatherNdWorkload.cpp b/src/backends/neon/workloads/NeonGatherNdWorkload.cpp
index 93884725da..59fc20afad 100644
--- a/src/backends/neon/workloads/NeonGatherNdWorkload.cpp
+++ b/src/backends/neon/workloads/NeonGatherNdWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -139,8 +139,6 @@ NeonGatherNdWorkload::NeonGatherNdWorkload(const GatherNdQueueDescriptor& descri
flattenedCoeff_Info.SetShape({ keyIndices["ND"] });
BuildArmComputeTensor(m_FlattenedCoeff, flattenedCoeff_Info);
armcomputetensorutils::InitialiseArmComputeTensorEmpty(m_FlattenedCoeff);
- ARMNN_ASSERT_MSG(indicesInfo.GetDataType() == DataType::Signed32,
- "flattenedCoeff must be same data type as m_FlattenedCoeff");
CopyArmComputeITensorData<int32_t>(flattenedCoeff.data(), m_FlattenedCoeff);
// Prepare the tensor to store the output of the multiplication
diff --git a/src/backends/neon/workloads/NeonSqrtWorkload.cpp b/src/backends/neon/workloads/NeonSqrtWorkload.cpp
index 9c3d8a0a99..ee57a0184c 100644
--- a/src/backends/neon/workloads/NeonSqrtWorkload.cpp
+++ b/src/backends/neon/workloads/NeonSqrtWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -31,7 +31,10 @@ arm_compute::Status NeonSqrtWorkloadValidate(const TensorInfo& input, const Tens
NeonSqrtWorkload::NeonSqrtWorkload(const ElementwiseUnaryQueueDescriptor& descriptor, const WorkloadInfo& info)
: NeonBaseWorkload<ElementwiseUnaryQueueDescriptor>(descriptor, info)
{
- ARMNN_ASSERT(descriptor.m_Parameters.m_Operation == UnaryOperation::Sqrt);
+ if (descriptor.m_Parameters.m_Operation != UnaryOperation::Sqrt)
+ {
+ throw InvalidArgumentException("NeonSqrtWorkload: The descriptor does not indicate a Sqrt operation.");
+ }
ARMNN_REPORT_PROFILING_WORKLOAD_DESC("NeonSqrtWorkload_Construct",
descriptor.m_Parameters,
diff --git a/src/backends/neon/workloads/NeonTransposeConvolution2dWorkload.cpp b/src/backends/neon/workloads/NeonTransposeConvolution2dWorkload.cpp
index 2fa118b679..2eedf98cd1 100644
--- a/src/backends/neon/workloads/NeonTransposeConvolution2dWorkload.cpp
+++ b/src/backends/neon/workloads/NeonTransposeConvolution2dWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2019-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2019-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "NeonTransposeConvolution2dWorkload.hpp"
@@ -37,9 +37,10 @@ arm_compute::Status NeonTransposeConvolution2dWorkloadValidate(const TensorInfo&
if (descriptor.m_BiasEnabled)
{
- ARMNN_ASSERT(biases.has_value());
-
- aclBiasesInfo = BuildArmComputeTensorInfo(biases.value(), descriptor.m_DataLayout);
+ ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(
+ biases.has_value(),
+ "NeonTransposeConvolution2dWorkload: Bias was enabled in the descriptor but no value was supplied.");
+ aclBiasesInfo = BuildArmComputeTensorInfo(biases.value(), descriptor.m_DataLayout);
optionalAclBiasesInfo = &aclBiasesInfo;
}
@@ -97,8 +98,6 @@ NeonTransposeConvolution2dWorkload::NeonTransposeConvolution2dWorkload(
m_Layer = std::make_unique<arm_compute::NEDeconvolutionLayer>(memoryManager);
m_Layer->configure(&input, m_KernelTensor.get(), m_BiasTensor.get(), &output, padStrideInfo);
- ARMNN_ASSERT(m_Layer);
-
InitializeArmComputeTensorData(*m_KernelTensor, m_Data.m_Weight);
if (m_Data.m_Parameters.m_BiasEnabled)
diff --git a/src/backends/neon/workloads/NeonWorkloadUtils.hpp b/src/backends/neon/workloads/NeonWorkloadUtils.hpp
index 694c3ab928..2c7cd1bf3e 100644
--- a/src/backends/neon/workloads/NeonWorkloadUtils.hpp
+++ b/src/backends/neon/workloads/NeonWorkloadUtils.hpp
@@ -1,9 +1,10 @@
//
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
+#include <armnn/Exceptions.hpp>
#include <armnn/backends/Workload.hpp>
#include <aclCommon/ArmComputeTensorUtils.hpp>
#include <neon/NeonTensorHandle.hpp>
@@ -69,8 +70,7 @@ inline void InitializeArmComputeTensorData(arm_compute::Tensor& tensor,
TensorInfo tensorInfo,
const ITensorHandle* handle)
{
- ARMNN_ASSERT(handle);
-
+ ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(handle, "Null tensor handle passed to InitializeArmComputeTensorData.");
switch(tensorInfo.GetDataType())
{
case DataType::Float16:
@@ -104,8 +104,7 @@ inline void InitializeArmComputeTensorData(arm_compute::Tensor& tensor,
inline void InitializeArmComputeTensorData(arm_compute::Tensor& tensor,
const ConstTensorHandle* handle)
{
- ARMNN_ASSERT(handle);
-
+ ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(handle, "Null tensor handle passed to InitializeArmComputeTensorData.");
switch(handle->GetTensorInfo().GetDataType())
{
case DataType::Float16: