author    Colm Donelan <colm.donelan@arm.com>    2024-02-01 15:00:43 +0000
committer Colm Donelan <colm.donelan@arm.com>    2024-02-21 09:36:37 +0000
commit    b4ef16334900af33bf4321f28c90f62bf32238cd (patch)
tree      0d8299e44df109d95ce21bf56b9441019e6c7403
parent    04a0da655f89e1c024cf16f31ab30176364c9362 (diff)
download  armnn-b4ef16334900af33bf4321f28c90f62bf32238cd.tar.gz
IVGCVSW-7854 Remove/rewrite asserts in the backends.
* Identify usages of ARMNN_ASSERT that should be proper exceptions.
* Change ARMNN_ASSERT in Doctests to CHECK.
* Verify any remaining assertions are reasonable.

Signed-off-by: Colm Donelan <colm.donelan@arm.com>
Change-Id: Ifd1f2a5a4bb60135e8654305035ec70e09c4dc2d
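As a minimal sketch of the conversion pattern this patch applies (the function names and enum cases below are illustrative only, not taken verbatim from any changed file; it assumes <armnn/Exceptions.hpp> supplies InvalidArgumentException and ARMNN_THROW_INVALIDARG_MSG_IF_FALSE, as the updated includes indicate):

    #include <armnn/Exceptions.hpp>
    #include <armnn/Types.hpp>

    // Before: ARMNN_ASSERT(handle); -- compiled out of release builds.
    // After: the precondition is enforced in every build type.
    inline void CheckHandle(const void* handle)
    {
        ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(handle, "Null tensor handle passed to CheckHandle.");
    }

    // Before: ARMNN_ASSERT_MSG(false, "Unknown data type"); followed by a fallback return value.
    // After: an unsupported enum value throws instead of silently returning a default.
    inline unsigned int GetElementSizeInBytes(armnn::DataType type)
    {
        switch (type)
        {
            case armnn::DataType::Float32:  return 4u;
            case armnn::DataType::QAsymmU8: return 1u;
            default:
                throw armnn::InvalidArgumentException("Unknown data type.");
        }
    }

In the doctest-based unit tests the same asserts become doctest CHECK macros, e.g. ARMNN_ASSERT(workload != nullptr) becomes CHECK(workload != nullptr).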
-rw-r--r-- src/backends/aclCommon/ArmComputeSubgraphUtils.hpp | 8
-rw-r--r-- src/backends/aclCommon/ArmComputeTensorUtils.cpp | 7
-rw-r--r-- src/backends/aclCommon/ArmComputeUtils.hpp | 13
-rw-r--r-- src/backends/aclCommon/BaseMemoryManager.cpp | 10
-rw-r--r-- src/backends/backendsCommon/LayerSupportRules.hpp | 5
-rw-r--r-- src/backends/backendsCommon/MakeWorkloadHelper.hpp | 5
-rw-r--r-- src/backends/backendsCommon/WorkloadData.cpp | 5
-rw-r--r-- src/backends/backendsCommon/WorkloadFactory.cpp | 32
-rw-r--r-- src/backends/cl/ClBackend.cpp | 3
-rw-r--r-- src/backends/cl/ClBackendContext.cpp | 6
-rw-r--r-- src/backends/cl/ClContextControl.cpp | 10
-rw-r--r-- src/backends/cl/ClImportTensorHandle.hpp | 4
-rw-r--r-- src/backends/cl/ClImportTensorHandleFactory.hpp | 1
-rw-r--r-- src/backends/cl/ClTensorHandleFactory.cpp | 3
-rw-r--r-- src/backends/cl/workloads/ClChannelShuffleWorkload.cpp | 6
-rw-r--r-- src/backends/cl/workloads/ClConstantWorkload.cpp | 7
-rw-r--r-- src/backends/cl/workloads/ClConvolution2dWorkload.cpp | 5
-rw-r--r-- src/backends/cl/workloads/ClDepthwiseConvolutionWorkload.cpp | 4
-rw-r--r-- src/backends/cl/workloads/ClFullyConnectedWorkload.cpp | 6
-rw-r--r-- src/backends/cl/workloads/ClGatherNdWorkload.cpp | 4
-rw-r--r-- src/backends/cl/workloads/ClSqrtWorkload.cpp | 7
-rw-r--r-- src/backends/cl/workloads/ClTransposeConvolution2dWorkload.cpp | 9
-rw-r--r-- src/backends/cl/workloads/ClWorkloadUtils.hpp | 5
-rw-r--r-- src/backends/gpuFsa/workloads/GpuFsaConstantWorkload.cpp | 4
-rw-r--r-- src/backends/gpuFsa/workloads/GpuFsaWorkloadUtils.hpp | 2
-rw-r--r-- src/backends/neon/NeonTensorHandle.hpp | 7
-rw-r--r-- src/backends/neon/NeonTimer.cpp | 8
-rw-r--r-- src/backends/neon/test/NeonCreateWorkloadTests.cpp | 25
-rw-r--r-- src/backends/neon/test/NeonTensorHandleTests.cpp | 7
-rw-r--r-- src/backends/neon/workloads/NeonChannelShuffleWorkload.cpp | 5
-rw-r--r-- src/backends/neon/workloads/NeonConstantWorkload.cpp | 7
-rw-r--r-- src/backends/neon/workloads/NeonConvolution2dWorkload.cpp | 6
-rw-r--r-- src/backends/neon/workloads/NeonConvolution3dWorkload.cpp | 5
-rw-r--r-- src/backends/neon/workloads/NeonDepthwiseConvolutionWorkload.cpp | 5
-rw-r--r-- src/backends/neon/workloads/NeonFullyConnectedWorkload.cpp | 6
-rw-r--r-- src/backends/neon/workloads/NeonGatherNdWorkload.cpp | 4
-rw-r--r-- src/backends/neon/workloads/NeonSqrtWorkload.cpp | 7
-rw-r--r-- src/backends/neon/workloads/NeonTransposeConvolution2dWorkload.cpp | 11
-rw-r--r-- src/backends/neon/workloads/NeonWorkloadUtils.hpp | 9
-rw-r--r-- src/backends/reference/RefLayerSupport.cpp | 4
-rw-r--r-- src/backends/reference/RefMemoryManager.cpp | 18
-rw-r--r-- src/backends/reference/RefTensorHandle.cpp | 6
-rw-r--r-- src/backends/reference/workloads/BaseIterator.hpp | 13
-rw-r--r-- src/backends/reference/workloads/BatchMatMulImpl.cpp | 3
-rw-r--r-- src/backends/reference/workloads/Concatenate.cpp | 6
-rw-r--r-- src/backends/reference/workloads/ConvImpl.cpp | 13
-rw-r--r-- src/backends/reference/workloads/DepthToSpace.cpp | 5
-rw-r--r-- src/backends/reference/workloads/Dequantize.cpp | 9
-rw-r--r-- src/backends/reference/workloads/DetectionPostProcess.cpp | 16
-rw-r--r-- src/backends/reference/workloads/FullyConnected.cpp | 5
-rw-r--r-- src/backends/reference/workloads/LogSoftmax.cpp | 10
-rw-r--r-- src/backends/reference/workloads/MirrorPad.cpp | 10
-rw-r--r-- src/backends/reference/workloads/Reduce.cpp | 4
-rw-r--r-- src/backends/reference/workloads/RefLogSoftmaxWorkload.cpp | 5
-rw-r--r-- src/backends/reference/workloads/RefStridedSliceWorkload.cpp | 7
-rw-r--r-- src/backends/reference/workloads/Resize.cpp | 5
-rw-r--r-- src/backends/reference/workloads/Softmax.cpp | 10
-rw-r--r-- src/backends/reference/workloads/Splitter.cpp | 7
-rw-r--r-- src/backends/reference/workloads/Splitter.hpp | 10
-rw-r--r-- src/backends/reference/workloads/StridedSlice.cpp | 8
-rw-r--r-- src/backends/reference/workloads/TensorBufferArrayView.hpp | 7
-rw-r--r-- src/backends/tosaCommon/operatorMappings/ElementwiseBinaryOperator.cpp | 3
-rw-r--r-- src/backends/tosaCommon/operatorMappings/ElementwiseUnaryOperator.cpp | 4
-rw-r--r-- src/backends/tosaReference/TosaRefMemoryManager.cpp | 19
-rw-r--r-- src/backends/tosaReference/TosaRefTensorHandle.cpp | 8
65 files changed, 208 insertions, 290 deletions
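The patch also distinguishes which exception to throw: invalid caller-supplied arguments raise InvalidArgumentException via ARMNN_THROW_INVALIDARG_MSG_IF_FALSE, while violated internal lifecycle invariants (for example in RefMemoryManager::Pool and RefTensorHandle) raise RuntimeException via ARMNN_THROW_MSG_IF_FALSE. A small sketch of that split, with illustrative class and member names that do not appear in the patch:

    #include <armnn/Exceptions.hpp>

    class PoolSketch
    {
    public:
        // Caller error: a null argument becomes an InvalidArgumentException.
        void Attach(void* memory)
        {
            ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(memory, "Null memory passed to Attach.");
            m_Pointer = memory;
        }

        // Internal invariant: calling GetPointer() before Attach() becomes a RuntimeException.
        void* GetPointer() const
        {
            ARMNN_THROW_MSG_IF_FALSE(m_Pointer, armnn::RuntimeException,
                                     "GetPointer() called before memory was attached.");
            return m_Pointer;
        }

    private:
        void* m_Pointer = nullptr;
    };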
diff --git a/src/backends/aclCommon/ArmComputeSubgraphUtils.hpp b/src/backends/aclCommon/ArmComputeSubgraphUtils.hpp
index a44acb0f54..9b889141be 100644
--- a/src/backends/aclCommon/ArmComputeSubgraphUtils.hpp
+++ b/src/backends/aclCommon/ArmComputeSubgraphUtils.hpp
@@ -1,12 +1,11 @@
//
-// Copyright © 2020-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
#include <armnn/backends/OptimizationViews.hpp>
-#include <armnn/utility/Assert.hpp>
#include <aclCommon/ArmComputeUtils.hpp>
#include <backendsCommon/SubgraphUtils.hpp>
@@ -330,11 +329,6 @@ std::vector<IConnectableLayer*> ChainReduceLayers(OptimizationViews& optimizatio
layers.emplace_back(replacementLayer);
}
-
- // Check if the TensorInfo from the last layer equals the inferred output from the original layer.
- ARMNN_ASSERT(baseLayer->GetOutputSlot(0).GetTensorInfo() ==
- PolymorphicDowncast<Layer*>(layers.back())->GetOutputSlot().GetTensorInfo());
-
return layers;
}
diff --git a/src/backends/aclCommon/ArmComputeTensorUtils.cpp b/src/backends/aclCommon/ArmComputeTensorUtils.cpp
index a11b966f34..c5b4fa157e 100644
--- a/src/backends/aclCommon/ArmComputeTensorUtils.cpp
+++ b/src/backends/aclCommon/ArmComputeTensorUtils.cpp
@@ -2,10 +2,11 @@
// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
+
+#include <armnn/Exceptions.hpp>
#include <aclCommon/ArmComputeTensorUtils.hpp>
#include <aclCommon/ArmComputeUtils.hpp>
-#include "armnn/Exceptions.hpp"
#include "ArmComputeUtils.hpp"
#include <armnn/Descriptors.hpp>
@@ -43,7 +44,6 @@ arm_compute::DataType GetArmComputeDataType(armnn::DataType dataType, bool multi
case armnn::DataType::Signed32:
return arm_compute::DataType::S32;
default:
- ARMNN_ASSERT_MSG(false, "Unknown data type");
return arm_compute::DataType::UNKNOWN;
}
}
@@ -75,8 +75,7 @@ armnn::DataType GetArmNNDataType(arm_compute::DataType dataType)
case arm_compute::DataType::S32:
return armnn::DataType::Signed32;
default:
- ARMNN_ASSERT_MSG(false, "Unknown data type");
- return armnn::DataType::Float32;
+ throw InvalidArgumentException("Unknown arm_compute::DataType data type");
}
}
diff --git a/src/backends/aclCommon/ArmComputeUtils.hpp b/src/backends/aclCommon/ArmComputeUtils.hpp
index 9a30a7456e..d7025aa5e2 100644
--- a/src/backends/aclCommon/ArmComputeUtils.hpp
+++ b/src/backends/aclCommon/ArmComputeUtils.hpp
@@ -1,12 +1,12 @@
//
-// Copyright © 2017-2023 Arm Ltd. All rights reserved.
+// Copyright © 2017-2024 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
#include <armnn/Descriptors.hpp>
+#include <armnn/Exceptions.hpp>
#include <armnn/Tensor.hpp>
-#include <armnn/utility/Assert.hpp>
#include <armnn/utility/NumericCast.hpp>
#include <armnn/backends/WorkloadData.hpp>
#include <armnnUtils/TensorUtils.hpp>
@@ -233,8 +233,7 @@ inline T ComputeSoftmaxAclAxis(const SoftmaxDescriptor& softmaxDesc, const armnn
}
unsigned int dim = tensor.GetNumDimensions();
-
- ARMNN_ASSERT(dim != 0);
+ ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(dim != 0, "The number of dimensions in this tensor cannot be zero.");
// Currently ArmNN support axis 1.
auto aclAxis = (static_cast<T>(dim) - 1);
@@ -274,9 +273,9 @@ inline int ComputeAclAxis(const int& armnnAxis, const armnn::TensorInfo& tensor)
{
int rank = static_cast<int>(tensor.GetNumDimensions());
- ARMNN_ASSERT(rank != 0);
- ARMNN_ASSERT((-1 * rank) <= armnnAxis);
- ARMNN_ASSERT(armnnAxis < rank);
+ ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(rank != 0, "The number of dimensions in this tensor cannot be zero.");
+ ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(armnnAxis < rank, "Incompatible value of armnnAxis.");
+ ARMNN_THROW_INVALIDARG_MSG_IF_FALSE((-1 * rank) <= armnnAxis, "Incompatible value of armnnAxis.");
int sign = (armnnAxis < 0) ? -1 : 1;
int aclAxis = sign * rank - 1 - armnnAxis;
diff --git a/src/backends/aclCommon/BaseMemoryManager.cpp b/src/backends/aclCommon/BaseMemoryManager.cpp
index 206cf9b230..50517cb54c 100644
--- a/src/backends/aclCommon/BaseMemoryManager.cpp
+++ b/src/backends/aclCommon/BaseMemoryManager.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017-2023 Arm Ltd. All rights reserved.
+// Copyright © 2017-2024 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "BaseMemoryManager.hpp"
@@ -18,7 +18,7 @@ namespace armnn
BaseMemoryManager::BaseMemoryManager(std::shared_ptr<arm_compute::IAllocator> alloc,
MemoryAffinity memoryAffinity)
{
- ARMNN_ASSERT(alloc);
+ ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(alloc, "A null allocator has been passed to BaseMemoryManager.");
m_Allocator = std::move(alloc);
m_IntraLayerMemoryMgr = CreateArmComputeMemoryManager(memoryAffinity);
@@ -50,30 +50,24 @@ void BaseMemoryManager::Acquire()
static const size_t s_NumPools = 1;
// Allocate memory pools for intra-layer memory manager
- ARMNN_ASSERT(m_IntraLayerMemoryMgr);
m_IntraLayerMemoryMgr->populate(*m_Allocator, s_NumPools);
// Allocate memory pools for inter-layer memory manager
- ARMNN_ASSERT(m_InterLayerMemoryMgr);
m_InterLayerMemoryMgr->populate(*m_Allocator, s_NumPools);
// Acquire inter-layer memory group. NOTE: This has to come after allocating the pools
- ARMNN_ASSERT(m_InterLayerMemoryGroup);
m_InterLayerMemoryGroup->acquire();
}
void BaseMemoryManager::Release()
{
// Release inter-layer memory group. NOTE: This has to come before releasing the pools
- ARMNN_ASSERT(m_InterLayerMemoryGroup);
m_InterLayerMemoryGroup->release();
// Release memory pools managed by intra-layer memory manager
- ARMNN_ASSERT(m_IntraLayerMemoryMgr);
m_IntraLayerMemoryMgr->clear();
// Release memory pools managed by inter-layer memory manager
- ARMNN_ASSERT(m_InterLayerMemoryMgr);
m_InterLayerMemoryMgr->clear();
}
#else
diff --git a/src/backends/backendsCommon/LayerSupportRules.hpp b/src/backends/backendsCommon/LayerSupportRules.hpp
index a83fd62867..1bd825b774 100644
--- a/src/backends/backendsCommon/LayerSupportRules.hpp
+++ b/src/backends/backendsCommon/LayerSupportRules.hpp
@@ -1,11 +1,10 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017, 2024 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
-#include <armnn/utility/Assert.hpp>
#include <algorithm>
namespace armnn
@@ -29,7 +28,7 @@ inline armnn::Optional<armnn::DataType> GetBiasTypeFromWeightsType(armnn::Option
case armnn::DataType::QSymmS16:
return armnn::DataType::Signed32;
default:
- ARMNN_ASSERT_MSG(false, "GetBiasTypeFromWeightsType(): Unsupported data type.");
+ throw InvalidArgumentException("GetBiasTypeFromWeightsType(): Unsupported data type.");
}
return armnn::EmptyOptional();
}
diff --git a/src/backends/backendsCommon/MakeWorkloadHelper.hpp b/src/backends/backendsCommon/MakeWorkloadHelper.hpp
index 560182286e..8ed6f05223 100644
--- a/src/backends/backendsCommon/MakeWorkloadHelper.hpp
+++ b/src/backends/backendsCommon/MakeWorkloadHelper.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017, 2024 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
@@ -70,8 +70,7 @@ std::unique_ptr<IWorkload> MakeWorkloadHelper(const QueueDescriptorType& descrip
case DataType::QSymmS16:
return nullptr;
default:
- ARMNN_ASSERT_MSG(false, "Unknown DataType.");
- return nullptr;
+ throw InvalidArgumentException("Unknown data type passed to MakeWorkloadHelper");
}
}
diff --git a/src/backends/backendsCommon/WorkloadData.cpp b/src/backends/backendsCommon/WorkloadData.cpp
index 2d7a5fdffc..0ddb4291f1 100644
--- a/src/backends/backendsCommon/WorkloadData.cpp
+++ b/src/backends/backendsCommon/WorkloadData.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -40,8 +40,7 @@ DataType GetBiasDataType(DataType inputDataType)
case DataType::QSymmS16:
return DataType::Signed32;
default:
- ARMNN_ASSERT_MSG(false, "Invalid input data type");
- return DataType::Float32;
+ throw InvalidArgumentException("GetBiasDataType(): Unsupported data type.");
}
}
diff --git a/src/backends/backendsCommon/WorkloadFactory.cpp b/src/backends/backendsCommon/WorkloadFactory.cpp
index 2538211a41..1f8d4dae1d 100644
--- a/src/backends/backendsCommon/WorkloadFactory.cpp
+++ b/src/backends/backendsCommon/WorkloadFactory.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -61,7 +61,7 @@ inline armnn::Optional<armnn::DataType> GetBiasTypeFromWeightsType(armnn::Option
case armnn::DataType::QSymmS16:
return armnn::DataType::Signed32;
default:
- ARMNN_ASSERT_MSG(false, "GetBiasTypeFromWeightsType(): Unsupported data type.");
+ throw InvalidArgumentException("GetBiasTypeFromWeightsType(): Unsupported data type.");
}
return armnn::EmptyOptional();
}
@@ -262,8 +262,9 @@ bool IWorkloadFactory::IsLayerConfigurationSupported(const BackendId& backendId,
const TensorInfo input = OverrideDataType(layer.GetInputSlot(0).GetTensorInfo(),
dataType);
const TensorInfo output = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);
- ARMNN_ASSERT_MSG(layer.GetInputSlot(1).GetConnection(),
- "Convolution2dLayer: Weights should be connected as a Constant Layer.");
+
+ ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(layer.GetInputSlot(1).GetConnection(),
+ "Convolution2dLayer: Weights should be connected as a Constant Layer.");
const TensorInfo weights = OverrideDataType(layer.GetInputSlot(1).GetTensorInfo(),
dataType);
@@ -273,8 +274,8 @@ bool IWorkloadFactory::IsLayerConfigurationSupported(const BackendId& backendId,
Optional<TensorInfo> biases;
if (descriptor.m_BiasEnabled)
{
- ARMNN_ASSERT_MSG(layer.GetInputSlot(2).GetConnection(),
- "Convolution2dLayer: Bias should be connected as a Constant Layer.");
+ ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(layer.GetInputSlot(2).GetConnection(),
+ "Convolution2dLayer:Bias should be connected as a Constant Layer.");
biases = OverrideDataType(layer.GetInputSlot(2).GetTensorInfo(),
GetBiasTypeFromWeightsType(dataType));
}
@@ -296,8 +297,8 @@ bool IWorkloadFactory::IsLayerConfigurationSupported(const BackendId& backendId,
dataType);
const TensorInfo output = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);
- ARMNN_ASSERT_MSG(layer.GetInputSlot(1).GetConnection(),
- "Convolution3dLayer: Weights should be connected as a Constant Layer.");
+ ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(layer.GetInputSlot(1).GetConnection(),
+ "Convolution3dLayer: Weights should be connected as a Constant Layer.");
const TensorInfo weights = OverrideDataType(layer.GetInputSlot(1).GetTensorInfo(),
dataType);
@@ -352,8 +353,6 @@ bool IWorkloadFactory::IsLayerConfigurationSupported(const BackendId& backendId,
const TensorInfo& weights = OverrideDataType(layer.GetInputSlot(1).GetTensorInfo(),
dataType);
- ARMNN_ASSERT(cLayer->GetInputSlot(1).GetConnection() != nullptr);
-
const DepthwiseConvolution2dDescriptor& descriptor = cLayer->GetParameters();
// Construct optional biases object based on the value of m_BiasEnabled
@@ -524,7 +523,7 @@ bool IWorkloadFactory::IsLayerConfigurationSupported(const BackendId& backendId,
}
default:
{
- ARMNN_ASSERT_MSG(false, "Unexpected bias type");
+ throw InvalidArgumentException("Unexpected bias type");
}
}
}
@@ -987,9 +986,6 @@ bool IWorkloadFactory::IsLayerConfigurationSupported(const BackendId& backendId,
LstmInputParamsInfo paramsInfo;
// Basic parameters
- ARMNN_ASSERT(cLayer->m_BasicParameters.m_InputToForgetWeights.get() != nullptr);
- ARMNN_ASSERT(cLayer->m_BasicParameters.m_InputToCellWeights.get() != nullptr);
- ARMNN_ASSERT(cLayer->m_BasicParameters.m_InputToOutputWeights.get() != nullptr);
paramsInfo.m_InputToForgetWeights = &cLayer->m_BasicParameters.m_InputToForgetWeights->GetTensorInfo();
paramsInfo.m_InputToCellWeights = &cLayer->m_BasicParameters.m_InputToCellWeights->GetTensorInfo();
paramsInfo.m_InputToOutputWeights = &cLayer->m_BasicParameters.m_InputToOutputWeights->GetTensorInfo();
@@ -1431,12 +1427,15 @@ bool IWorkloadFactory::IsLayerConfigurationSupported(const BackendId& backendId,
Optional<TensorInfo> biases;
if (descriptor.m_BiasEnabled)
{
- ARMNN_ASSERT(cLayer->m_Bias.get() != nullptr);
+ ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(
+ cLayer->m_Bias.get() != nullptr,
+ "TransposeConvolution2d: Bias was enabled in the descriptor but no value was supplied.");
biases = OverrideDataType(cLayer->m_Bias->GetTensorInfo(),
GetBiasTypeFromWeightsType(dataType));
}
- ARMNN_ASSERT(cLayer->m_Weight.get() != nullptr);
+ ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(cLayer->m_Weight.get() != nullptr,
+ "TransposeConvolution2d: Weights cannot be null.");
const TensorInfo weights = OverrideDataType(cLayer->m_Weight->GetTensorInfo(), dataType);
result = layerSupportObject.IsTransposeConvolution2dSupported(input,
@@ -1602,7 +1601,6 @@ bool IWorkloadFactory::IsLayerConfigurationSupported(const BackendId& backendId,
}
default:
{
- ARMNN_ASSERT_MSG(false, "WorkloadFactory did not recognise type of layer.");
reason.value() = "Unrecognised layer type";
result = false;
break;
diff --git a/src/backends/cl/ClBackend.cpp b/src/backends/cl/ClBackend.cpp
index 6d191a594b..3073999072 100644
--- a/src/backends/cl/ClBackend.cpp
+++ b/src/backends/cl/ClBackend.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017-2023 Arm Ltd. All rights reserved.
+// Copyright © 2017-2024 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -18,7 +18,6 @@
#include <aclCommon/ArmComputeSubgraphUtils.hpp>
#include <aclCommon/ArmComputeUtils.hpp>
-#include <aclCommon/BaseMemoryManager.hpp>
#include <armnn/backends/IBackendContext.hpp>
#include <armnn/backends/IMemoryManager.hpp>
diff --git a/src/backends/cl/ClBackendContext.cpp b/src/backends/cl/ClBackendContext.cpp
index adee2763ba..8df8143927 100644
--- a/src/backends/cl/ClBackendContext.cpp
+++ b/src/backends/cl/ClBackendContext.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017, 2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017, 2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -7,7 +7,6 @@
#include "ClContextControl.hpp"
#include <armnn/Logging.hpp>
-#include <armnn/utility/Assert.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>
#include <arm_compute/core/CL/OpenCL.h>
@@ -94,8 +93,7 @@ ClBackendContext::ClBackendContext(const IRuntime::CreationOptions& options)
return TuningLevel::Exhaustive;
default:
{
- ARMNN_ASSERT_MSG(false, "Tuning level not recognised.");
- return TuningLevel::None;
+ throw InvalidArgumentException("Invalid value of tuning level specified.");
}
}
};
diff --git a/src/backends/cl/ClContextControl.cpp b/src/backends/cl/ClContextControl.cpp
index 34eca961b4..20223ae384 100644
--- a/src/backends/cl/ClContextControl.cpp
+++ b/src/backends/cl/ClContextControl.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017, 2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017, 2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -9,9 +9,6 @@
#include <LeakChecking.hpp>
-#include <armnn/utility/Assert.hpp>
-#include <armnn/utility/IgnoreUnused.hpp>
-
#include <arm_compute/core/CL/CLKernelLibrary.h>
#include <arm_compute/runtime/CL/CLScheduler.h>
@@ -34,9 +31,6 @@ ClContextControl::ClContextControl(arm_compute::CLTuner *tuner,
, m_HeuristicsHandle(heuristicsHandle)
, m_ProfilingEnabled(profilingEnabled)
{
- // Ignore m_ProfilingEnabled if unused to avoid compiling problems when ArmCompute is disabled.
- IgnoreUnused(m_ProfilingEnabled);
-
try
{
std::vector<cl::Platform> platforms;
@@ -60,11 +54,9 @@ ClContextControl::ClContextControl(arm_compute::CLTuner *tuner,
// Removes the use of global CL context.
cl::Context::setDefault(cl::Context{});
- ARMNN_ASSERT(cl::Context::getDefault()() == NULL);
// Removes the use of global CL command queue.
cl::CommandQueue::setDefault(cl::CommandQueue{});
- ARMNN_ASSERT(cl::CommandQueue::getDefault()() == NULL);
// Always load the OpenCL runtime.
LoadOpenClRuntime();
diff --git a/src/backends/cl/ClImportTensorHandle.hpp b/src/backends/cl/ClImportTensorHandle.hpp
index a03a4e9ea6..b863f08758 100644
--- a/src/backends/cl/ClImportTensorHandle.hpp
+++ b/src/backends/cl/ClImportTensorHandle.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -244,8 +244,6 @@ private:
{
throw MemoryImportException(status.error_description());
}
-
- ARMNN_ASSERT(!m_Tensor.info()->is_resizable());
return imported;
}
// Only used for testing
diff --git a/src/backends/cl/ClImportTensorHandleFactory.hpp b/src/backends/cl/ClImportTensorHandleFactory.hpp
index 7e22949647..b22eb52ed3 100644
--- a/src/backends/cl/ClImportTensorHandleFactory.hpp
+++ b/src/backends/cl/ClImportTensorHandleFactory.hpp
@@ -4,7 +4,6 @@
//
#pragma once
-#include <aclCommon/BaseMemoryManager.hpp>
#include <armnn/MemorySources.hpp>
#include <armnn/backends/IMemoryManager.hpp>
#include <armnn/backends/ITensorHandleFactory.hpp>
diff --git a/src/backends/cl/ClTensorHandleFactory.cpp b/src/backends/cl/ClTensorHandleFactory.cpp
index be3ca5e05a..df99677fbb 100644
--- a/src/backends/cl/ClTensorHandleFactory.cpp
+++ b/src/backends/cl/ClTensorHandleFactory.cpp
@@ -1,11 +1,12 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017, 2024 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "ClTensorHandleFactory.hpp"
#include "ClTensorHandle.hpp"
+#include <armnn/backends/IMemoryManager.hpp>
#include <armnn/utility/NumericCast.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>
diff --git a/src/backends/cl/workloads/ClChannelShuffleWorkload.cpp b/src/backends/cl/workloads/ClChannelShuffleWorkload.cpp
index 9ce05713b0..0e10b37de2 100644
--- a/src/backends/cl/workloads/ClChannelShuffleWorkload.cpp
+++ b/src/backends/cl/workloads/ClChannelShuffleWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2021-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -80,8 +80,8 @@ ClChannelShuffleWorkload::ClChannelShuffleWorkload(const ChannelShuffleQueueDesc
aclDataLayout = ConvertDataLayout(armnn::DataLayout::NHWC);
break;
default:
- ARMNN_ASSERT_MSG(false, "Unsupported axis");
- break;
+ throw InvalidArgumentException("Value for axis: " + std::to_string(descriptor.m_Parameters.m_Axis) +
+ " is not valid");
}
input.info()->set_data_layout(aclDataLayout);
output.info()->set_data_layout(aclDataLayout);
diff --git a/src/backends/cl/workloads/ClConstantWorkload.cpp b/src/backends/cl/workloads/ClConstantWorkload.cpp
index bbf6476c0a..619c0f8a11 100644
--- a/src/backends/cl/workloads/ClConstantWorkload.cpp
+++ b/src/backends/cl/workloads/ClConstantWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017-2018,2020-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2018,2020-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -61,7 +61,7 @@ void ClConstantWorkload::Execute() const
{
const ConstantQueueDescriptor& data = this->m_Data;
- ARMNN_ASSERT(data.m_LayerOutput != nullptr);
+ ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(data.m_LayerOutput, "Output tensor handle is null.");
arm_compute::CLTensor& output = static_cast<ClTensorHandle*>(data.m_Outputs[0])->GetTensor();
arm_compute::DataType computeDataType = static_cast<ClTensorHandle*>(data.m_Outputs[0])->GetDataType();
@@ -105,8 +105,7 @@ void ClConstantWorkload::Execute() const
}
default:
{
- ARMNN_ASSERT_MSG(false, "Unknown data type");
- break;
+ throw InvalidArgumentException("Unknown data type.");
}
}
diff --git a/src/backends/cl/workloads/ClConvolution2dWorkload.cpp b/src/backends/cl/workloads/ClConvolution2dWorkload.cpp
index 2fc174cfc2..7ae09e3eef 100644
--- a/src/backends/cl/workloads/ClConvolution2dWorkload.cpp
+++ b/src/backends/cl/workloads/ClConvolution2dWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -99,7 +99,8 @@ ClConvolution2dWorkload::ClConvolution2dWorkload(const Convolution2dQueueDescrip
arm_compute::ICLTensor& bias = static_cast<IClTensorHandle*>(m_Data.m_Inputs[2])->GetTensor();
bias.info()->set_are_values_constant(info.m_InputTensorInfos[2].IsConstant());
// We assume here that NeonConvolution2dWorkloadValidate has been called before the constructor.
- ARMNN_ASSERT(info.m_InputTensorInfos[2].IsConstant() == true);
+ ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(info.m_InputTensorInfos[2].IsConstant() == true,
+ "The bias tensor must be constant.");
m_BiasProxy = std::make_unique<ICLTensorProxy>(&bias);
}
diff --git a/src/backends/cl/workloads/ClDepthwiseConvolutionWorkload.cpp b/src/backends/cl/workloads/ClDepthwiseConvolutionWorkload.cpp
index e5ee9b9e06..088814b8c9 100644
--- a/src/backends/cl/workloads/ClDepthwiseConvolutionWorkload.cpp
+++ b/src/backends/cl/workloads/ClDepthwiseConvolutionWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -140,7 +140,6 @@ ClDepthwiseConvolutionWorkload::ClDepthwiseConvolutionWorkload(
activationInfo,
aclDilationInfo);
}
- ARMNN_ASSERT(m_DepthwiseConvolutionLayer);
// Add details for profiling output
WorkloadInfo detailsInfo;
@@ -158,7 +157,6 @@ ClDepthwiseConvolutionWorkload::ClDepthwiseConvolutionWorkload(
void ClDepthwiseConvolutionWorkload::Execute() const
{
ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClDepthwiseConvolutionWorkload_Execute");
- ARMNN_ASSERT(m_DepthwiseConvolutionLayer);
RunClFunction(*m_DepthwiseConvolutionLayer, CHECK_LOCATION());
}
diff --git a/src/backends/cl/workloads/ClFullyConnectedWorkload.cpp b/src/backends/cl/workloads/ClFullyConnectedWorkload.cpp
index 959f430712..0b6606f360 100644
--- a/src/backends/cl/workloads/ClFullyConnectedWorkload.cpp
+++ b/src/backends/cl/workloads/ClFullyConnectedWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017-2018,2020-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2018,2020-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -32,7 +32,9 @@ arm_compute::Status ClFullyConnectedWorkloadValidate(const TensorInfo& input,
arm_compute::TensorInfo* optionalAclBiases = nullptr;
if (descriptor.m_BiasEnabled)
{
- ARMNN_ASSERT(biases.has_value());
+ ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(
+ biases.has_value(),
+ "ClFullyConnectedWorkload: Bias was enabled in the descriptor but no value was supplied.");
aclBiases = BuildArmComputeTensorInfo(biases.value());
aclBiases.set_are_values_constant(biases.value().IsConstant());
optionalAclBiases = &aclBiases;
diff --git a/src/backends/cl/workloads/ClGatherNdWorkload.cpp b/src/backends/cl/workloads/ClGatherNdWorkload.cpp
index 1351f9685f..4e9dd7526f 100644
--- a/src/backends/cl/workloads/ClGatherNdWorkload.cpp
+++ b/src/backends/cl/workloads/ClGatherNdWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -142,8 +142,6 @@ ClGatherNdWorkload::ClGatherNdWorkload(const GatherNdQueueDescriptor& descriptor
flattenedCoeff_Info.SetShape({ keyIndices["ND"] });
BuildArmComputeTensor(m_FlattenedCoeff, flattenedCoeff_Info);
armcomputetensorutils::InitialiseArmComputeTensorEmpty(m_FlattenedCoeff);
- ARMNN_ASSERT_MSG(indicesInfo.GetDataType() == DataType::Signed32,
- "flattenedCoeff must be same data type as m_FlattenedCoeff");
CopyArmComputeClTensorData<int32_t>(m_FlattenedCoeff, flattenedCoeff.data());
// Prepare the tensor to store the output of the multiplication
diff --git a/src/backends/cl/workloads/ClSqrtWorkload.cpp b/src/backends/cl/workloads/ClSqrtWorkload.cpp
index e36adf6d4c..d41584e6d6 100644
--- a/src/backends/cl/workloads/ClSqrtWorkload.cpp
+++ b/src/backends/cl/workloads/ClSqrtWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -34,7 +34,10 @@ ClSqrtWorkload::ClSqrtWorkload(const ElementwiseUnaryQueueDescriptor& descriptor
const arm_compute::CLCompileContext& clCompileContext)
: ClBaseWorkload<ElementwiseUnaryQueueDescriptor>(descriptor, info)
{
- ARMNN_ASSERT(descriptor.m_Parameters.m_Operation == UnaryOperation::Sqrt);
+ if (descriptor.m_Parameters.m_Operation != UnaryOperation::Sqrt)
+ {
+ throw InvalidArgumentException("ClSqrtWorkload: The descriptor does not indicate a Sqrt operation.");
+ }
// Report Profiling Details
ARMNN_REPORT_PROFILING_WORKLOAD_DESC("ClSqrtWorkload_Construct",
diff --git a/src/backends/cl/workloads/ClTransposeConvolution2dWorkload.cpp b/src/backends/cl/workloads/ClTransposeConvolution2dWorkload.cpp
index d3eeadeb31..c3fafd4784 100644
--- a/src/backends/cl/workloads/ClTransposeConvolution2dWorkload.cpp
+++ b/src/backends/cl/workloads/ClTransposeConvolution2dWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2019-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2019-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -38,8 +38,11 @@ arm_compute::Status ClTransposeConvolution2dWorkloadValidate(const TensorInfo& i
if (descriptor.m_BiasEnabled)
{
- ARMNN_ASSERT(biases.has_value());
-
+ if (!biases.has_value())
+ {
+ return arm_compute::Status{arm_compute::ErrorCode::RUNTIME_ERROR,
+ "ArmNN ClTransposeConv2dWorkload has empty bias value."};
+ }
aclBiasesInfo = BuildArmComputeTensorInfo(biases.value(), descriptor.m_DataLayout);
optionalAclBiasesInfo = &aclBiasesInfo;
}
diff --git a/src/backends/cl/workloads/ClWorkloadUtils.hpp b/src/backends/cl/workloads/ClWorkloadUtils.hpp
index 4b491e3cec..78b09b062d 100644
--- a/src/backends/cl/workloads/ClWorkloadUtils.hpp
+++ b/src/backends/cl/workloads/ClWorkloadUtils.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
@@ -124,8 +124,7 @@ inline auto SetClSliceData(const std::vector<unsigned int>& m_begin,
inline void InitializeArmComputeClTensorData(arm_compute::CLTensor& clTensor,
const ConstTensorHandle* handle)
{
- ARMNN_ASSERT(handle);
-
+ ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(handle, "Null tensor handle passed to InitializeArmComputeTensorData.");
armcomputetensorutils::InitialiseArmComputeTensorEmpty(clTensor);
switch(handle->GetTensorInfo().GetDataType())
{
diff --git a/src/backends/gpuFsa/workloads/GpuFsaConstantWorkload.cpp b/src/backends/gpuFsa/workloads/GpuFsaConstantWorkload.cpp
index 39d3c0ddab..a68d3e635f 100644
--- a/src/backends/gpuFsa/workloads/GpuFsaConstantWorkload.cpp
+++ b/src/backends/gpuFsa/workloads/GpuFsaConstantWorkload.cpp
@@ -57,8 +57,6 @@ void GpuFsaConstantWorkload::Execute() const
if (!m_RanOnce)
{
const ConstantQueueDescriptor& data = this->m_Data;
-
- ARMNN_ASSERT(data.m_LayerOutput != nullptr);
arm_compute::CLTensor& output = static_cast<GpuFsaTensorHandle*>(data.m_Outputs[0])->GetTensor();
arm_compute::DataType computeDataType = static_cast<GpuFsaTensorHandle*>(data.m_Outputs[0])->GetDataType();
@@ -102,7 +100,7 @@ void GpuFsaConstantWorkload::Execute() const
}
default:
{
- ARMNN_ASSERT_MSG(false, "Unknown data type");
+ throw InvalidArgumentException("Unknown data type passed to GpuFsaConstantWorkload::Execute()");
break;
}
}
diff --git a/src/backends/gpuFsa/workloads/GpuFsaWorkloadUtils.hpp b/src/backends/gpuFsa/workloads/GpuFsaWorkloadUtils.hpp
index 10954b07b5..567b9e303c 100644
--- a/src/backends/gpuFsa/workloads/GpuFsaWorkloadUtils.hpp
+++ b/src/backends/gpuFsa/workloads/GpuFsaWorkloadUtils.hpp
@@ -100,7 +100,7 @@ namespace armnn
inline void InitializeArmComputeClTensorData(arm_compute::CLTensor& clTensor,
const ConstTensorHandle* handle)
{
- ARMNN_ASSERT(handle);
+ ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(handle, "Null tensor handle passed to InitializeArmComputeClTensorData.");
armcomputetensorutils::InitialiseArmComputeTensorEmpty(clTensor);
switch(handle->GetTensorInfo().GetDataType())
diff --git a/src/backends/neon/NeonTensorHandle.hpp b/src/backends/neon/NeonTensorHandle.hpp
index e5f210773d..303676043e 100644
--- a/src/backends/neon/NeonTensorHandle.hpp
+++ b/src/backends/neon/NeonTensorHandle.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -8,10 +8,9 @@
#include <BFloat16.hpp>
#include <Half.hpp>
-#include <armnn/utility/Assert.hpp>
-
#include <aclCommon/ArmComputeTensorHandle.hpp>
#include <aclCommon/ArmComputeTensorUtils.hpp>
+#include <armnn/Exceptions.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>
#include <arm_compute/runtime/MemoryGroup.h>
@@ -68,7 +67,7 @@ public:
// If we have enabled Importing, don't manage the tensor
if (!m_IsImportEnabled)
{
- ARMNN_ASSERT(m_MemoryGroup != nullptr);
+ ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(m_MemoryGroup, "arm_compute::MemoryGroup is null.");
m_MemoryGroup->manage(&m_Tensor);
}
}
diff --git a/src/backends/neon/NeonTimer.cpp b/src/backends/neon/NeonTimer.cpp
index dbb1503d24..88d8cb02e9 100644
--- a/src/backends/neon/NeonTimer.cpp
+++ b/src/backends/neon/NeonTimer.cpp
@@ -1,12 +1,11 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017, 2024 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "NeonTimer.hpp"
#include "NeonInterceptorScheduler.hpp"
-#include <armnn/utility/Assert.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>
#include <memory>
@@ -21,7 +20,10 @@ static thread_local auto g_Interceptor = std::make_shared<NeonInterceptorSchedul
void NeonTimer::Start()
{
m_Kernels.clear();
- ARMNN_ASSERT(g_Interceptor->GetKernels() == nullptr);
+ if (g_Interceptor->GetKernels() != nullptr)
+ {
+ throw RuntimeException("This NeonTimer instance has already been started.");
+ }
g_Interceptor->SetKernels(&m_Kernels);
m_RealSchedulerType = arm_compute::Scheduler::get_type();
diff --git a/src/backends/neon/test/NeonCreateWorkloadTests.cpp b/src/backends/neon/test/NeonCreateWorkloadTests.cpp
index 96429a84e1..9c32e32375 100644
--- a/src/backends/neon/test/NeonCreateWorkloadTests.cpp
+++ b/src/backends/neon/test/NeonCreateWorkloadTests.cpp
@@ -1,13 +1,11 @@
//
-// Copyright © 2017, 2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017, 2023-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "NeonWorkloadFactoryHelper.hpp"
#include <aclCommon/ArmComputeTensorUtils.hpp>
-#include <armnn/utility/Assert.hpp>
-#include <armnn/utility/IgnoreUnused.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>
#include <armnn/backends/MemCopyWorkload.hpp>
@@ -283,27 +281,20 @@ TEST_CASE("CreateConvolution2dFloatNhwcWorkload")
TEST_CASE("CreateConvolution2dFastMathEnabledWorkload")
{
Graph graph;
- using ModelOptions = std::vector<BackendOptions>;
+ using ModelOptions = std::vector<BackendOptions>;
ModelOptions modelOptions = {};
- BackendOptions cpuAcc("CpuAcc",
- {
- { "FastMathEnabled", true }
- });
+ BackendOptions cpuAcc("CpuAcc", { { "FastMathEnabled", true } });
modelOptions.push_back(cpuAcc);
NeonWorkloadFactory factory =
NeonWorkloadFactoryHelper::GetFactory(NeonWorkloadFactoryHelper::GetMemoryManager(), modelOptions);
- auto workload =
- CreateConvolution2dWorkloadFastMathTest<NeonConvolution2dWorkload, armnn::DataType::Float32>(factory,
- graph,
- DataLayout::NCHW,
- modelOptions);
+ auto workload = CreateConvolution2dWorkloadFastMathTest<NeonConvolution2dWorkload, armnn::DataType::Float32>(
+ factory, graph, DataLayout::NCHW, modelOptions);
- ARMNN_ASSERT(workload != nullptr);
+ CHECK(workload != nullptr);
auto conv2dWorkload = PolymorphicDowncast<NeonConvolution2dWorkload*>(workload.get());
- IgnoreUnused(conv2dWorkload);
- ARMNN_ASSERT(conv2dWorkload != nullptr);
- ARMNN_ASSERT(conv2dWorkload->GetConvolutionMethod() == arm_compute::ConvolutionMethod::WINOGRAD);
+ CHECK(conv2dWorkload != nullptr);
+ CHECK(conv2dWorkload->GetConvolutionMethod() == arm_compute::ConvolutionMethod::WINOGRAD);
}
template <typename armnn::DataType DataType>
diff --git a/src/backends/neon/test/NeonTensorHandleTests.cpp b/src/backends/neon/test/NeonTensorHandleTests.cpp
index bc8ad5de5a..d6fd081c65 100644
--- a/src/backends/neon/test/NeonTensorHandleTests.cpp
+++ b/src/backends/neon/test/NeonTensorHandleTests.cpp
@@ -1,10 +1,12 @@
//
-// Copyright © 2020-2021,2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020-2021, 2023-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include <Graph.hpp>
#include <Network.hpp>
+#include <aclCommon/BaseMemoryManager.hpp>
+
#include <neon/NeonTensorHandle.hpp>
#include <neon/NeonTensorHandleFactory.hpp>
@@ -16,7 +18,6 @@
#include <CommonTestUtils.hpp>
#include <doctest/doctest.h>
-#include <armnn/utility/Assert.hpp>
TEST_SUITE("NeonTensorHandleTests")
{
@@ -190,7 +191,7 @@ TEST_CASE("NeonTensorHandleSupportsInPlaceComputation")
NeonTensorHandleFactory handleFactory(memoryManager);
// NeonTensorHandleFactory supports InPlaceComputation
- ARMNN_ASSERT(handleFactory.SupportsInPlaceComputation());
+ CHECK(handleFactory.SupportsInPlaceComputation() == true);
}
}
diff --git a/src/backends/neon/workloads/NeonChannelShuffleWorkload.cpp b/src/backends/neon/workloads/NeonChannelShuffleWorkload.cpp
index a44c9aa0d4..8cd36db355 100644
--- a/src/backends/neon/workloads/NeonChannelShuffleWorkload.cpp
+++ b/src/backends/neon/workloads/NeonChannelShuffleWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2021-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -76,7 +76,8 @@ NeonChannelShuffleWorkload::NeonChannelShuffleWorkload(const ChannelShuffleQueue
aclDataLayout = ConvertDataLayout(armnn::DataLayout::NHWC);
break;
default:
- ARMNN_ASSERT_MSG(false, "Unsupported axis");
+ throw InvalidArgumentException("Value for axis: " + std::to_string(descriptor.m_Parameters.m_Axis) +
+ " is not valid");
break;
}
input.info()->set_data_layout(aclDataLayout);
diff --git a/src/backends/neon/workloads/NeonConstantWorkload.cpp b/src/backends/neon/workloads/NeonConstantWorkload.cpp
index f5b0128dc7..270e3fad5c 100644
--- a/src/backends/neon/workloads/NeonConstantWorkload.cpp
+++ b/src/backends/neon/workloads/NeonConstantWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017-2018,2020-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2018,2020-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -65,7 +65,7 @@ void NeonConstantWorkload::Execute() const
{
const ConstantQueueDescriptor& data = this->m_Data;
- ARMNN_ASSERT(data.m_LayerOutput != nullptr);
+ ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(data.m_LayerOutput, "Output tensor handle is null.");
arm_compute::ITensor& output =
PolymorphicDowncast<NeonTensorHandle*>(data.m_Outputs[0])->GetTensor();
arm_compute::DataType computeDataType =
@@ -116,8 +116,7 @@ void NeonConstantWorkload::Execute() const
}
default:
{
- ARMNN_ASSERT_MSG(false, "Unknown data type");
- break;
+ throw InvalidArgumentException("Unknown data type.");
}
}
diff --git a/src/backends/neon/workloads/NeonConvolution2dWorkload.cpp b/src/backends/neon/workloads/NeonConvolution2dWorkload.cpp
index c81022b915..fdc52ef797 100644
--- a/src/backends/neon/workloads/NeonConvolution2dWorkload.cpp
+++ b/src/backends/neon/workloads/NeonConvolution2dWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -95,8 +95,6 @@ NeonConvolution2dWorkload::NeonConvolution2dWorkload(
m_BiasTensor = std::make_unique<arm_compute::Tensor>();
BuildArmComputeTensor(*m_BiasTensor, info.m_InputTensorInfos[2], m_Data.m_Parameters.m_DataLayout);
m_BiasTensor->info()->set_are_values_constant(info.m_InputTensorInfos[2].IsConstant());
- // We assume here that NeonConvolution2dWorkloadValidate has been called before the constructor.
- ARMNN_ASSERT(info.m_InputTensorInfos[2].IsConstant() == true);
}
arm_compute::PadStrideInfo padStrideInfo = BuildArmComputePadStrideInfo(m_Data.m_Parameters);
@@ -141,8 +139,6 @@ NeonConvolution2dWorkload::NeonConvolution2dWorkload(
GetGuid());
m_ConvolutionLayer.reset(convolutionLayer.release());
-
- ARMNN_ASSERT(m_ConvolutionLayer);
m_KernelTensorInfo = info.m_InputTensorInfos[1];
if (m_Data.m_Parameters.m_BiasEnabled)
diff --git a/src/backends/neon/workloads/NeonConvolution3dWorkload.cpp b/src/backends/neon/workloads/NeonConvolution3dWorkload.cpp
index 5bf6e100ed..ef03dde542 100644
--- a/src/backends/neon/workloads/NeonConvolution3dWorkload.cpp
+++ b/src/backends/neon/workloads/NeonConvolution3dWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2021-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -105,9 +105,6 @@ NeonConvolution3dWorkload::NeonConvolution3dWorkload(const Convolution3dQueueDes
this->GetGuid());
m_ConvolutionLayer.reset(convolutionLayer.release());
-
- ARMNN_ASSERT(m_ConvolutionLayer);
-
m_ConvolutionLayer->prepare();
}
diff --git a/src/backends/neon/workloads/NeonDepthwiseConvolutionWorkload.cpp b/src/backends/neon/workloads/NeonDepthwiseConvolutionWorkload.cpp
index b9e9ebb785..de6601f336 100644
--- a/src/backends/neon/workloads/NeonDepthwiseConvolutionWorkload.cpp
+++ b/src/backends/neon/workloads/NeonDepthwiseConvolutionWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -146,15 +146,12 @@ NeonDepthwiseConvolutionWorkload::NeonDepthwiseConvolutionWorkload(
detailsInfo,
GetGuid());
- ARMNN_ASSERT(m_pDepthwiseConvolutionLayer);
-
m_pDepthwiseConvolutionLayer->prepare();
}
void NeonDepthwiseConvolutionWorkload::Execute() const
{
ARMNN_SCOPED_PROFILING_EVENT_NEON_NAME_GUID("NeonDepthwiseConvolutionWorkload_Execute");
- ARMNN_ASSERT(m_pDepthwiseConvolutionLayer);
m_pDepthwiseConvolutionLayer->run();
}
diff --git a/src/backends/neon/workloads/NeonFullyConnectedWorkload.cpp b/src/backends/neon/workloads/NeonFullyConnectedWorkload.cpp
index 9503abdee8..d37279266a 100644
--- a/src/backends/neon/workloads/NeonFullyConnectedWorkload.cpp
+++ b/src/backends/neon/workloads/NeonFullyConnectedWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -37,7 +37,9 @@ arm_compute::Status NeonFullyConnectedWorkloadValidate(const TensorInfo& input,
arm_compute::TensorInfo* optionalAclBiases = nullptr;
if (descriptor.m_BiasEnabled)
{
- ARMNN_ASSERT(biases.has_value());
+ ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(
+ biases.has_value(),
+ "NeonFullyConnectedWorkload: Bias was enabled in the descriptor but no value was supplied.");
aclBiases = BuildArmComputeTensorInfo(biases.value());
aclBiases.set_are_values_constant(biases.value().IsConstant());
optionalAclBiases = &aclBiases;
diff --git a/src/backends/neon/workloads/NeonGatherNdWorkload.cpp b/src/backends/neon/workloads/NeonGatherNdWorkload.cpp
index 93884725da..59fc20afad 100644
--- a/src/backends/neon/workloads/NeonGatherNdWorkload.cpp
+++ b/src/backends/neon/workloads/NeonGatherNdWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -139,8 +139,6 @@ NeonGatherNdWorkload::NeonGatherNdWorkload(const GatherNdQueueDescriptor& descri
flattenedCoeff_Info.SetShape({ keyIndices["ND"] });
BuildArmComputeTensor(m_FlattenedCoeff, flattenedCoeff_Info);
armcomputetensorutils::InitialiseArmComputeTensorEmpty(m_FlattenedCoeff);
- ARMNN_ASSERT_MSG(indicesInfo.GetDataType() == DataType::Signed32,
- "flattenedCoeff must be same data type as m_FlattenedCoeff");
CopyArmComputeITensorData<int32_t>(flattenedCoeff.data(), m_FlattenedCoeff);
// Prepare the tensor to store the output of the multiplication
diff --git a/src/backends/neon/workloads/NeonSqrtWorkload.cpp b/src/backends/neon/workloads/NeonSqrtWorkload.cpp
index 9c3d8a0a99..ee57a0184c 100644
--- a/src/backends/neon/workloads/NeonSqrtWorkload.cpp
+++ b/src/backends/neon/workloads/NeonSqrtWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -31,7 +31,10 @@ arm_compute::Status NeonSqrtWorkloadValidate(const TensorInfo& input, const Tens
NeonSqrtWorkload::NeonSqrtWorkload(const ElementwiseUnaryQueueDescriptor& descriptor, const WorkloadInfo& info)
: NeonBaseWorkload<ElementwiseUnaryQueueDescriptor>(descriptor, info)
{
- ARMNN_ASSERT(descriptor.m_Parameters.m_Operation == UnaryOperation::Sqrt);
+ if (descriptor.m_Parameters.m_Operation != UnaryOperation::Sqrt)
+ {
+ throw InvalidArgumentException("NeonSqrtWorkload: The descriptor does not indicate a Sqrt operation.");
+ }
ARMNN_REPORT_PROFILING_WORKLOAD_DESC("NeonSqrtWorkload_Construct",
descriptor.m_Parameters,
diff --git a/src/backends/neon/workloads/NeonTransposeConvolution2dWorkload.cpp b/src/backends/neon/workloads/NeonTransposeConvolution2dWorkload.cpp
index 2fa118b679..2eedf98cd1 100644
--- a/src/backends/neon/workloads/NeonTransposeConvolution2dWorkload.cpp
+++ b/src/backends/neon/workloads/NeonTransposeConvolution2dWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2019-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2019-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "NeonTransposeConvolution2dWorkload.hpp"
@@ -37,9 +37,10 @@ arm_compute::Status NeonTransposeConvolution2dWorkloadValidate(const TensorInfo&
if (descriptor.m_BiasEnabled)
{
- ARMNN_ASSERT(biases.has_value());
-
- aclBiasesInfo = BuildArmComputeTensorInfo(biases.value(), descriptor.m_DataLayout);
+ ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(
+ biases.has_value(),
+ "NeonTransposeConvolution2dWorkload: Bias was enabled in the descriptor but no value was supplied.");
+ aclBiasesInfo = BuildArmComputeTensorInfo(biases.value(), descriptor.m_DataLayout);
optionalAclBiasesInfo = &aclBiasesInfo;
}
@@ -97,8 +98,6 @@ NeonTransposeConvolution2dWorkload::NeonTransposeConvolution2dWorkload(
m_Layer = std::make_unique<arm_compute::NEDeconvolutionLayer>(memoryManager);
m_Layer->configure(&input, m_KernelTensor.get(), m_BiasTensor.get(), &output, padStrideInfo);
- ARMNN_ASSERT(m_Layer);
-
InitializeArmComputeTensorData(*m_KernelTensor, m_Data.m_Weight);
if (m_Data.m_Parameters.m_BiasEnabled)
diff --git a/src/backends/neon/workloads/NeonWorkloadUtils.hpp b/src/backends/neon/workloads/NeonWorkloadUtils.hpp
index 694c3ab928..2c7cd1bf3e 100644
--- a/src/backends/neon/workloads/NeonWorkloadUtils.hpp
+++ b/src/backends/neon/workloads/NeonWorkloadUtils.hpp
@@ -1,9 +1,10 @@
//
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
+#include <armnn/Exceptions.hpp>
#include <armnn/backends/Workload.hpp>
#include <aclCommon/ArmComputeTensorUtils.hpp>
#include <neon/NeonTensorHandle.hpp>
@@ -69,8 +70,7 @@ inline void InitializeArmComputeTensorData(arm_compute::Tensor& tensor,
TensorInfo tensorInfo,
const ITensorHandle* handle)
{
- ARMNN_ASSERT(handle);
-
+ ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(handle, "Null tensor handle passed to InitializeArmComputeTensorData.");
switch(tensorInfo.GetDataType())
{
case DataType::Float16:
@@ -104,8 +104,7 @@ inline void InitializeArmComputeTensorData(arm_compute::Tensor& tensor,
inline void InitializeArmComputeTensorData(arm_compute::Tensor& tensor,
const ConstTensorHandle* handle)
{
- ARMNN_ASSERT(handle);
-
+ ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(handle, "Null tensor handle passed to InitializeArmComputeTensorData.");
switch(handle->GetTensorInfo().GetDataType())
{
case DataType::Float16:
diff --git a/src/backends/reference/RefLayerSupport.cpp b/src/backends/reference/RefLayerSupport.cpp
index 40d243e10a..f97d03a26e 100644
--- a/src/backends/reference/RefLayerSupport.cpp
+++ b/src/backends/reference/RefLayerSupport.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -958,7 +958,6 @@ bool RefLayerSupport::IsConcatSupported(const std::vector<const TensorInfo*> inp
"Reference concatenation: output type not supported");
for (const TensorInfo* input : inputs)
{
- ARMNN_ASSERT(input != nullptr);
supported &= CheckSupportRule(TypeAnyOf(*input, supportedTypes), reasonIfUnsupported,
"Reference concatenation: input type not supported");
@@ -2629,7 +2628,6 @@ bool RefLayerSupport::IsStackSupported(const std::vector<const TensorInfo*>& inp
"Reference stack: output type not supported");
for (const TensorInfo* input : inputs)
{
- ARMNN_ASSERT(input != nullptr);
supported &= CheckSupportRule(TypeAnyOf(*input, supportedTypes), reasonIfUnsupported,
"Reference stack: input type not supported");
diff --git a/src/backends/reference/RefMemoryManager.cpp b/src/backends/reference/RefMemoryManager.cpp
index 76054e41e1..80f3531df8 100644
--- a/src/backends/reference/RefMemoryManager.cpp
+++ b/src/backends/reference/RefMemoryManager.cpp
@@ -1,10 +1,10 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017, 2024 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "RefMemoryManager.hpp"
-#include <armnn/utility/Assert.hpp>
+#include <armnn/Exceptions.hpp>
#include <algorithm>
@@ -35,7 +35,7 @@ RefMemoryManager::Pool* RefMemoryManager::Manage(unsigned int numBytes)
void RefMemoryManager::Allocate(RefMemoryManager::Pool* pool)
{
- ARMNN_ASSERT(pool);
+ ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(pool, "Null memory manager passed to RefMemoryManager.");
m_FreePools.push_back(pool);
}
@@ -75,25 +75,29 @@ RefMemoryManager::Pool::~Pool()
void* RefMemoryManager::Pool::GetPointer()
{
- ARMNN_ASSERT_MSG(m_Pointer, "RefMemoryManager::Pool::GetPointer() called when memory not acquired");
+ ARMNN_THROW_MSG_IF_FALSE(m_Pointer, RuntimeException,
+ "RefMemoryManager::Pool::GetPointer() called when memory not acquired");
return m_Pointer;
}
void RefMemoryManager::Pool::Reserve(unsigned int numBytes)
{
- ARMNN_ASSERT_MSG(!m_Pointer, "RefMemoryManager::Pool::Reserve() cannot be called after memory acquired");
+ ARMNN_THROW_MSG_IF_FALSE(!m_Pointer, RuntimeException,
+ "RefMemoryManager::Pool::Reserve() cannot be called after memory acquired");
m_Size = std::max(m_Size, numBytes);
}
void RefMemoryManager::Pool::Acquire()
{
- ARMNN_ASSERT_MSG(!m_Pointer, "RefMemoryManager::Pool::Acquire() called when memory already acquired");
+ ARMNN_THROW_MSG_IF_FALSE(!m_Pointer, RuntimeException,
+ "RefMemoryManager::Pool::Acquire() called when memory already acquired");
m_Pointer = ::operator new(size_t(m_Size));
}
void RefMemoryManager::Pool::Release()
{
- ARMNN_ASSERT_MSG(m_Pointer, "RefMemoryManager::Pool::Release() called when memory not acquired");
+ ARMNN_THROW_MSG_IF_FALSE(m_Pointer, RuntimeException,
+ "RefMemoryManager::Pool::Release() called when memory not acquired");
::operator delete(m_Pointer);
m_Pointer = nullptr;
}
diff --git a/src/backends/reference/RefTensorHandle.cpp b/src/backends/reference/RefTensorHandle.cpp
index 07f497c54e..1158a14bc4 100644
--- a/src/backends/reference/RefTensorHandle.cpp
+++ b/src/backends/reference/RefTensorHandle.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2019-2023 Arm Ltd. All rights reserved.
+// Copyright © 2019-2024 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -44,8 +44,8 @@ RefTensorHandle::~RefTensorHandle()
void RefTensorHandle::Manage()
{
- ARMNN_ASSERT_MSG(!m_Pool, "RefTensorHandle::Manage() called twice");
- ARMNN_ASSERT_MSG(!m_UnmanagedMemory, "RefTensorHandle::Manage() called after Allocate()");
+ ARMNN_THROW_MSG_IF_FALSE(!m_Pool, RuntimeException, "RefTensorHandle::Manage() called twice");
+ ARMNN_THROW_MSG_IF_FALSE(!m_UnmanagedMemory, RuntimeException, "RefTensorHandle::Manage() called after Allocate()");
if (m_MemoryManager)
{
diff --git a/src/backends/reference/workloads/BaseIterator.hpp b/src/backends/reference/workloads/BaseIterator.hpp
index 694c22913c..5c5fff39d6 100644
--- a/src/backends/reference/workloads/BaseIterator.hpp
+++ b/src/backends/reference/workloads/BaseIterator.hpp
@@ -1,12 +1,11 @@
//
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
#include <armnn/TypesUtils.hpp>
-#include <armnn/utility/Assert.hpp>
#include <armnn/utility/NumericCast.hpp>
#include <armnnUtils/FloatingPointConverter.hpp>
#include <armnnUtils/TensorUtils.hpp>
@@ -78,28 +77,28 @@ public:
TypedIterator& operator++() override
{
- ARMNN_ASSERT(m_Iterator);
+ ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(m_Iterator, "TypedIterator: m_Iterator is null!");
++m_Iterator;
return *this;
}
TypedIterator& operator+=(const unsigned int increment) override
{
- ARMNN_ASSERT(m_Iterator);
+ ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(m_Iterator, "TypedIterator: m_Iterator is null!");
m_Iterator += increment;
return *this;
}
TypedIterator& operator-=(const unsigned int increment) override
{
- ARMNN_ASSERT(m_Iterator);
+ ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(m_Iterator, "TypedIterator: m_Iterator is null!");
m_Iterator -= increment;
return *this;
}
TypedIterator& operator[](const unsigned int index) override
{
- ARMNN_ASSERT(m_Iterator);
+ ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(m_Iterator, "TypedIterator: m_Iterator is null!");
m_Iterator = m_Start + index;
return *this;
}
@@ -763,7 +762,7 @@ public:
inline PerAxisIterator& SetIndexOnMem(const unsigned int index)
{
- ARMNN_ASSERT(m_Iterator);
+ ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(m_Iterator, "PerAxisIterator: m_Iterator is null!");
m_Iterator = m_Start + index;
if (index < m_AxisFactor)
{
diff --git a/src/backends/reference/workloads/BatchMatMulImpl.cpp b/src/backends/reference/workloads/BatchMatMulImpl.cpp
index c592b3b76c..8e169cbab8 100644
--- a/src/backends/reference/workloads/BatchMatMulImpl.cpp
+++ b/src/backends/reference/workloads/BatchMatMulImpl.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022, 2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -145,7 +145,6 @@ void BatchMatMul::Adjoint(DataSlot type)
const auto& dataLayout = (type == DataSlot::InputX) ? params.m_DataLayoutX : params.m_DataLayoutY;
const auto axesToAdjoint = BatchMatMulDescriptor::GetAxesToMul(dataLayout,inputInfo.GetShape());
- ARMNN_ASSERT(inputInfo.GetShape()[axesToAdjoint.first] == inputInfo.GetShape()[axesToAdjoint.second]);
// We grab a copy of the tensor data to prevent overwriting
std::vector<float> inputDataClone = (type == DataSlot::InputX) ? inputXData : inputYData;
diff --git a/src/backends/reference/workloads/Concatenate.cpp b/src/backends/reference/workloads/Concatenate.cpp
index a0e0abfaa0..fece43cb02 100644
--- a/src/backends/reference/workloads/Concatenate.cpp
+++ b/src/backends/reference/workloads/Concatenate.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017, 2024 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -40,7 +40,9 @@ void Concatenate(const ConcatQueueDescriptor &data,
//Split view extents are defined by the size of (the corresponding) input tensor.
const TensorInfo& inputInfo = GetTensorInfo(inputs[viewIdx]);
- ARMNN_ASSERT(inputInfo.GetNumDimensions() == outputInfo0.GetNumDimensions());
+ ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(
+ inputInfo.GetNumDimensions() == outputInfo0.GetNumDimensions(),
+ "The number of output dimensions does not match the number of input dimensions.");
// Check all dimensions to see if this element is inside the given input view.
bool insideView = true;
diff --git a/src/backends/reference/workloads/ConvImpl.cpp b/src/backends/reference/workloads/ConvImpl.cpp
index 320690eb90..098c931853 100644
--- a/src/backends/reference/workloads/ConvImpl.cpp
+++ b/src/backends/reference/workloads/ConvImpl.cpp
@@ -1,12 +1,10 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017, 2024 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "ConvImpl.hpp"
-#include <armnn/utility/Assert.hpp>
-
#include <cmath>
#include <limits>
@@ -15,7 +13,8 @@ namespace armnn
QuantizedMultiplierSmallerThanOne::QuantizedMultiplierSmallerThanOne(float multiplier)
{
- ARMNN_ASSERT(multiplier >= 0.0f && multiplier < 1.0f);
+ ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(multiplier >= 0.0f && multiplier < 1.0f,
+ "QuantizedMultiplierSmallerThanOne: multiplier must be between 0.0f and 1.0f.");
if (multiplier == 0.0f)
{
m_Multiplier = 0;
@@ -26,14 +25,11 @@ QuantizedMultiplierSmallerThanOne::QuantizedMultiplierSmallerThanOne(float multi
const double q = std::frexp(multiplier, &m_RightShift);
m_RightShift = -m_RightShift;
int64_t qFixed = static_cast<int64_t>(::round(q * (1ll << 31)));
- ARMNN_ASSERT(qFixed <= (1ll << 31));
if (qFixed == (1ll << 31))
{
qFixed /= 2;
--m_RightShift;
}
- ARMNN_ASSERT(m_RightShift >= 0);
- ARMNN_ASSERT(qFixed <= std::numeric_limits<int32_t>::max());
m_Multiplier = static_cast<int32_t>(qFixed);
}
}
@@ -61,7 +57,8 @@ int32_t QuantizedMultiplierSmallerThanOne::SaturatingRoundingDoublingHighMul(int
int32_t QuantizedMultiplierSmallerThanOne::RoundingDivideByPOT(int32_t x, int exponent)
{
- ARMNN_ASSERT(exponent >= 0 && exponent <= 31);
+ ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(exponent >= 0 && exponent <= 31,
+ "RoundingDivideByPOT: exponent must be between 0 and 31.");
int32_t mask = (1 << exponent) - 1;
int32_t remainder = x & mask;
int32_t threshold = (mask >> 1) + (x < 0 ? 1 : 0);
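The hunk above keeps the mask/remainder/threshold arithmetic of the rounding divide. A self-contained sketch, assuming the usual gemmlowp-style completion of that arithmetic (the return statement is not shown in the hunk, so this is an assumption, not a quote of ConvImpl.cpp):

#include <cstdint>

// Rounding divide by 2^exponent, rounding to nearest with halves away from zero.
// Worked example: x = 7, exponent = 1 -> mask = 1, remainder = 1, threshold = 0,
// result = (7 >> 1) + 1 = 4, i.e. 3.5 rounded to 4.
int32_t RoundingDivideByPOTSketch(int32_t x, int exponent)
{
    const int32_t mask      = (1 << exponent) - 1;
    const int32_t remainder = x & mask;
    const int32_t threshold = (mask >> 1) + (x < 0 ? 1 : 0);
    return (x >> exponent) + (remainder > threshold ? 1 : 0);
}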
diff --git a/src/backends/reference/workloads/DepthToSpace.cpp b/src/backends/reference/workloads/DepthToSpace.cpp
index f5e9ec5498..60098d1bf1 100644
--- a/src/backends/reference/workloads/DepthToSpace.cpp
+++ b/src/backends/reference/workloads/DepthToSpace.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2019 Arm Ltd. All rights reserved.
+// Copyright © 2019, 2024 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -8,8 +8,6 @@
#include <armnnUtils/DataLayoutIndexed.hpp>
#include <armnnUtils/Permute.hpp>
-#include <armnn/utility/Assert.hpp>
-
using namespace armnnUtils;
namespace armnn
@@ -22,7 +20,6 @@ void DepthToSpace(const TensorInfo& inputInfo,
unsigned int dataTypeSize)
{
const unsigned int blockSize = descriptor.m_BlockSize;
- ARMNN_ASSERT(blockSize != 0u);
const TensorShape& inputShape = inputInfo.GetShape();
const unsigned int batches = inputShape[0];
diff --git a/src/backends/reference/workloads/Dequantize.cpp b/src/backends/reference/workloads/Dequantize.cpp
index fdc8e30c75..3955458049 100644
--- a/src/backends/reference/workloads/Dequantize.cpp
+++ b/src/backends/reference/workloads/Dequantize.cpp
@@ -1,12 +1,10 @@
//
-// Copyright © 2019 Arm Ltd. All rights reserved.
+// Copyright © 2019, 2024 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "Dequantize.hpp"
-#include <armnn/utility/IgnoreUnused.hpp>
-
namespace armnn
{
@@ -15,8 +13,9 @@ void Dequantize(Decoder<float>& inputDecoder,
const TensorInfo& inputInfo,
const TensorInfo& outputInfo)
{
- IgnoreUnused(outputInfo);
- ARMNN_ASSERT(inputInfo.GetNumElements() == outputInfo.GetNumElements());
+ ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(
+ inputInfo.GetNumElements() == outputInfo.GetNumElements(),
+ "Dequantize: The number of elements in the input and output tensors must be the same.");
for (unsigned int i = 0; i < inputInfo.GetNumElements(); i++)
{
// inputDecoder.Get() dequantizes the data element from whatever
diff --git a/src/backends/reference/workloads/DetectionPostProcess.cpp b/src/backends/reference/workloads/DetectionPostProcess.cpp
index c5ab327f90..361f8865be 100644
--- a/src/backends/reference/workloads/DetectionPostProcess.cpp
+++ b/src/backends/reference/workloads/DetectionPostProcess.cpp
@@ -1,12 +1,10 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017, 2024 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "DetectionPostProcess.hpp"
-#include <armnn/utility/Assert.hpp>
-#include <armnn/utility/IgnoreUnused.hpp>
#include <armnn/utility/NumericCast.hpp>
#include <algorithm>
@@ -140,11 +138,11 @@ void AllocateOutputData(unsigned int numOutput,
void DetectionPostProcess(const TensorInfo& boxEncodingsInfo,
const TensorInfo& scoresInfo,
- const TensorInfo& anchorsInfo,
+ const TensorInfo&,
const TensorInfo& detectionBoxesInfo,
- const TensorInfo& detectionClassesInfo,
- const TensorInfo& detectionScoresInfo,
- const TensorInfo& numDetectionsInfo,
+ const TensorInfo&,
+ const TensorInfo&,
+ const TensorInfo&,
const DetectionPostProcessDescriptor& desc,
Decoder<float>& boxEncodings,
Decoder<float>& scores,
@@ -154,7 +152,6 @@ void DetectionPostProcess(const TensorInfo& boxEncodingsInfo,
float* detectionScores,
float* numDetections)
{
- IgnoreUnused(anchorsInfo, detectionClassesInfo, detectionScoresInfo, numDetectionsInfo);
// Transform center-size format which is (ycenter, xcenter, height, width) to box-corner format,
// which represents the lower left corner and the upper right corner (ymin, xmin, ymax, xmax)
@@ -212,9 +209,6 @@ void DetectionPostProcess(const TensorInfo& boxEncodingsInfo,
boxCorners[indexH] = yCentre + halfH;
// xmax
boxCorners[indexW] = xCentre + halfW;
-
- ARMNN_ASSERT(boxCorners[indexY] < boxCorners[indexH]);
- ARMNN_ASSERT(boxCorners[indexX] < boxCorners[indexW]);
}
unsigned int numClassesWithBg = desc.m_NumClasses + 1;
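For reference, the centre-size to box-corner transform described in the comment above can be written as a small helper; a sketch with a worked example (the names here are illustrative, not armnn API):

// Example: (yCentre, xCentre, height, width) = (2, 3, 4, 6)
//          -> (yMin, xMin, yMax, xMax) = (0, 0, 4, 6).
struct BoxCornersSketch { float yMin; float xMin; float yMax; float xMax; };

BoxCornersSketch CentreSizeToCornersSketch(float yCentre, float xCentre, float height, float width)
{
    const float halfH = height / 2.0f;
    const float halfW = width  / 2.0f;
    return { yCentre - halfH, xCentre - halfW, yCentre + halfH, xCentre + halfW };
}

The asserts removed above checked the same relationship, i.e. that yMin < yMax and xMin < xMax, which holds whenever height and width are positive.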
diff --git a/src/backends/reference/workloads/FullyConnected.cpp b/src/backends/reference/workloads/FullyConnected.cpp
index 47968f4d88..19c01b8987 100644
--- a/src/backends/reference/workloads/FullyConnected.cpp
+++ b/src/backends/reference/workloads/FullyConnected.cpp
@@ -1,12 +1,10 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017, 2024 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "FullyConnected.hpp"
-#include <armnn/utility/Assert.hpp>
-
#include "RefWorkloadUtils.hpp"
namespace armnn
@@ -31,7 +29,6 @@ void FullyConnected(const TensorShape& rInputShape,
const TensorShape biasShape{outputSize};
- ARMNN_ASSERT(!biasEnabled || pBiasDecoder != nullptr);
const std::vector<float> decodedBiases = biasEnabled ? pBiasDecoder->DecodeTensor(biasShape) : std::vector<float>();
diff --git a/src/backends/reference/workloads/LogSoftmax.cpp b/src/backends/reference/workloads/LogSoftmax.cpp
index 2b6384913e..0926894489 100644
--- a/src/backends/reference/workloads/LogSoftmax.cpp
+++ b/src/backends/reference/workloads/LogSoftmax.cpp
@@ -1,13 +1,11 @@
//
-// Copyright © 2019 Arm Ltd. All rights reserved.
+// Copyright © 2019, 2024 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "LogSoftmax.hpp"
#include <armnnUtils/TensorUtils.hpp>
-#include <armnn/utility/Assert.hpp>
-#include <armnn/utility/IgnoreUnused.hpp>
#include <armnn/utility/NumericCast.hpp>
#include <cmath>
@@ -33,10 +31,8 @@ void LogSoftmax(Decoder<float>& input,
{
const unsigned int numDimensions = inputInfo.GetNumDimensions();
- bool axisIsValid = ValidateAxis(descriptor.m_Axis, numDimensions);
- ARMNN_ASSERT_MSG(axisIsValid,
- "Axis index is not in range [-numDimensions, numDimensions).");
- IgnoreUnused(axisIsValid);
+ ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(ValidateAxis(descriptor.m_Axis, numDimensions),
+ "Axis index is not in range [-numDimensions, numDimensions).");
unsigned int uAxis = descriptor.m_Axis < 0 ?
numDimensions - armnn::numeric_cast<unsigned int>(std::abs(descriptor.m_Axis)) :
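The axis check that replaces the assert above still feeds the same negative-axis normalisation. A sketch of that step, assuming the axis has already passed the [-numDimensions, numDimensions) range check (static_cast stands in here for the numeric_cast used in the source):

#include <cstdlib>

// Example: numDimensions = 4, axis = -1 -> uAxis = 4 - 1 = 3 (the innermost axis).
unsigned int NormaliseAxisSketch(int axis, unsigned int numDimensions)
{
    return axis < 0
        ? numDimensions - static_cast<unsigned int>(std::abs(axis))
        : static_cast<unsigned int>(axis);
}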
diff --git a/src/backends/reference/workloads/MirrorPad.cpp b/src/backends/reference/workloads/MirrorPad.cpp
index 7388fed147..de3b74b263 100644
--- a/src/backends/reference/workloads/MirrorPad.cpp
+++ b/src/backends/reference/workloads/MirrorPad.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021, 2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -18,8 +18,8 @@ inline std::vector<unsigned int> IndexToCoord(const armnn::TensorShape& shape, u
{
unsigned int numOfElements = shape.GetNumElements();
- ARMNN_ASSERT_MSG(index <= numOfElements, "Index has to be in [0, num_elements]");
- ARMNN_ASSERT_MSG(numOfElements != 0, "Cannot create coordinate from empty shape");
+ ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(index <= numOfElements, "Index has to be in [0, num_elements]");
+ ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(numOfElements != 0, "Cannot create coordinate from empty shape");
std::vector<unsigned int> coord(shape.GetNumDimensions());
for(unsigned int i = 0; i < shape.GetNumDimensions(); ++i)
@@ -36,8 +36,8 @@ inline std::vector<unsigned int> IndexToCoord(const armnn::TensorShape& shape, u
// E.g. [0, 0, 2] returns 2.
inline unsigned int CoordToIndex(const armnn::TensorShape& shape, const std::vector<unsigned int>& coord)
{
- ARMNN_ASSERT_MSG(shape.GetNumDimensions() != 0, "Cannot get index from empty shape");
- ARMNN_ASSERT_MSG(coord.size() != 0, "Cannot get index of empty coordinate");
+ ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(shape.GetNumDimensions() != 0, "Cannot get index from empty shape");
+ ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(coord.size() != 0, "Cannot get index of empty coordinate");
unsigned int index = 0;
unsigned int dimSize = 1;
diff --git a/src/backends/reference/workloads/Reduce.cpp b/src/backends/reference/workloads/Reduce.cpp
index 8b28a61388..6ea333b405 100644
--- a/src/backends/reference/workloads/Reduce.cpp
+++ b/src/backends/reference/workloads/Reduce.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021, 2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -153,8 +153,6 @@ void Reduce(const TensorInfo& inputInfo,
for (unsigned int idx = 0; idx < numResolvedAxis; ++idx)
{
unsigned int current = inputDims[resolvedAxis[idx]];
- ARMNN_ASSERT(armnn::numeric_cast<float>(current) <
- (std::numeric_limits<float>::max() / armnn::numeric_cast<float>(numElementsInAxis)));
numElementsInAxis *= current;
}
diff --git a/src/backends/reference/workloads/RefLogSoftmaxWorkload.cpp b/src/backends/reference/workloads/RefLogSoftmaxWorkload.cpp
index e45d24a0bd..47c537cf84 100644
--- a/src/backends/reference/workloads/RefLogSoftmaxWorkload.cpp
+++ b/src/backends/reference/workloads/RefLogSoftmaxWorkload.cpp
@@ -12,8 +12,6 @@
#include <Profiling.hpp>
-#include <armnn/utility/Assert.hpp>
-
namespace armnn
{
@@ -38,9 +36,6 @@ void RefLogSoftmaxWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vec
std::unique_ptr<Decoder<float>> decoder = MakeDecoder<float>(inputInfo, inputs[0]->Map());
std::unique_ptr<Encoder<float>> encoder = MakeEncoder<float>(outputInfo, outputs[0]->Map());
- ARMNN_ASSERT(decoder != nullptr);
- ARMNN_ASSERT(encoder != nullptr);
-
LogSoftmax(*decoder, *encoder, inputInfo, m_Data.m_Parameters);
}
diff --git a/src/backends/reference/workloads/RefStridedSliceWorkload.cpp b/src/backends/reference/workloads/RefStridedSliceWorkload.cpp
index c4a4f7f593..1dc95a2a19 100644
--- a/src/backends/reference/workloads/RefStridedSliceWorkload.cpp
+++ b/src/backends/reference/workloads/RefStridedSliceWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2018-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2018-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -31,13 +31,8 @@ void RefStridedSliceWorkload::Execute(std::vector<ITensorHandle*> inputs, std::v
ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID("RefStridedSliceWorkload_Execute");
const TensorInfo& inputInfo = GetTensorInfo(inputs[0]);
- const TensorInfo& outputInfo = GetTensorInfo(outputs[0]);
DataType inputDataType = inputInfo.GetDataType();
- DataType outputDataType = outputInfo.GetDataType();
-
- ARMNN_ASSERT(inputDataType == outputDataType);
- IgnoreUnused(outputDataType);
StridedSlice(inputInfo,
m_Data.m_Parameters,
diff --git a/src/backends/reference/workloads/Resize.cpp b/src/backends/reference/workloads/Resize.cpp
index e80a2057e0..7bed6c6056 100644
--- a/src/backends/reference/workloads/Resize.cpp
+++ b/src/backends/reference/workloads/Resize.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017, 2024 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -72,7 +72,8 @@ void Resize(Decoder<float>& in,
bool halfPixelCenters)
{
// alignCorners and halfPixelCenters cannot both be true
- ARMNN_ASSERT(!(alignCorners && halfPixelCenters));
+ ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(!(alignCorners && halfPixelCenters),
+ "Resize: alignCorners and halfPixelCenters cannot both be true");
// We follow the definition of TensorFlow and AndroidNN: the top-left corner of a texel in the output
// image is projected into the input image to figure out the interpolants and weights. Note that this
diff --git a/src/backends/reference/workloads/Softmax.cpp b/src/backends/reference/workloads/Softmax.cpp
index 00d496db85..d792361456 100644
--- a/src/backends/reference/workloads/Softmax.cpp
+++ b/src/backends/reference/workloads/Softmax.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017, 2024 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -16,10 +16,10 @@ namespace armnn
/// Computes the softmax function on some inputs, into outputs, with a shape given by tensorInfo.
void Softmax(Decoder<float>& in, Encoder<float>& out, const TensorInfo& inputTensorInfo, float beta, int axis)
{
- ARMNN_ASSERT_MSG(axis < static_cast<int>(inputTensorInfo.GetNumDimensions()),
- "Required axis index greater than number of dimensions.");
- ARMNN_ASSERT_MSG(axis >= -static_cast<int>(inputTensorInfo.GetNumDimensions()),
- "Required axis index lower than negative of the number of dimensions");
+ ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(axis < static_cast<int>(inputTensorInfo.GetNumDimensions()),
+ "Required axis index greater than number of dimensions.");
+ ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(axis >= -static_cast<int>(inputTensorInfo.GetNumDimensions()),
+ "Required axis index lower than negative of the number of dimensions");
unsigned int uAxis = axis < 0 ?
inputTensorInfo.GetNumDimensions() - static_cast<unsigned int>(abs(axis))
diff --git a/src/backends/reference/workloads/Splitter.cpp b/src/backends/reference/workloads/Splitter.cpp
index 695ae8a088..963e3aa6f3 100644
--- a/src/backends/reference/workloads/Splitter.cpp
+++ b/src/backends/reference/workloads/Splitter.cpp
@@ -1,12 +1,11 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017, 2024 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "RefWorkloadUtils.hpp"
#include <armnn/backends/WorkloadData.hpp>
#include <armnn/Tensor.hpp>
-#include <armnn/utility/Assert.hpp>
#include "Splitter.hpp"
#include <cmath>
@@ -48,7 +47,9 @@ void Split(const SplitterQueueDescriptor& data,
//Split view extents are defined by the size of (the corresponding) input tensor.
const TensorInfo& outputInfo = GetTensorInfo(outputs[viewIdx]);
- ARMNN_ASSERT(outputInfo.GetNumDimensions() == inputInfo.GetNumDimensions());
+ ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(
+ outputInfo.GetNumDimensions() == inputInfo.GetNumDimensions(),
+ "The number of output dimensions does not match the number of input dimensions.");
// Check all dimensions to see if this element is inside the given input view.
bool insideView = true;
diff --git a/src/backends/reference/workloads/Splitter.hpp b/src/backends/reference/workloads/Splitter.hpp
index 730b071497..f05f654a0c 100644
--- a/src/backends/reference/workloads/Splitter.hpp
+++ b/src/backends/reference/workloads/Splitter.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017, 2024 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -40,7 +40,9 @@ void Splitter(const SplitterQueueDescriptor& data,
//Split view extents are defined by the size of (the corresponding) input tensor.
const TensorInfo& outputInfo = GetTensorInfo(outputs[viewIdx]);
- ARMNN_ASSERT(outputInfo.GetNumDimensions() == inputInfo0.GetNumDimensions());
+ ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(
+ outputInfo.GetNumDimensions() == inputInfo0.GetNumDimensions(),
+ "The number of output dimensions does not match the number of input dimensions.");
// Check all dimensions to see if this element is inside the given input view.
bool insideView = true;
@@ -69,11 +71,7 @@ void Splitter(const SplitterQueueDescriptor& data,
//We are within the view, to copy input data to the output corresponding to this view.
DataType* outputData = GetOutputTensorData<DataType>(viewIdx, data);
- ARMNN_ASSERT(outputData);
-
const DataType* inputData = GetInputTensorData<DataType>(0, data);
- ARMNN_ASSERT(inputData);
-
outputData[outIndex] = inputData[index];
}
}
diff --git a/src/backends/reference/workloads/StridedSlice.cpp b/src/backends/reference/workloads/StridedSlice.cpp
index 68600c9a95..fcd1c357f8 100644
--- a/src/backends/reference/workloads/StridedSlice.cpp
+++ b/src/backends/reference/workloads/StridedSlice.cpp
@@ -1,13 +1,10 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017, 2024 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "StridedSlice.hpp"
-#include <ResolveType.hpp>
-
-#include <armnn/utility/Assert.hpp>
#include <armnn/utility/NumericCast.hpp>
#include <cstring>
@@ -20,12 +17,11 @@ namespace
void PadParams(StridedSliceDescriptor& p, unsigned int dimCount)
{
- ARMNN_ASSERT_MSG(dimCount <= 4, "Expected input with at most 4 dimensions");
+ ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(dimCount <= 4, "Expected input with at most 4 dimensions");
const unsigned int beginIndicesCount =
armnn::numeric_cast<unsigned int>(p.m_Begin.size());
- ARMNN_ASSERT(dimCount >= beginIndicesCount);
const unsigned int padCount = dimCount - beginIndicesCount;
p.m_Begin.resize(dimCount);
diff --git a/src/backends/reference/workloads/TensorBufferArrayView.hpp b/src/backends/reference/workloads/TensorBufferArrayView.hpp
index 0b448e6196..c6a7571a92 100644
--- a/src/backends/reference/workloads/TensorBufferArrayView.hpp
+++ b/src/backends/reference/workloads/TensorBufferArrayView.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017, 2024 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -9,8 +9,6 @@
#include <armnnUtils/DataLayoutIndexed.hpp>
-#include <armnn/utility/Assert.hpp>
-
namespace armnn
{
@@ -25,7 +23,8 @@ public:
, m_Data(data)
, m_DataLayout(dataLayout)
{
- ARMNN_ASSERT(m_Shape.GetNumDimensions() == 4);
+ ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(m_Shape.GetNumDimensions() == 4,
+ "Only $d tensors are supported by TensorBufferArrayView.");
}
DataType& Get(unsigned int b, unsigned int c, unsigned int h, unsigned int w) const
diff --git a/src/backends/tosaCommon/operatorMappings/ElementwiseBinaryOperator.cpp b/src/backends/tosaCommon/operatorMappings/ElementwiseBinaryOperator.cpp
index 6c2b31437b..a9af249673 100644
--- a/src/backends/tosaCommon/operatorMappings/ElementwiseBinaryOperator.cpp
+++ b/src/backends/tosaCommon/operatorMappings/ElementwiseBinaryOperator.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -88,7 +88,6 @@ TosaSerializationBasicBlock* ConvertElementwiseBinaryToTosaOperator(const Layer*
default:
throw armnn::Exception("ConvertElementwiseBinaryToTosaOperator: Unsupported layer type.");
}
- ARMNN_ASSERT(op != nullptr);
std::vector<TosaSerializationTensor*> tensors;
// Only add input tensors if connected layer is an input layer.
diff --git a/src/backends/tosaCommon/operatorMappings/ElementwiseUnaryOperator.cpp b/src/backends/tosaCommon/operatorMappings/ElementwiseUnaryOperator.cpp
index 17ea64b984..02dddab8bc 100644
--- a/src/backends/tosaCommon/operatorMappings/ElementwiseUnaryOperator.cpp
+++ b/src/backends/tosaCommon/operatorMappings/ElementwiseUnaryOperator.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2023-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -44,8 +44,6 @@ TosaSerializationBasicBlock* ConvertElementwiseUnaryOperator(const Layer* layer,
throw armnn::Exception("ConvertElementwiseUnaryToTosaOperator: Unsupported layer type.");
}
- ARMNN_ASSERT(op != nullptr);
-
std::vector<TosaSerializationTensor*> tensors;
// Only add input tensor if connected layer is an input layer.
// As intermediate or constant tensors will be created separately.
diff --git a/src/backends/tosaReference/TosaRefMemoryManager.cpp b/src/backends/tosaReference/TosaRefMemoryManager.cpp
index 745e6bec35..4384b08b4b 100644
--- a/src/backends/tosaReference/TosaRefMemoryManager.cpp
+++ b/src/backends/tosaReference/TosaRefMemoryManager.cpp
@@ -1,11 +1,10 @@
//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022, 2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "TosaRefMemoryManager.hpp"
-#include <armnn/utility/Assert.hpp>
-
+#include <armnn/Exceptions.hpp>
#include <algorithm>
namespace armnn
@@ -35,7 +34,7 @@ TosaRefMemoryManager::Pool* TosaRefMemoryManager::Manage(unsigned int numBytes)
void TosaRefMemoryManager::Allocate(TosaRefMemoryManager::Pool* pool)
{
- ARMNN_ASSERT(pool);
+ ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(pool, "Null pool pointer passed to TosaRefMemoryManager::Allocate().");
m_FreePools.push_back(pool);
}
@@ -75,25 +74,29 @@ TosaRefMemoryManager::Pool::~Pool()
void* TosaRefMemoryManager::Pool::GetPointer()
{
- ARMNN_ASSERT_MSG(m_Pointer, "TosaRefMemoryManager::Pool::GetPointer() called when memory not acquired");
+ ARMNN_THROW_MSG_IF_FALSE(m_Pointer, RuntimeException,
+ "TosaRefMemoryManager::Pool::GetPointer() called when memory not acquired");
return m_Pointer;
}
void TosaRefMemoryManager::Pool::Reserve(unsigned int numBytes)
{
- ARMNN_ASSERT_MSG(!m_Pointer, "TosaRefMemoryManager::Pool::Reserve() cannot be called after memory acquired");
+ ARMNN_THROW_MSG_IF_FALSE(!m_Pointer, RuntimeException,
+ "TosaRefMemoryManager::Pool::Reserve() cannot be called after memory acquired");
m_Size = std::max(m_Size, numBytes);
}
void TosaRefMemoryManager::Pool::Acquire()
{
- ARMNN_ASSERT_MSG(!m_Pointer, "TosaRefMemoryManager::Pool::Acquire() called when memory already acquired");
+ ARMNN_THROW_MSG_IF_FALSE(!m_Pointer, RuntimeException,
+ "TosaRefMemoryManager::Pool::Acquire() called when memory already acquired");
m_Pointer = ::operator new(size_t(m_Size));
}
void TosaRefMemoryManager::Pool::Release()
{
- ARMNN_ASSERT_MSG(m_Pointer, "TosaRefMemoryManager::Pool::Release() called when memory not acquired");
+ ARMNN_THROW_MSG_IF_FALSE(m_Pointer, RuntimeException,
+ "TosaRefMemoryManager::Pool::Release() called when memory not acquired");
::operator delete(m_Pointer);
m_Pointer = nullptr;
}
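The acquire/release checks above enforce a strict lifecycle on the pool: Reserve() only before Acquire(), GetPointer() and Release() only after it, with each misuse now raising RuntimeException in release builds rather than asserting in debug builds. A toy class sketching that lifecycle (a sketch only, not the armnn class itself):

#include <armnn/Exceptions.hpp>
#include <algorithm>

class PoolSketch
{
public:
    void Reserve(unsigned int numBytes)
    {
        if (m_Pointer) { throw armnn::RuntimeException("Reserve() cannot be called after memory acquired"); }
        m_Size = std::max(m_Size, numBytes);
    }
    void Acquire()
    {
        if (m_Pointer) { throw armnn::RuntimeException("Acquire() called when memory already acquired"); }
        m_Pointer = ::operator new(m_Size);
    }
    void* GetPointer() const
    {
        if (!m_Pointer) { throw armnn::RuntimeException("GetPointer() called when memory not acquired"); }
        return m_Pointer;
    }
    void Release()
    {
        if (!m_Pointer) { throw armnn::RuntimeException("Release() called when memory not acquired"); }
        ::operator delete(m_Pointer);
        m_Pointer = nullptr;
    }
private:
    unsigned int m_Size = 0;
    void* m_Pointer = nullptr;
};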
diff --git a/src/backends/tosaReference/TosaRefTensorHandle.cpp b/src/backends/tosaReference/TosaRefTensorHandle.cpp
index aaffc8ab6c..e9dc45ecc1 100644
--- a/src/backends/tosaReference/TosaRefTensorHandle.cpp
+++ b/src/backends/tosaReference/TosaRefTensorHandle.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022, 2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "TosaRefTensorHandle.hpp"
@@ -44,9 +44,9 @@ void TosaRefTensorHandle::Manage()
{
if (!m_IsImportEnabled)
{
- ARMNN_ASSERT_MSG(!m_Pool, "TosaRefTensorHandle::Manage() called twice");
- ARMNN_ASSERT_MSG(!m_UnmanagedMemory, "TosaRefTensorHandle::Manage() called after Allocate()");
-
+ ARMNN_THROW_MSG_IF_FALSE(!m_Pool, RuntimeException, "TosaRefTensorHandle::Manage() called twice");
+ ARMNN_THROW_MSG_IF_FALSE(!m_UnmanagedMemory, RuntimeException,
+ "TosaRefTensorHandle::Manage() called after Allocate()");
m_Pool = m_MemoryManager->Manage(m_TensorInfo.GetNumBytes());
}
}