From b4ef16334900af33bf4321f28c90f62bf32238cd Mon Sep 17 00:00:00 2001
From: Colm Donelan
Date: Thu, 1 Feb 2024 15:00:43 +0000
Subject: IVGCVSW-7854 Remove/rewrite asserts in the backends.

* Identify usages of ARMNN_ASSERT that should be proper exceptions.
* Change ARMNN_ASSERT in Doctests to CHECK.
* Verify any remaining assertions are reasonable.

Signed-off-by: Colm Donelan
Change-Id: Ifd1f2a5a4bb60135e8654305035ec70e09c4dc2d
---
 src/backends/reference/RefLayerSupport.cpp           |  4 +---
 src/backends/reference/RefMemoryManager.cpp          | 18 +++++++++++-------
 src/backends/reference/RefTensorHandle.cpp           |  6 +++---
 src/backends/reference/workloads/BaseIterator.hpp    | 13 ++++++-------
 src/backends/reference/workloads/BatchMatMulImpl.cpp |  3 +--
 src/backends/reference/workloads/Concatenate.cpp     |  6 ++++--
 src/backends/reference/workloads/ConvImpl.cpp        | 13 +++++--------
 src/backends/reference/workloads/DepthToSpace.cpp    |  5 +----
 src/backends/reference/workloads/Dequantize.cpp      |  9 ++++-----
 .../reference/workloads/DetectionPostProcess.cpp     | 16 +++++-----------
 src/backends/reference/workloads/FullyConnected.cpp  |  5 +----
 src/backends/reference/workloads/LogSoftmax.cpp      | 10 +++-------
 src/backends/reference/workloads/MirrorPad.cpp       | 10 +++++-----
 src/backends/reference/workloads/Reduce.cpp          |  4 +---
 .../reference/workloads/RefLogSoftmaxWorkload.cpp    |  5 -----
 .../reference/workloads/RefStridedSliceWorkload.cpp  |  7 +------
 src/backends/reference/workloads/Resize.cpp          |  5 +++--
 src/backends/reference/workloads/Softmax.cpp         | 10 +++++-----
 src/backends/reference/workloads/Splitter.cpp        |  7 ++++---
 src/backends/reference/workloads/Splitter.hpp        | 10 ++++------
 src/backends/reference/workloads/StridedSlice.cpp    |  8 ++------
 .../reference/workloads/TensorBufferArrayView.hpp    |  7 +++----
 22 files changed, 73 insertions(+), 108 deletions(-)

(limited to 'src/backends/reference')

diff --git a/src/backends/reference/RefLayerSupport.cpp b/src/backends/reference/RefLayerSupport.cpp
index 40d243e10a..f97d03a26e 100644
--- a/src/backends/reference/RefLayerSupport.cpp
+++ b/src/backends/reference/RefLayerSupport.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
@@ -958,7 +958,6 @@ bool RefLayerSupport::IsConcatSupported(const std::vector<const TensorInfo*> inp
                                   "Reference concatenation: output type not supported");
     for (const TensorInfo* input : inputs)
     {
-        ARMNN_ASSERT(input != nullptr);
         supported &= CheckSupportRule(TypeAnyOf(*input, supportedTypes), reasonIfUnsupported,
                                       "Reference concatenation: input type not supported");
@@ -2629,7 +2628,6 @@ bool RefLayerSupport::IsStackSupported(const std::vector<const TensorInfo*>& inp
                                   "Reference stack: output type not supported");
     for (const TensorInfo* input : inputs)
     {
-        ARMNN_ASSERT(input != nullptr);
         supported &= CheckSupportRule(TypeAnyOf(*input, supportedTypes), reasonIfUnsupported,
                                       "Reference stack: input type not supported");
diff --git a/src/backends/reference/RefMemoryManager.cpp b/src/backends/reference/RefMemoryManager.cpp
index 76054e41e1..80f3531df8 100644
--- a/src/backends/reference/RefMemoryManager.cpp
+++ b/src/backends/reference/RefMemoryManager.cpp
@@ -1,10 +1,10 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017, 2024 Arm Ltd. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #include "RefMemoryManager.hpp"
-#include
+#include
 #include
@@ -35,7 +35,7 @@ RefMemoryManager::Pool* RefMemoryManager::Manage(unsigned int numBytes)

 void RefMemoryManager::Allocate(RefMemoryManager::Pool* pool)
 {
-    ARMNN_ASSERT(pool);
+    ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(pool, "Null memory manager passed to RefMemoryManager.");
     m_FreePools.push_back(pool);
 }
@@ -75,25 +75,29 @@ RefMemoryManager::Pool::~Pool()

 void* RefMemoryManager::Pool::GetPointer()
 {
-    ARMNN_ASSERT_MSG(m_Pointer, "RefMemoryManager::Pool::GetPointer() called when memory not acquired");
+    ARMNN_THROW_MSG_IF_FALSE(m_Pointer, RuntimeException,
+                             "RefMemoryManager::Pool::GetPointer() called when memory not acquired");
     return m_Pointer;
 }

 void RefMemoryManager::Pool::Reserve(unsigned int numBytes)
 {
-    ARMNN_ASSERT_MSG(!m_Pointer, "RefMemoryManager::Pool::Reserve() cannot be called after memory acquired");
+    ARMNN_THROW_MSG_IF_FALSE(!m_Pointer, RuntimeException,
+                             "RefMemoryManager::Pool::Reserve() cannot be called after memory acquired");
     m_Size = std::max(m_Size, numBytes);
 }

 void RefMemoryManager::Pool::Acquire()
 {
-    ARMNN_ASSERT_MSG(!m_Pointer, "RefMemoryManager::Pool::Acquire() called when memory already acquired");
+    ARMNN_THROW_MSG_IF_FALSE(!m_Pointer, RuntimeException,
+                             "RefMemoryManager::Pool::Acquire() called when memory already acquired");
     m_Pointer = ::operator new(size_t(m_Size));
 }

 void RefMemoryManager::Pool::Release()
 {
-    ARMNN_ASSERT_MSG(m_Pointer, "RefMemoryManager::Pool::Release() called when memory not acquired");
+    ARMNN_THROW_MSG_IF_FALSE(m_Pointer, RuntimeException,
+                             "RefMemoryManager::Pool::Release() called when memory not acquired");
     ::operator delete(m_Pointer);
     m_Pointer = nullptr;
 }
diff --git a/src/backends/reference/RefTensorHandle.cpp b/src/backends/reference/RefTensorHandle.cpp
index 07f497c54e..1158a14bc4 100644
--- a/src/backends/reference/RefTensorHandle.cpp
+++ b/src/backends/reference/RefTensorHandle.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2019-2023 Arm Ltd. All rights reserved.
+// Copyright © 2019-2024 Arm Ltd. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
@@ -44,8 +44,8 @@ RefTensorHandle::~RefTensorHandle()

 void RefTensorHandle::Manage()
 {
-    ARMNN_ASSERT_MSG(!m_Pool, "RefTensorHandle::Manage() called twice");
-    ARMNN_ASSERT_MSG(!m_UnmanagedMemory, "RefTensorHandle::Manage() called after Allocate()");
+    ARMNN_THROW_MSG_IF_FALSE(!m_Pool, RuntimeException, "RefTensorHandle::Manage() called twice");
+    ARMNN_THROW_MSG_IF_FALSE(!m_UnmanagedMemory, RuntimeException, "RefTensorHandle::Manage() called after Allocate()");

     if (m_MemoryManager)
     {
diff --git a/src/backends/reference/workloads/BaseIterator.hpp b/src/backends/reference/workloads/BaseIterator.hpp
index 694c22913c..5c5fff39d6 100644
--- a/src/backends/reference/workloads/BaseIterator.hpp
+++ b/src/backends/reference/workloads/BaseIterator.hpp
@@ -1,12 +1,11 @@
 //
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #pragma once

 #include
-#include
 #include
 #include
 #include
@@ -78,28 +77,28 @@ public:
     TypedIterator& operator++() override
     {
-        ARMNN_ASSERT(m_Iterator);
+        ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(m_Iterator, "TypedIterator: m_Iterator is null!");
         ++m_Iterator;
         return *this;
     }

     TypedIterator& operator+=(const unsigned int increment) override
     {
-        ARMNN_ASSERT(m_Iterator);
+        ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(m_Iterator, "TypedIterator: m_Iterator is null!");
         m_Iterator += increment;
         return *this;
     }

     TypedIterator& operator-=(const unsigned int increment) override
     {
-        ARMNN_ASSERT(m_Iterator);
+        ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(m_Iterator, "TypedIterator: m_Iterator is null!");
         m_Iterator -= increment;
         return *this;
     }

     TypedIterator& operator[](const unsigned int index) override
     {
-        ARMNN_ASSERT(m_Iterator);
+        ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(m_Iterator, "TypedIterator: m_Iterator is null!");
         m_Iterator = m_Start + index;
         return *this;
     }
@@ -763,7 +762,7 @@ public:
     inline PerAxisIterator& SetIndexOnMem(const unsigned int index)
     {
-        ARMNN_ASSERT(m_Iterator);
+        ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(m_Iterator, "PerAxisIterator: m_Iterator is null!");
         m_Iterator = m_Start + index;
         if (index < m_AxisFactor)
         {
diff --git a/src/backends/reference/workloads/BatchMatMulImpl.cpp b/src/backends/reference/workloads/BatchMatMulImpl.cpp
index c592b3b76c..8e169cbab8 100644
--- a/src/backends/reference/workloads/BatchMatMulImpl.cpp
+++ b/src/backends/reference/workloads/BatchMatMulImpl.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022, 2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
@@ -145,7 +145,6 @@ void BatchMatMul::Adjoint(DataSlot type)
     const auto& dataLayout = (type == DataSlot::InputX) ? params.m_DataLayoutX : params.m_DataLayoutY;
     const auto axesToAdjoint = BatchMatMulDescriptor::GetAxesToMul(dataLayout,inputInfo.GetShape());

-    ARMNN_ASSERT(inputInfo.GetShape()[axesToAdjoint.first] == inputInfo.GetShape()[axesToAdjoint.second]);

     // We grab a copy of the tensor data to prevent overwriting
     std::vector<float> inputDataClone = (type == DataSlot::InputX) ? inputXData : inputYData;
diff --git a/src/backends/reference/workloads/Concatenate.cpp b/src/backends/reference/workloads/Concatenate.cpp
index a0e0abfaa0..fece43cb02 100644
--- a/src/backends/reference/workloads/Concatenate.cpp
+++ b/src/backends/reference/workloads/Concatenate.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017, 2024 Arm Ltd. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
@@ -40,7 +40,9 @@ void Concatenate(const ConcatQueueDescriptor &data,
         //Split view extents are defined by the size of (the corresponding) input tensor.
         const TensorInfo& inputInfo = GetTensorInfo(inputs[viewIdx]);

-        ARMNN_ASSERT(inputInfo.GetNumDimensions() == outputInfo0.GetNumDimensions());
+        ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(
+            inputInfo.GetNumDimensions() == outputInfo0.GetNumDimensions(),
+            "The number of output dimensions does not match the number of input dimensions.");

         // Check all dimensions to see if this element is inside the given input view.
         bool insideView = true;
diff --git a/src/backends/reference/workloads/ConvImpl.cpp b/src/backends/reference/workloads/ConvImpl.cpp
index 320690eb90..098c931853 100644
--- a/src/backends/reference/workloads/ConvImpl.cpp
+++ b/src/backends/reference/workloads/ConvImpl.cpp
@@ -1,12 +1,10 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017, 2024 Arm Ltd. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #include "ConvImpl.hpp"

-#include
-
 #include
 #include
@@ -15,7 +13,8 @@ namespace armnn

 QuantizedMultiplierSmallerThanOne::QuantizedMultiplierSmallerThanOne(float multiplier)
 {
-    ARMNN_ASSERT(multiplier >= 0.0f && multiplier < 1.0f);
+    ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(multiplier >= 0.0f && multiplier < 1.0f,
+                                        "QuantizedMultiplierSmallerThanOne: multiplier must be between 0.0f and 1.0f.");
     if (multiplier == 0.0f)
     {
         m_Multiplier = 0;
@@ -26,14 +25,11 @@ QuantizedMultiplierSmallerThanOne::QuantizedMultiplierSmallerThanOne(float multi
         const double q = std::frexp(multiplier, &m_RightShift);
         m_RightShift = -m_RightShift;
         int64_t qFixed = static_cast<int64_t>(::round(q * (1ll << 31)));
-        ARMNN_ASSERT(qFixed <= (1ll << 31));
         if (qFixed == (1ll << 31))
         {
             qFixed /= 2;
             --m_RightShift;
         }
-        ARMNN_ASSERT(m_RightShift >= 0);
-        ARMNN_ASSERT(qFixed <= std::numeric_limits<int32_t>::max());
         m_Multiplier = static_cast<int32_t>(qFixed);
     }
 }
@@ -61,7 +57,8 @@ int32_t QuantizedMultiplierSmallerThanOne::SaturatingRoundingDoublingHighMul(int

 int32_t QuantizedMultiplierSmallerThanOne::RoundingDivideByPOT(int32_t x, int exponent)
 {
-    ARMNN_ASSERT(exponent >= 0 && exponent <= 31);
+    ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(exponent >= 0 && exponent <= 31,
+                                        "RoundingDivideByPOT: exponent must be between 0 and 31.");
     int32_t mask = (1 << exponent) - 1;
     int32_t remainder = x & mask;
     int32_t threshold = (mask >> 1) + (x < 0 ? 1 : 0);
diff --git a/src/backends/reference/workloads/DepthToSpace.cpp b/src/backends/reference/workloads/DepthToSpace.cpp
index f5e9ec5498..60098d1bf1 100644
--- a/src/backends/reference/workloads/DepthToSpace.cpp
+++ b/src/backends/reference/workloads/DepthToSpace.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2019 Arm Ltd. All rights reserved.
+// Copyright © 2019, 2024 Arm Ltd. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
@@ -8,8 +8,6 @@
 #include
 #include

-#include
-
 using namespace armnnUtils;

 namespace armnn
@@ -22,7 +20,6 @@ void DepthToSpace(const TensorInfo& inputInfo,
                   unsigned int dataTypeSize)
 {
     const unsigned int blockSize = descriptor.m_BlockSize;
-    ARMNN_ASSERT(blockSize != 0u);

     const TensorShape& inputShape = inputInfo.GetShape();
     const unsigned int batches = inputShape[0];
diff --git a/src/backends/reference/workloads/Dequantize.cpp b/src/backends/reference/workloads/Dequantize.cpp
index fdc8e30c75..3955458049 100644
--- a/src/backends/reference/workloads/Dequantize.cpp
+++ b/src/backends/reference/workloads/Dequantize.cpp
@@ -1,12 +1,10 @@
 //
-// Copyright © 2019 Arm Ltd. All rights reserved.
+// Copyright © 2019, 2024 Arm Ltd. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #include "Dequantize.hpp"

-#include
-
 namespace armnn
 {
@@ -15,8 +13,9 @@ void Dequantize(Decoder<float>& inputDecoder,
                 const TensorInfo& inputInfo,
                 const TensorInfo& outputInfo)
 {
-    IgnoreUnused(outputInfo);
-    ARMNN_ASSERT(inputInfo.GetNumElements() == outputInfo.GetNumElements());
+    ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(
+        inputInfo.GetNumElements() == outputInfo.GetNumElements(),
+        "Dequantize: The number of elements in the input and output tensors must be the same.");
     for (unsigned int i = 0; i < inputInfo.GetNumElements(); i++)
     {
         // inputDecoder.Get() dequantizes the data element from whatever
diff --git a/src/backends/reference/workloads/DetectionPostProcess.cpp b/src/backends/reference/workloads/DetectionPostProcess.cpp
index c5ab327f90..361f8865be 100644
--- a/src/backends/reference/workloads/DetectionPostProcess.cpp
+++ b/src/backends/reference/workloads/DetectionPostProcess.cpp
@@ -1,12 +1,10 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017, 2024 Arm Ltd. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #include "DetectionPostProcess.hpp"

-#include
-#include
 #include
 #include
@@ -140,11 +138,11 @@ void AllocateOutputData(unsigned int numOutput,
 void DetectionPostProcess(const TensorInfo& boxEncodingsInfo,
                           const TensorInfo& scoresInfo,
-                          const TensorInfo& anchorsInfo,
+                          const TensorInfo&,
                           const TensorInfo& detectionBoxesInfo,
-                          const TensorInfo& detectionClassesInfo,
-                          const TensorInfo& detectionScoresInfo,
-                          const TensorInfo& numDetectionsInfo,
+                          const TensorInfo&,
+                          const TensorInfo&,
+                          const TensorInfo&,
                           const DetectionPostProcessDescriptor& desc,
                           Decoder<float>& boxEncodings,
                           Decoder<float>& scores,
@@ -154,7 +152,6 @@ void DetectionPostProcess(const TensorInfo& boxEncodingsInfo,
                           float* detectionScores,
                           float* numDetections)
 {
-    IgnoreUnused(anchorsInfo, detectionClassesInfo, detectionScoresInfo, numDetectionsInfo);

     // Transform center-size format which is (ycenter, xcenter, height, width) to box-corner format,
     // which represents the lower left corner and the upper right corner (ymin, xmin, ymax, xmax)
@@ -212,9 +209,6 @@ void DetectionPostProcess(const TensorInfo& boxEncodingsInfo,
         boxCorners[indexH] = yCentre + halfH;
         // xmax
         boxCorners[indexW] = xCentre + halfW;
-
-        ARMNN_ASSERT(boxCorners[indexY] < boxCorners[indexH]);
-        ARMNN_ASSERT(boxCorners[indexX] < boxCorners[indexW]);
     }

     unsigned int numClassesWithBg = desc.m_NumClasses + 1;
diff --git a/src/backends/reference/workloads/FullyConnected.cpp b/src/backends/reference/workloads/FullyConnected.cpp
index 47968f4d88..19c01b8987 100644
--- a/src/backends/reference/workloads/FullyConnected.cpp
+++ b/src/backends/reference/workloads/FullyConnected.cpp
@@ -1,12 +1,10 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017, 2024 Arm Ltd. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #include "FullyConnected.hpp"

-#include
-
 #include "RefWorkloadUtils.hpp"

 namespace armnn
@@ -31,7 +29,6 @@ void FullyConnected(const TensorShape& rInputShape,

     const TensorShape biasShape{outputSize};

-    ARMNN_ASSERT(!biasEnabled || pBiasDecoder != nullptr);
     const std::vector<float> decodedBiases = biasEnabled ? pBiasDecoder->DecodeTensor(biasShape) : std::vector<float>();

diff --git a/src/backends/reference/workloads/LogSoftmax.cpp b/src/backends/reference/workloads/LogSoftmax.cpp
index 2b6384913e..0926894489 100644
--- a/src/backends/reference/workloads/LogSoftmax.cpp
+++ b/src/backends/reference/workloads/LogSoftmax.cpp
@@ -1,13 +1,11 @@
 //
-// Copyright © 2019 Arm Ltd. All rights reserved.
+// Copyright © 2019, 2024 Arm Ltd. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #include "LogSoftmax.hpp"

 #include

-#include
-#include
 #include

 #include
@@ -33,10 +31,8 @@ void LogSoftmax(Decoder<float>& input,
 {
     const unsigned int numDimensions = inputInfo.GetNumDimensions();

-    bool axisIsValid = ValidateAxis(descriptor.m_Axis, numDimensions);
-    ARMNN_ASSERT_MSG(axisIsValid,
-                     "Axis index is not in range [-numDimensions, numDimensions).");
-    IgnoreUnused(axisIsValid);
+    ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(ValidateAxis(descriptor.m_Axis, numDimensions),
+                                        "Axis index is not in range [-numDimensions, numDimensions).");

     unsigned int uAxis = descriptor.m_Axis < 0 ?
                          numDimensions - armnn::numeric_cast<unsigned int>(std::abs(descriptor.m_Axis)) :
diff --git a/src/backends/reference/workloads/MirrorPad.cpp b/src/backends/reference/workloads/MirrorPad.cpp
index 7388fed147..de3b74b263 100644
--- a/src/backends/reference/workloads/MirrorPad.cpp
+++ b/src/backends/reference/workloads/MirrorPad.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021, 2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
@@ -18,8 +18,8 @@ inline std::vector<unsigned int> IndexToCoord(const armnn::TensorShape& shape, u
 {
     unsigned int numOfElements = shape.GetNumElements();

-    ARMNN_ASSERT_MSG(index <= numOfElements, "Index has to be in [0, num_elements]");
-    ARMNN_ASSERT_MSG(numOfElements != 0, "Cannot create coordinate from empty shape");
+    ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(index <= numOfElements, "Index has to be in [0, num_elements]");
+    ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(numOfElements != 0, "Cannot create coordinate from empty shape");

     std::vector<unsigned int> coord(shape.GetNumDimensions());
     for(unsigned int i = 0; i < shape.GetNumDimensions(); ++i)
@@ -36,8 +36,8 @@ inline std::vector<unsigned int> IndexToCoord(const armnn::TensorShape& shape, u
 // E.g. [0, 0, 2] returns 2.
 inline unsigned int CoordToIndex(const armnn::TensorShape& shape, const std::vector<unsigned int>& coord)
 {
-    ARMNN_ASSERT_MSG(shape.GetNumDimensions() != 0, "Cannot get index from empty shape");
-    ARMNN_ASSERT_MSG(coord.size() != 0, "Cannot get index of empty coordinate");
+    ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(shape.GetNumDimensions() != 0, "Cannot get index from empty shape");
+    ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(coord.size() != 0, "Cannot get index of empty coordinate");

     unsigned int index  = 0;
     unsigned int dimSize = 1;
diff --git a/src/backends/reference/workloads/Reduce.cpp b/src/backends/reference/workloads/Reduce.cpp
index 8b28a61388..6ea333b405 100644
--- a/src/backends/reference/workloads/Reduce.cpp
+++ b/src/backends/reference/workloads/Reduce.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021, 2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
@@ -153,8 +153,6 @@ void Reduce(const TensorInfo& inputInfo,
     for (unsigned int idx = 0; idx < numResolvedAxis; ++idx)
     {
         unsigned int current = inputDims[resolvedAxis[idx]];
-        ARMNN_ASSERT(armnn::numeric_cast(current) <
-                     (std::numeric_limits::max() / armnn::numeric_cast(numElementsInAxis)));
         numElementsInAxis *= current;
     }
diff --git a/src/backends/reference/workloads/RefLogSoftmaxWorkload.cpp b/src/backends/reference/workloads/RefLogSoftmaxWorkload.cpp
index e45d24a0bd..47c537cf84 100644
--- a/src/backends/reference/workloads/RefLogSoftmaxWorkload.cpp
+++ b/src/backends/reference/workloads/RefLogSoftmaxWorkload.cpp
@@ -12,8 +12,6 @@
 #include

-#include
-
 namespace armnn
 {
@@ -38,9 +36,6 @@ void RefLogSoftmaxWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vec
     std::unique_ptr<Decoder<float>> decoder = MakeDecoder<float>(inputInfo, inputs[0]->Map());
     std::unique_ptr<Encoder<float>> encoder = MakeEncoder<float>(outputInfo, outputs[0]->Map());

-    ARMNN_ASSERT(decoder != nullptr);
-    ARMNN_ASSERT(encoder != nullptr);
-
     LogSoftmax(*decoder, *encoder, inputInfo, m_Data.m_Parameters);
 }
diff --git a/src/backends/reference/workloads/RefStridedSliceWorkload.cpp b/src/backends/reference/workloads/RefStridedSliceWorkload.cpp
index c4a4f7f593..1dc95a2a19 100644
--- a/src/backends/reference/workloads/RefStridedSliceWorkload.cpp
+++ b/src/backends/reference/workloads/RefStridedSliceWorkload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2018-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2018-2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
@@ -31,13 +31,8 @@ void RefStridedSliceWorkload::Execute(std::vector<ITensorHandle*> inputs, std::v
     ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID("RefStridedSliceWorkload_Execute");

     const TensorInfo& inputInfo = GetTensorInfo(inputs[0]);
-    const TensorInfo& outputInfo = GetTensorInfo(outputs[0]);

     DataType inputDataType = inputInfo.GetDataType();
-    DataType outputDataType = outputInfo.GetDataType();
-
-    ARMNN_ASSERT(inputDataType == outputDataType);
-    IgnoreUnused(outputDataType);

     StridedSlice(inputInfo,
                  m_Data.m_Parameters,
diff --git a/src/backends/reference/workloads/Resize.cpp b/src/backends/reference/workloads/Resize.cpp
index e80a2057e0..7bed6c6056 100644
--- a/src/backends/reference/workloads/Resize.cpp
+++ b/src/backends/reference/workloads/Resize.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017, 2024 Arm Ltd. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
@@ -72,7 +72,8 @@ void Resize(Decoder<float>& in,
             bool halfPixelCenters)
 {
     // alignCorners and halfPixelCenters cannot both be true
-    ARMNN_ASSERT(!(alignCorners && halfPixelCenters));
+    ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(!(alignCorners && halfPixelCenters),
+                                        "Resize: alignCorners and halfPixelCenters cannot both be true");

     // We follow the definition of TensorFlow and AndroidNN: the top-left corner of a texel in the output
     // image is projected into the input image to figure out the interpolants and weights. Note that this
diff --git a/src/backends/reference/workloads/Softmax.cpp b/src/backends/reference/workloads/Softmax.cpp
index 00d496db85..d792361456 100644
--- a/src/backends/reference/workloads/Softmax.cpp
+++ b/src/backends/reference/workloads/Softmax.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017, 2024 Arm Ltd. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
@@ -16,10 +16,10 @@ namespace armnn
 /// Computes the softmax function on some inputs, into outputs, with a shape given by tensorInfo.
 void Softmax(Decoder<float>& in, Encoder<float>& out, const TensorInfo& inputTensorInfo, float beta, int axis)
 {
-    ARMNN_ASSERT_MSG(axis < static_cast<int>(inputTensorInfo.GetNumDimensions()),
-                     "Required axis index greater than number of dimensions.");
-    ARMNN_ASSERT_MSG(axis >= -static_cast<int>(inputTensorInfo.GetNumDimensions()),
-                     "Required axis index lower than negative of the number of dimensions");
+    ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(axis < static_cast<int>(inputTensorInfo.GetNumDimensions()),
+                                        "Required axis index greater than number of dimensions.");
+    ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(axis >= -static_cast<int>(inputTensorInfo.GetNumDimensions()),
+                                        "Required axis index lower than negative of the number of dimensions");

     unsigned int uAxis = axis < 0 ?
                          inputTensorInfo.GetNumDimensions() - static_cast<unsigned int>(abs(axis)) :
diff --git a/src/backends/reference/workloads/Splitter.cpp b/src/backends/reference/workloads/Splitter.cpp
index 695ae8a088..963e3aa6f3 100644
--- a/src/backends/reference/workloads/Splitter.cpp
+++ b/src/backends/reference/workloads/Splitter.cpp
@@ -1,12 +1,11 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017, 2024 Arm Ltd. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #include "RefWorkloadUtils.hpp"
 #include
 #include
-#include
 #include "Splitter.hpp"

 #include
@@ -48,7 +47,9 @@ void Split(const SplitterQueueDescriptor& data,
         //Split view extents are defined by the size of (the corresponding) input tensor.
         const TensorInfo& outputInfo = GetTensorInfo(outputs[viewIdx]);

-        ARMNN_ASSERT(outputInfo.GetNumDimensions() == inputInfo.GetNumDimensions());
+        ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(
+            outputInfo.GetNumDimensions() == inputInfo.GetNumDimensions(),
+            "The number of output dimensions does not match the number of input dimensions.");

         // Check all dimensions to see if this element is inside the given input view.
         bool insideView = true;
diff --git a/src/backends/reference/workloads/Splitter.hpp b/src/backends/reference/workloads/Splitter.hpp
index 730b071497..f05f654a0c 100644
--- a/src/backends/reference/workloads/Splitter.hpp
+++ b/src/backends/reference/workloads/Splitter.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017, 2024 Arm Ltd. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
@@ -40,7 +40,9 @@ void Splitter(const SplitterQueueDescriptor& data,
         //Split view extents are defined by the size of (the corresponding) input tensor.
         const TensorInfo& outputInfo = GetTensorInfo(outputs[viewIdx]);

-        ARMNN_ASSERT(outputInfo.GetNumDimensions() == inputInfo0.GetNumDimensions());
+        ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(
+            outputInfo.GetNumDimensions() == inputInfo0.GetNumDimensions(),
+            "The number of output dimensions does not match the number of input dimensions.");

         // Check all dimensions to see if this element is inside the given input view.
         bool insideView = true;
@@ -69,11 +71,7 @@ void Splitter(const SplitterQueueDescriptor& data,
                 //We are within the view, to copy input data to the output corresponding to this view.
                 DataType* outputData = GetOutputTensorData<DataType>(viewIdx, data);
-                ARMNN_ASSERT(outputData);
-
                 const DataType* inputData = GetInputTensorData<DataType>(0, data);
-                ARMNN_ASSERT(inputData);
-
                 outputData[outIndex] = inputData[index];
             }
         }
diff --git a/src/backends/reference/workloads/StridedSlice.cpp b/src/backends/reference/workloads/StridedSlice.cpp
index 68600c9a95..fcd1c357f8 100644
--- a/src/backends/reference/workloads/StridedSlice.cpp
+++ b/src/backends/reference/workloads/StridedSlice.cpp
@@ -1,13 +1,10 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017, 2024 Arm Ltd. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #include "StridedSlice.hpp"

-#include
-
-#include
 #include
 #include
@@ -20,12 +17,11 @@ namespace

 void PadParams(StridedSliceDescriptor& p, unsigned int dimCount)
 {
-    ARMNN_ASSERT_MSG(dimCount <= 4, "Expected input with at most 4 dimensions");
+    ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(dimCount <= 4, "Expected input with at most 4 dimensions");

     const unsigned int beginIndicesCount = armnn::numeric_cast<unsigned int>(p.m_Begin.size());

-    ARMNN_ASSERT(dimCount >= beginIndicesCount);
     const unsigned int padCount = dimCount - beginIndicesCount;

     p.m_Begin.resize(dimCount);
diff --git a/src/backends/reference/workloads/TensorBufferArrayView.hpp b/src/backends/reference/workloads/TensorBufferArrayView.hpp
index 0b448e6196..c6a7571a92 100644
--- a/src/backends/reference/workloads/TensorBufferArrayView.hpp
+++ b/src/backends/reference/workloads/TensorBufferArrayView.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017, 2024 Arm Ltd. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
@@ -9,8 +9,6 @@
 #include

-#include
-
 namespace armnn
 {
@@ -25,7 +23,8 @@ public:
         , m_Data(data)
         , m_DataLayout(dataLayout)
     {
-        ARMNN_ASSERT(m_Shape.GetNumDimensions() == 4);
+        ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(m_Shape.GetNumDimensions() == 4,
+                                            "Only 4d tensors are supported by TensorBufferArrayView.");
     }

     DataType& Get(unsigned int b, unsigned int c, unsigned int h, unsigned int w) const
--
cgit v1.2.1
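
For reference, the two throwing macros used throughout this patch, ARMNN_THROW_MSG_IF_FALSE and
ARMNN_THROW_INVALIDARG_MSG_IF_FALSE, are provided by armnn/Exceptions.hpp. The sketch below is an
illustrative approximation of the pattern they implement; the EXAMPLE_* names and macro bodies are
assumptions made for illustration, not the actual Arm NN definitions.

    #include <armnn/Exceptions.hpp>

    // Throw the given armnn exception type when the condition evaluates to false.
    #define EXAMPLE_THROW_MSG_IF_FALSE(_cond, _except, _str) \
        do { if (!(_cond)) { throw armnn::_except(_str); } } while (false)

    // Convenience form that always throws armnn::InvalidArgumentException.
    #define EXAMPLE_THROW_INVALIDARG_MSG_IF_FALSE(_cond, _str) \
        EXAMPLE_THROW_MSG_IF_FALSE(_cond, InvalidArgumentException, _str)

    // Usage mirroring the replacements made in this patch.
    void Allocate(void* pool)
    {
        EXAMPLE_THROW_INVALIDARG_MSG_IF_FALSE(pool != nullptr,
                                              "Null memory manager passed to RefMemoryManager.");
        // ... manage the pool ...
    }

Unlike ARMNN_ASSERT, which is assert-based and typically compiled out of release builds, these
checks stay active in every build configuration, so an invalid argument or invalid internal state
surfaces as a catchable armnn exception rather than an aborted process.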