author    Jim Flynn <jim.flynn@arm.com>           2023-09-16 18:53:52 +0100
committer TeresaARM <teresa.charlinreyes@arm.com> 2023-09-19 13:06:09 +0000
commit    987398f3c1b93cc63f6c25da8b9a20c68e7e8270 (patch)
tree      4b6bb4f6226db3f14d7b8f99039c9003bf2c468c /shim
parent    93b6064ca97c01a13d201a3829c24d25beaa457c (diff)
download  armnn-987398f3c1b93cc63f6c25da8b9a20c68e7e8270.tar.gz
IVGCVSW-7855 removed ASSERTs from shim code
Signed-off-by: Jim Flynn <jim.flynn@arm.com>
Change-Id: I8b34e74800ebdb43e1b3f996eacd6c3360a331eb
Diffstat (limited to 'shim')
-rw-r--r--  shim/sl/canonical/CanonicalUtils.cpp  |  7
-rw-r--r--  shim/sl/canonical/ConversionUtils.cpp | 31
-rw-r--r--  shim/sl/canonical/ConversionUtils.hpp | 47
-rw-r--r--  shim/sl/canonical/Converter.cpp       | 12
4 files changed, 80 insertions(+), 17 deletions(-)
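
Every hunk below applies the same pattern: an ARMNN_ASSERT, which only fires in debug builds, becomes an explicit runtime check that throws an armnn exception in every build type. Taking the profiler check from CanonicalUtils.cpp as the representative case, the before/after shape is:

    // Before: debug-only; in a release build the macro expands to nothing
    // and a null profiler would be dereferenced further down.
    ARMNN_ASSERT(profiler);

    // After: always compiled in, and the failure is a catchable exception
    // rather than a process abort (debug) or silent corruption (release).
    if (profiler == nullptr)
    {
        throw armnn::InvalidArgumentException(
            "DumpJsonProfilingIfRequired: pointer to profiler handed in is null");
    }

The exception type tracks the failure: armnn::InvalidArgumentException where a caller handed in a bad pointer or index, armnn::NullPointerException or the base armnn::Exception where an internal pointer is unexpectedly null.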
diff --git a/shim/sl/canonical/CanonicalUtils.cpp b/shim/sl/canonical/CanonicalUtils.cpp
index 08a728c293..5afd8bd063 100644
--- a/shim/sl/canonical/CanonicalUtils.cpp
+++ b/shim/sl/canonical/CanonicalUtils.cpp
@@ -366,7 +366,10 @@ void DumpJsonProfilingIfRequired(bool gpuProfilingEnabled,
return;
}
- ARMNN_ASSERT(profiler);
+ if (profiler == nullptr)
+ {
+ throw armnn::InvalidArgumentException("DumpJsonProfilingIfRequired: pointer to profiler handed in is null");
+ }
// Set the name of the output profiling file.
fs::path dumpPath = dumpDir;
@@ -499,7 +502,7 @@ bool IsDynamicTensor(const armnn::TensorInfo& tensorInfo)
return !tensorInfo.GetShape().AreAllDimensionsSpecified();
}
-bool AreDynamicTensorsSupported()
+bool AreDynamicTensorsSupported()
{
return true;
}
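
For context on why these checks were previously silent in production: ARMNN_ASSERT comes from armnn/utility/Assert.hpp and wraps the standard assert, so it compiles away when NDEBUG is defined. A paraphrase of the macro's shape (illustrative, not the verbatim header):

    // Paraphrase of armnn/utility/Assert.hpp, for illustration only:
    #include <cassert>
    #ifndef NDEBUG
    #   define ARMNN_ASSERT(COND) assert(COND)
    #else
    #   define ARMNN_ASSERT(COND)   // expands to nothing in release builds
    #endif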
diff --git a/shim/sl/canonical/ConversionUtils.cpp b/shim/sl/canonical/ConversionUtils.cpp
index 8c31a92ff2..ebe3bc4d3d 100644
--- a/shim/sl/canonical/ConversionUtils.cpp
+++ b/shim/sl/canonical/ConversionUtils.cpp
@@ -4,6 +4,7 @@
//
#include "ConversionUtils.hpp"
+#include <armnn/Exceptions.hpp>
#include <armnnUtils/Permute.hpp>
///
@@ -31,7 +32,10 @@ bool LayerInputHandle::IsValid() const
void LayerInputHandle::Connect(armnn::IInputSlot& inputSlot)
{
- ARMNN_ASSERT(IsValid());
+ if (!IsValid())
+ {
+ throw armnn::Exception("cannot invoke Connect on an invalid LayerInputHandle");
+ }
if (m_OutputSlot)
{
m_OutputSlot->Connect(inputSlot);
@@ -40,7 +44,10 @@ void LayerInputHandle::Connect(armnn::IInputSlot& inputSlot)
void LayerInputHandle::Disconnect(armnn::IInputSlot& inputSlot)
{
- ARMNN_ASSERT(IsValid());
+ if (!IsValid())
+ {
+ throw armnn::Exception("cannot invoke Disconnect on an invalid LayerInputHandle");
+ }
if (m_OutputSlot)
{
m_OutputSlot->Disconnect(inputSlot);
@@ -643,8 +650,11 @@ bool ConvertToActivation(const Operation& operation,
}
armnn::IConnectableLayer* layer = data.m_Network->AddActivationLayer(activationDesc);
+ if (layer == nullptr)
+ {
+ throw armnn::NullPointerException("failed to add activation layer to network");
+ }
layer->SetBackendId(setBackend);
- ARMNN_ASSERT(layer != nullptr);
input.Connect(layer->GetInputSlot(0));
return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
@@ -692,7 +702,10 @@ DequantizeResult DequantizeIfRequired(size_t operand_index,
}
const Operand* operand = GetInputOperand(operationIt, 0, model);
- ARMNN_ASSERT(operand);
+ if (operand == nullptr)
+ {
+ throw armnn::Exception("failed to get input operand 0");
+ }
if (!IsQSymm8(*operand))
{
@@ -716,7 +729,10 @@ DequantizeResult DequantizeIfRequired(size_t operand_index,
for (size_t i = 0; i < dequantizedBufferLength; ++i)
{
float* dstPtr = dequantizedBuffer.get();
- ARMNN_ASSERT(dstPtr);
+ if (dstPtr == nullptr)
+ {
+ throw armnn::NullPointerException("dequantizedBuffer unique pointer is null");
+ }
*dstPtr++ = quantizedBuffer[i] * quantizationScale;
}
@@ -892,7 +908,10 @@ armnn::IConnectableLayer* ProcessActivation(const armnn::TensorInfo& tensorInfo,
armnn::IConnectableLayer* prevLayer,
ConversionData& data)
{
- ARMNN_ASSERT(prevLayer->GetNumOutputSlots() == 1);
+ if (prevLayer->GetNumOutputSlots() != 1)
+ {
+ throw armnn::Exception("ProcessActivation: previous layer does not have a single output slot");
+ }
prevLayer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
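
At a call site the new failure mode is an ordinary C++ exception. The fragment below is an illustrative sketch, not part of the patch: the activation parameter name is an assumption (the hunk header truncates the signature), and the ALOGE call merely leans on the <log/log.h> include that ConversionUtils.hpp already pulls in.

    // Hypothetical call site (sketch, not from this patch):
    try
    {
        armnn::IConnectableLayer* endLayer =
            ProcessActivation(tensorInfo, activation, prevLayer, data);
        static_cast<void>(endLayer);
    }
    catch (const armnn::Exception& e)
    {
        // Pre-patch, a release build would have sailed past the assert and
        // crashed later; now conversion can fail with a diagnosable message.
        ALOGE("ProcessActivation failed: %s", e.what());
    }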
diff --git a/shim/sl/canonical/ConversionUtils.hpp b/shim/sl/canonical/ConversionUtils.hpp
index d4b4d92a37..6ee3dbc987 100644
--- a/shim/sl/canonical/ConversionUtils.hpp
+++ b/shim/sl/canonical/ConversionUtils.hpp
@@ -9,6 +9,7 @@
#include <armnn/ArmNN.hpp>
#include <armnn/BackendHelper.hpp>
+#include <armnn/Exceptions.hpp>
#include <armnn/utility/Assert.hpp>
#include <armnn/utility/IgnoreUnused.hpp>
#include <armnn/utility/NumericCast.hpp>
@@ -23,6 +24,7 @@
#include <armnnUtils/FloatingPointComparison.hpp>
#include <log/log.h>
+#include <sstream>
#include <vector>
inline const android::nn::Model::Subgraph& getMainModel(const android::nn::Model& model) { return model.main; }
@@ -233,7 +235,10 @@ armnn::IConnectableLayer& AddReshapeLayer(armnn::INetwork& network,
reshapeDescriptor.m_TargetShape = reshapeInfo.GetShape();
armnn::IConnectableLayer* reshapeLayer = network.AddReshapeLayer(reshapeDescriptor);
- ARMNN_ASSERT(reshapeLayer != nullptr);
+ if (reshapeLayer == nullptr)
+ {
+ throw armnn::Exception("failed to add reshape layer to network");
+ }
// Attach the input layer to the reshape layer
inputLayer.Connect(reshapeLayer->GetInputSlot(0));
@@ -280,7 +285,10 @@ bool BroadcastTensor(LayerInputHandle& input0,
armnn::IConnectableLayer* startLayer,
ConversionData& data)
{
- ARMNN_ASSERT(startLayer != nullptr);
+ if (startLayer == nullptr)
+ {
+ throw armnn::InvalidArgumentException("BroadcastTensor: startLayer pointer handed in is null");
+ }
const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
const armnn::TensorInfo& inputInfo1 = input1.GetTensorInfo();
@@ -337,7 +345,11 @@ bool BroadcastTensor(LayerInputHandle& input0,
return false;
}
- ARMNN_ASSERT(data.m_Network != nullptr);
+ if (data.m_Network == nullptr)
+ {
+ throw armnn::InvalidArgumentException(
+ "BroadcastTensor: the conversion data handed in has a null network pointer");
+ }
armnn::IConnectableLayer& reshapeLayer = AddReshapeLayer(*data.m_Network, smallInputHandle, reshapedInfo);
reshapeLayer.SetBackendId(setBackend);
@@ -468,7 +480,10 @@ armnn::IConnectableLayer& AddTransposeLayer(armnn::INetwork& network, OSlot& inp
// Add swizzle layer
armnn::IConnectableLayer* const layer = network.AddTransposeLayer(mappings);
- ARMNN_ASSERT(layer != nullptr);
+ if (layer == nullptr)
+ {
+ throw armnn::Exception("failed to add transpose layer to network");
+ }
// Connect input to swizzle layer
input.Connect(layer->GetInputSlot(0));
@@ -596,7 +611,11 @@ bool CreateConcatPermutationParameters(const unsigned int numberOfDimensions,
std::pair<armnn::PermutationVector, armnn::PermutationVector> & permutationPair)
{
bool needPermute = false;
- ARMNN_ASSERT(numberOfDimensions >= 3);
+ if (numberOfDimensions < 3)
+ {
+ throw armnn::InvalidArgumentException(
+ "CreateConcatPermutationParameters: numberOfDimensions handed in cannot be less than three");
+ }
// ArmNN uses Compute Library subtensors to perform concatenation
// This only works when concatenating along dimension 0, 1 or 3 for a 4-D tensor,
@@ -655,7 +674,14 @@ inline const Operand* GetInputOperand(const Operation& operation,
}
// Model should have been validated beforehand
- ARMNN_ASSERT(operation.inputs[inputIndex] < getMainModel(model).operands.size());
+ if (!(operation.inputs[inputIndex] < getMainModel(model).operands.size()))
+ {
+ std::ostringstream os;
+ os << "GetInputOperand: inputIndex [" << inputIndex << "]";
+ os << " is too large. The number of main model operands is [";
+ os << getMainModel(model).operands.size() << "]";
+ throw armnn::InvalidArgumentException(os.str());
+ }
return &getMainModel(model).operands[operation.inputs[inputIndex]];
}
@@ -670,7 +696,14 @@ inline const Operand* GetOutputOperand(const Operation& operation,
}
// Model should have been validated beforehand
- ARMNN_ASSERT(operation.outputs[outputIndex] < getMainModel(model).operands.size());
+ if (!(operation.outputs[outputIndex] < getMainModel(model).operands.size()))
+ {
+ std::ostringstream os;
+ os << "GetOutputOperand: outputIndex [" << outputIndex << "]";
+ os << " is too large. The number of main model operands is [";
+ os << getMainModel(model).operands.size() << "]";
+ throw armnn::InvalidArgumentException(os.str());
+ }
return &getMainModel(model).operands[operation.outputs[outputIndex]];
}
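
GetInputOperand and GetOutputOperand now carry near-identical bounds checks, which is also why <sstream> joins the header's includes. A hypothetical helper, not part of the patch, could factor the std::ostringstream message-building out:

    // Hypothetical refactoring sketch, not part of this patch:
    inline void CheckOperandIndex(const char* caller,
                                  const char* indexName,
                                  uint32_t index,
                                  size_t numOperands)
    {
        if (!(index < numOperands))
        {
            std::ostringstream os;
            os << caller << ": " << indexName << " [" << index << "]"
               << " is too large. The number of main model operands is ["
               << numOperands << "]";
            throw armnn::InvalidArgumentException(os.str());
        }
    }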
diff --git a/shim/sl/canonical/Converter.cpp b/shim/sl/canonical/Converter.cpp
index a00a0af04c..4ef29a1541 100644
--- a/shim/sl/canonical/Converter.cpp
+++ b/shim/sl/canonical/Converter.cpp
@@ -5,6 +5,7 @@
#include "Converter.hpp"
#include <half/half.hpp>
+#include <armnn/Exceptions.hpp>
#include <armnnUtils/TensorUtils.hpp>
namespace armnn_driver
@@ -890,7 +891,11 @@ bool Converter::ConvertConcatenation(const Operation& operation, const Model& mo
if (isDynamicTensor)
{
// Infer the output shapes of concat if outputs are type 1 dynamic
- ARMNN_ASSERT(layer->GetOutputSlot(0).IsTensorInfoSet());
+ if (!layer->GetOutputSlot(0).IsTensorInfoSet())
+ {
+ throw armnn::Exception(
+ "tensor info is not set on output slot, cannot process dynamic tensor after input reshape");
+ }
if (!ValidateConcatOutputShape(inputShapes,
layer->GetOutputSlot(0).GetTensorInfo().GetShape(),
concatDim))
@@ -4534,8 +4539,11 @@ bool Converter::ConvertReLu(const Operation& operation, const Model& model, Conv
}
armnn::IConnectableLayer* layer = data.m_Network->AddActivationLayer(desc);
+ if (layer == nullptr)
+ {
+ throw armnn::NullPointerException("failed to add activation layer to network");
+ }
layer->SetBackendId(setBackend);
- ARMNN_ASSERT(layer != nullptr);
input.Connect(layer->GetInputSlot(0));
return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
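
A quick way to see the behavioural change end to end is the LayerInputHandle path from ConversionUtils.cpp. The sketch below assumes a default-constructed LayerInputHandle reports IsValid() == false and borrows an input slot from a throwaway network; both assumptions come from the surrounding driver code, not this patch.

    // Sketch: Connect on an invalid handle now throws in all build types.
    armnn::INetworkPtr network = armnn::INetwork::Create();
    armnn::IConnectableLayer* floorLayer = network->AddFloorLayer();
    armnn_driver::LayerInputHandle invalidHandle; // assumed IsValid() == false
    try
    {
        invalidHandle.Connect(floorLayer->GetInputSlot(0));
    }
    catch (const armnn::Exception& e)
    {
        // Previously ARMNN_ASSERT(IsValid()) aborted debug builds and did
        // nothing in release; now both get this catchable exception.
        ALOGE("Connect on invalid handle: %s", e.what());
    }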