path: root/src
author     Ryan OShea <ryan.oshea3@arm.com>    2023-06-27 22:34:54 +0100
committer  TeresaARM <teresa.charlinreyes@arm.com>    2023-08-04 08:34:55 +0000
commit     c229b3fd81b42140c0fa8731e90bc07323cec794 (patch)
tree       1962789e2810be81cd56a2084b0f7a962f0e4e38 /src
parent     c377eb8305e6fdc0f4d00bb4766827fc3087bf25 (diff)
download   armnn-c229b3fd81b42140c0fa8731e90bc07323cec794.tar.gz
IVGCVSW-7676 Audit the use of ARMNN_ASSERT
* Replace most ARMNN_ASSERTs in the TfLite parser
* Replace most ARMNN_ASSERTs in the ONNX parser
* Replace some ARMNN_ASSERTs in the TfLite delegate
* Replace some ARMNN_ASSERTs in include files

Signed-off-by: Ryan OShea <ryan.oshea3@arm.com>
Change-Id: Ie052e0180060203f28f64ebf54acad298f431caf
Diffstat (limited to 'src')
-rw-r--r--  src/armnnOnnxParser/OnnxParser.cpp      | 149
-rw-r--r--  src/armnnTfLiteParser/TfLiteParser.cpp  | 431
2 files changed, 480 insertions, 100 deletions
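
All of the replacements follow one pattern: an ARMNN_ASSERT / ARMNN_ASSERT_MSG on parser state (which compiles out of release builds) becomes an explicit runtime check that throws armnn::ParseException for malformed model input or armnn::NullPointerException for a null layer pointer, with the failure location appended via CHECK_LOCATION().AsString(). The stand-alone C++ sketch below illustrates the idea; the local ParseException class, LOCATION_STRING macro and CheckLayer helper are stand-ins for the Arm NN types and are not part of its API.

    // Illustrative sketch of the assert-to-exception pattern used in this patch.
    // ParseException here is a local stand-in for armnn::ParseException, and
    // CHECK_LOCATION() is approximated with __FILE__/__LINE__.
    #include <stdexcept>
    #include <string>

    class ParseException : public std::runtime_error
    {
    public:
        explicit ParseException(const std::string& message) : std::runtime_error(message) {}
    };

    #define LOCATION_STRING() (std::string(__FILE__) + ":" + std::to_string(__LINE__))

    struct IConnectableLayer {}; // stand-in for armnn::IConnectableLayer

    // Before: ARMNN_ASSERT(layer != nullptr);  -- removed in release builds.
    // After: an explicit check that survives release builds and reports where
    // the failure happened instead of aborting the process.
    void CheckLayer(const IConnectableLayer* layer)
    {
        if (!layer)
        {
            throw ParseException("Layer pointer is null at " + LOCATION_STRING());
        }
    }

    int main()
    {
        IConnectableLayer layer;
        CheckLayer(&layer);      // passes
        // CheckLayer(nullptr);  // would throw ParseException instead of aborting
        return 0;
    }
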
diff --git a/src/armnnOnnxParser/OnnxParser.cpp b/src/armnnOnnxParser/OnnxParser.cpp
index c0b42d9033..f165df9e14 100644
--- a/src/armnnOnnxParser/OnnxParser.cpp
+++ b/src/armnnOnnxParser/OnnxParser.cpp
@@ -500,7 +500,11 @@ std::vector<TensorInfo> OnnxParserImpl::ComputeOutputInfo(std::vector<std::strin
std::vector<TensorShape> inputShapes,
const onnx::TensorProto::DataType& dataType)
{
- ARMNN_ASSERT(! outNames.empty());
+ if (outNames.empty())
+ {
+ throw armnn::ParseException(fmt::format("Output names are empty {}", CHECK_LOCATION().AsString()));
+ }
+
bool needCompute = std::any_of(outNames.begin(),
outNames.end(),
[this](std::string name)
@@ -516,7 +520,11 @@ std::vector<TensorInfo> OnnxParserImpl::ComputeOutputInfo(std::vector<std::strin
DataType armnnType = DataType::Float32;
if(needCompute) {
inferredShapes = layer->InferOutputShapes(inputShapes);
- ARMNN_ASSERT(inferredShapes.size() == outNames.size());
+ if (inferredShapes.size() != outNames.size())
+ {
+ throw armnn::ParseException(fmt::format("Inferred shapes does not match number of output names {}",
+ CHECK_LOCATION().AsString()));
+ }
switch (dataType) {
case onnx::TensorProto::FLOAT: {
armnnType = DataType::Float32;
@@ -579,7 +587,10 @@ CreateConstTensorImpl(const T* bufferPtr,
armnn::TensorInfo& tensorInfo,
const armnn::Optional<armnn::PermutationVector&> permutationVector)
{
- ARMNN_ASSERT_MSG(bufferPtr != nullptr, fmt::format("Buffer for permutation is null").c_str());
+ if (bufferPtr == nullptr)
+ {
+ throw armnn::ParseException(fmt::format("Buffer for permutation is null {}", CHECK_LOCATION().AsString()));
+ }
std::unique_ptr<T[]> data(new T[tensorInfo.GetNumElements()]);
@@ -879,7 +890,10 @@ INetworkPtr OnnxParserImpl::CreateNetworkFromModel(onnx::ModelProto& model)
void OnnxParserImpl::LoadGraph()
{
- ARMNN_ASSERT(m_Graph.get() != nullptr);
+ if (m_Graph.get() == nullptr)
+ {
+ throw armnn::ParseException(fmt::format("Graph pointer is null {}", CHECK_LOCATION().AsString()));
+ }
//Fill m_TensorsInfo with the shapes and value of every tensor
SetupInfo(m_Graph->mutable_output());
@@ -1131,7 +1145,10 @@ void OnnxParserImpl::AddConvLayerWithDepthwiseConv(const onnx::NodeProto& node,
biasLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(2u));
}
- ARMNN_ASSERT(layer != nullptr);
+ if (!layer)
+ {
+ throw armnn::NullPointerException(fmt::format("Layer pointer is null {}", CHECK_LOCATION().AsString()));
+ }
auto outputInfo = ComputeOutputInfo({ node.output(0) }, layer,
{ m_TensorsInfo[node.input(0)].m_info->GetShape(),
@@ -1208,7 +1225,11 @@ void OnnxParserImpl::AddFullyConnected(const onnx::NodeProto& matmulNode, const
// Just add a FullyConnected layer, weights and biases are handled as inputs now.
layer = m_Network->AddFullyConnectedLayer(desc, matmulNode.name().c_str());
- ARMNN_ASSERT(layer != nullptr);
+
+ if (!layer)
+ {
+ throw armnn::NullPointerException(fmt::format("Layer pointer is null {}", CHECK_LOCATION().AsString()));
+ }
if (inputInfo.GetNumDimensions() > 2)
{
@@ -1386,7 +1407,11 @@ void OnnxParserImpl::AddPoolingLayer(const onnx::NodeProto& node, Pooling2dDescr
}
IConnectableLayer* layer = m_Network->AddPooling2dLayer(desc, node.name().c_str());
- ARMNN_ASSERT(layer != nullptr);
+
+ if (!layer)
+ {
+ throw armnn::NullPointerException(fmt::format("Layer pointer is null {}", CHECK_LOCATION().AsString()));
+ }
auto outputInfo = ComputeOutputInfo({node.output(0)}, layer, {m_TensorsInfo[node.input(0)].m_info->GetShape()});
layer->GetOutputSlot(0).SetTensorInfo(outputInfo[0]);
@@ -1447,7 +1472,12 @@ void OnnxParserImpl::CreateReshapeLayer(const std::string& inputName,
reshapeDesc.m_TargetShape = outputTensorInfo.GetShape();
IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, layerName.c_str());
- ARMNN_ASSERT(layer != nullptr);
+
+ if (!layer)
+ {
+ throw armnn::NullPointerException(fmt::format("Layer pointer is null {}", CHECK_LOCATION().AsString()));
+ }
+
layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
// register the input connection slots for the layer, connections are made after all layers have been created
@@ -1483,7 +1513,11 @@ void OnnxParserImpl::ParseActivation(const onnx::NodeProto& node, const armnn::A
}
IConnectableLayer* const layer = m_Network->AddActivationLayer(desc, node.name().c_str());
- ARMNN_ASSERT(layer != nullptr);
+
+ if (!layer)
+ {
+ throw armnn::NullPointerException(fmt::format("Layer pointer is null {}", CHECK_LOCATION().AsString()));
+ }
auto outputInfo = ComputeOutputInfo({ node.output(0)}, layer, {m_TensorsInfo[node.input(0)].m_info->GetShape()});
layer->GetOutputSlot(0).SetTensorInfo(outputInfo[0]);
@@ -1534,7 +1568,12 @@ void OnnxParserImpl::ParseAdd(const onnx::NodeProto& node)
auto inputs = AddPrepareBroadcast(node.input(0), node.input(1));
auto input0 = *m_TensorsInfo[inputs.first].m_info;
auto input1 = *m_TensorsInfo[inputs.second].m_info;
- ARMNN_ASSERT(input0.GetNumDimensions() == input1.GetNumDimensions());
+ if (input0.GetNumDimensions() != input1.GetNumDimensions())
+ {
+ throw armnn::ParseException(fmt::format("Dimension mismatch in node {} {}",
+ node.name(),
+ CHECK_LOCATION().AsString()));
+ }
unsigned int numDims = input0.GetNumDimensions();
for (unsigned int i = 0; i < numDims; i++)
@@ -1558,7 +1597,11 @@ void OnnxParserImpl::ParseAdd(const onnx::NodeProto& node)
IConnectableLayer* layer = m_Network->AddElementwiseBinaryLayer(BinaryOperation::Add, node.name().c_str());
- ARMNN_ASSERT(layer != nullptr);
+
+ if (!layer)
+ {
+ throw armnn::NullPointerException(fmt::format("Layer pointer is null {}", CHECK_LOCATION().AsString()));
+ }
auto outputInfo = ComputeOutputInfo({ node.output(0) }, layer,
{ m_TensorsInfo[inputs.first].m_info->GetShape(),
@@ -1627,7 +1670,11 @@ void OnnxParserImpl::ParseBatchNormalization(const onnx::NodeProto& node)
biasTensor.first,
scaleTensor.first,
node.name().c_str());
- ARMNN_ASSERT(layer != nullptr);
+
+ if (!layer)
+ {
+ throw armnn::NullPointerException(fmt::format("Layer pointer is null {}", CHECK_LOCATION().AsString()));
+ }
auto outputInfo = ComputeOutputInfo({node.output(0)}, layer, {m_TensorsInfo[node.input(0)].m_info->GetShape()});
layer->GetOutputSlot(0).SetTensorInfo(outputInfo[0]);
@@ -1671,7 +1718,11 @@ void OnnxParserImpl::ParseConcat(const onnx::NodeProto& node)
}
IConnectableLayer* layer = m_Network->AddConcatLayer(concatDescriptor, node.name().c_str());
- ARMNN_ASSERT(layer != nullptr);
+
+ if (!layer)
+ {
+ throw armnn::NullPointerException(fmt::format("Layer pointer is null {}", CHECK_LOCATION().AsString()));
+ }
auto outputInfo = ComputeOutputInfo({node.output(0)}, layer, inputShapes,
m_TensorsInfo[node.input(0)].m_dtype);
@@ -1896,7 +1947,10 @@ void OnnxParserImpl::ParseConv(const onnx::NodeProto& node)
tensorIndexes.emplace_back(node.input(2));
}
- ARMNN_ASSERT(layer != nullptr);
+ if (!layer)
+ {
+ throw armnn::NullPointerException(fmt::format("Layer pointer is null {}", CHECK_LOCATION().AsString()));
+ }
auto outputInfo = ComputeOutputInfo({ node.output(0) }, layer,
{ m_TensorsInfo[node.input(0)].m_info->GetShape(),
@@ -1967,7 +2021,11 @@ void OnnxParserImpl::ParseGather(const onnx::NodeProto& node)
gatherDescriptor.m_Axis = static_cast<int>(ReadOptionalNodeInt64Attribute(node, "axis", 0));
IConnectableLayer* layer = m_Network->AddGatherLayer(gatherDescriptor, node.name().c_str());
- ARMNN_ASSERT(layer != nullptr);
+
+ if (!layer)
+ {
+ throw armnn::NullPointerException(fmt::format("Layer pointer is null {}", CHECK_LOCATION().AsString()));
+ }
const TensorShape& inputShape = m_TensorsInfo[node.input(0)].m_info->GetShape();
const TensorShape& indicesShape = m_TensorsInfo[node.input(1)].m_info->GetShape();
@@ -2005,7 +2063,11 @@ void OnnxParserImpl::ParseGemm(const onnx::NodeProto& node)
// Just add a FullyConnected layer, weights and biases are handled as inputs now.
layer = m_Network->AddFullyConnectedLayer(fullyConnectedDescriptor, node.name().c_str());
- ARMNN_ASSERT(layer != nullptr);
+
+ if (!layer)
+ {
+ throw armnn::NullPointerException(fmt::format("Layer pointer is null {}", CHECK_LOCATION().AsString()));
+ }
// if transA != 0, add transpose to the input0
if (transA != 0)
@@ -2014,7 +2076,12 @@ void OnnxParserImpl::ParseGemm(const onnx::NodeProto& node)
armnn::TransposeDescriptor transposeADescriptor;
transposeADescriptor.m_DimMappings = { 1, 0 };
IConnectableLayer* transALayer = m_Network->AddTransposeLayer(transposeADescriptor, transAName.c_str());
- ARMNN_ASSERT(transALayer != nullptr);
+
+ if (!transALayer)
+ {
+ throw armnn::NullPointerException(fmt::format("Layer pointer is null {}", CHECK_LOCATION().AsString()));
+ }
+
auto transAInfo = ComputeOutputInfo({ transAName }, transALayer, { input0Shape });
transALayer->GetOutputSlot(0).SetTensorInfo(transAInfo[0]);
transALayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0u));
@@ -2043,7 +2110,11 @@ void OnnxParserImpl::ParseGemm(const onnx::NodeProto& node)
activationDescriptor.m_A = alpha;
activationDescriptor.m_Function = ActivationFunction::Linear;
IConnectableLayer* actLayer = m_Network->AddActivationLayer(activationDescriptor, activationName.c_str());
- ARMNN_ASSERT(actLayer != nullptr);
+
+ if (!actLayer)
+ {
+ throw armnn::NullPointerException(fmt::format("Layer pointer is null {}", CHECK_LOCATION().AsString()));
+ }
auto actInfo = ComputeOutputInfo({ activationName }, actLayer, { weightInfo.GetShape() });
actLayer->GetOutputSlot(0).SetTensorInfo(actInfo[0]);
@@ -2067,7 +2138,11 @@ void OnnxParserImpl::ParseGemm(const onnx::NodeProto& node)
activationDescriptor.m_A = alpha;
activationDescriptor.m_Function = ActivationFunction::Linear;
IConnectableLayer* actLayer = m_Network->AddActivationLayer(activationDescriptor, activationName.c_str());
- ARMNN_ASSERT(actLayer != nullptr);
+
+ if (!actLayer)
+ {
+ throw armnn::NullPointerException(fmt::format("Layer pointer is null {}", CHECK_LOCATION().AsString()));
+ }
auto actInfo = ComputeOutputInfo({ activationName }, actLayer, { input1Shape });
actLayer->GetOutputSlot(0).SetTensorInfo(actInfo[0]);
@@ -2097,7 +2172,11 @@ void OnnxParserImpl::ParseGemm(const onnx::NodeProto& node)
activationDescriptor.m_A = beta;
activationDescriptor.m_Function = ActivationFunction::Linear;
IConnectableLayer* actLayer = m_Network->AddActivationLayer(activationDescriptor, activationName.c_str());
- ARMNN_ASSERT(actLayer != nullptr);
+
+ if (!actLayer)
+ {
+ throw armnn::NullPointerException(fmt::format("Layer pointer is null {}", CHECK_LOCATION().AsString()));
+ }
auto actInfo = ComputeOutputInfo({ activationName }, actLayer, { biasInfo.GetShape() });
actLayer->GetOutputSlot(0).SetTensorInfo(actInfo[0]);
@@ -2128,7 +2207,11 @@ void OnnxParserImpl::ParseGemm(const onnx::NodeProto& node)
activationDescriptor.m_A = beta;
activationDescriptor.m_Function = ActivationFunction::Linear;
IConnectableLayer* actLayer = m_Network->AddActivationLayer(activationDescriptor, activationName.c_str());
- ARMNN_ASSERT(actLayer != nullptr);
+
+ if (!actLayer)
+ {
+ throw armnn::NullPointerException(fmt::format("Layer pointer is null {}", CHECK_LOCATION().AsString()));
+ }
auto actInfo = ComputeOutputInfo({ activationName },
actLayer,
@@ -2162,7 +2245,11 @@ void OnnxParserImpl::ParseGlobalAveragePool(const onnx::NodeProto& node)
desc.m_PoolHeight = inputShape[2];
IConnectableLayer* layer = m_Network->AddPooling2dLayer(desc, node.name().c_str());
- ARMNN_ASSERT(layer != nullptr);
+
+ if (!layer)
+ {
+ throw armnn::NullPointerException(fmt::format("Layer pointer is null {}", CHECK_LOCATION().AsString()));
+ }
auto outputInfo = ComputeOutputInfo({node.output(0)}, layer, {inputShape});
layer->GetOutputSlot(0).SetTensorInfo(outputInfo[0]);
@@ -2189,7 +2276,11 @@ void OnnxParserImpl::ParseShape(const onnx::NodeProto& node)
CHECK_VALID_SIZE(static_cast<size_t>(node.output_size()), 1);
IConnectableLayer* layer = m_Network->AddShapeLayer(node.name().c_str());
- ARMNN_ASSERT(layer != nullptr);
+
+ if (!layer)
+ {
+ throw armnn::NullPointerException(fmt::format("Layer pointer is null {}", CHECK_LOCATION().AsString()));
+ }
TensorShape inputShape = m_TensorsInfo[node.input(0)].m_info->GetShape();
auto outputInfo = ComputeOutputInfo({node.output(0)}, layer, {inputShape}, onnx::TensorProto::INT64);
@@ -2439,7 +2530,11 @@ void OnnxParserImpl::RegisterInputSlot(IConnectableLayer* layer,
void OnnxParserImpl::RegisterInputSlots(IConnectableLayer* layer, const std::vector<std::string>& tensorIds)
{
- ARMNN_ASSERT(layer != nullptr);
+ if (!layer)
+ {
+ throw armnn::NullPointerException(fmt::format("Layer pointer is null {}", CHECK_LOCATION().AsString()));
+ }
+
if (tensorIds.size() != layer->GetNumInputSlots())
{
throw ParseException(
@@ -2467,7 +2562,11 @@ void OnnxParserImpl::RegisterInputSlots(IConnectableLayer* layer, const std::vec
void OnnxParserImpl::RegisterOutputSlots(IConnectableLayer* layer, const std::vector<std::string>& tensorIds)
{
- ARMNN_ASSERT(layer != nullptr);
+ if (!layer)
+ {
+ throw armnn::NullPointerException(fmt::format("Layer pointer is null {}", CHECK_LOCATION().AsString()));
+ }
+
if (tensorIds.size() != layer->GetNumOutputSlots())
{
throw ParseException(
diff --git a/src/armnnTfLiteParser/TfLiteParser.cpp b/src/armnnTfLiteParser/TfLiteParser.cpp
index c0e52b2113..894da6ab5e 100644
--- a/src/armnnTfLiteParser/TfLiteParser.cpp
+++ b/src/armnnTfLiteParser/TfLiteParser.cpp
@@ -188,14 +188,6 @@ void CheckTensor(const TfLiteParserImpl::ModelPtr& model,
size_t tensorIndex,
const CheckLocation& location)
{
- // not checking model, because I assume CHECK_MODEL already run
- // and checked that. An assert would do.
- ARMNN_ASSERT_MSG(model.get() != nullptr, "Expecting a valid model in this function");
-
- // also subgraph index should be checked by CHECK_MODEL so
- // I only add an assert here
- ARMNN_ASSERT_MSG(subgraphIndex < model->subgraphs.size(), "Expecting a valid subgraph index");
-
// the tensor index is the only one to check here
if (tensorIndex >= model->subgraphs[subgraphIndex]->tensors.size())
{
@@ -617,9 +609,16 @@ CreateConstTensorImpl(TfLiteParserImpl::BufferRawPtr bufferPtr,
armnn::Optional<armnn::PermutationVector&> permutationVector)
{
IgnoreUnused(tensorPtr);
- ARMNN_ASSERT_MSG(tensorPtr != nullptr, "tensorPtr is null");
- ARMNN_ASSERT_MSG(bufferPtr != nullptr,
- fmt::format("Buffer for buffer:{} is null", tensorPtr->buffer).c_str());
+
+ if (!tensorPtr)
+ {
+ throw armnn::ParseException(fmt::format("Tensor pointer is null {}", CHECK_LOCATION().AsString()));
+ }
+
+ if (!bufferPtr)
+ {
+ throw armnn::ParseException(fmt::format("Buffer for buffer:{} is null", tensorPtr->buffer).c_str());
+ }
std::unique_ptr<T[]> data(new T[tensorInfo.GetNumElements()]);
@@ -999,7 +998,11 @@ INetworkPtr TfLiteParserImpl::CreateNetworkFromModel()
}
}
m_Network = INetwork::Create(networkOptions);
- ARMNN_ASSERT(m_Model.get() != nullptr);
+
+ if (m_Model.get() == nullptr)
+ {
+ throw ParseException(fmt::format("Tflite Model pointer is null {}", CHECK_LOCATION().AsString()));
+ }
if (m_Model->subgraphs.size() != 1)
{
@@ -1101,8 +1104,6 @@ void TfLiteParserImpl::RegisterProducerOfTensor(size_t subgraphIndex,
armnn::IOutputSlot* slot)
{
CHECK_TENSOR(m_Model, subgraphIndex, tensorIndex);
- ARMNN_ASSERT(m_SubgraphConnections.size() > subgraphIndex);
- ARMNN_ASSERT(m_SubgraphConnections[subgraphIndex].size() > tensorIndex);
TensorSlots & tensorSlots = m_SubgraphConnections[subgraphIndex][tensorIndex];
@@ -1127,8 +1128,6 @@ void TfLiteParserImpl::RegisterConsumerOfTensor(size_t subgraphIndex,
armnn::IInputSlot* slot)
{
CHECK_TENSOR(m_Model, subgraphIndex, tensorIndex);
- ARMNN_ASSERT(m_SubgraphConnections.size() > subgraphIndex);
- ARMNN_ASSERT(m_SubgraphConnections[subgraphIndex].size() > tensorIndex);
TensorSlots& tensorSlots = m_SubgraphConnections[subgraphIndex][tensorIndex];
tensorSlots.inputSlots.push_back(slot);
@@ -1198,7 +1197,12 @@ void TfLiteParserImpl::ParseUnsupportedOperator(size_t subgraphIndex, size_t ope
// Add a non-executable StandInLayer as a placeholder for any unsupported operator
IConnectableLayer* layer = m_Network->AddStandInLayer(descriptor, layerName.c_str());
- ARMNN_ASSERT(layer != nullptr);
+
+ if (!layer)
+ {
+ throw NullPointerException(fmt::format("Layer {} pointer is null {}",
+ operatorIndex, CHECK_LOCATION().AsString()));
+ }
for (unsigned int i = 0u; i < numOutputs; ++i)
{
@@ -1224,7 +1228,12 @@ void TfLiteParserImpl::ParseCast(size_t subgraphIndex, size_t operatorIndex)
auto layerName = fmt::format("Cast:{}:{}", subgraphIndex, operatorIndex);
IConnectableLayer* layer = m_Network->AddCastLayer(layerName.c_str());
- ARMNN_ASSERT(layer != nullptr);
+
+ if (!layer)
+ {
+ throw NullPointerException(fmt::format("Layer {} pointer is null {}",
+ operatorIndex, CHECK_LOCATION().AsString()));
+ }
TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
@@ -1301,7 +1310,11 @@ void TfLiteParserImpl::ParseConv2D(size_t subgraphIndex, size_t operatorIndex)
}
}
- ARMNN_ASSERT(layer != nullptr);
+ if (!layer)
+ {
+ throw NullPointerException(fmt::format("Layer {} pointer is null {}",
+ operatorIndex, CHECK_LOCATION().AsString()));
+ }
armnn::TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 1});
layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
@@ -1381,7 +1394,12 @@ void TfLiteParserImpl::ParseConv3D(size_t subgraphIndex, size_t operatorIndex)
}
armnn::IConnectableLayer* layer = m_Network->AddConvolution3dLayer(desc, layerName.c_str());
- ARMNN_ASSERT(layer != nullptr);
+
+ if (!layer)
+ {
+ throw NullPointerException(fmt::format("Layer {} pointer is null {}",
+ operatorIndex, CHECK_LOCATION().AsString()));
+ }
armnn::TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 1});
layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
@@ -1457,7 +1475,12 @@ void TfLiteParserImpl::ParseDepthwiseConv2D(size_t subgraphIndex, size_t operato
// Add the biases input to the registration list, a constant layer will be added by SetupConstantLayers.
tensorIndexesToRegister.emplace_back(inputTensorIndexes[2]);
}
- ARMNN_ASSERT(layer != nullptr);
+
+ if (!layer)
+ {
+ throw NullPointerException(fmt::format("Layer {} pointer is null {}",
+ operatorIndex, CHECK_LOCATION().AsString()));
+ }
armnn::TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 1});
layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
@@ -1485,7 +1508,12 @@ void TfLiteParserImpl::ParseDequantize(size_t subgraphIndex, size_t operatorInde
auto layerName = fmt::format("Dequantize:{}:{}", subgraphIndex, operatorIndex);
IConnectableLayer* layer = m_Network->AddDequantizeLayer(layerName.c_str());
- ARMNN_ASSERT(layer != nullptr);
+
+ if (!layer)
+ {
+ throw NullPointerException(fmt::format("Layer {} pointer is null {}",
+ operatorIndex, CHECK_LOCATION().AsString()));
+ }
TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
@@ -1556,8 +1584,12 @@ void TfLiteParserImpl::ParseExpandDims(size_t subgraphIndex, size_t operatorInde
outputTensorInfo.SetShape(reshapeDesc.m_TargetShape);
IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, layerName.c_str());
- ARMNN_ASSERT(layer != nullptr);
- layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
+
+ if (!layer)
+ {
+ throw NullPointerException(fmt::format("Layer {} pointer is null {}",
+ operatorIndex, CHECK_LOCATION().AsString()));
+ }
+ layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
auto outputTensorIds = GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex);
m_TensorInfos[outputTensorIds[0]] = outputTensorInfo;
@@ -1596,7 +1628,12 @@ void TfLiteParserImpl::ParseTranspose(size_t subgraphIndex, size_t operatorIndex
TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
IConnectableLayer* layer = m_Network->AddTransposeLayer(desc, layerName.c_str());
- ARMNN_ASSERT(layer != nullptr);
+
+ if (!layer)
+ {
+ throw NullPointerException(fmt::format("Layer {} pointer is null {}",
+ operatorIndex, CHECK_LOCATION().AsString()));
+ }
TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
@@ -1736,7 +1773,11 @@ void TfLiteParserImpl::ParseTransposeConv(size_t subgraphIndex, size_t operatorI
layerName.c_str());
}
- ARMNN_ASSERT(layer != nullptr);
+ if (!layer)
+ {
+ throw NullPointerException(fmt::format("Layer {} pointer is null {}",
+ operatorIndex, CHECK_LOCATION().AsString()));
+ }
armnn::TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0 , { 2, 1 });
layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
@@ -1780,7 +1821,12 @@ void TfLiteParserImpl::ParseBatchMatMul(size_t subgraphIndex, size_t operatorInd
// Arbitrary DataLayout
IConnectableLayer* layer = m_Network->AddBatchMatMulLayer(descriptor, layerName.c_str());
- ARMNN_ASSERT(layer != nullptr);
+
+ if (!layer)
+ {
+ throw NullPointerException(fmt::format("Layer {} pointer is null {}",
+ operatorIndex, CHECK_LOCATION().AsString()));
+ }
TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 1});
layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
@@ -1831,7 +1877,12 @@ void TfLiteParserImpl::ParseBatchToSpaceND(size_t subgraphIndex, size_t operator
TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
IConnectableLayer* layer = m_Network->AddBatchToSpaceNdLayer(desc, layerName.c_str());
- ARMNN_ASSERT(layer != nullptr);
+
+ if (!layer)
+ {
+ throw NullPointerException(fmt::format("Layer {} pointer is null {}",
+ operatorIndex, CHECK_LOCATION().AsString()));
+ }
TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
@@ -1859,7 +1910,11 @@ void TfLiteParserImpl::ParseL2Normalization(size_t subgraphIndex, size_t operato
auto layerName = fmt::format("L2Normalization:{}:{}", subgraphIndex, operatorIndex);
IConnectableLayer* layer = m_Network->AddL2NormalizationLayer(desc, layerName.c_str());
- ARMNN_ASSERT(layer != nullptr);
+ if (!layer)
+ {
+ throw NullPointerException(fmt::format("Layer {} pointer is null {}",
+ operatorIndex, CHECK_LOCATION().AsString()));
+ }
armnn::TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
@@ -1893,7 +1948,12 @@ void TfLiteParserImpl::ParseMaximum(size_t subgraphIndex, size_t operatorIndex)
CheckMatchingQuantization(inputTensorInfo, input1TensorInfo, layerName, "Input 0", "Input 1");
IConnectableLayer* layer = m_Network->AddElementwiseBinaryLayer(BinaryOperation::Maximum, layerName.c_str());
- ARMNN_ASSERT(layer != nullptr);
+
+ if (!layer)
+ {
+ throw NullPointerException(fmt::format("Layer {} pointer is null {}",
+ operatorIndex, CHECK_LOCATION().AsString()));
+ }
TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 1});
CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
@@ -1923,7 +1983,12 @@ void TfLiteParserImpl::ParseMinimum(size_t subgraphIndex, size_t operatorIndex)
CheckMatchingQuantization(inputTensorInfo, input1TensorInfo, layerName, "Input 0", "Input 1");
IConnectableLayer* layer = m_Network->AddElementwiseBinaryLayer(BinaryOperation::Minimum, layerName.c_str());
- ARMNN_ASSERT(layer != nullptr);
+
+ if (!layer)
+ {
+ throw NullPointerException(fmt::format("Layer {} pointer is null {}",
+ operatorIndex, CHECK_LOCATION().AsString()));
+ }
TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 1});
CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
@@ -1960,7 +2025,7 @@ void TfLiteParserImpl::ParsePool(size_t subgraphIndex,
fmt::format("MaxPool2D:{}:{}", subgraphIndex, operatorIndex);
break;
default:
- ARMNN_ASSERT_MSG(false, "Unsupported Pooling Algorithm");
+ throw ParseException(fmt::format("Unsupported Pooling Algorithm {}", CHECK_LOCATION().AsString()));
}
Pooling2dDescriptor desc;
@@ -1991,7 +2056,12 @@ void TfLiteParserImpl::ParsePool(size_t subgraphIndex,
CHECK_VALID_SIZE(outputs.size(), 1);
IConnectableLayer* layer = m_Network->AddPooling2dLayer(desc, layerName.c_str());
- ARMNN_ASSERT(layer != nullptr);
+
+ if (!layer)
+ {
+ throw NullPointerException(fmt::format("Layer {} pointer is null {}",
+ operatorIndex, CHECK_LOCATION().AsString()));
+ }
armnn::TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
@@ -2180,7 +2250,12 @@ void TfLiteParserImpl::ParseSpaceToBatchND(size_t subgraphIndex, size_t operator
TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
IConnectableLayer* layer = m_Network->AddSpaceToBatchNdLayer(desc, layerName.c_str());
- ARMNN_ASSERT(layer != nullptr);
+
+ if (!layer)
+ {
+ throw NullPointerException(fmt::format("Layer {} pointer is null {}",
+ operatorIndex, CHECK_LOCATION().AsString()));
+ }
TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
@@ -2218,7 +2293,13 @@ void TfLiteParserImpl::ParseSpaceToDepth(size_t subgraphIndex, size_t operatorIn
auto layerName = fmt::format("SpaceToDepth:{}:{}", subgraphIndex, operatorIndex);
IConnectableLayer* layer = m_Network->AddSpaceToDepthLayer(descriptor, layerName.c_str());
- ARMNN_ASSERT(layer != nullptr);
+
+ if (!layer)
+ {
+ throw NullPointerException(fmt::format("Layer {} pointer is null {}",
+ operatorIndex, CHECK_LOCATION().AsString()));
+ }
+
TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
@@ -2292,7 +2373,12 @@ void TfLiteParserImpl::ParseShape(size_t subgraphIndex, size_t operatorIndex)
auto layerName = fmt::format("Shape:{}:{}", subgraphIndex, operatorIndex);
IConnectableLayer* layer = m_Network->AddShapeLayer(layerName.c_str());
- ARMNN_ASSERT(layer != nullptr);
+
+ if (!layer)
+ {
+ throw NullPointerException(fmt::format("Layer {} pointer is null {}",
+ operatorIndex, CHECK_LOCATION().AsString()));
+ }
TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
@@ -2354,7 +2440,13 @@ void TfLiteParserImpl::ParseSqueeze(size_t subgraphIndex, size_t operatorIndex)
m_TensorInfos[outputTensorIds[0]] = outputTensorInfo;
IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, layerName.c_str());
- ARMNN_ASSERT(layer != nullptr);
+
+ if (!layer)
+ {
+ throw NullPointerException(fmt::format("Layer {} pointer is null {}",
+ operatorIndex, CHECK_LOCATION().AsString()));
+ }
+
layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
@@ -2431,7 +2523,12 @@ void TfLiteParserImpl::ParseStridedSlice(size_t subgraphIndex, size_t operatorIn
auto layerName = fmt::format("StridedSlice:{}:{}", subgraphIndex, operatorIndex);
IConnectableLayer* layer = m_Network->AddStridedSliceLayer(desc, layerName.c_str());
- ARMNN_ASSERT(layer != nullptr);
+
+ if (!layer)
+ {
+ throw NullPointerException(fmt::format("Layer {} pointer is null {}",
+ operatorIndex, CHECK_LOCATION().AsString()));
+ }
armnn::TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
@@ -2461,7 +2558,12 @@ void TfLiteParserImpl::ParseSub(size_t subgraphIndex, size_t operatorIndex)
auto layerName = fmt::format("Sub:{}:{}", subgraphIndex, operatorIndex);
IConnectableLayer* layer = m_Network->AddElementwiseBinaryLayer(BinaryOperation::Sub, layerName.c_str());
- ARMNN_ASSERT(layer != nullptr);
+
+ if (!layer)
+ {
+ throw NullPointerException(fmt::format("Layer {} pointer is null {}",
+ operatorIndex, CHECK_LOCATION().AsString()));
+ }
TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 1});
layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
@@ -2495,7 +2597,12 @@ void TfLiteParserImpl::ParseDiv(size_t subgraphIndex, size_t operatorIndex)
auto layerName = fmt::format("Div:{}:{}", subgraphIndex, operatorIndex);
IConnectableLayer* layer = m_Network->AddElementwiseBinaryLayer(BinaryOperation::Div, layerName.c_str());
- ARMNN_ASSERT(layer != nullptr);
+
+ if (!layer)
+ {
+ throw NullPointerException(fmt::format("Layer {} pointer is null {}",
+ operatorIndex, CHECK_LOCATION().AsString()));
+ }
TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 1});
layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
@@ -2526,7 +2633,12 @@ void TfLiteParserImpl::ParseFloorDiv(size_t subgraphIndex, size_t operatorIndex)
auto layerName = fmt::format("Div:{}:{}", subgraphIndex, operatorIndex);
IConnectableLayer* layer = m_Network->AddElementwiseBinaryLayer(BinaryOperation::Div, layerName.c_str());
- ARMNN_ASSERT(layer != nullptr);
+
+ if (!layer)
+ {
+ throw NullPointerException(fmt::format("Layer {} pointer is null {}",
+ operatorIndex, CHECK_LOCATION().AsString()));
+ }
TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 1});
layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
@@ -2557,7 +2669,12 @@ void TfLiteParserImpl::ParseAdd(size_t subgraphIndex, size_t operatorIndex)
auto layerName = fmt::format("Add:{}:{}", subgraphIndex, operatorIndex);
IConnectableLayer* layer = m_Network->AddElementwiseBinaryLayer(BinaryOperation::Add, layerName.c_str());
- ARMNN_ASSERT(layer != nullptr);
+
+ if (!layer)
+ {
+ throw NullPointerException(fmt::format("Layer {} pointer is null {}",
+ operatorIndex, CHECK_LOCATION().AsString()));
+ }
TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 1});
layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
@@ -2591,7 +2708,12 @@ void TfLiteParserImpl::ParseMul(size_t subgraphIndex, size_t operatorIndex)
auto layerName = fmt::format("Mul:{}:{}", subgraphIndex, operatorIndex);
IConnectableLayer* layer = m_Network->AddElementwiseBinaryLayer(BinaryOperation::Mul, layerName.c_str());
- ARMNN_ASSERT(layer != nullptr);
+
+ if (!layer)
+ {
+ throw NullPointerException(fmt::format("Layer {} pointer is null {}",
+ operatorIndex, CHECK_LOCATION().AsString()));
+ }
TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 1});
layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
@@ -2651,7 +2773,12 @@ void TfLiteParserImpl::ParseMean(size_t subgraphIndex, size_t operatorIndex)
auto layerName = fmt::format("Mean:{}:{}", subgraphIndex, operatorIndex);
IConnectableLayer* layer = m_Network->AddMeanLayer(desc, layerName.c_str());
- ARMNN_ASSERT(layer != nullptr);
+
+ if (!layer)
+ {
+ throw NullPointerException(fmt::format("Layer {} pointer is null {}",
+ operatorIndex, CHECK_LOCATION().AsString()));
+ }
outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
@@ -2751,7 +2878,13 @@ void TfLiteParserImpl::ParsePad(size_t subgraphIndex, size_t operatorIndex)
: fmt::format("PadV2:{}:{}", subgraphIndex, operatorIndex);
IConnectableLayer* layer = m_Network->AddPadLayer(desc, layerName.c_str());
- ARMNN_ASSERT(layer != nullptr);
+
+ if (!layer)
+ {
+ throw NullPointerException(fmt::format("Layer {} pointer is null {}",
+ operatorIndex, CHECK_LOCATION().AsString()));
+ }
+
TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
@@ -2822,7 +2955,13 @@ void TfLiteParserImpl::ParseMirrorPad(size_t subgraphIndex, size_t operatorIndex
auto layerName = fmt::format("MirrorPad:{}:{}", subgraphIndex, operatorIndex);
IConnectableLayer* layer = m_Network->AddPadLayer(desc, layerName.c_str());
- ARMNN_ASSERT(layer != nullptr);
+
+ if (!layer)
+ {
+ throw NullPointerException(fmt::format("Layer {} pointer is null {}",
+ operatorIndex, CHECK_LOCATION().AsString()));
+ }
+
TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
@@ -2849,8 +2988,12 @@ void TfLiteParserImpl::ParsePrelu(size_t subgraphIndex, size_t operatorIndex)
armnn::TensorInfo alphaTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
IConnectableLayer* layer = m_Network->AddPreluLayer(layerName.c_str());
- ARMNN_ASSERT(layer != nullptr);
+ if (!layer)
+ {
+ throw NullPointerException(fmt::format("Layer {} pointer is null {}",
+ operatorIndex, CHECK_LOCATION().AsString()));
+ }
if (IsConstTensor(inputs[1]))
{
@@ -2863,7 +3006,12 @@ void TfLiteParserImpl::ParsePrelu(size_t subgraphIndex, size_t operatorIndex)
std::string constLayerName = fmt::format("Constant:{}", inputs[1]->name);
IConnectableLayer* constLayer =
m_Network->AddConstantLayer(alphaTensorAndData.first, constLayerName.c_str());
- ARMNN_ASSERT(constLayer != nullptr);
+
+ if (!constLayer)
+ {
+ throw NullPointerException(fmt::format("Layer {} pointer is null {}",
+ operatorIndex, CHECK_LOCATION().AsString()));
+ }
constLayer->GetOutputSlot(0).SetTensorInfo(alphaTensorInfo);
constLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1));
@@ -2899,7 +3047,12 @@ void TfLiteParserImpl::ParseQuantize(size_t subgraphIndex, size_t operatorIndex)
auto layerName = fmt::format("Quantize:{}:{}", subgraphIndex, operatorIndex);
IConnectableLayer* layer = m_Network->AddQuantizeLayer(layerName.c_str());
- ARMNN_ASSERT(layer != nullptr);
+
+ if (!layer)
+ {
+ throw NullPointerException(fmt::format("Layer {} pointer is null {}",
+ operatorIndex, CHECK_LOCATION().AsString()));
+ }
TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
@@ -3215,7 +3368,13 @@ void TfLiteParserImpl::ParseReshape(size_t subgraphIndex, size_t operatorIndex)
m_TensorInfos[outputTensorIds[0]] = reshapeOutputTensorInfo;
IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, layerName.c_str());
- ARMNN_ASSERT(layer != nullptr);
+
+ if (!layer)
+ {
+ throw NullPointerException(fmt::format("Layer {} pointer is null {}",
+ operatorIndex, CHECK_LOCATION().AsString()));
+ }
+
layer->GetOutputSlot(0).SetTensorInfo(reshapeOutputTensorInfo);
auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
@@ -3289,7 +3448,13 @@ void TfLiteParserImpl::ParseResize(size_t subgraphIndex, size_t operatorIndex, R
TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
IConnectableLayer* layer = m_Network->AddResizeLayer(desc, layerName.c_str());
- ARMNN_ASSERT(layer != nullptr);
+
+ if (!layer)
+ {
+ throw NullPointerException(fmt::format("Layer {} pointer is null {}",
+ operatorIndex, CHECK_LOCATION().AsString()));
+ }
+
TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
@@ -3408,7 +3573,13 @@ void TfLiteParserImpl::ParseConcatenation(size_t subgraphIndex, size_t operatorI
auto layerName = fmt::format("Concatenation:{}:{}", subgraphIndex, operatorIndex);
IConnectableLayer* layer = m_Network->AddConcatLayer(concatDescriptor, layerName.c_str());
- ARMNN_ASSERT(layer != nullptr);
+
+ if (!layer)
+ {
+ throw NullPointerException(fmt::format("Layer {} pointer is null {}",
+ operatorIndex, CHECK_LOCATION().AsString()));
+ }
+
TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {});
layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
@@ -3487,7 +3658,11 @@ void TfLiteParserImpl::ParseFullyConnected(size_t subgraphIndex, size_t operator
// Filters and biases are always passed to fully connected as inputs
layer = m_Network->AddFullyConnectedLayer(desc, layerName.c_str());
- ARMNN_ASSERT(layer != nullptr);
+ if (!layer)
+ {
+ throw NullPointerException(fmt::format("Layer {} pointer is null {}",
+ operatorIndex, CHECK_LOCATION().AsString()));
+ }
unsigned int startingSlotIndex = 0;
if (inputTensorInfo.GetNumDimensions() > 2)
@@ -3615,7 +3790,11 @@ void TfLiteParserImpl::ParseDetectionPostProcess(size_t subgraphIndex, size_t op
IConnectableLayer* layer = m_Network->AddDetectionPostProcessLayer(desc, anchorTensorAndData,
layerName.c_str());
- ARMNN_ASSERT(layer != nullptr);
+ if (!layer)
+ {
+ throw NullPointerException(fmt::format("Layer {} pointer is null {}",
+ operatorIndex, CHECK_LOCATION().AsString()));
+ }
// The model does not specify the output shapes.
// The output shapes are calculated from the max_detection and max_classes_per_detection.
@@ -3672,7 +3851,11 @@ void TfLiteParserImpl::ParsePack(size_t subgraphIndex, size_t operatorIndex)
auto layerName = fmt::format("Pack:{}:{}", subgraphIndex, operatorIndex);
IConnectableLayer* layer = m_Network->AddStackLayer(desc, layerName.c_str());
- ARMNN_ASSERT(layer != nullptr);
+ if (!layer)
+ {
+ throw NullPointerException(fmt::format("Layer {} pointer is null {}",
+ operatorIndex, CHECK_LOCATION().AsString()));
+ }
armnn::TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {});
layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
@@ -3936,7 +4119,12 @@ void TfLiteParserImpl::ParseUnidirectionalSequenceLSTM(size_t subgraphIndex, siz
auto layerName = fmt::format("UnidirectionalSequenceLSTM:{}:{}", subgraphIndex, operatorIndex);
armnn::IConnectableLayer* layer = m_Network->AddUnidirectionalSequenceLstmLayer(desc, params);
- ARMNN_ASSERT(layer != nullptr);
+
+ if (!layer)
+ {
+ throw NullPointerException(fmt::format("Layer {} pointer is null {}",
+ operatorIndex, CHECK_LOCATION().AsString()));
+ }
// register the input connection slots for the layer, connections are made after all layers have been created
// only the tensors for the inputs are relevant, exclude the const tensors
@@ -4029,7 +4217,12 @@ void TfLiteParserImpl::ParseUnpack(size_t subgraphIndex, size_t operatorIndex)
auto layerName = fmt::format("Unpack:{}:{}", subgraphIndex, operatorIndex);
IConnectableLayer* layer = m_Network->AddSplitterLayer(splitDesc, layerName.c_str());
- ARMNN_ASSERT(layer != nullptr);
+
+ if (!layer)
+ {
+ throw NullPointerException(fmt::format("Layer {} pointer is null {}",
+ operatorIndex, CHECK_LOCATION().AsString()));
+ }
TensorShape splitOutShape = TensorShape(static_cast<unsigned int>(unpackDimSizes.size()),
unpackDimSizes.data());
@@ -4093,7 +4286,12 @@ void TfLiteParserImpl::ParseSplit(size_t subgraphIndex, size_t operatorIndex)
armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
armnn::TensorInfo axisTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
- ARMNN_ASSERT(axisTensorInfo.GetNumElements() == 1);
+
+ if (axisTensorInfo.GetNumElements() != 1)
+ {
+ throw ParseException(fmt::format("Axis tensor can only have 1 element {}",
+ CHECK_LOCATION().AsString()));
+ }
BufferRawPtr axisBufferPtr = GetBuffer(m_Model, inputs[0]->buffer);
if (axisBufferPtr == nullptr)
@@ -4158,7 +4356,12 @@ void TfLiteParserImpl::ParseSplit(size_t subgraphIndex, size_t operatorIndex)
auto layerName = fmt::format("Split:{}:{}", subgraphIndex, operatorIndex);
IConnectableLayer* layer = m_Network->AddSplitterLayer(splitDesc, layerName.c_str());
- ARMNN_ASSERT(layer != nullptr);
+
+ if (!layer)
+ {
+ throw NullPointerException(fmt::format("Layer {} pointer is null {}",
+ operatorIndex, CHECK_LOCATION().AsString()));
+ }
auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[1]});
@@ -4177,8 +4380,11 @@ unsigned int ComputeWrappedIndex(int idx, unsigned int numDimsIn)
{
int numDims = armnn::numeric_cast<int>(numDimsIn);
int v = idx < 0 ? numDims + idx : idx;
- ARMNN_ASSERT(v >= 0);
- ARMNN_ASSERT(v < numDims);
+
+ if (v < 0 || v >= numDims)
+ {
+ throw ParseException(fmt::format("Unable to compute index {}", CHECK_LOCATION().AsString()));
+ }
return static_cast<unsigned int>(v);
}
@@ -4200,7 +4406,12 @@ void TfLiteParserImpl::ParseSplitV(size_t subgraphIndex, size_t operatorIndex)
armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputTensor);
armnn::TensorInfo splitsInfo = ToTensorInfo(splitsTensor);
armnn::TensorInfo axisTensorInfo = ToTensorInfo(axisTensor);
- ARMNN_ASSERT(axisTensorInfo.GetNumElements() == 1);
+
+ if (axisTensorInfo.GetNumElements() != 1)
+ {
+ throw ParseException(fmt::format("Axis tensor can only have 1 element {}",
+ CHECK_LOCATION().AsString()));
+ }
// Inputs
auto inputDimSize = inputTensorInfo.GetNumDimensions();
@@ -4324,7 +4535,12 @@ void TfLiteParserImpl::ParseSplitV(size_t subgraphIndex, size_t operatorIndex)
auto layerName = fmt::format("SplitV:{}:{}", subgraphIndex, operatorIndex);
IConnectableLayer* layer = m_Network->AddSplitterLayer(splitDesc, layerName.c_str());
- ARMNN_ASSERT(layer != nullptr);
+
+ if (!layer)
+ {
+ throw NullPointerException(fmt::format("Layer {} pointer is null {}",
+ operatorIndex, CHECK_LOCATION().AsString()));
+ }
auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
@@ -4361,7 +4577,12 @@ void TfLiteParserImpl::ParseArgMinMax(size_t subgraphIndex, size_t operatorIndex
armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
armnn::TensorInfo axisTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
- ARMNN_ASSERT(axisTensorInfo.GetNumElements() == 1);
+
+ if (axisTensorInfo.GetNumElements() != 1)
+ {
+ throw ParseException(fmt::format("Axis tensor can only have 1 element {}",
+ CHECK_LOCATION().AsString()));
+ }
// Check if output tensor type is Signed32 or Signed64
if (outputTensorInfo.GetDataType() != armnn::DataType::Signed32 &&
@@ -4406,7 +4627,13 @@ void TfLiteParserImpl::ParseArgMinMax(size_t subgraphIndex, size_t operatorIndex
auto layerName = argMinMaxFunction == ArgMinMaxFunction::Max ? "ArgMax:{}:{}" : "ArgMin:{}:{}";
auto layerNameFormatted = fmt::format(layerName, subgraphIndex, operatorIndex);
IConnectableLayer *layer = m_Network->AddArgMinMaxLayer(desc, layerNameFormatted.c_str());
- ARMNN_ASSERT(layer != nullptr);
+
+ if (!layer)
+ {
+ throw NullPointerException(fmt::format("Layer {} pointer is null {}",
+ operatorIndex, CHECK_LOCATION().AsString()));
+ }
+
outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
@@ -4463,7 +4690,13 @@ void TfLiteParserImpl::ParseGather(size_t subgraphIndex, size_t operatorIndex)
gatherDescriptor.m_Axis = axis;
IConnectableLayer* layer = m_Network->AddGatherLayer(gatherDescriptor, layerName.c_str());
- ARMNN_ASSERT(layer != nullptr);
+
+ if (!layer)
+ {
+ throw NullPointerException(fmt::format("Layer {} pointer is null {}",
+ operatorIndex, CHECK_LOCATION().AsString()));
+ }
+
outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 1});
layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
@@ -4488,7 +4721,13 @@ void TfLiteParserImpl::ParseGatherNd(size_t subgraphIndex, size_t operatorIndex)
auto layerName = fmt::format("GatherNd:{}:{}", subgraphIndex, operatorIndex);
IConnectableLayer* layer = m_Network->AddGatherNdLayer(layerName.c_str());
- ARMNN_ASSERT(layer != nullptr);
+
+ if (!layer)
+ {
+ throw NullPointerException(fmt::format("Layer {} pointer is null {}",
+ operatorIndex, CHECK_LOCATION().AsString()));
+ }
+
TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 1});
layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
@@ -4524,7 +4763,13 @@ void TfLiteParserImpl::ParseDepthToSpace(size_t subgraphIndex, size_t operatorIn
auto layerName = fmt::format("DepthToSpace:{}:{}", subgraphIndex, operatorIndex);
IConnectableLayer* layer = m_Network->AddDepthToSpaceLayer(descriptor, layerName.c_str());
- ARMNN_ASSERT(layer != nullptr);
+
+ if (!layer)
+ {
+ throw NullPointerException(fmt::format("Layer {} pointer is null {}",
+ operatorIndex, CHECK_LOCATION().AsString()));
+ }
+
TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
@@ -4649,7 +4894,12 @@ void TfLiteParserImpl::ParseLocalResponseNormalization(size_t subgraphIndex, siz
descriptor.m_NormSize = 1 + (2 * descriptor.m_NormSize);
IConnectableLayer* layer = m_Network->AddNormalizationLayer(descriptor, layerNameFormatted.c_str());
- ARMNN_ASSERT(layer != nullptr);
+
+ if (!layer)
+ {
+ throw NullPointerException(fmt::format("Layer {} pointer is null {}",
+ operatorIndex, CHECK_LOCATION().AsString()));
+ }
TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
@@ -4708,7 +4958,12 @@ void TfLiteParserImpl::ParsePower(size_t subgraphIndex, size_t operatorIndex)
CheckMatchingQuantization(inputTensorInfo, input1TensorInfo, layerName, "Input 0", "Input 1");
IConnectableLayer* layer = m_Network->AddElementwiseBinaryLayer(BinaryOperation::Power, layerName.c_str());
- ARMNN_ASSERT(layer != nullptr);
+
+ if (!layer)
+ {
+ throw NullPointerException(fmt::format("Layer {} pointer is null {}",
+ operatorIndex, CHECK_LOCATION().AsString()));
+ }
TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 1});
CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
@@ -4780,7 +5035,12 @@ void TfLiteParserImpl::ParseSquaredDifference(size_t subgraphIndex, size_t opera
CheckMatchingQuantization(inputTensorInfo, input1TensorInfo, layerName, "Input 0", "Input 1");
IConnectableLayer* layer = m_Network->AddElementwiseBinaryLayer(BinaryOperation::SqDiff, layerName.c_str());
- ARMNN_ASSERT(layer != nullptr);
+
+ if (!layer)
+ {
+ throw NullPointerException(fmt::format("Layer {} pointer is null {}",
+ operatorIndex, CHECK_LOCATION().AsString()));
+ }
TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 1});
CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
@@ -4809,7 +5069,12 @@ void TfLiteParserImpl::ParseElementwiseUnary(size_t subgraphIndex, size_t operat
ElementwiseUnaryDescriptor desc;
desc.m_Operation = unaryOperation;
IConnectableLayer* layer = m_Network->AddElementwiseUnaryLayer(desc, layerNameFormatted.c_str());
- ARMNN_ASSERT(layer != nullptr);
+
+ if (!layer)
+ {
+ throw NullPointerException(fmt::format("Layer {} pointer is null {}",
+ operatorIndex, CHECK_LOCATION().AsString()));
+ }
TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
@@ -4872,7 +5137,12 @@ void TfLiteParserImpl::ParseComparison(size_t subgraphIndex, size_t operatorInde
ComparisonDescriptor desc;
desc.m_Operation = comparisonOperation;
IConnectableLayer* layer = m_Network->AddComparisonLayer(desc, layerNameFormatted.c_str());
- ARMNN_ASSERT(layer != nullptr);
+
+ if (!layer)
+ {
+ throw NullPointerException(fmt::format("Layer {} pointer is null {}",
+ operatorIndex, CHECK_LOCATION().AsString()));
+ }
TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 1});
layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
@@ -5139,7 +5409,12 @@ void TfLiteParserImpl::RegisterInputSlots(size_t subgraphIndex,
unsigned int startingSlotIndex)
{
CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
- ARMNN_ASSERT(layer != nullptr);
+
+ if (!layer)
+ {
+ throw NullPointerException(fmt::format("Layer {} pointer is null {}",
+ operatorIndex, CHECK_LOCATION().AsString()));
+ }
if (tensorIndexes.size() + startingSlotIndex != layer->GetNumInputSlots())
{
@@ -5167,7 +5442,13 @@ void TfLiteParserImpl::RegisterOutputSlots(size_t subgraphIndex,
const std::vector<unsigned int>& tensorIndexes)
{
CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
- ARMNN_ASSERT(layer != nullptr);
+
+ if (!layer)
+ {
+ throw NullPointerException(fmt::format("Layer {} pointer is null {}",
+ operatorIndex, CHECK_LOCATION().AsString()));
+ }
+
if (tensorIndexes.size() != layer->GetNumOutputSlots())
{
throw ParseException(