From c229b3fd81b42140c0fa8731e90bc07323cec794 Mon Sep 17 00:00:00 2001 From: Ryan OShea Date: Tue, 27 Jun 2023 22:34:54 +0100 Subject: IVGCVSW-7676 Audit the use of ARMNN_ASSERT * Replace most ARMNN_ASSERT's from tflite parser * Replace most ARMNN_ASSERT's from onnx parser * Replace some ARMNN_ASSERT's from tflite delegate * Replace some ARMNN_ASSERT;s from include files Signed-off-by: Ryan OShea Change-Id: Ie052e0180060203f28f64ebf54acad298f431caf --- delegate/classic/src/ClassicDelegateUtils.hpp | 5 +- delegate/classic/src/Split.hpp | 18 +- delegate/classic/src/Transpose.hpp | 18 +- delegate/classic/src/armnn_delegate.cpp | 7 +- delegate/common/src/DelegateUtils.hpp | 7 +- delegate/opaque/src/Split.hpp | 18 +- delegate/opaque/src/Transpose.hpp | 16 +- delegate/opaque/src/armnn_delegate.cpp | 7 +- include/armnn/backends/Workload.hpp | 53 ++-- include/armnnUtils/DataLayoutIndexed.hpp | 29 +- src/armnnOnnxParser/OnnxParser.cpp | 149 +++++++-- src/armnnTfLiteParser/TfLiteParser.cpp | 431 +++++++++++++++++++++----- 12 files changed, 612 insertions(+), 146 deletions(-) diff --git a/delegate/classic/src/ClassicDelegateUtils.hpp b/delegate/classic/src/ClassicDelegateUtils.hpp index 8a9409df6a..8d6f0495f8 100644 --- a/delegate/classic/src/ClassicDelegateUtils.hpp +++ b/delegate/classic/src/ClassicDelegateUtils.hpp @@ -193,7 +193,10 @@ TfLiteStatus Connect(armnn::IConnectableLayer* layer, TfLiteNode* tfLiteNode, armnnDelegate::DelegateData& data) { - ARMNN_ASSERT(static_cast(tfLiteNode->outputs->size) == layer->GetNumOutputSlots()); + if (static_cast(tfLiteNode->outputs->size) != layer->GetNumOutputSlots()) + { + return kTfLiteError; + } // Connect the input slots for (unsigned int inputIndex = 0; inputIndex < layer->GetNumInputSlots(); ++inputIndex) diff --git a/delegate/classic/src/Split.hpp b/delegate/classic/src/Split.hpp index fcd901b23e..aaa610259f 100644 --- a/delegate/classic/src/Split.hpp +++ b/delegate/classic/src/Split.hpp @@ -44,7 +44,11 @@ TfLiteStatus VisitSplitOperator(DelegateData& delegateData, const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor); - ARMNN_ASSERT(GetTensorInfoForTfLiteTensor(tfLiteAxisTensor).GetNumElements() == 1); + if (GetTensorInfoForTfLiteTensor(tfLiteAxisTensor).GetNumElements() != 1) + { + return kTfLiteError; + } + auto* axisTensorDataPtr = tflite::GetTensorData(&tfLiteAxisTensor); std::vector axisTensorData(axisTensorDataPtr, axisTensorDataPtr + 1); int32_t axis = axisTensorData[0]; @@ -183,8 +187,16 @@ TfLiteStatus VisitSplitVOperator(DelegateData& delegateData, const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor); const armnn::TensorInfo& splitsTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteSplitsTensor); - ARMNN_ASSERT(splitsTensorInfo.GetNumDimensions() == 1); - ARMNN_ASSERT(GetTensorInfoForTfLiteTensor(tfLiteAxisTensor).GetNumElements() == 1); + + if (splitsTensorInfo.GetNumDimensions() != 1) + { + return kTfLiteError; + } + + if (GetTensorInfoForTfLiteTensor(tfLiteAxisTensor).GetNumElements() != 1) + { + return kTfLiteError; + } auto* axisTensorDataPtr = tflite::GetTensorData(&tfLiteAxisTensor); std::vector axisTensorData(axisTensorDataPtr, axisTensorDataPtr + 1); diff --git a/delegate/classic/src/Transpose.hpp b/delegate/classic/src/Transpose.hpp index 247ddf7e9e..9aa316d8c3 100644 --- a/delegate/classic/src/Transpose.hpp +++ b/delegate/classic/src/Transpose.hpp @@ -63,8 +63,15 @@ TfLiteStatus VisitTransposeOperator(DelegateData& delegateData, auto* permTensorDataPtr = 
tflite::GetTensorData(&tfLiteInputTensor1); unsigned int numEl = tfLiteInputTensor1.dims->data[0]; - ARMNN_ASSERT( numEl <= static_cast(armnn::MaxNumOfTensorDimensions)); - ARMNN_ASSERT( tfLiteInputTensor1.dims->size == 1); // ensure only single dimension to the permutation tensor + if (numEl > static_cast(armnn::MaxNumOfTensorDimensions)) + { + return kTfLiteError; + } + + if (tfLiteInputTensor1.dims->size != 1) + { + return kTfLiteError; + } armnn::TransposeDescriptor descriptor(armnn::PermutationVector( reinterpret_cast (permTensorDataPtr), @@ -95,7 +102,12 @@ TfLiteStatus VisitTransposeOperator(DelegateData& delegateData, armnn::IConnectableLayer* transposeLayer = delegateData.m_Network->AddTransposeLayer(descriptor, layerName.c_str()); transposeLayer->SetBackendId(setBackend); ARMNN_ASSERT(transposeLayer != nullptr); - ARMNN_ASSERT(transposeLayer->GetNumInputSlots() == 1); // permutation vector given to descriptor object + + // permutation vector given to descriptor object + if (transposeLayer->GetNumInputSlots() != 1) + { + return kTfLiteError; + } armnn::IOutputSlot& outputSlot = transposeLayer->GetOutputSlot(0); outputSlot.SetTensorInfo(outputTensorInfo); diff --git a/delegate/classic/src/armnn_delegate.cpp b/delegate/classic/src/armnn_delegate.cpp index 2483835989..de2aa0c632 100644 --- a/delegate/classic/src/armnn_delegate.cpp +++ b/delegate/classic/src/armnn_delegate.cpp @@ -312,7 +312,12 @@ TfLiteStatus ArmnnSubgraph::AddOutputLayer(DelegateData& delegateData, armnn::IConnectableLayer* layer = delegateData.m_Network->AddOutputLayer(bindingId); auto tensorInfo = GetTensorInfoForTfLiteTensor(tensor); - ARMNN_ASSERT(delegateData.m_OutputSlotForNode[static_cast(tensorId)] != nullptr); + + if (delegateData.m_OutputSlotForNode[static_cast(tensorId)] == nullptr) + { + return kTfLiteError; + } + delegateData.m_OutputSlotForNode[static_cast(tensorId)]->Connect(layer->GetInputSlot(0)); outputBindings.push_back(std::make_pair(bindingId, tensorInfo)); } diff --git a/delegate/common/src/DelegateUtils.hpp b/delegate/common/src/DelegateUtils.hpp index 418cad313f..a74ed8b549 100644 --- a/delegate/common/src/DelegateUtils.hpp +++ b/delegate/common/src/DelegateUtils.hpp @@ -109,8 +109,11 @@ unsigned int ComputeWrappedIndex(int index, unsigned int numDimensions) { int numDims = armnn::numeric_cast(numDimensions); int wrappedIndex = index < 0 ? 
numDims + index : index; - ARMNN_ASSERT(wrappedIndex >= 0); - ARMNN_ASSERT(wrappedIndex < numDims); + + if (wrappedIndex < 0 || wrappedIndex >= numDims) + { + throw armnn::ParseException("Unable to compute wrapped index"); + } return static_cast(wrappedIndex); }; diff --git a/delegate/opaque/src/Split.hpp b/delegate/opaque/src/Split.hpp index aec0fb674a..2dbfa602fb 100644 --- a/delegate/opaque/src/Split.hpp +++ b/delegate/opaque/src/Split.hpp @@ -68,7 +68,11 @@ TfLiteStatus VisitSplitOperator(DelegateData& delegateData, const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteInputTensor); - ARMNN_ASSERT(GetTensorInfoForTfLiteOpaqueTensor(tfLiteAxisTensor).GetNumElements() == 1); + if (GetTensorInfoForTfLiteOpaqueTensor(tfLiteAxisTensor).GetNumElements() != 1) + { + return kTfLiteError; + } + auto* axisTensorDataPtr = static_cast(TfLiteOpaqueTensorData(tfLiteAxisTensor)); std::vector axisTensorData(axisTensorDataPtr, axisTensorDataPtr + 1); int32_t axis = axisTensorData[0]; @@ -230,8 +234,16 @@ TfLiteStatus VisitSplitVOperator(DelegateData& delegateData, const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteInputTensor); const armnn::TensorInfo& splitsTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteSplitsTensor); - ARMNN_ASSERT(splitsTensorInfo.GetNumDimensions() == 1); - ARMNN_ASSERT(GetTensorInfoForTfLiteOpaqueTensor(tfLiteAxisTensor).GetNumElements() == 1); + + if (splitsTensorInfo.GetNumDimensions() != 1) + { + return kTfLiteError; + } + + if (GetTensorInfoForTfLiteOpaqueTensor(tfLiteAxisTensor).GetNumElements() != 1) + { + return kTfLiteError; + } auto* axisTensorDataPtr = static_cast(TfLiteOpaqueTensorData(tfLiteAxisTensor)); std::vector axisTensorData(axisTensorDataPtr, axisTensorDataPtr + 1); diff --git a/delegate/opaque/src/Transpose.hpp b/delegate/opaque/src/Transpose.hpp index 5af03b3790..4b2bdf376a 100644 --- a/delegate/opaque/src/Transpose.hpp +++ b/delegate/opaque/src/Transpose.hpp @@ -65,9 +65,16 @@ TfLiteStatus VisitTransposeOperator(DelegateData& delegateData, auto* permTensorDataPtr = static_cast(TfLiteOpaqueTensorData(tfLiteInputTensor1)); unsigned int numEl = TfLiteOpaqueTensorDim(tfLiteInputTensor1, 0); - ARMNN_ASSERT( numEl <= static_cast(armnn::MaxNumOfTensorDimensions) ); + if ( numEl > static_cast(armnn::MaxNumOfTensorDimensions) ) + { + return kTfLiteError; + } + // Ensure only single dimension to the permutation tensor - ARMNN_ASSERT( TfLiteOpaqueTensorNumDims(tfLiteInputTensor1) == 1 ); + if ( TfLiteOpaqueTensorNumDims(tfLiteInputTensor1) != 1 ) + { + return kTfLiteError; + } armnn::TransposeDescriptor descriptor(armnn::PermutationVector( reinterpret_cast (permTensorDataPtr), @@ -99,7 +106,10 @@ TfLiteStatus VisitTransposeOperator(DelegateData& delegateData, transposeLayer->SetBackendId(setBackend); ARMNN_ASSERT(transposeLayer != nullptr); // Permutation vector given to descriptor object - ARMNN_ASSERT(transposeLayer->GetNumInputSlots() == 1); + if (transposeLayer->GetNumInputSlots() != 1) + { + return kTfLiteError; + } armnn::IOutputSlot& outputSlot = transposeLayer->GetOutputSlot(0); outputSlot.SetTensorInfo(outputTensorInfo); diff --git a/delegate/opaque/src/armnn_delegate.cpp b/delegate/opaque/src/armnn_delegate.cpp index 60da293eb2..bad1abaa59 100644 --- a/delegate/opaque/src/armnn_delegate.cpp +++ b/delegate/opaque/src/armnn_delegate.cpp @@ -368,7 +368,12 @@ TfLiteStatus ArmnnSubgraph::AddOutputLayer(DelegateData& delegateData, armnn::IConnectableLayer* layer = 
delegateData.m_Network->AddOutputLayer(bindingId); auto tensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tensor); - ARMNN_ASSERT(delegateData.m_OutputSlotForNode[static_cast(tensorId)] != nullptr); + + if (delegateData.m_OutputSlotForNode[static_cast(tensorId)] == nullptr) + { + return kTfLiteError; + } + delegateData.m_OutputSlotForNode[static_cast(tensorId)]->Connect(layer->GetInputSlot(0)); outputBindings.push_back(std::make_pair(bindingId, tensorInfo)); } diff --git a/include/armnn/backends/Workload.hpp b/include/armnn/backends/Workload.hpp index 9d5fec98cd..e0647c24d5 100644 --- a/include/armnn/backends/Workload.hpp +++ b/include/armnn/backends/Workload.hpp @@ -114,14 +114,16 @@ public: if (std::find(dataTypes.begin(), dataTypes.end(), expectedInputType) == dataTypes.end()) { - ARMNN_ASSERT_MSG(false, "Trying to create workload with incorrect type"); + throw armnn::Exception("Trying to create workload with incorrect type"); } - ARMNN_ASSERT_MSG(std::all_of(std::next(info.m_InputTensorInfos.begin()), + if (std::all_of(std::next(info.m_InputTensorInfos.begin()), info.m_InputTensorInfos.end(), [&](auto it){ return it.GetDataType() == expectedInputType; - }), - "Trying to create workload with incorrect type"); + }) == false) + { + throw armnn::Exception("Trying to create workload with incorrect type"); + } } armnn::DataType expectedOutputType; @@ -135,19 +137,21 @@ public: if (expectedOutputType != expectedInputType) { - ARMNN_ASSERT_MSG(false, "Trying to create workload with incorrect type"); + throw armnn::Exception( "Trying to create workload with incorrect type"); } } else if (std::find(dataTypes.begin(), dataTypes.end(), expectedOutputType) == dataTypes.end()) { - ARMNN_ASSERT_MSG(false, "Trying to create workload with incorrect type"); + throw armnn::Exception("Trying to create workload with incorrect type"); } - ARMNN_ASSERT_MSG(std::all_of(std::next(info.m_OutputTensorInfos.begin()), + if (std::all_of(std::next(info.m_OutputTensorInfos.begin()), info.m_OutputTensorInfos.end(), [&](auto it){ return it.GetDataType() == expectedOutputType; - }), - "Trying to create workload with incorrect type"); + }) == false) + { + throw armnn::Exception("Trying to create workload with incorrect type"); + } } } }; @@ -160,19 +164,22 @@ public: MultiTypedWorkload(const QueueDescriptor& descriptor, const WorkloadInfo& info) : BaseWorkload(descriptor, info) { - ARMNN_ASSERT_MSG(std::all_of(info.m_InputTensorInfos.begin(), + if (std::all_of(info.m_InputTensorInfos.begin(), info.m_InputTensorInfos.end(), [&](auto it){ return it.GetDataType() == InputDataType; - }), - "Trying to create workload with incorrect type"); - - ARMNN_ASSERT_MSG(std::all_of(info.m_OutputTensorInfos.begin(), + }) == false) + { + throw armnn::Exception("Trying to create workload with incorrect type"); + } + if (std::all_of(info.m_OutputTensorInfos.begin(), info.m_OutputTensorInfos.end(), [&](auto it){ return it.GetDataType() == OutputDataType; - }), - "Trying to create workload with incorrect type"); + }) == false) + { + throw armnn::Exception("Trying to create workload with incorrect type"); + } } }; @@ -187,16 +194,20 @@ public: { if (!info.m_InputTensorInfos.empty()) { - ARMNN_ASSERT_MSG(info.m_InputTensorInfos.front().GetDataType() == DataType, - "Trying to create workload with incorrect type"); + if (info.m_InputTensorInfos.front().GetDataType() != DataType) + { + throw armnn::Exception("Trying to create workload with incorrect type"); + } } - ARMNN_ASSERT_MSG(std::all_of(info.m_OutputTensorInfos.begin(), + if 
(std::all_of(info.m_OutputTensorInfos.begin(), info.m_OutputTensorInfos.end(), [&](auto it){ return it.GetDataType() == DataType; - }), - "Trying to create workload with incorrect type"); + }) == false) + { + throw armnn::Exception("Trying to create workload with incorrect type"); + } } }; diff --git a/include/armnnUtils/DataLayoutIndexed.hpp b/include/armnnUtils/DataLayoutIndexed.hpp index 163d34b159..e57cec531f 100644 --- a/include/armnnUtils/DataLayoutIndexed.hpp +++ b/include/armnnUtils/DataLayoutIndexed.hpp @@ -1,5 +1,5 @@ // -// Copyright © 2019 Arm Ltd. All rights reserved. +// Copyright © 2018-2021,2023 Arm Ltd. All rights reserved. // SPDX-License-Identifier: MIT // @@ -29,13 +29,26 @@ public: unsigned int batchIndex, unsigned int channelIndex, unsigned int heightIndex, unsigned int widthIndex) const { - ARMNN_ASSERT( batchIndex < shape[0] || ( shape[0] == 0 && batchIndex == 0 ) ); - ARMNN_ASSERT( channelIndex < shape[m_ChannelsIndex] || - ( shape[m_ChannelsIndex] == 0 && channelIndex == 0) ); - ARMNN_ASSERT( heightIndex < shape[m_HeightIndex] || - ( shape[m_HeightIndex] == 0 && heightIndex == 0) ); - ARMNN_ASSERT( widthIndex < shape[m_WidthIndex] || - ( shape[m_WidthIndex] == 0 && widthIndex == 0) ); + if (batchIndex >= shape[0] && !( shape[0] == 0 && batchIndex == 0)) + { + throw armnn::Exception("Unable to get batch index", CHECK_LOCATION()); + } + if (channelIndex >= shape[m_ChannelsIndex] && + !(shape[m_ChannelsIndex] == 0 && channelIndex == 0)) + { + throw armnn::Exception("Unable to get channel index", CHECK_LOCATION()); + + } + if (heightIndex >= shape[m_HeightIndex] && + !( shape[m_HeightIndex] == 0 && heightIndex == 0)) + { + throw armnn::Exception("Unable to get height index", CHECK_LOCATION()); + } + if (widthIndex >= shape[m_WidthIndex] && + ( shape[m_WidthIndex] == 0 && widthIndex == 0)) + { + throw armnn::Exception("Unable to get width index", CHECK_LOCATION()); + } /// Offset the given indices appropriately depending on the data layout switch (m_DataLayout) diff --git a/src/armnnOnnxParser/OnnxParser.cpp b/src/armnnOnnxParser/OnnxParser.cpp index c0b42d9033..f165df9e14 100644 --- a/src/armnnOnnxParser/OnnxParser.cpp +++ b/src/armnnOnnxParser/OnnxParser.cpp @@ -500,7 +500,11 @@ std::vector OnnxParserImpl::ComputeOutputInfo(std::vector inputShapes, const onnx::TensorProto::DataType& dataType) { - ARMNN_ASSERT(! 
outNames.empty()); + if (outNames.empty()) + { + throw armnn::ParseException(fmt::format("Output names are empty {}", CHECK_LOCATION().AsString())); + } + bool needCompute = std::any_of(outNames.begin(), outNames.end(), [this](std::string name) @@ -516,7 +520,11 @@ std::vector OnnxParserImpl::ComputeOutputInfo(std::vectorInferOutputShapes(inputShapes); - ARMNN_ASSERT(inferredShapes.size() == outNames.size()); + if (inferredShapes.size() != outNames.size()) + { + throw armnn::ParseException(fmt::format("Inferred shapes does not match number of output names {}", + CHECK_LOCATION().AsString())); + } switch (dataType) { case onnx::TensorProto::FLOAT: { armnnType = DataType::Float32; @@ -579,7 +587,10 @@ CreateConstTensorImpl(const T* bufferPtr, armnn::TensorInfo& tensorInfo, const armnn::Optional permutationVector) { - ARMNN_ASSERT_MSG(bufferPtr != nullptr, fmt::format("Buffer for permutation is null").c_str()); + if (bufferPtr == nullptr) + { + throw armnn::ParseException(fmt::format("Buffer for permutation is null {}", CHECK_LOCATION().AsString())); + } std::unique_ptr data(new T[tensorInfo.GetNumElements()]); @@ -879,7 +890,10 @@ INetworkPtr OnnxParserImpl::CreateNetworkFromModel(onnx::ModelProto& model) void OnnxParserImpl::LoadGraph() { - ARMNN_ASSERT(m_Graph.get() != nullptr); + if (m_Graph.get() == nullptr) + { + throw armnn::ParseException(fmt::format("Graph pointer is null {}", CHECK_LOCATION().AsString())); + } //Fill m_TensorsInfo with the shapes and value of every tensor SetupInfo(m_Graph->mutable_output()); @@ -1131,7 +1145,10 @@ void OnnxParserImpl::AddConvLayerWithDepthwiseConv(const onnx::NodeProto& node, biasLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(2u)); } - ARMNN_ASSERT(layer != nullptr); + if (!layer) + { + throw armnn::NullPointerException(fmt::format("Layer pointer is null {}", CHECK_LOCATION().AsString())); + } auto outputInfo = ComputeOutputInfo({ node.output(0) }, layer, { m_TensorsInfo[node.input(0)].m_info->GetShape(), @@ -1208,7 +1225,11 @@ void OnnxParserImpl::AddFullyConnected(const onnx::NodeProto& matmulNode, const // Just add a FullyConnected layer, weights and biases are handled as inputs now. 
layer = m_Network->AddFullyConnectedLayer(desc, matmulNode.name().c_str()); - ARMNN_ASSERT(layer != nullptr); + + if (!layer) + { + throw armnn::NullPointerException(fmt::format("Layer pointer is null {}", CHECK_LOCATION().AsString())); + } if (inputInfo.GetNumDimensions() > 2) { @@ -1386,7 +1407,11 @@ void OnnxParserImpl::AddPoolingLayer(const onnx::NodeProto& node, Pooling2dDescr } IConnectableLayer* layer = m_Network->AddPooling2dLayer(desc, node.name().c_str()); - ARMNN_ASSERT(layer != nullptr); + + if (!layer) + { + throw armnn::NullPointerException(fmt::format("Layer pointer is null {}", CHECK_LOCATION().AsString())); + } auto outputInfo = ComputeOutputInfo({node.output(0)}, layer, {m_TensorsInfo[node.input(0)].m_info->GetShape()}); layer->GetOutputSlot(0).SetTensorInfo(outputInfo[0]); @@ -1447,7 +1472,12 @@ void OnnxParserImpl::CreateReshapeLayer(const std::string& inputName, reshapeDesc.m_TargetShape = outputTensorInfo.GetShape(); IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, layerName.c_str()); - ARMNN_ASSERT(layer != nullptr); + + if (!layer) + { + throw armnn::NullPointerException(fmt::format("Layer pointer is null {}", CHECK_LOCATION().AsString())); + } + layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo); // register the input connection slots for the layer, connections are made after all layers have been created @@ -1483,7 +1513,11 @@ void OnnxParserImpl::ParseActivation(const onnx::NodeProto& node, const armnn::A } IConnectableLayer* const layer = m_Network->AddActivationLayer(desc, node.name().c_str()); - ARMNN_ASSERT(layer != nullptr); + + if (!layer) + { + throw armnn::NullPointerException(fmt::format("Layer pointer is null {}", CHECK_LOCATION().AsString())); + } auto outputInfo = ComputeOutputInfo({ node.output(0)}, layer, {m_TensorsInfo[node.input(0)].m_info->GetShape()}); layer->GetOutputSlot(0).SetTensorInfo(outputInfo[0]); @@ -1534,7 +1568,12 @@ void OnnxParserImpl::ParseAdd(const onnx::NodeProto& node) auto inputs = AddPrepareBroadcast(node.input(0), node.input(1)); auto input0 = *m_TensorsInfo[inputs.first].m_info; auto input1 = *m_TensorsInfo[inputs.second].m_info; - ARMNN_ASSERT(input0.GetNumDimensions() == input1.GetNumDimensions()); + if (input0.GetNumDimensions() != input1.GetNumDimensions()) + { + throw armnn::ParseException(fmt::format("Dimension mismatch in node {} {}", + node.name(), + CHECK_LOCATION().AsString())); + } unsigned int numDims = input0.GetNumDimensions(); for (unsigned int i = 0; i < numDims; i++) @@ -1558,7 +1597,11 @@ void OnnxParserImpl::ParseAdd(const onnx::NodeProto& node) IConnectableLayer* layer = m_Network->AddElementwiseBinaryLayer(BinaryOperation::Add, node.name().c_str()); - ARMNN_ASSERT(layer != nullptr); + + if (!layer) + { + throw armnn::NullPointerException(fmt::format("Layer pointer is null {}", CHECK_LOCATION().AsString())); + } auto outputInfo = ComputeOutputInfo({ node.output(0) }, layer, { m_TensorsInfo[inputs.first].m_info->GetShape(), @@ -1627,7 +1670,11 @@ void OnnxParserImpl::ParseBatchNormalization(const onnx::NodeProto& node) biasTensor.first, scaleTensor.first, node.name().c_str()); - ARMNN_ASSERT(layer != nullptr); + + if (!layer) + { + throw armnn::NullPointerException(fmt::format("Layer pointer is null {}", CHECK_LOCATION().AsString())); + } auto outputInfo = ComputeOutputInfo({node.output(0)}, layer, {m_TensorsInfo[node.input(0)].m_info->GetShape()}); layer->GetOutputSlot(0).SetTensorInfo(outputInfo[0]); @@ -1671,7 +1718,11 @@ void OnnxParserImpl::ParseConcat(const onnx::NodeProto& 
node) } IConnectableLayer* layer = m_Network->AddConcatLayer(concatDescriptor, node.name().c_str()); - ARMNN_ASSERT(layer != nullptr); + + if (!layer) + { + throw armnn::NullPointerException(fmt::format("Layer pointer is null {}", CHECK_LOCATION().AsString())); + } auto outputInfo = ComputeOutputInfo({node.output(0)}, layer, inputShapes, m_TensorsInfo[node.input(0)].m_dtype); @@ -1896,7 +1947,10 @@ void OnnxParserImpl::ParseConv(const onnx::NodeProto& node) tensorIndexes.emplace_back(node.input(2)); } - ARMNN_ASSERT(layer != nullptr); + if (!layer) + { + throw armnn::NullPointerException(fmt::format("Layer pointer is null {}", CHECK_LOCATION().AsString())); + } auto outputInfo = ComputeOutputInfo({ node.output(0) }, layer, { m_TensorsInfo[node.input(0)].m_info->GetShape(), @@ -1967,7 +2021,11 @@ void OnnxParserImpl::ParseGather(const onnx::NodeProto& node) gatherDescriptor.m_Axis = static_cast(ReadOptionalNodeInt64Attribute(node, "axis", 0)); IConnectableLayer* layer = m_Network->AddGatherLayer(gatherDescriptor, node.name().c_str()); - ARMNN_ASSERT(layer != nullptr); + + if (!layer) + { + throw armnn::NullPointerException(fmt::format("Layer pointer is null {}", CHECK_LOCATION().AsString())); + } const TensorShape& inputShape = m_TensorsInfo[node.input(0)].m_info->GetShape(); const TensorShape& indicesShape = m_TensorsInfo[node.input(1)].m_info->GetShape(); @@ -2005,7 +2063,11 @@ void OnnxParserImpl::ParseGemm(const onnx::NodeProto& node) // Just add a FullyConnected layer, weights and biases are handled as inputs now. layer = m_Network->AddFullyConnectedLayer(fullyConnectedDescriptor, node.name().c_str()); - ARMNN_ASSERT(layer != nullptr); + + if (!layer) + { + throw armnn::NullPointerException(fmt::format("Layer pointer is null {}", CHECK_LOCATION().AsString())); + } // if transA != 0, add transpose to the input0 if (transA != 0) @@ -2014,7 +2076,12 @@ void OnnxParserImpl::ParseGemm(const onnx::NodeProto& node) armnn::TransposeDescriptor transposeADescriptor; transposeADescriptor.m_DimMappings = { 1, 0 }; IConnectableLayer* transALayer = m_Network->AddTransposeLayer(transposeADescriptor, transAName.c_str()); - ARMNN_ASSERT(transALayer != nullptr); + + if (!transALayer) + { + throw armnn::NullPointerException(fmt::format("Layer pointer is null {}", CHECK_LOCATION().AsString())); + } + auto transAInfo = ComputeOutputInfo({ transAName }, transALayer, { input0Shape }); transALayer->GetOutputSlot(0).SetTensorInfo(transAInfo[0]); transALayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0u)); @@ -2043,7 +2110,11 @@ void OnnxParserImpl::ParseGemm(const onnx::NodeProto& node) activationDescriptor.m_A = alpha; activationDescriptor.m_Function = ActivationFunction::Linear; IConnectableLayer* actLayer = m_Network->AddActivationLayer(activationDescriptor, activationName.c_str()); - ARMNN_ASSERT(actLayer != nullptr); + + if (!actLayer) + { + throw armnn::NullPointerException(fmt::format("Layer pointer is null {}", CHECK_LOCATION().AsString())); + } auto actInfo = ComputeOutputInfo({ activationName }, actLayer, { weightInfo.GetShape() }); actLayer->GetOutputSlot(0).SetTensorInfo(actInfo[0]); @@ -2067,7 +2138,11 @@ void OnnxParserImpl::ParseGemm(const onnx::NodeProto& node) activationDescriptor.m_A = alpha; activationDescriptor.m_Function = ActivationFunction::Linear; IConnectableLayer* actLayer = m_Network->AddActivationLayer(activationDescriptor, activationName.c_str()); - ARMNN_ASSERT(actLayer != nullptr); + + if (!actLayer) + { + throw armnn::NullPointerException(fmt::format("Layer pointer is null 
{}", CHECK_LOCATION().AsString())); + } auto actInfo = ComputeOutputInfo({ activationName }, actLayer, { input1Shape }); actLayer->GetOutputSlot(0).SetTensorInfo(actInfo[0]); @@ -2097,7 +2172,11 @@ void OnnxParserImpl::ParseGemm(const onnx::NodeProto& node) activationDescriptor.m_A = beta; activationDescriptor.m_Function = ActivationFunction::Linear; IConnectableLayer* actLayer = m_Network->AddActivationLayer(activationDescriptor, activationName.c_str()); - ARMNN_ASSERT(actLayer != nullptr); + + if (!actLayer) + { + throw armnn::NullPointerException(fmt::format("Layer pointer is null {}", CHECK_LOCATION().AsString())); + } auto actInfo = ComputeOutputInfo({ activationName }, actLayer, { biasInfo.GetShape() }); actLayer->GetOutputSlot(0).SetTensorInfo(actInfo[0]); @@ -2128,7 +2207,11 @@ void OnnxParserImpl::ParseGemm(const onnx::NodeProto& node) activationDescriptor.m_A = beta; activationDescriptor.m_Function = ActivationFunction::Linear; IConnectableLayer* actLayer = m_Network->AddActivationLayer(activationDescriptor, activationName.c_str()); - ARMNN_ASSERT(actLayer != nullptr); + + if (!layer) + { + throw armnn::NullPointerException(fmt::format("Layer pointer is null {}", CHECK_LOCATION().AsString())); + } auto actInfo = ComputeOutputInfo({ activationName }, actLayer, @@ -2162,7 +2245,11 @@ void OnnxParserImpl::ParseGlobalAveragePool(const onnx::NodeProto& node) desc.m_PoolHeight = inputShape[2]; IConnectableLayer* layer = m_Network->AddPooling2dLayer(desc, node.name().c_str()); - ARMNN_ASSERT(layer != nullptr); + + if (!layer) + { + throw armnn::NullPointerException(fmt::format("Layer pointer is null {}", CHECK_LOCATION().AsString())); + } auto outputInfo = ComputeOutputInfo({node.output(0)}, layer, {inputShape}); layer->GetOutputSlot(0).SetTensorInfo(outputInfo[0]); @@ -2189,7 +2276,11 @@ void OnnxParserImpl::ParseShape(const onnx::NodeProto& node) CHECK_VALID_SIZE(static_cast(node.output_size()), 1); IConnectableLayer* layer = m_Network->AddShapeLayer(node.name().c_str()); - ARMNN_ASSERT(layer != nullptr); + + if (!layer) + { + throw armnn::NullPointerException(fmt::format("Layer pointer is null {}", CHECK_LOCATION().AsString())); + } TensorShape inputShape = m_TensorsInfo[node.input(0)].m_info->GetShape(); auto outputInfo = ComputeOutputInfo({node.output(0)}, layer, {inputShape}, onnx::TensorProto::INT64); @@ -2439,7 +2530,11 @@ void OnnxParserImpl::RegisterInputSlot(IConnectableLayer* layer, void OnnxParserImpl::RegisterInputSlots(IConnectableLayer* layer, const std::vector& tensorIds) { - ARMNN_ASSERT(layer != nullptr); + if (!layer) + { + throw armnn::NullPointerException(fmt::format("Layer pointer is null {}", CHECK_LOCATION().AsString())); + } + if (tensorIds.size() != layer->GetNumInputSlots()) { throw ParseException( @@ -2467,7 +2562,11 @@ void OnnxParserImpl::RegisterInputSlots(IConnectableLayer* layer, const std::vec void OnnxParserImpl::RegisterOutputSlots(IConnectableLayer* layer, const std::vector& tensorIds) { - ARMNN_ASSERT(layer != nullptr); + if (!layer) + { + throw armnn::NullPointerException(fmt::format("Layer pointer is null {}", CHECK_LOCATION().AsString())); + } + if (tensorIds.size() != layer->GetNumOutputSlots()) { throw ParseException( diff --git a/src/armnnTfLiteParser/TfLiteParser.cpp b/src/armnnTfLiteParser/TfLiteParser.cpp index c0e52b2113..894da6ab5e 100644 --- a/src/armnnTfLiteParser/TfLiteParser.cpp +++ b/src/armnnTfLiteParser/TfLiteParser.cpp @@ -188,14 +188,6 @@ void CheckTensor(const TfLiteParserImpl::ModelPtr& model, size_t tensorIndex, const 
CheckLocation& location) { - // not checking model, because I assume CHECK_MODEL already run - // and checked that. An assert would do. - ARMNN_ASSERT_MSG(model.get() != nullptr, "Expecting a valid model in this function"); - - // also subgraph index should be checked by CHECK_MODEL so - // I only add an assert here - ARMNN_ASSERT_MSG(subgraphIndex < model->subgraphs.size(), "Expecting a valid subgraph index"); - // the tensor index is the only one to check here if (tensorIndex >= model->subgraphs[subgraphIndex]->tensors.size()) { @@ -617,9 +609,16 @@ CreateConstTensorImpl(TfLiteParserImpl::BufferRawPtr bufferPtr, armnn::Optional permutationVector) { IgnoreUnused(tensorPtr); - ARMNN_ASSERT_MSG(tensorPtr != nullptr, "tensorPtr is null"); - ARMNN_ASSERT_MSG(bufferPtr != nullptr, - fmt::format("Buffer for buffer:{} is null", tensorPtr->buffer).c_str()); + + if (!tensorPtr) + { + throw armnn::ParseException(fmt::format("Tensor pointer is null {}", CHECK_LOCATION().AsString())); + } + + if (!bufferPtr) + { + throw armnn::ParseException(fmt::format("Buffer for buffer:{} is null", tensorPtr->buffer).c_str()); + } std::unique_ptr data(new T[tensorInfo.GetNumElements()]); @@ -999,7 +998,11 @@ INetworkPtr TfLiteParserImpl::CreateNetworkFromModel() } } m_Network = INetwork::Create(networkOptions); - ARMNN_ASSERT(m_Model.get() != nullptr); + + if (m_Model.get() == nullptr) + { + throw ParseException(fmt::format("Tflite Model pointer is null {}", CHECK_LOCATION().AsString())); + } if (m_Model->subgraphs.size() != 1) { @@ -1101,8 +1104,6 @@ void TfLiteParserImpl::RegisterProducerOfTensor(size_t subgraphIndex, armnn::IOutputSlot* slot) { CHECK_TENSOR(m_Model, subgraphIndex, tensorIndex); - ARMNN_ASSERT(m_SubgraphConnections.size() > subgraphIndex); - ARMNN_ASSERT(m_SubgraphConnections[subgraphIndex].size() > tensorIndex); TensorSlots & tensorSlots = m_SubgraphConnections[subgraphIndex][tensorIndex]; @@ -1127,8 +1128,6 @@ void TfLiteParserImpl::RegisterConsumerOfTensor(size_t subgraphIndex, armnn::IInputSlot* slot) { CHECK_TENSOR(m_Model, subgraphIndex, tensorIndex); - ARMNN_ASSERT(m_SubgraphConnections.size() > subgraphIndex); - ARMNN_ASSERT(m_SubgraphConnections[subgraphIndex].size() > tensorIndex); TensorSlots& tensorSlots = m_SubgraphConnections[subgraphIndex][tensorIndex]; tensorSlots.inputSlots.push_back(slot); @@ -1198,7 +1197,12 @@ void TfLiteParserImpl::ParseUnsupportedOperator(size_t subgraphIndex, size_t ope // Add a non-executable StandInLayer as a placeholder for any unsupported operator IConnectableLayer* layer = m_Network->AddStandInLayer(descriptor, layerName.c_str()); - ARMNN_ASSERT(layer != nullptr); + + if (!layer) + { + throw NullPointerException(fmt::format("Layer {} pointer is null {}", + operatorIndex, CHECK_LOCATION().AsString())); + } for (unsigned int i = 0u; i < numOutputs; ++i) { @@ -1224,7 +1228,12 @@ void TfLiteParserImpl::ParseCast(size_t subgraphIndex, size_t operatorIndex) auto layerName = fmt::format("Cast:{}:{}", subgraphIndex, operatorIndex); IConnectableLayer* layer = m_Network->AddCastLayer(layerName.c_str()); - ARMNN_ASSERT(layer != nullptr); + + if (!layer) + { + throw NullPointerException(fmt::format("Layer {} pointer is null {}", + operatorIndex, CHECK_LOCATION().AsString())); + } TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0}); layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo); @@ -1301,7 +1310,11 @@ void TfLiteParserImpl::ParseConv2D(size_t subgraphIndex, size_t operatorIndex) } } - ARMNN_ASSERT(layer 
!= nullptr); + if (!layer) + { + throw NullPointerException(fmt::format("Layer {} pointer is null {}", + operatorIndex, CHECK_LOCATION().AsString())); + } armnn::TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 1}); layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo); @@ -1381,7 +1394,12 @@ void TfLiteParserImpl::ParseConv3D(size_t subgraphIndex, size_t operatorIndex) } armnn::IConnectableLayer* layer = m_Network->AddConvolution3dLayer(desc, layerName.c_str()); - ARMNN_ASSERT(layer != nullptr); + + if (!layer) + { + throw NullPointerException(fmt::format("Layer {} pointer is null {}", + operatorIndex, CHECK_LOCATION().AsString())); + } armnn::TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 1}); layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo); @@ -1457,7 +1475,12 @@ void TfLiteParserImpl::ParseDepthwiseConv2D(size_t subgraphIndex, size_t operato // Add the biases input to the registration list, a constant layer will be added by SetupConstantLayers. tensorIndexesToRegister.emplace_back(inputTensorIndexes[2]); } - ARMNN_ASSERT(layer != nullptr); + + if (!layer) + { + throw NullPointerException(fmt::format("Layer {} pointer is null {}", + operatorIndex, CHECK_LOCATION().AsString())); + } armnn::TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 1}); layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo); @@ -1485,7 +1508,12 @@ void TfLiteParserImpl::ParseDequantize(size_t subgraphIndex, size_t operatorInde auto layerName = fmt::format("Dequantize:{}:{}", subgraphIndex, operatorIndex); IConnectableLayer* layer = m_Network->AddDequantizeLayer(layerName.c_str()); - ARMNN_ASSERT(layer != nullptr); + + if (!layer) + { + throw NullPointerException(fmt::format("Layer {} pointer is null {}", + operatorIndex, CHECK_LOCATION().AsString())); + } TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0}); layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo); @@ -1556,8 +1584,12 @@ void TfLiteParserImpl::ParseExpandDims(size_t subgraphIndex, size_t operatorInde outputTensorInfo.SetShape(reshapeDesc.m_TargetShape); IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, layerName.c_str()); - ARMNN_ASSERT(layer != nullptr); - layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo); + + if (!layer) + { + throw NullPointerException(fmt::format("Layer {} pointer is null {}", + operatorIndex, CHECK_LOCATION().AsString())); + } layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo); auto outputTensorIds = GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex); m_TensorInfos[outputTensorIds[0]] = outputTensorInfo; @@ -1596,7 +1628,12 @@ void TfLiteParserImpl::ParseTranspose(size_t subgraphIndex, size_t operatorIndex TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0); IConnectableLayer* layer = m_Network->AddTransposeLayer(desc, layerName.c_str()); - ARMNN_ASSERT(layer != nullptr); + + if (!layer) + { + throw NullPointerException(fmt::format("Layer {} pointer is null {}", + operatorIndex, CHECK_LOCATION().AsString())); + } TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0}); CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0"); @@ -1736,7 +1773,11 @@ void TfLiteParserImpl::ParseTransposeConv(size_t subgraphIndex, size_t operatorI layerName.c_str()); } - ARMNN_ASSERT(layer 
!= nullptr); + if (!layer) + { + throw NullPointerException(fmt::format("Layer {} pointer is null {}", + operatorIndex, CHECK_LOCATION().AsString())); + } armnn::TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0 , { 2, 1 }); layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo); @@ -1780,7 +1821,12 @@ void TfLiteParserImpl::ParseBatchMatMul(size_t subgraphIndex, size_t operatorInd // Arbitrary DataLayout IConnectableLayer* layer = m_Network->AddBatchMatMulLayer(descriptor, layerName.c_str()); - ARMNN_ASSERT(layer != nullptr); + + if (!layer) + { + throw NullPointerException(fmt::format("Layer {} pointer is null {}", + operatorIndex, CHECK_LOCATION().AsString())); + } TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 1}); layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo); @@ -1831,7 +1877,12 @@ void TfLiteParserImpl::ParseBatchToSpaceND(size_t subgraphIndex, size_t operator TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0); IConnectableLayer* layer = m_Network->AddBatchToSpaceNdLayer(desc, layerName.c_str()); - ARMNN_ASSERT(layer != nullptr); + + if (!layer) + { + throw NullPointerException(fmt::format("Layer {} pointer is null {}", + operatorIndex, CHECK_LOCATION().AsString())); + } TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0}); CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0"); @@ -1859,7 +1910,11 @@ void TfLiteParserImpl::ParseL2Normalization(size_t subgraphIndex, size_t operato auto layerName = fmt::format("L2Normalization:{}:{}", subgraphIndex, operatorIndex); IConnectableLayer* layer = m_Network->AddL2NormalizationLayer(desc, layerName.c_str()); - ARMNN_ASSERT(layer != nullptr); + if (!layer) + { + throw NullPointerException(fmt::format("Layer {} pointer is null {}", + operatorIndex, CHECK_LOCATION().AsString())); + } armnn::TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0}); layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo); @@ -1893,7 +1948,12 @@ void TfLiteParserImpl::ParseMaximum(size_t subgraphIndex, size_t operatorIndex) CheckMatchingQuantization(inputTensorInfo, input1TensorInfo, layerName, "Input 0", "Input 1"); IConnectableLayer* layer = m_Network->AddElementwiseBinaryLayer(BinaryOperation::Maximum, layerName.c_str()); - ARMNN_ASSERT(layer != nullptr); + + if (!layer) + { + throw NullPointerException(fmt::format("Layer {} pointer is null {}", + operatorIndex, CHECK_LOCATION().AsString())); + } TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 1}); CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0"); @@ -1923,7 +1983,12 @@ void TfLiteParserImpl::ParseMinimum(size_t subgraphIndex, size_t operatorIndex) CheckMatchingQuantization(inputTensorInfo, input1TensorInfo, layerName, "Input 0", "Input 1"); IConnectableLayer* layer = m_Network->AddElementwiseBinaryLayer(BinaryOperation::Minimum, layerName.c_str()); - ARMNN_ASSERT(layer != nullptr); + + if (!layer) + { + throw NullPointerException(fmt::format("Layer {} pointer is null {}", + operatorIndex, CHECK_LOCATION().AsString())); + } TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 1}); CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0"); @@ -1960,7 +2025,7 @@ 
void TfLiteParserImpl::ParsePool(size_t subgraphIndex, fmt::format("MaxPool2D:{}:{}", subgraphIndex, operatorIndex); break; default: - ARMNN_ASSERT_MSG(false, "Unsupported Pooling Algorithm"); + throw ParseException(fmt::format("Unsupported Pooling Algorithm {}", CHECK_LOCATION().AsString())); } Pooling2dDescriptor desc; @@ -1991,7 +2056,12 @@ void TfLiteParserImpl::ParsePool(size_t subgraphIndex, CHECK_VALID_SIZE(outputs.size(), 1); IConnectableLayer* layer = m_Network->AddPooling2dLayer(desc, layerName.c_str()); - ARMNN_ASSERT(layer != nullptr); + + if (!layer) + { + throw NullPointerException(fmt::format("Layer {} pointer is null {}", + operatorIndex, CHECK_LOCATION().AsString())); + } armnn::TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0}); CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0"); @@ -2180,7 +2250,12 @@ void TfLiteParserImpl::ParseSpaceToBatchND(size_t subgraphIndex, size_t operator TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0); IConnectableLayer* layer = m_Network->AddSpaceToBatchNdLayer(desc, layerName.c_str()); - ARMNN_ASSERT(layer != nullptr); + + if (!layer) + { + throw NullPointerException(fmt::format("Layer {} pointer is null {}", + operatorIndex, CHECK_LOCATION().AsString())); + } TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0}); CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0"); @@ -2218,7 +2293,13 @@ void TfLiteParserImpl::ParseSpaceToDepth(size_t subgraphIndex, size_t operatorIn auto layerName = fmt::format("SpaceToDepth:{}:{}", subgraphIndex, operatorIndex); IConnectableLayer* layer = m_Network->AddSpaceToDepthLayer(descriptor, layerName.c_str()); - ARMNN_ASSERT(layer != nullptr); + + if (!layer) + { + throw NullPointerException(fmt::format("Layer {} pointer is null {}", + operatorIndex, CHECK_LOCATION().AsString())); + } + TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0}); layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo); @@ -2292,7 +2373,12 @@ void TfLiteParserImpl::ParseShape(size_t subgraphIndex, size_t operatorIndex) auto layerName = fmt::format("Shape:{}:{}", subgraphIndex, operatorIndex); IConnectableLayer* layer = m_Network->AddShapeLayer(layerName.c_str()); - ARMNN_ASSERT(layer != nullptr); + + if (!layer) + { + throw NullPointerException(fmt::format("Layer {} pointer is null {}", + operatorIndex, CHECK_LOCATION().AsString())); + } TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0}); layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo); @@ -2354,7 +2440,13 @@ void TfLiteParserImpl::ParseSqueeze(size_t subgraphIndex, size_t operatorIndex) m_TensorInfos[outputTensorIds[0]] = outputTensorInfo; IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, layerName.c_str()); - ARMNN_ASSERT(layer != nullptr); + + if (!layer) + { + throw NullPointerException(fmt::format("Layer {} pointer is null {}", + operatorIndex, CHECK_LOCATION().AsString())); + } + layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo); auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex)); @@ -2431,7 +2523,12 @@ void TfLiteParserImpl::ParseStridedSlice(size_t subgraphIndex, size_t operatorIn auto layerName = fmt::format("StridedSlice:{}:{}", subgraphIndex, operatorIndex); IConnectableLayer* layer = 
m_Network->AddStridedSliceLayer(desc, layerName.c_str()); - ARMNN_ASSERT(layer != nullptr); + + if (!layer) + { + throw NullPointerException(fmt::format("Layer {} pointer is null {}", + operatorIndex, CHECK_LOCATION().AsString())); + } armnn::TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0}); layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo); @@ -2461,7 +2558,12 @@ void TfLiteParserImpl::ParseSub(size_t subgraphIndex, size_t operatorIndex) auto layerName = fmt::format("Sub:{}:{}", subgraphIndex, operatorIndex); IConnectableLayer* layer = m_Network->AddElementwiseBinaryLayer(BinaryOperation::Sub, layerName.c_str()); - ARMNN_ASSERT(layer != nullptr); + + if (!layer) + { + throw NullPointerException(fmt::format("Layer {} pointer is null {}", + operatorIndex, CHECK_LOCATION().AsString())); + } TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 1}); layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo); @@ -2495,7 +2597,12 @@ void TfLiteParserImpl::ParseDiv(size_t subgraphIndex, size_t operatorIndex) auto layerName = fmt::format("Div:{}:{}", subgraphIndex, operatorIndex); IConnectableLayer* layer = m_Network->AddElementwiseBinaryLayer(BinaryOperation::Div, layerName.c_str()); - ARMNN_ASSERT(layer != nullptr); + + if (!layer) + { + throw NullPointerException(fmt::format("Layer {} pointer is null {}", + operatorIndex, CHECK_LOCATION().AsString())); + } TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 1}); layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo); @@ -2526,7 +2633,12 @@ void TfLiteParserImpl::ParseFloorDiv(size_t subgraphIndex, size_t operatorIndex) auto layerName = fmt::format("Div:{}:{}", subgraphIndex, operatorIndex); IConnectableLayer* layer = m_Network->AddElementwiseBinaryLayer(BinaryOperation::Div, layerName.c_str()); - ARMNN_ASSERT(layer != nullptr); + + if (!layer) + { + throw NullPointerException(fmt::format("Layer {} pointer is null {}", + operatorIndex, CHECK_LOCATION().AsString())); + } TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 1}); layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo); @@ -2557,7 +2669,12 @@ void TfLiteParserImpl::ParseAdd(size_t subgraphIndex, size_t operatorIndex) auto layerName = fmt::format("Add:{}:{}", subgraphIndex, operatorIndex); IConnectableLayer* layer = m_Network->AddElementwiseBinaryLayer(BinaryOperation::Add, layerName.c_str()); - ARMNN_ASSERT(layer != nullptr); + + if (!layer) + { + throw NullPointerException(fmt::format("Layer {} pointer is null {}", + operatorIndex, CHECK_LOCATION().AsString())); + } TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 1}); layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo); @@ -2591,7 +2708,12 @@ void TfLiteParserImpl::ParseMul(size_t subgraphIndex, size_t operatorIndex) auto layerName = fmt::format("Mul:{}:{}", subgraphIndex, operatorIndex); IConnectableLayer* layer = m_Network->AddElementwiseBinaryLayer(BinaryOperation::Mul, layerName.c_str()); - ARMNN_ASSERT(layer != nullptr); + + if (!layer) + { + throw NullPointerException(fmt::format("Layer {} pointer is null {}", + operatorIndex, CHECK_LOCATION().AsString())); + } TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 1}); layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo); @@ -2651,7 +2773,12 @@ void 
TfLiteParserImpl::ParseMean(size_t subgraphIndex, size_t operatorIndex) auto layerName = fmt::format("Mean:{}:{}", subgraphIndex, operatorIndex); IConnectableLayer* layer = m_Network->AddMeanLayer(desc, layerName.c_str()); - ARMNN_ASSERT(layer != nullptr); + + if (!layer) + { + throw NullPointerException(fmt::format("Layer {} pointer is null {}", + operatorIndex, CHECK_LOCATION().AsString())); + } outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0}); layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo); @@ -2751,7 +2878,13 @@ void TfLiteParserImpl::ParsePad(size_t subgraphIndex, size_t operatorIndex) : fmt::format("PadV2:{}:{}", subgraphIndex, operatorIndex); IConnectableLayer* layer = m_Network->AddPadLayer(desc, layerName.c_str()); - ARMNN_ASSERT(layer != nullptr); + + if (!layer) + { + throw NullPointerException(fmt::format("Layer {} pointer is null {}", + operatorIndex, CHECK_LOCATION().AsString())); + } + TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0}); layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo); @@ -2822,7 +2955,13 @@ void TfLiteParserImpl::ParseMirrorPad(size_t subgraphIndex, size_t operatorIndex auto layerName = fmt::format("MirrorPad:{}:{}", subgraphIndex, operatorIndex); IConnectableLayer* layer = m_Network->AddPadLayer(desc, layerName.c_str()); - ARMNN_ASSERT(layer != nullptr); + + if (!layer) + { + throw NullPointerException(fmt::format("Layer {} pointer is null {}", + operatorIndex, CHECK_LOCATION().AsString())); + } + TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0}); layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo); @@ -2849,8 +2988,12 @@ void TfLiteParserImpl::ParsePrelu(size_t subgraphIndex, size_t operatorIndex) armnn::TensorInfo alphaTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1); IConnectableLayer* layer = m_Network->AddPreluLayer(layerName.c_str()); - ARMNN_ASSERT(layer != nullptr); + if (!layer) + { + throw NullPointerException(fmt::format("Layer {} pointer is null {}", + operatorIndex, CHECK_LOCATION().AsString())); + } if (IsConstTensor(inputs[1])) { @@ -2863,7 +3006,12 @@ void TfLiteParserImpl::ParsePrelu(size_t subgraphIndex, size_t operatorIndex) std::string constLayerName = fmt::format("Constant:{}", inputs[1]->name); IConnectableLayer* constLayer = m_Network->AddConstantLayer(alphaTensorAndData.first, constLayerName.c_str()); - ARMNN_ASSERT(constLayer != nullptr); + + if (!constLayer) + { + throw NullPointerException(fmt::format("Layer {} pointer is null {}", + operatorIndex, CHECK_LOCATION().AsString())); + } constLayer->GetOutputSlot(0).SetTensorInfo(alphaTensorInfo); constLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1)); @@ -2899,7 +3047,12 @@ void TfLiteParserImpl::ParseQuantize(size_t subgraphIndex, size_t operatorIndex) auto layerName = fmt::format("Quantize:{}:{}", subgraphIndex, operatorIndex); IConnectableLayer* layer = m_Network->AddQuantizeLayer(layerName.c_str()); - ARMNN_ASSERT(layer != nullptr); + + if (!layer) + { + throw NullPointerException(fmt::format("Layer {} pointer is null {}", + operatorIndex, CHECK_LOCATION().AsString())); + } TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0}); layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo); @@ -3215,7 +3368,13 @@ void TfLiteParserImpl::ParseReshape(size_t subgraphIndex, size_t operatorIndex) m_TensorInfos[outputTensorIds[0]] = reshapeOutputTensorInfo; 
IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, layerName.c_str()); - ARMNN_ASSERT(layer != nullptr); + + if (!layer) + { + throw NullPointerException(fmt::format("Layer {} pointer is null {}", + operatorIndex, CHECK_LOCATION().AsString())); + } + layer->GetOutputSlot(0).SetTensorInfo(reshapeOutputTensorInfo); auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex)); @@ -3289,7 +3448,13 @@ void TfLiteParserImpl::ParseResize(size_t subgraphIndex, size_t operatorIndex, R TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0); IConnectableLayer* layer = m_Network->AddResizeLayer(desc, layerName.c_str()); - ARMNN_ASSERT(layer != nullptr); + + if (!layer) + { + throw NullPointerException(fmt::format("Layer {} pointer is null {}", + operatorIndex, CHECK_LOCATION().AsString())); + } + TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0}); CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0"); layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo); @@ -3408,7 +3573,13 @@ void TfLiteParserImpl::ParseConcatenation(size_t subgraphIndex, size_t operatorI auto layerName = fmt::format("Concatenation:{}:{}", subgraphIndex, operatorIndex); IConnectableLayer* layer = m_Network->AddConcatLayer(concatDescriptor, layerName.c_str()); - ARMNN_ASSERT(layer != nullptr); + + if (!layer) + { + throw NullPointerException(fmt::format("Layer {} pointer is null {}", + operatorIndex, CHECK_LOCATION().AsString())); + } + TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {}); layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo); @@ -3487,7 +3658,11 @@ void TfLiteParserImpl::ParseFullyConnected(size_t subgraphIndex, size_t operator // Filters and biases are always passed to fully connected as inputs layer = m_Network->AddFullyConnectedLayer(desc, layerName.c_str()); - ARMNN_ASSERT(layer != nullptr); + if (!layer) + { + throw NullPointerException(fmt::format("Layer {} pointer is null {}", + operatorIndex, CHECK_LOCATION().AsString())); + } unsigned int startingSlotIndex = 0; if (inputTensorInfo.GetNumDimensions() > 2) @@ -3615,7 +3790,11 @@ void TfLiteParserImpl::ParseDetectionPostProcess(size_t subgraphIndex, size_t op IConnectableLayer* layer = m_Network->AddDetectionPostProcessLayer(desc, anchorTensorAndData, layerName.c_str()); - ARMNN_ASSERT(layer != nullptr); + if (!layer) + { + throw NullPointerException(fmt::format("Layer {} pointer is null {}", + operatorIndex, CHECK_LOCATION().AsString())); + } // The model does not specify the output shapes. // The output shapes are calculated from the max_detection and max_classes_per_detection. 
@@ -3672,7 +3851,11 @@ void TfLiteParserImpl::ParsePack(size_t subgraphIndex, size_t operatorIndex)
     auto layerName = fmt::format("Pack:{}:{}", subgraphIndex, operatorIndex);
 
     IConnectableLayer* layer = m_Network->AddStackLayer(desc, layerName.c_str());
-    ARMNN_ASSERT(layer != nullptr);
+    if (!layer)
+    {
+        throw NullPointerException(fmt::format("Layer {} pointer is null {}",
+                                                operatorIndex, CHECK_LOCATION().AsString()));
+    }
 
     armnn::TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {});
     layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
@@ -3936,7 +4119,12 @@ void TfLiteParserImpl::ParseUnidirectionalSequenceLSTM(size_t subgraphIndex, siz
     auto layerName = fmt::format("UnidirectionalSequenceLSTM:{}:{}", subgraphIndex, operatorIndex);
 
     armnn::IConnectableLayer* layer = m_Network->AddUnidirectionalSequenceLstmLayer(desc, params);
-    ARMNN_ASSERT(layer != nullptr);
+
+    if (!layer)
+    {
+        throw NullPointerException(fmt::format("Layer {} pointer is null {}",
+                                                operatorIndex, CHECK_LOCATION().AsString()));
+    }
 
     // register the input connection slots for the layer, connections are made after all layers have been created
     // only the tensors for the inputs are relevant, exclude the const tensors
@@ -4029,7 +4217,12 @@ void TfLiteParserImpl::ParseUnpack(size_t subgraphIndex, size_t operatorIndex)
     auto layerName = fmt::format("Unpack:{}:{}", subgraphIndex, operatorIndex);
 
     IConnectableLayer* layer = m_Network->AddSplitterLayer(splitDesc, layerName.c_str());
-    ARMNN_ASSERT(layer != nullptr);
+
+    if (!layer)
+    {
+        throw NullPointerException(fmt::format("Layer {} pointer is null {}",
+                                                operatorIndex, CHECK_LOCATION().AsString()));
+    }
 
     TensorShape splitOutShape = TensorShape(static_cast<unsigned int>(unpackDimSizes.size()),
                                             unpackDimSizes.data());
@@ -4093,7 +4286,12 @@ void TfLiteParserImpl::ParseSplit(size_t subgraphIndex, size_t operatorIndex)
 
     armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
     armnn::TensorInfo axisTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
-    ARMNN_ASSERT(axisTensorInfo.GetNumElements() == 1);
+
+    if (axisTensorInfo.GetNumElements() != 1)
+    {
+        throw ParseException(fmt::format("Axis tensor can only have 1 element {}",
+                                         CHECK_LOCATION().AsString()));
+    }
 
     BufferRawPtr axisBufferPtr = GetBuffer(m_Model, inputs[0]->buffer);
     if (axisBufferPtr == nullptr)
@@ -4158,7 +4356,12 @@ void TfLiteParserImpl::ParseSplit(size_t subgraphIndex, size_t operatorIndex)
     auto layerName = fmt::format("Split:{}:{}", subgraphIndex, operatorIndex);
 
     IConnectableLayer* layer = m_Network->AddSplitterLayer(splitDesc, layerName.c_str());
-    ARMNN_ASSERT(layer != nullptr);
+
+    if (!layer)
+    {
+        throw NullPointerException(fmt::format("Layer {} pointer is null {}",
+                                                operatorIndex, CHECK_LOCATION().AsString()));
+    }
 
     auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
     RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[1]});
@@ -4177,8 +4380,11 @@ unsigned int ComputeWrappedIndex(int idx, unsigned int numDimsIn)
 {
     int numDims = armnn::numeric_cast<int>(numDimsIn);
     int v = idx < 0 ? numDims + idx : idx;
-    ARMNN_ASSERT(v >= 0);
-    ARMNN_ASSERT(v < numDims);
+
+    if (v < 0 || v >= numDims)
+    {
+        throw ParseException(fmt::format("Unable to compute index {}", CHECK_LOCATION().AsString()));
+    }
 
     return static_cast<unsigned int>(v);
 }
@@ -4200,7 +4406,12 @@ void TfLiteParserImpl::ParseSplitV(size_t subgraphIndex, size_t operatorIndex)
     armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputTensor);
     armnn::TensorInfo splitsInfo = ToTensorInfo(splitsTensor);
     armnn::TensorInfo axisTensorInfo = ToTensorInfo(axisTensor);
-    ARMNN_ASSERT(axisTensorInfo.GetNumElements() == 1);
+
+    if (axisTensorInfo.GetNumElements() != 1)
+    {
+        throw ParseException(fmt::format("Axis tensor can only have 1 element {}",
+                                         CHECK_LOCATION().AsString()));
+    }
 
     // Inputs
     auto inputDimSize = inputTensorInfo.GetNumDimensions();
@@ -4324,7 +4535,12 @@ void TfLiteParserImpl::ParseSplitV(size_t subgraphIndex, size_t operatorIndex)
     auto layerName = fmt::format("SplitV:{}:{}", subgraphIndex, operatorIndex);
 
     IConnectableLayer* layer = m_Network->AddSplitterLayer(splitDesc, layerName.c_str());
-    ARMNN_ASSERT(layer != nullptr);
+
+    if (!layer)
+    {
+        throw NullPointerException(fmt::format("Layer {} pointer is null {}",
+                                                operatorIndex, CHECK_LOCATION().AsString()));
+    }
 
     auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
     RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
@@ -4361,7 +4577,12 @@ void TfLiteParserImpl::ParseArgMinMax(size_t subgraphIndex, size_t operatorIndex
     armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
     armnn::TensorInfo axisTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
     armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
-    ARMNN_ASSERT(axisTensorInfo.GetNumElements() == 1);
+
+    if (axisTensorInfo.GetNumElements() != 1)
+    {
+        throw ParseException(fmt::format("Axis tensor can only have 1 element {}",
+                                         CHECK_LOCATION().AsString()));
+    }
 
     // Check if output tensor type is Signed32 or Signed64
     if (outputTensorInfo.GetDataType() != armnn::DataType::Signed32 &&
@@ -4406,7 +4627,13 @@ void TfLiteParserImpl::ParseArgMinMax(size_t subgraphIndex, size_t operatorIndex
     auto layerName = argMinMaxFunction == ArgMinMaxFunction::Max ? "ArgMax:{}:{}" : "ArgMin:{}:{}";
     auto layerNameFormatted = fmt::format(layerName, subgraphIndex, operatorIndex);
     IConnectableLayer *layer = m_Network->AddArgMinMaxLayer(desc, layerNameFormatted.c_str());
-    ARMNN_ASSERT(layer != nullptr);
+
+    if (!layer)
+    {
+        throw NullPointerException(fmt::format("Layer {} pointer is null {}",
+                                                operatorIndex, CHECK_LOCATION().AsString()));
+    }
+
     outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
     layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
 
@@ -4463,7 +4690,13 @@ void TfLiteParserImpl::ParseGather(size_t subgraphIndex, size_t operatorIndex)
     gatherDescriptor.m_Axis = axis;
 
     IConnectableLayer* layer = m_Network->AddGatherLayer(gatherDescriptor, layerName.c_str());
-    ARMNN_ASSERT(layer != nullptr);
+
+    if (!layer)
+    {
+        throw NullPointerException(fmt::format("Layer {} pointer is null {}",
+                                                operatorIndex, CHECK_LOCATION().AsString()));
+    }
+
     outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 1});
     layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
 
@@ -4488,7 +4721,13 @@ void TfLiteParserImpl::ParseGatherNd(size_t subgraphIndex, size_t operatorIndex)
 
     auto layerName = fmt::format("GatherNd:{}:{}", subgraphIndex, operatorIndex);
     IConnectableLayer* layer = m_Network->AddGatherNdLayer(layerName.c_str());
-    ARMNN_ASSERT(layer != nullptr);
+
+    if (!layer)
+    {
+        throw NullPointerException(fmt::format("Layer {} pointer is null {}",
+                                                operatorIndex, CHECK_LOCATION().AsString()));
+    }
+
     TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 1});
     layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
 
@@ -4524,7 +4763,13 @@ void TfLiteParserImpl::ParseDepthToSpace(size_t subgraphIndex, size_t operatorIn
 
     auto layerName = fmt::format("DepthToSpace:{}:{}", subgraphIndex, operatorIndex);
     IConnectableLayer* layer = m_Network->AddDepthToSpaceLayer(descriptor, layerName.c_str());
-    ARMNN_ASSERT(layer != nullptr);
+
+    if (!layer)
+    {
+        throw NullPointerException(fmt::format("Layer {} pointer is null {}",
+                                                operatorIndex, CHECK_LOCATION().AsString()));
+    }
+
     TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
     layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
 
@@ -4649,7 +4894,12 @@ void TfLiteParserImpl::ParseLocalResponseNormalization(size_t subgraphIndex, siz
     descriptor.m_NormSize = 1 + (2 * descriptor.m_NormSize);
 
     IConnectableLayer* layer = m_Network->AddNormalizationLayer(descriptor, layerNameFormatted.c_str());
-    ARMNN_ASSERT(layer != nullptr);
+
+    if (!layer)
+    {
+        throw NullPointerException(fmt::format("Layer {} pointer is null {}",
+                                                operatorIndex, CHECK_LOCATION().AsString()));
+    }
 
     TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
     layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
@@ -4708,7 +4958,12 @@ void TfLiteParserImpl::ParsePower(size_t subgraphIndex, size_t operatorIndex)
     CheckMatchingQuantization(inputTensorInfo, input1TensorInfo, layerName, "Input 0", "Input 1");
 
     IConnectableLayer* layer = m_Network->AddElementwiseBinaryLayer(BinaryOperation::Power, layerName.c_str());
-    ARMNN_ASSERT(layer != nullptr);
+
+    if (!layer)
+    {
+        throw NullPointerException(fmt::format("Layer {} pointer is null {}",
+                                                operatorIndex, CHECK_LOCATION().AsString()));
+    }
 
     TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 1});
     CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
@@ -4780,7 +5035,12 @@ void TfLiteParserImpl::ParseSquaredDifference(size_t subgraphIndex, size_t opera
     CheckMatchingQuantization(inputTensorInfo, input1TensorInfo, layerName, "Input 0", "Input 1");
 
     IConnectableLayer* layer = m_Network->AddElementwiseBinaryLayer(BinaryOperation::SqDiff, layerName.c_str());
-    ARMNN_ASSERT(layer != nullptr);
+
+    if (!layer)
+    {
+        throw NullPointerException(fmt::format("Layer {} pointer is null {}",
+                                                operatorIndex, CHECK_LOCATION().AsString()));
+    }
 
     TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 1});
     CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
@@ -4809,7 +5069,12 @@ void TfLiteParserImpl::ParseElementwiseUnary(size_t subgraphIndex, size_t operat
     ElementwiseUnaryDescriptor desc;
     desc.m_Operation = unaryOperation;
     IConnectableLayer* layer = m_Network->AddElementwiseUnaryLayer(desc, layerNameFormatted.c_str());
-    ARMNN_ASSERT(layer != nullptr);
+
+    if (!layer)
+    {
+        throw NullPointerException(fmt::format("Layer {} pointer is null {}",
+                                                operatorIndex, CHECK_LOCATION().AsString()));
+    }
 
     TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0});
     layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
@@ -4872,7 +5137,12 @@ void TfLiteParserImpl::ParseComparison(size_t subgraphIndex, size_t operatorInde
     ComparisonDescriptor desc;
     desc.m_Operation = comparisonOperation;
     IConnectableLayer* layer = m_Network->AddComparisonLayer(desc, layerNameFormatted.c_str());
-    ARMNN_ASSERT(layer != nullptr);
+
+    if (!layer)
+    {
+        throw NullPointerException(fmt::format("Layer {} pointer is null {}",
+                                                operatorIndex, CHECK_LOCATION().AsString()));
+    }
 
     TensorInfo outputTensorInfo = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0, 1});
     layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
@@ -5139,7 +5409,12 @@ void TfLiteParserImpl::RegisterInputSlots(size_t subgraphIndex,
                                           unsigned int startingSlotIndex)
 {
     CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
-    ARMNN_ASSERT(layer != nullptr);
+
+    if (!layer)
+    {
+        throw NullPointerException(fmt::format("Layer {} pointer is null {}",
+                                                operatorIndex, CHECK_LOCATION().AsString()));
+    }
 
     if (tensorIndexes.size() + startingSlotIndex != layer->GetNumInputSlots())
     {
@@ -5167,7 +5442,13 @@ void TfLiteParserImpl::RegisterOutputSlots(size_t subgraphIndex,
                                            const std::vector<unsigned int>& tensorIndexes)
 {
     CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
-    ARMNN_ASSERT(layer != nullptr);
+
+    if (!layer)
+    {
+        throw NullPointerException(fmt::format("Layer {} pointer is null {}",
+                                                operatorIndex, CHECK_LOCATION().AsString()));
+    }
+
     if (tensorIndexes.size() != layer->GetNumOutputSlots())
     {
         throw ParseException(
-- 
cgit v1.2.1