author     Declan-ARM <decmce01@arm.com>    2024-03-12 16:40:25 +0000
committer  Colm Donelan <colm.donelan@arm.com>    2024-03-13 10:07:56 +0000
commit     7c75e336fbeeec052a1cb90c68d1caece332c176 (patch)
tree       8fac689c1b4192522f5fa98bccbfab12b8e08afe
parent     93bbf00d968101fb9a9174ad011b655ca7100546 (diff)
download   armnn-7c75e336fbeeec052a1cb90c68d1caece332c176.tar.gz
IVGCVSW-7853 Assert audit and removal
* src/armnn
* src/armnn/layers

Signed-off-by: Declan-ARM <decmce01@arm.com>
Change-Id: Ic78cbbb59e90fbb15f893205a358c45264243721
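The change replaces ARMNN_ASSERT / ARMNN_ASSERT_MSG checks, which are compiled out of release builds, with exceptions that are raised in every build type. As a rough orientation for the hunks below, here is a minimal C++ sketch of the two forms used; the expansion of ARMNN_THROW_INVALIDARG_MSG_IF_FALSE is assumed from how it is used in this diff (the real macro lives in include/armnn/Exceptions.hpp and may differ), and the *_SKETCH names are hypothetical.

// Sketch only: an assumed expansion of the throw-if-false helper used in this change.
// The real macro is defined in include/armnn/Exceptions.hpp and may differ.
#include <armnn/Exceptions.hpp>

#define ARMNN_THROW_INVALIDARG_MSG_IF_FALSE_SKETCH(_cond, _str) \
    do { if (!(_cond)) { throw armnn::InvalidArgumentException(_str); } } while (false)

// Before: checked only in builds where ARMNN_ASSERT is enabled.
//     ARMNN_ASSERT_MSG(m_NumViews == numNewOrdering, "number of views must match ...");
// After: checked in every build, either through the macro ...
void ReorderOriginsSketch(unsigned int numViews, unsigned int numNewOrdering)
{
    ARMNN_THROW_INVALIDARG_MSG_IF_FALSE_SKETCH(numViews == numNewOrdering,
        "number of views must match number of elements in the new ordering array");
}
// ... or through an explicit condition check that throws a specific exception type.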
-rw-r--r--  src/armnn/ArmNNProfilingServiceInitialiser.cpp | 16
-rw-r--r--  src/armnn/Descriptors.cpp | 7
-rw-r--r--  src/armnn/Graph.cpp | 83
-rw-r--r--  src/armnn/Graph.hpp | 37
-rw-r--r--  src/armnn/InternalTypes.cpp | 4
-rw-r--r--  src/armnn/Layer.cpp | 50
-rw-r--r--  src/armnn/LoadedNetwork.cpp | 117
-rw-r--r--  src/armnn/Logging.cpp | 4
-rw-r--r--  src/armnn/Network.cpp | 47
-rw-r--r--  src/armnn/NetworkUtils.cpp | 8
-rw-r--r--  src/armnn/Optimizer.cpp | 8
-rw-r--r--  src/armnn/Profiling.cpp | 32
-rw-r--r--  src/armnn/Runtime.cpp | 11
-rw-r--r--  src/armnn/SubgraphView.cpp | 18
-rw-r--r--  src/armnn/SubgraphViewSelector.cpp | 25
-rw-r--r--  src/armnn/Tensor.cpp | 2
-rw-r--r--  src/armnn/layers/AbsLayer.cpp | 11
-rw-r--r--  src/armnn/layers/ActivationLayer.cpp | 9
-rw-r--r--  src/armnn/layers/ArgMinMaxLayer.cpp | 23
-rw-r--r--  src/armnn/layers/BatchMatMulLayer.cpp | 17
-rw-r--r--  src/armnn/layers/BatchNormalizationLayer.cpp | 32
-rw-r--r--  src/armnn/layers/BatchToSpaceNdLayer.cpp | 9
-rw-r--r--  src/armnn/layers/CastLayer.cpp | 9
-rw-r--r--  src/armnn/layers/ChannelShuffleLayer.cpp | 11
-rw-r--r--  src/armnn/layers/ComparisonLayer.cpp | 23
-rw-r--r--  src/armnn/layers/ConcatLayer.cpp | 28
-rw-r--r--  src/armnn/layers/ConvertFp16ToFp32Layer.cpp | 9
-rw-r--r--  src/armnn/layers/ConvertFp32ToFp16Layer.cpp | 9
-rw-r--r--  src/armnn/layers/Convolution2dLayer.cpp | 38
-rw-r--r--  src/armnn/layers/Convolution3dLayer.cpp | 44
-rw-r--r--  src/armnn/layers/DebugLayer.cpp | 9
-rw-r--r--  src/armnn/layers/DepthToSpaceLayer.cpp | 15
-rw-r--r--  src/armnn/layers/DepthwiseConvolution2dLayer.cpp | 39
-rw-r--r--  src/armnn/layers/DequantizeLayer.cpp | 9
-rw-r--r--  src/armnn/layers/DetectionPostProcessLayer.cpp | 30
-rw-r--r--  src/armnn/layers/ElementwiseBaseLayer.cpp | 22
-rw-r--r--  src/armnn/layers/ElementwiseBinaryLayer.cpp | 22
-rw-r--r--  src/armnn/layers/ElementwiseUnaryLayer.cpp | 17
-rw-r--r--  src/armnn/layers/FakeQuantizationLayer.cpp | 9
-rw-r--r--  src/armnn/layers/FillLayer.cpp | 9
-rw-r--r--  src/armnn/layers/FloorLayer.cpp | 10
-rw-r--r--  src/armnn/layers/FullyConnectedLayer.cpp | 22
-rw-r--r--  src/armnn/layers/GatherLayer.cpp | 25
-rw-r--r--  src/armnn/layers/GatherNdLayer.cpp | 33
-rw-r--r--  src/armnn/layers/InstanceNormalizationLayer.cpp | 9
-rw-r--r--  src/armnn/layers/L2NormalizationLayer.cpp | 9
-rw-r--r--  src/armnn/layers/LogSoftmaxLayer.cpp | 10
-rw-r--r--  src/armnn/layers/LogicalBinaryLayer.cpp | 32
-rw-r--r--  src/armnn/layers/LstmLayer.cpp | 198
-rw-r--r--  src/armnn/layers/MapLayer.cpp | 8
-rw-r--r--  src/armnn/layers/MeanLayer.cpp | 28
-rw-r--r--  src/armnn/layers/MemCopyLayer.cpp | 9
-rw-r--r--  src/armnn/layers/MemImportLayer.cpp | 9
-rw-r--r--  src/armnn/layers/MergeLayer.cpp | 15
-rw-r--r--  src/armnn/layers/NormalizationLayer.cpp | 9
-rw-r--r--  src/armnn/layers/PadLayer.cpp | 31
-rw-r--r--  src/armnn/layers/PermuteLayer.cpp | 16
-rw-r--r--  src/armnn/layers/Pooling2dLayer.cpp | 29
-rw-r--r--  src/armnn/layers/Pooling3dLayer.cpp | 29
-rw-r--r--  src/armnn/layers/PreluLayer.cpp | 33
-rw-r--r--  src/armnn/layers/QLstmLayer.cpp | 200
-rw-r--r--  src/armnn/layers/QuantizedLstmLayer.cpp | 124
-rw-r--r--  src/armnn/layers/ReduceLayer.cpp | 21
-rw-r--r--  src/armnn/layers/ReshapeLayer.cpp | 15
-rw-r--r--  src/armnn/layers/ResizeLayer.cpp | 15
-rw-r--r--  src/armnn/layers/ReverseV2Layer.cpp | 15
-rw-r--r--  src/armnn/layers/RsqrtLayer.cpp | 11
-rw-r--r--  src/armnn/layers/ShapeLayer.cpp | 16
-rw-r--r--  src/armnn/layers/SliceLayer.cpp | 16
-rw-r--r--  src/armnn/layers/SoftmaxLayer.cpp | 9
-rw-r--r--  src/armnn/layers/SpaceToBatchNdLayer.cpp | 9
-rw-r--r--  src/armnn/layers/SpaceToDepthLayer.cpp | 15
-rw-r--r--  src/armnn/layers/SplitterLayer.cpp | 25
-rw-r--r--  src/armnn/layers/StackLayer.cpp | 21
-rw-r--r--  src/armnn/layers/StridedSliceLayer.cpp | 15
-rw-r--r--  src/armnn/layers/SwitchLayer.cpp | 14
-rw-r--r--  src/armnn/layers/TileLayer.cpp | 18
-rw-r--r--  src/armnn/layers/TransposeConvolution2dLayer.cpp | 53
-rw-r--r--  src/armnn/layers/TransposeLayer.cpp | 16
-rw-r--r--  src/armnn/layers/UnidirectionalSequenceLstmLayer.cpp | 214
-rw-r--r--  src/armnn/layers/UnmapLayer.cpp | 8
81 files changed, 1770 insertions, 566 deletions
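One caller-facing consequence of the diffs that follow: conditions that previously failed only as debug-build asserts now surface as exceptions at runtime. The helper below is a hypothetical caller-side sketch, not part of this commit; it assumes only that armnn::InvalidArgumentException, armnn::NullPointerException and the other thrown types derive from armnn::Exception, as declared in armnn's public Exceptions.hpp.

#include <armnn/Exceptions.hpp>
#include <functional>
#include <iostream>

// Hypothetical helper showing how a caller can handle the exceptions that
// replace the removed asserts; nothing in this block is part of the commit.
bool RunGuarded(const std::function<void()>& doArmnnWork)
{
    try
    {
        doArmnnWork();   // e.g. network construction, optimisation or loading
        return true;
    }
    catch (const armnn::InvalidArgumentException& e)
    {
        std::cerr << "Invalid argument: " << e.what() << std::endl;
        return false;
    }
    catch (const armnn::Exception& e)  // NullPointerException, GraphValidationException, ...
    {
        std::cerr << "Arm NN error: " << e.what() << std::endl;
        return false;
    }
}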
diff --git a/src/armnn/ArmNNProfilingServiceInitialiser.cpp b/src/armnn/ArmNNProfilingServiceInitialiser.cpp
index bbed43ae4c..7ca3fc1bd8 100644
--- a/src/armnn/ArmNNProfilingServiceInitialiser.cpp
+++ b/src/armnn/ArmNNProfilingServiceInitialiser.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022 Arm Ltd. All rights reserved.
+// Copyright © 2022,2024 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -40,7 +40,11 @@ void ArmNNProfilingServiceInitialiser::InitialiseProfilingService(arm::pipe::IPr
networkLoads,
"The number of networks loaded at runtime",
networks);
- ARMNN_ASSERT(loadedNetworksCounter);
+ if (!loadedNetworksCounter)
+ {
+ throw armnn::NullPointerException("loadedNetworksCounter must not be null.");
+ }
+
profilingService.InitializeCounterValue(loadedNetworksCounter->m_Uid);
}
// Register a counter for the number of unloaded networks
@@ -57,7 +61,7 @@ void ArmNNProfilingServiceInitialiser::InitialiseProfilingService(arm::pipe::IPr
networkUnloads,
"The number of networks unloaded at runtime",
networks);
- ARMNN_ASSERT(unloadedNetworksCounter);
+
profilingService.InitializeCounterValue(unloadedNetworksCounter->m_Uid);
}
std::string backends("backends");
@@ -75,7 +79,7 @@ void ArmNNProfilingServiceInitialiser::InitialiseProfilingService(arm::pipe::IPr
backendsRegistered,
"The number of registered backends",
backends);
- ARMNN_ASSERT(registeredBackendsCounter);
+
profilingService.InitializeCounterValue(registeredBackendsCounter->m_Uid);
// Due to backends being registered before the profiling service becomes active,
@@ -97,7 +101,7 @@ void ArmNNProfilingServiceInitialiser::InitialiseProfilingService(arm::pipe::IPr
backendsUnregistered,
"The number of unregistered backends",
backends);
- ARMNN_ASSERT(unregisteredBackendsCounter);
+
profilingService.InitializeCounterValue(unregisteredBackendsCounter->m_Uid);
}
// Register a counter for the number of inferences run
@@ -115,7 +119,7 @@ void ArmNNProfilingServiceInitialiser::InitialiseProfilingService(arm::pipe::IPr
inferencesRun,
"The number of inferences run",
inferences);
- ARMNN_ASSERT(inferencesRunCounter);
+
profilingService.InitializeCounterValue(inferencesRunCounter->m_Uid);
}
}
diff --git a/src/armnn/Descriptors.cpp b/src/armnn/Descriptors.cpp
index 5e4628bd77..b8f4c7aa94 100644
--- a/src/armnn/Descriptors.cpp
+++ b/src/armnn/Descriptors.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "armnn/Descriptors.hpp"
@@ -203,8 +203,9 @@ const uint32_t* OriginsDescriptor::GetViewOrigin(uint32_t idx) const
// Reorders the viewOrigins in accordance with the indices presented in newOrdering array.
void OriginsDescriptor::ReorderOrigins(unsigned int* newOrdering, unsigned int numNewOrdering)
{
- ARMNN_ASSERT_MSG(m_NumViews == numNewOrdering, "number of views must match number of "
- "elements in the new ordering array");
+ ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(m_NumViews == numNewOrdering,
+ "number of views must match number of elements in the new ordering array");
+
std::vector<uint32_t*> viewOrigins(&m_ViewOrigins[0], &m_ViewOrigins[m_NumViews]);
for (unsigned int i = 0; i < numNewOrdering; ++i)
diff --git a/src/armnn/Graph.cpp b/src/armnn/Graph.cpp
index f7fbba783e..70ecb32792 100644
--- a/src/armnn/Graph.cpp
+++ b/src/armnn/Graph.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -207,7 +207,8 @@ Status Graph::SerializeToDot(std::ostream& stream)
Status Graph::AllocateDynamicBuffers()
{
// Layers must be sorted in topological order
- ARMNN_ASSERT(m_LayersInOrder);
+ ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(m_LayersInOrder, "layers must be in order.");
+
ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "LoadNetwork_AllocateDynamicBuffers");
std::unordered_set<const ITensorHandle*> preallocatedTensors;
@@ -334,7 +335,10 @@ void Graph::AddCompatibilityLayers(std::map<BackendId, std::unique_ptr<IBackendI
auto MayNeedCompatibilityLayer = [](const Layer& layer)
{
// All layers should have been associated with a valid compute device at this point.
- ARMNN_ASSERT(layer.GetBackendId() != Compute::Undefined);
+ if (layer.GetBackendId() == Compute::Undefined)
+ {
+ throw armnn::Exception("AddCompatibilityLayers: All layers must be assigned to a backend at this point.");
+ }
// Does not need another compatibility layer if a copy or import layer is already present.
return layer.GetType() != LayerType::MemCopy &&
layer.GetType() != LayerType::MemImport;
@@ -348,7 +352,7 @@ void Graph::AddCompatibilityLayers(std::map<BackendId, std::unique_ptr<IBackendI
ForEachLayer([this, &backends, &registry, MayNeedCompatibilityLayer, IsCompatibilityStrategy](Layer* srcLayer)
{
- ARMNN_ASSERT(srcLayer);
+ ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(srcLayer, "source layer must not be null.");
if (!MayNeedCompatibilityLayer(*srcLayer))
{
@@ -365,11 +369,17 @@ void Graph::AddCompatibilityLayers(std::map<BackendId, std::unique_ptr<IBackendI
for (unsigned int srcConnectionIndex = 0; srcConnectionIndex < srcConnections.size(); srcConnectionIndex++)
{
InputSlot* dstInputSlot = srcConnections[srcConnectionIndex];
- ARMNN_ASSERT(dstInputSlot);
+ if (!dstInputSlot)
+ {
+ throw armnn::Exception("dstInputSlot must not be null.");
+ }
EdgeStrategy strategy = srcEdgeStrategies[srcConnectionIndex];
- ARMNN_ASSERT_MSG(strategy != EdgeStrategy::Undefined,
- "Undefined memory strategy found while adding copy layers for compatibility");
+ if (strategy == EdgeStrategy::Undefined)
+ {
+ throw armnn::Exception("Undefined memory strategy found "
+ "while adding copy layers for compatibility");
+ }
const Layer& dstLayer = dstInputSlot->GetOwningLayer();
if (MayNeedCompatibilityLayer(dstLayer) &&
@@ -390,7 +400,11 @@ void Graph::AddCompatibilityLayers(std::map<BackendId, std::unique_ptr<IBackendI
}
else
{
- ARMNN_ASSERT_MSG(strategy == EdgeStrategy::ExportToTarget, "Invalid edge strategy found.");
+ if (strategy != EdgeStrategy::ExportToTarget)
+ {
+ throw armnn::Exception("Invalid edge strategy found.");
+ }
+
compLayer = InsertNewLayer<MemImportLayer>(*dstInputSlot, compLayerName.c_str());
}
@@ -460,7 +474,7 @@ void Graph::AddCompatibilityLayers(std::map<BackendId, std::unique_ptr<IBackendI
void Graph::SubstituteSubgraph(SubgraphView& subgraph, IConnectableLayer* substituteLayer)
{
- ARMNN_ASSERT(substituteLayer != nullptr);
+ ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(substituteLayer, "substituteLayer should not be null");
// Create a new sub-graph with only the given layer, using
// the given sub-graph as a reference of which parent graph to use
@@ -491,16 +505,19 @@ void Graph::SubstituteSubgraph(SubgraphView& subgraph, const SubgraphView& subst
void Graph::ReplaceSubgraphConnections(const SubgraphView& subgraph, const SubgraphView& substituteSubgraph)
{
- ARMNN_ASSERT_MSG(!substituteSubgraph.GetIConnectableLayers().empty(),
- "New sub-graph used for substitution must not be empty");
+ if (substituteSubgraph.GetIConnectableLayers().empty())
+ {
+ throw armnn::Exception("New sub-graph used for substitution must not be empty");
+ }
const SubgraphView::IConnectableLayers& substituteSubgraphLayers = substituteSubgraph.GetIConnectableLayers();
std::for_each(substituteSubgraphLayers.begin(), substituteSubgraphLayers.end(), [&](IConnectableLayer* layer)
{
- IgnoreUnused(layer);
layer = PolymorphicDowncast<Layer*>(layer);
- ARMNN_ASSERT_MSG(std::find(m_Layers.begin(), m_Layers.end(), layer) != m_Layers.end(),
- "Substitute layer is not a member of graph");
+ if (std::find(m_Layers.begin(), m_Layers.end(), layer) == m_Layers.end())
+ {
+ throw armnn::Exception("Substitute layer is not a member of graph");
+ }
});
const SubgraphView::IInputSlots& subgraphInputSlots = subgraph.GetIInputSlots();
@@ -512,8 +529,15 @@ void Graph::ReplaceSubgraphConnections(const SubgraphView& subgraph, const Subgr
const SubgraphView::IInputSlots& substituteSubgraphInputSlots = substituteSubgraph.GetIInputSlots();
const SubgraphView::IOutputSlots& substituteSubgraphOutputSlots = substituteSubgraph.GetIOutputSlots();
- ARMNN_ASSERT(subgraphNumInputSlots == substituteSubgraphInputSlots.size());
- ARMNN_ASSERT(subgraphNumOutputSlots == substituteSubgraphOutputSlots.size());
+ if (subgraphNumInputSlots != substituteSubgraphInputSlots.size())
+ {
+ throw armnn::Exception("subgraph and substitute subgraph input slot sizes must be the same.");
+ }
+
+ if (subgraphNumOutputSlots != substituteSubgraphOutputSlots.size())
+ {
+ throw armnn::Exception("subgraph and substitute subgraph output slot sizes must be the same.");
+ }
// Disconnect the sub-graph and replace it with the substitute sub-graph
@@ -521,7 +545,10 @@ void Graph::ReplaceSubgraphConnections(const SubgraphView& subgraph, const Subgr
for (unsigned int inputSlotIdx = 0; inputSlotIdx < subgraphNumInputSlots; ++inputSlotIdx)
{
IInputSlot* subgraphInputSlot = subgraphInputSlots.at(inputSlotIdx);
- ARMNN_ASSERT(subgraphInputSlot);
+ if (!subgraphInputSlot)
+ {
+ throw armnn::NullPointerException("subgraphInputSlot must not be null.");
+ }
// Only disconnect if the InputSlot has a connection, this might not be the case when
// dealing with working copies of SubgraphViews
@@ -532,11 +559,19 @@ void Graph::ReplaceSubgraphConnections(const SubgraphView& subgraph, const Subgr
InputSlot* inputSlot = PolymorphicDowncast<InputSlot*>(subgraphInputSlot);
bool isOverridden = inputSlot->IsTensorInfoOverridden();
- ARMNN_ASSERT(connectedOutputSlot);
+ if (!connectedOutputSlot)
+ {
+ throw armnn::NullPointerException("connectedOutputSlot must not be null.");
+ }
+
connectedOutputSlot->Disconnect(*subgraphInputSlot);
IInputSlot* substituteInputSlot = substituteSubgraphInputSlots.at(inputSlotIdx);
- ARMNN_ASSERT(substituteInputSlot);
+ if (!substituteInputSlot)
+ {
+ throw armnn::NullPointerException("substituteInputSlot must not be null.");
+ }
+
connectedOutputSlot->Connect(*substituteInputSlot);
if (isOverridden)
@@ -553,11 +588,17 @@ void Graph::ReplaceSubgraphConnections(const SubgraphView& subgraph, const Subgr
{
auto subgraphOutputSlot =
PolymorphicDowncast<OutputSlot*>(subgraphOutputSlots.at(outputSlotIdx));
- ARMNN_ASSERT(subgraphOutputSlot);
+ if (!subgraphOutputSlot)
+ {
+ throw armnn::NullPointerException("subgraphOutputSlot must not be null.");
+ }
auto substituteOutputSlot =
PolymorphicDowncast<OutputSlot*>(substituteSubgraphOutputSlots.at(outputSlotIdx));
- ARMNN_ASSERT(substituteOutputSlot);
+ if (!substituteOutputSlot)
+ {
+ throw armnn::NullPointerException("substituteOutputSlot must not be null.");
+ }
subgraphOutputSlot->MoveAllConnections(*substituteOutputSlot);
}
diff --git a/src/armnn/Graph.hpp b/src/armnn/Graph.hpp
index aa543c1357..599958847c 100644
--- a/src/armnn/Graph.hpp
+++ b/src/armnn/Graph.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
@@ -126,8 +126,15 @@ public:
otherLayer->Reparent(*this, m_Layers.end());
});
- ARMNN_ASSERT(other.m_PosInGraphMap.empty());
- ARMNN_ASSERT(other.m_Layers.empty());
+ if (!other.m_PosInGraphMap.empty())
+ {
+ throw armnn::Exception("assignment positions in graph map must be empty.");
+ }
+
+ if (!other.m_Layers.empty())
+ {
+ throw armnn::Exception("assignment layers must be empty.");
+ }
return *this;
}
@@ -336,8 +343,10 @@ private:
graph.m_Layers.erase(layerIt);
const size_t numErased = graph.m_PosInGraphMap.erase(this);
- IgnoreUnused(numErased);
- ARMNN_ASSERT(numErased == 1);
+ if (numErased != 1)
+ {
+ throw armnn::Exception("numErased must be \"1\".");
+ }
}
protected:
@@ -415,7 +424,6 @@ public:
{
const size_t numErased = m_Graph->m_InputIds.erase(GetBindingId());
IgnoreUnused(numErased);
- ARMNN_ASSERT(numErased == 1);
}
};
@@ -441,14 +449,16 @@ public:
{
const size_t numErased = m_Graph->m_OutputIds.erase(GetBindingId());
IgnoreUnused(numErased);
- ARMNN_ASSERT(numErased == 1);
}
};
inline Graph::Iterator Graph::GetPosInGraph(Layer& layer)
{
auto it = m_PosInGraphMap.find(&layer);
- ARMNN_ASSERT(it != m_PosInGraphMap.end());
+ if (it == m_PosInGraphMap.end())
+ {
+ throw armnn::Exception("unable to find layer in graph map.");
+ }
return it->second;
}
@@ -491,7 +501,10 @@ inline LayerT* Graph::InsertNewLayer(OutputSlot& insertAfter, Args&&... args)
const Iterator pos = std::next(GetPosInGraph(owningLayer));
LayerT* const layer = new LayerInGraph<LayerT>(*this, pos, std::forward<Args>(args)...);
- ARMNN_ASSERT(layer->GetNumInputSlots() == 1);
+ if (layer->GetNumInputSlots() != 1)
+ {
+ throw armnn::Exception("layer must only one input slot.");
+ }
insertAfter.MoveAllConnections(layer->GetOutputSlot());
insertAfter.Connect(layer->GetInputSlot(0));
@@ -511,7 +524,11 @@ inline void Graph::EraseLayer(Iterator pos)
template <typename LayerT>
inline void Graph::EraseLayer(LayerT*& layer)
{
- ARMNN_ASSERT(layer != nullptr);
+ if (!layer)
+ {
+ throw armnn::NullPointerException("layer must not be null.");
+ }
+
EraseLayer(GetPosInGraph(*layer));
layer = nullptr;
}
diff --git a/src/armnn/InternalTypes.cpp b/src/armnn/InternalTypes.cpp
index aebc721be3..4a8380287b 100644
--- a/src/armnn/InternalTypes.cpp
+++ b/src/armnn/InternalTypes.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017,2024 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -18,7 +18,7 @@ char const* GetLayerTypeAsCString(LayerType type)
LIST_OF_LAYER_TYPE
#undef X
default:
- ARMNN_ASSERT_MSG(false, "Unknown layer type");
+ throw armnn::InvalidArgumentException("Unknown layer type");
return "Unknown";
}
}
diff --git a/src/armnn/Layer.cpp b/src/armnn/Layer.cpp
index d2f8f2c982..5a1ec9c1df 100644
--- a/src/armnn/Layer.cpp
+++ b/src/armnn/Layer.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "Layer.hpp"
@@ -33,13 +33,18 @@ void AssertNumberOfInputSlots(Layer& layer)
case LayerType::DepthwiseConvolution2d:
case LayerType::FullyConnected:
{
- ARMNN_ASSERT(layer.GetNumInputSlots() == 2 ||
- layer.GetNumInputSlots() == 3);
+ if (layer.GetNumInputSlots() != 2 && layer.GetNumInputSlots() != 3)
+ {
+ throw armnn::Exception("layer must have either 2 or 3 input slots.");
+ }
break;
}
default:
{
- ARMNN_ASSERT(layer.GetNumInputSlots() == 1);
+ if (layer.GetNumInputSlots() != 1)
+ {
+ throw armnn::Exception("layer must have one input slot.");
+ }
break;
}
}
@@ -47,7 +52,10 @@ void AssertNumberOfInputSlots(Layer& layer)
void InputSlot::Insert(Layer& layer)
{
- ARMNN_ASSERT(layer.GetNumOutputSlots() == 1);
+ if (layer.GetNumOutputSlots() != 1)
+ {
+ throw armnn::Exception("layer must have one output slot.");
+ }
OutputSlot* const prevSlot = GetConnectedOutputSlot();
@@ -105,7 +113,10 @@ bool OutputSlot::IsTensorInfoSet() const
bool OutputSlot::ValidateTensorShape(const TensorShape& shape) const
{
- ARMNN_ASSERT_MSG(IsTensorInfoSet(), "TensorInfo must be set in order to validate the shape.");
+ if (!IsTensorInfoSet())
+ {
+ throw armnn::Exception("TensorInfo must be set in order to validate the shape.");
+ }
return shape == m_OutputHandler.GetTensorInfo().GetShape();
}
@@ -146,8 +157,10 @@ void OutputSlot::MoveAllConnections(OutputSlot& destination)
{
while (GetNumConnections() > 0)
{
- ARMNN_ASSERT_MSG(m_EdgeStrategies[0] == EdgeStrategy::Undefined,
- "Cannot move connections once memory strategies have be established.");
+ if (m_EdgeStrategies[0] != EdgeStrategy::Undefined)
+ {
+ throw armnn::Exception("Cannot move connections once memory strategies have be established.");
+ }
InputSlot& connection = *GetConnection(0);
Disconnect(connection);
@@ -165,7 +178,7 @@ unsigned int OutputSlot::CalculateIndexOnOwner() const
return i;
}
}
- ARMNN_ASSERT_MSG(false, "Did not find slot on owner.");
+ throw armnn::Exception("Did not find slot on owner.");
return 0; // Error
}
@@ -257,7 +270,10 @@ void Layer::CollectWorkloadInputs(WorkloadDataCollector& dataCollector) const
for (auto&& inputSlot : GetInputSlots())
{
// The graph must be well-formed at this point.
- ARMNN_ASSERT(inputSlot.GetConnection());
+ if (!inputSlot.GetConnection())
+ {
+ throw armnn::Exception("input slot must have valid connection.");
+ }
const OutputHandler& outputHandler = inputSlot.GetConnectedOutputSlot()->GetOutputHandler();
if (inputSlot.IsTensorInfoOverridden() && outputHandler.GetData())
@@ -308,7 +324,10 @@ void Layer::CreateTensorHandles(const TensorHandleFactoryRegistry& registry,
{
ITensorHandleFactory* handleFactory;
handleFactory = registry.GetFactory(factoryId);
- ARMNN_ASSERT(handleFactory);
+ if (!handleFactory)
+ {
+ throw armnn::NullPointerException("handleFactory must not be null.");
+ }
handler.CreateTensorHandles(*handleFactory, IsMemoryManaged);
}
}
@@ -390,7 +409,10 @@ LayerPriority Layer::GetPriority() const
void Layer::VerifyLayerConnections(unsigned int expectedConnections, const CheckLocation& location) const
{
- ARMNN_ASSERT(GetNumInputSlots() == expectedConnections);
+ if (GetNumInputSlots() != expectedConnections)
+ {
+ throw armnn::Exception("input slots must match expected connections.");
+ }
for (unsigned int i=0; i<expectedConnections; ++i)
{
@@ -409,8 +431,8 @@ void Layer::VerifyLayerConnections(unsigned int expectedConnections, const Check
std::vector<TensorShape> Layer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
- ARMNN_ASSERT(GetNumInputSlots() != 0);
- ARMNN_ASSERT(GetNumOutputSlots() != 0);
+ ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(GetNumInputSlots() != 0, "input slots must not be zero.");
+ ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(GetNumOutputSlots() != 0, "output slots must not be zero.");
// By default we return what we got, meaning the output shape(s) are the same as the input(s).
// This only works if the number of inputs and outputs are the same. Since we are in the Layer
diff --git a/src/armnn/LoadedNetwork.cpp b/src/armnn/LoadedNetwork.cpp
index 91113c5ecb..d0bafc02d9 100644
--- a/src/armnn/LoadedNetwork.cpp
+++ b/src/armnn/LoadedNetwork.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -60,7 +60,10 @@ void AddLayerStructure(std::unique_ptr<TimelineUtilityMethods>& timelineUtils,
for (auto&& input : layer.GetInputSlots())
{
const IOutputSlot* source = input.GetConnectedOutputSlot();
- ARMNN_ASSERT(source != NULL);
+ if (!source)
+ {
+ throw armnn::NullPointerException("Null source found on input to layer \"" + layerName + "\".");
+ }
timelineUtils->CreateConnectionRelationship(ProfilingRelationshipType::RetentionLink,
source->GetOwningLayerGuid(),
layer.GetGuid());
@@ -643,7 +646,10 @@ void LoadedNetwork::AllocateAndExecuteConstantWorkloadsAsync()
{
const auto& outSlot = layer->GetOutputSlots()[0];
const auto factoryId = outSlot.GetTensorHandleFactoryId();
- ARMNN_ASSERT(factoryId != ITensorHandleFactory::LegacyFactoryId);
+ if (factoryId == ITensorHandleFactory::LegacyFactoryId)
+ {
+ throw armnn::Exception("factoryId must not be of type \"Legacy\".");
+ }
auto& workloadFactory = GetWorkloadFactory(*layer);
layer->CreateTensorHandles(m_TensorHandleFactoryRegistry, workloadFactory);
@@ -710,7 +716,11 @@ TensorInfo LoadedNetwork::GetInputTensorInfo(LayerBindingId layerId) const
{
for (auto&& inputLayer : m_OptimizedNetwork->pOptimizedNetworkImpl->GetGraph().GetInputLayers())
{
- ARMNN_ASSERT_MSG(inputLayer->GetNumOutputSlots() == 1, "Input layer should have exactly 1 output slot");
+ if (inputLayer->GetNumOutputSlots() != 1)
+ {
+ throw armnn::GraphValidationException("Input layer should have exactly 1 output slot");
+ }
+
if (inputLayer->GetBindingId() == layerId)
{
return inputLayer->GetOutputSlot(0).GetTensorInfo();
@@ -724,8 +734,16 @@ TensorInfo LoadedNetwork::GetOutputTensorInfo(LayerBindingId layerId) const
{
for (auto&& outputLayer : m_OptimizedNetwork->pOptimizedNetworkImpl->GetGraph().GetOutputLayers())
{
- ARMNN_ASSERT_MSG(outputLayer->GetNumInputSlots() == 1, "Output layer should have exactly 1 input slot");
- ARMNN_ASSERT_MSG(outputLayer->GetInputSlot(0).GetConnection(), "Input slot on Output layer must be connected");
+ if (outputLayer->GetNumInputSlots() != 1)
+ {
+ throw armnn::GraphValidationException("Output layer should have exactly 1 input slot");
+ }
+
+ if (!outputLayer->GetInputSlot(0).GetConnection())
+ {
+ throw armnn::GraphValidationException("Input slot on Output layer must be connected");
+ }
+
if (outputLayer->GetBindingId() == layerId)
{
return outputLayer->GetInputSlot(0).GetTensorInfo();
@@ -750,7 +768,10 @@ const IWorkloadFactory& LoadedNetwork::GetWorkloadFactory(const Layer& layer) co
workloadFactory = it->second.get();
- ARMNN_ASSERT_MSG(workloadFactory, "No workload factory");
+ if (!workloadFactory)
+ {
+ throw armnn::NullPointerException("No workload factory");
+ }
return *workloadFactory;
}
@@ -962,14 +983,22 @@ Status LoadedNetwork::EnqueueWorkload(const InputTensors& inputTensors,
m_IsOutputImported[outputIndex] = true;
}
- ARMNN_ASSERT_MSG(inputTensorHandle != nullptr, "Data should have been allocated.");
+ if (!inputTensorHandle)
+ {
+ throw armnn::NullPointerException("Data should have been allocated.");
+ }
+
MemSyncQueueDescriptor syncDesc;
syncDesc.m_Inputs.push_back(inputTensorHandle);
WorkloadInfo info;
- info.m_InputTensorInfos.push_back(
- outputLayer->GetInputSlot(0).GetTensorInfo());
+ info.m_InputTensorInfos.push_back(outputLayer->GetInputSlot(0).GetTensorInfo());
+
auto syncWorkload = std::make_unique<SyncMemGenericWorkload>(syncDesc, info);
- ARMNN_ASSERT_MSG(syncWorkload, "No sync workload created");
+ if (!syncWorkload)
+ {
+ throw armnn::NullPointerException("No sync workload created");
+ }
+
m_OutputQueue.push_back(std::move(syncWorkload));
importedOutputIdIndex++;
}
@@ -1058,12 +1087,20 @@ void LoadedNetwork::EnqueueInput(const BindableLayer& layer, ITensorHandle* tens
inputQueueDescriptor.m_Inputs.push_back(tensorHandle);
info.m_InputTensorInfos.push_back(tensorInfo);
- ARMNN_ASSERT_MSG(layer.GetNumOutputSlots() == 1, "Can only handle Input Layer with one output");
+ if (layer.GetNumOutputSlots() != 1)
+ {
+ throw armnn::GraphValidationException("Can only handle Input Layer with one output");
+ }
+
const OutputHandler& handler = layer.GetOutputHandler();
const TensorInfo& outputTensorInfo = handler.GetTensorInfo();
ITensorHandle* outputTensorHandle = handler.GetData();
- ARMNN_ASSERT_MSG(outputTensorHandle != nullptr,
- "Data should have been allocated.");
+
+ if (!outputTensorHandle)
+ {
+ throw armnn::NullPointerException("Data should have been allocated.");
+ }
+
inputQueueDescriptor.m_Outputs.push_back(outputTensorHandle);
info.m_OutputTensorInfos.push_back(outputTensorInfo);
@@ -1090,7 +1127,10 @@ void LoadedNetwork::EnqueueInput(const BindableLayer& layer, ITensorHandle* tens
// Create a mem copy workload for input since we did not import
std::unique_ptr<IWorkload> inputWorkload = std::make_unique<CopyMemGenericWorkload>(inputQueueDescriptor, info);
- ARMNN_ASSERT_MSG(inputWorkload, "No input workload created");
+ if (!inputWorkload)
+ {
+ throw armnn::NullPointerException("No input workload created");
+ }
std::unique_ptr<TimelineUtilityMethods> timelineUtils =
TimelineUtilityMethods::GetTimelineUtils(*m_ProfilingService);
@@ -1123,14 +1163,20 @@ void LoadedNetwork::EnqueueOutput(const BindableLayer& layer, ITensorHandle* ten
outputQueueDescriptor.m_Outputs.push_back(tensorHandle);
info.m_OutputTensorInfos.push_back(tensorInfo);
- ARMNN_ASSERT_MSG(layer.GetNumInputSlots() == 1, "Output Layer should have exactly one input.");
+ if (layer.GetNumInputSlots() != 1)
+ {
+ throw armnn::GraphValidationException("Output Layer should have exactly one input.");
+ }
// Gets the output handler from the previous node.
const OutputHandler& outputHandler = layer.GetInputSlots()[0].GetConnectedOutputSlot()->GetOutputHandler();
const TensorInfo& inputTensorInfo = outputHandler.GetTensorInfo();
ITensorHandle* inputTensorHandle = outputHandler.GetData();
- ARMNN_ASSERT_MSG(inputTensorHandle != nullptr, "Data should have been allocated.");
+ if (!inputTensorHandle)
+ {
+ throw armnn::NullPointerException("Data should have been allocated.");
+ }
// Try import the output tensor.
// Note: We can only import the output pointer if all of the following hold true:
@@ -1160,7 +1206,10 @@ void LoadedNetwork::EnqueueOutput(const BindableLayer& layer, ITensorHandle* ten
syncDesc.m_Inputs.push_back(inputTensorHandle);
info.m_InputTensorInfos.push_back(inputTensorInfo);
auto syncWorkload = std::make_unique<SyncMemGenericWorkload>(syncDesc, info);
- ARMNN_ASSERT_MSG(syncWorkload, "No sync workload created");
+ if (!syncWorkload)
+ {
+ throw armnn::NullPointerException("No sync workload created");
+ }
m_OutputQueue.push_back(std::move(syncWorkload));
}
else
@@ -1178,7 +1227,10 @@ void LoadedNetwork::EnqueueOutput(const BindableLayer& layer, ITensorHandle* ten
std::unique_ptr<IWorkload> outputWorkload =
std::make_unique<CopyMemGenericWorkload>(outputQueueDescriptor, info);
- ARMNN_ASSERT_MSG(outputWorkload, "No output workload created");
+ if (!outputWorkload)
+ {
+ throw armnn::NullPointerException("No output workload created");
+ }
std::unique_ptr<TimelineUtilityMethods> timelineUtils =
TimelineUtilityMethods::GetTimelineUtils(*m_ProfilingService);
@@ -1361,7 +1413,11 @@ void LoadedNetwork::EnqueueInput(const ConstTensor& inputTensor, ITensorHandle*
// e) m_IsExportEnabled must be set to true
void LoadedNetwork::ImportOutputTensor(const Tensor& outputTensor, ITensorHandle* outputTensorHandle)
{
- ARMNN_ASSERT_MSG(outputTensorHandle != nullptr, "Data should have been allocated.");
+ if (!outputTensorHandle)
+ {
+ throw armnn::NullPointerException("Data should have been allocated.");
+ }
+
MemorySourceFlags importFlags = outputTensorHandle->GetImportFlags();
if (CheckFlag(importFlags, m_NetworkProperties.m_OutputSource))
{
@@ -1534,7 +1590,10 @@ std::vector<ImportedInputId> LoadedNetwork::ImportInputs(const InputTensors& inp
const TensorInfo& tensorInfo = outputSlot.GetTensorInfo();
ITensorHandleFactory* handleFactory = m_TensorHandleFactoryRegistry.GetFactory(factoryId);
- ARMNN_ASSERT(handleFactory);
+ if (!handleFactory)
+ {
+ throw armnn::NullPointerException("handleFactory must not be null.");
+ }
ImportedTensorHandlePin importedTensorHandlePin{layerBindingId,
handleFactory->CreateTensorHandle(tensorInfo, false)};
@@ -1667,7 +1726,10 @@ std::vector<ImportedOutputId> LoadedNetwork::ImportOutputs(const OutputTensors&
const TensorInfo& tensorInfo = inputSlot.GetTensorInfo();
ITensorHandleFactory* handleFactory = m_TensorHandleFactoryRegistry.GetFactory(factoryId);
- ARMNN_ASSERT(handleFactory);
+ if (!handleFactory)
+ {
+ throw armnn::NullPointerException("handleFactory must not be null.");
+ }
ImportedTensorHandlePin importedTensorHandlePin{layerBindingId,
handleFactory->CreateTensorHandle(tensorInfo, false)};
@@ -1987,7 +2049,10 @@ std::unique_ptr<IWorkingMemHandle> LoadedNetwork::CreateWorkingMemHandle(Network
else
{
ITensorHandleFactory* handleFactory = m_TensorHandleFactoryRegistry.GetFactory(factoryId);
- ARMNN_ASSERT(handleFactory);
+ if (!handleFactory)
+ {
+ throw armnn::NullPointerException("handleFactory must not be null.");
+ }
return handleFactory->CreateTensorHandle(tensorInfo, false);
}
};
@@ -2098,7 +2163,11 @@ std::unique_ptr<IWorkingMemHandle> LoadedNetwork::CreateWorkingMemHandle(Network
// so that the next tensor handle with a non overlapping lifetime can share its memory.
for (auto& slot : layer->GetInputSlots())
{
- ARMNN_ASSERT(slot.GetConnection());
+ if (!slot.GetConnection())
+ {
+ throw armnn::GraphValidationException("slot must be a valid input slot.");
+ }
+
auto outputSlot = slot.GetConnectedOutputSlot();
auto key = outputSlot->GetOwningLayer().GetGuid();
diff --git a/src/armnn/Logging.cpp b/src/armnn/Logging.cpp
index 73879e644d..bceb110273 100644
--- a/src/armnn/Logging.cpp
+++ b/src/armnn/Logging.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2019 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2019,2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -99,7 +99,7 @@ void SetLogFilter(LogSeverity level)
SimpleLogger<LogSeverity::Fatal>::Get().Enable(true);
break;
default:
- ARMNN_ASSERT(false);
+ throw armnn::InvalidArgumentException("Unknown LoggingSeverity level.");
}
}
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index 1a4fec59ce..6f33fb6a15 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -1039,7 +1039,7 @@ bool CheckFp16Support(BackendsMap& backends,
// Check if the first preferred backend has FP16 support
auto firstBackend = availablePreferredBackends[0];
auto backendObjPtr = backends.find(firstBackend)->second.get();
- ARMNN_ASSERT(backendObjPtr);
+
auto hasFp16Capability = BackendOptions::BackendOption{"HasFp16", true};
auto backendCapabilities = backendObjPtr->GetCapabilities();
@@ -1158,10 +1158,6 @@ void AssignBackendsIConnectable(OptimizedNetworkImpl* optNetObjPtr,
// Note: we don't need to log the error as it would already
// be logged in AttemptBackendAssignment().
}
- else
- {
- ARMNN_ASSERT_MSG(res.IsWarningOnly(), "OptimizationResult in unexpected state.");
- }
}
}
@@ -1321,7 +1317,6 @@ BackendsMap CreateSupportedBackends(TensorHandleFactoryRegistry& handleFactoryRe
{
auto backendFactory = backendRegistry.GetFactory(selectedBackend);
auto backendObjPtr = backendFactory();
- ARMNN_ASSERT(backendObjPtr);
backendObjPtr->RegisterTensorHandleFactories(handleFactoryRegistry);
@@ -1337,7 +1332,6 @@ OptimizationResult ApplyBackendOptimizations(OptimizedNetworkImpl* optNetObjPtr,
const ModelOptions& modelOptions,
Optional<std::vector<std::string>&> errMessages)
{
- ARMNN_ASSERT(optNetObjPtr);
ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "Optimizer_ApplyBackendOptimizations")
OptimizationResult result;
@@ -1348,7 +1342,10 @@ OptimizationResult ApplyBackendOptimizations(OptimizedNetworkImpl* optNetObjPtr,
for (auto&& selectedBackend : backendSettings.m_SelectedBackends)
{
auto backendObjPtr = backends.find(selectedBackend)->second.get();
- ARMNN_ASSERT(backendObjPtr);
+ if (!backendObjPtr)
+ {
+ throw armnn::NullPointerException("backendObjPtr must not be null.");
+ }
if (selectedBackend == armnn::Compute::GpuAcc || selectedBackend == armnn::Compute::CpuAcc)
{
@@ -1379,7 +1376,10 @@ OptimizationResult ApplyBackendOptimizations(OptimizedNetworkImpl* optNetObjPtr,
// Try to optimize the current sub-graph
ARMNN_SCOPED_PROFILING_EVENT(backendObjPtr->GetId(), "Optimizer_OptimizeSubgraph");
OptimizationViews optimizationViews = backendObjPtr->OptimizeSubgraphView(*subgraph, modelOptions);
- ARMNN_ASSERT(optimizationViews.Validate(*subgraph));
+ if (!optimizationViews.Validate(*subgraph))
+ {
+ throw armnn::Exception("optimizationViews must have a valid subgraph.");
+ }
// Optimization attempted, check the resulting optimized sub-graph
for (auto& substitution : optimizationViews.GetSubstitutions())
@@ -1393,7 +1393,6 @@ OptimizationResult ApplyBackendOptimizations(OptimizedNetworkImpl* optNetObjPtr,
const SubgraphView::IConnectableLayers& subgraphLayers = replacementSubgraph.GetIConnectableLayers();
std::for_each(subgraphLayers.begin(), subgraphLayers.end(), [&selectedBackend](IConnectableLayer* l)
{
- ARMNN_ASSERT(l);
PolymorphicDowncast<Layer*>(l)->SetBackendId(selectedBackend);
});
}
@@ -1487,7 +1486,11 @@ ITensorHandleFactory::FactoryId CalculateSlotOptionForInput(BackendsMap& backend
bool importEnabled)
{
Layer& layer = slot.GetOwningLayer();
- ARMNN_ASSERT(layer.GetType() == LayerType::Input);
+
+ if (layer.GetType() != LayerType::Input)
+ {
+ throw armnn::Exception("layer must be of type \"Input\".");
+ }
// Explicitly select the tensorhandle factory for InputLayer because the rules for it are slightly different. It
// doesn't matter which backend it is assigned to because they all use the same implementation, which
@@ -1514,7 +1517,10 @@ ITensorHandleFactory::FactoryId CalculateSlotOptionForInput(BackendsMap& backend
const Layer& connectedLayer = connection->GetOwningLayer();
auto toBackend = backends.find(connectedLayer.GetBackendId());
- ARMNN_ASSERT_MSG(toBackend != backends.end(), "Backend id not found for the connected layer");
+ if (toBackend == backends.end())
+ {
+ throw armnn::Exception("Backend id not found for the connected layer");
+ }
if (!toBackend->second.get()->SupportsTensorAllocatorAPI())
{
@@ -1672,7 +1678,10 @@ ITensorHandleFactory::FactoryId CalculateSlotOption(BackendsMap& backends,
const Layer& connectedLayer = connection->GetOwningLayer();
auto toBackend = backends.find(connectedLayer.GetBackendId());
- ARMNN_ASSERT_MSG(toBackend != backends.end(), "Backend id not found for the connected layer");
+ if (toBackend == backends.end())
+ {
+ throw armnn::Exception("Backend id not found for the connected layer");
+ }
auto dstPrefs = toBackend->second.get()->GetHandleFactoryPreferences();
for (auto&& src : srcPrefs)
@@ -1734,7 +1743,10 @@ EdgeStrategy CalculateEdgeStrategy(BackendsMap& backends,
bool importEnabled)
{
auto toBackend = backends.find(connectedLayer.GetBackendId());
- ARMNN_ASSERT_MSG(toBackend != backends.end(), "Backend id not found for the connected layer");
+ if (toBackend == backends.end())
+ {
+ throw armnn::Exception("Backend id not found for the connected layer");
+ }
auto dstPrefs = toBackend->second.get()->GetHandleFactoryPreferences();
@@ -1827,11 +1839,12 @@ OptimizationResult SelectTensorHandleStrategy(Graph& optGraph,
optGraph.ForEachLayer([&backends, &registry, &result, &errMessages, importEnabled, exportEnabled](Layer* layer)
{
- ARMNN_ASSERT(layer);
-
// Lets make sure the backend is in our list of supported backends. Something went wrong during backend
// assignment if this check fails
- ARMNN_ASSERT(backends.find(layer->GetBackendId()) != backends.end());
+ if (backends.find(layer->GetBackendId()) == backends.end())
+ {
+ throw armnn::Exception("Backend id not found for the layer");
+ }
// Check each output separately
for (unsigned int slotIdx = 0; slotIdx < layer->GetNumOutputSlots(); slotIdx++)
diff --git a/src/armnn/NetworkUtils.cpp b/src/armnn/NetworkUtils.cpp
index 1d46f029dc..f711209d33 100644
--- a/src/armnn/NetworkUtils.cpp
+++ b/src/armnn/NetworkUtils.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022,2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -125,7 +125,11 @@ std::vector<DebugLayer*> InsertDebugLayerAfter(Graph& graph, Layer& layer, bool
graph.InsertNewLayer<DebugLayer>(*outputSlot, debugName.c_str(), toFile);
// Sets output tensor info for the debug layer.
- ARMNN_ASSERT(debugLayer->GetInputSlot(0).GetConnectedOutputSlot() == &(*outputSlot));
+ if (debugLayer->GetInputSlot(0).GetConnectedOutputSlot() != &(*outputSlot))
+ {
+ throw armnn::Exception("unable to set output tensor info for the debug layer.");
+ }
+
TensorInfo debugInfo = debugLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo();
debugLayer->GetOutputSlot().SetTensorInfo(debugInfo);
diff --git a/src/armnn/Optimizer.cpp b/src/armnn/Optimizer.cpp
index 1d6a52efed..3d1b67e2cd 100644
--- a/src/armnn/Optimizer.cpp
+++ b/src/armnn/Optimizer.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017,2024 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "Optimizer.hpp"
@@ -29,7 +29,11 @@ void Optimizer::Pass(Graph& graph, const Optimizations& optimizations)
--it;
for (auto&& optimization : optimizations)
{
- ARMNN_ASSERT(*it);
+ if (!*it)
+ {
+ throw armnn::NullPointerException("Layer must not be null.");
+ }
+
optimization->Run(graph, **it);
if ((*it)->IsOutputUnconnected())
diff --git a/src/armnn/Profiling.cpp b/src/armnn/Profiling.cpp
index db2962ecad..78afb05611 100644
--- a/src/armnn/Profiling.cpp
+++ b/src/armnn/Profiling.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017,2024 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "Profiling.hpp"
@@ -43,7 +43,7 @@ constexpr bool g_WriteReportToStdOutOnProfilerDestruction = false;
Measurement FindMeasurement(const std::string& name, const Event* event)
{
- ARMNN_ASSERT(event != nullptr);
+ ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(event, "event should not be null.");
// Search though the measurements.
for (const auto& measurement : event->GetMeasurements())
@@ -61,7 +61,7 @@ Measurement FindMeasurement(const std::string& name, const Event* event)
std::vector<Measurement> FindKernelMeasurements(const Event* event)
{
- ARMNN_ASSERT(event != nullptr);
+ ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(event, "event should not be null.");
std::vector<Measurement> measurements;
@@ -230,13 +230,24 @@ void ProfilerImpl::EndEvent(Event* event)
{
event->Stop();
- ARMNN_ASSERT(!m_Parents.empty());
- ARMNN_ASSERT(event == m_Parents.top());
+ if (m_Parents.empty())
+ {
+ throw armnn::Exception("m_Parents must not be empty.");
+ }
+
+ if (event != m_Parents.top())
+ {
+ throw armnn::Exception("event must match the top of m_Parents.");
+ }
+
m_Parents.pop();
Event* parent = m_Parents.empty() ? nullptr : m_Parents.top();
- IgnoreUnused(parent);
- ARMNN_ASSERT(event->GetParentEvent() == parent);
+
+ if (event->GetParentEvent() != parent)
+ {
+ throw armnn::Exception("parent events must match.");
+ }
#if ARMNN_STREAMLINE_ENABLED
ANNOTATE_CHANNEL_END(uint32_t(m_Parents.size()));
@@ -305,7 +316,7 @@ void ExtractJsonObjects(unsigned int inferenceIndex,
JsonChildObject& parentObject,
std::map<const Event*, std::vector<const Event*>> descendantsMap)
{
- ARMNN_ASSERT(parentEvent);
+ ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(parentEvent, "parentEvent must not be null.");
// If profiling GUID is entered, process it
if (parentEvent->GetProfilingGuid().has_value())
@@ -339,7 +350,10 @@ void ExtractJsonObjects(unsigned int inferenceIndex,
measurementObject.SetUnit(instrumentMeasurements[measurementIndex].m_Unit);
measurementObject.SetType(JsonObjectType::Measurement);
- ARMNN_ASSERT(parentObject.NumChildren() == childIdx);
+ if (parentObject.NumChildren() != childIdx)
+ {
+ throw armnn::Exception("parentObject must have the same number of children as childIdx.");
+ }
parentObject.AddChild(measurementObject);
}
else
diff --git a/src/armnn/Runtime.cpp b/src/armnn/Runtime.cpp
index b0fc55010d..a8f1eb758c 100644
--- a/src/armnn/Runtime.cpp
+++ b/src/armnn/Runtime.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017, 2022-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017, 2022-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -348,10 +348,13 @@ RuntimeImpl::RuntimeImpl(const IRuntime::CreationOptions& options)
// Store backend contexts for the supported ones
try {
auto factoryFun = BackendRegistryInstance().GetFactory(id);
- ARMNN_ASSERT(factoryFun != nullptr);
+
+ if (!factoryFun)
+ {
+ throw armnn::NullPointerException("Factory Function should not be null.");
+ }
+
auto backend = factoryFun();
- ARMNN_ASSERT(backend != nullptr);
- ARMNN_ASSERT(backend.get() != nullptr);
auto customAllocatorMapIterator = options.m_CustomAllocatorMap.find(id);
if (customAllocatorMapIterator != options.m_CustomAllocatorMap.end() &&
diff --git a/src/armnn/SubgraphView.cpp b/src/armnn/SubgraphView.cpp
index 4259c4f288..3ede18151b 100644
--- a/src/armnn/SubgraphView.cpp
+++ b/src/armnn/SubgraphView.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017, 2019-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017, 2019-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -27,14 +27,17 @@ void AssertIfNullsOrDuplicates(const C& container, const std::string& errorMessa
std::unordered_set<T> duplicateSet;
std::for_each(container.begin(), container.end(), [&duplicateSet, &errorMessage](const T& i)
{
- // Ignore unused for release builds
- IgnoreUnused(errorMessage);
-
// Check if the item is valid
- ARMNN_ASSERT_MSG(i, errorMessage.c_str());
+ if (!i)
+ {
+ throw armnn::GraphValidationException(errorMessage.c_str());
+ }
// Check if a duplicate has been found
- ARMNN_ASSERT_MSG(duplicateSet.find(i) == duplicateSet.end(), errorMessage.c_str());
+ if (duplicateSet.find(i) != duplicateSet.end())
+ {
+ throw armnn::GraphValidationException(errorMessage.c_str());
+ }
duplicateSet.insert(i);
});
@@ -493,7 +496,8 @@ SubgraphView SubgraphView::GetWorkingCopy() const
void SubgraphView::SubstituteSubgraph(SubgraphView& subgraph, IConnectableLayer* substituteLayer)
{
- ARMNN_ASSERT(substituteLayer != nullptr);
+ ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(substituteLayer, "substituteLayer should not be null");
+
SubgraphView substituteSubgraph(substituteLayer);
SubstituteSubgraph(subgraph, substituteSubgraph);
diff --git a/src/armnn/SubgraphViewSelector.cpp b/src/armnn/SubgraphViewSelector.cpp
index 9fa8252790..6a134a3848 100644
--- a/src/armnn/SubgraphViewSelector.cpp
+++ b/src/armnn/SubgraphViewSelector.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017, 2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017,2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -81,14 +81,19 @@ public:
for (PartialSubgraph* a : m_Antecedents)
{
size_t numErased = a->m_Dependants.erase(this);
- ARMNN_ASSERT(numErased == 1);
- IgnoreUnused(numErased);
+ if (numErased != 1)
+ {
+ throw armnn::Exception("number of dependents erased must only be 1.");
+ }
a->m_Dependants.insert(m_Parent);
}
for (PartialSubgraph* a : m_Dependants)
{
size_t numErased = a->m_Antecedents.erase(this);
- ARMNN_ASSERT(numErased == 1);
+ if (numErased != 1)
+ {
+ throw armnn::Exception("number of antecedents erased must only be 1.");
+ }
IgnoreUnused(numErased);
a->m_Antecedents.insert(m_Parent);
}
@@ -200,7 +205,12 @@ struct LayerSelectionInfo
++slot)
{
OutputSlot* parentLayerOutputSlot = slot->GetConnectedOutputSlot();
- ARMNN_ASSERT_MSG(parentLayerOutputSlot != nullptr, "The input slots must be connected here.");
+
+ if (!parentLayerOutputSlot)
+ {
+ throw armnn::NullPointerException("The input slots must be connected here.");
+ }
+
if (parentLayerOutputSlot)
{
Layer& parentLayer = parentLayerOutputSlot->GetOwningLayer();
@@ -273,7 +283,10 @@ void ForEachLayerInput(LayerSelectionInfo::LayerInfoContainer& layerInfos,
for (auto inputSlot : layer.GetInputSlots())
{
auto connectedInput = PolymorphicDowncast<OutputSlot*>(inputSlot.GetConnection());
- ARMNN_ASSERT_MSG(connectedInput, "Dangling input slot detected.");
+ if (!connectedInput)
+ {
+ throw armnn::Exception("Dangling input slot detected.");
+ }
Layer& inputLayer = connectedInput->GetOwningLayer();
auto parentInfo = layerInfos.find(&inputLayer);
diff --git a/src/armnn/Tensor.cpp b/src/armnn/Tensor.cpp
index ab4ecc9194..3b116d90e0 100644
--- a/src/armnn/Tensor.cpp
+++ b/src/armnn/Tensor.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017,2024 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
diff --git a/src/armnn/layers/AbsLayer.cpp b/src/armnn/layers/AbsLayer.cpp
index 6858b364a6..67654ac8b6 100644
--- a/src/armnn/layers/AbsLayer.cpp
+++ b/src/armnn/layers/AbsLayer.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017,2019-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017,2019-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -42,7 +42,12 @@ void AbsLayer::ValidateTensorShapesFromInputs()
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetTensorInfo().GetShape() });
- ARMNN_ASSERT(inferredShapes.size() == 1);
+ if (inferredShapes.size() != 1)
+ {
+ throw armnn::LayerValidationException("inferredShapes has "
+ + std::to_string(inferredShapes.size()) +
+ " elements - should only have 1.");
+ }
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "AbsLayer");
}
@@ -52,4 +57,4 @@ void AbsLayer::ExecuteStrategy(IStrategy &strategy) const
strategy.ExecuteStrategy(this, GetParameters(), {}, GetName());
}
-} // namespace armnn
\ No newline at end of file
+} // namespace armnn
diff --git a/src/armnn/layers/ActivationLayer.cpp b/src/armnn/layers/ActivationLayer.cpp
index fe4aaa766f..999415df4b 100644
--- a/src/armnn/layers/ActivationLayer.cpp
+++ b/src/armnn/layers/ActivationLayer.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "ActivationLayer.hpp"
@@ -40,7 +40,12 @@ void ActivationLayer::ValidateTensorShapesFromInputs()
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetTensorInfo().GetShape() });
- ARMNN_ASSERT(inferredShapes.size() == 1);
+ if (inferredShapes.size() != 1)
+ {
+ throw armnn::LayerValidationException("inferredShapes has "
+ + std::to_string(inferredShapes.size()) +
+ " elements - should only have 1.");
+ }
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "ActivationLayer");
}
diff --git a/src/armnn/layers/ArgMinMaxLayer.cpp b/src/armnn/layers/ArgMinMaxLayer.cpp
index 37986572a0..537d7d10c5 100644
--- a/src/armnn/layers/ArgMinMaxLayer.cpp
+++ b/src/armnn/layers/ArgMinMaxLayer.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017,2019-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017,2019-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -36,7 +36,11 @@ ArgMinMaxLayer* ArgMinMaxLayer::Clone(Graph& graph) const
std::vector<TensorShape> ArgMinMaxLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
- ARMNN_ASSERT(inputShapes.size() == 1);
+ if (inputShapes.size() != 1)
+ {
+ throw armnn::LayerValidationException("inputShapes' size is \"" + std::to_string(inputShapes.size()) +
+ "\" - should be \"1\".");
+ }
TensorShape inputShape = inputShapes[0];
auto inputNumDimensions = inputShape.GetNumDimensions();
@@ -44,7 +48,13 @@ std::vector<TensorShape> ArgMinMaxLayer::InferOutputShapes(const std::vector<Ten
auto axis = m_Param.m_Axis;
auto unsignedAxis = armnnUtils::GetUnsignedAxis(inputNumDimensions, axis);
- ARMNN_ASSERT(unsignedAxis <= inputNumDimensions);
+ if (unsignedAxis > inputNumDimensions)
+ {
+ throw armnn::LayerValidationException("Axis must not be greater than number of input dimensions (\""
+ + std::to_string(unsignedAxis) +
+ "\" vs \""
+ + std::to_string(inputNumDimensions) + "\").");
+ }
// 1D input shape results in scalar output
if (inputShape.GetNumDimensions() == 1)
@@ -81,7 +91,12 @@ void ArgMinMaxLayer::ValidateTensorShapesFromInputs()
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetTensorInfo().GetShape() });
- ARMNN_ASSERT(inferredShapes.size() == 1);
+ if (inferredShapes.size() != 1)
+ {
+ throw armnn::LayerValidationException("inferredShapes has "
+ + std::to_string(inferredShapes.size()) +
+ " elements - should only have 1.");
+ }
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "ArgMinMaxLayer");
}
diff --git a/src/armnn/layers/BatchMatMulLayer.cpp b/src/armnn/layers/BatchMatMulLayer.cpp
index 8b2629c4ea..cafb051c7b 100644
--- a/src/armnn/layers/BatchMatMulLayer.cpp
+++ b/src/armnn/layers/BatchMatMulLayer.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "BatchMatMulLayer.hpp"
@@ -32,7 +32,11 @@ BatchMatMulLayer* BatchMatMulLayer::Clone(Graph& graph) const
std::vector<TensorShape> BatchMatMulLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
- ARMNN_ASSERT(inputShapes.size() == 2);
+ if (inputShapes.size() != 2)
+ {
+ throw armnn::LayerValidationException("inputShapes' size is \"" + std::to_string(inputShapes.size()) +
+ "\" - should be \"2\".");
+ }
TensorShape inputXShape = inputShapes[0];
TensorShape inputYShape = inputShapes[1];
@@ -102,9 +106,14 @@ void BatchMatMulLayer::ValidateTensorShapesFromInputs()
GetInputSlot(0).GetTensorInfo().GetShape(),
GetInputSlot(1).GetTensorInfo().GetShape() });
- ARMNN_ASSERT(inferredShapes.size() == 1);
+ if (inferredShapes.size() != 1)
+ {
+ throw armnn::LayerValidationException("inferredShapes has "
+ + std::to_string(inferredShapes.size()) +
+ " elements - should only have 1.");
+ }
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "BatchMatMulLayer");
}
-} // namespace armnn
\ No newline at end of file
+} // namespace armnn
diff --git a/src/armnn/layers/BatchNormalizationLayer.cpp b/src/armnn/layers/BatchNormalizationLayer.cpp
index 17463f8d4f..9936041093 100644
--- a/src/armnn/layers/BatchNormalizationLayer.cpp
+++ b/src/armnn/layers/BatchNormalizationLayer.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "BatchNormalizationLayer.hpp"
@@ -21,10 +21,25 @@ BatchNormalizationLayer::BatchNormalizationLayer(const armnn::BatchNormalization
std::unique_ptr<IWorkload> BatchNormalizationLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
// on this level constant data should not be released..
- ARMNN_ASSERT_MSG(m_Mean != nullptr, "BatchNormalizationLayer: Mean data should not be null.");
- ARMNN_ASSERT_MSG(m_Variance != nullptr, "BatchNormalizationLayer: Variance data should not be null.");
- ARMNN_ASSERT_MSG(m_Beta != nullptr, "BatchNormalizationLayer: Beta data should not be null.");
- ARMNN_ASSERT_MSG(m_Gamma != nullptr, "BatchNormalizationLayer: Gamma data should not be null.");
+ if (!m_Mean)
+ {
+ throw armnn::NullPointerException("BatchNormalizationLayer: Mean data should not be null.");
+ }
+
+ if (!m_Variance)
+ {
+ throw armnn::NullPointerException("BatchNormalizationLayer: Variance data should not be null.");
+ }
+
+ if (!m_Beta)
+ {
+ throw armnn::NullPointerException("BatchNormalizationLayer: Beta data should not be null.");
+ }
+
+ if (!m_Gamma)
+ {
+ throw armnn::NullPointerException("BatchNormalizationLayer: Gamma data should not be null.");
+ }
BatchNormalizationQueueDescriptor descriptor;
SetAdditionalInfo(descriptor);
@@ -59,7 +74,12 @@ void BatchNormalizationLayer::ValidateTensorShapesFromInputs()
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetTensorInfo().GetShape() });
- ARMNN_ASSERT(inferredShapes.size() == 1);
+ if (inferredShapes.size() != 1)
+ {
+ throw armnn::LayerValidationException("inferredShapes has "
+ + std::to_string(inferredShapes.size()) +
+ " elements - should only have 1.");
+ }
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "BatchNormalizationLayer");
diff --git a/src/armnn/layers/BatchToSpaceNdLayer.cpp b/src/armnn/layers/BatchToSpaceNdLayer.cpp
index 63817dde16..9f604503b9 100644
--- a/src/armnn/layers/BatchToSpaceNdLayer.cpp
+++ b/src/armnn/layers/BatchToSpaceNdLayer.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2018-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2018-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -45,7 +45,12 @@ void BatchToSpaceNdLayer::ValidateTensorShapesFromInputs()
auto inferredShapes = InferOutputShapes({GetInputSlot(0).GetTensorInfo().GetShape()});
- ARMNN_ASSERT(inferredShapes.size() == 1);
+ if (inferredShapes.size() != 1)
+ {
+ throw armnn::LayerValidationException("inferredShapes has "
+ + std::to_string(inferredShapes.size()) +
+ " elements - should only have 1.");
+ }
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "BatchToSpaceNdLayer");
}
diff --git a/src/armnn/layers/CastLayer.cpp b/src/armnn/layers/CastLayer.cpp
index fc1ab81d4f..8dff6ba5d5 100644
--- a/src/armnn/layers/CastLayer.cpp
+++ b/src/armnn/layers/CastLayer.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2021-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "CastLayer.hpp"
@@ -41,7 +41,12 @@ void CastLayer::ValidateTensorShapesFromInputs()
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetTensorInfo().GetShape() });
- ARMNN_ASSERT(inferredShapes.size() == 1);
+ if (inferredShapes.size() != 1)
+ {
+ throw armnn::LayerValidationException("inferredShapes has "
+ + std::to_string(inferredShapes.size()) +
+ " elements - should only have 1.");
+ }
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "CastLayer");
}
diff --git a/src/armnn/layers/ChannelShuffleLayer.cpp b/src/armnn/layers/ChannelShuffleLayer.cpp
index ce6c0bac11..b05f63cd92 100644
--- a/src/armnn/layers/ChannelShuffleLayer.cpp
+++ b/src/armnn/layers/ChannelShuffleLayer.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2021-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -42,9 +42,14 @@ void ChannelShuffleLayer::ValidateTensorShapesFromInputs()
auto inferredShapes = Layer::InferOutputShapes({GetInputSlot(0).GetTensorInfo().GetShape() });
- ARMNN_ASSERT(inferredShapes.size() == 1);
+ if (inferredShapes.size() != 1)
+ {
+ throw armnn::LayerValidationException("inferredShapes has "
+ + std::to_string(inferredShapes.size()) +
+ " elements - should only have 1.");
+ }
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "ChannelShuffleLayer");
}
-} \ No newline at end of file
+}
diff --git a/src/armnn/layers/ComparisonLayer.cpp b/src/armnn/layers/ComparisonLayer.cpp
index 5d18a58f3c..dc5437bb6f 100644
--- a/src/armnn/layers/ComparisonLayer.cpp
+++ b/src/armnn/layers/ComparisonLayer.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2019-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2019-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -35,7 +35,12 @@ ComparisonLayer* ComparisonLayer::Clone(Graph& graph) const
std::vector<TensorShape> ComparisonLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
- ARMNN_ASSERT(inputShapes.size() == 2);
+ if (inputShapes.size() != 2)
+ {
+ throw armnn::Exception("inputShapes' size is \"" + std::to_string(inputShapes.size()) +
+ "\" - should be \"2\".");
+ }
+
TensorShape input0 = inputShapes[0];
TensorShape input1 = inputShapes[1];
@@ -55,8 +60,10 @@ std::vector<TensorShape> ComparisonLayer::InferOutputShapes(const std::vector<Te
unsigned int dim1 = input1[i - shiftedDims];
// Validate inputs are broadcast compatible.
- ARMNN_ASSERT_MSG(dim0 == dim1 || dim0 == 1 || dim1 == 1,
- "Dimensions should either match or one should be of size 1.");
+ if (dim0 != dim1 && dim0 != 1 && dim1 != 1)
+ {
+ throw armnn::Exception("Dimensions should either match or one should be of size 1.");
+ }
dims[i] = std::max(dim0, dim1);
}
@@ -82,7 +89,13 @@ void ComparisonLayer::ValidateTensorShapesFromInputs()
GetInputSlot(0).GetTensorInfo().GetShape(),
GetInputSlot(1).GetTensorInfo().GetShape()
});
- ARMNN_ASSERT(inferredShapes.size() == 1);
+
+ if (inferredShapes.size() != 1)
+ {
+ throw armnn::LayerValidationException("inferredShapes has "
+ + std::to_string(inferredShapes.size()) +
+ " elements - should only have 1.");
+ }
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "ComparisonLayer");
}
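
The broadcast-compatibility rule enforced above is the usual trailing-dimension rule: aligned dimensions must either match or one of them must be 1, and any extra leading dimensions are taken from the longer shape. A standalone sketch using plain std::vector rather than armnn::TensorShape (illustrative only):

#include <algorithm>
#include <cstddef>
#include <stdexcept>
#include <vector>

// Computes the broadcast output shape, throwing on incompatible dimensions,
// mirroring the per-dimension check in ComparisonLayer::InferOutputShapes.
std::vector<unsigned int> BroadcastShape(const std::vector<unsigned int>& in0,
                                         const std::vector<unsigned int>& in1)
{
    const auto& longer  = in0.size() >= in1.size() ? in0 : in1;
    const auto& shorter = in0.size() >= in1.size() ? in1 : in0;
    const std::size_t shift = longer.size() - shorter.size();

    std::vector<unsigned int> out(longer.begin(), longer.begin() + shift);
    for (std::size_t i = shift; i < longer.size(); ++i)
    {
        const unsigned int dim0 = longer[i];
        const unsigned int dim1 = shorter[i - shift];
        if (dim0 != dim1 && dim0 != 1 && dim1 != 1)
        {
            throw std::runtime_error("Dimensions should either match or one should be of size 1.");
        }
        out.push_back(std::max(dim0, dim1));
    }
    return out;
}

For example, shapes {4, 1, 3} and {5, 3} broadcast to {4, 5, 3}, while {4, 2, 3} and {5, 3} throw.
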
diff --git a/src/armnn/layers/ConcatLayer.cpp b/src/armnn/layers/ConcatLayer.cpp
index 4629bf245e..021e736bb8 100644
--- a/src/armnn/layers/ConcatLayer.cpp
+++ b/src/armnn/layers/ConcatLayer.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "ConcatLayer.hpp"
@@ -164,7 +164,11 @@ void ConcatLayer::CreateTensors(const TensorHandleFactoryRegistry& registry,
OutputSlot* slot = currentLayer->GetInputSlot(i).GetConnectedOutputSlot();
OutputHandler& outputHandler = slot->GetOutputHandler();
- ARMNN_ASSERT_MSG(subTensor, "ConcatLayer: Expected a valid sub-tensor for substitution.");
+ if (!subTensor)
+ {
+ throw armnn::Exception("ConcatLayer: Expected a valid sub-tensor for substitution.");
+ }
+
outputHandler.SetData(std::move(subTensor));
Layer& inputLayer = slot->GetOwningLayer();
@@ -193,7 +197,10 @@ void ConcatLayer::CreateTensorHandles(const TensorHandleFactoryRegistry& registr
else
{
ITensorHandleFactory* handleFactory = registry.GetFactory(factoryId);
- ARMNN_ASSERT(handleFactory);
+ if (!handleFactory)
+ {
+ throw armnn::NullPointerException("handleFactory is returning a nullptr.");
+ }
CreateTensors(registry, *handleFactory, isMemoryManaged);
}
}
@@ -205,7 +212,13 @@ ConcatLayer* ConcatLayer::Clone(Graph& graph) const
std::vector<TensorShape> ConcatLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
- ARMNN_ASSERT(inputShapes.size() == m_Param.GetNumViews());
+ if (inputShapes.size() != m_Param.GetNumViews())
+ {
+ throw armnn::Exception("inputShapes' and m_NumViews' sizes do not match (\""
+ + std::to_string(inputShapes.size()) +
+ "\" vs \""
+ + std::to_string(m_Param.GetNumViews()) + "\")");
+ }
unsigned int numDims = m_Param.GetNumDimensions();
for (unsigned int i=0; i< inputShapes.size(); i++)
@@ -315,7 +328,12 @@ void ConcatLayer::ValidateTensorShapesFromInputs()
auto inferredShapes = InferOutputShapes(inputShapes);
- ARMNN_ASSERT(inferredShapes.size() == 1);
+ if (inferredShapes.size() != 1)
+ {
+ throw armnn::Exception("inferredShapes has "
+ + std::to_string(inferredShapes.size()) +
+ " elements - should only have 1.");
+ }
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "ConcatLayer");
}
diff --git a/src/armnn/layers/ConvertFp16ToFp32Layer.cpp b/src/armnn/layers/ConvertFp16ToFp32Layer.cpp
index 9fefe2016a..d0b00cb374 100644
--- a/src/armnn/layers/ConvertFp16ToFp32Layer.cpp
+++ b/src/armnn/layers/ConvertFp16ToFp32Layer.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -42,7 +42,12 @@ void ConvertFp16ToFp32Layer::ValidateTensorShapesFromInputs()
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetTensorInfo().GetShape() });
- ARMNN_ASSERT(inferredShapes.size() == 1);
+ if (inferredShapes.size() != 1)
+ {
+ throw armnn::LayerValidationException("inferredShapes has "
+ + std::to_string(inferredShapes.size()) +
+ " elements - should only have 1.");
+ }
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "ConvertFp16ToFp32Layer");
}
diff --git a/src/armnn/layers/ConvertFp32ToFp16Layer.cpp b/src/armnn/layers/ConvertFp32ToFp16Layer.cpp
index f1abba32c6..898ef3053a 100644
--- a/src/armnn/layers/ConvertFp32ToFp16Layer.cpp
+++ b/src/armnn/layers/ConvertFp32ToFp16Layer.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "ConvertFp32ToFp16Layer.hpp"
@@ -42,7 +42,12 @@ void ConvertFp32ToFp16Layer::ValidateTensorShapesFromInputs()
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetTensorInfo().GetShape() });
- ARMNN_ASSERT(inferredShapes.size() == 1);
+ if (inferredShapes.size() != 1)
+ {
+ throw armnn::LayerValidationException("inferredShapes has "
+ + std::to_string(inferredShapes.size()) +
+ " elements - should only have 1.");
+ }
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "LayerName");
}
diff --git a/src/armnn/layers/Convolution2dLayer.cpp b/src/armnn/layers/Convolution2dLayer.cpp
index df971a517d..2fcc4aa755 100644
--- a/src/armnn/layers/Convolution2dLayer.cpp
+++ b/src/armnn/layers/Convolution2dLayer.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -63,15 +63,30 @@ Convolution2dLayer* Convolution2dLayer::Clone(Graph& graph) const
std::vector<TensorShape> Convolution2dLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
- ARMNN_ASSERT(inputShapes.size() == 2);
+ if (inputShapes.size() != 2)
+ {
+ throw armnn::Exception("inputShapes' size is \"" + std::to_string(inputShapes.size()) +
+ "\" - should be \"2\".");
+ }
+
const TensorShape& inputShape = inputShapes[0];
const TensorShape filterShape = inputShapes[1];
// If we support multiple batch dimensions in the future, then this assert will need to change.
- ARMNN_ASSERT_MSG(inputShape.GetNumDimensions() == 4, "Convolutions will always have 4D input.");
+ if (inputShape.GetNumDimensions() != 4)
+ {
+ throw armnn::Exception("Convolutions will always have 4D input.");
+ }
+
+ if (m_Param.m_StrideX == 0)
+ {
+ throw armnn::Exception("m_StrideX cannot be 0.");
+ }
- ARMNN_ASSERT( m_Param.m_StrideX > 0);
- ARMNN_ASSERT( m_Param.m_StrideY > 0);
+ if (m_Param.m_StrideY == 0)
+ {
+ throw armnn::Exception("m_StrideY cannot be 0.");
+ }
DataLayoutIndexed dataLayoutIndex(m_Param.m_DataLayout);
@@ -107,14 +122,21 @@ void Convolution2dLayer::ValidateTensorShapesFromInputs()
VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
- ARMNN_ASSERT_MSG(GetInputSlot(1).GetConnection(),
- "Convolution2dLayer: Weights should be connected to input slot 1.");
+ if (!GetInputSlot(1).GetConnection())
+ {
+ throw armnn::NullPointerException("Convolution2dLayer: Weights should be connected to input slot 1.");
+ }
std::vector<TensorShape> inferredShapes = InferOutputShapes({
GetInputSlot(0).GetTensorInfo().GetShape(),
GetInputSlot(1).GetTensorInfo().GetShape() });
- ARMNN_ASSERT(inferredShapes.size() == 1);
+ if (inferredShapes.size() != 1)
+ {
+ throw armnn::Exception("inferredShapes has "
+ + std::to_string(inferredShapes.size()) +
+ " elements - should only have 1.");
+ }
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "Convolution2dLayer");
}
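
The new stride checks exist because the strides appear as divisors in the output-size arithmetic later in InferOutputShapes (not shown in this hunk). Assuming the standard convolution formula with dilation, the per-dimension computation is roughly the following sketch (not the exact ArmNN code):

#include <stdexcept>

// Standard convolution output-size formula; a zero stride would divide by zero.
unsigned int ConvOutputSize(unsigned int inputSize, unsigned int filterSize,
                            unsigned int stride, unsigned int padLow,
                            unsigned int padHigh, unsigned int dilation = 1)
{
    if (stride == 0)
    {
        throw std::invalid_argument("stride cannot be 0.");
    }
    const unsigned int dilatedFilter = dilation * (filterSize - 1) + 1;
    return (inputSize + padLow + padHigh - dilatedFilter) / stride + 1;
}

For example, ConvOutputSize(224, 3, 2, 1, 1) yields 112.
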
diff --git a/src/armnn/layers/Convolution3dLayer.cpp b/src/armnn/layers/Convolution3dLayer.cpp
index 2d697beb1f..89ea0042ce 100644
--- a/src/armnn/layers/Convolution3dLayer.cpp
+++ b/src/armnn/layers/Convolution3dLayer.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2021-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -61,15 +61,34 @@ Convolution3dLayer* Convolution3dLayer::Clone(Graph& graph) const
std::vector<TensorShape> Convolution3dLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
- ARMNN_ASSERT(inputShapes.size() == 2);
+ if (inputShapes.size() != 2)
+ {
+ throw armnn::Exception("inputShapes' size is \"" + std::to_string(inputShapes.size()) +
+ "\" - should be \"2\".");
+ }
+
const TensorShape& inputShape = inputShapes[0];
const TensorShape& filterShape = inputShapes[1];
- ARMNN_ASSERT_MSG(inputShape.GetNumDimensions() == 5, "Convolutions will always have 5D input.");
+ if (inputShape.GetNumDimensions() != 5)
+ {
+ throw armnn::Exception("Convolutions will always have 5D input.");
+ }
+
+ if (m_Param.m_StrideX == 0)
+ {
+ throw armnn::Exception("m_StrideX cannot be 0.");
+ }
+
+ if (m_Param.m_StrideY == 0)
+ {
+ throw armnn::Exception("m_StrideY cannot be 0.");
+ }
- ARMNN_ASSERT( m_Param.m_StrideX > 0);
- ARMNN_ASSERT( m_Param.m_StrideY > 0);
- ARMNN_ASSERT( m_Param.m_StrideZ > 0);
+ if (m_Param.m_StrideZ == 0)
+ {
+ throw armnn::Exception("m_StrideZ cannot be 0.");
+ }
DataLayoutIndexed dataLayoutIndex(m_Param.m_DataLayout);
@@ -112,14 +131,21 @@ void Convolution3dLayer::ValidateTensorShapesFromInputs()
VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
- ARMNN_ASSERT_MSG(GetInputSlot(1).GetConnection(),
- "Convolution3dLayer: Weights should be connected to input slot 1.");
+ if (!GetInputSlot(1).GetConnection())
+ {
+ throw armnn::LayerValidationException("Convolution3dLayer: Weights should be connected to input slot 1.");
+ }
auto inferredShapes = InferOutputShapes({
GetInputSlot(0).GetTensorInfo().GetShape(),
GetInputSlot(1).GetTensorInfo().GetShape() });
- ARMNN_ASSERT(inferredShapes.size() == 1);
+ if (inferredShapes.size() != 1)
+ {
+ throw armnn::LayerValidationException("inferredShapes has "
+ + std::to_string(inferredShapes.size()) +
+ " elements - should only have 1.");
+ }
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "Convolution3dLayer");
}
diff --git a/src/armnn/layers/DebugLayer.cpp b/src/armnn/layers/DebugLayer.cpp
index 01c1c7be6b..ca8215ddb4 100644
--- a/src/armnn/layers/DebugLayer.cpp
+++ b/src/armnn/layers/DebugLayer.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "DebugLayer.hpp"
@@ -53,7 +53,12 @@ void DebugLayer::ValidateTensorShapesFromInputs()
std::vector<TensorShape> inferredShapes = InferOutputShapes({
GetInputSlot(0).GetTensorInfo().GetShape() });
- ARMNN_ASSERT(inferredShapes.size() == 1);
+ if (inferredShapes.size() != 1)
+ {
+ throw armnn::Exception("inferredShapes has "
+ + std::to_string(inferredShapes.size()) +
+ " elements - should only have 1.");
+ }
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "DebugLayer");
}
diff --git a/src/armnn/layers/DepthToSpaceLayer.cpp b/src/armnn/layers/DepthToSpaceLayer.cpp
index b94eccc1ee..b303474ab4 100644
--- a/src/armnn/layers/DepthToSpaceLayer.cpp
+++ b/src/armnn/layers/DepthToSpaceLayer.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2019-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2019-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -40,7 +40,11 @@ DepthToSpaceLayer* DepthToSpaceLayer::Clone(Graph& graph) const
std::vector<TensorShape> DepthToSpaceLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
- ARMNN_ASSERT(inputShapes.size() == 1);
+ if (inputShapes.size() != 1)
+ {
+ throw armnn::Exception("inputShapes' size is \"" + std::to_string(inputShapes.size()) +
+ "\" - should be \"1\".");
+ }
TensorShape inputShape = inputShapes[0];
TensorShape outputShape(inputShape);
@@ -70,7 +74,12 @@ void DepthToSpaceLayer::ValidateTensorShapesFromInputs()
std::vector<TensorShape> inferredShapes = InferOutputShapes({
GetInputSlot(0).GetTensorInfo().GetShape() });
- ARMNN_ASSERT(inferredShapes.size() == 1);
+ if (inferredShapes.size() != 1)
+ {
+ throw armnn::LayerValidationException("inferredShapes has "
+ + std::to_string(inferredShapes.size()) +
+ " elements - should only have 1.");
+ }
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "DepthToSpaceLayer");
}
diff --git a/src/armnn/layers/DepthwiseConvolution2dLayer.cpp b/src/armnn/layers/DepthwiseConvolution2dLayer.cpp
index 4f08b2324e..69c3d380af 100644
--- a/src/armnn/layers/DepthwiseConvolution2dLayer.cpp
+++ b/src/armnn/layers/DepthwiseConvolution2dLayer.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -64,14 +64,30 @@ DepthwiseConvolution2dLayer* DepthwiseConvolution2dLayer::Clone(Graph& graph) co
std::vector<TensorShape>
DepthwiseConvolution2dLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
- ARMNN_ASSERT(inputShapes.size() == 2);
+ if (inputShapes.size() != 2)
+ {
+ throw armnn::Exception("inputShapes' size is \"" + std::to_string(inputShapes.size()) +
+ "\" - should be \"2\".");
+ }
+
const TensorShape& inputShape = inputShapes[0];
const TensorShape& filterShape = inputShapes[1];
- ARMNN_ASSERT_MSG(inputShape.GetNumDimensions() == 4, "Convolutions will always have 4D input.");
+ if (inputShape.GetNumDimensions() != 4)
+ {
+ throw armnn::Exception("Convolutions will always have 4D input.");
+ }
+
+ if (m_Param.m_StrideX == 0)
+ {
+ throw armnn::Exception("m_StrideX cannot be 0.");
+ }
+
+ if (m_Param.m_StrideY == 0)
+ {
+ throw armnn::Exception("m_StrideY cannot be 0.");
+ }
- ARMNN_ASSERT( m_Param.m_StrideX > 0);
- ARMNN_ASSERT( m_Param.m_StrideY > 0);
DataLayoutIndexed dataLayoutIndex(m_Param.m_DataLayout);
@@ -110,15 +126,22 @@ void DepthwiseConvolution2dLayer::ValidateTensorShapesFromInputs()
VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
- ARMNN_ASSERT_MSG(GetInputSlot(1).GetConnection(),
- "DepthwiseConvolution2dLayer: Weights data should not be null.");
+ if (!GetInputSlot(1).GetConnection())
+ {
+ throw armnn::LayerValidationException("DepthwiseConvolution2dLayer: Weights data should not be null.");
+ }
auto inferredShapes = InferOutputShapes({
GetInputSlot(0).GetTensorInfo().GetShape(),
GetInputSlot(1).GetTensorInfo().GetShape()
});
- ARMNN_ASSERT(inferredShapes.size() == 1);
+ if (inferredShapes.size() != 1)
+ {
+ throw armnn::LayerValidationException("inferredShapes has "
+ + std::to_string(inferredShapes.size()) +
+ " elements - should only have 1.");
+ }
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "DepthwiseConvolution2dLayer");
}
diff --git a/src/armnn/layers/DequantizeLayer.cpp b/src/armnn/layers/DequantizeLayer.cpp
index b398cf6e0d..79ab969719 100644
--- a/src/armnn/layers/DequantizeLayer.cpp
+++ b/src/armnn/layers/DequantizeLayer.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017,2019-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017,2019-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "DequantizeLayer.hpp"
@@ -41,7 +41,12 @@ void DequantizeLayer::ValidateTensorShapesFromInputs()
std::vector<TensorShape> inferredShapes = InferOutputShapes({
GetInputSlot(0).GetTensorInfo().GetShape() });
- ARMNN_ASSERT(inferredShapes.size() == 1);
+ if (inferredShapes.size() != 1)
+ {
+ throw armnn::LayerValidationException("inferredShapes has "
+ + std::to_string(inferredShapes.size()) +
+ " elements - should only have 1.");
+ }
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "DequantizeLayer");
}
diff --git a/src/armnn/layers/DetectionPostProcessLayer.cpp b/src/armnn/layers/DetectionPostProcessLayer.cpp
index 6bddf51551..f71f72af3a 100644
--- a/src/armnn/layers/DetectionPostProcessLayer.cpp
+++ b/src/armnn/layers/DetectionPostProcessLayer.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -45,19 +45,33 @@ void DetectionPostProcessLayer::ValidateTensorShapesFromInputs()
VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
// on this level constant data should not be released.
- ARMNN_ASSERT_MSG(m_Anchors != nullptr, "DetectionPostProcessLayer: Anchors data should not be null.");
+ if (!m_Anchors)
+ {
+ throw armnn::LayerValidationException("DetectionPostProcessLayer: Anchors data should not be null.");
+ }
- ARMNN_ASSERT_MSG(GetNumOutputSlots() == 4, "DetectionPostProcessLayer: The layer should return 4 outputs.");
+ if (GetNumOutputSlots() != 4)
+ {
+ throw armnn::LayerValidationException("DetectionPostProcessLayer: The layer should return 4 outputs.");
+ }
std::vector<TensorShape> inferredShapes = InferOutputShapes(
{ GetInputSlot(0).GetTensorInfo().GetShape(),
GetInputSlot(1).GetTensorInfo().GetShape() });
- ARMNN_ASSERT(inferredShapes.size() == 4);
- ARMNN_ASSERT(inferredShapes[0].GetDimensionality() == Dimensionality::Specified);
- ARMNN_ASSERT(inferredShapes[1].GetDimensionality() == Dimensionality::Specified);
- ARMNN_ASSERT(inferredShapes[2].GetDimensionality() == Dimensionality::Specified);
- ARMNN_ASSERT(inferredShapes[3].GetDimensionality() == Dimensionality::Specified);
+ if (inferredShapes.size() != 4)
+ {
+ throw armnn::LayerValidationException("inferredShapes has "
+ + std::to_string(inferredShapes.size()) +
+ " element(s) - should only have 4.");
+ }
+
+ if (std::any_of(inferredShapes.begin(), inferredShapes.end(), [] (auto&& inferredShape) {
+ return inferredShape.GetDimensionality() != Dimensionality::Specified;
+ }))
+ {
+ throw armnn::Exception("One of inferredShapes' dimensionalities is not specified.");
+ }
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "DetectionPostProcessLayer");
diff --git a/src/armnn/layers/ElementwiseBaseLayer.cpp b/src/armnn/layers/ElementwiseBaseLayer.cpp
index 3cbddfa5db..e813f48daf 100644
--- a/src/armnn/layers/ElementwiseBaseLayer.cpp
+++ b/src/armnn/layers/ElementwiseBaseLayer.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017-2018,2020-2021,2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2018,2020-2021,2023-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -22,7 +22,12 @@ ElementwiseBaseLayer::ElementwiseBaseLayer(unsigned int numInputSlots,
std::vector<TensorShape> ElementwiseBaseLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
- ARMNN_ASSERT(inputShapes.size() == 2);
+ if (inputShapes.size() != 2)
+ {
+ throw armnn::Exception("inputShapes' size is \"" + std::to_string(inputShapes.size()) +
+ "\" - should be \"2\".");
+ }
+
TensorShape input0 = inputShapes[0];
TensorShape input1 = inputShapes[1];
@@ -43,8 +48,10 @@ std::vector<TensorShape> ElementwiseBaseLayer::InferOutputShapes(const std::vect
unsigned int dim1 = input1[i - shiftedDims];
// Validate inputs are broadcast compatible.
- ARMNN_ASSERT_MSG(dim0 == dim1 || dim0 == 1 || dim1 == 1,
- "Dimensions should either match or one should be of size 1.");
+ if (dim0 != dim1 && dim0 != 1 && dim1 != 1)
+ {
+ throw armnn::Exception("Dimensions should either match or one should be of size 1.");
+ }
dims[i] = std::max(dim0, dim1);
}
@@ -69,7 +76,12 @@ void ElementwiseBaseLayer::ValidateTensorShapesFromInputs()
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetTensorInfo().GetShape(),
GetInputSlot(1).GetTensorInfo().GetShape() });
- ARMNN_ASSERT(inferredShapes.size() == 1);
+ if (inferredShapes.size() != 1)
+ {
+ throw armnn::LayerValidationException("inferredShapes has "
+ + std::to_string(inferredShapes.size()) +
+ " elements - should only have 1.");
+ }
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, GetLayerTypeAsCString(GetType()));
}
diff --git a/src/armnn/layers/ElementwiseBinaryLayer.cpp b/src/armnn/layers/ElementwiseBinaryLayer.cpp
index 67619fc463..5459aafbca 100644
--- a/src/armnn/layers/ElementwiseBinaryLayer.cpp
+++ b/src/armnn/layers/ElementwiseBinaryLayer.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -30,7 +30,12 @@ ElementwiseBinaryLayer* ElementwiseBinaryLayer::Clone(Graph& graph) const
std::vector<TensorShape> ElementwiseBinaryLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
- ARMNN_ASSERT(inputShapes.size() == 2);
+ if (inputShapes.size() != 2)
+ {
+ throw armnn::Exception("inputShapes' size is \"" + std::to_string(inputShapes.size()) +
+ "\" - should be \"2\".");
+ }
+
TensorShape input0 = inputShapes[0];
TensorShape input1 = inputShapes[1];
@@ -51,8 +56,10 @@ std::vector<TensorShape> ElementwiseBinaryLayer::InferOutputShapes(const std::ve
unsigned int dim1 = input1[i - shiftedDims];
// Validate inputs are broadcast compatible.
- ARMNN_ASSERT_MSG(dim0 == dim1 || dim0 == 1 || dim1 == 1,
- "Dimensions should either match or one should be of size 1.");
+ if (dim0 != dim1 && dim0 != 1 && dim1 != 1)
+ {
+ throw armnn::Exception("Dimensions should either match or one should be of size 1.");
+ }
dims[i] = std::max(dim0, dim1);
}
@@ -77,7 +84,12 @@ void ElementwiseBinaryLayer::ValidateTensorShapesFromInputs()
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetTensorInfo().GetShape(),
GetInputSlot(1).GetTensorInfo().GetShape() });
- ARMNN_ASSERT(inferredShapes.size() == 1);
+ if (inferredShapes.size() != 1)
+ {
+ throw armnn::LayerValidationException("inferredShapes has "
+ + std::to_string(inferredShapes.size()) +
+ " elements - should only have 1.");
+ }
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, GetLayerTypeAsCString(GetType()));
}
diff --git a/src/armnn/layers/ElementwiseUnaryLayer.cpp b/src/armnn/layers/ElementwiseUnaryLayer.cpp
index c648f9b863..791a3d5d89 100644
--- a/src/armnn/layers/ElementwiseUnaryLayer.cpp
+++ b/src/armnn/layers/ElementwiseUnaryLayer.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2020-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -34,7 +34,12 @@ ElementwiseUnaryLayer* ElementwiseUnaryLayer::Clone(Graph& graph) const
std::vector<TensorShape> ElementwiseUnaryLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
// Should return the shape of the input tensor
- ARMNN_ASSERT(inputShapes.size() == 1);
+ if (inputShapes.size() != 1)
+ {
+ throw armnn::Exception("inputShapes' size is \"" + std::to_string(inputShapes.size()) +
+ "\" - should be \"1\".");
+ }
+
const TensorShape& input = inputShapes[0];
return std::vector<TensorShape>({ input });
@@ -50,7 +55,13 @@ void ElementwiseUnaryLayer::ValidateTensorShapesFromInputs()
std::vector<TensorShape> inferredShapes = InferOutputShapes({
GetInputSlot(0).GetTensorInfo().GetShape()});
- ARMNN_ASSERT(inferredShapes.size() == 1);
+
+ if (inferredShapes.size() != 1)
+ {
+ throw armnn::Exception("inferredShapes has "
+ + std::to_string(inferredShapes.size()) +
+ " elements - should only have 1.");
+ }
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, GetLayerTypeAsCString(GetType()));
}
diff --git a/src/armnn/layers/FakeQuantizationLayer.cpp b/src/armnn/layers/FakeQuantizationLayer.cpp
index a612b5a4ec..bb9e6a4a7a 100644
--- a/src/armnn/layers/FakeQuantizationLayer.cpp
+++ b/src/armnn/layers/FakeQuantizationLayer.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "FakeQuantizationLayer.hpp"
@@ -41,7 +41,12 @@ void FakeQuantizationLayer::ValidateTensorShapesFromInputs()
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetTensorInfo().GetShape() });
- ARMNN_ASSERT(inferredShapes.size() == 1);
+ if (inferredShapes.size() != 1)
+ {
+ throw armnn::Exception("inferredShapes has "
+ + std::to_string(inferredShapes.size()) +
+ " elements - should only have 1.");
+ }
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "FakeQuantizationLayer");
}
diff --git a/src/armnn/layers/FillLayer.cpp b/src/armnn/layers/FillLayer.cpp
index af01b99847..c40efb33fb 100644
--- a/src/armnn/layers/FillLayer.cpp
+++ b/src/armnn/layers/FillLayer.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2020-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "FillLayer.hpp"
@@ -41,7 +41,12 @@ void FillLayer::ValidateTensorShapesFromInputs()
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetTensorInfo().GetShape() });
- ARMNN_ASSERT(inferredShapes.size() == 1);
+ if (inferredShapes.size() != 1)
+ {
+ throw armnn::Exception("inferredShapes has "
+ + std::to_string(inferredShapes.size()) +
+ " elements - should only have 1.");
+ }
// Cannot validate the output shape from the input shape. but we can validate that the correct dims have been
// inferred
diff --git a/src/armnn/layers/FloorLayer.cpp b/src/armnn/layers/FloorLayer.cpp
index 2db8d91b4b..1177b93054 100644
--- a/src/armnn/layers/FloorLayer.cpp
+++ b/src/armnn/layers/FloorLayer.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "FloorLayer.hpp"
@@ -40,7 +40,13 @@ void FloorLayer::ValidateTensorShapesFromInputs()
VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetTensorInfo().GetShape() });
- ARMNN_ASSERT(inferredShapes.size() == 1);
+
+ if (inferredShapes.size() != 1)
+ {
+ throw armnn::Exception("inferredShapes has "
+ + std::to_string(inferredShapes.size()) +
+ " elements - should only have 1.");
+ }
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "FloorLayer");
}
diff --git a/src/armnn/layers/FullyConnectedLayer.cpp b/src/armnn/layers/FullyConnectedLayer.cpp
index 87a8adacaa..5b6b2a34d0 100644
--- a/src/armnn/layers/FullyConnectedLayer.cpp
+++ b/src/armnn/layers/FullyConnectedLayer.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "FullyConnectedLayer.hpp"
@@ -34,7 +34,12 @@ FullyConnectedLayer* FullyConnectedLayer::Clone(Graph& graph) const
std::vector<TensorShape> FullyConnectedLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
- ARMNN_ASSERT(inputShapes.size() == 2);
+ if (inputShapes.size() != 2)
+ {
+ throw armnn::Exception("inputShapes' size is \"" + std::to_string(inputShapes.size()) +
+ "\" - should be \"2\".");
+ }
+
const TensorShape& inputShape = inputShapes[0];
const TensorShape weightShape = inputShapes[1];
@@ -55,8 +60,17 @@ void FullyConnectedLayer::ValidateTensorShapesFromInputs()
{GetInputSlot(0).GetTensorInfo().GetShape(),
GetInputSlot(1).GetTensorInfo().GetShape()});
- ARMNN_ASSERT(inferredShapes.size() == 1);
- ARMNN_ASSERT(inferredShapes[0].GetDimensionality() == Dimensionality::Specified);
+ if (inferredShapes.size() != 1)
+ {
+ throw armnn::LayerValidationException("inferredShapes has "
+ + std::to_string(inferredShapes.size()) +
+ " elements - should only have 1.");
+ }
+
+ if (inferredShapes[0].GetDimensionality() != Dimensionality::Specified)
+ {
+ throw armnn::LayerValidationException("inferredShapes' dimensionality has not been specified.");
+ }
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "FullyConnectedLayer");
}
diff --git a/src/armnn/layers/GatherLayer.cpp b/src/armnn/layers/GatherLayer.cpp
index ae5ecd6cb6..359f3118dd 100644
--- a/src/armnn/layers/GatherLayer.cpp
+++ b/src/armnn/layers/GatherLayer.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017,2019-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017,2019-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -33,7 +33,12 @@ GatherLayer* GatherLayer::Clone(Graph& graph) const
std::vector<TensorShape> GatherLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
- ARMNN_ASSERT(inputShapes.size() == 2);
+ if (inputShapes.size() != 2)
+ {
+ throw armnn::Exception("inputShapes' size is \"" + std::to_string(inputShapes.size()) +
+ "\" - should be \"2\".");
+ }
+
const TensorShape& params = inputShapes[0];
const TensorShape& indices = inputShapes[1];
@@ -82,9 +87,19 @@ void GatherLayer::ValidateTensorShapesFromInputs()
std::vector<TensorShape> inferredShapes = InferOutputShapes(
{GetInputSlot(0).GetTensorInfo().GetShape(),
GetInputSlot(1).GetTensorInfo().GetShape()});
- ARMNN_ASSERT(inferredShapes.size() == 1);
- ARMNN_ASSERT(inferredShapes[0].GetDimensionality() == Dimensionality::Specified ||
- inferredShapes[0].GetDimensionality() == Dimensionality::Scalar);
+
+ if (inferredShapes.size() != 1)
+ {
+ throw armnn::LayerValidationException("inferredShapes has "
+ + std::to_string(inferredShapes.size()) +
+ " elements - should only have 1.");
+ }
+
+ if (inferredShapes[0].GetDimensionality() != Dimensionality::Specified &&
+ inferredShapes[0].GetDimensionality() != Dimensionality::Scalar)
+ {
+ throw armnn::LayerValidationException("inferredShapes' dimensionality is neither specified nor scalar.");
+ }
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "GatherLayer");
}
diff --git a/src/armnn/layers/GatherNdLayer.cpp b/src/armnn/layers/GatherNdLayer.cpp
index 0f06946634..56e1500aa6 100644
--- a/src/armnn/layers/GatherNdLayer.cpp
+++ b/src/armnn/layers/GatherNdLayer.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -33,7 +33,12 @@ GatherNdLayer* GatherNdLayer::Clone(Graph& graph) const
std::vector<TensorShape> GatherNdLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
- ARMNN_ASSERT(inputShapes.size() == 2);
+ if (inputShapes.size() != 2)
+ {
+ throw armnn::Exception("inputShapes' size is \"" + std::to_string(inputShapes.size()) +
+ "\" - should be \"2\".");
+ }
+
const TensorShape& params = inputShapes[0];
const TensorShape& indices = inputShapes[1];
@@ -47,7 +52,13 @@ std::vector<TensorShape> GatherNdLayer::InferOutputShapes(const std::vector<Tens
// last dimension of indices
unsigned int index_depth = indices[indicesDim - 1];
- ARMNN_ASSERT(index_depth <= paramsDim);
+ if (index_depth > paramsDim)
+ {
+ throw armnn::Exception("index_depth must not be greater than paramsDim (\""
+ + std::to_string(index_depth) +
+ "\" vs \""
+ + std::to_string(paramsDim) + "\")");
+ }
// all but the last dimension of indices
std::vector<unsigned int> outer_shape;
@@ -86,9 +97,19 @@ void GatherNdLayer::ValidateTensorShapesFromInputs()
std::vector<TensorShape> inferredShapes = InferOutputShapes(
{GetInputSlot(0).GetTensorInfo().GetShape(),
GetInputSlot(1).GetTensorInfo().GetShape()});
- ARMNN_ASSERT(inferredShapes.size() == 1);
- ARMNN_ASSERT(inferredShapes[0].GetDimensionality() == Dimensionality::Specified ||
- inferredShapes[0].GetDimensionality() == Dimensionality::Scalar);
+
+ if (inferredShapes.size() != 1)
+ {
+ throw armnn::LayerValidationException("inferredShapes has "
+ + std::to_string(inferredShapes.size()) +
+ " elements - should only have 1.");
+ }
+
+ if (inferredShapes[0].GetDimensionality() != Dimensionality::Specified &&
+ inferredShapes[0].GetDimensionality() != Dimensionality::Scalar)
+ {
+ throw armnn::LayerValidationException("inferredShapes' dimensionality is neither specified nor scalar.");
+ }
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "GatherNdLayer");
}
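
For context on the index_depth check above: GatherNd interprets the last dimension of indices as a coordinate tuple into params, so it can address at most all of params' dimensions. The output shape is the leading indices dimensions followed by the params dimensions that are not indexed away. A rough sketch with plain vectors (standard GatherNd semantics assumed, not the ArmNN implementation):

#include <stdexcept>
#include <vector>

std::vector<unsigned int> GatherNdOutputShape(const std::vector<unsigned int>& params,
                                              const std::vector<unsigned int>& indices)
{
    const unsigned int indexDepth = indices.back();   // coordinates per lookup
    if (indexDepth > params.size())
    {
        throw std::invalid_argument("index_depth must not be greater than paramsDim.");
    }
    // All but the last dimension of indices ...
    std::vector<unsigned int> out(indices.begin(), indices.end() - 1);
    // ... followed by the params dimensions that remain after indexing.
    out.insert(out.end(), params.begin() + indexDepth, params.end());
    return out;
}

For example, params shape {4, 5, 6} with indices shape {2, 2} gives an output shape of {2, 6}.
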
diff --git a/src/armnn/layers/InstanceNormalizationLayer.cpp b/src/armnn/layers/InstanceNormalizationLayer.cpp
index db6cd206cc..9cc9745ea3 100644
--- a/src/armnn/layers/InstanceNormalizationLayer.cpp
+++ b/src/armnn/layers/InstanceNormalizationLayer.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2019-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2019-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "InstanceNormalizationLayer.hpp"
@@ -41,7 +41,12 @@ void InstanceNormalizationLayer::ValidateTensorShapesFromInputs()
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetTensorInfo().GetShape() });
- ARMNN_ASSERT(inferredShapes.size() == 1);
+ if (inferredShapes.size() != 1)
+ {
+ throw armnn::LayerValidationException("inferredShapes has "
+ + std::to_string(inferredShapes.size()) +
+ " elements - should only have 1.");
+ }
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "InstanceNormalizationLayer");
}
diff --git a/src/armnn/layers/L2NormalizationLayer.cpp b/src/armnn/layers/L2NormalizationLayer.cpp
index 2d268dddef..14a5f90d06 100644
--- a/src/armnn/layers/L2NormalizationLayer.cpp
+++ b/src/armnn/layers/L2NormalizationLayer.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "L2NormalizationLayer.hpp"
@@ -41,7 +41,12 @@ void L2NormalizationLayer::ValidateTensorShapesFromInputs()
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetTensorInfo().GetShape() });
- ARMNN_ASSERT(inferredShapes.size() == 1);
+ if (inferredShapes.size() != 1)
+ {
+ throw armnn::LayerValidationException("inferredShapes has "
+ + std::to_string(inferredShapes.size()) +
+ " elements - should only have 1.");
+ }
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "L2NormalizationLayer");
}
diff --git a/src/armnn/layers/LogSoftmaxLayer.cpp b/src/armnn/layers/LogSoftmaxLayer.cpp
index 872d42297f..da82dfe658 100644
--- a/src/armnn/layers/LogSoftmaxLayer.cpp
+++ b/src/armnn/layers/LogSoftmaxLayer.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2019-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2019-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -40,7 +40,13 @@ void LogSoftmaxLayer::ValidateTensorShapesFromInputs()
VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetTensorInfo().GetShape() });
- ARMNN_ASSERT(inferredShapes.size() == 1);
+
+ if (inferredShapes.size() != 1)
+ {
+ throw armnn::LayerValidationException("inferredShapes has "
+ + std::to_string(inferredShapes.size()) +
+ " elements - should only have 1.");
+ }
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "LogSoftmaxLayer");
}
diff --git a/src/armnn/layers/LogicalBinaryLayer.cpp b/src/armnn/layers/LogicalBinaryLayer.cpp
index 84a6e8e812..a781d6e283 100644
--- a/src/armnn/layers/LogicalBinaryLayer.cpp
+++ b/src/armnn/layers/LogicalBinaryLayer.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2020-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -33,11 +33,23 @@ LogicalBinaryLayer* LogicalBinaryLayer::Clone(Graph& graph) const
std::vector<TensorShape> LogicalBinaryLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
- ARMNN_ASSERT(inputShapes.size() == 2);
+ if (inputShapes.size() != 2)
+ {
+ throw armnn::Exception("inputShapes' size is \"" + std::to_string(inputShapes.size()) +
+ "\" - should be \"2\".");
+ }
+
const TensorShape& input0 = inputShapes[0];
const TensorShape& input1 = inputShapes[1];
- ARMNN_ASSERT(input0.GetNumDimensions() == input1.GetNumDimensions());
+ if (input0.GetNumDimensions() != input1.GetNumDimensions())
+ {
+ throw armnn::Exception("Input dimensions do not match (\""
+ + std::to_string(input0.GetNumDimensions()) +
+ "\" vs \""
+ + std::to_string(input1.GetNumDimensions()) + "\").");
+ }
+
unsigned int numDims = input0.GetNumDimensions();
std::vector<unsigned int> dims(numDims);
@@ -46,8 +58,10 @@ std::vector<TensorShape> LogicalBinaryLayer::InferOutputShapes(const std::vector
unsigned int dim0 = input0[i];
unsigned int dim1 = input1[i];
- ARMNN_ASSERT_MSG(dim0 == dim1 || dim0 == 1 || dim1 == 1,
- "Dimensions should either match or one should be of size 1.");
+ if (dim0 != dim1 && dim0 != 1 && dim1 != 1)
+ {
+ throw armnn::Exception("Dimensions should either match or one should be of size 1.");
+ }
dims[i] = std::max(dim0, dim1);
}
@@ -67,7 +81,13 @@ void LogicalBinaryLayer::ValidateTensorShapesFromInputs()
GetInputSlot(0).GetTensorInfo().GetShape(),
GetInputSlot(1).GetTensorInfo().GetShape()
});
- ARMNN_ASSERT(inferredShapes.size() == 1);
+
+ if (inferredShapes.size() != 1)
+ {
+ throw armnn::LayerValidationException("inferredShapes has "
+ + std::to_string(inferredShapes.size()) +
+ " elements - should only have 1.");
+ }
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "LogicalBinaryLayer");
}
diff --git a/src/armnn/layers/LstmLayer.cpp b/src/armnn/layers/LstmLayer.cpp
index 0e6f3d882b..d87ad6461e 100644
--- a/src/armnn/layers/LstmLayer.cpp
+++ b/src/armnn/layers/LstmLayer.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "LstmLayer.hpp"
@@ -149,7 +149,11 @@ LstmLayer* LstmLayer::Clone(Graph& graph) const
std::vector<TensorShape> LstmLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
- ARMNN_ASSERT(inputShapes.size() == 3);
+ if (inputShapes.size() != 3)
+ {
+ throw armnn::Exception("inputShapes' size is \"" + std::to_string(inputShapes.size()) +
+ "\" - should be \"3\".");
+ }
// Get input values for validation
unsigned int batchSize = inputShapes[0][0];
@@ -179,69 +183,148 @@ void LstmLayer::ValidateTensorShapesFromInputs()
GetInputSlot(2).GetTensorInfo().GetShape()
});
- ARMNN_ASSERT(inferredShapes.size() == 4);
+ if (inferredShapes.size() != 4)
+ {
+ throw armnn::Exception("inferredShapes has "
+ + std::to_string(inferredShapes.size()) +
+ " element(s) - should only have 4.");
+ }
// Check if the weights are nullptr
- ARMNN_ASSERT_MSG(m_BasicParameters.m_InputToForgetWeights != nullptr,
- "LstmLayer: m_BasicParameters.m_InputToForgetWeights should not be null.");
- ARMNN_ASSERT_MSG(m_BasicParameters.m_InputToCellWeights != nullptr,
- "LstmLayer: m_BasicParameters.m_InputToCellWeights should not be null.");
- ARMNN_ASSERT_MSG(m_BasicParameters.m_InputToOutputWeights != nullptr,
- "LstmLayer: m_BasicParameters.m_InputToOutputWeights should not be null.");
- ARMNN_ASSERT_MSG(m_BasicParameters.m_RecurrentToForgetWeights != nullptr,
- "LstmLayer: m_BasicParameters.m_RecurrentToForgetWeights should not be null.");
- ARMNN_ASSERT_MSG(m_BasicParameters.m_RecurrentToCellWeights != nullptr,
- "LstmLayer: m_BasicParameters.m_RecurrentToCellWeights should not be null.");
- ARMNN_ASSERT_MSG(m_BasicParameters.m_RecurrentToOutputWeights != nullptr,
- "LstmLayer: m_BasicParameters.m_RecurrentToOutputWeights should not be null.");
- ARMNN_ASSERT_MSG(m_BasicParameters.m_ForgetGateBias != nullptr,
- "LstmLayer: m_BasicParameters.m_ForgetGateBias should not be null.");
- ARMNN_ASSERT_MSG(m_BasicParameters.m_CellBias != nullptr,
- "LstmLayer: m_BasicParameters.m_CellBias should not be null.");
- ARMNN_ASSERT_MSG(m_BasicParameters.m_OutputGateBias != nullptr,
- "LstmLayer: m_BasicParameters.m_OutputGateBias should not be null.");
+ if (!m_BasicParameters.m_InputToForgetWeights)
+ {
+ throw armnn::NullPointerException("LstmLayer: "
+ "m_BasicParameters.m_InputToForgetWeights should not be null.");
+ }
+
+ if (!m_BasicParameters.m_InputToCellWeights)
+ {
+ throw armnn::NullPointerException("LstmLayer: "
+ "m_BasicParameters.m_InputToCellWeights should not be null.");
+ }
+
+ if (!m_BasicParameters.m_InputToOutputWeights)
+ {
+ throw armnn::NullPointerException("LstmLayer: "
+ "m_BasicParameters.m_InputToOutputWeights should not be null.");
+ }
+
+ if (!m_BasicParameters.m_RecurrentToForgetWeights)
+ {
+ throw armnn::NullPointerException("LstmLayer: "
+ "m_BasicParameters.m_RecurrentToForgetWeights should not be null.");
+ }
+
+ if (!m_BasicParameters.m_RecurrentToCellWeights)
+ {
+ throw armnn::NullPointerException("LstmLayer: "
+ "m_BasicParameters.m_RecurrentToCellWeights should not be null.");
+ }
+
+ if (!m_BasicParameters.m_RecurrentToOutputWeights)
+ {
+ throw armnn::NullPointerException("LstmLayer: "
+ "m_BasicParameters.m_RecurrentToOutputWeights should not be null.");
+ }
+
+ if (!m_BasicParameters.m_ForgetGateBias)
+ {
+ throw armnn::NullPointerException("LstmLayer: "
+ "m_BasicParameters.m_ForgetGateBias should not be null.");
+ }
+
+ if (!m_BasicParameters.m_CellBias)
+ {
+ throw armnn::NullPointerException("LstmLayer: "
+ "m_BasicParameters.m_CellBias should not be null.");
+ }
+
+ if (!m_BasicParameters.m_OutputGateBias)
+ {
+ throw armnn::NullPointerException("LstmLayer: "
+ "m_BasicParameters.m_OutputGateBias should not be null.");
+ }
if (!m_Param.m_CifgEnabled)
{
- ARMNN_ASSERT_MSG(m_CifgParameters.m_InputToInputWeights != nullptr,
- "LstmLayer: m_CifgParameters.m_InputToInputWeights should not be null.");
- ARMNN_ASSERT_MSG(m_CifgParameters.m_RecurrentToInputWeights != nullptr,
- "LstmLayer: m_CifgParameters.m_RecurrentToInputWeights should not be null.");
- ARMNN_ASSERT_MSG(m_CifgParameters.m_InputGateBias != nullptr,
- "LstmLayer: m_CifgParameters.m_InputGateBias should not be null.");
+ if (!m_CifgParameters.m_InputToInputWeights)
+ {
+ throw armnn::NullPointerException("LstmLayer: "
+ "m_CifgParameters.m_InputToInputWeights should not be null.");
+ }
+
+ if (!m_CifgParameters.m_RecurrentToInputWeights)
+ {
+ throw armnn::NullPointerException("LstmLayer: "
+ "m_CifgParameters.m_RecurrentToInputWeights should not be null.");
+ }
+
+ if (!m_CifgParameters.m_InputGateBias)
+ {
+ throw armnn::NullPointerException("LstmLayer: "
+ "m_CifgParameters.m_InputGateBias should not be null.");
+ }
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "LstmLayer");
}
else
{
- ARMNN_ASSERT_MSG(m_CifgParameters.m_InputToInputWeights == nullptr,
- "LstmLayer: m_CifgParameters.m_InputToInputWeights should not have a value when CIFG is enabled.");
- ARMNN_ASSERT_MSG(m_CifgParameters.m_RecurrentToInputWeights == nullptr,
- "LstmLayer: m_CifgParameters.m_RecurrentToInputWeights should not have a value when CIFG is enabled.");
- ARMNN_ASSERT_MSG(m_CifgParameters.m_InputGateBias == nullptr,
- "LstmLayer: m_CifgParameters.m_InputGateBias should not have a value when CIFG is enabled.");
+ if (m_CifgParameters.m_InputToInputWeights)
+ {
+ throw armnn::Exception("LstmLayer: "
+ "m_CifgParameters.m_InputToInputWeights should not have a value "
+ "when CIFG is enabled.");
+ }
+
+ if (m_CifgParameters.m_RecurrentToInputWeights)
+ {
+ throw armnn::Exception("LstmLayer: "
+ "m_CifgParameters.m_RecurrentToInputWeights should not have a value "
+ "when CIFG is enabled.");
+ }
+
+ if (m_CifgParameters.m_InputGateBias)
+ {
+ throw armnn::Exception("LstmLayer: "
+ "m_CifgParameters.m_InputGateBias should not have a value "
+ "when CIFG is enabled.");
+ }
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "LstmLayer");
}
if (m_Param.m_ProjectionEnabled)
{
- ARMNN_ASSERT_MSG(m_ProjectionParameters.m_ProjectionWeights != nullptr,
- "LstmLayer: m_ProjectionParameters.m_ProjectionWeights should not be null.");
+ if (!m_ProjectionParameters.m_ProjectionWeights)
+ {
+ throw armnn::NullPointerException("LstmLayer: "
+ "m_ProjectionParameters.m_ProjectionWeights should not be null.");
+ }
}
if (m_Param.m_PeepholeEnabled)
{
if (!m_Param.m_CifgEnabled)
{
- ARMNN_ASSERT_MSG(m_PeepholeParameters.m_CellToInputWeights != nullptr,
- "LstmLayer: m_PeepholeParameters.m_CellToInputWeights should not be null "
- "when Peephole is enabled and CIFG is disabled.");
+ if (!m_PeepholeParameters.m_CellToInputWeights)
+ {
+ throw armnn::NullPointerException("LstmLayer: "
+ "m_PeepholeParameters.m_CellToInputWeights should not be null "
+ "when Peephole is enabled and CIFG is disabled.");
+ }
+ }
+
+ if (!m_PeepholeParameters.m_CellToForgetWeights)
+ {
+ throw armnn::NullPointerException("LstmLayer: "
+ "m_PeepholeParameters.m_CellToForgetWeights should not be null.");
+ }
+
+ if (!m_PeepholeParameters.m_CellToOutputWeights)
+ {
+ throw armnn::NullPointerException("LstmLayer: "
+ "m_PeepholeParameters.m_CellToOutputWeights should not be null.");
}
- ARMNN_ASSERT_MSG(m_PeepholeParameters.m_CellToForgetWeights != nullptr,
- "LstmLayer: m_PeepholeParameters.m_CellToForgetWeights should not be null.");
- ARMNN_ASSERT_MSG(m_PeepholeParameters.m_CellToOutputWeights != nullptr,
- "LstmLayer: m_PeepholeParameters.m_CellToOutputWeights should not be null.");
}
ValidateAndCopyShape(
@@ -255,15 +338,30 @@ void LstmLayer::ValidateTensorShapesFromInputs()
{
if(!m_Param.m_CifgEnabled)
{
- ARMNN_ASSERT_MSG(m_LayerNormParameters.m_InputLayerNormWeights != nullptr,
- "LstmLayer: m_LayerNormParameters.m_inputLayerNormWeights should not be null.");
+ if (!m_LayerNormParameters.m_InputLayerNormWeights)
+ {
+ throw armnn::NullPointerException("LstmLayer: "
+ "m_LayerNormParameters.m_inputLayerNormWeights should not be null.");
+ }
+ }
+
+ if (!m_LayerNormParameters.m_ForgetLayerNormWeights)
+ {
+ throw armnn::NullPointerException("LstmLayer: "
+ "m_LayerNormParameters.m_forgetLayerNormWeights should not be null.");
+ }
+
+ if (!m_LayerNormParameters.m_CellLayerNormWeights)
+ {
+ throw armnn::NullPointerException("LstmLayer: "
+ "m_LayerNormParameters.m_cellLayerNormWeights should not be null.");
+ }
+
+ if (!m_LayerNormParameters.m_OutputLayerNormWeights)
+ {
+ throw armnn::NullPointerException("LstmLayer: "
+ "m_LayerNormParameters.m_outputLayerNormWeights should not be null.");
}
- ARMNN_ASSERT_MSG(m_LayerNormParameters.m_ForgetLayerNormWeights != nullptr,
- "LstmLayer: m_LayerNormParameters.m_forgetLayerNormWeights should not be null.");
- ARMNN_ASSERT_MSG(m_LayerNormParameters.m_CellLayerNormWeights != nullptr,
- "LstmLayer: m_LayerNormParameters.m_cellLayerNormWeights should not be null.");
- ARMNN_ASSERT_MSG(m_LayerNormParameters.m_OutputLayerNormWeights != nullptr,
- "LstmLayer: m_LayerNormParameters.m_outputLayerNormWeights should not be null.");
}
}
diff --git a/src/armnn/layers/MapLayer.cpp b/src/armnn/layers/MapLayer.cpp
index 6141974122..71814a24de 100644
--- a/src/armnn/layers/MapLayer.cpp
+++ b/src/armnn/layers/MapLayer.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020,2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "MapLayer.hpp"
@@ -38,7 +38,11 @@ void MapLayer::ValidateTensorShapesFromInputs()
{
// validates that the input is connected.
VerifyLayerConnections(1, CHECK_LOCATION());
- ARMNN_ASSERT(GetNumOutputSlots() == 0);
+ if (GetNumOutputSlots() != 0)
+ {
+ throw armnn::LayerValidationException("Output slots must be \"0\" - currently \""
+ + std::to_string(GetNumOutputSlots()) + "\".");
+ }
}
void MapLayer::ExecuteStrategy(IStrategy& strategy) const
diff --git a/src/armnn/layers/MeanLayer.cpp b/src/armnn/layers/MeanLayer.cpp
index bd49f509a0..62a3923750 100644
--- a/src/armnn/layers/MeanLayer.cpp
+++ b/src/armnn/layers/MeanLayer.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -52,19 +52,35 @@ void MeanLayer::ValidateTensorShapesFromInputs()
std::vector<TensorShape> inferredShapes = InferOutputShapes(
{ GetInputSlot(0).GetTensorInfo().GetShape() });
- ARMNN_ASSERT(inferredShapes.size() == 1);
- ARMNN_ASSERT(inferredShapes[0].GetDimensionality() == Dimensionality::Specified);
+ if (inferredShapes.size() != 1)
+ {
+ throw armnn::LayerValidationException("inferredShapes has "
+ + std::to_string(inferredShapes.size()) +
+ " elements - should only have 1.");
+ }
+
+ if (inferredShapes[0].GetDimensionality() != Dimensionality::Specified)
+ {
+ throw armnn::LayerValidationException("inferredShapes' dimensionality has not been specified.");
+ }
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "MeanLayer");
}
std::vector<TensorShape> MeanLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
- ARMNN_ASSERT(inputShapes.size() == 1);
+ if (inputShapes.size() != 1)
+ {
+ throw armnn::Exception("inputShapes' size is \"" + std::to_string(inputShapes.size()) +
+ "\" - should be \"1\".");
+ }
+
const TensorShape& input = inputShapes[0];
- ARMNN_ASSERT_MSG(input.GetNumDimensions() > 0 && input.GetNumDimensions() <= 4,
- "MeanLayer: Mean supports up to 4D input.");
+ if (auto inputDims = input.GetNumDimensions(); inputDims != std::clamp(inputDims, 1u, 4u))
+ {
+ throw armnn::Exception("ReduceLayer: Reduce supports up to 4D input.");
+ }
unsigned int rank = input.GetNumDimensions();
unsigned int outputRank = 0;
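
The clamp comparison in MeanLayer above is a compact inclusive range check: a value lies in [1, 4] exactly when clamping it to that interval leaves it unchanged. Isolated for clarity (standalone sketch, not ArmNN code):

#include <algorithm>

// True when dims falls outside the supported rank range [1, 4],
// i.e. the condition the MeanLayer check rejects.
bool OutsideSupportedRank(unsigned int dims)
{
    return dims != std::clamp(dims, 1u, 4u);
}

OutsideSupportedRank(0) and OutsideSupportedRank(5) are true; OutsideSupportedRank(4) is false.
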
diff --git a/src/armnn/layers/MemCopyLayer.cpp b/src/armnn/layers/MemCopyLayer.cpp
index 6dd203448d..6fc7d73dd9 100644
--- a/src/armnn/layers/MemCopyLayer.cpp
+++ b/src/armnn/layers/MemCopyLayer.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "MemCopyLayer.hpp"
@@ -44,7 +44,12 @@ void MemCopyLayer::ValidateTensorShapesFromInputs()
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetTensorInfo().GetShape() });
- ARMNN_ASSERT(inferredShapes.size() == 1);
+ if (inferredShapes.size() != 1)
+ {
+ throw armnn::LayerValidationException("inferredShapes has "
+ + std::to_string(inferredShapes.size()) +
+ " elements - should only have 1.");
+ }
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "MemCopyLayer");
}
diff --git a/src/armnn/layers/MemImportLayer.cpp b/src/armnn/layers/MemImportLayer.cpp
index a1c92f676e..10e6cd44c2 100644
--- a/src/armnn/layers/MemImportLayer.cpp
+++ b/src/armnn/layers/MemImportLayer.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2019-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2019-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "MemImportLayer.hpp"
@@ -44,7 +44,12 @@ void MemImportLayer::ValidateTensorShapesFromInputs()
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetTensorInfo().GetShape() });
- ARMNN_ASSERT(inferredShapes.size() == 1);
+ if (inferredShapes.size() != 1)
+ {
+ throw armnn::LayerValidationException("inferredShapes has "
+ + std::to_string(inferredShapes.size()) +
+ " elements - should only have 1.");
+ }
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "MemImportLayer");
}
diff --git a/src/armnn/layers/MergeLayer.cpp b/src/armnn/layers/MergeLayer.cpp
index a3b098ae00..e8f92177eb 100644
--- a/src/armnn/layers/MergeLayer.cpp
+++ b/src/armnn/layers/MergeLayer.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2019-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2019-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "MergeLayer.hpp"
@@ -40,14 +40,23 @@ void MergeLayer::ValidateTensorShapesFromInputs()
GetInputSlot(1).GetTensorInfo().GetShape(),
});
- ARMNN_ASSERT(inferredShapes.size() == 1);
+ if (inferredShapes.size() != 1)
+ {
+ throw armnn::LayerValidationException("inferredShapes has "
+ + std::to_string(inferredShapes.size()) +
+ " elements - should only have 1.");
+ }
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "MergeLayer");
}
std::vector<TensorShape> MergeLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
- ARMNN_ASSERT(inputShapes.size() == 2);
+ if (inputShapes.size() != 2)
+ {
+ throw armnn::Exception("inputShapes' size is \"" + std::to_string(inputShapes.size()) +
+ "\" - should be \"2\".");
+ }
ConditionalThrowIfNotEqual<LayerValidationException>(
"MergeLayer: TensorShapes set on inputs do not match",
diff --git a/src/armnn/layers/NormalizationLayer.cpp b/src/armnn/layers/NormalizationLayer.cpp
index 24b6788ee6..b604b05d0f 100644
--- a/src/armnn/layers/NormalizationLayer.cpp
+++ b/src/armnn/layers/NormalizationLayer.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "NormalizationLayer.hpp"
@@ -41,7 +41,12 @@ void NormalizationLayer::ValidateTensorShapesFromInputs()
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetTensorInfo().GetShape() });
- ARMNN_ASSERT(inferredShapes.size() == 1);
+ if (inferredShapes.size() != 1)
+ {
+ throw armnn::LayerValidationException("inferredShapes has "
+ + std::to_string(inferredShapes.size()) +
+ " elements - should only have 1.");
+ }
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "NormalizationLayer");
}
diff --git a/src/armnn/layers/PadLayer.cpp b/src/armnn/layers/PadLayer.cpp
index 0024ba5541..4b0b0e1d49 100644
--- a/src/armnn/layers/PadLayer.cpp
+++ b/src/armnn/layers/PadLayer.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -41,12 +41,28 @@ PadLayer* PadLayer::Clone(Graph& graph) const
std::vector<TensorShape> PadLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
- ARMNN_ASSERT(inputShapes.size() == 1);
+ if (inputShapes.size() != 1)
+ {
+ throw armnn::Exception("inputShapes' size is \"" + std::to_string(inputShapes.size()) +
+ "\" - should be \"1\".");
+ }
+
const TensorShape& inputShape = inputShapes[0];
unsigned int rank = inputShape.GetNumDimensions();
- ARMNN_ASSERT(m_Param.m_PadList.size() == rank);
- ARMNN_ASSERT(rank != 0);
+
+ if (m_Param.m_PadList.size() != rank)
+ {
+ throw armnn::Exception("Mismatch in size of mPadList and rank (\""
+ + std::to_string(m_Param.m_PadList.size()) +
+ "\" vs "
+ + std::to_string(rank) + ")");
+ }
+
+ if (rank == 0)
+ {
+ throw armnn::Exception("rank must not equal 0.");
+ }
std::vector<unsigned int> outputDimensionSizes(rank);
for (unsigned int i = 0; i < rank; ++i)
@@ -68,7 +84,12 @@ void PadLayer::ValidateTensorShapesFromInputs()
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetTensorInfo().GetShape() });
- ARMNN_ASSERT(inferredShapes.size() == 1);
+ if (inferredShapes.size() != 1)
+ {
+ throw armnn::LayerValidationException("inferredShapes has "
+ + std::to_string(inferredShapes.size()) +
+ " elements - should only have 1.");
+ }
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "PadLayer");
}
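The checks above guard the padded-shape arithmetic performed later in PadLayer::InferOutputShapes. Below is a minimal standalone sketch of that arithmetic, assuming the usual per-dimension (low, high) pad-pair convention; the helper is hypothetical and not the ArmNN implementation.

// Standalone sketch: grow each dimension by its (low, high) padding amounts.
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <stdexcept>
#include <string>
#include <utility>
#include <vector>

std::vector<uint32_t> InferPaddedShape(const std::vector<uint32_t>& inputShape,
                                       const std::vector<std::pair<uint32_t, uint32_t>>& padList)
{
    if (inputShape.empty())
    {
        throw std::invalid_argument("rank must not equal 0.");
    }
    if (padList.size() != inputShape.size())
    {
        throw std::invalid_argument("Mismatch in size of padList and rank (" +
                                    std::to_string(padList.size()) + " vs " +
                                    std::to_string(inputShape.size()) + ")");
    }

    std::vector<uint32_t> outputShape(inputShape.size());
    for (std::size_t i = 0; i < inputShape.size(); ++i)
    {
        // Each output dimension is the input dimension plus the low and high padding.
        outputShape[i] = padList[i].first + inputShape[i] + padList[i].second;
    }
    return outputShape;
}

int main()
{
    // A 1x3x3 tensor padded by one element on each side of its last two
    // dimensions becomes 1x5x5.
    for (auto d : InferPaddedShape({1, 3, 3}, {{0, 0}, {1, 1}, {1, 1}}))
    {
        std::cout << d << ' ';   // prints: 1 5 5
    }
    std::cout << '\n';
}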
diff --git a/src/armnn/layers/PermuteLayer.cpp b/src/armnn/layers/PermuteLayer.cpp
index f8803a167e..3d3efc3209 100644
--- a/src/armnn/layers/PermuteLayer.cpp
+++ b/src/armnn/layers/PermuteLayer.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -37,7 +37,12 @@ PermuteLayer* PermuteLayer::Clone(Graph& graph) const
std::vector<TensorShape> PermuteLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
- ARMNN_ASSERT(inputShapes.size() == 1);
+ if (inputShapes.size() != 1)
+ {
+ throw armnn::Exception("inputShapes' size is \"" + std::to_string(inputShapes.size()) +
+ "\" - should be \"1\".");
+ }
+
const TensorShape& inShape = inputShapes[0];
return std::vector<TensorShape> ({armnnUtils::Permuted(inShape, m_Param.m_DimMappings)});
}
@@ -52,7 +57,12 @@ void PermuteLayer::ValidateTensorShapesFromInputs()
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetTensorInfo().GetShape() });
- ARMNN_ASSERT(inferredShapes.size() == 1);
+ if (inferredShapes.size() != 1)
+ {
+ throw armnn::LayerValidationException("inferredShapes has "
+ + std::to_string(inferredShapes.size()) +
+ " elements - should only have 1.");
+ }
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "PermuteLayer");
}
diff --git a/src/armnn/layers/Pooling2dLayer.cpp b/src/armnn/layers/Pooling2dLayer.cpp
index e423b8bf38..1003867705 100644
--- a/src/armnn/layers/Pooling2dLayer.cpp
+++ b/src/armnn/layers/Pooling2dLayer.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -39,12 +39,20 @@ Pooling2dLayer* Pooling2dLayer::Clone(Graph& graph) const
std::vector<TensorShape> Pooling2dLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
- ARMNN_ASSERT(inputShapes.size() == 1);
+ if (inputShapes.size() != 1)
+ {
+ throw armnn::Exception("inputShapes' size is \"" + std::to_string(inputShapes.size()) +
+ "\" - should be \"1\".");
+ }
+
const TensorShape& inputShape = inputShapes[0];
const DataLayoutIndexed dimensionIndices = m_Param.m_DataLayout;
// If we support multiple batch dimensions in the future, then this assert will need to change.
- ARMNN_ASSERT_MSG(inputShape.GetNumDimensions() == 4, "Pooling2dLayer will always have 4D input.");
+ if (inputShape.GetNumDimensions() != 4)
+ {
+ throw armnn::Exception("Pooling2dLayer will always have 4D input.");
+ }
unsigned int inWidth = inputShape[dimensionIndices.GetWidthIndex()];
unsigned int inHeight = inputShape[dimensionIndices.GetHeightIndex()];
@@ -56,8 +64,10 @@ std::vector<TensorShape> Pooling2dLayer::InferOutputShapes(const std::vector<Ten
unsigned int outHeight = 1;
if (!isGlobalPooling)
{
- ARMNN_ASSERT_MSG(m_Param.m_StrideX!=0 && m_Param.m_StrideY!=0,
- "Stride can only be zero when performing global pooling");
+ if (!m_Param.m_StrideX || !m_Param.m_StrideY)
+ {
+ throw armnn::Exception("Stride can only be zero when performing global pooling");
+ }
auto CalcSize = [](auto inSize, auto lowPad, auto highPad, auto poolSize, auto stride, auto outputShapeRounding)
{
@@ -74,7 +84,7 @@ std::vector<TensorShape> Pooling2dLayer::InferOutputShapes(const std::vector<Ten
size = static_cast<unsigned int>(floor(div)) + 1;
break;
default:
- ARMNN_ASSERT_MSG(false, "Unsupported Output Shape Rounding");
+ throw armnn::Exception("Unsupported Output Shape Rounding");
}
// Makes sure that border operations will start from inside the input and not the padded area.
@@ -112,7 +122,12 @@ void Pooling2dLayer::ValidateTensorShapesFromInputs()
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetTensorInfo().GetShape() });
- ARMNN_ASSERT(inferredShapes.size() == 1);
+ if (inferredShapes.size() != 1)
+ {
+ throw armnn::LayerValidationException("inferredShapes has "
+ + std::to_string(inferredShapes.size()) +
+ " elements - should only have 1.");
+ }
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "Pooling2dLayer");
}
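For reference, the pooled-size formula that the stride and rounding checks above protect can be written standalone as below. The enum and helper are hypothetical stand-ins for the ArmNN types, assuming the (in + padLow + padHigh - pool) / stride + 1 formula visible in the hunk.

// Standalone sketch of the CalcSize arithmetic with floor/ceiling rounding.
#include <cmath>
#include <iostream>
#include <stdexcept>

enum class Rounding { Floor, Ceiling };

unsigned int CalcPooledSize(unsigned int inSize, unsigned int lowPad, unsigned int highPad,
                            unsigned int poolSize, unsigned int stride, Rounding rounding)
{
    if (stride == 0)
    {
        throw std::invalid_argument("Stride can only be zero when performing global pooling");
    }

    double div = static_cast<double>(inSize + lowPad + highPad - poolSize) /
                 static_cast<double>(stride);

    unsigned int size = 0;
    switch (rounding)
    {
        case Rounding::Ceiling:
            size = static_cast<unsigned int>(std::ceil(div)) + 1;
            break;
        case Rounding::Floor:
            size = static_cast<unsigned int>(std::floor(div)) + 1;
            break;
    }
    return size;
}

int main()
{
    // 2x2 pooling with stride 2 over a 7-wide input and no padding:
    // floor((7 - 2) / 2) + 1 = 3, ceil((7 - 2) / 2) + 1 = 4.
    std::cout << CalcPooledSize(7, 0, 0, 2, 2, Rounding::Floor)   << '\n';  // 3
    std::cout << CalcPooledSize(7, 0, 0, 2, 2, Rounding::Ceiling) << '\n';  // 4
}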
diff --git a/src/armnn/layers/Pooling3dLayer.cpp b/src/armnn/layers/Pooling3dLayer.cpp
index ec1ec80f0e..0506efa7d3 100644
--- a/src/armnn/layers/Pooling3dLayer.cpp
+++ b/src/armnn/layers/Pooling3dLayer.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2021-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -39,12 +39,20 @@ Pooling3dLayer* Pooling3dLayer::Clone(Graph& graph) const
std::vector<TensorShape> Pooling3dLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
- ARMNN_ASSERT(inputShapes.size() == 1);
+ if (inputShapes.size() != 1)
+ {
+ throw armnn::Exception("inputShapes' size is \"" + std::to_string(inputShapes.size()) +
+ "\" - should be \"1\".");
+ }
+
const TensorShape& inputShape = inputShapes[0];
const DataLayoutIndexed dimensionIndices = m_Param.m_DataLayout;
// If we support multiple batch dimensions in the future, then this assert will need to change.
- ARMNN_ASSERT_MSG(inputShape.GetNumDimensions() == 5, "Pooling3dLayer will always have 5D input.");
+ if (inputShape.GetNumDimensions() != 5)
+ {
+ throw armnn::Exception("Pooling3dLayer will always have 5D input.");
+ }
unsigned int inWidth = inputShape[dimensionIndices.GetWidthIndex()];
unsigned int inHeight = inputShape[dimensionIndices.GetHeightIndex()];
@@ -58,8 +66,10 @@ std::vector<TensorShape> Pooling3dLayer::InferOutputShapes(const std::vector<Ten
unsigned int outDepth = 1;
if (!isGlobalPooling)
{
- ARMNN_ASSERT_MSG(m_Param.m_StrideX!=0 && m_Param.m_StrideY!=0 && m_Param.m_StrideZ!=0,
- "Stride can only be zero when performing global pooling");
+ if (!m_Param.m_StrideX || !m_Param.m_StrideY || !m_Param.m_StrideZ)
+ {
+ throw armnn::Exception("Stride can only be zero when performing global pooling");
+ }
auto CalcSize = [](auto inSize, auto lowPad, auto highPad, auto poolSize, auto stride, auto outputShapeRounding)
{
@@ -76,7 +86,7 @@ std::vector<TensorShape> Pooling3dLayer::InferOutputShapes(const std::vector<Ten
size = static_cast<unsigned int>(floor(div)) + 1;
break;
default:
- ARMNN_ASSERT_MSG(false, "Unsupported Output Shape Rounding");
+ throw armnn::Exception("Unsupported Output Shape Rounding");
}
// Makes sure that border operations will start from inside the input and not the padded area.
@@ -116,7 +126,12 @@ void Pooling3dLayer::ValidateTensorShapesFromInputs()
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetTensorInfo().GetShape() });
- ARMNN_ASSERT(inferredShapes.size() == 1);
+ if (inferredShapes.size() != 1)
+ {
+ throw armnn::LayerValidationException("inferredShapes has "
+ + std::to_string(inferredShapes.size()) +
+ " elements - should only have 1.");
+ }
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "Pooling3dLayer");
}
diff --git a/src/armnn/layers/PreluLayer.cpp b/src/armnn/layers/PreluLayer.cpp
index a302640434..874ee6b152 100644
--- a/src/armnn/layers/PreluLayer.cpp
+++ b/src/armnn/layers/PreluLayer.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017,2019-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017,2019-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -37,7 +37,11 @@ PreluLayer* PreluLayer::Clone(Graph& graph) const
std::vector<TensorShape> PreluLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
- ARMNN_ASSERT(inputShapes.size() == 2);
+ if (inputShapes.size() != 2)
+ {
+ throw armnn::Exception("inputShapes' size is \"" + std::to_string(inputShapes.size()) +
+ "\" - should be \"2\".");
+ }
const TensorShape& inputShape = inputShapes[0];
const TensorShape& alphaShape = inputShapes[1];
@@ -45,8 +49,16 @@ std::vector<TensorShape> PreluLayer::InferOutputShapes(const std::vector<TensorS
const unsigned int inputShapeDimensions = inputShape.GetNumDimensions();
const unsigned int alphaShapeDimensions = alphaShape.GetNumDimensions();
- ARMNN_ASSERT(inputShapeDimensions > 0);
- ARMNN_ASSERT(alphaShapeDimensions > 0);
+ if (inputShapeDimensions == 0)
+ {
+ throw armnn::Exception("inputShapeDimensions must be greater than 0.");
+ }
+
+ if (alphaShapeDimensions == 0)
+ {
+ throw armnn::Exception("alphaShapeDimensions must be not be zero (\""
+ + std::to_string(alphaShapeDimensions) + "\")");
+ }
// The size of the output is the maximum size along each dimension of the input operands,
// it starts with the trailing dimensions, and works its way forward
@@ -66,8 +78,10 @@ std::vector<TensorShape> PreluLayer::InferOutputShapes(const std::vector<TensorS
unsigned int alphaDimension = alphaShape[armnn::numeric_cast<unsigned int>(alphaShapeIndex)];
// Check that the inputs are broadcast compatible
- ARMNN_ASSERT_MSG(inputDimension == alphaDimension || inputDimension == 1 || alphaDimension == 1,
- "PreluLayer: Dimensions should either match or one should be of size 1");
+ if (inputDimension != alphaDimension && inputDimension != 1 && alphaDimension != 1)
+ {
+ throw armnn::Exception("PreluLayer: Dimensions should either match or one should be of size 1");
+ }
outputShape[outputShapeIndex] = std::max(inputDimension, alphaDimension);
@@ -111,7 +125,12 @@ void PreluLayer::ValidateTensorShapesFromInputs()
GetInputSlot(1).GetTensorInfo().GetShape()
});
- ARMNN_ASSERT(inferredShapes.size() == 1);
+ if (inferredShapes.size() != 1)
+ {
+ throw armnn::LayerValidationException("inferredShapes has "
+ + std::to_string(inferredShapes.size()) +
+ " elements - should only have 1.");
+ }
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "PreluLayer");
}
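The broadcast-compatibility check above follows the usual trailing-dimension rule: dimensions must match or one of them must be 1, and the output takes the larger of the two. A minimal standalone sketch of that rule (hypothetical helper, not ArmNN code):

// Standalone sketch of the trailing-dimension broadcast rule used by PReLU.
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <stdexcept>
#include <vector>

std::vector<uint32_t> BroadcastShape(const std::vector<uint32_t>& input,
                                     const std::vector<uint32_t>& alpha)
{
    if (input.empty() || alpha.empty())
    {
        throw std::invalid_argument("Both shapes must have at least one dimension.");
    }

    // Work from the trailing dimensions forward, padding the shorter shape with 1s.
    std::vector<uint32_t> output(std::max(input.size(), alpha.size()), 1);
    for (std::size_t i = 0; i < output.size(); ++i)
    {
        uint32_t in = i < input.size() ? input[input.size() - 1 - i] : 1;
        uint32_t al = i < alpha.size() ? alpha[alpha.size() - 1 - i] : 1;
        if (in != al && in != 1 && al != 1)
        {
            throw std::invalid_argument("Dimensions should either match or one should be of size 1");
        }
        output[output.size() - 1 - i] = std::max(in, al);
    }
    return output;
}

int main()
{
    // A per-channel alpha of shape {1, 1, 3} broadcasts against an input of {2, 4, 3}.
    for (auto d : BroadcastShape({2, 4, 3}, {1, 1, 3})) { std::cout << d << ' '; }  // 2 4 3
    std::cout << '\n';
}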
diff --git a/src/armnn/layers/QLstmLayer.cpp b/src/armnn/layers/QLstmLayer.cpp
index eeb01db51d..e98deb6a88 100644
--- a/src/armnn/layers/QLstmLayer.cpp
+++ b/src/armnn/layers/QLstmLayer.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2020-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "QLstmLayer.hpp"
@@ -152,7 +152,11 @@ QLstmLayer* QLstmLayer::Clone(Graph& graph) const
std::vector<TensorShape> QLstmLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
- ARMNN_ASSERT(inputShapes.size() == 3);
+ if (inputShapes.size() != 3)
+ {
+ throw armnn::Exception("inputShapes' size is \"" + std::to_string(inputShapes.size()) +
+ "\" - should be \"3\".");
+ }
// Get input values for validation
unsigned int batchSize = inputShapes[0][0];
@@ -182,70 +186,147 @@ void QLstmLayer::ValidateTensorShapesFromInputs()
GetInputSlot(2).GetTensorInfo().GetShape() // previousCellStateIn
});
- ARMNN_ASSERT(inferredShapes.size() == 3);
+ if (inferredShapes.size() != 3)
+ {
+ throw armnn::LayerValidationException("inferredShapes has "
+ + std::to_string(inferredShapes.size()) +
+ " element(s) - should only have 3.");
+ }
// Check if the weights are nullptr for basic params
- ARMNN_ASSERT_MSG(m_BasicParameters.m_InputToForgetWeights != nullptr,
- "QLstmLayer: m_BasicParameters.m_InputToForgetWeights should not be null.");
- ARMNN_ASSERT_MSG(m_BasicParameters.m_InputToCellWeights != nullptr,
- "QLstmLayer: m_BasicParameters.m_InputToCellWeights should not be null.");
- ARMNN_ASSERT_MSG(m_BasicParameters.m_InputToOutputWeights != nullptr,
- "QLstmLayer: m_BasicParameters.m_InputToOutputWeights should not be null.");
- ARMNN_ASSERT_MSG(m_BasicParameters.m_RecurrentToForgetWeights != nullptr,
- "QLstmLayer: m_BasicParameters.m_RecurrentToForgetWeights should not be null.");
- ARMNN_ASSERT_MSG(m_BasicParameters.m_RecurrentToCellWeights != nullptr,
- "QLstmLayer: m_BasicParameters.m_RecurrentToCellWeights should not be null.");
- ARMNN_ASSERT_MSG(m_BasicParameters.m_RecurrentToOutputWeights != nullptr,
- "QLstmLayer: m_BasicParameters.m_RecurrentToOutputWeights should not be null.");
- ARMNN_ASSERT_MSG(m_BasicParameters.m_ForgetGateBias != nullptr,
- "QLstmLayer: m_BasicParameters.m_ForgetGateBias should not be null.");
- ARMNN_ASSERT_MSG(m_BasicParameters.m_CellBias != nullptr,
- "QLstmLayer: m_BasicParameters.m_CellBias should not be null.");
- ARMNN_ASSERT_MSG(m_BasicParameters.m_OutputGateBias != nullptr,
- "QLstmLayer: m_BasicParameters.m_OutputGateBias should not be null.");
+ if (!m_BasicParameters.m_InputToForgetWeights)
+ {
+ throw armnn::LayerValidationException("QLstmLayer: "
+ "m_BasicParameters.m_InputToForgetWeights should not be null.");
+ }
+
+ if (!m_BasicParameters.m_InputToCellWeights)
+ {
+ throw armnn::LayerValidationException("QLstmLayer: "
+ "m_BasicParameters.m_InputToCellWeights should not be null.");
+ }
+
+ if (!m_BasicParameters.m_InputToOutputWeights)
+ {
+ throw armnn::LayerValidationException("QLstmLayer: "
+ "m_BasicParameters.m_InputToOutputWeights should not be null.");
+ }
+
+ if (!m_BasicParameters.m_RecurrentToForgetWeights)
+ {
+ throw armnn::LayerValidationException("QLstmLayer: "
+ "m_BasicParameters.m_RecurrentToForgetWeights should not be null.");
+ }
+
+ if (!m_BasicParameters.m_RecurrentToCellWeights)
+ {
+ throw armnn::LayerValidationException("QLstmLayer: "
+ "m_BasicParameters.m_RecurrentToCellWeights should not be null.");
+ }
+
+ if (!m_BasicParameters.m_RecurrentToOutputWeights)
+ {
+ throw armnn::LayerValidationException("QLstmLayer: "
+ "m_BasicParameters.m_RecurrentToOutputWeights should not be null.");
+ }
+
+ if (!m_BasicParameters.m_ForgetGateBias)
+ {
+ throw armnn::LayerValidationException("QLstmLayer: "
+ "m_BasicParameters.m_ForgetGateBias should not be null.");
+ }
+
+ if (!m_BasicParameters.m_CellBias)
+ {
+ throw armnn::LayerValidationException("QLstmLayer: "
+ "m_BasicParameters.m_CellBias should not be null.");
+ }
+
+ if (!m_BasicParameters.m_OutputGateBias)
+ {
+ throw armnn::LayerValidationException("QLstmLayer: "
+ "m_BasicParameters.m_OutputGateBias should not be null.");
+ }
if (!m_Param.m_CifgEnabled)
{
- ARMNN_ASSERT_MSG(m_CifgParameters.m_InputToInputWeights != nullptr,
- "QLstmLayer: m_CifgParameters.m_InputToInputWeights should not be null.");
- ARMNN_ASSERT_MSG(m_CifgParameters.m_RecurrentToInputWeights != nullptr,
- "QLstmLayer: m_CifgParameters.m_RecurrentToInputWeights should not be null.");
- ARMNN_ASSERT_MSG(m_CifgParameters.m_InputGateBias != nullptr,
- "QLstmLayer: m_CifgParameters.m_InputGateBias should not be null.");
+ if (!m_CifgParameters.m_InputToInputWeights)
+ {
+ throw armnn::LayerValidationException("QLstmLayer: "
+ "m_CifgParameters.m_InputToInputWeights should not be null.");
+ }
+
+ if (!m_CifgParameters.m_RecurrentToInputWeights)
+ {
+ throw armnn::LayerValidationException("QLstmLayer: "
+ "m_CifgParameters.m_RecurrentToInputWeights should not be null.");
+ }
+
+ if (!m_CifgParameters.m_InputGateBias)
+ {
+ throw armnn::LayerValidationException("QLstmLayer: "
+ "m_CifgParameters.m_InputGateBias should not be null.");
+ }
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "QLstmLayer");
}
else
{
- ARMNN_ASSERT_MSG(m_CifgParameters.m_InputToInputWeights == nullptr,
- "QLstmLayer: m_CifgParameters.m_InputToInputWeights should not have a value when CIFG is enabled.");
- ARMNN_ASSERT_MSG(m_CifgParameters.m_RecurrentToInputWeights == nullptr,
- "QLstmLayer: m_CifgParameters.m_RecurrentToInputWeights should "
- "not have a value when CIFG is enabled.");
- ARMNN_ASSERT_MSG(m_CifgParameters.m_InputGateBias == nullptr,
- "QLstmLayer: m_CifgParameters.m_InputGateBias should not have a value when CIFG is enabled.");
+ if (m_CifgParameters.m_InputToInputWeights)
+ {
+ throw armnn::LayerValidationException("QLstmLayer: "
+ "m_CifgParameters.m_InputToInputWeights "
+ "should not have a value when CIFG is enabled.");
+ }
+
+ if (m_CifgParameters.m_RecurrentToInputWeights)
+ {
+ throw armnn::LayerValidationException("QLstmLayer: "
+ "m_CifgParameters.m_RecurrentToInputWeights "
+ "should not have a value when CIFG is enabled.");
+ }
+
+ if (m_CifgParameters.m_InputGateBias)
+ {
+ throw armnn::LayerValidationException("QLstmLayer: "
+ "m_CifgParameters.m_InputGateBias "
+ "should not have a value when CIFG is enabled.");
+ }
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "QLstmLayer");
}
if (m_Param.m_ProjectionEnabled)
{
- ARMNN_ASSERT_MSG(m_ProjectionParameters.m_ProjectionWeights != nullptr,
- "QLstmLayer: m_ProjectionParameters.m_ProjectionWeights should not be null.");
+ if (!m_ProjectionParameters.m_ProjectionWeights)
+ {
+ throw armnn::LayerValidationException("QLstmLayer: "
+ "m_ProjectionParameters.m_ProjectionWeights should not be null.");
+ }
}
if (m_Param.m_PeepholeEnabled)
{
if (!m_Param.m_CifgEnabled) {
- ARMNN_ASSERT_MSG(m_PeepholeParameters.m_CellToInputWeights != nullptr,
- "QLstmLayer: m_PeepholeParameters.m_CellToInputWeights should not be null "
- "when Peephole is enabled and CIFG is disabled.");
+ if (!m_PeepholeParameters.m_CellToInputWeights)
+ {
+ throw armnn::LayerValidationException("QLstmLayer: "
+ "m_PeepholeParameters.m_CellToInputWeights should not be null "
+ "when Peephole is enabled and CIFG is disabled.");
+ }
}
- ARMNN_ASSERT_MSG(m_PeepholeParameters.m_CellToForgetWeights != nullptr,
- "QLstmLayer: m_PeepholeParameters.m_CellToForgetWeights should not be null.");
- ARMNN_ASSERT_MSG(m_PeepholeParameters.m_CellToOutputWeights != nullptr,
- "QLstmLayer: m_PeepholeParameters.m_CellToOutputWeights should not be null.");
+ if (!m_PeepholeParameters.m_CellToForgetWeights)
+ {
+ throw armnn::LayerValidationException("QLstmLayer: "
+ "m_PeepholeParameters.m_CellToForgetWeights should not be null.");
+ }
+
+ if (!m_PeepholeParameters.m_CellToOutputWeights)
+ {
+ throw armnn::LayerValidationException("QLstmLayer: "
+ "m_PeepholeParameters.m_CellToOutputWeights should not be null.");
+ }
}
ValidateAndCopyShape(
@@ -255,17 +336,32 @@ void QLstmLayer::ValidateTensorShapesFromInputs()
if (m_Param.m_LayerNormEnabled)
{
- if(!m_Param.m_CifgEnabled)
+ if (!m_Param.m_CifgEnabled)
+ {
+ if (!m_LayerNormParameters.m_InputLayerNormWeights)
+ {
+ throw armnn::LayerValidationException("QLstmLayer: m_LayerNormParameters.m_InputLayerNormWeights "
+ "should not be null.");
+ }
+ }
+
+ if (!m_LayerNormParameters.m_ForgetLayerNormWeights)
+ {
+ throw armnn::LayerValidationException("QLstmLayer: "
+ "m_LayerNormParameters.m_ForgetLayerNormWeights should not be null.");
+ }
+
+ if (!m_LayerNormParameters.m_CellLayerNormWeights)
+ {
+ throw armnn::LayerValidationException("QLstmLayer: "
+ "m_LayerNormParameters.m_CellLayerNormWeights should not be null.");
+ }
+
+ if (!m_LayerNormParameters.m_OutputLayerNormWeights)
{
- ARMNN_ASSERT_MSG(m_LayerNormParameters.m_InputLayerNormWeights != nullptr,
- "QLstmLayer: m_LayerNormParameters.m_InputLayerNormWeights should not be null.");
+ throw armnn::LayerValidationException("QLstmLayer: "
+ "m_LayerNormParameters.m_UutputLayerNormWeights should not be null.");
}
- ARMNN_ASSERT_MSG(m_LayerNormParameters.m_ForgetLayerNormWeights != nullptr,
- "QLstmLayer: m_LayerNormParameters.m_ForgetLayerNormWeights should not be null.");
- ARMNN_ASSERT_MSG(m_LayerNormParameters.m_CellLayerNormWeights != nullptr,
- "QLstmLayer: m_LayerNormParameters.m_CellLayerNormWeights should not be null.");
- ARMNN_ASSERT_MSG(m_LayerNormParameters.m_OutputLayerNormWeights != nullptr,
- "QLstmLayer: m_LayerNormParameters.m_UutputLayerNormWeights should not be null.");
}
}
diff --git a/src/armnn/layers/QuantizedLstmLayer.cpp b/src/armnn/layers/QuantizedLstmLayer.cpp
index c82e34f983..ebe320718c 100644
--- a/src/armnn/layers/QuantizedLstmLayer.cpp
+++ b/src/armnn/layers/QuantizedLstmLayer.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017,2019-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017,2019-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "QuantizedLstmLayer.hpp"
@@ -80,7 +80,11 @@ QuantizedLstmLayer* QuantizedLstmLayer::Clone(Graph& graph) const
std::vector<TensorShape> QuantizedLstmLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
- ARMNN_ASSERT(inputShapes.size() == 3);
+ if (inputShapes.size() != 3)
+ {
+ throw armnn::Exception("inputShapes' size is \"" + std::to_string(inputShapes.size()) +
+ "\" - should be \"3\".");
+ }
// Get input values for validation
unsigned int numBatches = inputShapes[0][0];
@@ -108,35 +112,97 @@ void QuantizedLstmLayer::ValidateTensorShapesFromInputs()
GetInputSlot(2).GetTensorInfo().GetShape() // previousOutputIn
});
- ARMNN_ASSERT(inferredShapes.size() == 2);
+ if (inferredShapes.size() != 2)
+ {
+ throw armnn::LayerValidationException("inferredShapes has "
+ + std::to_string(inferredShapes.size()) +
+ " element(s) - should only have 2.");
+ }
// Check weights and bias for nullptr
- ARMNN_ASSERT_MSG(m_QuantizedLstmParameters.m_InputToInputWeights != nullptr,
- "QuantizedLstmLayer: m_QuantizedLstmParameters.m_InputToInputWeights should not be null.");
- ARMNN_ASSERT_MSG(m_QuantizedLstmParameters.m_InputToForgetWeights != nullptr,
- "QuantizedLstmLayer: m_QuantizedLstmParameters.m_InputToForgetWeights should not be null.");
- ARMNN_ASSERT_MSG(m_QuantizedLstmParameters.m_InputToCellWeights != nullptr,
- "QuantizedLstmLayer: m_QuantizedLstmParameters.m_InputToCellWeights should not be null.");
- ARMNN_ASSERT_MSG(m_QuantizedLstmParameters.m_InputToOutputWeights != nullptr,
- "QuantizedLstmLayer: m_QuantizedLstmParameters.m_InputToOutputWeights should not be null.");
-
- ARMNN_ASSERT_MSG(m_QuantizedLstmParameters.m_RecurrentToInputWeights != nullptr,
- "QuantizedLstmLayer: m_QuantizedLstmParameters.m_RecurrentToInputWeights should not be null.");
- ARMNN_ASSERT_MSG(m_QuantizedLstmParameters.m_RecurrentToForgetWeights != nullptr,
- "QuantizedLstmLayer: m_QuantizedLstmParameters.m_RecurrentToForgetWeights should not be null.");
- ARMNN_ASSERT_MSG(m_QuantizedLstmParameters.m_RecurrentToCellWeights != nullptr,
- "QuantizedLstmLayer: m_QuantizedLstmParameters.m_RecurrentToCellWeights should not be null.");
- ARMNN_ASSERT_MSG(m_QuantizedLstmParameters.m_RecurrentToOutputWeights != nullptr,
- "QuantizedLstmLayer: m_QuantizedLstmParameters.m_RecurrentToOutputWeights should not be null.");
-
- ARMNN_ASSERT_MSG(m_QuantizedLstmParameters.m_InputGateBias != nullptr,
- "QuantizedLstmLayer: m_QuantizedLstmParameters.m_InputGateBias should not be null.");
- ARMNN_ASSERT_MSG(m_QuantizedLstmParameters.m_ForgetGateBias != nullptr,
- "QuantizedLstmLayer: m_QuantizedLstmParameters.m_ForgetGateBias should not be null.");
- ARMNN_ASSERT_MSG(m_QuantizedLstmParameters.m_CellBias != nullptr,
- "QuantizedLstmLayer: m_QuantizedLstmParameters.m_CellBias should not be null.");
- ARMNN_ASSERT_MSG(m_QuantizedLstmParameters.m_OutputGateBias != nullptr,
- "QuantizedLstmLayer: m_QuantizedLstmParameters.m_OutputGateBias should not be null.");
+ if (!m_QuantizedLstmParameters.m_InputToInputWeights)
+ {
+ throw armnn::LayerValidationException("QuantizedLstmLayer: "
+ "m_QuantizedLstmParameters.m_InputToInputWeights "
+ "should not be null.");
+ }
+
+ if (!m_QuantizedLstmParameters.m_InputToForgetWeights)
+ {
+ throw armnn::LayerValidationException("QuantizedLstmLayer: "
+ "m_QuantizedLstmParameters.m_InputToForgetWeights "
+ "should not be null.");
+ }
+
+ if (!m_QuantizedLstmParameters.m_InputToCellWeights)
+ {
+ throw armnn::LayerValidationException("QuantizedLstmLayer: "
+ "m_QuantizedLstmParameters.m_InputToCellWeights "
+ "should not be null.");
+ }
+
+ if (!m_QuantizedLstmParameters.m_InputToOutputWeights)
+ {
+ throw armnn::LayerValidationException("QuantizedLstmLayer: "
+ "m_QuantizedLstmParameters.m_InputToOutputWeights "
+ "should not be null.");
+ }
+
+ if (!m_QuantizedLstmParameters.m_RecurrentToInputWeights)
+ {
+ throw armnn::LayerValidationException("QuantizedLstmLayer: "
+ "m_QuantizedLstmParameters.m_RecurrentToInputWeights "
+ "should not be null.");
+ }
+
+ if (!m_QuantizedLstmParameters.m_RecurrentToForgetWeights)
+ {
+ throw armnn::LayerValidationException("QuantizedLstmLayer: "
+ "m_QuantizedLstmParameters.m_RecurrentToForgetWeights "
+ "should not be null.");
+ }
+
+ if (!m_QuantizedLstmParameters.m_RecurrentToCellWeights)
+ {
+ throw armnn::LayerValidationException("QuantizedLstmLayer: "
+ "m_QuantizedLstmParameters.m_RecurrentToCellWeights "
+ "should not be null.");
+ }
+
+ if (!m_QuantizedLstmParameters.m_RecurrentToOutputWeights)
+ {
+ throw armnn::LayerValidationException("QuantizedLstmLayer: "
+ "m_QuantizedLstmParameters.m_RecurrentToOutputWeights "
+ "should not be null.");
+ }
+
+ if (!m_QuantizedLstmParameters.m_InputGateBias)
+ {
+ throw armnn::LayerValidationException("QuantizedLstmLayer: "
+ "m_QuantizedLstmParameters.m_InputGateBias "
+ "should not be null.");
+ }
+
+ if (!m_QuantizedLstmParameters.m_ForgetGateBias)
+ {
+ throw armnn::LayerValidationException("QuantizedLstmLayer: "
+ "m_QuantizedLstmParameters.m_ForgetGateBias "
+ "should not be null.");
+ }
+
+ if (!m_QuantizedLstmParameters.m_CellBias)
+ {
+ throw armnn::LayerValidationException("QuantizedLstmLayer: "
+ "m_QuantizedLstmParameters.m_CellBias "
+ "should not be null.");
+ }
+
+ if (!m_QuantizedLstmParameters.m_OutputGateBias)
+ {
+ throw armnn::LayerValidationException("QuantizedLstmLayer: "
+ "m_QuantizedLstmParameters.m_OutputGateBias "
+ "should not be null.");
+ }
// Check output TensorShape(s) match inferred shape
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "QuantizedLstmLayer");
diff --git a/src/armnn/layers/ReduceLayer.cpp b/src/armnn/layers/ReduceLayer.cpp
index 21095dda74..bebd043e9b 100644
--- a/src/armnn/layers/ReduceLayer.cpp
+++ b/src/armnn/layers/ReduceLayer.cpp
@@ -1,6 +1,6 @@
//
// Copyright © 2020 Samsung Electronics Co Ltd and Contributors. All rights reserved.
-// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2023-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -51,8 +51,10 @@ void ReduceLayer::ValidateTensorShapesFromInputs()
const TensorInfo& input = GetInputSlot(0).GetTensorInfo();
- ARMNN_ASSERT_MSG(input.GetNumDimensions() > 0 && input.GetNumDimensions() <= 4,
- "ReduceLayer: Reduce supports up to 4D input.");
+ if (auto inputDims = input.GetNumDimensions(); inputDims != std::clamp(inputDims, 1u, 4u))
+ {
+ throw armnn::LayerValidationException("ReduceLayer: Reduce supports up to 4D input.");
+ }
std::vector<TensorShape> inferredShapes = InferOutputShapes( {input.GetShape() });
@@ -61,11 +63,18 @@ void ReduceLayer::ValidateTensorShapesFromInputs()
std::vector<TensorShape> ReduceLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
- ARMNN_ASSERT(inputShapes.size() == 1);
+ if (inputShapes.size() != 1)
+ {
+ throw armnn::Exception("inputShapes' size is \"" + std::to_string(inputShapes.size()) +
+ "\" - should be \"1\".");
+ }
+
const TensorShape& input = inputShapes[0];
- ARMNN_ASSERT_MSG(input.GetNumDimensions() > 0 && input.GetNumDimensions() <= 4,
- "ReduceLayer: Reduce supports up to 4D input.");
+ if (auto inputDims = input.GetNumDimensions(); inputDims != std::clamp(inputDims, 1u, 4u))
+ {
+ throw armnn::Exception("ReduceLayer: Reduce supports up to 4D input.");
+ }
unsigned int rank = input.GetNumDimensions();
unsigned int outputRank = 0;
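The new range check leans on std::clamp (from <algorithm>, assumed to be reachable through the existing includes): a value lies in [1, 4] exactly when clamping it to that interval returns it unchanged. A standalone illustration of the idiom:

// Standalone illustration of the clamp-based range check used above.
#include <algorithm>
#include <iostream>

bool WithinSupportedRank(unsigned int numDimensions)
{
    // Equivalent to: numDimensions >= 1 && numDimensions <= 4.
    return numDimensions == std::clamp(numDimensions, 1u, 4u);
}

int main()
{
    std::cout << std::boolalpha
              << WithinSupportedRank(0) << ' '    // false
              << WithinSupportedRank(3) << ' '    // true
              << WithinSupportedRank(5) << '\n';  // false
}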
diff --git a/src/armnn/layers/ReshapeLayer.cpp b/src/armnn/layers/ReshapeLayer.cpp
index b786f54fe3..f6480b0ff5 100644
--- a/src/armnn/layers/ReshapeLayer.cpp
+++ b/src/armnn/layers/ReshapeLayer.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "ReshapeLayer.hpp"
@@ -48,8 +48,17 @@ void ReshapeLayer::ValidateTensorShapesFromInputs()
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetTensorInfo().GetShape() });
- ARMNN_ASSERT(inferredShapes.size() == 1);
- ARMNN_ASSERT(inferredShapes[0].GetDimensionality() == Dimensionality::Specified);
+ if (inferredShapes.size() != 1)
+ {
+ throw armnn::LayerValidationException("inferredShapes has "
+ + std::to_string(inferredShapes.size()) +
+ " elements - should only have 1.");
+ }
+
+ if (inferredShapes[0].GetDimensionality() != Dimensionality::Specified)
+ {
+ throw armnn::LayerValidationException("inferredShapes' dimensionality has not been specified.");
+ }
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "ReshapeLayer");
}
diff --git a/src/armnn/layers/ResizeLayer.cpp b/src/armnn/layers/ResizeLayer.cpp
index 734df0a9a2..0b60db28ee 100644
--- a/src/armnn/layers/ResizeLayer.cpp
+++ b/src/armnn/layers/ResizeLayer.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2019-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2019-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -38,7 +38,11 @@ ResizeLayer* ResizeLayer::Clone(Graph& graph) const
std::vector<TensorShape> ResizeLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
- ARMNN_ASSERT(inputShapes.size() == 1);
+ if (inputShapes.size() != 1)
+ {
+ throw armnn::Exception("inputShapes' size is \"" + std::to_string(inputShapes.size()) +
+ "\" - should be \"1\".");
+ }
const TensorShape& inputShape = inputShapes[0];
const DataLayoutIndexed dimensionIndices = m_Param.m_DataLayout;
@@ -70,7 +74,12 @@ void ResizeLayer::ValidateTensorShapesFromInputs()
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetTensorInfo().GetShape() });
- ARMNN_ASSERT(inferredShapes.size() == 1);
+ if (inferredShapes.size() != 1)
+ {
+ throw armnn::LayerValidationException("inferredShapes has "
+ + std::to_string(inferredShapes.size()) +
+ " elements - should only have 1.");
+ }
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "ResizeLayer");
}
diff --git a/src/armnn/layers/ReverseV2Layer.cpp b/src/armnn/layers/ReverseV2Layer.cpp
index e1160b6e16..1c46b79875 100644
--- a/src/armnn/layers/ReverseV2Layer.cpp
+++ b/src/armnn/layers/ReverseV2Layer.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -32,7 +32,11 @@ ReverseV2Layer* ReverseV2Layer::Clone(armnn::Graph &graph) const
std::vector<TensorShape> ReverseV2Layer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
- ARMNN_ASSERT(inputShapes.size() == 2);
+ if (inputShapes.size() != 2)
+ {
+ throw armnn::Exception("inputShapes' size is \"" + std::to_string(inputShapes.size()) +
+ "\" - should be \"2\".");
+ }
const auto inputDims = inputShapes[0].GetNumDimensions();
@@ -59,7 +63,12 @@ void ReverseV2Layer::ValidateTensorShapesFromInputs()
GetInputSlot(0).GetTensorInfo().GetShape(),
GetInputSlot(1).GetTensorInfo().GetShape()});
- ARMNN_ASSERT(inferredShapes.size() == 1);
+ if (inferredShapes.size() != 1)
+ {
+ throw armnn::LayerValidationException("inferredShapes has "
+ + std::to_string(inferredShapes.size()) +
+ " elements - should only have 1.");
+ }
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "ReverseV2Layer");
}
diff --git a/src/armnn/layers/RsqrtLayer.cpp b/src/armnn/layers/RsqrtLayer.cpp
index 91f6d10a69..10c05c4d22 100644
--- a/src/armnn/layers/RsqrtLayer.cpp
+++ b/src/armnn/layers/RsqrtLayer.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -43,7 +43,12 @@ void RsqrtLayer::ValidateTensorShapesFromInputs()
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetTensorInfo().GetShape() });
- ARMNN_ASSERT(inferredShapes.size() == 1);
+ if (inferredShapes.size() != 1)
+ {
+ throw armnn::LayerValidationException("inferredShapes has "
+ + std::to_string(inferredShapes.size()) +
+ " elements - should only have 1.");
+ }
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "RsqrtLayer");
}
@@ -53,4 +58,4 @@ void RsqrtLayer::ExecuteStrategy(IStrategy& strategy) const
strategy.ExecuteStrategy(this, GetParameters(), {}, GetName());
}
-} // namespace armnn
\ No newline at end of file
+} // namespace armnn
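The single-element check on inferredShapes recurs verbatim across most of the layers in this patch. A shared helper along the following lines could centralize the message; this is only a sketch with stand-in types and is not part of the change:

// Hypothetical helper, not part of this patch: one place to validate that
// shape inference produced exactly one output shape.
#include <stdexcept>
#include <string>
#include <vector>

// Stand-ins so the sketch is self-contained; the real types live in ArmNN.
struct TensorShape {};
struct LayerValidationException : std::runtime_error
{
    using std::runtime_error::runtime_error;
};

void CheckSingleInferredShape(const std::vector<TensorShape>& inferredShapes,
                              const char* layerName)
{
    if (inferredShapes.size() != 1)
    {
        throw LayerValidationException(std::string(layerName) + ": inferredShapes has " +
                                       std::to_string(inferredShapes.size()) +
                                       " elements - should only have 1.");
    }
}

int main()
{
    std::vector<TensorShape> shapes(1);
    CheckSingleInferredShape(shapes, "ExampleLayer");  // passes silently
}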
diff --git a/src/armnn/layers/ShapeLayer.cpp b/src/armnn/layers/ShapeLayer.cpp
index e7e343c707..d810bef9dd 100644
--- a/src/armnn/layers/ShapeLayer.cpp
+++ b/src/armnn/layers/ShapeLayer.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2021-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -44,15 +44,23 @@ void ShapeLayer::ValidateTensorShapesFromInputs()
auto inferredShape = InferOutputShapes({ GetInputSlot(0).GetTensorInfo().GetShape() });
- ARMNN_ASSERT(inferredShape.size() == 1);
+ if (inferredShape.size() != 1)
+ {
+ throw armnn::LayerValidationException("inferredShape has "
+ + std::to_string(inferredShape.size()) +
+ " elements - should only have 1.");
+ }
ValidateAndCopyShape(outputShape, inferredShape[0], m_ShapeInferenceMethod, "ShapeLayer");
}
std::vector<TensorShape> ShapeLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
- IgnoreUnused(inputShapes);
- ARMNN_ASSERT(inputShapes.size() == 1);
+ if (inputShapes.size() != 1)
+ {
+ throw armnn::Exception("inputShapes' size is \"" + std::to_string(inputShapes.size()) +
+ "\" - should be \"1\".");
+ }
TensorShape outputShape({ inputShapes[0].GetNumDimensions()} );
diff --git a/src/armnn/layers/SliceLayer.cpp b/src/armnn/layers/SliceLayer.cpp
index a9327c6111..428e672adb 100644
--- a/src/armnn/layers/SliceLayer.cpp
+++ b/src/armnn/layers/SliceLayer.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2019-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2019-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -44,7 +44,12 @@ void SliceLayer::ValidateTensorShapesFromInputs()
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetTensorInfo().GetShape() });
- ARMNN_ASSERT(inferredShapes.size() == 1);
+ if (inferredShapes.size() != 1)
+ {
+ throw armnn::LayerValidationException("inferredShapes has "
+ + std::to_string(inferredShapes.size()) +
+ " elements - should only have 1.");
+ }
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "SliceLayer");
}
@@ -52,7 +57,12 @@ void SliceLayer::ValidateTensorShapesFromInputs()
std::vector<TensorShape> SliceLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
IgnoreUnused(inputShapes);
- ARMNN_ASSERT(inputShapes.size() == 1);
+
+ if (inputShapes.size() != 1)
+ {
+ throw armnn::Exception("inputShapes' size is \"" + std::to_string(inputShapes.size()) +
+ "\" - should be \"1\".");
+ }
TensorShape outputShape(armnn::numeric_cast<unsigned int>(m_Param.m_Size.size()), m_Param.m_Size.data());
diff --git a/src/armnn/layers/SoftmaxLayer.cpp b/src/armnn/layers/SoftmaxLayer.cpp
index 5f68278454..f0d5e4a1da 100644
--- a/src/armnn/layers/SoftmaxLayer.cpp
+++ b/src/armnn/layers/SoftmaxLayer.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "SoftmaxLayer.hpp"
@@ -41,7 +41,12 @@ void SoftmaxLayer::ValidateTensorShapesFromInputs()
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetTensorInfo().GetShape() });
- ARMNN_ASSERT(inferredShapes.size() == 1);
+ if (inferredShapes.size() != 1)
+ {
+ throw armnn::LayerValidationException("inferredShapes has "
+ + std::to_string(inferredShapes.size()) +
+ " elements - should only have 1.");
+ }
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "SoftmaxLayer");
}
diff --git a/src/armnn/layers/SpaceToBatchNdLayer.cpp b/src/armnn/layers/SpaceToBatchNdLayer.cpp
index 277fc4479d..80728d0503 100644
--- a/src/armnn/layers/SpaceToBatchNdLayer.cpp
+++ b/src/armnn/layers/SpaceToBatchNdLayer.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2018-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2018-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -71,7 +71,12 @@ void SpaceToBatchNdLayer::ValidateTensorShapesFromInputs()
std::vector<TensorShape> inferredShapes = InferOutputShapes({
GetInputSlot(0).GetTensorInfo().GetShape() });
- ARMNN_ASSERT(inferredShapes.size() == 1);
+ if (inferredShapes.size() != 1)
+ {
+ throw armnn::LayerValidationException("inferredShapes has "
+ + std::to_string(inferredShapes.size()) +
+ " elements - should only have 1.");
+ }
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "SpaceToBatchNdLayer");
}
diff --git a/src/armnn/layers/SpaceToDepthLayer.cpp b/src/armnn/layers/SpaceToDepthLayer.cpp
index c86758f67f..0083ad91c9 100644
--- a/src/armnn/layers/SpaceToDepthLayer.cpp
+++ b/src/armnn/layers/SpaceToDepthLayer.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2019-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2019-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -43,7 +43,11 @@ SpaceToDepthLayer* SpaceToDepthLayer::Clone(Graph& graph) const
std::vector<TensorShape> SpaceToDepthLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
- ARMNN_ASSERT(inputShapes.size() == 1);
+ if (inputShapes.size() != 1)
+ {
+ throw armnn::Exception("inputShapes' size is \"" + std::to_string(inputShapes.size()) +
+ "\" - should be \"1\".");
+ }
TensorShape inputShape = inputShapes[0];
TensorShape outputShape(inputShape);
@@ -72,7 +76,12 @@ void SpaceToDepthLayer::ValidateTensorShapesFromInputs()
std::vector<TensorShape> inferredShapes = InferOutputShapes({
GetInputSlot(0).GetTensorInfo().GetShape() });
- ARMNN_ASSERT(inferredShapes.size() == 1);
+ if (inferredShapes.size() != 1)
+ {
+ throw armnn::LayerValidationException("inferredShapes has "
+ + std::to_string(inferredShapes.size()) +
+ " elements - should only have 1.");
+ }
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "SpaceToDepthLayer");
}
diff --git a/src/armnn/layers/SplitterLayer.cpp b/src/armnn/layers/SplitterLayer.cpp
index f8a2ae0e62..8a24e0df1f 100644
--- a/src/armnn/layers/SplitterLayer.cpp
+++ b/src/armnn/layers/SplitterLayer.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "SplitterLayer.hpp"
@@ -188,7 +188,10 @@ void SplitterLayer::CreateTensorHandles(const TensorHandleFactoryRegistry& regis
else
{
ITensorHandleFactory* handleFactory = registry.GetFactory(factoryId);
- ARMNN_ASSERT(handleFactory);
+ if (!handleFactory)
+ {
+ throw armnn::NullPointerException("handleFactory is returning a nullptr.");
+ }
CreateTensors(registry, *handleFactory, isMemoryManaged);
}
}
@@ -200,8 +203,14 @@ SplitterLayer* SplitterLayer::Clone(Graph& graph) const
std::vector<TensorShape> SplitterLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
- IgnoreUnused(inputShapes);
- ARMNN_ASSERT(inputShapes.size() == m_Param.GetNumViews());
+ if (inputShapes.size() != m_Param.GetNumViews())
+ {
+ throw armnn::Exception("inputShapes' and m_NumViews' sizes do not match (\""
+ + std::to_string(inputShapes.size()) +
+ "\" vs \""
+ + std::to_string(m_Param.GetNumViews()) + "\")");
+ }
+
std::vector<TensorShape> outShapes;
//Output shapes must match View shapes.
for (unsigned int viewIdx = 0; viewIdx < m_Param.GetNumViews(); viewIdx++)
@@ -228,7 +237,13 @@ void SplitterLayer::ValidateTensorShapesFromInputs()
auto inferredShapes = InferOutputShapes(views);
- ARMNN_ASSERT(inferredShapes.size() == m_Param.GetNumViews());
+ if (inferredShapes.size() != m_Param.GetNumViews())
+ {
+ throw armnn::LayerValidationException("inferredShapes' size and m_NumViews do not match (\""
+ + std::to_string(inferredShapes.size()) +
+ "\" vs \""
+ + std::to_string(m_Param.GetNumViews()) + "\")");
+ }
for (unsigned int viewIdx = 0; viewIdx < m_Param.GetNumViews(); viewIdx++)
{
diff --git a/src/armnn/layers/StackLayer.cpp b/src/armnn/layers/StackLayer.cpp
index 3c5a216eb3..ea49949b69 100644
--- a/src/armnn/layers/StackLayer.cpp
+++ b/src/armnn/layers/StackLayer.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2019-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2019-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "StackLayer.hpp"
@@ -32,15 +32,19 @@ StackLayer* StackLayer::Clone(Graph& graph) const
return CloneBase<StackLayer>(graph, m_Param, GetName());
}
-std::vector<TensorShape> StackLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
+std::vector<TensorShape> StackLayer::InferOutputShapes(const std::vector<TensorShape>&) const
{
- IgnoreUnused(inputShapes);
-
const TensorShape& inputShape = m_Param.m_InputShape;
const unsigned int inputNumDimensions = inputShape.GetNumDimensions();
const unsigned int axis = m_Param.m_Axis;
- ARMNN_ASSERT(axis <= inputNumDimensions);
+ if (axis > inputNumDimensions)
+ {
+ throw armnn::Exception("axis must not be greater than input dimensions (\""
+ + std::to_string(axis) +
+ "\" vs \""
+ + std::to_string(inputNumDimensions) + "\").");
+ }
std::vector<unsigned int> dimensionSizes(inputNumDimensions + 1, 0);
for (unsigned int i = 0; i < axis; ++i)
@@ -90,7 +94,12 @@ void StackLayer::ValidateTensorShapesFromInputs()
auto inferredShapes = InferOutputShapes(inputShapes);
- ARMNN_ASSERT(inferredShapes.size() == 1);
+ if (inferredShapes.size() != 1)
+ {
+ throw armnn::LayerValidationException("inferredShapes has "
+ + std::to_string(inferredShapes.size()) +
+ " elements - should only have 1.");
+ }
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "StackLayer");
}
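The axis check above guards the stack output-shape rule: a new axis of size numInputs is inserted at position axis, so axis may be at most the input rank. A minimal standalone sketch (hypothetical helper, not ArmNN code):

// Standalone sketch of the stack output-shape rule.
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <stdexcept>
#include <vector>

std::vector<uint32_t> InferStackShape(const std::vector<uint32_t>& inputShape,
                                      uint32_t axis, uint32_t numInputs)
{
    if (axis > inputShape.size())
    {
        throw std::invalid_argument("axis must not be greater than input dimensions.");
    }

    // Insert the new axis holding the number of stacked tensors.
    std::vector<uint32_t> output(inputShape);
    output.insert(output.begin() + static_cast<std::ptrdiff_t>(axis), numInputs);
    return output;
}

int main()
{
    // Stacking 4 tensors of shape {3, 2} along axis 1 gives {3, 4, 2}.
    for (auto d : InferStackShape({3, 2}, 1, 4))
    {
        std::cout << d << ' ';   // prints: 3 4 2
    }
    std::cout << '\n';
}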
diff --git a/src/armnn/layers/StridedSliceLayer.cpp b/src/armnn/layers/StridedSliceLayer.cpp
index 16aeab5f5a..c348951178 100644
--- a/src/armnn/layers/StridedSliceLayer.cpp
+++ b/src/armnn/layers/StridedSliceLayer.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2018-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2018-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "StridedSliceLayer.hpp"
@@ -47,7 +47,11 @@ StridedSliceLayer* StridedSliceLayer::Clone(Graph& graph) const
std::vector<TensorShape> StridedSliceLayer::InferOutputShapes(
const std::vector<TensorShape>& inputShapes) const
{
- ARMNN_ASSERT(inputShapes.size() == 1);
+ if (inputShapes.size() != 1)
+ {
+ throw armnn::Exception("inputShapes' size is \"" + std::to_string(inputShapes.size()) +
+ "\" - should be \"1\".");
+ }
TensorShape inputShape = inputShapes[0];
std::vector<unsigned int> outputShape;
@@ -106,7 +110,12 @@ void StridedSliceLayer::ValidateTensorShapesFromInputs()
auto inferredShapes = InferOutputShapes({GetInputSlot(0).GetTensorInfo().GetShape()});
- ARMNN_ASSERT(inferredShapes.size() == 1);
+ if (inferredShapes.size() != 1)
+ {
+ throw armnn::LayerValidationException("inferredShapes has "
+ + std::to_string(inferredShapes.size()) +
+ " elements - should only have 1.");
+ }
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "StridedSliceLayer");
}
diff --git a/src/armnn/layers/SwitchLayer.cpp b/src/armnn/layers/SwitchLayer.cpp
index 031dcec29a..afb2753e96 100644
--- a/src/armnn/layers/SwitchLayer.cpp
+++ b/src/armnn/layers/SwitchLayer.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2019-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2019-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "SwitchLayer.hpp"
@@ -37,14 +37,22 @@ void SwitchLayer::ValidateTensorShapesFromInputs()
VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
- ARMNN_ASSERT_MSG(GetNumOutputSlots() == 2, "SwitchLayer: The layer should return 2 outputs.");
+ if (GetNumOutputSlots() != 2)
+ {
+ throw armnn::LayerValidationException("SwitchLayer: The layer should return 2 outputs.");
+ }
// Assuming first input is the Input and second input is the Constant
std::vector<TensorShape> inferredShapes = InferOutputShapes({
GetInputSlot(0).GetTensorInfo().GetShape(),
GetInputSlot(1).GetTensorInfo().GetShape()});
- ARMNN_ASSERT(inferredShapes.size() == 2);
+ if (inferredShapes.size() != 2)
+ {
+ throw armnn::LayerValidationException("inferredShapes has "
+ + std::to_string(inferredShapes.size()) +
+ " element(s) - should only have 2.");
+ }
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "SwitchLayer");
diff --git a/src/armnn/layers/TileLayer.cpp b/src/armnn/layers/TileLayer.cpp
index d3629002e0..8e07478a54 100644
--- a/src/armnn/layers/TileLayer.cpp
+++ b/src/armnn/layers/TileLayer.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2023-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -31,7 +31,12 @@ TileLayer* TileLayer::Clone(armnn::Graph &graph) const
std::vector<TensorShape> TileLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
- ARMNN_ASSERT(inputShapes.size() == 1);
+ if (inputShapes.size() != 1)
+ {
+ throw armnn::Exception("inputShapes' size is \"" + std::to_string(inputShapes.size()) +
+ "\" - should be \"1\".");
+ }
+
const TensorShape& inputShape = inputShapes[0];
uint32_t numberOfDimensions = inputShape.GetNumDimensions();
@@ -64,9 +69,14 @@ void TileLayer::ValidateTensorShapesFromInputs()
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetTensorInfo().GetShape() });
- ARMNN_ASSERT(inferredShapes.size() == 1);
+ if (inferredShapes.size() != 1)
+ {
+ throw armnn::LayerValidationException("inferredShapes has "
+ + std::to_string(inferredShapes.size()) +
+ " elements - should only have 1.");
+ }
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "TileLayer");
}
-}
\ No newline at end of file
+}
diff --git a/src/armnn/layers/TransposeConvolution2dLayer.cpp b/src/armnn/layers/TransposeConvolution2dLayer.cpp
index 3a7e8b889e..21dcf1f1d6 100644
--- a/src/armnn/layers/TransposeConvolution2dLayer.cpp
+++ b/src/armnn/layers/TransposeConvolution2dLayer.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2019-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2019-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -24,14 +24,20 @@ TransposeConvolution2dLayer::TransposeConvolution2dLayer(const TransposeConvolut
std::unique_ptr<IWorkload> TransposeConvolution2dLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
- ARMNN_ASSERT_MSG(m_Weight != nullptr, "TransposeConvolution2dLayer: Weights data should not be null.");
+ if (!m_Weight)
+ {
+ throw armnn::NullPointerException("TransposeConvolution2dLayer: Weights data should not be null.");
+ }
TransposeConvolution2dQueueDescriptor descriptor;
descriptor.m_Weight = m_Weight.get();
if (m_Param.m_BiasEnabled)
{
- ARMNN_ASSERT_MSG(m_Bias != nullptr, "TransposeConvolution2dLayer: Bias data should not be null.");
+ if (!m_Bias)
+ {
+ throw armnn::NullPointerException("TransposeConvolution2dLayer: Bias data should not be null.");
+ }
descriptor.m_Bias = m_Bias.get();
}
@@ -57,11 +63,19 @@ TransposeConvolution2dLayer* TransposeConvolution2dLayer::Clone(Graph& graph) co
std::vector<TensorShape> TransposeConvolution2dLayer::InferOutputShapes(
const std::vector<TensorShape>& inputShapes) const
{
- ARMNN_ASSERT(inputShapes.size() == 2);
+ if (inputShapes.size() != 2)
+ {
+ throw armnn::Exception("inputShapes' size is \"" + std::to_string(inputShapes.size()) +
+ "\" - should be \"2\".");
+ }
+
const TensorShape& inputShape = inputShapes[0];
const TensorShape& kernelShape = inputShapes[1];
- ARMNN_ASSERT_MSG(inputShape.GetNumDimensions() == 4, "Transpose convolutions will always have 4D input");
+ if (inputShape.GetNumDimensions() != 4)
+ {
+ throw armnn::Exception("Transpose convolutions will always have 4D input");
+ }
DataLayoutIndexed dataLayoutIndex(m_Param.m_DataLayout);
@@ -95,7 +109,10 @@ void TransposeConvolution2dLayer::ValidateTensorShapesFromInputs()
VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
- ARMNN_ASSERT_MSG(m_Weight != nullptr, "TransposeConvolution2dLayer: Weight data cannot be null.");
+ if (!m_Weight)
+ {
+ throw armnn::LayerValidationException("TransposeConvolution2dLayer: Weight data cannot be null.");
+ }
std::vector<TensorShape> expectedOutputShape;
std::vector<TensorShape> outputShapeGivenAsInput;
@@ -103,7 +120,12 @@ void TransposeConvolution2dLayer::ValidateTensorShapesFromInputs()
expectedOutputShape = InferOutputShapes({GetInputSlot(0).GetTensorInfo().GetShape(),
m_Weight->GetTensorInfo().GetShape() });
- ARMNN_ASSERT(expectedOutputShape.size() == 1);
+ if (expectedOutputShape.size() != 1)
+ {
+ throw armnn::LayerValidationException("expectedOutputShape' size is "
+ + std::to_string(expectedOutputShape.size()) +
+ " - should be \"1\".");
+ }
// If output_shape was specified then use it rather than calculate an inferred output shape.
if (m_Param.m_OutputShapeEnabled)
@@ -112,10 +134,19 @@ void TransposeConvolution2dLayer::ValidateTensorShapesFromInputs()
m_Param.m_OutputShape.data());
outputShapeGivenAsInput.push_back(shapeAsTensorShape);
- ARMNN_ASSERT(outputShapeGivenAsInput.size() == 1);
- ARMNN_ASSERT_MSG(expectedOutputShape == outputShapeGivenAsInput,
- "TransposeConvolution2dLayer: output calculated by InferOutputShapes and "
- "the output given as an input parameter to the layer are not matching");
+ if (outputShapeGivenAsInput.size() != 1)
+ {
+ throw armnn::LayerValidationException("outputShapeGivenAsInput' size is "
+ + std::to_string(outputShapeGivenAsInput.size()) +
+ " - should be \"1\".");
+ }
+
+ if (expectedOutputShape != outputShapeGivenAsInput)
+ {
+ throw armnn::LayerValidationException("TransposeConvolution2dLayer: "
+ "output calculated by InferOutputShapes and the output given "
+ "as an input parameter to the layer are not matching");
+ }
}
ValidateAndCopyShape(outputShape, expectedOutputShape[0], m_ShapeInferenceMethod, "TransposeConvolution2dLayer");
diff --git a/src/armnn/layers/TransposeLayer.cpp b/src/armnn/layers/TransposeLayer.cpp
index 58e570ab38..f0b7139b26 100644
--- a/src/armnn/layers/TransposeLayer.cpp
+++ b/src/armnn/layers/TransposeLayer.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2020-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -37,7 +37,12 @@ TransposeLayer* TransposeLayer::Clone(Graph& graph) const
std::vector<TensorShape> TransposeLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
- ARMNN_ASSERT(inputShapes.size() == 1);
+ if (inputShapes.size() != 1)
+ {
+ throw armnn::Exception("inputShapes' size is \"" + std::to_string(inputShapes.size()) +
+ "\" - should be \"1\".");
+ }
+
const TensorShape& inShape = inputShapes[0];
return std::vector<TensorShape> ({armnnUtils::TransposeTensorShape(inShape, m_Param.m_DimMappings)});
}
@@ -52,7 +57,12 @@ void TransposeLayer::ValidateTensorShapesFromInputs()
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetTensorInfo().GetShape() });
- ARMNN_ASSERT(inferredShapes.size() == 1);
+ if (inferredShapes.size() != 1)
+ {
+ throw armnn::LayerValidationException("inferredShapes has "
+ + std::to_string(inferredShapes.size()) +
+ " elements - should only have 1.");
+ }
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "TransposeLayer");
}
diff --git a/src/armnn/layers/UnidirectionalSequenceLstmLayer.cpp b/src/armnn/layers/UnidirectionalSequenceLstmLayer.cpp
index 75f027e32d..68a0d8e2c2 100644
--- a/src/armnn/layers/UnidirectionalSequenceLstmLayer.cpp
+++ b/src/armnn/layers/UnidirectionalSequenceLstmLayer.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2021-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "UnidirectionalSequenceLstmLayer.hpp"
@@ -150,7 +150,9 @@ UnidirectionalSequenceLstmLayer* UnidirectionalSequenceLstmLayer::Clone(Graph& g
std::vector<TensorShape> UnidirectionalSequenceLstmLayer::InferOutputShapes(
const std::vector<TensorShape>& inputShapes) const
{
- ARMNN_ASSERT(inputShapes.size() == 3);
+ ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(inputShapes.size() == 3,
+ "inputShapes' size is \"" + std::to_string(inputShapes.size()) +
+ "\" - should be \"3\".");
// Get input values for validation
unsigned int outputSize = inputShapes[1][1];
@@ -181,94 +183,178 @@ void UnidirectionalSequenceLstmLayer::ValidateTensorShapesFromInputs()
GetInputSlot(2).GetTensorInfo().GetShape()
});
- ARMNN_ASSERT(inferredShapes.size() == 1);
+ if (inferredShapes.size() != 1)
+ {
+ throw armnn::LayerValidationException("inferredShapes has "
+ + std::to_string(inferredShapes.size()) +
+ " elements - should only have 1.");
+ }
// Check if the weights are nullptr
- ARMNN_ASSERT_MSG(m_BasicParameters.m_InputToForgetWeights != nullptr,
- "UnidirectionalSequenceLstmLayer: m_BasicParameters.m_InputToForgetWeights should not be null.");
- ARMNN_ASSERT_MSG(m_BasicParameters.m_InputToCellWeights != nullptr,
- "UnidirectionalSequenceLstmLayer: m_BasicParameters.m_InputToCellWeights should not be null.");
- ARMNN_ASSERT_MSG(m_BasicParameters.m_InputToOutputWeights != nullptr,
- "UnidirectionalSequenceLstmLayer: m_BasicParameters.m_InputToOutputWeights should not be null.");
- ARMNN_ASSERT_MSG(m_BasicParameters.m_RecurrentToForgetWeights != nullptr,
- "UnidirectionalSequenceLstmLayer: m_BasicParameters.m_RecurrentToForgetWeights "
- "should not be null.");
- ARMNN_ASSERT_MSG(m_BasicParameters.m_RecurrentToCellWeights != nullptr,
- "UnidirectionalSequenceLstmLayer: m_BasicParameters.m_RecurrentToCellWeights should not be null.");
- ARMNN_ASSERT_MSG(m_BasicParameters.m_RecurrentToOutputWeights != nullptr,
- "UnidirectionalSequenceLstmLayer: m_BasicParameters.m_RecurrentToOutputWeights "
- "should not be null.");
- ARMNN_ASSERT_MSG(m_BasicParameters.m_ForgetGateBias != nullptr,
- "UnidirectionalSequenceLstmLayer: m_BasicParameters.m_ForgetGateBias should not be null.");
- ARMNN_ASSERT_MSG(m_BasicParameters.m_CellBias != nullptr,
- "UnidirectionalSequenceLstmLayer: m_BasicParameters.m_CellBias should not be null.");
- ARMNN_ASSERT_MSG(m_BasicParameters.m_OutputGateBias != nullptr,
- "UnidirectionalSequenceLstmLayer: m_BasicParameters.m_OutputGateBias should not be null.");
+ if (!m_BasicParameters.m_InputToForgetWeights)
+ {
+ throw armnn::LayerValidationException("UnidirectionalSequenceLstmLayer: "
+ "m_BasicParameters.m_InputToForgetWeights should not be null.");
+ }
+
+ if (!m_BasicParameters.m_InputToCellWeights)
+ {
+ throw armnn::LayerValidationException("UnidirectionalSequenceLstmLayer: "
+ "m_BasicParameters.m_InputToCellWeights should not be null.");
+ }
+
+ if (!m_BasicParameters.m_InputToOutputWeights)
+ {
+ throw armnn::LayerValidationException("UnidirectionalSequenceLstmLayer: "
+ "m_BasicParameters.m_InputToOutputWeights should not be null.");
+ }
+
+ if (!m_BasicParameters.m_RecurrentToForgetWeights)
+ {
+ throw armnn::LayerValidationException("UnidirectionalSequenceLstmLayer: "
+ "m_BasicParameters.m_RecurrentToForgetWeights should not be null.");
+ }
+
+ if (!m_BasicParameters.m_RecurrentToCellWeights)
+ {
+ throw armnn::LayerValidationException("UnidirectionalSequenceLstmLayer: "
+ "m_BasicParameters.m_RecurrentToCellWeights should not be null.");
+ }
+
+ if (!m_BasicParameters.m_RecurrentToOutputWeights)
+ {
+ throw armnn::LayerValidationException("UnidirectionalSequenceLstmLayer: "
+ "m_BasicParameters.m_RecurrentToOutputWeights should not be null.");
+ }
+
+ if (!m_BasicParameters.m_ForgetGateBias)
+ {
+ throw armnn::LayerValidationException("UnidirectionalSequenceLstmLayer: "
+ "m_BasicParameters.m_ForgetGateBias should not be null.");
+ }
+
+ if (!m_BasicParameters.m_CellBias)
+ {
+ throw armnn::LayerValidationException("UnidirectionalSequenceLstmLayer: "
+ "m_BasicParameters.m_CellBias should not be null.");
+ }
+
+ if (!m_BasicParameters.m_OutputGateBias)
+ {
+ throw armnn::LayerValidationException("UnidirectionalSequenceLstmLayer: "
+ "m_BasicParameters.m_OutputGateBias should not be null.");
+ }
if (!m_Param.m_CifgEnabled)
{
- ARMNN_ASSERT_MSG(m_CifgParameters.m_InputToInputWeights != nullptr,
- "UnidirectionalSequenceLstmLayer: m_CifgParameters.m_InputToInputWeights should not be null.");
- ARMNN_ASSERT_MSG(m_CifgParameters.m_RecurrentToInputWeights != nullptr,
- "UnidirectionalSequenceLstmLayer: m_CifgParameters.m_RecurrentToInputWeights "
- "should not be null.");
- ARMNN_ASSERT_MSG(m_CifgParameters.m_InputGateBias != nullptr,
- "UnidirectionalSequenceLstmLayer: m_CifgParameters.m_InputGateBias should not be null.");
+ if (!m_CifgParameters.m_InputToInputWeights)
+ {
+ throw armnn::LayerValidationException("UnidirectionalSequenceLstmLayer: "
+ "m_CifgParameters.m_InputToInputWeights should not be null.");
+ }
+
+ if (!m_CifgParameters.m_RecurrentToInputWeights)
+ {
+ throw armnn::LayerValidationException("UnidirectionalSequenceLstmLayer: "
+ "m_CifgParameters.m_RecurrentToInputWeights should not be null.");
+ }
+
+ if (!m_CifgParameters.m_InputGateBias)
+ {
+ throw armnn::LayerValidationException("UnidirectionalSequenceLstmLayer: "
+ "m_CifgParameters.m_InputGateBias should not be null.");
+ }
}
else
{
- ARMNN_ASSERT_MSG(m_CifgParameters.m_InputToInputWeights == nullptr,
- "UnidirectionalSequenceLstmLayer: m_CifgParameters.m_InputToInputWeights should not have a value "
- "when CIFG is enabled.");
- ARMNN_ASSERT_MSG(m_CifgParameters.m_RecurrentToInputWeights == nullptr,
- "UnidirectionalSequenceLstmLayer: m_CifgParameters.m_RecurrentToInputWeights should not have a value "
- "when CIFG is enabled.");
- ARMNN_ASSERT_MSG(m_CifgParameters.m_InputGateBias == nullptr,
- "UnidirectionalSequenceLstmLayer: m_CifgParameters.m_InputGateBias should not have a value "
- "when CIFG is enabled.");
+ if (m_CifgParameters.m_InputToInputWeights)
+ {
+ throw armnn::LayerValidationException("UnidirectionalSequenceLstmLayer: "
+ "m_CifgParameters.m_InputToInputWeights should not have a value "
+ "when CIFG is enabled.");
+ }
+
+ if (m_CifgParameters.m_RecurrentToInputWeights)
+ {
+ throw armnn::LayerValidationException("UnidirectionalSequenceLstmLayer: "
+ "m_CifgParameters.m_RecurrentToInputWeights should not have a value "
+ "when CIFG is enabled.");
+ }
+
+ if (m_CifgParameters.m_InputGateBias)
+ {
+ throw armnn::LayerValidationException("UnidirectionalSequenceLstmLayer: "
+ "m_CifgParameters.m_InputGateBias should not have a value "
+ "when CIFG is enabled.");
+ }
}
if (m_Param.m_ProjectionEnabled)
{
- ARMNN_ASSERT_MSG(m_ProjectionParameters.m_ProjectionWeights != nullptr,
- "UnidirectionalSequenceLstmLayer: m_ProjectionParameters.m_ProjectionWeights "
- "should not be null.");
+ if (!m_ProjectionParameters.m_ProjectionWeights)
+ {
+ throw armnn::LayerValidationException("UnidirectionalSequenceLstmLayer: "
+ "m_ProjectionParameters.m_ProjectionWeights should not be null.");
+ }
}
if (m_Param.m_PeepholeEnabled)
{
if (!m_Param.m_CifgEnabled)
{
- ARMNN_ASSERT_MSG(m_PeepholeParameters.m_CellToInputWeights != nullptr,
- "UnidirectionalSequenceLstmLayer: m_PeepholeParameters.m_CellToInputWeights "
- "should not be null "
- "when Peephole is enabled and CIFG is disabled.");
+ if (!m_PeepholeParameters.m_CellToInputWeights)
+ {
+ throw armnn::LayerValidationException("UnidirectionalSequenceLstmLayer: "
+ "m_PeepholeParameters.m_CellToInputWeights should not be null "
+ "when Peephole is enabled and CIFG is disabled.");
+ }
+ }
+
+ if (!m_PeepholeParameters.m_CellToForgetWeights)
+ {
+ throw armnn::LayerValidationException("UnidirectionalSequenceLstmLayer: "
+ "m_PeepholeParameters.m_CellToForgetWeights should not be null.");
+ }
+
+ if (!m_PeepholeParameters.m_CellToOutputWeights)
+ {
+ throw armnn::LayerValidationException("UnidirectionalSequenceLstmLayer: "
+ "m_PeepholeParameters.m_CellToOutputWeights should not be null.");
}
- ARMNN_ASSERT_MSG(m_PeepholeParameters.m_CellToForgetWeights != nullptr,
- "UnidirectionalSequenceLstmLayer: m_PeepholeParameters.m_CellToForgetWeights "
- "should not be null.");
- ARMNN_ASSERT_MSG(m_PeepholeParameters.m_CellToOutputWeights != nullptr,
- "UnidirectionalSequenceLstmLayer: m_PeepholeParameters.m_CellToOutputWeights "
- "should not be null.");
}
if (m_Param.m_LayerNormEnabled)
{
if(!m_Param.m_CifgEnabled)
{
- ARMNN_ASSERT_MSG(m_LayerNormParameters.m_InputLayerNormWeights != nullptr,
- "UnidirectionalSequenceLstmLayer: m_LayerNormParameters.m_inputLayerNormWeights "
- "should not be null.");
+ if (!m_LayerNormParameters.m_InputLayerNormWeights)
+ {
+ throw armnn::LayerValidationException("UnidirectionalSequenceLstmLayer: "
+ "m_LayerNormParameters.m_inputLayerNormWeights "
+ "should not be null.");
+ }
+ }
+
+ if (!m_LayerNormParameters.m_ForgetLayerNormWeights)
+ {
+ throw armnn::LayerValidationException("UnidirectionalSequenceLstmLayer: "
+ "m_LayerNormParameters.m_forgetLayerNormWeights "
+ "should not be null.");
+ }
+
+ if (!m_LayerNormParameters.m_CellLayerNormWeights)
+ {
+ throw armnn::LayerValidationException("UnidirectionalSequenceLstmLayer: "
+ "m_LayerNormParameters.m_cellLayerNormWeights "
+ "should not be null.");
+ }
+
+ if (!m_LayerNormParameters.m_OutputLayerNormWeights)
+ {
+ throw armnn::LayerValidationException("UnidirectionalSequenceLstmLayer: "
+ "m_LayerNormParameters.m_outputLayerNormWeights "
+ "should not be null.");
}
- ARMNN_ASSERT_MSG(m_LayerNormParameters.m_ForgetLayerNormWeights != nullptr,
- "UnidirectionalSequenceLstmLayer: m_LayerNormParameters.m_forgetLayerNormWeights "
- "should not be null.");
- ARMNN_ASSERT_MSG(m_LayerNormParameters.m_CellLayerNormWeights != nullptr,
- "UnidirectionalSequenceLstmLayer: m_LayerNormParameters.m_cellLayerNormWeights "
- "should not be null.");
- ARMNN_ASSERT_MSG(m_LayerNormParameters.m_OutputLayerNormWeights != nullptr,
- "UnidirectionalSequenceLstmLayer: m_LayerNormParameters.m_outputLayerNormWeights "
- "should not be null.");
}
ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "UnidirectionalSequenceLstmLayer");
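The checks above replace ARMNN_ASSERT/ARMNN_ASSERT_MSG with explicit validation: the shape-count check uses ARMNN_THROW_INVALIDARG_MSG_IF_FALSE, while the pointer checks throw armnn::LayerValidationException directly. As a rough sketch of what the macro-based form boils down to (illustrative only - SKETCH_THROW_INVALIDARG_MSG_IF_FALSE and CheckInputShapeCount are made-up names, not the Arm NN definitions):

    #include <stdexcept>
    #include <string>

    // Illustrative stand-in for the throw-if-false pattern used in this patch.
    #define SKETCH_THROW_INVALIDARG_MSG_IF_FALSE(cond, msg) \
        do { if (!(cond)) { throw std::invalid_argument(msg); } } while (false)

    void CheckInputShapeCount(std::size_t numInputShapes)
    {
        SKETCH_THROW_INVALIDARG_MSG_IF_FALSE(numInputShapes == 3,
            "inputShapes' size is \"" + std::to_string(numInputShapes) + "\" - should be \"3\".");
    }

Unlike assert-style macros, which are typically compiled out in release builds, these checks remain active, so a malformed network fails with a catchable exception rather than undefined behaviour.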
diff --git a/src/armnn/layers/UnmapLayer.cpp b/src/armnn/layers/UnmapLayer.cpp
index cfbde211ba..a62536f3bc 100644
--- a/src/armnn/layers/UnmapLayer.cpp
+++ b/src/armnn/layers/UnmapLayer.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020,2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "UnmapLayer.hpp"
@@ -38,7 +38,11 @@ void UnmapLayer::ValidateTensorShapesFromInputs()
{
// validates that the input is connected.
VerifyLayerConnections(1, CHECK_LOCATION());
- ARMNN_ASSERT(GetNumOutputSlots() == 0);
+ if (GetNumOutputSlots() != 0)
+ {
+ throw armnn::LayerValidationException("UnmapLayer: output slots must be \"0\" - currently \""
+ + std::to_string(GetNumOutputSlots()) + "\".");
+ }
}
void UnmapLayer::ExecuteStrategy(IStrategy& strategy) const
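The null-pointer checks in this patch all follow one repeated shape: test the member, then throw armnn::LayerValidationException naming the layer and the parameter. A hypothetical helper (not part of this change - ThrowIfNull and its signature are assumptions for illustration) could express that shape once:

    #include <stdexcept>
    #include <string>

    // Illustrative only: throws when a required parameter pointer is missing.
    template <typename Ptr>
    void ThrowIfNull(const Ptr& ptr, const std::string& layerName, const std::string& paramName)
    {
        if (!ptr)
        {
            throw std::runtime_error(layerName + ": " + paramName + " should not be null.");
        }
    }

    // Usage mirroring one of the checks above:
    //   ThrowIfNull(m_BasicParameters.m_InputToForgetWeights,
    //               "UnidirectionalSequenceLstmLayer", "m_BasicParameters.m_InputToForgetWeights");

The explicit if/throw form used in the patch keeps each message visible at the call site, at the cost of some repetition.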