aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorFinn Williams <Finn.Williams@arm.com>2020-07-03 10:12:03 +0100
committerJim Flynn <jim.flynn@arm.com>2020-07-26 15:42:26 +0000
commitf24effa4995ea4c3dd91e33d4a2787e02decf8b4 (patch)
tree56e0f22cab0fd8544693b9240bd8d74426eaa454
parent8398edcfb933b638ddf4b88d84d6e188c49b1e0d (diff)
downloadarmnn-f24effa4995ea4c3dd91e33d4a2787e02decf8b4.tar.gz
IVGCVSW-5155 Update Arm NN API to allow for call to shape inference
Signed-off-by: Finn Williams <Finn.Williams@arm.com> Change-Id: I0a2babe5b5b09eb81c9900dc3a05071034a0440b
-rw-r--r--include/armnn/BackendOptions.hpp2
-rw-r--r--include/armnn/INetwork.hpp6
-rw-r--r--src/armnn/Graph.cpp10
-rw-r--r--src/armnn/Graph.hpp13
-rw-r--r--src/armnn/Layer.cpp28
-rw-r--r--src/armnn/Layer.hpp14
-rw-r--r--src/armnn/Network.cpp22
-rw-r--r--src/armnn/Network.hpp5
-rw-r--r--src/armnn/layers/AbsLayer.cpp6
-rw-r--r--src/armnn/layers/AbsLayer.hpp5
-rw-r--r--src/armnn/layers/ActivationLayer.cpp6
-rw-r--r--src/armnn/layers/ActivationLayer.hpp5
-rw-r--r--src/armnn/layers/AdditionLayer.hpp2
-rw-r--r--src/armnn/layers/ArgMinMaxLayer.cpp6
-rw-r--r--src/armnn/layers/ArgMinMaxLayer.hpp5
-rw-r--r--src/armnn/layers/BatchNormalizationLayer.cpp6
-rw-r--r--src/armnn/layers/BatchNormalizationLayer.hpp5
-rw-r--r--src/armnn/layers/BatchToSpaceNdLayer.cpp6
-rw-r--r--src/armnn/layers/BatchToSpaceNdLayer.hpp5
-rw-r--r--src/armnn/layers/ComparisonLayer.cpp6
-rw-r--r--src/armnn/layers/ComparisonLayer.hpp5
-rw-r--r--src/armnn/layers/ConcatLayer.cpp6
-rw-r--r--src/armnn/layers/ConcatLayer.hpp5
-rw-r--r--src/armnn/layers/ConstantLayer.cpp3
-rw-r--r--src/armnn/layers/ConstantLayer.hpp5
-rw-r--r--src/armnn/layers/ConvertBf16ToFp32Layer.cpp6
-rw-r--r--src/armnn/layers/ConvertBf16ToFp32Layer.hpp5
-rw-r--r--src/armnn/layers/ConvertFp16ToFp32Layer.cpp6
-rw-r--r--src/armnn/layers/ConvertFp16ToFp32Layer.hpp5
-rw-r--r--src/armnn/layers/ConvertFp32ToBf16Layer.cpp6
-rw-r--r--src/armnn/layers/ConvertFp32ToBf16Layer.hpp5
-rw-r--r--src/armnn/layers/ConvertFp32ToFp16Layer.cpp6
-rw-r--r--src/armnn/layers/ConvertFp32ToFp16Layer.hpp5
-rw-r--r--src/armnn/layers/Convolution2dLayer.cpp6
-rw-r--r--src/armnn/layers/Convolution2dLayer.hpp5
-rw-r--r--src/armnn/layers/DebugLayer.cpp6
-rw-r--r--src/armnn/layers/DebugLayer.hpp5
-rw-r--r--src/armnn/layers/DepthToSpaceLayer.cpp6
-rw-r--r--src/armnn/layers/DepthToSpaceLayer.hpp5
-rw-r--r--src/armnn/layers/DepthwiseConvolution2dLayer.cpp6
-rw-r--r--src/armnn/layers/DepthwiseConvolution2dLayer.hpp5
-rw-r--r--src/armnn/layers/DequantizeLayer.cpp6
-rw-r--r--src/armnn/layers/DequantizeLayer.hpp5
-rw-r--r--src/armnn/layers/DetectionPostProcessLayer.cpp12
-rw-r--r--src/armnn/layers/DetectionPostProcessLayer.hpp5
-rw-r--r--src/armnn/layers/DivisionLayer.hpp2
-rw-r--r--src/armnn/layers/ElementwiseBaseLayer.cpp6
-rw-r--r--src/armnn/layers/ElementwiseBaseLayer.hpp5
-rw-r--r--src/armnn/layers/ElementwiseUnaryLayer.cpp6
-rw-r--r--src/armnn/layers/ElementwiseUnaryLayer.hpp5
-rw-r--r--src/armnn/layers/FakeQuantizationLayer.cpp6
-rw-r--r--src/armnn/layers/FakeQuantizationLayer.hpp5
-rw-r--r--src/armnn/layers/FillLayer.cpp4
-rw-r--r--src/armnn/layers/FillLayer.hpp5
-rw-r--r--src/armnn/layers/FloorLayer.cpp6
-rw-r--r--src/armnn/layers/FloorLayer.hpp5
-rw-r--r--src/armnn/layers/FullyConnectedLayer.cpp8
-rw-r--r--src/armnn/layers/FullyConnectedLayer.hpp5
-rw-r--r--src/armnn/layers/GatherLayer.cpp6
-rw-r--r--src/armnn/layers/GatherLayer.hpp3
-rw-r--r--src/armnn/layers/InputLayer.cpp6
-rw-r--r--src/armnn/layers/InputLayer.hpp5
-rw-r--r--src/armnn/layers/InstanceNormalizationLayer.cpp6
-rw-r--r--src/armnn/layers/InstanceNormalizationLayer.hpp5
-rw-r--r--src/armnn/layers/L2NormalizationLayer.cpp6
-rw-r--r--src/armnn/layers/L2NormalizationLayer.hpp5
-rw-r--r--src/armnn/layers/LayerCloneBase.hpp3
-rw-r--r--src/armnn/layers/LogSoftmaxLayer.cpp6
-rw-r--r--src/armnn/layers/LogSoftmaxLayer.hpp5
-rw-r--r--src/armnn/layers/LstmLayer.cpp14
-rw-r--r--src/armnn/layers/LstmLayer.hpp5
-rw-r--r--src/armnn/layers/MaximumLayer.hpp2
-rw-r--r--src/armnn/layers/MeanLayer.cpp6
-rw-r--r--src/armnn/layers/MeanLayer.hpp5
-rw-r--r--src/armnn/layers/MemCopyLayer.cpp6
-rw-r--r--src/armnn/layers/MemCopyLayer.hpp5
-rw-r--r--src/armnn/layers/MemImportLayer.cpp6
-rw-r--r--src/armnn/layers/MemImportLayer.hpp5
-rw-r--r--src/armnn/layers/MergeLayer.cpp6
-rw-r--r--src/armnn/layers/MergeLayer.hpp5
-rw-r--r--src/armnn/layers/MergerLayer.hpp2
-rw-r--r--src/armnn/layers/MinimumLayer.hpp2
-rw-r--r--src/armnn/layers/MultiplicationLayer.hpp2
-rw-r--r--src/armnn/layers/NormalizationLayer.cpp6
-rw-r--r--src/armnn/layers/NormalizationLayer.hpp5
-rw-r--r--src/armnn/layers/OutputLayer.cpp3
-rw-r--r--src/armnn/layers/OutputLayer.hpp5
-rw-r--r--src/armnn/layers/PadLayer.cpp3
-rw-r--r--src/armnn/layers/PadLayer.hpp5
-rw-r--r--src/armnn/layers/PermuteLayer.cpp6
-rw-r--r--src/armnn/layers/PermuteLayer.hpp5
-rw-r--r--src/armnn/layers/Pooling2dLayer.cpp6
-rw-r--r--src/armnn/layers/Pooling2dLayer.hpp5
-rw-r--r--src/armnn/layers/PreCompiledLayer.cpp3
-rw-r--r--src/armnn/layers/PreCompiledLayer.hpp5
-rw-r--r--src/armnn/layers/PreluLayer.cpp6
-rw-r--r--src/armnn/layers/PreluLayer.hpp5
-rw-r--r--src/armnn/layers/QLstmLayer.cpp12
-rw-r--r--src/armnn/layers/QLstmLayer.hpp5
-rw-r--r--src/armnn/layers/QuantizeLayer.cpp6
-rw-r--r--src/armnn/layers/QuantizeLayer.hpp5
-rw-r--r--src/armnn/layers/QuantizedLstmLayer.cpp8
-rw-r--r--src/armnn/layers/QuantizedLstmLayer.hpp5
-rw-r--r--src/armnn/layers/RankLayer.cpp7
-rw-r--r--src/armnn/layers/RankLayer.hpp2
-rw-r--r--src/armnn/layers/ReshapeLayer.cpp6
-rw-r--r--src/armnn/layers/ReshapeLayer.hpp5
-rw-r--r--src/armnn/layers/ResizeLayer.cpp6
-rw-r--r--src/armnn/layers/ResizeLayer.hpp5
-rw-r--r--src/armnn/layers/RsqrtLayer.cpp6
-rw-r--r--src/armnn/layers/RsqrtLayer.hpp5
-rw-r--r--src/armnn/layers/SliceLayer.cpp6
-rw-r--r--src/armnn/layers/SliceLayer.hpp5
-rw-r--r--src/armnn/layers/SoftmaxLayer.cpp6
-rw-r--r--src/armnn/layers/SoftmaxLayer.hpp5
-rw-r--r--src/armnn/layers/SpaceToBatchNdLayer.cpp6
-rw-r--r--src/armnn/layers/SpaceToBatchNdLayer.hpp5
-rw-r--r--src/armnn/layers/SpaceToDepthLayer.cpp6
-rw-r--r--src/armnn/layers/SpaceToDepthLayer.hpp5
-rw-r--r--src/armnn/layers/SplitterLayer.cpp6
-rw-r--r--src/armnn/layers/SplitterLayer.hpp5
-rw-r--r--src/armnn/layers/StackLayer.cpp6
-rw-r--r--src/armnn/layers/StackLayer.hpp5
-rw-r--r--src/armnn/layers/StandInLayer.cpp3
-rw-r--r--src/armnn/layers/StandInLayer.hpp5
-rw-r--r--src/armnn/layers/StridedSliceLayer.cpp6
-rw-r--r--src/armnn/layers/StridedSliceLayer.hpp5
-rw-r--r--src/armnn/layers/SubtractionLayer.hpp2
-rw-r--r--src/armnn/layers/SwitchLayer.cpp8
-rw-r--r--src/armnn/layers/SwitchLayer.hpp5
-rw-r--r--src/armnn/layers/TransposeConvolution2dLayer.cpp6
-rw-r--r--src/armnn/layers/TransposeConvolution2dLayer.hpp5
-rw-r--r--src/armnn/layers/TransposeLayer.cpp6
-rw-r--r--src/armnn/layers/TransposeLayer.hpp5
-rw-r--r--src/armnn/test/ShapeInferenceTests.cpp88
135 files changed, 432 insertions, 427 deletions
diff --git a/include/armnn/BackendOptions.hpp b/include/armnn/BackendOptions.hpp
index d7ccbd4b57..44438b2f7c 100644
--- a/include/armnn/BackendOptions.hpp
+++ b/include/armnn/BackendOptions.hpp
@@ -11,6 +11,8 @@
namespace armnn
{
+struct BackendOptions;
+using NetworkOptions = std::vector<BackendOptions>;
/// Struct for the users to pass backend specific options
struct BackendOptions
diff --git a/include/armnn/INetwork.hpp b/include/armnn/INetwork.hpp
index c0c52f974a..6a143b05fb 100644
--- a/include/armnn/INetwork.hpp
+++ b/include/armnn/INetwork.hpp
@@ -4,6 +4,7 @@
//
#pragma once
+#include <armnn/BackendOptions.hpp>
#include <armnn/Deprecated.hpp>
#include <armnn/DescriptorsFwd.hpp>
#include <armnn/ILayerVisitor.hpp>
@@ -11,7 +12,6 @@
#include <armnn/Optional.hpp>
#include <armnn/TensorFwd.hpp>
#include <armnn/Types.hpp>
-#include <armnn/Deprecated.hpp>
#include <memory>
#include <vector>
@@ -105,8 +105,8 @@ using INetworkPtr = std::unique_ptr<INetwork, void(*)(INetwork* network)>;
class INetwork
{
public:
- static INetwork* CreateRaw();
- static INetworkPtr Create();
+ static INetwork* CreateRaw(NetworkOptions networkOptions = {});
+ static INetworkPtr Create(NetworkOptions networkOptions = {});
static void Destroy(INetwork* network);
virtual Status PrintGraph() = 0;
diff --git a/src/armnn/Graph.cpp b/src/armnn/Graph.cpp
index cc3384748a..2a60072597 100644
--- a/src/armnn/Graph.cpp
+++ b/src/armnn/Graph.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -489,7 +489,7 @@ void Graph::EraseSubgraphLayers(SubgraphView &subgraph)
subgraph.Clear();
}
-void Graph::InferTensorInfos(ShapeInferenceMethod shapeInferenceMethod)
+void Graph::InferTensorInfos()
{
for (auto&& layer : TopologicalSort())
{
@@ -511,8 +511,12 @@ void Graph::InferTensorInfos(ShapeInferenceMethod shapeInferenceMethod)
{
throw LayerValidationException("All inputs must have the TensorInfo set at this point.");
}
+
+ if (layer->m_ShapeInferenceMethod == ShapeInferenceMethod::ValidateOnly)
+ {
+ layer->ValidateTensorShapesFromInputs();
+ }
}
- layer->ValidateTensorShapesFromInputs(shapeInferenceMethod);
}
}
diff --git a/src/armnn/Graph.hpp b/src/armnn/Graph.hpp
index 9673df49a0..87e0da826f 100644
--- a/src/armnn/Graph.hpp
+++ b/src/armnn/Graph.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
@@ -93,7 +93,11 @@ public:
const Graph& m_Graph;
};
- Graph() : m_LayersInOrder(true) {}
+ Graph(bool shapeInferenceMethod = false)
+ : m_LayersInOrder(true)
+ , m_ShapeInferenceMethod(shapeInferenceMethod ? ShapeInferenceMethod::InferAndValidate :
+ ShapeInferenceMethod::ValidateOnly)
+ {}
Graph(const Graph& other);
@@ -200,7 +204,7 @@ public:
void SubstituteSubgraph(SubgraphView& subgraph, IConnectableLayer* substituteLayer);
void SubstituteSubgraph(SubgraphView& subgraph, const SubgraphView& substituteSubgraph);
- void InferTensorInfos(ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly);
+ void InferTensorInfos();
void AttachObservable(IGraphObservable* const observable, GraphEvent notifyOnEvent) {
m_Views[notifyOnEvent].emplace_back(observable);
@@ -260,6 +264,7 @@ private:
mutable bool m_LayersInOrder;
std::map<const GraphEvent, std::list<IGraphObservable*>> m_Views;
+ ShapeInferenceMethod m_ShapeInferenceMethod;
};
/// Common base class for layers in the graph.
@@ -401,6 +406,8 @@ inline LayerT* Graph::AddLayer(Args&&... args)
((LayerEnumOf<LayerT>() == LayerType::Input) || (LayerEnumOf<LayerT>() == LayerType::Output));
LayerT* const layer = new LayerInGraph<LayerT>(*this, std::forward<Args>(args)...);
+ layer->SetShapeInferenceMethod(m_ShapeInferenceMethod);
+
NotifyObservables(GraphEvent::LayerAdded, layer);
return layer;
diff --git a/src/armnn/Layer.cpp b/src/armnn/Layer.cpp
index 692ee32acd..dc211b7f2f 100644
--- a/src/armnn/Layer.cpp
+++ b/src/armnn/Layer.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "Layer.hpp"
@@ -67,6 +67,10 @@ const TensorInfo& OutputSlot::GetTensorInfo() const
bool OutputSlot::IsTensorInfoSet() const
{
+ if (GetOwningLayer().GetShapeInferenceMethod() == ShapeInferenceMethod::InferAndValidate)
+ {
+ GetOwningLayer().ValidateTensorShapesFromInputs();
+ }
return GetOutputHandler().IsTensorInfoSet();
}
@@ -191,6 +195,7 @@ Layer::Layer(unsigned int numInputSlots,
DataLayout layout,
const char* name)
: m_OutputHandlers(numOutputSlots)
+, m_ShapeInferenceMethod(ShapeInferenceMethod::ValidateOnly)
, m_LayerName(name ? name : "")
, m_Type(type)
, m_BackendId()
@@ -354,18 +359,6 @@ void Layer::VerifyLayerConnections(unsigned int expectedConnections, const Check
% GetNameStr()
% location.AsString()));
}
- if(! GetInputSlot(i).GetConnection()->IsTensorInfoSet())
- {
- throw LayerValidationException(
- boost::str(
- boost::format(
- "TensorInfo of Input connection #%1% must be set on connected OutputSlot for "
- "%2% layer %3% %4%")
- % i
- % GetLayerTypeAsCString(this->GetType())
- % GetNameStr()
- % location.AsString()));
- }
}
}
@@ -448,15 +441,6 @@ void Layer::VerifyShapeInferenceType(const TensorShape& outputShape, ShapeInfere
outputShape.AreAllDimensionsSpecified(),
"Unspecified dimension while using ShapeInferenceMethod::ValidateOnly");
}
- else
- {
- if (outputShape.GetDimensionality() == Dimensionality::Specified)
- {
- ConditionalThrow<LayerValidationException>(
- !outputShape.AreAllDimensionsSpecified(),
- "No unspecified dimension while using ShapeInferenceMethod::InferAndValidate");
- }
- }
}
void Layer::SerializeLayerParameters(ParameterStringifyFunction& fn) const
diff --git a/src/armnn/Layer.hpp b/src/armnn/Layer.hpp
index f1954b9d07..d4a24e4925 100644
--- a/src/armnn/Layer.hpp
+++ b/src/armnn/Layer.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
@@ -228,6 +228,8 @@ public:
return const_cast<OutputHandler&>(const_cast<const Layer*>(this)->GetOutputHandler(i));
}
+ ShapeInferenceMethod GetShapeInferenceMethod() const { return m_ShapeInferenceMethod; };
+
const std::vector<InputSlot>& GetInputSlots() const { return m_InputSlots; }
const std::vector<OutputSlot>& GetOutputSlots() const { return m_OutputSlots; }
@@ -277,8 +279,7 @@ public:
void VerifyLayerConnections(unsigned int expectedConnections, const CheckLocation& location) const;
- virtual void ValidateTensorShapesFromInputs(
- ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) = 0;
+ virtual void ValidateTensorShapesFromInputs() = 0;
std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
@@ -328,6 +329,11 @@ public:
}
Optional<BackendId> GetBackendHint() const { return m_BackendHint; }
+ void SetShapeInferenceMethod(ShapeInferenceMethod shapeInferenceMethod)
+ {
+ m_ShapeInferenceMethod = shapeInferenceMethod;
+ }
+
protected:
// Graph needs access to the virtual destructor.
friend class Graph;
@@ -378,6 +384,7 @@ private:
protected:
std::vector<OutputHandler> m_OutputHandlers;
+ ShapeInferenceMethod m_ShapeInferenceMethod;
private:
const std::string m_LayerName;
@@ -396,6 +403,7 @@ private:
LayerGuid m_Guid;
std::list<std::string> m_RelatedLayerNames;
+
};
// A layer user-provided data can be bound to (e.g. inputs, outputs).
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index e0607bda33..132924a19a 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -42,14 +42,14 @@
namespace armnn
{
-armnn::INetwork* INetwork::CreateRaw()
+armnn::INetwork* INetwork::CreateRaw(NetworkOptions networkOptions)
{
- return new Network();
+ return new Network(networkOptions);
}
-armnn::INetworkPtr INetwork::Create()
+armnn::INetworkPtr INetwork::Create(NetworkOptions networkOptions)
{
- return INetworkPtr(CreateRaw(), &INetwork::Destroy);
+ return INetworkPtr(CreateRaw(networkOptions), &INetwork::Destroy);
}
void INetwork::Destroy(INetwork* network)
@@ -1147,11 +1147,19 @@ IOptimizedNetworkPtr Optimize(const INetwork& inNetwork,
return optNet;
}
-
-Network::Network()
-: m_Graph(std::make_unique<Graph>())
+bool Network::GetShapeInferenceMethod()
{
+ if (m_NetworkOptions.size() > 0 && m_NetworkOptions[0].GetBackendId().Get() == "ShapeInferenceMethod")
+ {
+ return m_NetworkOptions[0].GetOption(0).GetValue().AsBool();
+ }
+
+ return false;
}
+Network::Network(NetworkOptions networkOptions)
+: m_NetworkOptions(networkOptions),
+ m_Graph(std::make_unique<Graph>(GetShapeInferenceMethod()))
+{}
Network::~Network()
{
diff --git a/src/armnn/Network.hpp b/src/armnn/Network.hpp
index 6bc0ac7650..77d6b04919 100644
--- a/src/armnn/Network.hpp
+++ b/src/armnn/Network.hpp
@@ -28,7 +28,7 @@ class Graph;
class Network final : public INetwork
{
public:
- Network();
+ Network(NetworkOptions networkOptions = {});
~Network();
const Graph& GetGraph() const { return *m_Graph; }
@@ -269,6 +269,9 @@ private:
const Optional<ConstTensor>& biases,
const char* name);
+ bool GetShapeInferenceMethod();
+ NetworkOptions m_NetworkOptions;
+
std::unique_ptr<Graph> m_Graph;
};
diff --git a/src/armnn/layers/AbsLayer.cpp b/src/armnn/layers/AbsLayer.cpp
index ccee524858..e04fcbba1a 100644
--- a/src/armnn/layers/AbsLayer.cpp
+++ b/src/armnn/layers/AbsLayer.cpp
@@ -30,18 +30,18 @@ AbsLayer* AbsLayer::Clone(Graph& graph) const
return CloneBase<AbsLayer>(graph, GetName());
}
-void AbsLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void AbsLayer::ValidateTensorShapesFromInputs()
{
VerifyLayerConnections(1, CHECK_LOCATION());
const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
- VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+ VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
ARMNN_ASSERT(inferredShapes.size() == 1);
- ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "AbsLayer");
+ ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "AbsLayer");
}
void AbsLayer::Accept(ILayerVisitor& visitor) const
diff --git a/src/armnn/layers/AbsLayer.hpp b/src/armnn/layers/AbsLayer.hpp
index ab31014e57..0e5ccb042a 100644
--- a/src/armnn/layers/AbsLayer.hpp
+++ b/src/armnn/layers/AbsLayer.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -26,8 +26,7 @@ public:
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref AbsLayer.
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
- void ValidateTensorShapesFromInputs(
- ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+ void ValidateTensorShapesFromInputs() override;
void Accept(ILayerVisitor& visitor) const override;
diff --git a/src/armnn/layers/ActivationLayer.cpp b/src/armnn/layers/ActivationLayer.cpp
index c6443b6997..d3d02c3c19 100644
--- a/src/armnn/layers/ActivationLayer.cpp
+++ b/src/armnn/layers/ActivationLayer.cpp
@@ -28,19 +28,19 @@ ActivationLayer* ActivationLayer::Clone(Graph& graph) const
return CloneBase<ActivationLayer>(graph, m_Param, GetName());
}
-void ActivationLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void ActivationLayer::ValidateTensorShapesFromInputs()
{
VerifyLayerConnections(1, CHECK_LOCATION());
const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
- VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+ VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
ARMNN_ASSERT(inferredShapes.size() == 1);
- ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "ActivationLayer");
+ ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "ActivationLayer");
}
void ActivationLayer::Accept(ILayerVisitor& visitor) const
diff --git a/src/armnn/layers/ActivationLayer.hpp b/src/armnn/layers/ActivationLayer.hpp
index 3f0d520c3c..5ffcc3e1f5 100644
--- a/src/armnn/layers/ActivationLayer.hpp
+++ b/src/armnn/layers/ActivationLayer.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
@@ -24,8 +24,7 @@ public:
/// Check if the input tensor shape(s) will lead to a valid configuration of @ref ActivationLayer.
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
- void ValidateTensorShapesFromInputs(
- ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+ void ValidateTensorShapesFromInputs() override;
void Accept(ILayerVisitor& visitor) const override;
diff --git a/src/armnn/layers/AdditionLayer.hpp b/src/armnn/layers/AdditionLayer.hpp
index 4c80e5c03a..4af576a130 100644
--- a/src/armnn/layers/AdditionLayer.hpp
+++ b/src/armnn/layers/AdditionLayer.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
diff --git a/src/armnn/layers/ArgMinMaxLayer.cpp b/src/armnn/layers/ArgMinMaxLayer.cpp
index e288d16232..bd914ec245 100644
--- a/src/armnn/layers/ArgMinMaxLayer.cpp
+++ b/src/armnn/layers/ArgMinMaxLayer.cpp
@@ -69,19 +69,19 @@ std::vector<TensorShape> ArgMinMaxLayer::InferOutputShapes(const std::vector<Ten
return std::vector<TensorShape>({ outputShape });
}
-void ArgMinMaxLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void ArgMinMaxLayer::ValidateTensorShapesFromInputs()
{
VerifyLayerConnections(1, CHECK_LOCATION());
const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
- VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+ VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
ARMNN_ASSERT(inferredShapes.size() == 1);
- ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "ArgMinMaxLayer");
+ ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "ArgMinMaxLayer");
}
void ArgMinMaxLayer::Accept(ILayerVisitor& visitor) const
diff --git a/src/armnn/layers/ArgMinMaxLayer.hpp b/src/armnn/layers/ArgMinMaxLayer.hpp
index 27cfb20edf..761d4a0a36 100644
--- a/src/armnn/layers/ArgMinMaxLayer.hpp
+++ b/src/armnn/layers/ArgMinMaxLayer.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -32,8 +32,7 @@ public:
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref ArgMinMaxLayer.
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
- void ValidateTensorShapesFromInputs(
- ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+ void ValidateTensorShapesFromInputs() override;
void Accept(ILayerVisitor& visitor) const override;
diff --git a/src/armnn/layers/BatchNormalizationLayer.cpp b/src/armnn/layers/BatchNormalizationLayer.cpp
index fa589dbc75..625e0d472d 100644
--- a/src/armnn/layers/BatchNormalizationLayer.cpp
+++ b/src/armnn/layers/BatchNormalizationLayer.cpp
@@ -48,19 +48,19 @@ BatchNormalizationLayer* BatchNormalizationLayer::Clone(Graph& graph) const
return std::move(layer);
}
-void BatchNormalizationLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void BatchNormalizationLayer::ValidateTensorShapesFromInputs()
{
VerifyLayerConnections(1, CHECK_LOCATION());
const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
- VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+ VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
ARMNN_ASSERT(inferredShapes.size() == 1);
- ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "BatchNormalizationLayer");
+ ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "BatchNormalizationLayer");
}
diff --git a/src/armnn/layers/BatchNormalizationLayer.hpp b/src/armnn/layers/BatchNormalizationLayer.hpp
index 88db81f8aa..3915897a52 100644
--- a/src/armnn/layers/BatchNormalizationLayer.hpp
+++ b/src/armnn/layers/BatchNormalizationLayer.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
@@ -37,8 +37,7 @@ public:
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref BatchNormalizationLayer.
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
- void ValidateTensorShapesFromInputs(
- ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+ void ValidateTensorShapesFromInputs() override;
void Accept(ILayerVisitor& visitor) const override;
diff --git a/src/armnn/layers/BatchToSpaceNdLayer.cpp b/src/armnn/layers/BatchToSpaceNdLayer.cpp
index fe99e9ebff..1a5cfa6647 100644
--- a/src/armnn/layers/BatchToSpaceNdLayer.cpp
+++ b/src/armnn/layers/BatchToSpaceNdLayer.cpp
@@ -41,19 +41,19 @@ BatchToSpaceNdLayer* BatchToSpaceNdLayer::Clone(Graph& graph) const
return std::move(layer);
}
-void BatchToSpaceNdLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void BatchToSpaceNdLayer::ValidateTensorShapesFromInputs()
{
VerifyLayerConnections(1, CHECK_LOCATION());
const TensorShape &outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
- VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+ VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
auto inferredShapes = InferOutputShapes({GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape()});
ARMNN_ASSERT(inferredShapes.size() == 1);
- ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "BatchToSpaceNdLayer");
+ ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "BatchToSpaceNdLayer");
}
std::vector<TensorShape> BatchToSpaceNdLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
diff --git a/src/armnn/layers/BatchToSpaceNdLayer.hpp b/src/armnn/layers/BatchToSpaceNdLayer.hpp
index b99dc36ce9..da7585b51e 100644
--- a/src/armnn/layers/BatchToSpaceNdLayer.hpp
+++ b/src/armnn/layers/BatchToSpaceNdLayer.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
@@ -26,8 +26,7 @@ public:
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref BatchToSpaceNdLayer.
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
- void ValidateTensorShapesFromInputs(
- ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+ void ValidateTensorShapesFromInputs() override;
/// By default returns inputShapes if the number of inputs are equal to number of outputs,
/// otherwise infers the output shapes from given input shapes and layer properties.
diff --git a/src/armnn/layers/ComparisonLayer.cpp b/src/armnn/layers/ComparisonLayer.cpp
index 4dd3781bdd..a9639e8285 100644
--- a/src/armnn/layers/ComparisonLayer.cpp
+++ b/src/armnn/layers/ComparisonLayer.cpp
@@ -55,13 +55,13 @@ std::vector<TensorShape> ComparisonLayer::InferOutputShapes(const std::vector<Te
return std::vector<TensorShape>({ TensorShape(numDims, dims.data()) });
}
-void ComparisonLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void ComparisonLayer::ValidateTensorShapesFromInputs()
{
VerifyLayerConnections(2, CHECK_LOCATION());
const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
- VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+ VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
std::vector<TensorShape> inferredShapes = InferOutputShapes({
GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape(),
@@ -69,7 +69,7 @@ void ComparisonLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeI
});
ARMNN_ASSERT(inferredShapes.size() == 1);
- ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "ComparisonLayer");
+ ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "ComparisonLayer");
}
void ComparisonLayer::Accept(ILayerVisitor& visitor) const
diff --git a/src/armnn/layers/ComparisonLayer.hpp b/src/armnn/layers/ComparisonLayer.hpp
index e20bcdfb4f..bcb0dc2fdd 100644
--- a/src/armnn/layers/ComparisonLayer.hpp
+++ b/src/armnn/layers/ComparisonLayer.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2019 Arm Ltd. All rights reserved.
+// Copyright © 2019 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -33,8 +33,7 @@ public:
/// Check if the input tensor shape(s) will lead to a valid configuration
/// of @ref ComparisonLayer
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
- void ValidateTensorShapesFromInputs(
- ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+ void ValidateTensorShapesFromInputs() override;
void Accept(ILayerVisitor& visitor) const override;
diff --git a/src/armnn/layers/ConcatLayer.cpp b/src/armnn/layers/ConcatLayer.cpp
index 4f0aa539a1..d9fffff57e 100644
--- a/src/armnn/layers/ConcatLayer.cpp
+++ b/src/armnn/layers/ConcatLayer.cpp
@@ -242,7 +242,7 @@ std::vector<TensorShape> ConcatLayer::InferOutputShapes(const std::vector<Tensor
return std::vector<TensorShape>({ TensorShape({numDims, extentMax.data()}) });
}
-void ConcatLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void ConcatLayer::ValidateTensorShapesFromInputs()
{
// Validates Concat layer.
ConditionalThrowIfNotEqual<LayerValidationException>(
@@ -254,7 +254,7 @@ void ConcatLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInfer
const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
- VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+ VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
std::vector<TensorShape> inputShapes;
for (unsigned int i = 0; i < GetNumInputSlots(); ++i)
@@ -266,7 +266,7 @@ void ConcatLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInfer
ARMNN_ASSERT(inferredShapes.size() == 1);
- ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "ConcatLayer");
+ ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "ConcatLayer");
}
void ConcatLayer::Accept(ILayerVisitor& visitor) const
diff --git a/src/armnn/layers/ConcatLayer.hpp b/src/armnn/layers/ConcatLayer.hpp
index 5bb11ba6e2..84eba2e7c9 100644
--- a/src/armnn/layers/ConcatLayer.hpp
+++ b/src/armnn/layers/ConcatLayer.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
@@ -35,8 +35,7 @@ public:
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref ConcatLayer.
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
- void ValidateTensorShapesFromInputs(
- ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+ void ValidateTensorShapesFromInputs() override;
/// By default returns inputShapes if the number of inputs are equal to number of outputs,
/// otherwise infers the output shapes from given input shapes and layer properties.
diff --git a/src/armnn/layers/ConstantLayer.cpp b/src/armnn/layers/ConstantLayer.cpp
index ff4c57c431..cd8a056fb3 100644
--- a/src/armnn/layers/ConstantLayer.cpp
+++ b/src/armnn/layers/ConstantLayer.cpp
@@ -40,9 +40,8 @@ std::vector<TensorShape> ConstantLayer::InferOutputShapes(const std::vector<Tens
return std::vector<TensorShape>({ inputShapes[0] });
}
-void ConstantLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void ConstantLayer::ValidateTensorShapesFromInputs()
{
- IgnoreUnused(shapeInferenceMethod);
// Get the output shape from the value of the constant layer.
TensorShape const& outShape = m_LayerOutput->GetTensorInfo().GetShape();
diff --git a/src/armnn/layers/ConstantLayer.hpp b/src/armnn/layers/ConstantLayer.hpp
index 23183d22fe..36fa1f96e9 100644
--- a/src/armnn/layers/ConstantLayer.hpp
+++ b/src/armnn/layers/ConstantLayer.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
@@ -28,8 +28,7 @@ public:
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref ConstantLayer
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
- void ValidateTensorShapesFromInputs(
- ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+ void ValidateTensorShapesFromInputs() override;
/// By default returns inputShapes if the number of inputs are equal to number of outputs,
/// otherwise infers the output shapes from given input shapes and layer properties.
diff --git a/src/armnn/layers/ConvertBf16ToFp32Layer.cpp b/src/armnn/layers/ConvertBf16ToFp32Layer.cpp
index 250ecfa133..81bb4d9f1b 100644
--- a/src/armnn/layers/ConvertBf16ToFp32Layer.cpp
+++ b/src/armnn/layers/ConvertBf16ToFp32Layer.cpp
@@ -30,19 +30,19 @@ ConvertBf16ToFp32Layer* ConvertBf16ToFp32Layer::Clone(Graph& graph) const
return CloneBase<ConvertBf16ToFp32Layer>(graph, GetName());
}
-void ConvertBf16ToFp32Layer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void ConvertBf16ToFp32Layer::ValidateTensorShapesFromInputs()
{
VerifyLayerConnections(1, CHECK_LOCATION());
const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
- VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+ VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
ARMNN_ASSERT(inferredShapes.size() == 1);
- ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "ConvertBf16ToFp32Layer");
+ ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "ConvertBf16ToFp32Layer");
}
void ConvertBf16ToFp32Layer::Accept(ILayerVisitor& visitor) const
diff --git a/src/armnn/layers/ConvertBf16ToFp32Layer.hpp b/src/armnn/layers/ConvertBf16ToFp32Layer.hpp
index 136cfed479..d9df0bdf38 100644
--- a/src/armnn/layers/ConvertBf16ToFp32Layer.hpp
+++ b/src/armnn/layers/ConvertBf16ToFp32Layer.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2020 Arm Ltd. All rights reserved.
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -26,8 +26,7 @@ public:
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref ConvertBf16ToFp32Layer.
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
- void ValidateTensorShapesFromInputs(
- ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+ void ValidateTensorShapesFromInputs() override;
void Accept(ILayerVisitor& visitor) const override;
diff --git a/src/armnn/layers/ConvertFp16ToFp32Layer.cpp b/src/armnn/layers/ConvertFp16ToFp32Layer.cpp
index f86397fb01..709ca137f4 100644
--- a/src/armnn/layers/ConvertFp16ToFp32Layer.cpp
+++ b/src/armnn/layers/ConvertFp16ToFp32Layer.cpp
@@ -30,19 +30,19 @@ ConvertFp16ToFp32Layer* ConvertFp16ToFp32Layer::Clone(Graph& graph) const
return CloneBase<ConvertFp16ToFp32Layer>(graph, GetName());
}
-void ConvertFp16ToFp32Layer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void ConvertFp16ToFp32Layer::ValidateTensorShapesFromInputs()
{
VerifyLayerConnections(1, CHECK_LOCATION());
const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
- VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+ VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
ARMNN_ASSERT(inferredShapes.size() == 1);
- ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "ConvertFp16ToFp32Layer");
+ ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "ConvertFp16ToFp32Layer");
}
void ConvertFp16ToFp32Layer::Accept(ILayerVisitor& visitor) const
diff --git a/src/armnn/layers/ConvertFp16ToFp32Layer.hpp b/src/armnn/layers/ConvertFp16ToFp32Layer.hpp
index c4ac13b7e4..4eadb9f11a 100644
--- a/src/armnn/layers/ConvertFp16ToFp32Layer.hpp
+++ b/src/armnn/layers/ConvertFp16ToFp32Layer.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -26,8 +26,7 @@ public:
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref ConvertFp16ToFp32Layer.
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
- void ValidateTensorShapesFromInputs(
- ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+ void ValidateTensorShapesFromInputs() override;
void Accept(ILayerVisitor& visitor) const override;
diff --git a/src/armnn/layers/ConvertFp32ToBf16Layer.cpp b/src/armnn/layers/ConvertFp32ToBf16Layer.cpp
index 15052455e4..9b02b2f64b 100644
--- a/src/armnn/layers/ConvertFp32ToBf16Layer.cpp
+++ b/src/armnn/layers/ConvertFp32ToBf16Layer.cpp
@@ -30,20 +30,20 @@ ConvertFp32ToBf16Layer* ConvertFp32ToBf16Layer::Clone(Graph& graph) const
return CloneBase<ConvertFp32ToBf16Layer>(graph, GetName());
}
-void ConvertFp32ToBf16Layer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void ConvertFp32ToBf16Layer::ValidateTensorShapesFromInputs()
{
VerifyLayerConnections(1, CHECK_LOCATION());
const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
- VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+ VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
ARMNN_ASSERT(inferredShapes.size() == 1);
- ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "LayerName");
+ ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "LayerName");
}
void ConvertFp32ToBf16Layer::Accept(ILayerVisitor& visitor) const
diff --git a/src/armnn/layers/ConvertFp32ToBf16Layer.hpp b/src/armnn/layers/ConvertFp32ToBf16Layer.hpp
index 096dc7e0d8..57fbe13e12 100644
--- a/src/armnn/layers/ConvertFp32ToBf16Layer.hpp
+++ b/src/armnn/layers/ConvertFp32ToBf16Layer.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2020 Arm Ltd. All rights reserved.
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -26,8 +26,7 @@ public:
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref ConvertFp32ToBf16Layer.
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
- void ValidateTensorShapesFromInputs(
- ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+ void ValidateTensorShapesFromInputs() override;
void Accept(ILayerVisitor& visitor) const override;
diff --git a/src/armnn/layers/ConvertFp32ToFp16Layer.cpp b/src/armnn/layers/ConvertFp32ToFp16Layer.cpp
index 0a126e2284..7b2df00e0b 100644
--- a/src/armnn/layers/ConvertFp32ToFp16Layer.cpp
+++ b/src/armnn/layers/ConvertFp32ToFp16Layer.cpp
@@ -29,20 +29,20 @@ ConvertFp32ToFp16Layer* ConvertFp32ToFp16Layer::Clone(Graph& graph) const
return CloneBase<ConvertFp32ToFp16Layer>(graph, GetName());
}
-void ConvertFp32ToFp16Layer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void ConvertFp32ToFp16Layer::ValidateTensorShapesFromInputs()
{
VerifyLayerConnections(1, CHECK_LOCATION());
const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
- VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+ VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
ARMNN_ASSERT(inferredShapes.size() == 1);
- ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "LayerName");
+ ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "LayerName");
}
void ConvertFp32ToFp16Layer::Accept(ILayerVisitor& visitor) const
diff --git a/src/armnn/layers/ConvertFp32ToFp16Layer.hpp b/src/armnn/layers/ConvertFp32ToFp16Layer.hpp
index c8a5055cc3..5652a472a2 100644
--- a/src/armnn/layers/ConvertFp32ToFp16Layer.hpp
+++ b/src/armnn/layers/ConvertFp32ToFp16Layer.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
@@ -25,8 +25,7 @@ public:
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref ConvertFp32ToFp16Layer.
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
- void ValidateTensorShapesFromInputs(
- ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+ void ValidateTensorShapesFromInputs() override;
void Accept(ILayerVisitor& visitor) const override;
diff --git a/src/armnn/layers/Convolution2dLayer.cpp b/src/armnn/layers/Convolution2dLayer.cpp
index a1535ea7cc..5fff982ca1 100644
--- a/src/armnn/layers/Convolution2dLayer.cpp
+++ b/src/armnn/layers/Convolution2dLayer.cpp
@@ -112,13 +112,13 @@ std::vector<TensorShape> Convolution2dLayer::InferOutputShapes(const std::vector
return std::vector<TensorShape>({ tensorShape });
}
-void Convolution2dLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void Convolution2dLayer::ValidateTensorShapesFromInputs()
{
VerifyLayerConnections(1, CHECK_LOCATION());
const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
- VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+ VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
// check if we m_Weight data is not nullptr
ARMNN_ASSERT_MSG(m_Weight != nullptr, "Convolution2dLayer: Weights data should not be null.");
@@ -129,7 +129,7 @@ void Convolution2dLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod sha
ARMNN_ASSERT(inferredShapes.size() == 1);
- ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "Convolution2dLayer");
+ ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "Convolution2dLayer");
}
Layer::ConstantTensors Convolution2dLayer::GetConstantTensorsByRef()
diff --git a/src/armnn/layers/Convolution2dLayer.hpp b/src/armnn/layers/Convolution2dLayer.hpp
index e88b44da16..4dd1497fd8 100644
--- a/src/armnn/layers/Convolution2dLayer.hpp
+++ b/src/armnn/layers/Convolution2dLayer.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
@@ -34,8 +34,7 @@ public:
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref Convolution2dLayer.
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
- void ValidateTensorShapesFromInputs(
- ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+ void ValidateTensorShapesFromInputs() override;
/// By default returns inputShapes if the number of inputs are equal to number of outputs,
/// otherwise infers the output shapes from given input shapes and layer properties.
diff --git a/src/armnn/layers/DebugLayer.cpp b/src/armnn/layers/DebugLayer.cpp
index 40bc8fe54b..c29421fc08 100644
--- a/src/armnn/layers/DebugLayer.cpp
+++ b/src/armnn/layers/DebugLayer.cpp
@@ -34,20 +34,20 @@ DebugLayer* DebugLayer::Clone(Graph& graph) const
return CloneBase<DebugLayer>(graph, GetName());
}
-void DebugLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void DebugLayer::ValidateTensorShapesFromInputs()
{
VerifyLayerConnections(1, CHECK_LOCATION());
const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
- VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+ VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
std::vector<TensorShape> inferredShapes = InferOutputShapes({
GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
ARMNN_ASSERT(inferredShapes.size() == 1);
- ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "DebugLayer");
+ ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "DebugLayer");
}
void DebugLayer::Accept(ILayerVisitor& visitor) const
diff --git a/src/armnn/layers/DebugLayer.hpp b/src/armnn/layers/DebugLayer.hpp
index 227e056c7b..e71e05a8d5 100644
--- a/src/armnn/layers/DebugLayer.hpp
+++ b/src/armnn/layers/DebugLayer.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
@@ -26,8 +26,7 @@ public:
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref DebugLayer.
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
- void ValidateTensorShapesFromInputs(
- ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+ void ValidateTensorShapesFromInputs() override;
void Accept(ILayerVisitor& visitor) const override;
diff --git a/src/armnn/layers/DepthToSpaceLayer.cpp b/src/armnn/layers/DepthToSpaceLayer.cpp
index 4b7c41d317..dae557ea7b 100644
--- a/src/armnn/layers/DepthToSpaceLayer.cpp
+++ b/src/armnn/layers/DepthToSpaceLayer.cpp
@@ -57,20 +57,20 @@ std::vector<TensorShape> DepthToSpaceLayer::InferOutputShapes(const std::vector<
return std::vector<TensorShape>({ outputShape });
}
-void DepthToSpaceLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void DepthToSpaceLayer::ValidateTensorShapesFromInputs()
{
VerifyLayerConnections(1, CHECK_LOCATION());
const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
- VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+ VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
std::vector<TensorShape> inferredShapes = InferOutputShapes({
GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
ARMNN_ASSERT(inferredShapes.size() == 1);
- ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "DepthToSpaceLayer");
+ ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "DepthToSpaceLayer");
}
void DepthToSpaceLayer::Accept(ILayerVisitor& visitor) const
diff --git a/src/armnn/layers/DepthToSpaceLayer.hpp b/src/armnn/layers/DepthToSpaceLayer.hpp
index a0ecdcffc0..0730d4d3ea 100644
--- a/src/armnn/layers/DepthToSpaceLayer.hpp
+++ b/src/armnn/layers/DepthToSpaceLayer.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2019 Arm Ltd. All rights reserved.
+// Copyright © 2019 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -33,8 +33,7 @@ public:
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref DepthToSpaceLayer.
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
- void ValidateTensorShapesFromInputs(
- ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+ void ValidateTensorShapesFromInputs() override;
void Accept(ILayerVisitor& visitor) const override;
diff --git a/src/armnn/layers/DepthwiseConvolution2dLayer.cpp b/src/armnn/layers/DepthwiseConvolution2dLayer.cpp
index 98d9e82f7f..8a7cf23bb7 100644
--- a/src/armnn/layers/DepthwiseConvolution2dLayer.cpp
+++ b/src/armnn/layers/DepthwiseConvolution2dLayer.cpp
@@ -119,13 +119,13 @@ DepthwiseConvolution2dLayer::InferOutputShapes(const std::vector<TensorShape>& i
return std::vector<TensorShape>{ tensorShape };
}
-void DepthwiseConvolution2dLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void DepthwiseConvolution2dLayer::ValidateTensorShapesFromInputs()
{
VerifyLayerConnections(1, CHECK_LOCATION());
const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
- VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+ VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
// on this level constant data should not be released..
ARMNN_ASSERT_MSG(m_Weight != nullptr, "DepthwiseConvolution2dLayer: Weights data should not be null.");
@@ -137,7 +137,7 @@ void DepthwiseConvolution2dLayer::ValidateTensorShapesFromInputs(ShapeInferenceM
ARMNN_ASSERT(inferredShapes.size() == 1);
- ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "DepthwiseConvolution2dLayer");
+ ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "DepthwiseConvolution2dLayer");
}
Layer::ConstantTensors DepthwiseConvolution2dLayer::GetConstantTensorsByRef()
diff --git a/src/armnn/layers/DepthwiseConvolution2dLayer.hpp b/src/armnn/layers/DepthwiseConvolution2dLayer.hpp
index 7b42a5fa59..dd0b0e6b88 100644
--- a/src/armnn/layers/DepthwiseConvolution2dLayer.hpp
+++ b/src/armnn/layers/DepthwiseConvolution2dLayer.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
@@ -33,8 +33,7 @@ public:
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref DepthwiseConvolution2dLayer.
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
- void ValidateTensorShapesFromInputs(
- ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+ void ValidateTensorShapesFromInputs() override;
/// By default returns inputShapes if the number of inputs are equal to number of outputs,
/// otherwise infers the output shapes from given input shapes and layer properties.
diff --git a/src/armnn/layers/DequantizeLayer.cpp b/src/armnn/layers/DequantizeLayer.cpp
index 79ef0cba18..f79888260a 100644
--- a/src/armnn/layers/DequantizeLayer.cpp
+++ b/src/armnn/layers/DequantizeLayer.cpp
@@ -29,20 +29,20 @@ DequantizeLayer* DequantizeLayer::Clone(Graph& graph) const
return CloneBase<DequantizeLayer>(graph, GetName());
}
-void DequantizeLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void DequantizeLayer::ValidateTensorShapesFromInputs()
{
VerifyLayerConnections(1, CHECK_LOCATION());
const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
- VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+ VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
std::vector<TensorShape> inferredShapes = InferOutputShapes({
GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
ARMNN_ASSERT(inferredShapes.size() == 1);
- ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "DequantizeLayer");
+ ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "DequantizeLayer");
}
void DequantizeLayer::Accept(ILayerVisitor& visitor) const
diff --git a/src/armnn/layers/DequantizeLayer.hpp b/src/armnn/layers/DequantizeLayer.hpp
index 24c9869f13..a5750ddaab 100644
--- a/src/armnn/layers/DequantizeLayer.hpp
+++ b/src/armnn/layers/DequantizeLayer.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
@@ -26,8 +26,7 @@ public:
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref DequantizeLayer.
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
- void ValidateTensorShapesFromInputs(
- ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+ void ValidateTensorShapesFromInputs() override;
void Accept(ILayerVisitor& visitor) const override;
diff --git a/src/armnn/layers/DetectionPostProcessLayer.cpp b/src/armnn/layers/DetectionPostProcessLayer.cpp
index fddf86f573..b18781b1c0 100644
--- a/src/armnn/layers/DetectionPostProcessLayer.cpp
+++ b/src/armnn/layers/DetectionPostProcessLayer.cpp
@@ -34,13 +34,13 @@ DetectionPostProcessLayer* DetectionPostProcessLayer::Clone(Graph& graph) const
return std::move(layer);
}
-void DetectionPostProcessLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void DetectionPostProcessLayer::ValidateTensorShapesFromInputs()
{
VerifyLayerConnections(2, CHECK_LOCATION());
const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
- VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+ VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
// on this level constant data should not be released.
ARMNN_ASSERT_MSG(m_Anchors != nullptr, "DetectionPostProcessLayer: Anchors data should not be null.");
@@ -53,21 +53,21 @@ void DetectionPostProcessLayer::ValidateTensorShapesFromInputs(ShapeInferenceMet
const TensorShape& inferredDetectionScores = TensorShape({ 1, detectedBoxes });
const TensorShape& inferredNumberDetections = TensorShape({ 1 });
- ValidateAndCopyShape(outputShape, inferredDetectionBoxes, shapeInferenceMethod, "DetectionPostProcessLayer");
+ ValidateAndCopyShape(outputShape, inferredDetectionBoxes, m_ShapeInferenceMethod, "DetectionPostProcessLayer");
ValidateAndCopyShape(GetOutputSlot(1).GetTensorInfo().GetShape(),
inferredDetectionScores,
- shapeInferenceMethod,
+ m_ShapeInferenceMethod,
"DetectionPostProcessLayer", 1);
ValidateAndCopyShape(GetOutputSlot(2).GetTensorInfo().GetShape(),
inferredDetectionScores,
- shapeInferenceMethod,
+ m_ShapeInferenceMethod,
"DetectionPostProcessLayer", 2);
ValidateAndCopyShape(GetOutputSlot(3).GetTensorInfo().GetShape(),
inferredNumberDetections,
- shapeInferenceMethod,
+ m_ShapeInferenceMethod,
"DetectionPostProcessLayer", 3);
}
diff --git a/src/armnn/layers/DetectionPostProcessLayer.hpp b/src/armnn/layers/DetectionPostProcessLayer.hpp
index d3c604f65c..374eef5ec5 100644
--- a/src/armnn/layers/DetectionPostProcessLayer.hpp
+++ b/src/armnn/layers/DetectionPostProcessLayer.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -32,8 +32,7 @@ public:
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref DetectionPostProcessLayer.
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
- void ValidateTensorShapesFromInputs(
- ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+ void ValidateTensorShapesFromInputs() override;
void Accept(ILayerVisitor& visitor) const override;
diff --git a/src/armnn/layers/DivisionLayer.hpp b/src/armnn/layers/DivisionLayer.hpp
index 0a9b9fe821..4427a4c4cb 100644
--- a/src/armnn/layers/DivisionLayer.hpp
+++ b/src/armnn/layers/DivisionLayer.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
diff --git a/src/armnn/layers/ElementwiseBaseLayer.cpp b/src/armnn/layers/ElementwiseBaseLayer.cpp
index e063293815..b4a3cea9e1 100644
--- a/src/armnn/layers/ElementwiseBaseLayer.cpp
+++ b/src/armnn/layers/ElementwiseBaseLayer.cpp
@@ -47,13 +47,13 @@ std::vector<TensorShape> ElementwiseBaseLayer::InferOutputShapes(const std::vect
return std::vector<TensorShape>({ TensorShape(numDims, dims.data()) });
}
-void ElementwiseBaseLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void ElementwiseBaseLayer::ValidateTensorShapesFromInputs()
{
VerifyLayerConnections(2, CHECK_LOCATION());
const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
- VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+ VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
auto inferredShapes = InferOutputShapes({
GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape(),
@@ -62,7 +62,7 @@ void ElementwiseBaseLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod s
ARMNN_ASSERT(inferredShapes.size() == 1);
- ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, GetLayerTypeAsCString(GetType()));
+ ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, GetLayerTypeAsCString(GetType()));
}
} // namespace armnn
diff --git a/src/armnn/layers/ElementwiseBaseLayer.hpp b/src/armnn/layers/ElementwiseBaseLayer.hpp
index 1f9888a821..3893dcd9f9 100644
--- a/src/armnn/layers/ElementwiseBaseLayer.hpp
+++ b/src/armnn/layers/ElementwiseBaseLayer.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -19,8 +19,7 @@ public:
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of the element wise operation.
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
- void ValidateTensorShapesFromInputs(
- ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+ void ValidateTensorShapesFromInputs() override;
/// By default returns inputShapes if the number of inputs are equal to number of outputs,
/// otherwise infers the output shapes from given input shapes and layer properties.
diff --git a/src/armnn/layers/ElementwiseUnaryLayer.cpp b/src/armnn/layers/ElementwiseUnaryLayer.cpp
index 5592c2070e..cf4c2fc36b 100644
--- a/src/armnn/layers/ElementwiseUnaryLayer.cpp
+++ b/src/armnn/layers/ElementwiseUnaryLayer.cpp
@@ -40,19 +40,19 @@ std::vector<TensorShape> ElementwiseUnaryLayer::InferOutputShapes(const std::vec
return std::vector<TensorShape>({ input });
}
-void ElementwiseUnaryLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void ElementwiseUnaryLayer::ValidateTensorShapesFromInputs()
{
VerifyLayerConnections(1, CHECK_LOCATION());
const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
- VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+ VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
std::vector<TensorShape> inferredShapes = InferOutputShapes({
GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape()});
ARMNN_ASSERT(inferredShapes.size() == 1);
- ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, GetLayerTypeAsCString(GetType()));
+ ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, GetLayerTypeAsCString(GetType()));
}
void ElementwiseUnaryLayer::Accept(ILayerVisitor& visitor) const
diff --git a/src/armnn/layers/ElementwiseUnaryLayer.hpp b/src/armnn/layers/ElementwiseUnaryLayer.hpp
index ae88fcfb45..f6f8862da4 100644
--- a/src/armnn/layers/ElementwiseUnaryLayer.hpp
+++ b/src/armnn/layers/ElementwiseUnaryLayer.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2019 Arm Ltd. All rights reserved.
+// Copyright © 2019 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -32,8 +32,7 @@ public:
/// Check if the input tensor shape(s) will lead to a valid configuration
/// of @ref ElementwiseUnaryLayer
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
- void ValidateTensorShapesFromInputs(
- ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+ void ValidateTensorShapesFromInputs() override;
void Accept(ILayerVisitor& visitor) const override;
diff --git a/src/armnn/layers/FakeQuantizationLayer.cpp b/src/armnn/layers/FakeQuantizationLayer.cpp
index 3a1d0d1a50..ab41324061 100644
--- a/src/armnn/layers/FakeQuantizationLayer.cpp
+++ b/src/armnn/layers/FakeQuantizationLayer.cpp
@@ -29,19 +29,19 @@ FakeQuantizationLayer* FakeQuantizationLayer::Clone(Graph& graph) const
return CloneBase<FakeQuantizationLayer>(graph, m_Param, GetName());
}
-void FakeQuantizationLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void FakeQuantizationLayer::ValidateTensorShapesFromInputs()
{
VerifyLayerConnections(1, CHECK_LOCATION());
const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
- VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+ VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
ARMNN_ASSERT(inferredShapes.size() == 1);
- ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "FakeQuantizationLayer");
+ ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "FakeQuantizationLayer");
}
void FakeQuantizationLayer::Accept(ILayerVisitor& visitor) const
diff --git a/src/armnn/layers/FakeQuantizationLayer.hpp b/src/armnn/layers/FakeQuantizationLayer.hpp
index dc22c23485..09bd530f86 100644
--- a/src/armnn/layers/FakeQuantizationLayer.hpp
+++ b/src/armnn/layers/FakeQuantizationLayer.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
@@ -26,8 +26,7 @@ public:
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref FakeQuantizationLayer.
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validate.
- void ValidateTensorShapesFromInputs(
- ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+ void ValidateTensorShapesFromInputs() override;
void Accept(ILayerVisitor& visitor) const override;
diff --git a/src/armnn/layers/FillLayer.cpp b/src/armnn/layers/FillLayer.cpp
index 174fcf72bb..329a30a5bc 100644
--- a/src/armnn/layers/FillLayer.cpp
+++ b/src/armnn/layers/FillLayer.cpp
@@ -29,13 +29,13 @@ FillLayer* FillLayer::Clone(Graph& graph) const
return CloneBase<FillLayer>(graph, m_Param, GetName());
}
-void FillLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void FillLayer::ValidateTensorShapesFromInputs()
{
VerifyLayerConnections(1, CHECK_LOCATION());
const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
- VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+ VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
diff --git a/src/armnn/layers/FillLayer.hpp b/src/armnn/layers/FillLayer.hpp
index aa12fca711..eeed141128 100644
--- a/src/armnn/layers/FillLayer.hpp
+++ b/src/armnn/layers/FillLayer.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2020 Arm Ltd. All rights reserved.
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
@@ -25,8 +25,7 @@ public:
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref FillLayer.
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validate.
- void ValidateTensorShapesFromInputs(
- ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+ void ValidateTensorShapesFromInputs() override;
void Accept(ILayerVisitor& visitor) const override;
diff --git a/src/armnn/layers/FloorLayer.cpp b/src/armnn/layers/FloorLayer.cpp
index 04d847ee10..5ff9a9a1c5 100644
--- a/src/armnn/layers/FloorLayer.cpp
+++ b/src/armnn/layers/FloorLayer.cpp
@@ -29,18 +29,18 @@ FloorLayer* FloorLayer::Clone(Graph& graph) const
return CloneBase<FloorLayer>(graph, GetName());
}
-void FloorLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void FloorLayer::ValidateTensorShapesFromInputs()
{
VerifyLayerConnections(1, CHECK_LOCATION());
const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
- VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+ VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
ARMNN_ASSERT(inferredShapes.size() == 1);
- ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "FloorLayer");
+ ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "FloorLayer");
}
void FloorLayer::Accept(ILayerVisitor& visitor) const
diff --git a/src/armnn/layers/FloorLayer.hpp b/src/armnn/layers/FloorLayer.hpp
index 68361d0a36..07cf151a8a 100644
--- a/src/armnn/layers/FloorLayer.hpp
+++ b/src/armnn/layers/FloorLayer.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
@@ -26,8 +26,7 @@ public:
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref FloorLayer.
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validate.
- void ValidateTensorShapesFromInputs(
- ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+ void ValidateTensorShapesFromInputs() override;
void Accept(ILayerVisitor& visitor) const override;
diff --git a/src/armnn/layers/FullyConnectedLayer.cpp b/src/armnn/layers/FullyConnectedLayer.cpp
index 174459b565..f10beda72b 100644
--- a/src/armnn/layers/FullyConnectedLayer.cpp
+++ b/src/armnn/layers/FullyConnectedLayer.cpp
@@ -61,13 +61,11 @@ std::vector<TensorShape> FullyConnectedLayer::InferOutputShapes(const std::vecto
return std::vector<TensorShape>({ TensorShape({batches, weightShape[dimIdx]})});
}
-void FullyConnectedLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void FullyConnectedLayer::ValidateTensorShapesFromInputs()
{
- IgnoreUnused(shapeInferenceMethod);
-
const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
- VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+ VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
// check if we m_Weight data is not nullptr
ARMNN_ASSERT_MSG(m_Weight != nullptr, "FullyConnectedLayer: Weights data should not be null.");
@@ -78,7 +76,7 @@ void FullyConnectedLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod sh
ARMNN_ASSERT(inferredShapes.size() == 1);
ARMNN_ASSERT(inferredShapes[0].GetDimensionality() == Dimensionality::Specified);
- ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "FullyConnectedLayer");
+ ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "FullyConnectedLayer");
}
Layer::ConstantTensors FullyConnectedLayer::GetConstantTensorsByRef()
diff --git a/src/armnn/layers/FullyConnectedLayer.hpp b/src/armnn/layers/FullyConnectedLayer.hpp
index a2d075002a..bbacd2551d 100644
--- a/src/armnn/layers/FullyConnectedLayer.hpp
+++ b/src/armnn/layers/FullyConnectedLayer.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
@@ -33,8 +33,7 @@ public:
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref FullyConnectedLayer.
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validate.
- void ValidateTensorShapesFromInputs(
- ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+ void ValidateTensorShapesFromInputs() override;
/// By default returns inputShapes if the number of inputs are equal to number of outputs,
/// otherwise infers the output shapes from given input shapes and layer properties.
diff --git a/src/armnn/layers/GatherLayer.cpp b/src/armnn/layers/GatherLayer.cpp
index 52bf4324a2..e5d4a18967 100644
--- a/src/armnn/layers/GatherLayer.cpp
+++ b/src/armnn/layers/GatherLayer.cpp
@@ -29,13 +29,13 @@ GatherLayer* GatherLayer::Clone(Graph& graph) const
return CloneBase<GatherLayer>(graph, m_Param, GetName());
}
-void GatherLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void GatherLayer::ValidateTensorShapesFromInputs()
{
VerifyLayerConnections(2, CHECK_LOCATION());
const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
- VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+ VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
const TensorInfo& params = GetInputSlot(0).GetConnection()->GetTensorInfo();
const TensorInfo& indices = GetInputSlot(1).GetConnection()->GetTensorInfo();
@@ -68,7 +68,7 @@ void GatherLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInfer
const TensorShape& inferredShape = TensorShape(outputDim, dimSizes.data());
- ValidateAndCopyShape(outputShape, inferredShape, shapeInferenceMethod, "GatherLayer");
+ ValidateAndCopyShape(outputShape, inferredShape, m_ShapeInferenceMethod, "GatherLayer");
}
void GatherLayer::Accept(ILayerVisitor& visitor) const
diff --git a/src/armnn/layers/GatherLayer.hpp b/src/armnn/layers/GatherLayer.hpp
index d8737adbee..010af37b49 100644
--- a/src/armnn/layers/GatherLayer.hpp
+++ b/src/armnn/layers/GatherLayer.hpp
@@ -27,8 +27,7 @@ public:
/// Check if the input tensor shape(s).
/// will lead to a valid configuration of @ref GatherLayer.
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validate.
- void ValidateTensorShapesFromInputs(
- ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+ void ValidateTensorShapesFromInputs() override;
void Accept(ILayerVisitor& visitor) const override;
diff --git a/src/armnn/layers/InputLayer.cpp b/src/armnn/layers/InputLayer.cpp
index 5a78ecc981..0f96611792 100644
--- a/src/armnn/layers/InputLayer.cpp
+++ b/src/armnn/layers/InputLayer.cpp
@@ -28,12 +28,10 @@ InputLayer* InputLayer::Clone(Graph& graph) const
return CloneBase<InputLayer>(graph, GetBindingId(), GetName());
}
-void InputLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void InputLayer::ValidateTensorShapesFromInputs()
{
- IgnoreUnused(shapeInferenceMethod);
-
//The input layer should already have it's inputs set during graph building phase in the driver/parser.
- ConditionalThrow<LayerValidationException>(GetOutputSlot(0).IsTensorInfoSet(),
+ ConditionalThrow<LayerValidationException>(GetOutputHandler(0).IsTensorInfoSet(),
"InputLayer should already have the TensorInfo set.");
}
diff --git a/src/armnn/layers/InputLayer.hpp b/src/armnn/layers/InputLayer.hpp
index 430abcb410..ff6b521bf0 100644
--- a/src/armnn/layers/InputLayer.hpp
+++ b/src/armnn/layers/InputLayer.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
@@ -26,8 +26,7 @@ public:
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref InputLayer.
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validate.
- void ValidateTensorShapesFromInputs(
- ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+ void ValidateTensorShapesFromInputs() override;
void Accept(ILayerVisitor& visitor) const override;
diff --git a/src/armnn/layers/InstanceNormalizationLayer.cpp b/src/armnn/layers/InstanceNormalizationLayer.cpp
index 4f753e21bf..eb6fe90767 100644
--- a/src/armnn/layers/InstanceNormalizationLayer.cpp
+++ b/src/armnn/layers/InstanceNormalizationLayer.cpp
@@ -29,19 +29,19 @@ InstanceNormalizationLayer* InstanceNormalizationLayer::Clone(Graph& graph) cons
return CloneBase<InstanceNormalizationLayer>(graph, m_Param, GetName());
}
-void InstanceNormalizationLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void InstanceNormalizationLayer::ValidateTensorShapesFromInputs()
{
VerifyLayerConnections(1, CHECK_LOCATION());
const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
- VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+ VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
ARMNN_ASSERT(inferredShapes.size() == 1);
- ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "InstanceNormalizationLayer");
+ ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "InstanceNormalizationLayer");
}
void InstanceNormalizationLayer::Accept(ILayerVisitor& visitor) const
diff --git a/src/armnn/layers/InstanceNormalizationLayer.hpp b/src/armnn/layers/InstanceNormalizationLayer.hpp
index affc0281b1..799cf28f8c 100644
--- a/src/armnn/layers/InstanceNormalizationLayer.hpp
+++ b/src/armnn/layers/InstanceNormalizationLayer.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2019 Arm Ltd. All rights reserved.
+// Copyright © 2019 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
@@ -26,8 +26,7 @@ public:
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref InstanceNormalizationLayer.
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validate.
- void ValidateTensorShapesFromInputs(
- ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+ void ValidateTensorShapesFromInputs() override;
void Accept(ILayerVisitor& visitor) const override;
diff --git a/src/armnn/layers/L2NormalizationLayer.cpp b/src/armnn/layers/L2NormalizationLayer.cpp
index d4ac8019cf..ab2b094acf 100644
--- a/src/armnn/layers/L2NormalizationLayer.cpp
+++ b/src/armnn/layers/L2NormalizationLayer.cpp
@@ -29,19 +29,19 @@ L2NormalizationLayer* L2NormalizationLayer::Clone(Graph& graph) const
return CloneBase<L2NormalizationLayer>(graph, m_Param, GetName());
}
-void L2NormalizationLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void L2NormalizationLayer::ValidateTensorShapesFromInputs()
{
VerifyLayerConnections(1, CHECK_LOCATION());
const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
- VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+ VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
ARMNN_ASSERT(inferredShapes.size() == 1);
- ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "L2NormalizationLayer");
+ ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "L2NormalizationLayer");
}
void L2NormalizationLayer::Accept(ILayerVisitor& visitor) const
diff --git a/src/armnn/layers/L2NormalizationLayer.hpp b/src/armnn/layers/L2NormalizationLayer.hpp
index 1c7e483068..5d58077ba8 100644
--- a/src/armnn/layers/L2NormalizationLayer.hpp
+++ b/src/armnn/layers/L2NormalizationLayer.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
@@ -26,8 +26,7 @@ public:
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref L2NormalizationLayer.
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
- void ValidateTensorShapesFromInputs(
- ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+ void ValidateTensorShapesFromInputs() override;
void Accept(ILayerVisitor& visitor) const override;
diff --git a/src/armnn/layers/LayerCloneBase.hpp b/src/armnn/layers/LayerCloneBase.hpp
index 3671d6642a..a8ff52b86a 100644
--- a/src/armnn/layers/LayerCloneBase.hpp
+++ b/src/armnn/layers/LayerCloneBase.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
@@ -17,6 +17,7 @@ LayerType* Layer::CloneBase(Graph& graph, Params&& ... params) const
layer->SetBackendId(GetBackendId());
layer->SetGuid(GetGuid());
+ layer->SetShapeInferenceMethod(m_ShapeInferenceMethod);
return layer;
}
diff --git a/src/armnn/layers/LogSoftmaxLayer.cpp b/src/armnn/layers/LogSoftmaxLayer.cpp
index f834ccef9d..1620acb166 100644
--- a/src/armnn/layers/LogSoftmaxLayer.cpp
+++ b/src/armnn/layers/LogSoftmaxLayer.cpp
@@ -29,18 +29,18 @@ LogSoftmaxLayer* LogSoftmaxLayer::Clone(Graph& graph) const
return CloneBase<LogSoftmaxLayer>(graph, m_Param, GetName());
}
-void LogSoftmaxLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void LogSoftmaxLayer::ValidateTensorShapesFromInputs()
{
VerifyLayerConnections(1, CHECK_LOCATION());
const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
- VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+ VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
ARMNN_ASSERT(inferredShapes.size() == 1);
- ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "LogSoftmaxLayer");
+ ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "LogSoftmaxLayer");
}
void LogSoftmaxLayer::Accept(ILayerVisitor& visitor) const
diff --git a/src/armnn/layers/LogSoftmaxLayer.hpp b/src/armnn/layers/LogSoftmaxLayer.hpp
index a1907b9b57..b21bece98d 100644
--- a/src/armnn/layers/LogSoftmaxLayer.hpp
+++ b/src/armnn/layers/LogSoftmaxLayer.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2019 Arm Ltd. All rights reserved.
+// Copyright © 2019 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -27,8 +27,7 @@ public:
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref LogSoftmaxLayer.
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
- void ValidateTensorShapesFromInputs(
- ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+ void ValidateTensorShapesFromInputs() override;
void Accept(ILayerVisitor& visitor) const override;
diff --git a/src/armnn/layers/LstmLayer.cpp b/src/armnn/layers/LstmLayer.cpp
index 44f5d1f40b..724bd6b780 100644
--- a/src/armnn/layers/LstmLayer.cpp
+++ b/src/armnn/layers/LstmLayer.cpp
@@ -163,13 +163,13 @@ std::vector<TensorShape> LstmLayer::InferOutputShapes(const std::vector<TensorSh
return outShapes;
}
-void LstmLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void LstmLayer::ValidateTensorShapesFromInputs()
{
VerifyLayerConnections(3, CHECK_LOCATION());
const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
- VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+ VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
auto inferredShapes = InferOutputShapes( {
GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape(),
@@ -208,7 +208,7 @@ void LstmLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferen
ARMNN_ASSERT_MSG(m_CifgParameters.m_InputGateBias != nullptr,
"LstmLayer: m_CifgParameters.m_InputGateBias should not be null.");
- ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "LstmLayer");
+ ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "LstmLayer");
}
else
{
@@ -219,7 +219,7 @@ void LstmLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferen
ARMNN_ASSERT_MSG(m_CifgParameters.m_InputGateBias == nullptr,
"LstmLayer: m_CifgParameters.m_InputGateBias should not have a value when CIFG is enabled.");
- ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "LstmLayer");
+ ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "LstmLayer");
}
if (m_Param.m_ProjectionEnabled)
@@ -243,11 +243,11 @@ void LstmLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferen
}
ValidateAndCopyShape(
- GetOutputSlot(1).GetTensorInfo().GetShape(), inferredShapes[1], shapeInferenceMethod, "LstmLayer", 1);
+ GetOutputSlot(1).GetTensorInfo().GetShape(), inferredShapes[1], m_ShapeInferenceMethod, "LstmLayer", 1);
ValidateAndCopyShape(
- GetOutputSlot(2).GetTensorInfo().GetShape(), inferredShapes[2], shapeInferenceMethod, "LstmLayer", 2);
+ GetOutputSlot(2).GetTensorInfo().GetShape(), inferredShapes[2], m_ShapeInferenceMethod, "LstmLayer", 2);
ValidateAndCopyShape(
- GetOutputSlot(3).GetTensorInfo().GetShape(), inferredShapes[3], shapeInferenceMethod, "LstmLayer", 3);
+ GetOutputSlot(3).GetTensorInfo().GetShape(), inferredShapes[3], m_ShapeInferenceMethod, "LstmLayer", 3);
if (m_Param.m_LayerNormEnabled)
{
diff --git a/src/armnn/layers/LstmLayer.hpp b/src/armnn/layers/LstmLayer.hpp
index c7e4dd4583..51348d7015 100644
--- a/src/armnn/layers/LstmLayer.hpp
+++ b/src/armnn/layers/LstmLayer.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
@@ -97,8 +97,7 @@ public:
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref LstmLayer.
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
- void ValidateTensorShapesFromInputs(
- ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+ void ValidateTensorShapesFromInputs() override;
/// By default returns inputShapes if the number of inputs are equal to number of outputs,
/// otherwise infers the output shapes from given input shapes and layer properties.
diff --git a/src/armnn/layers/MaximumLayer.hpp b/src/armnn/layers/MaximumLayer.hpp
index c90a30e72e..743f79b373 100644
--- a/src/armnn/layers/MaximumLayer.hpp
+++ b/src/armnn/layers/MaximumLayer.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
diff --git a/src/armnn/layers/MeanLayer.cpp b/src/armnn/layers/MeanLayer.cpp
index da635661e1..a1a3a40d95 100644
--- a/src/armnn/layers/MeanLayer.cpp
+++ b/src/armnn/layers/MeanLayer.cpp
@@ -38,13 +38,13 @@ MeanLayer* MeanLayer::Clone(Graph& graph) const
return std::move(layer);
}
-void MeanLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void MeanLayer::ValidateTensorShapesFromInputs()
{
VerifyLayerConnections(1, CHECK_LOCATION());
const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
- VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+ VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
const TensorInfo& input = GetInputSlot(0).GetConnection()->GetTensorInfo();
@@ -97,7 +97,7 @@ void MeanLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferen
}
const TensorShape& inferredShape = TensorShape(outputRank, dimSizes.data());
- ValidateAndCopyShape(outputShape, inferredShape, shapeInferenceMethod, "MeanLayer");
+ ValidateAndCopyShape(outputShape, inferredShape, m_ShapeInferenceMethod, "MeanLayer");
}
void MeanLayer::Accept(ILayerVisitor& visitor) const
diff --git a/src/armnn/layers/MeanLayer.hpp b/src/armnn/layers/MeanLayer.hpp
index 3aacd59395..3a094bf6fe 100644
--- a/src/armnn/layers/MeanLayer.hpp
+++ b/src/armnn/layers/MeanLayer.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -27,8 +27,7 @@ public:
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref MeanLayer.
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
- void ValidateTensorShapesFromInputs(
- ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+ void ValidateTensorShapesFromInputs() override;
void Accept(ILayerVisitor& visitor) const override;
diff --git a/src/armnn/layers/MemCopyLayer.cpp b/src/armnn/layers/MemCopyLayer.cpp
index b4fe68bd8d..854b4f669d 100644
--- a/src/armnn/layers/MemCopyLayer.cpp
+++ b/src/armnn/layers/MemCopyLayer.cpp
@@ -33,19 +33,19 @@ std::unique_ptr<IWorkload> MemCopyLayer::CreateWorkload(const IWorkloadFactory&
return std::make_unique<CopyMemGenericWorkload>(descriptor, PrepInfoAndDesc(descriptor));
}
-void MemCopyLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void MemCopyLayer::ValidateTensorShapesFromInputs()
{
VerifyLayerConnections(1, CHECK_LOCATION());
const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
- VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+ VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
ARMNN_ASSERT(inferredShapes.size() == 1);
- ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "MemCopyLayer");
+ ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "MemCopyLayer");
}
void MemCopyLayer::Accept(ILayerVisitor& visitor) const
diff --git a/src/armnn/layers/MemCopyLayer.hpp b/src/armnn/layers/MemCopyLayer.hpp
index 10a9f55db3..996d6872d3 100644
--- a/src/armnn/layers/MemCopyLayer.hpp
+++ b/src/armnn/layers/MemCopyLayer.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
@@ -26,8 +26,7 @@ public:
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref MemCopyLayer.
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
- void ValidateTensorShapesFromInputs(
- ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+ void ValidateTensorShapesFromInputs() override;
void Accept(ILayerVisitor& visitor) const override;
diff --git a/src/armnn/layers/MemImportLayer.cpp b/src/armnn/layers/MemImportLayer.cpp
index a356f054dc..d9148fb579 100644
--- a/src/armnn/layers/MemImportLayer.cpp
+++ b/src/armnn/layers/MemImportLayer.cpp
@@ -33,19 +33,19 @@ std::unique_ptr<IWorkload> MemImportLayer::CreateWorkload(const IWorkloadFactory
return std::make_unique<ImportMemGenericWorkload>(descriptor, PrepInfoAndDesc(descriptor));
}
-void MemImportLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void MemImportLayer::ValidateTensorShapesFromInputs()
{
VerifyLayerConnections(1, CHECK_LOCATION());
const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
- VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+ VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
ARMNN_ASSERT(inferredShapes.size() == 1);
- ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "MemImportLayer");
+ ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "MemImportLayer");
}
void MemImportLayer::Accept(ILayerVisitor& visitor) const
diff --git a/src/armnn/layers/MemImportLayer.hpp b/src/armnn/layers/MemImportLayer.hpp
index 9b9c88832c..1cbdaac00b 100644
--- a/src/armnn/layers/MemImportLayer.hpp
+++ b/src/armnn/layers/MemImportLayer.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
@@ -26,8 +26,7 @@ public:
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref MemImportLayer.
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
- void ValidateTensorShapesFromInputs(
- ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+ void ValidateTensorShapesFromInputs() override;
void Accept(ILayerVisitor& visitor) const override;
diff --git a/src/armnn/layers/MergeLayer.cpp b/src/armnn/layers/MergeLayer.cpp
index d8351c6c40..74a31a87b8 100644
--- a/src/armnn/layers/MergeLayer.cpp
+++ b/src/armnn/layers/MergeLayer.cpp
@@ -27,13 +27,13 @@ MergeLayer* MergeLayer::Clone(Graph& graph) const
return CloneBase<MergeLayer>(graph, GetName());
}
-void MergeLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void MergeLayer::ValidateTensorShapesFromInputs()
{
VerifyLayerConnections(2, CHECK_LOCATION());
const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
- VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+ VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
std::vector<TensorShape> inferredShapes = InferOutputShapes({
GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape(),
@@ -42,7 +42,7 @@ void MergeLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInfere
ARMNN_ASSERT(inferredShapes.size() == 1);
- ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "MergeLayer");
+ ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "MergeLayer");
}
std::vector<TensorShape> MergeLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
diff --git a/src/armnn/layers/MergeLayer.hpp b/src/armnn/layers/MergeLayer.hpp
index 3d0cf52c77..07f69004b5 100644
--- a/src/armnn/layers/MergeLayer.hpp
+++ b/src/armnn/layers/MergeLayer.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
@@ -26,8 +26,7 @@ public:
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref MergeLayer.
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
- void ValidateTensorShapesFromInputs(
- ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+ void ValidateTensorShapesFromInputs() override;
/// Infers the output shapes from given input shapes.
/// @param [in] inputShapes The input shapes layer has.
diff --git a/src/armnn/layers/MergerLayer.hpp b/src/armnn/layers/MergerLayer.hpp
index 32710609eb..8309d31b12 100644
--- a/src/armnn/layers/MergerLayer.hpp
+++ b/src/armnn/layers/MergerLayer.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
diff --git a/src/armnn/layers/MinimumLayer.hpp b/src/armnn/layers/MinimumLayer.hpp
index 03ca031828..2db06292fd 100644
--- a/src/armnn/layers/MinimumLayer.hpp
+++ b/src/armnn/layers/MinimumLayer.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
diff --git a/src/armnn/layers/MultiplicationLayer.hpp b/src/armnn/layers/MultiplicationLayer.hpp
index 752765bfdc..692f40784c 100644
--- a/src/armnn/layers/MultiplicationLayer.hpp
+++ b/src/armnn/layers/MultiplicationLayer.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
diff --git a/src/armnn/layers/NormalizationLayer.cpp b/src/armnn/layers/NormalizationLayer.cpp
index e8176cec22..b75bb338cc 100644
--- a/src/armnn/layers/NormalizationLayer.cpp
+++ b/src/armnn/layers/NormalizationLayer.cpp
@@ -29,19 +29,19 @@ NormalizationLayer* NormalizationLayer::Clone(Graph& graph) const
return CloneBase<NormalizationLayer>(graph, m_Param, GetName());
}
-void NormalizationLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void NormalizationLayer::ValidateTensorShapesFromInputs()
{
VerifyLayerConnections(1, CHECK_LOCATION());
const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
- VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+ VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
ARMNN_ASSERT(inferredShapes.size() == 1);
- ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "NormalizationLayer");
+ ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "NormalizationLayer");
}
void NormalizationLayer::Accept(ILayerVisitor& visitor) const
diff --git a/src/armnn/layers/NormalizationLayer.hpp b/src/armnn/layers/NormalizationLayer.hpp
index 25787a8693..00a4435527 100644
--- a/src/armnn/layers/NormalizationLayer.hpp
+++ b/src/armnn/layers/NormalizationLayer.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
@@ -26,8 +26,7 @@ public:
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref NormalizationLayer.
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
- void ValidateTensorShapesFromInputs(
- ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+ void ValidateTensorShapesFromInputs() override;
void Accept(ILayerVisitor& visitor) const override;
diff --git a/src/armnn/layers/OutputLayer.cpp b/src/armnn/layers/OutputLayer.cpp
index db76244ad1..d14337fd11 100644
--- a/src/armnn/layers/OutputLayer.cpp
+++ b/src/armnn/layers/OutputLayer.cpp
@@ -29,9 +29,8 @@ OutputLayer* OutputLayer::Clone(Graph& graph) const
return CloneBase<OutputLayer>(graph, GetBindingId(), GetName());
}
-void OutputLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void OutputLayer::ValidateTensorShapesFromInputs()
{
- IgnoreUnused(shapeInferenceMethod);
// Just validates that the input is connected.
ConditionalThrow<LayerValidationException>(GetInputSlot(0).GetConnection() != nullptr,
diff --git a/src/armnn/layers/OutputLayer.hpp b/src/armnn/layers/OutputLayer.hpp
index 26c5a0a21b..6315e25da1 100644
--- a/src/armnn/layers/OutputLayer.hpp
+++ b/src/armnn/layers/OutputLayer.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
@@ -38,8 +38,7 @@ public:
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref OutputLayer.
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
- void ValidateTensorShapesFromInputs(
- ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+ void ValidateTensorShapesFromInputs() override;
void Accept(ILayerVisitor& visitor) const override;
diff --git a/src/armnn/layers/PadLayer.cpp b/src/armnn/layers/PadLayer.cpp
index 296bfa901e..4fcbc77c7b 100644
--- a/src/armnn/layers/PadLayer.cpp
+++ b/src/armnn/layers/PadLayer.cpp
@@ -36,9 +36,8 @@ PadLayer* PadLayer::Clone(Graph& graph) const
return std::move(layer);
}
-void PadLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void PadLayer::ValidateTensorShapesFromInputs()
{
- IgnoreUnused(shapeInferenceMethod);
return;
}
diff --git a/src/armnn/layers/PadLayer.hpp b/src/armnn/layers/PadLayer.hpp
index a15563d164..16cdbf57d4 100644
--- a/src/armnn/layers/PadLayer.hpp
+++ b/src/armnn/layers/PadLayer.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -27,8 +27,7 @@ public:
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref PadLayer.
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
- void ValidateTensorShapesFromInputs(
- ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+ void ValidateTensorShapesFromInputs() override;
void Accept(ILayerVisitor& visitor) const override;
diff --git a/src/armnn/layers/PermuteLayer.cpp b/src/armnn/layers/PermuteLayer.cpp
index b9380aa44b..3c4d1ee096 100644
--- a/src/armnn/layers/PermuteLayer.cpp
+++ b/src/armnn/layers/PermuteLayer.cpp
@@ -40,19 +40,19 @@ std::vector<TensorShape> PermuteLayer::InferOutputShapes(const std::vector<Tenso
return std::vector<TensorShape> ({armnnUtils::Permuted(inShape, m_Param.m_DimMappings)});
}
-void PermuteLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void PermuteLayer::ValidateTensorShapesFromInputs()
{
VerifyLayerConnections(1, CHECK_LOCATION());
const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
- VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+ VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
ARMNN_ASSERT(inferredShapes.size() == 1);
- ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "PermuteLayer");
+ ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "PermuteLayer");
}
void PermuteLayer::Accept(ILayerVisitor& visitor) const
diff --git a/src/armnn/layers/PermuteLayer.hpp b/src/armnn/layers/PermuteLayer.hpp
index 9af1d9b95f..67be2e1939 100644
--- a/src/armnn/layers/PermuteLayer.hpp
+++ b/src/armnn/layers/PermuteLayer.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
@@ -28,8 +28,7 @@ public:
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref PermuteLayer.
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
- void ValidateTensorShapesFromInputs(
- ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+ void ValidateTensorShapesFromInputs() override;
/// By default returns inputShapes if the number of inputs are equal to number of outputs,
/// otherwise infers the output shapes from given input shapes and layer properties.
diff --git a/src/armnn/layers/Pooling2dLayer.cpp b/src/armnn/layers/Pooling2dLayer.cpp
index bed49ee059..5411695492 100644
--- a/src/armnn/layers/Pooling2dLayer.cpp
+++ b/src/armnn/layers/Pooling2dLayer.cpp
@@ -100,19 +100,19 @@ std::vector<TensorShape> Pooling2dLayer::InferOutputShapes(const std::vector<Ten
return std::vector<TensorShape>({ tensorShape });
}
-void Pooling2dLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void Pooling2dLayer::ValidateTensorShapesFromInputs()
{
VerifyLayerConnections(1, CHECK_LOCATION());
const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
- VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+ VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
ARMNN_ASSERT(inferredShapes.size() == 1);
- ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "Pooling2dLayer");
+ ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "Pooling2dLayer");
}
void Pooling2dLayer::Accept(ILayerVisitor& visitor) const
diff --git a/src/armnn/layers/Pooling2dLayer.hpp b/src/armnn/layers/Pooling2dLayer.hpp
index 2a5703b8e9..90c9a44fbd 100644
--- a/src/armnn/layers/Pooling2dLayer.hpp
+++ b/src/armnn/layers/Pooling2dLayer.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
@@ -26,8 +26,7 @@ public:
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref Pooling2dLayer.
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
- void ValidateTensorShapesFromInputs(
- ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+ void ValidateTensorShapesFromInputs() override;
/// By default returns inputShapes if the number of inputs are equal to number of outputs,
/// otherwise infers the output shapes from given input shapes and layer properties.
diff --git a/src/armnn/layers/PreCompiledLayer.cpp b/src/armnn/layers/PreCompiledLayer.cpp
index e043fac432..afc9877928 100644
--- a/src/armnn/layers/PreCompiledLayer.cpp
+++ b/src/armnn/layers/PreCompiledLayer.cpp
@@ -35,9 +35,8 @@ std::unique_ptr<IWorkload> PreCompiledLayer::CreateWorkload(const armnn::IWorklo
return factory.CreatePreCompiled(descriptor, PrepInfoAndDesc(descriptor));
}
-void PreCompiledLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void PreCompiledLayer::ValidateTensorShapesFromInputs()
{
- IgnoreUnused(shapeInferenceMethod);
// NOTE: since the PreCompiledLayer is an internal layer created from a valid SubgraphView,
// we do not need to validate its input shapes
diff --git a/src/armnn/layers/PreCompiledLayer.hpp b/src/armnn/layers/PreCompiledLayer.hpp
index a4b1c78f12..a4851c778f 100644
--- a/src/armnn/layers/PreCompiledLayer.hpp
+++ b/src/armnn/layers/PreCompiledLayer.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -29,8 +29,7 @@ public:
PreCompiledLayer* Clone(Graph &graph) const override;
- void ValidateTensorShapesFromInputs(
- ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+ void ValidateTensorShapesFromInputs() override;
void SetPreCompiledObject(PreCompiledObjectPtr preCompiledObject);
diff --git a/src/armnn/layers/PreluLayer.cpp b/src/armnn/layers/PreluLayer.cpp
index fcf50f2590..a57aa85147 100644
--- a/src/armnn/layers/PreluLayer.cpp
+++ b/src/armnn/layers/PreluLayer.cpp
@@ -94,13 +94,13 @@ std::vector<TensorShape> PreluLayer::InferOutputShapes(const std::vector<TensorS
return { outputShape };
}
-void PreluLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void PreluLayer::ValidateTensorShapesFromInputs()
{
VerifyLayerConnections(2, CHECK_LOCATION());
const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
- VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+ VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
std::vector<TensorShape> inferredShapes = InferOutputShapes(
{
@@ -110,7 +110,7 @@ void PreluLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInfere
ARMNN_ASSERT(inferredShapes.size() == 1);
- ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "PreluLayer");
+ ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "PreluLayer");
}
void PreluLayer::Accept(ILayerVisitor& visitor) const
diff --git a/src/armnn/layers/PreluLayer.hpp b/src/armnn/layers/PreluLayer.hpp
index 6febdf9f39..511be29d17 100644
--- a/src/armnn/layers/PreluLayer.hpp
+++ b/src/armnn/layers/PreluLayer.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -33,8 +33,7 @@ public:
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref PreluLayer.
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
- void ValidateTensorShapesFromInputs(
- ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+ void ValidateTensorShapesFromInputs() override;
void Accept(ILayerVisitor& visitor) const override;
diff --git a/src/armnn/layers/QLstmLayer.cpp b/src/armnn/layers/QLstmLayer.cpp
index 321d985fed..4d0d57cc49 100644
--- a/src/armnn/layers/QLstmLayer.cpp
+++ b/src/armnn/layers/QLstmLayer.cpp
@@ -165,13 +165,13 @@ std::vector<TensorShape> QLstmLayer::InferOutputShapes(const std::vector<TensorS
return outShapes;
}
-void QLstmLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void QLstmLayer::ValidateTensorShapesFromInputs()
{
VerifyLayerConnections(3, CHECK_LOCATION());
const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
- VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+ VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
auto inferredShapes = InferOutputShapes(
{
@@ -211,7 +211,7 @@ void QLstmLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInfere
ARMNN_ASSERT_MSG(m_CifgParameters.m_InputGateBias != nullptr,
"QLstmLayer: m_CifgParameters.m_InputGateBias should not be null.");
- ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "QLstmLayer");
+ ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "QLstmLayer");
}
else
{
@@ -223,7 +223,7 @@ void QLstmLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInfere
ARMNN_ASSERT_MSG(m_CifgParameters.m_InputGateBias == nullptr,
"QLstmLayer: m_CifgParameters.m_InputGateBias should not have a value when CIFG is enabled.");
- ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "QLstmLayer");
+ ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "QLstmLayer");
}
if (m_Param.m_ProjectionEnabled)
@@ -247,9 +247,9 @@ void QLstmLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInfere
}
ValidateAndCopyShape(
- GetOutputSlot(1).GetTensorInfo().GetShape(), inferredShapes[1], shapeInferenceMethod, "QLstmLayer", 1);
+ GetOutputSlot(1).GetTensorInfo().GetShape(), inferredShapes[1], m_ShapeInferenceMethod, "QLstmLayer", 1);
ValidateAndCopyShape(
- GetOutputSlot(2).GetTensorInfo().GetShape(), inferredShapes[2], shapeInferenceMethod, "QLstmLayer", 2);
+ GetOutputSlot(2).GetTensorInfo().GetShape(), inferredShapes[2], m_ShapeInferenceMethod, "QLstmLayer", 2);
if (m_Param.m_LayerNormEnabled)
{
diff --git a/src/armnn/layers/QLstmLayer.hpp b/src/armnn/layers/QLstmLayer.hpp
index 017893319b..5757ef6559 100644
--- a/src/armnn/layers/QLstmLayer.hpp
+++ b/src/armnn/layers/QLstmLayer.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2020 Arm Ltd. All rights reserved.
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
@@ -99,8 +99,7 @@ public:
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref QLstmLayer.
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
- void ValidateTensorShapesFromInputs(
- ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+ void ValidateTensorShapesFromInputs() override;
/// By default returns inputShapes if the number of inputs are equal to number of outputs,
/// otherwise infers the output shapes from given input shapes and layer properties.
diff --git a/src/armnn/layers/QuantizeLayer.cpp b/src/armnn/layers/QuantizeLayer.cpp
index 990d2b4b88..aad6dd87bf 100644
--- a/src/armnn/layers/QuantizeLayer.cpp
+++ b/src/armnn/layers/QuantizeLayer.cpp
@@ -29,17 +29,17 @@ Layer* QuantizeLayer::Clone(Graph& graph) const
return clone;
}
-void QuantizeLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void QuantizeLayer::ValidateTensorShapesFromInputs()
{
VerifyLayerConnections(1, CHECK_LOCATION());
const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
- VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+ VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
- ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "QuantizeLayer");
+ ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "QuantizeLayer");
}
void QuantizeLayer::Accept(ILayerVisitor& visitor) const
diff --git a/src/armnn/layers/QuantizeLayer.hpp b/src/armnn/layers/QuantizeLayer.hpp
index a223f59470..2f331a493c 100644
--- a/src/armnn/layers/QuantizeLayer.hpp
+++ b/src/armnn/layers/QuantizeLayer.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -21,8 +21,7 @@ public:
Layer* Clone(Graph& graph) const override;
- void ValidateTensorShapesFromInputs(
- ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+ void ValidateTensorShapesFromInputs() override;
void Accept(ILayerVisitor& visitor) const override;
diff --git a/src/armnn/layers/QuantizedLstmLayer.cpp b/src/armnn/layers/QuantizedLstmLayer.cpp
index 432d50dc26..ad227618a9 100644
--- a/src/armnn/layers/QuantizedLstmLayer.cpp
+++ b/src/armnn/layers/QuantizedLstmLayer.cpp
@@ -91,13 +91,13 @@ std::vector<TensorShape> QuantizedLstmLayer::InferOutputShapes(const std::vector
return outShapes;
}
-void QuantizedLstmLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void QuantizedLstmLayer::ValidateTensorShapesFromInputs()
{
VerifyLayerConnections(3, CHECK_LOCATION());
const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
- VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+ VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
auto inferredShapes = InferOutputShapes(
{
@@ -137,11 +137,11 @@ void QuantizedLstmLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod sha
"QuantizedLstmLayer: m_QuantizedLstmParameters.m_OutputGateBias should not be null.");
// Check output TensorShape(s) match inferred shape
- ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "QuantizedLstmLayer");
+ ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "QuantizedLstmLayer");
ValidateAndCopyShape(GetOutputSlot(1).GetTensorInfo().GetShape(),
inferredShapes[1],
- shapeInferenceMethod,
+ m_ShapeInferenceMethod,
"QuantizedLstmLayer",
1);
}
diff --git a/src/armnn/layers/QuantizedLstmLayer.hpp b/src/armnn/layers/QuantizedLstmLayer.hpp
index 1353a06d9f..bfe86a4629 100644
--- a/src/armnn/layers/QuantizedLstmLayer.hpp
+++ b/src/armnn/layers/QuantizedLstmLayer.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
@@ -61,8 +61,7 @@ public:
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref QuantizedLstmLayer.
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
- void ValidateTensorShapesFromInputs(
- ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+ void ValidateTensorShapesFromInputs() override;
/// By default returns inputShapes if the number of inputs are equal to number of outputs,
/// otherwise infers the output shapes from given input shapes and layer properties.
diff --git a/src/armnn/layers/RankLayer.cpp b/src/armnn/layers/RankLayer.cpp
index 62f77df16f..2e70134126 100644
--- a/src/armnn/layers/RankLayer.cpp
+++ b/src/armnn/layers/RankLayer.cpp
@@ -29,16 +29,15 @@ Layer* RankLayer::Clone(Graph& graph) const
return clone;
}
-void RankLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void RankLayer::ValidateTensorShapesFromInputs()
{
- IgnoreUnused(shapeInferenceMethod);
VerifyLayerConnections(1, CHECK_LOCATION());
const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
const TensorShape inferredShape = TensorShape(Dimensionality::Scalar);
- VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
- ValidateAndCopyShape(outputShape, inferredShape, shapeInferenceMethod, "RankLayer");
+ VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
+ ValidateAndCopyShape(outputShape, inferredShape, m_ShapeInferenceMethod, "RankLayer");
}
void RankLayer::Accept(ILayerVisitor& visitor) const
{
diff --git a/src/armnn/layers/RankLayer.hpp b/src/armnn/layers/RankLayer.hpp
index e160d60354..f4f1ec9e66 100644
--- a/src/armnn/layers/RankLayer.hpp
+++ b/src/armnn/layers/RankLayer.hpp
@@ -20,7 +20,7 @@ class RankLayer : public Layer
Layer* Clone(Graph& graph) const override;
- void ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) override;
+ void ValidateTensorShapesFromInputs() override;
void Accept(ILayerVisitor& visitor) const override;
diff --git a/src/armnn/layers/ReshapeLayer.cpp b/src/armnn/layers/ReshapeLayer.cpp
index 3f955a57b0..526531604b 100644
--- a/src/armnn/layers/ReshapeLayer.cpp
+++ b/src/armnn/layers/ReshapeLayer.cpp
@@ -36,19 +36,19 @@ std::vector<TensorShape> ReshapeLayer::InferOutputShapes(const std::vector<Tenso
return std::vector<TensorShape>({ m_Param.m_TargetShape });
}
-void ReshapeLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void ReshapeLayer::ValidateTensorShapesFromInputs()
{
VerifyLayerConnections(1, CHECK_LOCATION());
const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
- VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+ VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
ARMNN_ASSERT(inferredShapes.size() == 1);
- ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "ReshapeLayer");
+ ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "ReshapeLayer");
}
void ReshapeLayer::Accept(ILayerVisitor& visitor) const
diff --git a/src/armnn/layers/ReshapeLayer.hpp b/src/armnn/layers/ReshapeLayer.hpp
index 4f0300a676..78335e6a1a 100644
--- a/src/armnn/layers/ReshapeLayer.hpp
+++ b/src/armnn/layers/ReshapeLayer.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
@@ -28,8 +28,7 @@ public:
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref ReshapeLayer.
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
- void ValidateTensorShapesFromInputs(
- ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+ void ValidateTensorShapesFromInputs() override;
/// By default returns inputShapes if the number of inputs are equal to number of outputs,
/// otherwise infers the output shapes from given input shapes and layer properties.
diff --git a/src/armnn/layers/ResizeLayer.cpp b/src/armnn/layers/ResizeLayer.cpp
index ab8430ac00..53af5f9524 100644
--- a/src/armnn/layers/ResizeLayer.cpp
+++ b/src/armnn/layers/ResizeLayer.cpp
@@ -58,19 +58,19 @@ std::vector<TensorShape> ResizeLayer::InferOutputShapes(const std::vector<Tensor
return std::vector<TensorShape>({ tensorShape });
}
-void ResizeLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void ResizeLayer::ValidateTensorShapesFromInputs()
{
VerifyLayerConnections(1, CHECK_LOCATION());
const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
- VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+ VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
ARMNN_ASSERT(inferredShapes.size() == 1);
- ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "ResizeLayer");
+ ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "ResizeLayer");
}
void ResizeLayer::Accept(ILayerVisitor& visitor) const
diff --git a/src/armnn/layers/ResizeLayer.hpp b/src/armnn/layers/ResizeLayer.hpp
index 0adda942cf..34625857f8 100644
--- a/src/armnn/layers/ResizeLayer.hpp
+++ b/src/armnn/layers/ResizeLayer.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
@@ -26,8 +26,7 @@ public:
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref ResizeLayer.
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
- void ValidateTensorShapesFromInputs(
- ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+ void ValidateTensorShapesFromInputs() override;
/// By default returns inputShapes if the number of inputs are equal to number of outputs,
/// otherwise infers the output shapes from given input shapes and layer properties.
diff --git a/src/armnn/layers/RsqrtLayer.cpp b/src/armnn/layers/RsqrtLayer.cpp
index a68b5a4766..e85d865675 100644
--- a/src/armnn/layers/RsqrtLayer.cpp
+++ b/src/armnn/layers/RsqrtLayer.cpp
@@ -30,19 +30,19 @@ RsqrtLayer* RsqrtLayer::Clone(Graph& graph) const
return CloneBase<RsqrtLayer>(graph, GetName());
}
-void RsqrtLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void RsqrtLayer::ValidateTensorShapesFromInputs()
{
VerifyLayerConnections(1, CHECK_LOCATION());
const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
- VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+ VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
ARMNN_ASSERT(inferredShapes.size() == 1);
- ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "RsqrtLayer");
+ ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "RsqrtLayer");
}
void RsqrtLayer::Accept(ILayerVisitor& visitor) const
diff --git a/src/armnn/layers/RsqrtLayer.hpp b/src/armnn/layers/RsqrtLayer.hpp
index d4183ef70e..4fcbf72120 100644
--- a/src/armnn/layers/RsqrtLayer.hpp
+++ b/src/armnn/layers/RsqrtLayer.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -26,8 +26,7 @@ public:
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref RsqrtLayer.
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
- void ValidateTensorShapesFromInputs(
- ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+ void ValidateTensorShapesFromInputs() override;
void Accept(ILayerVisitor& visitor) const override;
diff --git a/src/armnn/layers/SliceLayer.cpp b/src/armnn/layers/SliceLayer.cpp
index a31f6037e1..0f1d4386d7 100644
--- a/src/armnn/layers/SliceLayer.cpp
+++ b/src/armnn/layers/SliceLayer.cpp
@@ -33,19 +33,19 @@ SliceLayer* SliceLayer::Clone(Graph& graph) const
return CloneBase<SliceLayer>(graph, m_Param, GetName());
}
-void SliceLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void SliceLayer::ValidateTensorShapesFromInputs()
{
VerifyLayerConnections(1, CHECK_LOCATION());
const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
- VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+ VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
ARMNN_ASSERT(inferredShapes.size() == 1);
- ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "SliceLayer");
+ ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "SliceLayer");
}
std::vector<TensorShape> SliceLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
diff --git a/src/armnn/layers/SliceLayer.hpp b/src/armnn/layers/SliceLayer.hpp
index 3d9a7feee5..0505a056c5 100644
--- a/src/armnn/layers/SliceLayer.hpp
+++ b/src/armnn/layers/SliceLayer.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2019 Arm Ltd. All rights reserved.
+// Copyright © 2019 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -26,8 +26,7 @@ public:
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref SliceLayer.
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
- void ValidateTensorShapesFromInputs(
- ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+ void ValidateTensorShapesFromInputs() override;
/// By default returns inputShapes if the number of inputs are equal to number of outputs,
/// otherwise infers the output shapes from given input shapes and layer properties.
diff --git a/src/armnn/layers/SoftmaxLayer.cpp b/src/armnn/layers/SoftmaxLayer.cpp
index 3da2cb2b00..32d3a1117c 100644
--- a/src/armnn/layers/SoftmaxLayer.cpp
+++ b/src/armnn/layers/SoftmaxLayer.cpp
@@ -29,19 +29,19 @@ SoftmaxLayer* SoftmaxLayer::Clone(Graph& graph) const
return CloneBase<SoftmaxLayer>(graph, m_Param, GetName());
}
-void SoftmaxLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void SoftmaxLayer::ValidateTensorShapesFromInputs()
{
VerifyLayerConnections(1, CHECK_LOCATION());
const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
- VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+ VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
ARMNN_ASSERT(inferredShapes.size() == 1);
- ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "SoftmaxLayer");
+ ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "SoftmaxLayer");
}
void SoftmaxLayer::Accept(ILayerVisitor& visitor) const
diff --git a/src/armnn/layers/SoftmaxLayer.hpp b/src/armnn/layers/SoftmaxLayer.hpp
index 84aae85000..cbdd7c58f9 100644
--- a/src/armnn/layers/SoftmaxLayer.hpp
+++ b/src/armnn/layers/SoftmaxLayer.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
@@ -26,8 +26,7 @@ public:
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref SoftmaxLayer.
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
- void ValidateTensorShapesFromInputs(
- ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+ void ValidateTensorShapesFromInputs() override;
void Accept(ILayerVisitor& visitor) const override;
diff --git a/src/armnn/layers/SpaceToBatchNdLayer.cpp b/src/armnn/layers/SpaceToBatchNdLayer.cpp
index 4eba06691c..decb6e61f0 100644
--- a/src/armnn/layers/SpaceToBatchNdLayer.cpp
+++ b/src/armnn/layers/SpaceToBatchNdLayer.cpp
@@ -66,20 +66,20 @@ std::vector<TensorShape> SpaceToBatchNdLayer::InferOutputShapes(const std::vecto
return std::vector<TensorShape>({ outputShape });
}
-void SpaceToBatchNdLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void SpaceToBatchNdLayer::ValidateTensorShapesFromInputs()
{
VerifyLayerConnections(1, CHECK_LOCATION());
const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
- VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+ VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
std::vector<TensorShape> inferredShapes = InferOutputShapes({
GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
ARMNN_ASSERT(inferredShapes.size() == 1);
- ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "SpaceToBatchNdLayer");
+ ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "SpaceToBatchNdLayer");
}
void SpaceToBatchNdLayer::Accept(ILayerVisitor& visitor) const
diff --git a/src/armnn/layers/SpaceToBatchNdLayer.hpp b/src/armnn/layers/SpaceToBatchNdLayer.hpp
index 707017b5a8..28857d8aba 100644
--- a/src/armnn/layers/SpaceToBatchNdLayer.hpp
+++ b/src/armnn/layers/SpaceToBatchNdLayer.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -33,8 +33,7 @@ public:
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref SpaceToBatchNdLayer.
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
- void ValidateTensorShapesFromInputs(
- ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+ void ValidateTensorShapesFromInputs() override;
void Accept(ILayerVisitor& visitor) const override;
diff --git a/src/armnn/layers/SpaceToDepthLayer.cpp b/src/armnn/layers/SpaceToDepthLayer.cpp
index 4695d812bc..72d82308d7 100644
--- a/src/armnn/layers/SpaceToDepthLayer.cpp
+++ b/src/armnn/layers/SpaceToDepthLayer.cpp
@@ -59,20 +59,20 @@ std::vector<TensorShape> SpaceToDepthLayer::InferOutputShapes(const std::vector<
return std::vector<TensorShape>({ outputShape });
}
-void SpaceToDepthLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void SpaceToDepthLayer::ValidateTensorShapesFromInputs()
{
VerifyLayerConnections(1, CHECK_LOCATION());
const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
- VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+ VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
std::vector<TensorShape> inferredShapes = InferOutputShapes({
GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
ARMNN_ASSERT(inferredShapes.size() == 1);
- ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "SpaceToDepthLayer");
+ ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "SpaceToDepthLayer");
}
void SpaceToDepthLayer::Accept(ILayerVisitor& visitor) const
diff --git a/src/armnn/layers/SpaceToDepthLayer.hpp b/src/armnn/layers/SpaceToDepthLayer.hpp
index ca0d804320..a8bc1089a3 100644
--- a/src/armnn/layers/SpaceToDepthLayer.hpp
+++ b/src/armnn/layers/SpaceToDepthLayer.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -33,8 +33,7 @@ public:
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref SpaceToDepthLayer.
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
- void ValidateTensorShapesFromInputs(
- ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+ void ValidateTensorShapesFromInputs() override;
void Accept(ILayerVisitor& visitor) const override;
diff --git a/src/armnn/layers/SplitterLayer.cpp b/src/armnn/layers/SplitterLayer.cpp
index 60dc9611e8..2d469b0bc9 100644
--- a/src/armnn/layers/SplitterLayer.cpp
+++ b/src/armnn/layers/SplitterLayer.cpp
@@ -139,11 +139,11 @@ std::vector<TensorShape> SplitterLayer::InferOutputShapes(const std::vector<Tens
return outShapes;
}
-void SplitterLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void SplitterLayer::ValidateTensorShapesFromInputs()
{
std::for_each(BeginOutputSlots(), EndOutputSlots(), [&](OutputSlot& outputSlot)
{
- VerifyShapeInferenceType(outputSlot.GetTensorInfo().GetShape(), shapeInferenceMethod);
+ VerifyShapeInferenceType(outputSlot.GetTensorInfo().GetShape(), m_ShapeInferenceMethod);
});
std::vector<TensorShape> views;
@@ -161,7 +161,7 @@ void SplitterLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInf
{
ValidateAndCopyShape(GetOutputSlot(viewIdx).GetTensorInfo().GetShape(),
inferredShapes[viewIdx],
- shapeInferenceMethod,
+ m_ShapeInferenceMethod,
"SplitterLayer",
viewIdx);
}
diff --git a/src/armnn/layers/SplitterLayer.hpp b/src/armnn/layers/SplitterLayer.hpp
index 39aab90853..bd20890162 100644
--- a/src/armnn/layers/SplitterLayer.hpp
+++ b/src/armnn/layers/SplitterLayer.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
@@ -36,8 +36,6 @@ public:
    /// Check if the input tensor shape(s)
    /// will lead to a valid configuration of @ref SplitterLayer.
-    /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
- void ValidateTensorShapesFromInputs(
- ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+ void ValidateTensorShapesFromInputs() override;
/// By default returns inputShapes if the number of inputs are equal to number of outputs,
/// otherwise infers the output shapes from given input shapes and layer properties.
diff --git a/src/armnn/layers/StackLayer.cpp b/src/armnn/layers/StackLayer.cpp
index f5d761bdc5..715057615d 100644
--- a/src/armnn/layers/StackLayer.cpp
+++ b/src/armnn/layers/StackLayer.cpp
@@ -58,7 +58,7 @@ std::vector<TensorShape> StackLayer::InferOutputShapes(const std::vector<TensorS
return std::vector<TensorShape>({ targetShape });
}
-void StackLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void StackLayer::ValidateTensorShapesFromInputs()
{
// Validates Stack layer.
ConditionalThrowIfNotEqual<LayerValidationException>(
@@ -70,7 +70,7 @@ void StackLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInfere
const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
- VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+ VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
// Constructs and validates input shapes
std::vector<TensorShape> inputShapes;
@@ -90,7 +90,7 @@ void StackLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInfere
ARMNN_ASSERT(inferredShapes.size() == 1);
- ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "StackLayer");
+ ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "StackLayer");
}
void StackLayer::Accept(ILayerVisitor& visitor) const
diff --git a/src/armnn/layers/StackLayer.hpp b/src/armnn/layers/StackLayer.hpp
index 5e937db43a..3d05da0bf6 100644
--- a/src/armnn/layers/StackLayer.hpp
+++ b/src/armnn/layers/StackLayer.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
@@ -26,8 +26,6 @@ public:
    /// Check if the input tensor shape(s)
    /// will lead to a valid configuration of @ref StackLayer.
-    /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
- void ValidateTensorShapesFromInputs(
- ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+ void ValidateTensorShapesFromInputs() override;
/// By default returns inputShapes if the number of inputs are equal to number of outputs,
/// otherwise infers the output shapes from given input shapes and layer properties.
diff --git a/src/armnn/layers/StandInLayer.cpp b/src/armnn/layers/StandInLayer.cpp
index 623f4a5b3f..6281f3e51e 100644
--- a/src/armnn/layers/StandInLayer.cpp
+++ b/src/armnn/layers/StandInLayer.cpp
@@ -34,9 +34,8 @@ std::vector<TensorShape> StandInLayer::InferOutputShapes(const std::vector<Tenso
throw Exception("Stand in layer does not support infering output shapes");
}
-void StandInLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void StandInLayer::ValidateTensorShapesFromInputs()
{
- IgnoreUnused(shapeInferenceMethod);
// Cannot validate this layer since no implementation details can be known by the framework
// so do nothing here.
diff --git a/src/armnn/layers/StandInLayer.hpp b/src/armnn/layers/StandInLayer.hpp
index a7e4a2c400..2864753efa 100644
--- a/src/armnn/layers/StandInLayer.hpp
+++ b/src/armnn/layers/StandInLayer.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2019 Arm Ltd. All rights reserved.
+// Copyright © 2019 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -27,8 +27,6 @@ public:
    /// Check if the input tensor shape(s)
    /// Does nothing since cannot validate any properties of this layer.
-    /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
- void ValidateTensorShapesFromInputs(
- ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+ void ValidateTensorShapesFromInputs() override;
/// Empty implementation that throws Exception if called.
/// otherwise infers the output shapes from given input shapes and layer properties.
diff --git a/src/armnn/layers/StridedSliceLayer.cpp b/src/armnn/layers/StridedSliceLayer.cpp
index fc9df856ec..9b1706b335 100644
--- a/src/armnn/layers/StridedSliceLayer.cpp
+++ b/src/armnn/layers/StridedSliceLayer.cpp
@@ -94,19 +94,19 @@ std::vector<TensorShape> StridedSliceLayer::InferOutputShapes(
TensorShape(boost::numeric_cast<unsigned int>(outputShape.size()), &outputShape[0]) });
}
-void StridedSliceLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void StridedSliceLayer::ValidateTensorShapesFromInputs()
{
VerifyLayerConnections(1, CHECK_LOCATION());
const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
- VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+ VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
auto inferredShapes = InferOutputShapes({GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape()});
ARMNN_ASSERT(inferredShapes.size() == 1);
- ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "StridedSliceLayer");
+ ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "StridedSliceLayer");
}
void StridedSliceLayer::Accept(ILayerVisitor& visitor) const
diff --git a/src/armnn/layers/StridedSliceLayer.hpp b/src/armnn/layers/StridedSliceLayer.hpp
index f9ba7e2921..35ac3709da 100644
--- a/src/armnn/layers/StridedSliceLayer.hpp
+++ b/src/armnn/layers/StridedSliceLayer.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
@@ -32,8 +32,6 @@ public:
    /// Check if the input tensor shape(s)
    /// will lead to a valid configuration of @ref StridedSliceLayer.
-    /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
- void ValidateTensorShapesFromInputs(
- ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+ void ValidateTensorShapesFromInputs() override;
void Accept(ILayerVisitor& visitor) const override;
diff --git a/src/armnn/layers/SubtractionLayer.hpp b/src/armnn/layers/SubtractionLayer.hpp
index 21619f890d..527b50bcad 100644
--- a/src/armnn/layers/SubtractionLayer.hpp
+++ b/src/armnn/layers/SubtractionLayer.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
diff --git a/src/armnn/layers/SwitchLayer.cpp b/src/armnn/layers/SwitchLayer.cpp
index b763b0804c..d905f5248a 100644
--- a/src/armnn/layers/SwitchLayer.cpp
+++ b/src/armnn/layers/SwitchLayer.cpp
@@ -27,13 +27,13 @@ SwitchLayer* SwitchLayer::Clone(Graph& graph) const
return CloneBase<SwitchLayer>(graph, GetName());
}
-void SwitchLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void SwitchLayer::ValidateTensorShapesFromInputs()
{
VerifyLayerConnections(2, CHECK_LOCATION());
const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
- VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+ VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
ARMNN_ASSERT_MSG(GetNumOutputSlots() == 2, "SwitchLayer: The layer should return 2 outputs.");
@@ -44,10 +44,10 @@ void SwitchLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInfer
ARMNN_ASSERT(inferredShapes.size() == 2);
- ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "SwitchLayer");
+ ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "SwitchLayer");
ValidateAndCopyShape(
- GetOutputSlot(1).GetTensorInfo().GetShape(), inferredShapes[1], shapeInferenceMethod, "SwitchLayer", 1);
+ GetOutputSlot(1).GetTensorInfo().GetShape(), inferredShapes[1], m_ShapeInferenceMethod, "SwitchLayer", 1);
}
void SwitchLayer::Accept(ILayerVisitor& visitor) const
diff --git a/src/armnn/layers/SwitchLayer.hpp b/src/armnn/layers/SwitchLayer.hpp
index 70223487b9..025f379c99 100644
--- a/src/armnn/layers/SwitchLayer.hpp
+++ b/src/armnn/layers/SwitchLayer.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
@@ -26,8 +26,6 @@ public:
    /// Check if the input tensor shape(s)
    /// will lead to a valid configuration of @ref SwitchLayer.
-    /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
- void ValidateTensorShapesFromInputs(
- ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+ void ValidateTensorShapesFromInputs() override;
void Accept(ILayerVisitor& visitor) const override;
diff --git a/src/armnn/layers/TransposeConvolution2dLayer.cpp b/src/armnn/layers/TransposeConvolution2dLayer.cpp
index 7074be9659..92873899b7 100644
--- a/src/armnn/layers/TransposeConvolution2dLayer.cpp
+++ b/src/armnn/layers/TransposeConvolution2dLayer.cpp
@@ -101,13 +101,13 @@ std::vector<TensorShape> TransposeConvolution2dLayer::InferOutputShapes(
return std::vector<TensorShape>({ tensorShape });
}
-void TransposeConvolution2dLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void TransposeConvolution2dLayer::ValidateTensorShapesFromInputs()
{
VerifyLayerConnections(1, CHECK_LOCATION());
const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
- VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+ VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
ARMNN_ASSERT_MSG(m_Weight != nullptr, "TransposeConvolution2dLayer: Weight data cannot be null.");
@@ -127,7 +127,7 @@ void TransposeConvolution2dLayer::ValidateTensorShapesFromInputs(ShapeInferenceM
ARMNN_ASSERT(expectedOutputShape.size() == 1);
- ValidateAndCopyShape(outputShape, expectedOutputShape[0], shapeInferenceMethod, "TransposeConvolution2dLayer");
+ ValidateAndCopyShape(outputShape, expectedOutputShape[0], m_ShapeInferenceMethod, "TransposeConvolution2dLayer");
}
Layer::ConstantTensors TransposeConvolution2dLayer::GetConstantTensorsByRef()
diff --git a/src/armnn/layers/TransposeConvolution2dLayer.hpp b/src/armnn/layers/TransposeConvolution2dLayer.hpp
index ecdf7dc1a6..1ee984d231 100644
--- a/src/armnn/layers/TransposeConvolution2dLayer.hpp
+++ b/src/armnn/layers/TransposeConvolution2dLayer.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
@@ -33,8 +33,6 @@ public:
    /// Check if the input tensor shape(s)
    /// will lead to a valid configuration of @ref TransposeConvolution2dLayer.
-    /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
- void ValidateTensorShapesFromInputs(
- ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+ void ValidateTensorShapesFromInputs() override;
/// Infers the output shapes from given input shapes and layer properties.
/// @param [in] inputShapes The input shapes the layer has.
diff --git a/src/armnn/layers/TransposeLayer.cpp b/src/armnn/layers/TransposeLayer.cpp
index 7dfb003019..61e6863304 100644
--- a/src/armnn/layers/TransposeLayer.cpp
+++ b/src/armnn/layers/TransposeLayer.cpp
@@ -40,19 +40,19 @@ std::vector<TensorShape> TransposeLayer::InferOutputShapes(const std::vector<Ten
return std::vector<TensorShape> ({armnnUtils::TransposeTensorShape(inShape, m_Param.m_DimMappings)});
}
-void TransposeLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void TransposeLayer::ValidateTensorShapesFromInputs()
{
VerifyLayerConnections(1, CHECK_LOCATION());
const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
- VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+ VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
ARMNN_ASSERT(inferredShapes.size() == 1);
- ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "TransposeLayer");
+ ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "TransposeLayer");
}
void TransposeLayer::Accept(ILayerVisitor& visitor) const
diff --git a/src/armnn/layers/TransposeLayer.hpp b/src/armnn/layers/TransposeLayer.hpp
index 3e94a9f4d8..a4245242ed 100644
--- a/src/armnn/layers/TransposeLayer.hpp
+++ b/src/armnn/layers/TransposeLayer.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2020 Arm Ltd. All rights reserved.
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
@@ -27,8 +27,6 @@ public:
    /// Check if the input tensor shape(s)
    /// will lead to a valid configuration of @ref TransposeLayer.
-    /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
- void ValidateTensorShapesFromInputs(
- ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+ void ValidateTensorShapesFromInputs() override;
/// Infers the output shapes from given input shapes and the permutation vector.
/// @param [in] inputShapes The input shapes layer has.
diff --git a/src/armnn/test/ShapeInferenceTests.cpp b/src/armnn/test/ShapeInferenceTests.cpp
index 21df1f0e13..25b0feaded 100644
--- a/src/armnn/test/ShapeInferenceTests.cpp
+++ b/src/armnn/test/ShapeInferenceTests.cpp
@@ -60,7 +60,7 @@ void RunShapeInferenceTest(LayerT* const layer,
const unsigned int outputSize = layer->GetNumOutputSlots();
- const auto runTestWithMask = [&](const bool maskPermutations[], ShapeInferenceMethod shapeInferenceMethod)
+ const auto runTestWithMask = [&](const bool maskPermutations[])
{
for (unsigned int i = 0; i < outputSize; ++i)
{
@@ -68,7 +68,7 @@ void RunShapeInferenceTest(LayerT* const layer,
DataType::Float32});
}
- layer->ValidateTensorShapesFromInputs(shapeInferenceMethod);
+ layer->ValidateTensorShapesFromInputs();
for (unsigned int i = 0; i < outputSize; ++i)
{
@@ -82,10 +82,12 @@ void RunShapeInferenceTest(LayerT* const layer,
layer->GetOutputSlot(j).SetTensorInfo({TensorShape(Dimensionality::NotSpecified), DataType::Float32});
}
- BOOST_CHECK_THROW(
- layer->ValidateTensorShapesFromInputs(ShapeInferenceMethod::ValidateOnly), LayerValidationException);
+ layer->SetShapeInferenceMethod(ShapeInferenceMethod::ValidateOnly);
- layer->ValidateTensorShapesFromInputs(ShapeInferenceMethod::InferAndValidate);
+ BOOST_CHECK_THROW(layer->ValidateTensorShapesFromInputs(), LayerValidationException);
+
+ layer->SetShapeInferenceMethod(ShapeInferenceMethod::InferAndValidate);
+ layer->ValidateTensorShapesFromInputs();
for (unsigned int i = 0; i < outputSize; ++i)
{
@@ -93,16 +95,13 @@ void RunShapeInferenceTest(LayerT* const layer,
}
// Test inference with Dimensionality::Specified and various combinations of dimensions of unknown size
- for (unsigned int i = 0; i <= numDimensions[0]; ++i)
+ for (unsigned int i = 0; i < numDimensions[0]; ++i)
{
- runTestWithMask(maskPermutations[i], ShapeInferenceMethod::InferAndValidate);
+ runTestWithMask(maskPermutations[i]);
}
// maskPermutations[5] equates to all dimensions being known
- runTestWithMask(maskPermutations[5], ShapeInferenceMethod::ValidateOnly);
-
- BOOST_CHECK_THROW(
- runTestWithMask(maskPermutations[5], ShapeInferenceMethod::InferAndValidate), LayerValidationException);
+ runTestWithMask(maskPermutations[5]);
}
template<typename LayerT, typename... Args>
@@ -110,13 +109,68 @@ void CreateGraphAndRunTest(const std::vector<TensorShape>& inputShapes,
const std::vector<std::initializer_list<unsigned int>> dimensionSizeLists,
Args &&... args)
{
- Graph graph;
+ Graph graph(true);
auto layer = BuildGraph<LayerT>(&graph, inputShapes, std::forward<Args>(args)...);
RunShapeInferenceTest<LayerT>(layer, dimensionSizeLists);
}
+BOOST_AUTO_TEST_CASE(NetworkOptionsTest)
+{
+ BackendOptions ShapeInferenceMethodOption("ShapeInferenceMethod",
+ {
+ { "InferAndValidate", true }
+ });
+
+ INetworkPtr network = INetwork::Create({ShapeInferenceMethodOption});
+ TensorInfo tensorInfo({ 5, 7, 6, 2 }, DataType::Float32);
+
+ auto inputLayer = network->AddInputLayer(1, "inputLayer");
+ inputLayer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
+
+ ActivationDescriptor descriptor;
+ descriptor.m_Function = ActivationFunction::Abs;
+ auto activationLayer = network->AddActivationLayer(descriptor, "activation");
+
+ inputLayer->GetOutputSlot(0).Connect(activationLayer->GetInputSlot(0));
+ activationLayer->GetOutputSlot(0).SetTensorInfo({TensorShape{Dimensionality::NotSpecified}, DataType::Float32});
+
+ BOOST_CHECK_NO_THROW(activationLayer->GetOutputSlot(0).IsTensorInfoSet());
+
+ BOOST_CHECK(activationLayer->GetOutputSlot(0).GetTensorInfo() == tensorInfo);
+
+
+ ShapeInferenceMethodOption = BackendOptions("ShapeInferenceMethod",
+ {
+ { "InferAndValidate", false }
+ });
+
+ network = INetwork::Create({ShapeInferenceMethodOption});
+
+ inputLayer = network->AddInputLayer(1, "inputLayer");
+ inputLayer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
+
+ activationLayer = network->AddActivationLayer(descriptor, "activation");
+
+ inputLayer->GetOutputSlot(0).Connect(activationLayer->GetInputSlot(0));
+ activationLayer->GetOutputSlot(0).SetTensorInfo({TensorShape{Dimensionality::NotSpecified}, DataType::Float32});
+
+ BOOST_CHECK_NO_THROW(activationLayer->GetOutputSlot(0).IsTensorInfoSet());
+
+ network = INetwork::Create();
+
+ inputLayer = network->AddInputLayer(1, "inputLayer");
+ inputLayer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
+
+ activationLayer = network->AddActivationLayer(descriptor, "activation");
+
+ inputLayer->GetOutputSlot(0).Connect(activationLayer->GetInputSlot(0));
+ activationLayer->GetOutputSlot(0).SetTensorInfo({TensorShape{Dimensionality::NotSpecified}, DataType::Float32});
+
+ BOOST_CHECK_NO_THROW(activationLayer->GetOutputSlot(0).IsTensorInfoSet());
+}
+
BOOST_AUTO_TEST_CASE(AbsTest)
{
ActivationDescriptor descriptor;
@@ -190,7 +244,7 @@ BOOST_AUTO_TEST_CASE(ConstantTesst)
layer->GetOutputSlot(0).SetTensorInfo({{1, 1, 3, 3}, DataType::Float32});
- layer->ValidateTensorShapesFromInputs(ShapeInferenceMethod::ValidateOnly);
+ layer->ValidateTensorShapesFromInputs();
BOOST_CHECK(layer->GetOutputSlot(0).GetTensorInfo().GetShape() == outputShape);
}
@@ -564,15 +618,17 @@ BOOST_AUTO_TEST_CASE(RankTest)
layer->GetOutputSlot(0).SetTensorInfo({TensorShape(Dimensionality::NotSpecified), DataType::Float32});
BOOST_CHECK_THROW(
- layer->ValidateTensorShapesFromInputs(ShapeInferenceMethod::ValidateOnly), LayerValidationException);
+ layer->ValidateTensorShapesFromInputs(), LayerValidationException);
+
+ layer->SetShapeInferenceMethod(ShapeInferenceMethod::InferAndValidate);
- layer->ValidateTensorShapesFromInputs(ShapeInferenceMethod::InferAndValidate);
+ layer->ValidateTensorShapesFromInputs();
BOOST_CHECK(layer->GetOutputSlot(0).GetTensorInfo().GetShape() == expectedOutputs);
layer->GetOutputSlot(0).SetTensorInfo({TensorShape(Dimensionality::Scalar), DataType::Float32});
- layer->ValidateTensorShapesFromInputs(ShapeInferenceMethod::ValidateOnly);
+ layer->ValidateTensorShapesFromInputs();
BOOST_CHECK(layer->GetOutputSlot(0).GetTensorInfo().GetShape() == expectedOutputs);
}