aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorTeresa Charlin <teresa.charlinreyes@arm.com>2020-06-09 18:00:20 +0100
committerTeresaARM <teresa.charlinreyes@arm.com>2020-06-26 12:00:16 +0000
commitcdc0149ffe40f14ff4695149d9bdf551f8e07702 (patch)
tree0d0d34c0c3332d2e78f6272de536ae8c4dd809bc
parent1db8b822324c4d2c3e55cf3966cfae36757793b5 (diff)
downloadarmnn-cdc0149ffe40f14ff4695149d9bdf551f8e07702.tar.gz
IVGCVSW-4928 Introduce "ShapeInferenceMethod" Option.
Signed-off-by: Teresa Charlin <teresa.charlinreyes@arm.com> Change-Id: I70ef1a9f3cefa1d4cf9220f0e13131d11e7c6418
-rw-r--r--include/armnn/INetwork.hpp8
-rw-r--r--include/armnn/Types.hpp16
-rw-r--r--src/armnn/Graph.cpp4
-rw-r--r--src/armnn/Graph.hpp2
-rw-r--r--src/armnn/Layer.hpp3
-rw-r--r--src/armnn/layers/AbsLayer.cpp4
-rw-r--r--src/armnn/layers/AbsLayer.hpp4
-rw-r--r--src/armnn/layers/ActivationLayer.cpp4
-rw-r--r--src/armnn/layers/ActivationLayer.hpp4
-rw-r--r--src/armnn/layers/ArgMinMaxLayer.cpp4
-rw-r--r--src/armnn/layers/ArgMinMaxLayer.hpp4
-rw-r--r--src/armnn/layers/BatchNormalizationLayer.cpp4
-rw-r--r--src/armnn/layers/BatchNormalizationLayer.hpp4
-rw-r--r--src/armnn/layers/BatchToSpaceNdLayer.cpp4
-rw-r--r--src/armnn/layers/BatchToSpaceNdLayer.hpp4
-rw-r--r--src/armnn/layers/ComparisonLayer.cpp4
-rw-r--r--src/armnn/layers/ComparisonLayer.hpp4
-rw-r--r--src/armnn/layers/ConcatLayer.cpp4
-rw-r--r--src/armnn/layers/ConcatLayer.hpp4
-rw-r--r--src/armnn/layers/ConstantLayer.cpp4
-rw-r--r--src/armnn/layers/ConstantLayer.hpp4
-rw-r--r--src/armnn/layers/ConvertBf16ToFp32Layer.cpp4
-rw-r--r--src/armnn/layers/ConvertBf16ToFp32Layer.hpp4
-rw-r--r--src/armnn/layers/ConvertFp16ToFp32Layer.cpp4
-rw-r--r--src/armnn/layers/ConvertFp16ToFp32Layer.hpp4
-rw-r--r--src/armnn/layers/ConvertFp32ToBf16Layer.cpp4
-rw-r--r--src/armnn/layers/ConvertFp32ToBf16Layer.hpp4
-rw-r--r--src/armnn/layers/ConvertFp32ToFp16Layer.cpp4
-rw-r--r--src/armnn/layers/ConvertFp32ToFp16Layer.hpp4
-rw-r--r--src/armnn/layers/Convolution2dLayer.cpp4
-rw-r--r--src/armnn/layers/Convolution2dLayer.hpp4
-rw-r--r--src/armnn/layers/DebugLayer.cpp4
-rw-r--r--src/armnn/layers/DebugLayer.hpp4
-rw-r--r--src/armnn/layers/DepthToSpaceLayer.cpp4
-rw-r--r--src/armnn/layers/DepthToSpaceLayer.hpp4
-rw-r--r--src/armnn/layers/DepthwiseConvolution2dLayer.cpp4
-rw-r--r--src/armnn/layers/DepthwiseConvolution2dLayer.hpp4
-rw-r--r--src/armnn/layers/DequantizeLayer.cpp4
-rw-r--r--src/armnn/layers/DequantizeLayer.hpp4
-rw-r--r--src/armnn/layers/DetectionPostProcessLayer.cpp4
-rw-r--r--src/armnn/layers/DetectionPostProcessLayer.hpp4
-rw-r--r--src/armnn/layers/ElementwiseBaseLayer.cpp4
-rw-r--r--src/armnn/layers/ElementwiseBaseLayer.hpp4
-rw-r--r--src/armnn/layers/ElementwiseUnaryLayer.cpp4
-rw-r--r--src/armnn/layers/ElementwiseUnaryLayer.hpp4
-rw-r--r--src/armnn/layers/FakeQuantizationLayer.cpp4
-rw-r--r--src/armnn/layers/FakeQuantizationLayer.hpp4
-rw-r--r--src/armnn/layers/FillLayer.cpp4
-rw-r--r--src/armnn/layers/FillLayer.hpp4
-rw-r--r--src/armnn/layers/FloorLayer.cpp4
-rw-r--r--src/armnn/layers/FloorLayer.hpp4
-rw-r--r--src/armnn/layers/FullyConnectedLayer.cpp4
-rw-r--r--src/armnn/layers/FullyConnectedLayer.hpp4
-rw-r--r--src/armnn/layers/GatherLayer.cpp4
-rw-r--r--src/armnn/layers/GatherLayer.hpp4
-rw-r--r--src/armnn/layers/InputLayer.cpp4
-rw-r--r--src/armnn/layers/InputLayer.hpp4
-rw-r--r--src/armnn/layers/InstanceNormalizationLayer.cpp4
-rw-r--r--src/armnn/layers/InstanceNormalizationLayer.hpp4
-rw-r--r--src/armnn/layers/L2NormalizationLayer.cpp4
-rw-r--r--src/armnn/layers/L2NormalizationLayer.hpp4
-rw-r--r--src/armnn/layers/LogSoftmaxLayer.cpp4
-rw-r--r--src/armnn/layers/LogSoftmaxLayer.hpp4
-rw-r--r--src/armnn/layers/LstmLayer.cpp4
-rw-r--r--src/armnn/layers/LstmLayer.hpp4
-rw-r--r--src/armnn/layers/MeanLayer.cpp4
-rw-r--r--src/armnn/layers/MeanLayer.hpp4
-rw-r--r--src/armnn/layers/MemCopyLayer.cpp4
-rw-r--r--src/armnn/layers/MemCopyLayer.hpp4
-rw-r--r--src/armnn/layers/MemImportLayer.cpp4
-rw-r--r--src/armnn/layers/MemImportLayer.hpp4
-rw-r--r--src/armnn/layers/MergeLayer.cpp4
-rw-r--r--src/armnn/layers/MergeLayer.hpp4
-rw-r--r--src/armnn/layers/NormalizationLayer.cpp4
-rw-r--r--src/armnn/layers/NormalizationLayer.hpp4
-rw-r--r--src/armnn/layers/OutputLayer.cpp4
-rw-r--r--src/armnn/layers/OutputLayer.hpp4
-rw-r--r--src/armnn/layers/PadLayer.cpp4
-rw-r--r--src/armnn/layers/PadLayer.hpp4
-rw-r--r--src/armnn/layers/PermuteLayer.cpp4
-rw-r--r--src/armnn/layers/PermuteLayer.hpp4
-rw-r--r--src/armnn/layers/Pooling2dLayer.cpp4
-rw-r--r--src/armnn/layers/Pooling2dLayer.hpp4
-rw-r--r--src/armnn/layers/PreCompiledLayer.cpp4
-rw-r--r--src/armnn/layers/PreCompiledLayer.hpp3
-rw-r--r--src/armnn/layers/PreluLayer.cpp4
-rw-r--r--src/armnn/layers/PreluLayer.hpp4
-rw-r--r--src/armnn/layers/QLstmLayer.cpp4
-rw-r--r--src/armnn/layers/QLstmLayer.hpp4
-rw-r--r--src/armnn/layers/QuantizeLayer.cpp4
-rw-r--r--src/armnn/layers/QuantizeLayer.hpp3
-rw-r--r--src/armnn/layers/QuantizedLstmLayer.cpp4
-rw-r--r--src/armnn/layers/QuantizedLstmLayer.hpp4
-rw-r--r--src/armnn/layers/ReshapeLayer.cpp4
-rw-r--r--src/armnn/layers/ReshapeLayer.hpp4
-rw-r--r--src/armnn/layers/ResizeLayer.cpp4
-rw-r--r--src/armnn/layers/ResizeLayer.hpp4
-rw-r--r--src/armnn/layers/RsqrtLayer.cpp4
-rw-r--r--src/armnn/layers/RsqrtLayer.hpp4
-rw-r--r--src/armnn/layers/SliceLayer.cpp4
-rw-r--r--src/armnn/layers/SliceLayer.hpp4
-rw-r--r--src/armnn/layers/SoftmaxLayer.cpp4
-rw-r--r--src/armnn/layers/SoftmaxLayer.hpp4
-rw-r--r--src/armnn/layers/SpaceToBatchNdLayer.cpp4
-rw-r--r--src/armnn/layers/SpaceToBatchNdLayer.hpp4
-rw-r--r--src/armnn/layers/SpaceToDepthLayer.cpp4
-rw-r--r--src/armnn/layers/SpaceToDepthLayer.hpp4
-rw-r--r--src/armnn/layers/SplitterLayer.cpp4
-rw-r--r--src/armnn/layers/SplitterLayer.hpp4
-rw-r--r--src/armnn/layers/StackLayer.cpp4
-rw-r--r--src/armnn/layers/StackLayer.hpp4
-rw-r--r--src/armnn/layers/StandInLayer.cpp4
-rw-r--r--src/armnn/layers/StandInLayer.hpp4
-rw-r--r--src/armnn/layers/StridedSliceLayer.cpp4
-rw-r--r--src/armnn/layers/StridedSliceLayer.hpp4
-rw-r--r--src/armnn/layers/SwitchLayer.cpp4
-rw-r--r--src/armnn/layers/SwitchLayer.hpp4
-rw-r--r--src/armnn/layers/TransposeConvolution2dLayer.cpp4
-rw-r--r--src/armnn/layers/TransposeConvolution2dLayer.hpp4
-rw-r--r--src/armnn/layers/TransposeLayer.cpp4
-rw-r--r--src/armnn/layers/TransposeLayer.hpp4
121 files changed, 374 insertions, 121 deletions
diff --git a/include/armnn/INetwork.hpp b/include/armnn/INetwork.hpp
index ade6c52c90..49cd582e67 100644
--- a/include/armnn/INetwork.hpp
+++ b/include/armnn/INetwork.hpp
@@ -599,12 +599,15 @@ struct OptimizerOptions
: m_ReduceFp32ToFp16(false)
, m_Debug(false)
, m_ReduceFp32ToBf16(false)
+ , m_shapeInferenceMethod(armnn::ShapeInferenceMethod::ValidateOnly)
{}
- OptimizerOptions(bool reduceFp32ToFp16, bool debug, bool reduceFp32ToBf16 = false)
+ OptimizerOptions(bool reduceFp32ToFp16, bool debug, bool reduceFp32ToBf16 = false,
+ ShapeInferenceMethod shapeInferenceMethod = armnn::ShapeInferenceMethod::ValidateOnly)
: m_ReduceFp32ToFp16(reduceFp32ToFp16)
, m_Debug(debug)
, m_ReduceFp32ToBf16(reduceFp32ToBf16)
+ , m_shapeInferenceMethod(shapeInferenceMethod)
{
if (m_ReduceFp32ToFp16 && m_ReduceFp32ToBf16)
{
@@ -620,6 +623,9 @@ struct OptimizerOptions
// Reduce Fp32 data to Bf16 for faster processing
bool m_ReduceFp32ToBf16;
+
+ // Infer output size when not available
+ ShapeInferenceMethod m_shapeInferenceMethod;
};
/// Create an optimized version of the network
diff --git a/include/armnn/Types.hpp b/include/armnn/Types.hpp
index 7c8a533e60..fb6f134766 100644
--- a/include/armnn/Types.hpp
+++ b/include/armnn/Types.hpp
@@ -143,6 +143,22 @@ enum class OutputShapeRounding
Ceiling = 1
};
+///
+/// The ShapeInferenceMethod modifies how the output shapes are treated.
+/// When ValidateOnly is selected, the output shapes are inferred from the input parameters of the layer
+/// and any mismatch is reported.
+/// When InferAndValidate is selected 2 actions must be performed: (1) infer output shape from inputs and (2) validate
+/// the shapes as in ValidateOnly. This option has been added to work with tensors whose rank or dimension sizes are
+/// not specified explicitly, however this information can be calculated from the inputs.
+///
+enum class ShapeInferenceMethod
+{
+ /// Validate all output shapes
+ ValidateOnly = 0,
+ /// Infer missing output shapes and validate all output shapes
+ InferAndValidate = 1
+};
+
/// Each backend should implement an IBackend.
class IBackend
{
diff --git a/src/armnn/Graph.cpp b/src/armnn/Graph.cpp
index bd0cb34d57..cc3384748a 100644
--- a/src/armnn/Graph.cpp
+++ b/src/armnn/Graph.cpp
@@ -489,7 +489,7 @@ void Graph::EraseSubgraphLayers(SubgraphView &subgraph)
subgraph.Clear();
}
-void Graph::InferTensorInfos()
+void Graph::InferTensorInfos(ShapeInferenceMethod shapeInferenceMethod)
{
for (auto&& layer : TopologicalSort())
{
@@ -512,7 +512,7 @@ void Graph::InferTensorInfos()
throw LayerValidationException("All inputs must have the TensorInfo set at this point.");
}
}
- layer->ValidateTensorShapesFromInputs();
+ layer->ValidateTensorShapesFromInputs(shapeInferenceMethod);
}
}
diff --git a/src/armnn/Graph.hpp b/src/armnn/Graph.hpp
index ae2d1ee550..9673df49a0 100644
--- a/src/armnn/Graph.hpp
+++ b/src/armnn/Graph.hpp
@@ -200,7 +200,7 @@ public:
void SubstituteSubgraph(SubgraphView& subgraph, IConnectableLayer* substituteLayer);
void SubstituteSubgraph(SubgraphView& subgraph, const SubgraphView& substituteSubgraph);
- void InferTensorInfos();
+ void InferTensorInfos(ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly);
void AttachObservable(IGraphObservable* const observable, GraphEvent notifyOnEvent) {
m_Views[notifyOnEvent].emplace_back(observable);
diff --git a/src/armnn/Layer.hpp b/src/armnn/Layer.hpp
index 59475231a8..303de052fc 100644
--- a/src/armnn/Layer.hpp
+++ b/src/armnn/Layer.hpp
@@ -277,7 +277,8 @@ public:
void VerifyLayerConnections(unsigned int expectedConnections, const CheckLocation& location) const;
- virtual void ValidateTensorShapesFromInputs() = 0;
+ virtual void ValidateTensorShapesFromInputs(
+ ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) = 0;
std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
diff --git a/src/armnn/layers/AbsLayer.cpp b/src/armnn/layers/AbsLayer.cpp
index 490b03ed79..6f7141551e 100644
--- a/src/armnn/layers/AbsLayer.cpp
+++ b/src/armnn/layers/AbsLayer.cpp
@@ -30,8 +30,10 @@ AbsLayer* AbsLayer::Clone(Graph& graph) const
return CloneBase<AbsLayer>(graph, GetName());
}
-void AbsLayer::ValidateTensorShapesFromInputs()
+void AbsLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
{
+ IgnoreUnused(shapeInferenceMethod);
+
VerifyLayerConnections(1, CHECK_LOCATION());
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
diff --git a/src/armnn/layers/AbsLayer.hpp b/src/armnn/layers/AbsLayer.hpp
index 6dc55b4542..ab31014e57 100644
--- a/src/armnn/layers/AbsLayer.hpp
+++ b/src/armnn/layers/AbsLayer.hpp
@@ -25,7 +25,9 @@ public:
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref AbsLayer.
- void ValidateTensorShapesFromInputs() override;
+ /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
+ void ValidateTensorShapesFromInputs(
+ ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
void Accept(ILayerVisitor& visitor) const override;
diff --git a/src/armnn/layers/ActivationLayer.cpp b/src/armnn/layers/ActivationLayer.cpp
index d310b7efbc..1aed59b781 100644
--- a/src/armnn/layers/ActivationLayer.cpp
+++ b/src/armnn/layers/ActivationLayer.cpp
@@ -28,8 +28,10 @@ ActivationLayer* ActivationLayer::Clone(Graph& graph) const
return CloneBase<ActivationLayer>(graph, m_Param, GetName());
}
-void ActivationLayer::ValidateTensorShapesFromInputs()
+void ActivationLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
{
+ IgnoreUnused(shapeInferenceMethod);
+
VerifyLayerConnections(1, CHECK_LOCATION());
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
diff --git a/src/armnn/layers/ActivationLayer.hpp b/src/armnn/layers/ActivationLayer.hpp
index 46845e2918..3f0d520c3c 100644
--- a/src/armnn/layers/ActivationLayer.hpp
+++ b/src/armnn/layers/ActivationLayer.hpp
@@ -23,7 +23,9 @@ public:
ActivationLayer* Clone(Graph& graph) const override;
/// Check if the input tensor shape(s) will lead to a valid configuration of @ref ActivationLayer.
- void ValidateTensorShapesFromInputs() override;
+ /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
+ void ValidateTensorShapesFromInputs(
+ ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
void Accept(ILayerVisitor& visitor) const override;
diff --git a/src/armnn/layers/ArgMinMaxLayer.cpp b/src/armnn/layers/ArgMinMaxLayer.cpp
index a9907871be..288192f98d 100644
--- a/src/armnn/layers/ArgMinMaxLayer.cpp
+++ b/src/armnn/layers/ArgMinMaxLayer.cpp
@@ -69,8 +69,10 @@ std::vector<TensorShape> ArgMinMaxLayer::InferOutputShapes(const std::vector<Ten
return std::vector<TensorShape>({ outputShape });
}
-void ArgMinMaxLayer::ValidateTensorShapesFromInputs()
+void ArgMinMaxLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
{
+ IgnoreUnused(shapeInferenceMethod);
+
VerifyLayerConnections(1, CHECK_LOCATION());
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
diff --git a/src/armnn/layers/ArgMinMaxLayer.hpp b/src/armnn/layers/ArgMinMaxLayer.hpp
index 2d7d223d7a..27cfb20edf 100644
--- a/src/armnn/layers/ArgMinMaxLayer.hpp
+++ b/src/armnn/layers/ArgMinMaxLayer.hpp
@@ -31,7 +31,9 @@ public:
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref ArgMinMaxLayer.
- void ValidateTensorShapesFromInputs() override;
+ /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
+ void ValidateTensorShapesFromInputs(
+ ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
void Accept(ILayerVisitor& visitor) const override;
diff --git a/src/armnn/layers/BatchNormalizationLayer.cpp b/src/armnn/layers/BatchNormalizationLayer.cpp
index 7f61cad40f..9fcc30cbba 100644
--- a/src/armnn/layers/BatchNormalizationLayer.cpp
+++ b/src/armnn/layers/BatchNormalizationLayer.cpp
@@ -48,8 +48,10 @@ BatchNormalizationLayer* BatchNormalizationLayer::Clone(Graph& graph) const
return std::move(layer);
}
-void BatchNormalizationLayer::ValidateTensorShapesFromInputs()
+void BatchNormalizationLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
{
+ IgnoreUnused(shapeInferenceMethod);
+
VerifyLayerConnections(1, CHECK_LOCATION());
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
diff --git a/src/armnn/layers/BatchNormalizationLayer.hpp b/src/armnn/layers/BatchNormalizationLayer.hpp
index 14e6a17413..88db81f8aa 100644
--- a/src/armnn/layers/BatchNormalizationLayer.hpp
+++ b/src/armnn/layers/BatchNormalizationLayer.hpp
@@ -36,7 +36,9 @@ public:
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref BatchNormalizationLayer.
- void ValidateTensorShapesFromInputs() override;
+ /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
+ void ValidateTensorShapesFromInputs(
+ ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
void Accept(ILayerVisitor& visitor) const override;
diff --git a/src/armnn/layers/BatchToSpaceNdLayer.cpp b/src/armnn/layers/BatchToSpaceNdLayer.cpp
index 1da88c63ac..8341b85dfd 100644
--- a/src/armnn/layers/BatchToSpaceNdLayer.cpp
+++ b/src/armnn/layers/BatchToSpaceNdLayer.cpp
@@ -41,8 +41,10 @@ BatchToSpaceNdLayer* BatchToSpaceNdLayer::Clone(Graph& graph) const
return std::move(layer);
}
-void BatchToSpaceNdLayer::ValidateTensorShapesFromInputs()
+void BatchToSpaceNdLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
{
+ IgnoreUnused(shapeInferenceMethod);
+
VerifyLayerConnections(1, CHECK_LOCATION());
auto inferredShapes = InferOutputShapes({GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape()});
diff --git a/src/armnn/layers/BatchToSpaceNdLayer.hpp b/src/armnn/layers/BatchToSpaceNdLayer.hpp
index 5d568cb32a..b99dc36ce9 100644
--- a/src/armnn/layers/BatchToSpaceNdLayer.hpp
+++ b/src/armnn/layers/BatchToSpaceNdLayer.hpp
@@ -25,7 +25,9 @@ public:
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref BatchToSpaceNdLayer.
- void ValidateTensorShapesFromInputs() override;
+ /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
+ void ValidateTensorShapesFromInputs(
+ ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
/// By default returns inputShapes if the number of inputs are equal to number of outputs,
/// otherwise infers the output shapes from given input shapes and layer properties.
diff --git a/src/armnn/layers/ComparisonLayer.cpp b/src/armnn/layers/ComparisonLayer.cpp
index 91080457bf..483d2e1c72 100644
--- a/src/armnn/layers/ComparisonLayer.cpp
+++ b/src/armnn/layers/ComparisonLayer.cpp
@@ -55,8 +55,10 @@ std::vector<TensorShape> ComparisonLayer::InferOutputShapes(const std::vector<Te
return std::vector<TensorShape>({ TensorShape(numDims, dims.data()) });
}
-void ComparisonLayer::ValidateTensorShapesFromInputs()
+void ComparisonLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
{
+ IgnoreUnused(shapeInferenceMethod);
+
VerifyLayerConnections(2, CHECK_LOCATION());
std::vector<TensorShape> inferredShapes = InferOutputShapes({
diff --git a/src/armnn/layers/ComparisonLayer.hpp b/src/armnn/layers/ComparisonLayer.hpp
index edc66b6cf7..e20bcdfb4f 100644
--- a/src/armnn/layers/ComparisonLayer.hpp
+++ b/src/armnn/layers/ComparisonLayer.hpp
@@ -32,7 +32,9 @@ public:
/// Check if the input tensor shape(s) will lead to a valid configuration
/// of @ref ComparisonLayer
- void ValidateTensorShapesFromInputs() override;
+ /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
+ void ValidateTensorShapesFromInputs(
+ ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
void Accept(ILayerVisitor& visitor) const override;
diff --git a/src/armnn/layers/ConcatLayer.cpp b/src/armnn/layers/ConcatLayer.cpp
index b51303b7ee..9a0672b38d 100644
--- a/src/armnn/layers/ConcatLayer.cpp
+++ b/src/armnn/layers/ConcatLayer.cpp
@@ -242,8 +242,10 @@ std::vector<TensorShape> ConcatLayer::InferOutputShapes(const std::vector<Tensor
return std::vector<TensorShape>({ TensorShape({numDims, extentMax.data()}) });
}
-void ConcatLayer::ValidateTensorShapesFromInputs()
+void ConcatLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
{
+ IgnoreUnused(shapeInferenceMethod);
+
// Validates Concat layer.
ConditionalThrowIfNotEqual<LayerValidationException>(
"ConcatLayer: Num Inputs must match num views.",
diff --git a/src/armnn/layers/ConcatLayer.hpp b/src/armnn/layers/ConcatLayer.hpp
index 0d540086d7..5bb11ba6e2 100644
--- a/src/armnn/layers/ConcatLayer.hpp
+++ b/src/armnn/layers/ConcatLayer.hpp
@@ -34,7 +34,9 @@ public:
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref ConcatLayer.
- void ValidateTensorShapesFromInputs() override;
+ /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
+ void ValidateTensorShapesFromInputs(
+ ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
/// By default returns inputShapes if the number of inputs are equal to number of outputs,
/// otherwise infers the output shapes from given input shapes and layer properties.
diff --git a/src/armnn/layers/ConstantLayer.cpp b/src/armnn/layers/ConstantLayer.cpp
index 136616c204..d354accd2f 100644
--- a/src/armnn/layers/ConstantLayer.cpp
+++ b/src/armnn/layers/ConstantLayer.cpp
@@ -40,8 +40,10 @@ std::vector<TensorShape> ConstantLayer::InferOutputShapes(const std::vector<Tens
return std::vector<TensorShape>({ inputShapes[0] });
}
-void ConstantLayer::ValidateTensorShapesFromInputs()
+void ConstantLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
{
+ IgnoreUnused(shapeInferenceMethod);
+
// Get the output shape from the value of the constant layer.
TensorShape const& outShape = m_LayerOutput->GetTensorInfo().GetShape();
ConditionalThrowIfNotEqual<LayerValidationException>(
diff --git a/src/armnn/layers/ConstantLayer.hpp b/src/armnn/layers/ConstantLayer.hpp
index 9525522b54..23183d22fe 100644
--- a/src/armnn/layers/ConstantLayer.hpp
+++ b/src/armnn/layers/ConstantLayer.hpp
@@ -27,7 +27,9 @@ public:
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref ConstantLayer
- void ValidateTensorShapesFromInputs() override;
+ /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
+ void ValidateTensorShapesFromInputs(
+ ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
/// By default returns inputShapes if the number of inputs are equal to number of outputs,
/// otherwise infers the output shapes from given input shapes and layer properties.
diff --git a/src/armnn/layers/ConvertBf16ToFp32Layer.cpp b/src/armnn/layers/ConvertBf16ToFp32Layer.cpp
index 30d20b87d6..b53986a497 100644
--- a/src/armnn/layers/ConvertBf16ToFp32Layer.cpp
+++ b/src/armnn/layers/ConvertBf16ToFp32Layer.cpp
@@ -30,8 +30,10 @@ ConvertBf16ToFp32Layer* ConvertBf16ToFp32Layer::Clone(Graph& graph) const
return CloneBase<ConvertBf16ToFp32Layer>(graph, GetName());
}
-void ConvertBf16ToFp32Layer::ValidateTensorShapesFromInputs()
+void ConvertBf16ToFp32Layer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
{
+ IgnoreUnused(shapeInferenceMethod);
+
VerifyLayerConnections(1, CHECK_LOCATION());
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
diff --git a/src/armnn/layers/ConvertBf16ToFp32Layer.hpp b/src/armnn/layers/ConvertBf16ToFp32Layer.hpp
index b419e5c2a3..136cfed479 100644
--- a/src/armnn/layers/ConvertBf16ToFp32Layer.hpp
+++ b/src/armnn/layers/ConvertBf16ToFp32Layer.hpp
@@ -25,7 +25,9 @@ public:
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref ConvertBf16ToFp32Layer.
- void ValidateTensorShapesFromInputs() override;
+ /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
+ void ValidateTensorShapesFromInputs(
+ ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
void Accept(ILayerVisitor& visitor) const override;
diff --git a/src/armnn/layers/ConvertFp16ToFp32Layer.cpp b/src/armnn/layers/ConvertFp16ToFp32Layer.cpp
index 08f0e4a8c1..30f9e6340e 100644
--- a/src/armnn/layers/ConvertFp16ToFp32Layer.cpp
+++ b/src/armnn/layers/ConvertFp16ToFp32Layer.cpp
@@ -30,8 +30,10 @@ ConvertFp16ToFp32Layer* ConvertFp16ToFp32Layer::Clone(Graph& graph) const
return CloneBase<ConvertFp16ToFp32Layer>(graph, GetName());
}
-void ConvertFp16ToFp32Layer::ValidateTensorShapesFromInputs()
+void ConvertFp16ToFp32Layer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
{
+ IgnoreUnused(shapeInferenceMethod);
+
VerifyLayerConnections(1, CHECK_LOCATION());
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
diff --git a/src/armnn/layers/ConvertFp16ToFp32Layer.hpp b/src/armnn/layers/ConvertFp16ToFp32Layer.hpp
index e3b798beec..c4ac13b7e4 100644
--- a/src/armnn/layers/ConvertFp16ToFp32Layer.hpp
+++ b/src/armnn/layers/ConvertFp16ToFp32Layer.hpp
@@ -25,7 +25,9 @@ public:
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref ConvertFp16ToFp32Layer.
- void ValidateTensorShapesFromInputs() override;
+ /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
+ void ValidateTensorShapesFromInputs(
+ ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
void Accept(ILayerVisitor& visitor) const override;
diff --git a/src/armnn/layers/ConvertFp32ToBf16Layer.cpp b/src/armnn/layers/ConvertFp32ToBf16Layer.cpp
index c9e0962dd5..9f523ae417 100644
--- a/src/armnn/layers/ConvertFp32ToBf16Layer.cpp
+++ b/src/armnn/layers/ConvertFp32ToBf16Layer.cpp
@@ -30,8 +30,10 @@ ConvertFp32ToBf16Layer* ConvertFp32ToBf16Layer::Clone(Graph& graph) const
return CloneBase<ConvertFp32ToBf16Layer>(graph, GetName());
}
-void ConvertFp32ToBf16Layer::ValidateTensorShapesFromInputs()
+void ConvertFp32ToBf16Layer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
{
+ IgnoreUnused(shapeInferenceMethod);
+
VerifyLayerConnections(1, CHECK_LOCATION());
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
diff --git a/src/armnn/layers/ConvertFp32ToBf16Layer.hpp b/src/armnn/layers/ConvertFp32ToBf16Layer.hpp
index 225b03314e..096dc7e0d8 100644
--- a/src/armnn/layers/ConvertFp32ToBf16Layer.hpp
+++ b/src/armnn/layers/ConvertFp32ToBf16Layer.hpp
@@ -25,7 +25,9 @@ public:
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref ConvertFp32ToBf16Layer.
- void ValidateTensorShapesFromInputs() override;
+ /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
+ void ValidateTensorShapesFromInputs(
+ ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
void Accept(ILayerVisitor& visitor) const override;
diff --git a/src/armnn/layers/ConvertFp32ToFp16Layer.cpp b/src/armnn/layers/ConvertFp32ToFp16Layer.cpp
index 95403e9e75..7ff98ed898 100644
--- a/src/armnn/layers/ConvertFp32ToFp16Layer.cpp
+++ b/src/armnn/layers/ConvertFp32ToFp16Layer.cpp
@@ -29,8 +29,10 @@ ConvertFp32ToFp16Layer* ConvertFp32ToFp16Layer::Clone(Graph& graph) const
return CloneBase<ConvertFp32ToFp16Layer>(graph, GetName());
}
-void ConvertFp32ToFp16Layer::ValidateTensorShapesFromInputs()
+void ConvertFp32ToFp16Layer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
{
+ IgnoreUnused(shapeInferenceMethod);
+
VerifyLayerConnections(1, CHECK_LOCATION());
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
diff --git a/src/armnn/layers/ConvertFp32ToFp16Layer.hpp b/src/armnn/layers/ConvertFp32ToFp16Layer.hpp
index 8bb28f84ad..c8a5055cc3 100644
--- a/src/armnn/layers/ConvertFp32ToFp16Layer.hpp
+++ b/src/armnn/layers/ConvertFp32ToFp16Layer.hpp
@@ -24,7 +24,9 @@ public:
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref ConvertFp32ToFp16Layer.
- void ValidateTensorShapesFromInputs() override;
+ /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
+ void ValidateTensorShapesFromInputs(
+ ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
void Accept(ILayerVisitor& visitor) const override;
diff --git a/src/armnn/layers/Convolution2dLayer.cpp b/src/armnn/layers/Convolution2dLayer.cpp
index d82908a128..462d3554d3 100644
--- a/src/armnn/layers/Convolution2dLayer.cpp
+++ b/src/armnn/layers/Convolution2dLayer.cpp
@@ -112,8 +112,10 @@ std::vector<TensorShape> Convolution2dLayer::InferOutputShapes(const std::vector
return std::vector<TensorShape>({ tensorShape });
}
-void Convolution2dLayer::ValidateTensorShapesFromInputs()
+void Convolution2dLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
{
+ IgnoreUnused(shapeInferenceMethod);
+
VerifyLayerConnections(1, CHECK_LOCATION());
// check if the m_Weight data is not nullptr
diff --git a/src/armnn/layers/Convolution2dLayer.hpp b/src/armnn/layers/Convolution2dLayer.hpp
index bd30826823..e88b44da16 100644
--- a/src/armnn/layers/Convolution2dLayer.hpp
+++ b/src/armnn/layers/Convolution2dLayer.hpp
@@ -33,7 +33,9 @@ public:
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref Convolution2dLayer.
- void ValidateTensorShapesFromInputs() override;
+ /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
+ void ValidateTensorShapesFromInputs(
+ ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
/// By default returns inputShapes if the number of inputs are equal to number of outputs,
/// otherwise infers the output shapes from given input shapes and layer properties.
diff --git a/src/armnn/layers/DebugLayer.cpp b/src/armnn/layers/DebugLayer.cpp
index 6aaf945878..3422de6ad3 100644
--- a/src/armnn/layers/DebugLayer.cpp
+++ b/src/armnn/layers/DebugLayer.cpp
@@ -34,8 +34,10 @@ DebugLayer* DebugLayer::Clone(Graph& graph) const
return CloneBase<DebugLayer>(graph, GetName());
}
-void DebugLayer::ValidateTensorShapesFromInputs()
+void DebugLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
{
+ IgnoreUnused(shapeInferenceMethod);
+
VerifyLayerConnections(1, CHECK_LOCATION());
std::vector<TensorShape> inferredShapes = InferOutputShapes({
diff --git a/src/armnn/layers/DebugLayer.hpp b/src/armnn/layers/DebugLayer.hpp
index d50d6185a4..227e056c7b 100644
--- a/src/armnn/layers/DebugLayer.hpp
+++ b/src/armnn/layers/DebugLayer.hpp
@@ -25,7 +25,9 @@ public:
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref DebugLayer.
- void ValidateTensorShapesFromInputs() override;
+ /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
+ void ValidateTensorShapesFromInputs(
+ ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
void Accept(ILayerVisitor& visitor) const override;
diff --git a/src/armnn/layers/DepthToSpaceLayer.cpp b/src/armnn/layers/DepthToSpaceLayer.cpp
index 2d13271c77..a3344841c9 100644
--- a/src/armnn/layers/DepthToSpaceLayer.cpp
+++ b/src/armnn/layers/DepthToSpaceLayer.cpp
@@ -57,8 +57,10 @@ std::vector<TensorShape> DepthToSpaceLayer::InferOutputShapes(const std::vector<
return std::vector<TensorShape>({ outputShape });
}
-void DepthToSpaceLayer::ValidateTensorShapesFromInputs()
+void DepthToSpaceLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
{
+ IgnoreUnused(shapeInferenceMethod);
+
VerifyLayerConnections(1, CHECK_LOCATION());
std::vector<TensorShape> inferredShapes = InferOutputShapes({
diff --git a/src/armnn/layers/DepthToSpaceLayer.hpp b/src/armnn/layers/DepthToSpaceLayer.hpp
index 53ef6e324e..a0ecdcffc0 100644
--- a/src/armnn/layers/DepthToSpaceLayer.hpp
+++ b/src/armnn/layers/DepthToSpaceLayer.hpp
@@ -32,7 +32,9 @@ public:
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref DepthToSpaceLayer.
- void ValidateTensorShapesFromInputs() override;
+ /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
+ void ValidateTensorShapesFromInputs(
+ ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
void Accept(ILayerVisitor& visitor) const override;
diff --git a/src/armnn/layers/DepthwiseConvolution2dLayer.cpp b/src/armnn/layers/DepthwiseConvolution2dLayer.cpp
index dc6b2c2fe7..7efb307091 100644
--- a/src/armnn/layers/DepthwiseConvolution2dLayer.cpp
+++ b/src/armnn/layers/DepthwiseConvolution2dLayer.cpp
@@ -119,8 +119,10 @@ DepthwiseConvolution2dLayer::InferOutputShapes(const std::vector<TensorShape>& i
return std::vector<TensorShape>{ tensorShape };
}
-void DepthwiseConvolution2dLayer::ValidateTensorShapesFromInputs()
+void DepthwiseConvolution2dLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
{
+ IgnoreUnused(shapeInferenceMethod);
+
VerifyLayerConnections(1, CHECK_LOCATION());
// on this level constant data should not be released..
diff --git a/src/armnn/layers/DepthwiseConvolution2dLayer.hpp b/src/armnn/layers/DepthwiseConvolution2dLayer.hpp
index 67b6da23e3..7b42a5fa59 100644
--- a/src/armnn/layers/DepthwiseConvolution2dLayer.hpp
+++ b/src/armnn/layers/DepthwiseConvolution2dLayer.hpp
@@ -32,7 +32,9 @@ public:
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref DepthwiseConvolution2dLayer.
- void ValidateTensorShapesFromInputs() override;
+ /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
+ void ValidateTensorShapesFromInputs(
+ ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
/// By default returns inputShapes if the number of inputs are equal to number of outputs,
/// otherwise infers the output shapes from given input shapes and layer properties.
diff --git a/src/armnn/layers/DequantizeLayer.cpp b/src/armnn/layers/DequantizeLayer.cpp
index 5b57279c43..e0c3d0ec16 100644
--- a/src/armnn/layers/DequantizeLayer.cpp
+++ b/src/armnn/layers/DequantizeLayer.cpp
@@ -29,8 +29,10 @@ DequantizeLayer* DequantizeLayer::Clone(Graph& graph) const
return CloneBase<DequantizeLayer>(graph, GetName());
}
-void DequantizeLayer::ValidateTensorShapesFromInputs()
+void DequantizeLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
{
+ IgnoreUnused(shapeInferenceMethod);
+
VerifyLayerConnections(1, CHECK_LOCATION());
std::vector<TensorShape> inferredShapes = InferOutputShapes({
diff --git a/src/armnn/layers/DequantizeLayer.hpp b/src/armnn/layers/DequantizeLayer.hpp
index c112b6026e..24c9869f13 100644
--- a/src/armnn/layers/DequantizeLayer.hpp
+++ b/src/armnn/layers/DequantizeLayer.hpp
@@ -25,7 +25,9 @@ public:
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref DequantizeLayer.
- void ValidateTensorShapesFromInputs() override;
+ /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
+ void ValidateTensorShapesFromInputs(
+ ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
void Accept(ILayerVisitor& visitor) const override;
diff --git a/src/armnn/layers/DetectionPostProcessLayer.cpp b/src/armnn/layers/DetectionPostProcessLayer.cpp
index e8d14d928c..2deca322ce 100644
--- a/src/armnn/layers/DetectionPostProcessLayer.cpp
+++ b/src/armnn/layers/DetectionPostProcessLayer.cpp
@@ -34,8 +34,10 @@ DetectionPostProcessLayer* DetectionPostProcessLayer::Clone(Graph& graph) const
return std::move(layer);
}
-void DetectionPostProcessLayer::ValidateTensorShapesFromInputs()
+void DetectionPostProcessLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
{
+ IgnoreUnused(shapeInferenceMethod);
+
VerifyLayerConnections(2, CHECK_LOCATION());
// on this level constant data should not be released.
diff --git a/src/armnn/layers/DetectionPostProcessLayer.hpp b/src/armnn/layers/DetectionPostProcessLayer.hpp
index a6eab116ff..d3c604f65c 100644
--- a/src/armnn/layers/DetectionPostProcessLayer.hpp
+++ b/src/armnn/layers/DetectionPostProcessLayer.hpp
@@ -31,7 +31,9 @@ public:
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref DetectionPostProcessLayer.
- void ValidateTensorShapesFromInputs() override;
+ /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
+ void ValidateTensorShapesFromInputs(
+ ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
void Accept(ILayerVisitor& visitor) const override;
diff --git a/src/armnn/layers/ElementwiseBaseLayer.cpp b/src/armnn/layers/ElementwiseBaseLayer.cpp
index 2c1e8717f4..44bbd0b839 100644
--- a/src/armnn/layers/ElementwiseBaseLayer.cpp
+++ b/src/armnn/layers/ElementwiseBaseLayer.cpp
@@ -47,8 +47,10 @@ std::vector<TensorShape> ElementwiseBaseLayer::InferOutputShapes(const std::vect
return std::vector<TensorShape>({ TensorShape(numDims, dims.data()) });
}
-void ElementwiseBaseLayer::ValidateTensorShapesFromInputs()
+void ElementwiseBaseLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
{
+ IgnoreUnused(shapeInferenceMethod);
+
VerifyLayerConnections(2, CHECK_LOCATION());
auto inferredShapes = InferOutputShapes({
diff --git a/src/armnn/layers/ElementwiseBaseLayer.hpp b/src/armnn/layers/ElementwiseBaseLayer.hpp
index 4f151b7f48..1f9888a821 100644
--- a/src/armnn/layers/ElementwiseBaseLayer.hpp
+++ b/src/armnn/layers/ElementwiseBaseLayer.hpp
@@ -18,7 +18,9 @@ class ElementwiseBaseLayer : public Layer
public:
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of the element wise operation.
- void ValidateTensorShapesFromInputs() override;
+ /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
+ void ValidateTensorShapesFromInputs(
+ ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
/// By default returns inputShapes if the number of inputs are equal to number of outputs,
/// otherwise infers the output shapes from given input shapes and layer properties.
diff --git a/src/armnn/layers/ElementwiseUnaryLayer.cpp b/src/armnn/layers/ElementwiseUnaryLayer.cpp
index c91057cc9f..0908f391fb 100644
--- a/src/armnn/layers/ElementwiseUnaryLayer.cpp
+++ b/src/armnn/layers/ElementwiseUnaryLayer.cpp
@@ -40,8 +40,10 @@ std::vector<TensorShape> ElementwiseUnaryLayer::InferOutputShapes(const std::vec
return std::vector<TensorShape>({ input });
}
-void ElementwiseUnaryLayer::ValidateTensorShapesFromInputs()
+void ElementwiseUnaryLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
{
+ IgnoreUnused(shapeInferenceMethod);
+
VerifyLayerConnections(1, CHECK_LOCATION());
std::vector<TensorShape> inferredShapes = InferOutputShapes({
diff --git a/src/armnn/layers/ElementwiseUnaryLayer.hpp b/src/armnn/layers/ElementwiseUnaryLayer.hpp
index 850a814b6e..ae88fcfb45 100644
--- a/src/armnn/layers/ElementwiseUnaryLayer.hpp
+++ b/src/armnn/layers/ElementwiseUnaryLayer.hpp
@@ -31,7 +31,9 @@ public:
/// Check if the input tensor shape(s) will lead to a valid configuration
/// of @ref ElementwiseUnaryLayer
- void ValidateTensorShapesFromInputs() override;
+ /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
+ void ValidateTensorShapesFromInputs(
+ ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
void Accept(ILayerVisitor& visitor) const override;
diff --git a/src/armnn/layers/FakeQuantizationLayer.cpp b/src/armnn/layers/FakeQuantizationLayer.cpp
index 2b4ad8605f..7ed6d7527b 100644
--- a/src/armnn/layers/FakeQuantizationLayer.cpp
+++ b/src/armnn/layers/FakeQuantizationLayer.cpp
@@ -29,8 +29,10 @@ FakeQuantizationLayer* FakeQuantizationLayer::Clone(Graph& graph) const
return CloneBase<FakeQuantizationLayer>(graph, m_Param, GetName());
}
-void FakeQuantizationLayer::ValidateTensorShapesFromInputs()
+void FakeQuantizationLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
{
+ IgnoreUnused(shapeInferenceMethod);
+
VerifyLayerConnections(1, CHECK_LOCATION());
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
diff --git a/src/armnn/layers/FakeQuantizationLayer.hpp b/src/armnn/layers/FakeQuantizationLayer.hpp
index 36c360f728..dc22c23485 100644
--- a/src/armnn/layers/FakeQuantizationLayer.hpp
+++ b/src/armnn/layers/FakeQuantizationLayer.hpp
@@ -25,7 +25,9 @@ public:
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref FakeQuantizationLayer.
- void ValidateTensorShapesFromInputs() override;
+ /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
+ void ValidateTensorShapesFromInputs(
+ ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
void Accept(ILayerVisitor& visitor) const override;
diff --git a/src/armnn/layers/FillLayer.cpp b/src/armnn/layers/FillLayer.cpp
index eb9f6af800..688486b368 100644
--- a/src/armnn/layers/FillLayer.cpp
+++ b/src/armnn/layers/FillLayer.cpp
@@ -29,8 +29,10 @@ FillLayer* FillLayer::Clone(Graph& graph) const
return CloneBase<FillLayer>(graph, m_Param, GetName());
}
-void FillLayer::ValidateTensorShapesFromInputs()
+void FillLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
{
+ IgnoreUnused(shapeInferenceMethod);
+
VerifyLayerConnections(1, CHECK_LOCATION());
auto inferredShapes = InferOutputShapes( { GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
diff --git a/src/armnn/layers/FillLayer.hpp b/src/armnn/layers/FillLayer.hpp
index b9a972a27a..aa12fca711 100644
--- a/src/armnn/layers/FillLayer.hpp
+++ b/src/armnn/layers/FillLayer.hpp
@@ -24,7 +24,9 @@ public:
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref FillLayer.
- void ValidateTensorShapesFromInputs() override;
+ /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
+ void ValidateTensorShapesFromInputs(
+ ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
void Accept(ILayerVisitor& visitor) const override;
diff --git a/src/armnn/layers/FloorLayer.cpp b/src/armnn/layers/FloorLayer.cpp
index fb918f6e7a..9e46ebf5c2 100644
--- a/src/armnn/layers/FloorLayer.cpp
+++ b/src/armnn/layers/FloorLayer.cpp
@@ -29,8 +29,10 @@ FloorLayer* FloorLayer::Clone(Graph& graph) const
return CloneBase<FloorLayer>(graph, GetName());
}
-void FloorLayer::ValidateTensorShapesFromInputs()
+void FloorLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
{
+ IgnoreUnused(shapeInferenceMethod);
+
VerifyLayerConnections(1, CHECK_LOCATION());
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
diff --git a/src/armnn/layers/FloorLayer.hpp b/src/armnn/layers/FloorLayer.hpp
index e5b30d1ffb..68361d0a36 100644
--- a/src/armnn/layers/FloorLayer.hpp
+++ b/src/armnn/layers/FloorLayer.hpp
@@ -25,7 +25,9 @@ public:
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref FloorLayer.
- void ValidateTensorShapesFromInputs() override;
+ /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
+ void ValidateTensorShapesFromInputs(
+ ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
void Accept(ILayerVisitor& visitor) const override;
diff --git a/src/armnn/layers/FullyConnectedLayer.cpp b/src/armnn/layers/FullyConnectedLayer.cpp
index 4bbc9ba890..bd947b7678 100644
--- a/src/armnn/layers/FullyConnectedLayer.cpp
+++ b/src/armnn/layers/FullyConnectedLayer.cpp
@@ -61,8 +61,10 @@ std::vector<TensorShape> FullyConnectedLayer::InferOutputShapes(const std::vecto
return std::vector<TensorShape>({ TensorShape({batches, weightShape[dimIdx]})});
}
-void FullyConnectedLayer::ValidateTensorShapesFromInputs()
+void FullyConnectedLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
{
+ IgnoreUnused(shapeInferenceMethod);
+
VerifyLayerConnections(1, CHECK_LOCATION());
// check if we m_Weight data is not nullptr
diff --git a/src/armnn/layers/FullyConnectedLayer.hpp b/src/armnn/layers/FullyConnectedLayer.hpp
index 7f03cc2ffe..a2d075002a 100644
--- a/src/armnn/layers/FullyConnectedLayer.hpp
+++ b/src/armnn/layers/FullyConnectedLayer.hpp
@@ -32,7 +32,9 @@ public:
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref FullyConnectedLayer.
- void ValidateTensorShapesFromInputs() override;
+ /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
+ void ValidateTensorShapesFromInputs(
+ ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
/// By default returns inputShapes if the number of inputs are equal to number of outputs,
/// otherwise infers the output shapes from given input shapes and layer properties.
diff --git a/src/armnn/layers/GatherLayer.cpp b/src/armnn/layers/GatherLayer.cpp
index c276d8258f..a99913073f 100644
--- a/src/armnn/layers/GatherLayer.cpp
+++ b/src/armnn/layers/GatherLayer.cpp
@@ -29,8 +29,10 @@ GatherLayer* GatherLayer::Clone(Graph& graph) const
return CloneBase<GatherLayer>(graph, GetName());
}
-void GatherLayer::ValidateTensorShapesFromInputs()
+void GatherLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
{
+ IgnoreUnused(shapeInferenceMethod);
+
VerifyLayerConnections(2, CHECK_LOCATION());
const TensorInfo& params = GetInputSlot(0).GetConnection()->GetTensorInfo();
diff --git a/src/armnn/layers/GatherLayer.hpp b/src/armnn/layers/GatherLayer.hpp
index 08629d53c8..598ca44dc4 100644
--- a/src/armnn/layers/GatherLayer.hpp
+++ b/src/armnn/layers/GatherLayer.hpp
@@ -26,7 +26,9 @@ public:
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref GatherLayer.
- void ValidateTensorShapesFromInputs() override;
+ /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
+ void ValidateTensorShapesFromInputs(
+ ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
void Accept(ILayerVisitor& visitor) const override;
diff --git a/src/armnn/layers/InputLayer.cpp b/src/armnn/layers/InputLayer.cpp
index 84cc43c667..42ce153346 100644
--- a/src/armnn/layers/InputLayer.cpp
+++ b/src/armnn/layers/InputLayer.cpp
@@ -28,8 +28,10 @@ InputLayer* InputLayer::Clone(Graph& graph) const
return CloneBase<InputLayer>(graph, GetBindingId(), GetName());
}
-void InputLayer::ValidateTensorShapesFromInputs()
+void InputLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
{
+ IgnoreUnused(shapeInferenceMethod);
+
//The input layer should already have it's inputs set during graph building phase in the driver/parser.
ConditionalThrow<LayerValidationException>(GetOutputSlot(0).IsTensorInfoSet(),
"InputLayer should already have the TensorInfo set.");
diff --git a/src/armnn/layers/InputLayer.hpp b/src/armnn/layers/InputLayer.hpp
index 64138fd3cf..430abcb410 100644
--- a/src/armnn/layers/InputLayer.hpp
+++ b/src/armnn/layers/InputLayer.hpp
@@ -25,7 +25,9 @@ public:
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref InputLayer.
- void ValidateTensorShapesFromInputs() override;
+ /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
+ void ValidateTensorShapesFromInputs(
+ ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
void Accept(ILayerVisitor& visitor) const override;
diff --git a/src/armnn/layers/InstanceNormalizationLayer.cpp b/src/armnn/layers/InstanceNormalizationLayer.cpp
index 25b133acda..f79e423c8c 100644
--- a/src/armnn/layers/InstanceNormalizationLayer.cpp
+++ b/src/armnn/layers/InstanceNormalizationLayer.cpp
@@ -29,8 +29,10 @@ InstanceNormalizationLayer* InstanceNormalizationLayer::Clone(Graph& graph) cons
return CloneBase<InstanceNormalizationLayer>(graph, m_Param, GetName());
}
-void InstanceNormalizationLayer::ValidateTensorShapesFromInputs()
+void InstanceNormalizationLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
{
+ IgnoreUnused(shapeInferenceMethod);
+
VerifyLayerConnections(1, CHECK_LOCATION());
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
diff --git a/src/armnn/layers/InstanceNormalizationLayer.hpp b/src/armnn/layers/InstanceNormalizationLayer.hpp
index 2b59b0d23a..affc0281b1 100644
--- a/src/armnn/layers/InstanceNormalizationLayer.hpp
+++ b/src/armnn/layers/InstanceNormalizationLayer.hpp
@@ -25,7 +25,9 @@ public:
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref InstanceNormalizationLayer.
- void ValidateTensorShapesFromInputs() override;
+ /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
+ void ValidateTensorShapesFromInputs(
+ ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
void Accept(ILayerVisitor& visitor) const override;
diff --git a/src/armnn/layers/L2NormalizationLayer.cpp b/src/armnn/layers/L2NormalizationLayer.cpp
index e6d5f064f3..4a2945efbe 100644
--- a/src/armnn/layers/L2NormalizationLayer.cpp
+++ b/src/armnn/layers/L2NormalizationLayer.cpp
@@ -29,8 +29,10 @@ L2NormalizationLayer* L2NormalizationLayer::Clone(Graph& graph) const
return CloneBase<L2NormalizationLayer>(graph, m_Param, GetName());
}
-void L2NormalizationLayer::ValidateTensorShapesFromInputs()
+void L2NormalizationLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
{
+ IgnoreUnused(shapeInferenceMethod);
+
VerifyLayerConnections(1, CHECK_LOCATION());
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
diff --git a/src/armnn/layers/L2NormalizationLayer.hpp b/src/armnn/layers/L2NormalizationLayer.hpp
index be506b7d1a..1c7e483068 100644
--- a/src/armnn/layers/L2NormalizationLayer.hpp
+++ b/src/armnn/layers/L2NormalizationLayer.hpp
@@ -25,7 +25,9 @@ public:
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref L2NormalizationLayer.
- void ValidateTensorShapesFromInputs() override;
+ /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
+ void ValidateTensorShapesFromInputs(
+ ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
void Accept(ILayerVisitor& visitor) const override;
diff --git a/src/armnn/layers/LogSoftmaxLayer.cpp b/src/armnn/layers/LogSoftmaxLayer.cpp
index 627aa4cdd3..ab05fd24f3 100644
--- a/src/armnn/layers/LogSoftmaxLayer.cpp
+++ b/src/armnn/layers/LogSoftmaxLayer.cpp
@@ -29,8 +29,10 @@ LogSoftmaxLayer* LogSoftmaxLayer::Clone(Graph& graph) const
return CloneBase<LogSoftmaxLayer>(graph, m_Param, GetName());
}
-void LogSoftmaxLayer::ValidateTensorShapesFromInputs()
+void LogSoftmaxLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
{
+ IgnoreUnused(shapeInferenceMethod);
+
VerifyLayerConnections(1, CHECK_LOCATION());
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
diff --git a/src/armnn/layers/LogSoftmaxLayer.hpp b/src/armnn/layers/LogSoftmaxLayer.hpp
index 732e47e4cf..a1907b9b57 100644
--- a/src/armnn/layers/LogSoftmaxLayer.hpp
+++ b/src/armnn/layers/LogSoftmaxLayer.hpp
@@ -26,7 +26,9 @@ public:
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref LogSoftmaxLayer.
- void ValidateTensorShapesFromInputs() override;
+ /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
+ void ValidateTensorShapesFromInputs(
+ ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
void Accept(ILayerVisitor& visitor) const override;
diff --git a/src/armnn/layers/LstmLayer.cpp b/src/armnn/layers/LstmLayer.cpp
index 653b18a1c9..af708e4e06 100644
--- a/src/armnn/layers/LstmLayer.cpp
+++ b/src/armnn/layers/LstmLayer.cpp
@@ -163,8 +163,10 @@ std::vector<TensorShape> LstmLayer::InferOutputShapes(const std::vector<TensorSh
return outShapes;
}
-void LstmLayer::ValidateTensorShapesFromInputs()
+void LstmLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
{
+ IgnoreUnused(shapeInferenceMethod);
+
VerifyLayerConnections(3, CHECK_LOCATION());
auto inferredShapes = InferOutputShapes( {
diff --git a/src/armnn/layers/LstmLayer.hpp b/src/armnn/layers/LstmLayer.hpp
index 5ccb4bcf92..c7e4dd4583 100644
--- a/src/armnn/layers/LstmLayer.hpp
+++ b/src/armnn/layers/LstmLayer.hpp
@@ -96,7 +96,9 @@ public:
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref LstmLayer.
- void ValidateTensorShapesFromInputs() override;
+ /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
+ void ValidateTensorShapesFromInputs(
+ ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
/// By default returns inputShapes if the number of inputs are equal to number of outputs,
/// otherwise infers the output shapes from given input shapes and layer properties.
diff --git a/src/armnn/layers/MeanLayer.cpp b/src/armnn/layers/MeanLayer.cpp
index 5fa88f9398..b39268c7ca 100644
--- a/src/armnn/layers/MeanLayer.cpp
+++ b/src/armnn/layers/MeanLayer.cpp
@@ -38,8 +38,10 @@ MeanLayer* MeanLayer::Clone(Graph& graph) const
return std::move(layer);
}
-void MeanLayer::ValidateTensorShapesFromInputs()
+void MeanLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
{
+ IgnoreUnused(shapeInferenceMethod);
+
VerifyLayerConnections(1, CHECK_LOCATION());
const TensorInfo& input = GetInputSlot(0).GetConnection()->GetTensorInfo();
diff --git a/src/armnn/layers/MeanLayer.hpp b/src/armnn/layers/MeanLayer.hpp
index b7c5ed3720..3aacd59395 100644
--- a/src/armnn/layers/MeanLayer.hpp
+++ b/src/armnn/layers/MeanLayer.hpp
@@ -26,7 +26,9 @@ public:
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref MeanLayer.
- void ValidateTensorShapesFromInputs() override;
+ /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
+ void ValidateTensorShapesFromInputs(
+ ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
void Accept(ILayerVisitor& visitor) const override;
diff --git a/src/armnn/layers/MemCopyLayer.cpp b/src/armnn/layers/MemCopyLayer.cpp
index e4009de022..c087290682 100644
--- a/src/armnn/layers/MemCopyLayer.cpp
+++ b/src/armnn/layers/MemCopyLayer.cpp
@@ -33,8 +33,10 @@ std::unique_ptr<IWorkload> MemCopyLayer::CreateWorkload(const IWorkloadFactory&
return std::make_unique<CopyMemGenericWorkload>(descriptor, PrepInfoAndDesc(descriptor));
}
-void MemCopyLayer::ValidateTensorShapesFromInputs()
+void MemCopyLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
{
+ IgnoreUnused(shapeInferenceMethod);
+
VerifyLayerConnections(1, CHECK_LOCATION());
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
diff --git a/src/armnn/layers/MemCopyLayer.hpp b/src/armnn/layers/MemCopyLayer.hpp
index d466d0e1c8..10a9f55db3 100644
--- a/src/armnn/layers/MemCopyLayer.hpp
+++ b/src/armnn/layers/MemCopyLayer.hpp
@@ -25,7 +25,9 @@ public:
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref MemCopyLayer.
- void ValidateTensorShapesFromInputs() override;
+ /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
+ void ValidateTensorShapesFromInputs(
+ ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
void Accept(ILayerVisitor& visitor) const override;
diff --git a/src/armnn/layers/MemImportLayer.cpp b/src/armnn/layers/MemImportLayer.cpp
index bcccba1f4a..02092f4b4c 100644
--- a/src/armnn/layers/MemImportLayer.cpp
+++ b/src/armnn/layers/MemImportLayer.cpp
@@ -33,8 +33,10 @@ std::unique_ptr<IWorkload> MemImportLayer::CreateWorkload(const IWorkloadFactory
return std::make_unique<ImportMemGenericWorkload>(descriptor, PrepInfoAndDesc(descriptor));
}
-void MemImportLayer::ValidateTensorShapesFromInputs()
+void MemImportLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
{
+ IgnoreUnused(shapeInferenceMethod);
+
VerifyLayerConnections(1, CHECK_LOCATION());
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
diff --git a/src/armnn/layers/MemImportLayer.hpp b/src/armnn/layers/MemImportLayer.hpp
index 452e5e38f9..9b9c88832c 100644
--- a/src/armnn/layers/MemImportLayer.hpp
+++ b/src/armnn/layers/MemImportLayer.hpp
@@ -25,7 +25,9 @@ public:
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref MemImportLayer.
- void ValidateTensorShapesFromInputs() override;
+ /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
+ void ValidateTensorShapesFromInputs(
+ ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
void Accept(ILayerVisitor& visitor) const override;
diff --git a/src/armnn/layers/MergeLayer.cpp b/src/armnn/layers/MergeLayer.cpp
index ad7d8b1416..b05eb68d25 100644
--- a/src/armnn/layers/MergeLayer.cpp
+++ b/src/armnn/layers/MergeLayer.cpp
@@ -27,8 +27,10 @@ MergeLayer* MergeLayer::Clone(Graph& graph) const
return CloneBase<MergeLayer>(graph, GetName());
}
-void MergeLayer::ValidateTensorShapesFromInputs()
+void MergeLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
{
+ IgnoreUnused(shapeInferenceMethod);
+
VerifyLayerConnections(2, CHECK_LOCATION());
std::vector<TensorShape> inferredShapes = InferOutputShapes({
diff --git a/src/armnn/layers/MergeLayer.hpp b/src/armnn/layers/MergeLayer.hpp
index 145284475c..3d0cf52c77 100644
--- a/src/armnn/layers/MergeLayer.hpp
+++ b/src/armnn/layers/MergeLayer.hpp
@@ -25,7 +25,9 @@ public:
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref MergeLayer.
- void ValidateTensorShapesFromInputs() override;
+ /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
+ void ValidateTensorShapesFromInputs(
+ ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
/// Infers the output shapes from given input shapes.
/// @param [in] inputShapes The input shapes layer has.
diff --git a/src/armnn/layers/NormalizationLayer.cpp b/src/armnn/layers/NormalizationLayer.cpp
index 44179fd534..9011ece821 100644
--- a/src/armnn/layers/NormalizationLayer.cpp
+++ b/src/armnn/layers/NormalizationLayer.cpp
@@ -29,8 +29,10 @@ NormalizationLayer* NormalizationLayer::Clone(Graph& graph) const
return CloneBase<NormalizationLayer>(graph, m_Param, GetName());
}
-void NormalizationLayer::ValidateTensorShapesFromInputs()
+void NormalizationLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
{
+ IgnoreUnused(shapeInferenceMethod);
+
VerifyLayerConnections(1, CHECK_LOCATION());
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
diff --git a/src/armnn/layers/NormalizationLayer.hpp b/src/armnn/layers/NormalizationLayer.hpp
index 8ba3f53d48..25787a8693 100644
--- a/src/armnn/layers/NormalizationLayer.hpp
+++ b/src/armnn/layers/NormalizationLayer.hpp
@@ -25,7 +25,9 @@ public:
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref NormalizationLayer.
- void ValidateTensorShapesFromInputs() override;
+ /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
+ void ValidateTensorShapesFromInputs(
+ ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
void Accept(ILayerVisitor& visitor) const override;
diff --git a/src/armnn/layers/OutputLayer.cpp b/src/armnn/layers/OutputLayer.cpp
index f00e0a5259..5ff91880f8 100644
--- a/src/armnn/layers/OutputLayer.cpp
+++ b/src/armnn/layers/OutputLayer.cpp
@@ -29,8 +29,10 @@ OutputLayer* OutputLayer::Clone(Graph& graph) const
return CloneBase<OutputLayer>(graph, GetBindingId(), GetName());
}
-void OutputLayer::ValidateTensorShapesFromInputs()
+void OutputLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
{
+ IgnoreUnused(shapeInferenceMethod);
+
// Just validates that the input is connected.
ConditionalThrow<LayerValidationException>(GetInputSlot(0).GetConnection() != nullptr,
"OutputLayer: Input slot must be connected.");
diff --git a/src/armnn/layers/OutputLayer.hpp b/src/armnn/layers/OutputLayer.hpp
index 89bcfd6bb6..26c5a0a21b 100644
--- a/src/armnn/layers/OutputLayer.hpp
+++ b/src/armnn/layers/OutputLayer.hpp
@@ -37,7 +37,9 @@ public:
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref OutputLayer.
- void ValidateTensorShapesFromInputs() override;
+ /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
+ void ValidateTensorShapesFromInputs(
+ ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
void Accept(ILayerVisitor& visitor) const override;
diff --git a/src/armnn/layers/PadLayer.cpp b/src/armnn/layers/PadLayer.cpp
index 4fab88e615..690318d308 100644
--- a/src/armnn/layers/PadLayer.cpp
+++ b/src/armnn/layers/PadLayer.cpp
@@ -36,8 +36,10 @@ PadLayer* PadLayer::Clone(Graph& graph) const
return std::move(layer);
}
-void PadLayer::ValidateTensorShapesFromInputs()
+void PadLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
{
+ IgnoreUnused(shapeInferenceMethod);
+
return;
}
diff --git a/src/armnn/layers/PadLayer.hpp b/src/armnn/layers/PadLayer.hpp
index f3cfb000bf..a15563d164 100644
--- a/src/armnn/layers/PadLayer.hpp
+++ b/src/armnn/layers/PadLayer.hpp
@@ -26,7 +26,9 @@ public:
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref PadLayer.
- void ValidateTensorShapesFromInputs() override;
+ /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
+ void ValidateTensorShapesFromInputs(
+ ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
void Accept(ILayerVisitor& visitor) const override;
diff --git a/src/armnn/layers/PermuteLayer.cpp b/src/armnn/layers/PermuteLayer.cpp
index e565b48b57..a585e66538 100644
--- a/src/armnn/layers/PermuteLayer.cpp
+++ b/src/armnn/layers/PermuteLayer.cpp
@@ -40,8 +40,10 @@ std::vector<TensorShape> PermuteLayer::InferOutputShapes(const std::vector<Tenso
return std::vector<TensorShape> ({armnnUtils::Permuted(inShape, m_Param.m_DimMappings)});
}
-void PermuteLayer::ValidateTensorShapesFromInputs()
+void PermuteLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
{
+ IgnoreUnused(shapeInferenceMethod);
+
VerifyLayerConnections(1, CHECK_LOCATION());
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
diff --git a/src/armnn/layers/PermuteLayer.hpp b/src/armnn/layers/PermuteLayer.hpp
index f2057d48e9..9af1d9b95f 100644
--- a/src/armnn/layers/PermuteLayer.hpp
+++ b/src/armnn/layers/PermuteLayer.hpp
@@ -27,7 +27,9 @@ public:
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref PermuteLayer.
- void ValidateTensorShapesFromInputs() override;
+ /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
+ void ValidateTensorShapesFromInputs(
+ ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
/// By default returns inputShapes if the number of inputs are equal to number of outputs,
/// otherwise infers the output shapes from given input shapes and layer properties.
diff --git a/src/armnn/layers/Pooling2dLayer.cpp b/src/armnn/layers/Pooling2dLayer.cpp
index ad2c82f761..defed9758b 100644
--- a/src/armnn/layers/Pooling2dLayer.cpp
+++ b/src/armnn/layers/Pooling2dLayer.cpp
@@ -100,8 +100,10 @@ std::vector<TensorShape> Pooling2dLayer::InferOutputShapes(const std::vector<Ten
return std::vector<TensorShape>({ tensorShape });
}
-void Pooling2dLayer::ValidateTensorShapesFromInputs()
+void Pooling2dLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
{
+ IgnoreUnused(shapeInferenceMethod);
+
VerifyLayerConnections(1, CHECK_LOCATION());
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
diff --git a/src/armnn/layers/Pooling2dLayer.hpp b/src/armnn/layers/Pooling2dLayer.hpp
index 2563eb130b..2a5703b8e9 100644
--- a/src/armnn/layers/Pooling2dLayer.hpp
+++ b/src/armnn/layers/Pooling2dLayer.hpp
@@ -25,7 +25,9 @@ public:
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref Pooling2dLayer.
- void ValidateTensorShapesFromInputs() override;
+ /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
+ void ValidateTensorShapesFromInputs(
+ ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
/// By default returns inputShapes if the number of inputs are equal to number of outputs,
/// otherwise infers the output shapes from given input shapes and layer properties.
diff --git a/src/armnn/layers/PreCompiledLayer.cpp b/src/armnn/layers/PreCompiledLayer.cpp
index 3444afc454..577e19f3cc 100644
--- a/src/armnn/layers/PreCompiledLayer.cpp
+++ b/src/armnn/layers/PreCompiledLayer.cpp
@@ -35,8 +35,10 @@ std::unique_ptr<IWorkload> PreCompiledLayer::CreateWorkload(const armnn::IWorklo
return factory.CreatePreCompiled(descriptor, PrepInfoAndDesc(descriptor));
}
-void PreCompiledLayer::ValidateTensorShapesFromInputs()
+void PreCompiledLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
{
+ IgnoreUnused(shapeInferenceMethod);
+
// NOTE: since the PreCompiledLayer is an internal layer created from a valid SubgraphView,
// we do not need to validate its input shapes
}
diff --git a/src/armnn/layers/PreCompiledLayer.hpp b/src/armnn/layers/PreCompiledLayer.hpp
index 1a87f61e5b..a4b1c78f12 100644
--- a/src/armnn/layers/PreCompiledLayer.hpp
+++ b/src/armnn/layers/PreCompiledLayer.hpp
@@ -29,7 +29,8 @@ public:
PreCompiledLayer* Clone(Graph &graph) const override;
- void ValidateTensorShapesFromInputs() override;
+ void ValidateTensorShapesFromInputs(
+ ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
void SetPreCompiledObject(PreCompiledObjectPtr preCompiledObject);
diff --git a/src/armnn/layers/PreluLayer.cpp b/src/armnn/layers/PreluLayer.cpp
index 609480673b..2527cb91be 100644
--- a/src/armnn/layers/PreluLayer.cpp
+++ b/src/armnn/layers/PreluLayer.cpp
@@ -94,8 +94,10 @@ std::vector<TensorShape> PreluLayer::InferOutputShapes(const std::vector<TensorS
return { outputShape };
}
-void PreluLayer::ValidateTensorShapesFromInputs()
+void PreluLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
{
+ IgnoreUnused(shapeInferenceMethod);
+
VerifyLayerConnections(2, CHECK_LOCATION());
std::vector<TensorShape> inferredShapes = InferOutputShapes(
diff --git a/src/armnn/layers/PreluLayer.hpp b/src/armnn/layers/PreluLayer.hpp
index 2f2704bf73..6febdf9f39 100644
--- a/src/armnn/layers/PreluLayer.hpp
+++ b/src/armnn/layers/PreluLayer.hpp
@@ -32,7 +32,9 @@ public:
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref PreluLayer.
- void ValidateTensorShapesFromInputs() override;
+ /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
+ void ValidateTensorShapesFromInputs(
+ ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
void Accept(ILayerVisitor& visitor) const override;
diff --git a/src/armnn/layers/QLstmLayer.cpp b/src/armnn/layers/QLstmLayer.cpp
index 7e6154817c..c5155d7bf9 100644
--- a/src/armnn/layers/QLstmLayer.cpp
+++ b/src/armnn/layers/QLstmLayer.cpp
@@ -165,8 +165,10 @@ std::vector<TensorShape> QLstmLayer::InferOutputShapes(const std::vector<TensorS
return outShapes;
}
-void QLstmLayer::ValidateTensorShapesFromInputs()
+void QLstmLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
{
+ IgnoreUnused(shapeInferenceMethod);
+
VerifyLayerConnections(3, CHECK_LOCATION());
auto inferredShapes = InferOutputShapes(
diff --git a/src/armnn/layers/QLstmLayer.hpp b/src/armnn/layers/QLstmLayer.hpp
index 2d40b7e29e..017893319b 100644
--- a/src/armnn/layers/QLstmLayer.hpp
+++ b/src/armnn/layers/QLstmLayer.hpp
@@ -98,7 +98,9 @@ public:
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref QLstmLayer.
- void ValidateTensorShapesFromInputs() override;
+ /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
+ void ValidateTensorShapesFromInputs(
+ ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
/// By default returns inputShapes if the number of inputs are equal to number of outputs,
/// otherwise infers the output shapes from given input shapes and layer properties.
diff --git a/src/armnn/layers/QuantizeLayer.cpp b/src/armnn/layers/QuantizeLayer.cpp
index 701041f4b3..5cfac25e4b 100644
--- a/src/armnn/layers/QuantizeLayer.cpp
+++ b/src/armnn/layers/QuantizeLayer.cpp
@@ -29,8 +29,10 @@ Layer* QuantizeLayer::Clone(Graph& graph) const
return clone;
}
-void QuantizeLayer::ValidateTensorShapesFromInputs()
+void QuantizeLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
{
+ IgnoreUnused(shapeInferenceMethod);
+
VerifyLayerConnections(1, CHECK_LOCATION());
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
diff --git a/src/armnn/layers/QuantizeLayer.hpp b/src/armnn/layers/QuantizeLayer.hpp
index 32cd53f810..a223f59470 100644
--- a/src/armnn/layers/QuantizeLayer.hpp
+++ b/src/armnn/layers/QuantizeLayer.hpp
@@ -21,7 +21,8 @@ public:
Layer* Clone(Graph& graph) const override;
- void ValidateTensorShapesFromInputs() override;
+ void ValidateTensorShapesFromInputs(
+ ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
void Accept(ILayerVisitor& visitor) const override;
diff --git a/src/armnn/layers/QuantizedLstmLayer.cpp b/src/armnn/layers/QuantizedLstmLayer.cpp
index b56ae3ff52..e26857e01e 100644
--- a/src/armnn/layers/QuantizedLstmLayer.cpp
+++ b/src/armnn/layers/QuantizedLstmLayer.cpp
@@ -91,8 +91,10 @@ std::vector<TensorShape> QuantizedLstmLayer::InferOutputShapes(const std::vector
return outShapes;
}
-void QuantizedLstmLayer::ValidateTensorShapesFromInputs()
+void QuantizedLstmLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
{
+ IgnoreUnused(shapeInferenceMethod);
+
VerifyLayerConnections(3, CHECK_LOCATION());
auto inferredShapes = InferOutputShapes(
diff --git a/src/armnn/layers/QuantizedLstmLayer.hpp b/src/armnn/layers/QuantizedLstmLayer.hpp
index 9e0186fc71..1353a06d9f 100644
--- a/src/armnn/layers/QuantizedLstmLayer.hpp
+++ b/src/armnn/layers/QuantizedLstmLayer.hpp
@@ -60,7 +60,9 @@ public:
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref QuantizedLstmLayer.
- void ValidateTensorShapesFromInputs() override;
+ /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
+ void ValidateTensorShapesFromInputs(
+ ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
/// By default returns inputShapes if the number of inputs are equal to number of outputs,
/// otherwise infers the output shapes from given input shapes and layer properties.
diff --git a/src/armnn/layers/ReshapeLayer.cpp b/src/armnn/layers/ReshapeLayer.cpp
index b496dbb642..0257ca9b15 100644
--- a/src/armnn/layers/ReshapeLayer.cpp
+++ b/src/armnn/layers/ReshapeLayer.cpp
@@ -36,8 +36,10 @@ std::vector<TensorShape> ReshapeLayer::InferOutputShapes(const std::vector<Tenso
return std::vector<TensorShape>({ m_Param.m_TargetShape });
}
-void ReshapeLayer::ValidateTensorShapesFromInputs()
+void ReshapeLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
{
+ IgnoreUnused(shapeInferenceMethod);
+
VerifyLayerConnections(1, CHECK_LOCATION());
auto inferredShapes = InferOutputShapes({ });
diff --git a/src/armnn/layers/ReshapeLayer.hpp b/src/armnn/layers/ReshapeLayer.hpp
index 5e0e883822..4f0300a676 100644
--- a/src/armnn/layers/ReshapeLayer.hpp
+++ b/src/armnn/layers/ReshapeLayer.hpp
@@ -27,7 +27,9 @@ public:
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref ReshapeLayer.
- void ValidateTensorShapesFromInputs() override;
+ /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
+ void ValidateTensorShapesFromInputs(
+ ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
/// By default returns inputShapes if the number of inputs are equal to number of outputs,
/// otherwise infers the output shapes from given input shapes and layer properties.
diff --git a/src/armnn/layers/ResizeLayer.cpp b/src/armnn/layers/ResizeLayer.cpp
index b16adeb860..b07eb9a7d6 100644
--- a/src/armnn/layers/ResizeLayer.cpp
+++ b/src/armnn/layers/ResizeLayer.cpp
@@ -58,8 +58,10 @@ std::vector<TensorShape> ResizeLayer::InferOutputShapes(const std::vector<Tensor
return std::vector<TensorShape>({ tensorShape });
}
-void ResizeLayer::ValidateTensorShapesFromInputs()
+void ResizeLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
{
+ IgnoreUnused(shapeInferenceMethod);
+
VerifyLayerConnections(1, CHECK_LOCATION());
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
diff --git a/src/armnn/layers/ResizeLayer.hpp b/src/armnn/layers/ResizeLayer.hpp
index 9ad4910cec..0adda942cf 100644
--- a/src/armnn/layers/ResizeLayer.hpp
+++ b/src/armnn/layers/ResizeLayer.hpp
@@ -25,7 +25,9 @@ public:
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref ResizeLayer.
- void ValidateTensorShapesFromInputs() override;
+ /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
+ void ValidateTensorShapesFromInputs(
+ ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
/// By default returns inputShapes if the number of inputs are equal to number of outputs,
/// otherwise infers the output shapes from given input shapes and layer properties.
diff --git a/src/armnn/layers/RsqrtLayer.cpp b/src/armnn/layers/RsqrtLayer.cpp
index dfd466dca3..b3aecb27a5 100644
--- a/src/armnn/layers/RsqrtLayer.cpp
+++ b/src/armnn/layers/RsqrtLayer.cpp
@@ -30,8 +30,10 @@ RsqrtLayer* RsqrtLayer::Clone(Graph& graph) const
return CloneBase<RsqrtLayer>(graph, GetName());
}
-void RsqrtLayer::ValidateTensorShapesFromInputs()
+void RsqrtLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
{
+ IgnoreUnused(shapeInferenceMethod);
+
VerifyLayerConnections(1, CHECK_LOCATION());
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
diff --git a/src/armnn/layers/RsqrtLayer.hpp b/src/armnn/layers/RsqrtLayer.hpp
index 1e51cc04ad..d4183ef70e 100644
--- a/src/armnn/layers/RsqrtLayer.hpp
+++ b/src/armnn/layers/RsqrtLayer.hpp
@@ -25,7 +25,9 @@ public:
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref RsqrtLayer.
- void ValidateTensorShapesFromInputs() override;
+ /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
+ void ValidateTensorShapesFromInputs(
+ ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
void Accept(ILayerVisitor& visitor) const override;
diff --git a/src/armnn/layers/SliceLayer.cpp b/src/armnn/layers/SliceLayer.cpp
index d92ed6fc48..2aa32e3b44 100644
--- a/src/armnn/layers/SliceLayer.cpp
+++ b/src/armnn/layers/SliceLayer.cpp
@@ -33,8 +33,10 @@ SliceLayer* SliceLayer::Clone(Graph& graph) const
return CloneBase<SliceLayer>(graph, m_Param, GetName());
}
-void SliceLayer::ValidateTensorShapesFromInputs()
+void SliceLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
{
+ IgnoreUnused(shapeInferenceMethod);
+
VerifyLayerConnections(1, CHECK_LOCATION());
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
diff --git a/src/armnn/layers/SliceLayer.hpp b/src/armnn/layers/SliceLayer.hpp
index abfe472194..3d9a7feee5 100644
--- a/src/armnn/layers/SliceLayer.hpp
+++ b/src/armnn/layers/SliceLayer.hpp
@@ -25,7 +25,9 @@ public:
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref SliceLayer.
- void ValidateTensorShapesFromInputs() override;
+ /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
+ void ValidateTensorShapesFromInputs(
+ ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
/// By default returns inputShapes if the number of inputs are equal to number of outputs,
/// otherwise infers the output shapes from given input shapes and layer properties.
diff --git a/src/armnn/layers/SoftmaxLayer.cpp b/src/armnn/layers/SoftmaxLayer.cpp
index 738347c1b3..56c3792c1b 100644
--- a/src/armnn/layers/SoftmaxLayer.cpp
+++ b/src/armnn/layers/SoftmaxLayer.cpp
@@ -29,8 +29,10 @@ SoftmaxLayer* SoftmaxLayer::Clone(Graph& graph) const
return CloneBase<SoftmaxLayer>(graph, m_Param, GetName());
}
-void SoftmaxLayer::ValidateTensorShapesFromInputs()
+void SoftmaxLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
{
+ IgnoreUnused(shapeInferenceMethod);
+
VerifyLayerConnections(1, CHECK_LOCATION());
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
diff --git a/src/armnn/layers/SoftmaxLayer.hpp b/src/armnn/layers/SoftmaxLayer.hpp
index 839170e9b0..84aae85000 100644
--- a/src/armnn/layers/SoftmaxLayer.hpp
+++ b/src/armnn/layers/SoftmaxLayer.hpp
@@ -25,7 +25,9 @@ public:
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref SoftmaxLayer.
- void ValidateTensorShapesFromInputs() override;
+ /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
+ void ValidateTensorShapesFromInputs(
+ ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
void Accept(ILayerVisitor& visitor) const override;
diff --git a/src/armnn/layers/SpaceToBatchNdLayer.cpp b/src/armnn/layers/SpaceToBatchNdLayer.cpp
index ce48b5b5c2..fbc3ca755d 100644
--- a/src/armnn/layers/SpaceToBatchNdLayer.cpp
+++ b/src/armnn/layers/SpaceToBatchNdLayer.cpp
@@ -66,8 +66,10 @@ std::vector<TensorShape> SpaceToBatchNdLayer::InferOutputShapes(const std::vecto
return std::vector<TensorShape>({ outputShape });
}
-void SpaceToBatchNdLayer::ValidateTensorShapesFromInputs()
+void SpaceToBatchNdLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
{
+ IgnoreUnused(shapeInferenceMethod);
+
VerifyLayerConnections(1, CHECK_LOCATION());
std::vector<TensorShape> inferredShapes = InferOutputShapes({
diff --git a/src/armnn/layers/SpaceToBatchNdLayer.hpp b/src/armnn/layers/SpaceToBatchNdLayer.hpp
index cb8162f7cd..707017b5a8 100644
--- a/src/armnn/layers/SpaceToBatchNdLayer.hpp
+++ b/src/armnn/layers/SpaceToBatchNdLayer.hpp
@@ -32,7 +32,9 @@ public:
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref SpaceToBatchNdLayer.
- void ValidateTensorShapesFromInputs() override;
+ /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
+ void ValidateTensorShapesFromInputs(
+ ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
void Accept(ILayerVisitor& visitor) const override;
diff --git a/src/armnn/layers/SpaceToDepthLayer.cpp b/src/armnn/layers/SpaceToDepthLayer.cpp
index bf65240e0c..5c8e2d4551 100644
--- a/src/armnn/layers/SpaceToDepthLayer.cpp
+++ b/src/armnn/layers/SpaceToDepthLayer.cpp
@@ -59,8 +59,10 @@ std::vector<TensorShape> SpaceToDepthLayer::InferOutputShapes(const std::vector<
return std::vector<TensorShape>({ outputShape });
}
-void SpaceToDepthLayer::ValidateTensorShapesFromInputs()
+void SpaceToDepthLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
{
+ IgnoreUnused(shapeInferenceMethod);
+
VerifyLayerConnections(1, CHECK_LOCATION());
std::vector<TensorShape> inferredShapes = InferOutputShapes({
diff --git a/src/armnn/layers/SpaceToDepthLayer.hpp b/src/armnn/layers/SpaceToDepthLayer.hpp
index 799c36754d..ca0d804320 100644
--- a/src/armnn/layers/SpaceToDepthLayer.hpp
+++ b/src/armnn/layers/SpaceToDepthLayer.hpp
@@ -32,7 +32,9 @@ public:
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref SpaceToDepthLayer.
- void ValidateTensorShapesFromInputs() override;
+ /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
+ void ValidateTensorShapesFromInputs(
+ ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
void Accept(ILayerVisitor& visitor) const override;
diff --git a/src/armnn/layers/SplitterLayer.cpp b/src/armnn/layers/SplitterLayer.cpp
index 8ec8121495..9455c88041 100644
--- a/src/armnn/layers/SplitterLayer.cpp
+++ b/src/armnn/layers/SplitterLayer.cpp
@@ -139,8 +139,10 @@ std::vector<TensorShape> SplitterLayer::InferOutputShapes(const std::vector<Tens
return outShapes;
}
-void SplitterLayer::ValidateTensorShapesFromInputs()
+void SplitterLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
{
+ IgnoreUnused(shapeInferenceMethod);
+
std::vector<TensorShape> views;
for (unsigned int viewIdx = 0; viewIdx < m_Param.GetNumViews(); viewIdx++)
{
diff --git a/src/armnn/layers/SplitterLayer.hpp b/src/armnn/layers/SplitterLayer.hpp
index a6c8cbe4d7..39aab90853 100644
--- a/src/armnn/layers/SplitterLayer.hpp
+++ b/src/armnn/layers/SplitterLayer.hpp
@@ -35,7 +35,9 @@ public:
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref SplitterLayer.
- void ValidateTensorShapesFromInputs() override;
+ /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
+ void ValidateTensorShapesFromInputs(
+ ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
/// By default returns inputShapes if the number of inputs are equal to number of outputs,
/// otherwise infers the output shapes from given input shapes and layer properties.
diff --git a/src/armnn/layers/StackLayer.cpp b/src/armnn/layers/StackLayer.cpp
index e034cb46a6..6e81890290 100644
--- a/src/armnn/layers/StackLayer.cpp
+++ b/src/armnn/layers/StackLayer.cpp
@@ -58,8 +58,10 @@ std::vector<TensorShape> StackLayer::InferOutputShapes(const std::vector<TensorS
return std::vector<TensorShape>({ targetShape });
}
-void StackLayer::ValidateTensorShapesFromInputs()
+void StackLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
{
+ IgnoreUnused(shapeInferenceMethod);
+
// Validates Stack layer.
ConditionalThrowIfNotEqual<LayerValidationException>(
"StackLayer: Num Input Slots must match Num Inputs.",
diff --git a/src/armnn/layers/StackLayer.hpp b/src/armnn/layers/StackLayer.hpp
index 5ec2e8a55d..5e937db43a 100644
--- a/src/armnn/layers/StackLayer.hpp
+++ b/src/armnn/layers/StackLayer.hpp
@@ -25,7 +25,9 @@ public:
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref StackLayer.
- void ValidateTensorShapesFromInputs() override;
+ /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
+ void ValidateTensorShapesFromInputs(
+ ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
/// By default returns inputShapes if the number of inputs are equal to number of outputs,
/// otherwise infers the output shapes from given input shapes and layer properties.
diff --git a/src/armnn/layers/StandInLayer.cpp b/src/armnn/layers/StandInLayer.cpp
index d23d1d0bad..d79caf613c 100644
--- a/src/armnn/layers/StandInLayer.cpp
+++ b/src/armnn/layers/StandInLayer.cpp
@@ -34,8 +34,10 @@ std::vector<TensorShape> StandInLayer::InferOutputShapes(const std::vector<Tenso
throw Exception("Stand in layer does not support infering output shapes");
}
-void StandInLayer::ValidateTensorShapesFromInputs()
+void StandInLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
{
+ IgnoreUnused(shapeInferenceMethod);
+
// Cannot validate this layer since no implementation details can be known by the framework
// so do nothing here.
}
diff --git a/src/armnn/layers/StandInLayer.hpp b/src/armnn/layers/StandInLayer.hpp
index d087b939bb..a7e4a2c400 100644
--- a/src/armnn/layers/StandInLayer.hpp
+++ b/src/armnn/layers/StandInLayer.hpp
@@ -26,7 +26,9 @@ public:
/// Check if the input tensor shape(s)
/// Does nothing since cannot validate any properties of this layer.
- void ValidateTensorShapesFromInputs() override;
+ /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
+ void ValidateTensorShapesFromInputs(
+ ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
/// Empty implementation that throws Exception if called.
/// otherwise infers the output shapes from given input shapes and layer properties.
diff --git a/src/armnn/layers/StridedSliceLayer.cpp b/src/armnn/layers/StridedSliceLayer.cpp
index b100f7ab6b..2609b94d91 100644
--- a/src/armnn/layers/StridedSliceLayer.cpp
+++ b/src/armnn/layers/StridedSliceLayer.cpp
@@ -80,8 +80,10 @@ std::vector<TensorShape> StridedSliceLayer::InferOutputShapes(
TensorShape(boost::numeric_cast<unsigned int>(outputShape.size()), &outputShape[0]) });
}
-void StridedSliceLayer::ValidateTensorShapesFromInputs()
+void StridedSliceLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
{
+ IgnoreUnused(shapeInferenceMethod);
+
VerifyLayerConnections(1, CHECK_LOCATION());
auto inferredShapes = InferOutputShapes({GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape()});
diff --git a/src/armnn/layers/StridedSliceLayer.hpp b/src/armnn/layers/StridedSliceLayer.hpp
index 07219697d5..f9ba7e2921 100644
--- a/src/armnn/layers/StridedSliceLayer.hpp
+++ b/src/armnn/layers/StridedSliceLayer.hpp
@@ -31,7 +31,9 @@ public:
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref StridedSliceLayer.
- void ValidateTensorShapesFromInputs() override;
+ /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
+ void ValidateTensorShapesFromInputs(
+ ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
void Accept(ILayerVisitor& visitor) const override;
diff --git a/src/armnn/layers/SwitchLayer.cpp b/src/armnn/layers/SwitchLayer.cpp
index c4b065a735..d408de89e7 100644
--- a/src/armnn/layers/SwitchLayer.cpp
+++ b/src/armnn/layers/SwitchLayer.cpp
@@ -27,8 +27,10 @@ SwitchLayer* SwitchLayer::Clone(Graph& graph) const
return CloneBase<SwitchLayer>(graph, GetName());
}
-void SwitchLayer::ValidateTensorShapesFromInputs()
+void SwitchLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
{
+ IgnoreUnused(shapeInferenceMethod);
+
VerifyLayerConnections(2, CHECK_LOCATION());
ARMNN_ASSERT_MSG(GetNumOutputSlots() == 2, "SwitchLayer: The layer should return 2 outputs.");
diff --git a/src/armnn/layers/SwitchLayer.hpp b/src/armnn/layers/SwitchLayer.hpp
index 2a6a09db24..70223487b9 100644
--- a/src/armnn/layers/SwitchLayer.hpp
+++ b/src/armnn/layers/SwitchLayer.hpp
@@ -25,7 +25,9 @@ public:
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref SwitchLayer.
- void ValidateTensorShapesFromInputs() override;
+ /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
+ void ValidateTensorShapesFromInputs(
+ ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
void Accept(ILayerVisitor& visitor) const override;
diff --git a/src/armnn/layers/TransposeConvolution2dLayer.cpp b/src/armnn/layers/TransposeConvolution2dLayer.cpp
index 28258820ad..ffe92bbbd2 100644
--- a/src/armnn/layers/TransposeConvolution2dLayer.cpp
+++ b/src/armnn/layers/TransposeConvolution2dLayer.cpp
@@ -103,8 +103,10 @@ std::vector<TensorShape> TransposeConvolution2dLayer::InferOutputShapes(
return std::vector<TensorShape>({ tensorShape });
}
-void TransposeConvolution2dLayer::ValidateTensorShapesFromInputs()
+void TransposeConvolution2dLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
{
+ IgnoreUnused(shapeInferenceMethod);
+
VerifyLayerConnections(1, CHECK_LOCATION());
ARMNN_ASSERT_MSG(m_Weight != nullptr, "TransposeConvolution2dLayer: Weight data cannot be null.");
diff --git a/src/armnn/layers/TransposeConvolution2dLayer.hpp b/src/armnn/layers/TransposeConvolution2dLayer.hpp
index 24c0e494d5..ecdf7dc1a6 100644
--- a/src/armnn/layers/TransposeConvolution2dLayer.hpp
+++ b/src/armnn/layers/TransposeConvolution2dLayer.hpp
@@ -32,7 +32,9 @@ public:
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref TransposeConvolution2dLayer.
- void ValidateTensorShapesFromInputs() override;
+ /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
+ void ValidateTensorShapesFromInputs(
+ ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
/// Infers the output shapes from given input shapes and layer properties.
/// @param [in] inputShapes The input shapes the layer has.
diff --git a/src/armnn/layers/TransposeLayer.cpp b/src/armnn/layers/TransposeLayer.cpp
index c058332c90..90f8d1a7db 100644
--- a/src/armnn/layers/TransposeLayer.cpp
+++ b/src/armnn/layers/TransposeLayer.cpp
@@ -40,8 +40,10 @@ std::vector<TensorShape> TransposeLayer::InferOutputShapes(const std::vector<Ten
return std::vector<TensorShape> ({armnnUtils::TransposeTensorShape(inShape, m_Param.m_DimMappings)});
}
-void TransposeLayer::ValidateTensorShapesFromInputs()
+void TransposeLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
{
+ IgnoreUnused(shapeInferenceMethod);
+
VerifyLayerConnections(1, CHECK_LOCATION());
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
diff --git a/src/armnn/layers/TransposeLayer.hpp b/src/armnn/layers/TransposeLayer.hpp
index a668ce835e..3e94a9f4d8 100644
--- a/src/armnn/layers/TransposeLayer.hpp
+++ b/src/armnn/layers/TransposeLayer.hpp
@@ -26,7 +26,9 @@ public:
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref TransposeLayer.
- void ValidateTensorShapesFromInputs() override;
+ /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
+ void ValidateTensorShapesFromInputs(
+ ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
/// Infers the output shapes from given input shapes and the permutation vector.
/// @param [in] inputShapes The input shapes layer has.