diff options
Diffstat (limited to 'src/armnn/layers')
65 files changed, 385 insertions, 435 deletions
diff --git a/src/armnn/layers/AbsLayer.cpp b/src/armnn/layers/AbsLayer.cpp index 6f7141551e..ccee524858 100644 --- a/src/armnn/layers/AbsLayer.cpp +++ b/src/armnn/layers/AbsLayer.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd. All rights reserved. +// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. // SPDX-License-Identifier: MIT // @@ -32,18 +32,16 @@ AbsLayer* AbsLayer::Clone(Graph& graph) const void AbsLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { - IgnoreUnused(shapeInferenceMethod); - VerifyLayerConnections(1, CHECK_LOCATION()); + const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape(); + + VerifyShapeInferenceType(outputShape, shapeInferenceMethod); auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() }); ARMNN_ASSERT(inferredShapes.size() == 1); - ConditionalThrowIfNotEqual<LayerValidationException>( - "AbsLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.", - GetOutputSlot(0).GetTensorInfo().GetShape(), - inferredShapes[0]); + ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "AbsLayer"); } void AbsLayer::Accept(ILayerVisitor& visitor) const diff --git a/src/armnn/layers/ActivationLayer.cpp b/src/armnn/layers/ActivationLayer.cpp index 1aed59b781..c6443b6997 100644 --- a/src/armnn/layers/ActivationLayer.cpp +++ b/src/armnn/layers/ActivationLayer.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd. All rights reserved. +// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. 
// SPDX-License-Identifier: MIT // #include "ActivationLayer.hpp" @@ -30,18 +30,17 @@ ActivationLayer* ActivationLayer::Clone(Graph& graph) const void ActivationLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { - IgnoreUnused(shapeInferenceMethod); - VerifyLayerConnections(1, CHECK_LOCATION()); + const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape(); + + VerifyShapeInferenceType(outputShape, shapeInferenceMethod); + auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() }); ARMNN_ASSERT(inferredShapes.size() == 1); - ConditionalThrowIfNotEqual<LayerValidationException>( - "ActivationLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.", - GetOutputSlot(0).GetTensorInfo().GetShape(), - inferredShapes[0]); + ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "ActivationLayer"); } void ActivationLayer::Accept(ILayerVisitor& visitor) const diff --git a/src/armnn/layers/AdditionLayer.cpp b/src/armnn/layers/AdditionLayer.cpp index 911d4e0488..b27f450f73 100644 --- a/src/armnn/layers/AdditionLayer.cpp +++ b/src/armnn/layers/AdditionLayer.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd. All rights reserved. +// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. // SPDX-License-Identifier: MIT // diff --git a/src/armnn/layers/ArgMinMaxLayer.cpp b/src/armnn/layers/ArgMinMaxLayer.cpp index 288192f98d..e288d16232 100644 --- a/src/armnn/layers/ArgMinMaxLayer.cpp +++ b/src/armnn/layers/ArgMinMaxLayer.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd. All rights reserved. +// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. 
// SPDX-License-Identifier: MIT // @@ -71,18 +71,17 @@ std::vector<TensorShape> ArgMinMaxLayer::InferOutputShapes(const std::vector<Ten void ArgMinMaxLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { - IgnoreUnused(shapeInferenceMethod); - VerifyLayerConnections(1, CHECK_LOCATION()); + const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape(); + + VerifyShapeInferenceType(outputShape, shapeInferenceMethod); + auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() }); ARMNN_ASSERT(inferredShapes.size() == 1); - ConditionalThrowIfNotEqual<LayerValidationException>( - "ArgMinMaxLayer: TensorShape set on OutputSlot does not match the inferred shape.", - GetOutputSlot(0).GetTensorInfo().GetShape(), - inferredShapes[0]); + ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "ArgMinMaxLayer"); } void ArgMinMaxLayer::Accept(ILayerVisitor& visitor) const diff --git a/src/armnn/layers/BatchNormalizationLayer.cpp b/src/armnn/layers/BatchNormalizationLayer.cpp index 9fcc30cbba..fa589dbc75 100644 --- a/src/armnn/layers/BatchNormalizationLayer.cpp +++ b/src/armnn/layers/BatchNormalizationLayer.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd. All rights reserved. +// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. 
// SPDX-License-Identifier: MIT // #include "BatchNormalizationLayer.hpp" @@ -50,18 +50,17 @@ BatchNormalizationLayer* BatchNormalizationLayer::Clone(Graph& graph) const void BatchNormalizationLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { - IgnoreUnused(shapeInferenceMethod); - VerifyLayerConnections(1, CHECK_LOCATION()); + const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape(); + + VerifyShapeInferenceType(outputShape, shapeInferenceMethod); + auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() }); ARMNN_ASSERT(inferredShapes.size() == 1); - ConditionalThrowIfNotEqual<LayerValidationException>( - "BatchNormalizationLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.", - GetOutputSlot(0).GetTensorInfo().GetShape(), - inferredShapes[0]); + ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "BatchNormalizationLayer"); } diff --git a/src/armnn/layers/BatchToSpaceNdLayer.cpp b/src/armnn/layers/BatchToSpaceNdLayer.cpp index 8341b85dfd..fe99e9ebff 100644 --- a/src/armnn/layers/BatchToSpaceNdLayer.cpp +++ b/src/armnn/layers/BatchToSpaceNdLayer.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd. All rights reserved. +// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. 
// SPDX-License-Identifier: MIT // @@ -43,17 +43,17 @@ BatchToSpaceNdLayer* BatchToSpaceNdLayer::Clone(Graph& graph) const void BatchToSpaceNdLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { - IgnoreUnused(shapeInferenceMethod); - VerifyLayerConnections(1, CHECK_LOCATION()); + const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape(); + + VerifyShapeInferenceType(outputShape, shapeInferenceMethod); + auto inferredShapes = InferOutputShapes({GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape()}); ARMNN_ASSERT(inferredShapes.size() == 1); - ConditionalThrowIfNotEqual<LayerValidationException>( - "BatchToSpaceLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.", - GetOutputSlot(0).GetTensorInfo().GetShape(),inferredShapes[0]); + ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "BatchToSpaceNdLayer"); } std::vector<TensorShape> BatchToSpaceNdLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const diff --git a/src/armnn/layers/ComparisonLayer.cpp b/src/armnn/layers/ComparisonLayer.cpp index 483d2e1c72..4dd3781bdd 100644 --- a/src/armnn/layers/ComparisonLayer.cpp +++ b/src/armnn/layers/ComparisonLayer.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2019 Arm Ltd. All rights reserved. +// Copyright © 2019 Arm Ltd and Contributors. All rights reserved. 
// SPDX-License-Identifier: MIT // @@ -57,20 +57,19 @@ std::vector<TensorShape> ComparisonLayer::InferOutputShapes(const std::vector<Te void ComparisonLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { - IgnoreUnused(shapeInferenceMethod); - VerifyLayerConnections(2, CHECK_LOCATION()); + const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape(); + + VerifyShapeInferenceType(outputShape, shapeInferenceMethod); + std::vector<TensorShape> inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape(), GetInputSlot(1).GetConnection()->GetTensorInfo().GetShape() }); ARMNN_ASSERT(inferredShapes.size() == 1); - ConditionalThrowIfNotEqual<LayerValidationException>( - "ComparisonLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.", - GetOutputSlot(0).GetTensorInfo().GetShape(), - inferredShapes[0]); + ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "ComparisonLayer"); } void ComparisonLayer::Accept(ILayerVisitor& visitor) const diff --git a/src/armnn/layers/ConcatLayer.cpp b/src/armnn/layers/ConcatLayer.cpp index 9a0672b38d..4f0aa539a1 100644 --- a/src/armnn/layers/ConcatLayer.cpp +++ b/src/armnn/layers/ConcatLayer.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd. All rights reserved. +// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. // SPDX-License-Identifier: MIT // #include "ConcatLayer.hpp" @@ -244,8 +244,6 @@ std::vector<TensorShape> ConcatLayer::InferOutputShapes(const std::vector<Tensor void ConcatLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { - IgnoreUnused(shapeInferenceMethod); - // Validates Concat layer. 
ConditionalThrowIfNotEqual<LayerValidationException>( "ConcatLayer: Num Inputs must match num views.", @@ -254,6 +252,10 @@ void ConcatLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInfer VerifyLayerConnections(m_Param.GetNumViews(), CHECK_LOCATION()); + const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape(); + + VerifyShapeInferenceType(outputShape, shapeInferenceMethod); + std::vector<TensorShape> inputShapes; for (unsigned int i = 0; i < GetNumInputSlots(); ++i) { @@ -264,10 +266,7 @@ void ConcatLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInfer ARMNN_ASSERT(inferredShapes.size() == 1); - ConditionalThrowIfNotEqual<LayerValidationException>( - "ConcatLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.", - GetOutputSlot(0).GetTensorInfo().GetShape(), - inferredShapes[0]); + ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "ConcatLayer"); } void ConcatLayer::Accept(ILayerVisitor& visitor) const diff --git a/src/armnn/layers/ConstantLayer.cpp b/src/armnn/layers/ConstantLayer.cpp index d354accd2f..ff4c57c431 100644 --- a/src/armnn/layers/ConstantLayer.cpp +++ b/src/armnn/layers/ConstantLayer.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd. All rights reserved. +// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. // SPDX-License-Identifier: MIT // #include "ConstantLayer.hpp" @@ -46,10 +46,19 @@ void ConstantLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInf // Get the output shape from the value of the constant layer. 
TensorShape const& outShape = m_LayerOutput->GetTensorInfo().GetShape(); + + ConditionalThrow<LayerValidationException>( + outShape.GetDimensionality() != Dimensionality::NotSpecified, + "Constant layer m_LayerOutput output shape can not be Dimensionality::NotSpecified"); + + ConditionalThrow<LayerValidationException>( + outShape.AreAllDimensionsSpecified(), + "Constant layer m_LayerOutput output shape can not have an unspecified dimension"); + ConditionalThrowIfNotEqual<LayerValidationException>( - "ConstantLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.", - GetOutputSlot(0).GetTensorInfo().GetShape(), - outShape); + "ConstantLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.", + GetOutputSlot(0).GetTensorInfo().GetShape(), + outShape); } void ConstantLayer::Accept(ILayerVisitor& visitor) const diff --git a/src/armnn/layers/ConvertBf16ToFp32Layer.cpp b/src/armnn/layers/ConvertBf16ToFp32Layer.cpp index b53986a497..250ecfa133 100644 --- a/src/armnn/layers/ConvertBf16ToFp32Layer.cpp +++ b/src/armnn/layers/ConvertBf16ToFp32Layer.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2020 Arm Ltd. All rights reserved. +// Copyright © 2020 Arm Ltd and Contributors. All rights reserved. 
// SPDX-License-Identifier: MIT // @@ -32,18 +32,17 @@ ConvertBf16ToFp32Layer* ConvertBf16ToFp32Layer::Clone(Graph& graph) const void ConvertBf16ToFp32Layer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { - IgnoreUnused(shapeInferenceMethod); - VerifyLayerConnections(1, CHECK_LOCATION()); + const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape(); + + VerifyShapeInferenceType(outputShape, shapeInferenceMethod); + auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() }); ARMNN_ASSERT(inferredShapes.size() == 1); - ConditionalThrowIfNotEqual<LayerValidationException>( - "ConvertBf16ToFp32Layer: TensorShape set on OutputSlot[0] does not match the inferred shape.", - GetOutputSlot(0).GetTensorInfo().GetShape(), - inferredShapes[0]); + ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "ConvertBf16ToFp32Layer"); } void ConvertBf16ToFp32Layer::Accept(ILayerVisitor& visitor) const diff --git a/src/armnn/layers/ConvertFp16ToFp32Layer.cpp b/src/armnn/layers/ConvertFp16ToFp32Layer.cpp index 30f9e6340e..f86397fb01 100644 --- a/src/armnn/layers/ConvertFp16ToFp32Layer.cpp +++ b/src/armnn/layers/ConvertFp16ToFp32Layer.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd. All rights reserved. +// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. 
// SPDX-License-Identifier: MIT // @@ -32,18 +32,17 @@ ConvertFp16ToFp32Layer* ConvertFp16ToFp32Layer::Clone(Graph& graph) const void ConvertFp16ToFp32Layer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { - IgnoreUnused(shapeInferenceMethod); - VerifyLayerConnections(1, CHECK_LOCATION()); + const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape(); + + VerifyShapeInferenceType(outputShape, shapeInferenceMethod); + auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() }); ARMNN_ASSERT(inferredShapes.size() == 1); - ConditionalThrowIfNotEqual<LayerValidationException>( - "ConvertFp16ToFp32Layer: TensorShape set on OutputSlot[0] does not match the inferred shape.", - GetOutputSlot(0).GetTensorInfo().GetShape(), - inferredShapes[0]); + ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "ConvertFp16ToFp32Layer"); } void ConvertFp16ToFp32Layer::Accept(ILayerVisitor& visitor) const diff --git a/src/armnn/layers/ConvertFp32ToBf16Layer.cpp b/src/armnn/layers/ConvertFp32ToBf16Layer.cpp index 9f523ae417..15052455e4 100644 --- a/src/armnn/layers/ConvertFp32ToBf16Layer.cpp +++ b/src/armnn/layers/ConvertFp32ToBf16Layer.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2020 Arm Ltd. All rights reserved. +// Copyright © 2020 Arm Ltd and Contributors. All rights reserved. 
// SPDX-License-Identifier: MIT // @@ -32,18 +32,18 @@ ConvertFp32ToBf16Layer* ConvertFp32ToBf16Layer::Clone(Graph& graph) const void ConvertFp32ToBf16Layer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { - IgnoreUnused(shapeInferenceMethod); VerifyLayerConnections(1, CHECK_LOCATION()); + const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape(); + + VerifyShapeInferenceType(outputShape, shapeInferenceMethod); + auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() }); ARMNN_ASSERT(inferredShapes.size() == 1); - ConditionalThrowIfNotEqual<LayerValidationException>( - "ConvertFp32ToBf16Layer: TensorShape set on OutputSlot[0] does not match the inferred shape.", - GetOutputSlot(0).GetTensorInfo().GetShape(), - inferredShapes[0]); + ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "ConvertFp32ToBf16Layer"); } void ConvertFp32ToBf16Layer::Accept(ILayerVisitor& visitor) const diff --git a/src/armnn/layers/ConvertFp32ToFp16Layer.cpp b/src/armnn/layers/ConvertFp32ToFp16Layer.cpp index 7ff98ed898..0a126e2284 100644 --- a/src/armnn/layers/ConvertFp32ToFp16Layer.cpp +++ b/src/armnn/layers/ConvertFp32ToFp16Layer.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd. All rights reserved. +// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. 
// SPDX-License-Identifier: MIT // #include "ConvertFp32ToFp16Layer.hpp" @@ -31,18 +31,18 @@ ConvertFp32ToFp16Layer* ConvertFp32ToFp16Layer::Clone(Graph& graph) const void ConvertFp32ToFp16Layer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { - IgnoreUnused(shapeInferenceMethod); VerifyLayerConnections(1, CHECK_LOCATION()); + const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape(); + + VerifyShapeInferenceType(outputShape, shapeInferenceMethod); + auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() }); ARMNN_ASSERT(inferredShapes.size() == 1); - ConditionalThrowIfNotEqual<LayerValidationException>( - "ConvertFp32ToFp16Layer: TensorShape set on OutputSlot[0] does not match the inferred shape.", - GetOutputSlot(0).GetTensorInfo().GetShape(), - inferredShapes[0]); + ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "ConvertFp32ToFp16Layer"); } void ConvertFp32ToFp16Layer::Accept(ILayerVisitor& visitor) const diff --git a/src/armnn/layers/Convolution2dLayer.cpp b/src/armnn/layers/Convolution2dLayer.cpp index 462d3554d3..a1535ea7cc 100644 --- a/src/armnn/layers/Convolution2dLayer.cpp +++ b/src/armnn/layers/Convolution2dLayer.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd. All rights reserved. +// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. 
// SPDX-License-Identifier: MIT // @@ -114,10 +114,12 @@ std::vector<TensorShape> Convolution2dLayer::InferOutputShapes(const std::vector void Convolution2dLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { - IgnoreUnused(shapeInferenceMethod); - VerifyLayerConnections(1, CHECK_LOCATION()); + const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape(); + + VerifyShapeInferenceType(outputShape, shapeInferenceMethod); + // check if we m_Weight data is not nullptr ARMNN_ASSERT_MSG(m_Weight != nullptr, "Convolution2dLayer: Weights data should not be null."); @@ -127,10 +129,7 @@ void Convolution2dLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod sha ARMNN_ASSERT(inferredShapes.size() == 1); - ConditionalThrowIfNotEqual<LayerValidationException>( - "Convolution2dLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.", - GetOutputSlot(0).GetTensorInfo().GetShape(), - inferredShapes[0]); + ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "Convolution2dLayer"); } Layer::ConstantTensors Convolution2dLayer::GetConstantTensorsByRef() diff --git a/src/armnn/layers/DebugLayer.cpp b/src/armnn/layers/DebugLayer.cpp index 3422de6ad3..40bc8fe54b 100644 --- a/src/armnn/layers/DebugLayer.cpp +++ b/src/armnn/layers/DebugLayer.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd. All rights reserved. +// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. 
// SPDX-License-Identifier: MIT // #include "DebugLayer.hpp" @@ -36,19 +36,18 @@ DebugLayer* DebugLayer::Clone(Graph& graph) const void DebugLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { - IgnoreUnused(shapeInferenceMethod); - VerifyLayerConnections(1, CHECK_LOCATION()); + const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape(); + + VerifyShapeInferenceType(outputShape, shapeInferenceMethod); + std::vector<TensorShape> inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() }); ARMNN_ASSERT(inferredShapes.size() == 1); - ConditionalThrowIfNotEqual<LayerValidationException>( - "DebugLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.", - GetOutputSlot(0).GetTensorInfo().GetShape(), - inferredShapes[0]); + ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "DebugLayer"); } void DebugLayer::Accept(ILayerVisitor& visitor) const diff --git a/src/armnn/layers/DepthToSpaceLayer.cpp b/src/armnn/layers/DepthToSpaceLayer.cpp index a3344841c9..4b7c41d317 100644 --- a/src/armnn/layers/DepthToSpaceLayer.cpp +++ b/src/armnn/layers/DepthToSpaceLayer.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2019 Arm Ltd. All rights reserved. +// Copyright © 2019 Arm Ltd and Contributors. All rights reserved. 
// SPDX-License-Identifier: MIT // @@ -59,19 +59,18 @@ std::vector<TensorShape> DepthToSpaceLayer::InferOutputShapes(const std::vector< void DepthToSpaceLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { - IgnoreUnused(shapeInferenceMethod); - VerifyLayerConnections(1, CHECK_LOCATION()); + const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape(); + + VerifyShapeInferenceType(outputShape, shapeInferenceMethod); + std::vector<TensorShape> inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() }); ARMNN_ASSERT(inferredShapes.size() == 1); - ConditionalThrowIfNotEqual<LayerValidationException>( - "DepthToSpaceLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.", - GetOutputSlot(0).GetTensorInfo().GetShape(), - inferredShapes[0]); + ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "DepthToSpaceLayer"); } void DepthToSpaceLayer::Accept(ILayerVisitor& visitor) const diff --git a/src/armnn/layers/DepthwiseConvolution2dLayer.cpp b/src/armnn/layers/DepthwiseConvolution2dLayer.cpp index 7efb307091..98d9e82f7f 100644 --- a/src/armnn/layers/DepthwiseConvolution2dLayer.cpp +++ b/src/armnn/layers/DepthwiseConvolution2dLayer.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd. All rights reserved. +// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. // SPDX-License-Identifier: MIT // @@ -121,10 +121,12 @@ DepthwiseConvolution2dLayer::InferOutputShapes(const std::vector<TensorShape>& i void DepthwiseConvolution2dLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { - IgnoreUnused(shapeInferenceMethod); - VerifyLayerConnections(1, CHECK_LOCATION()); + const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape(); + + VerifyShapeInferenceType(outputShape, shapeInferenceMethod); + // on this level constant data should not be released.. 
ARMNN_ASSERT_MSG(m_Weight != nullptr, "DepthwiseConvolution2dLayer: Weights data should not be null."); @@ -135,10 +137,7 @@ void DepthwiseConvolution2dLayer::ValidateTensorShapesFromInputs(ShapeInferenceM ARMNN_ASSERT(inferredShapes.size() == 1); - ConditionalThrowIfNotEqual<LayerValidationException>( - "DepthwiseConvolution2dLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.", - GetOutputSlot(0).GetTensorInfo().GetShape(), - inferredShapes[0]); + ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "DepthwiseConvolution2dLayer"); } Layer::ConstantTensors DepthwiseConvolution2dLayer::GetConstantTensorsByRef() diff --git a/src/armnn/layers/DequantizeLayer.cpp b/src/armnn/layers/DequantizeLayer.cpp index e0c3d0ec16..79ef0cba18 100644 --- a/src/armnn/layers/DequantizeLayer.cpp +++ b/src/armnn/layers/DequantizeLayer.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd. All rights reserved. +// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. 
// SPDX-License-Identifier: MIT // #include "DequantizeLayer.hpp" @@ -31,19 +31,18 @@ DequantizeLayer* DequantizeLayer::Clone(Graph& graph) const void DequantizeLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { - IgnoreUnused(shapeInferenceMethod); - VerifyLayerConnections(1, CHECK_LOCATION()); + const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape(); + + VerifyShapeInferenceType(outputShape, shapeInferenceMethod); + std::vector<TensorShape> inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() }); ARMNN_ASSERT(inferredShapes.size() == 1); - ConditionalThrowIfNotEqual<LayerValidationException>( - "DequantizeLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.", - GetOutputSlot(0).GetTensorInfo().GetShape(), - inferredShapes[0]); + ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "DequantizeLayer"); } void DequantizeLayer::Accept(ILayerVisitor& visitor) const diff --git a/src/armnn/layers/DetectionPostProcessLayer.cpp b/src/armnn/layers/DetectionPostProcessLayer.cpp index 2deca322ce..fddf86f573 100644 --- a/src/armnn/layers/DetectionPostProcessLayer.cpp +++ b/src/armnn/layers/DetectionPostProcessLayer.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd. All rights reserved. +// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. // SPDX-License-Identifier: MIT // @@ -36,10 +36,12 @@ DetectionPostProcessLayer* DetectionPostProcessLayer::Clone(Graph& graph) const void DetectionPostProcessLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { - IgnoreUnused(shapeInferenceMethod); - VerifyLayerConnections(2, CHECK_LOCATION()); + const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape(); + + VerifyShapeInferenceType(outputShape, shapeInferenceMethod); + // on this level constant data should not be released. 
ARMNN_ASSERT_MSG(m_Anchors != nullptr, "DetectionPostProcessLayer: Anchors data should not be null."); @@ -51,22 +53,22 @@ void DetectionPostProcessLayer::ValidateTensorShapesFromInputs(ShapeInferenceMet const TensorShape& inferredDetectionScores = TensorShape({ 1, detectedBoxes }); const TensorShape& inferredNumberDetections = TensorShape({ 1 }); - ConditionalThrowIfNotEqual<LayerValidationException>( - "DetectionPostProcessLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.", - GetOutputSlot(0).GetTensorInfo().GetShape(), - inferredDetectionBoxes); - ConditionalThrowIfNotEqual<LayerValidationException>( - "DetectionPostProcessLayer: TensorShape set on OutputSlot[1] does not match the inferred shape.", - GetOutputSlot(1).GetTensorInfo().GetShape(), - inferredDetectionScores); - ConditionalThrowIfNotEqual<LayerValidationException>( - "DetectionPostProcessLayer: TensorShape set on OutputSlot[2] does not match the inferred shape.", - GetOutputSlot(2).GetTensorInfo().GetShape(), - inferredDetectionScores); - ConditionalThrowIfNotEqual<LayerValidationException>( - "DetectionPostProcessLayer: TensorShape set on OutputSlot[3] does not match the inferred shape.", - GetOutputSlot(3).GetTensorInfo().GetShape(), - inferredNumberDetections); + ValidateAndCopyShape(outputShape, inferredDetectionBoxes, shapeInferenceMethod, "DetectionPostProcessLayer"); + + ValidateAndCopyShape(GetOutputSlot(1).GetTensorInfo().GetShape(), + inferredDetectionScores, + shapeInferenceMethod, + "DetectionPostProcessLayer", 1); + + ValidateAndCopyShape(GetOutputSlot(2).GetTensorInfo().GetShape(), + inferredDetectionScores, + shapeInferenceMethod, + "DetectionPostProcessLayer", 2); + + ValidateAndCopyShape(GetOutputSlot(3).GetTensorInfo().GetShape(), + inferredNumberDetections, + shapeInferenceMethod, + "DetectionPostProcessLayer", 3); } Layer::ConstantTensors DetectionPostProcessLayer::GetConstantTensorsByRef() diff --git a/src/armnn/layers/DivisionLayer.cpp 
b/src/armnn/layers/DivisionLayer.cpp index b27d894512..193b96b6ee 100644 --- a/src/armnn/layers/DivisionLayer.cpp +++ b/src/armnn/layers/DivisionLayer.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd. All rights reserved. +// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. // SPDX-License-Identifier: MIT // diff --git a/src/armnn/layers/ElementwiseBaseLayer.cpp b/src/armnn/layers/ElementwiseBaseLayer.cpp index 44bbd0b839..e063293815 100644 --- a/src/armnn/layers/ElementwiseBaseLayer.cpp +++ b/src/armnn/layers/ElementwiseBaseLayer.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd. All rights reserved. +// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. // SPDX-License-Identifier: MIT // @@ -49,10 +49,12 @@ std::vector<TensorShape> ElementwiseBaseLayer::InferOutputShapes(const std::vect void ElementwiseBaseLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { - IgnoreUnused(shapeInferenceMethod); - VerifyLayerConnections(2, CHECK_LOCATION()); + const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape(); + + VerifyShapeInferenceType(outputShape, shapeInferenceMethod); + auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape(), GetInputSlot(1).GetConnection()->GetTensorInfo().GetShape() @@ -60,11 +62,7 @@ void ElementwiseBaseLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod s ARMNN_ASSERT(inferredShapes.size() == 1); - std::string msg = GetLayerTypeAsCString(GetType()); - msg += "Layer: TensorShape set on OutputSlot[0] does not match the inferred shape."; - ConditionalThrowIfNotEqual<LayerValidationException>(msg, - GetOutputSlot(0).GetTensorInfo().GetShape(), - inferredShapes[0]); + ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, GetLayerTypeAsCString(GetType())); } } // namespace armnn diff --git a/src/armnn/layers/ElementwiseUnaryLayer.cpp b/src/armnn/layers/ElementwiseUnaryLayer.cpp index 
0908f391fb..5592c2070e 100644 --- a/src/armnn/layers/ElementwiseUnaryLayer.cpp +++ b/src/armnn/layers/ElementwiseUnaryLayer.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2019 Arm Ltd. All rights reserved. +// Copyright © 2019 Arm Ltd and Contributors. All rights reserved. // SPDX-License-Identifier: MIT // @@ -42,18 +42,17 @@ std::vector<TensorShape> ElementwiseUnaryLayer::InferOutputShapes(const std::vec void ElementwiseUnaryLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { - IgnoreUnused(shapeInferenceMethod); - VerifyLayerConnections(1, CHECK_LOCATION()); + const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape(); + + VerifyShapeInferenceType(outputShape, shapeInferenceMethod); + std::vector<TensorShape> inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape()}); ARMNN_ASSERT(inferredShapes.size() == 1); - ConditionalThrowIfNotEqual<LayerValidationException>( - "ElementwiseUnaryLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.", - GetOutputSlot(0).GetTensorInfo().GetShape(), - inferredShapes[0]); + ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, GetLayerTypeAsCString(GetType())); } void ElementwiseUnaryLayer::Accept(ILayerVisitor& visitor) const diff --git a/src/armnn/layers/FakeQuantizationLayer.cpp b/src/armnn/layers/FakeQuantizationLayer.cpp index 7ed6d7527b..3a1d0d1a50 100644 --- a/src/armnn/layers/FakeQuantizationLayer.cpp +++ b/src/armnn/layers/FakeQuantizationLayer.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd. All rights reserved. +// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. 
// SPDX-License-Identifier: MIT // #include "FakeQuantizationLayer.hpp" @@ -31,18 +31,17 @@ FakeQuantizationLayer* FakeQuantizationLayer::Clone(Graph& graph) const void FakeQuantizationLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { - IgnoreUnused(shapeInferenceMethod); - VerifyLayerConnections(1, CHECK_LOCATION()); + const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape(); + + VerifyShapeInferenceType(outputShape, shapeInferenceMethod); + auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() }); ARMNN_ASSERT(inferredShapes.size() == 1); - ConditionalThrowIfNotEqual<LayerValidationException>( - "FakeQuantizationLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.", - GetOutputSlot(0).GetTensorInfo().GetShape(), - inferredShapes[0]); + ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "FakeQuantizationLayer"); } void FakeQuantizationLayer::Accept(ILayerVisitor& visitor) const diff --git a/src/armnn/layers/FillLayer.cpp b/src/armnn/layers/FillLayer.cpp index 688486b368..174fcf72bb 100644 --- a/src/armnn/layers/FillLayer.cpp +++ b/src/armnn/layers/FillLayer.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2020 Arm Ltd. All rights reserved. +// Copyright © 2020 Arm Ltd and Contributors. All rights reserved. 
// SPDX-License-Identifier: MIT // #include "FillLayer.hpp" @@ -31,11 +31,13 @@ FillLayer* FillLayer::Clone(Graph& graph) const void FillLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { - IgnoreUnused(shapeInferenceMethod); - VerifyLayerConnections(1, CHECK_LOCATION()); - auto inferredShapes = InferOutputShapes( { GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() }); + const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape(); + + VerifyShapeInferenceType(outputShape, shapeInferenceMethod); + + auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() }); ARMNN_ASSERT(inferredShapes.size() == 1); diff --git a/src/armnn/layers/FloorLayer.cpp b/src/armnn/layers/FloorLayer.cpp index 9e46ebf5c2..04d847ee10 100644 --- a/src/armnn/layers/FloorLayer.cpp +++ b/src/armnn/layers/FloorLayer.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd. All rights reserved. +// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. 
// SPDX-License-Identifier: MIT // #include "FloorLayer.hpp" @@ -31,18 +31,16 @@ FloorLayer* FloorLayer::Clone(Graph& graph) const void FloorLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { - IgnoreUnused(shapeInferenceMethod); - VerifyLayerConnections(1, CHECK_LOCATION()); - auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() }); + const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape(); + VerifyShapeInferenceType(outputShape, shapeInferenceMethod); + + auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() }); ARMNN_ASSERT(inferredShapes.size() == 1); - ConditionalThrowIfNotEqual<LayerValidationException>( - "FloorLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.", - GetOutputSlot(0).GetTensorInfo().GetShape(), - inferredShapes[0]); + ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "FloorLayer"); } void FloorLayer::Accept(ILayerVisitor& visitor) const diff --git a/src/armnn/layers/FullyConnectedLayer.cpp b/src/armnn/layers/FullyConnectedLayer.cpp index bd947b7678..174459b565 100644 --- a/src/armnn/layers/FullyConnectedLayer.cpp +++ b/src/armnn/layers/FullyConnectedLayer.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd. All rights reserved. +// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. 
// SPDX-License-Identifier: MIT // #include "FullyConnectedLayer.hpp" @@ -65,21 +65,20 @@ void FullyConnectedLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod sh { IgnoreUnused(shapeInferenceMethod); - VerifyLayerConnections(1, CHECK_LOCATION()); + const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape(); + + VerifyShapeInferenceType(outputShape, shapeInferenceMethod); // check if we m_Weight data is not nullptr ARMNN_ASSERT_MSG(m_Weight != nullptr, "FullyConnectedLayer: Weights data should not be null."); - auto inferredShapes = InferOutputShapes({ - GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape(), - m_Weight->GetTensorInfo().GetShape() }); + auto inferredShapes = InferOutputShapes({GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape(), + m_Weight->GetTensorInfo().GetShape() }); ARMNN_ASSERT(inferredShapes.size() == 1); + ARMNN_ASSERT(inferredShapes[0].GetDimensionality() == Dimensionality::Specified); - ConditionalThrowIfNotEqual<LayerValidationException>( - "FullyConnectedLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.", - GetOutputSlot(0).GetTensorInfo().GetShape(), - inferredShapes[0]); + ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "FullyConnectedLayer"); } Layer::ConstantTensors FullyConnectedLayer::GetConstantTensorsByRef() diff --git a/src/armnn/layers/GatherLayer.cpp b/src/armnn/layers/GatherLayer.cpp index 3e85d25dac..52bf4324a2 100644 --- a/src/armnn/layers/GatherLayer.cpp +++ b/src/armnn/layers/GatherLayer.cpp @@ -31,10 +31,12 @@ GatherLayer* GatherLayer::Clone(Graph& graph) const void GatherLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { - IgnoreUnused(shapeInferenceMethod); - VerifyLayerConnections(2, CHECK_LOCATION()); + const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape(); + + VerifyShapeInferenceType(outputShape, shapeInferenceMethod); + const TensorInfo& params = 
GetInputSlot(0).GetConnection()->GetTensorInfo(); const TensorInfo& indices = GetInputSlot(1).GetConnection()->GetTensorInfo(); @@ -66,10 +68,7 @@ void GatherLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInfer const TensorShape& inferredShape = TensorShape(outputDim, dimSizes.data()); - ConditionalThrowIfNotEqual<LayerValidationException>( - "GatherLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.", - GetOutputSlot(0).GetTensorInfo().GetShape(), - inferredShape); + ValidateAndCopyShape(outputShape, inferredShape, shapeInferenceMethod, "GatherLayer"); } void GatherLayer::Accept(ILayerVisitor& visitor) const diff --git a/src/armnn/layers/InputLayer.cpp b/src/armnn/layers/InputLayer.cpp index 42ce153346..5a78ecc981 100644 --- a/src/armnn/layers/InputLayer.cpp +++ b/src/armnn/layers/InputLayer.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd. All rights reserved. +// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. // SPDX-License-Identifier: MIT // #include "InputLayer.hpp" diff --git a/src/armnn/layers/InstanceNormalizationLayer.cpp b/src/armnn/layers/InstanceNormalizationLayer.cpp index f79e423c8c..4f753e21bf 100644 --- a/src/armnn/layers/InstanceNormalizationLayer.cpp +++ b/src/armnn/layers/InstanceNormalizationLayer.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2019 Arm Ltd. All rights reserved. +// Copyright © 2019 Arm Ltd and Contributors. All rights reserved. 
// SPDX-License-Identifier: MIT // #include "InstanceNormalizationLayer.hpp" @@ -31,18 +31,17 @@ InstanceNormalizationLayer* InstanceNormalizationLayer::Clone(Graph& graph) cons void InstanceNormalizationLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { - IgnoreUnused(shapeInferenceMethod); - VerifyLayerConnections(1, CHECK_LOCATION()); + const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape(); + + VerifyShapeInferenceType(outputShape, shapeInferenceMethod); + auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() }); ARMNN_ASSERT(inferredShapes.size() == 1); - ConditionalThrowIfNotEqual<LayerValidationException>( - "InstanceNormalizationLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.", - GetOutputSlot(0).GetTensorInfo().GetShape(), - inferredShapes[0]); + ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "InstanceNormalizationLayer"); } void InstanceNormalizationLayer::Accept(ILayerVisitor& visitor) const diff --git a/src/armnn/layers/L2NormalizationLayer.cpp b/src/armnn/layers/L2NormalizationLayer.cpp index 4a2945efbe..d4ac8019cf 100644 --- a/src/armnn/layers/L2NormalizationLayer.cpp +++ b/src/armnn/layers/L2NormalizationLayer.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd. All rights reserved. +// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. 
// SPDX-License-Identifier: MIT // #include "L2NormalizationLayer.hpp" @@ -31,18 +31,17 @@ L2NormalizationLayer* L2NormalizationLayer::Clone(Graph& graph) const void L2NormalizationLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { - IgnoreUnused(shapeInferenceMethod); - VerifyLayerConnections(1, CHECK_LOCATION()); + const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape(); + + VerifyShapeInferenceType(outputShape, shapeInferenceMethod); + auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() }); ARMNN_ASSERT(inferredShapes.size() == 1); - ConditionalThrowIfNotEqual<LayerValidationException>( - "L2NormalizationLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.", - GetOutputSlot(0).GetTensorInfo().GetShape(), - inferredShapes[0]); + ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "L2NormalizationLayer"); } void L2NormalizationLayer::Accept(ILayerVisitor& visitor) const diff --git a/src/armnn/layers/LogSoftmaxLayer.cpp b/src/armnn/layers/LogSoftmaxLayer.cpp index ab05fd24f3..f834ccef9d 100644 --- a/src/armnn/layers/LogSoftmaxLayer.cpp +++ b/src/armnn/layers/LogSoftmaxLayer.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2019 Arm Ltd. All rights reserved. +// Copyright © 2019 Arm Ltd and Contributors. All rights reserved. 
// SPDX-License-Identifier: MIT // @@ -31,17 +31,16 @@ LogSoftmaxLayer* LogSoftmaxLayer::Clone(Graph& graph) const void LogSoftmaxLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { - IgnoreUnused(shapeInferenceMethod); - VerifyLayerConnections(1, CHECK_LOCATION()); + const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape(); + + VerifyShapeInferenceType(outputShape, shapeInferenceMethod); + auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() }); ARMNN_ASSERT(inferredShapes.size() == 1); - ConditionalThrowIfNotEqual<LayerValidationException>( - "LogSoftmaxLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.", - GetOutputSlot(0).GetTensorInfo().GetShape(), - inferredShapes[0]); + ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "LogSoftmaxLayer"); } void LogSoftmaxLayer::Accept(ILayerVisitor& visitor) const diff --git a/src/armnn/layers/LstmLayer.cpp b/src/armnn/layers/LstmLayer.cpp index af708e4e06..44f5d1f40b 100644 --- a/src/armnn/layers/LstmLayer.cpp +++ b/src/armnn/layers/LstmLayer.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd. All rights reserved. +// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. 
// SPDX-License-Identifier: MIT // #include "LstmLayer.hpp" @@ -165,15 +165,17 @@ std::vector<TensorShape> LstmLayer::InferOutputShapes(const std::vector<TensorSh void LstmLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { - IgnoreUnused(shapeInferenceMethod); - VerifyLayerConnections(3, CHECK_LOCATION()); + const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape(); + + VerifyShapeInferenceType(outputShape, shapeInferenceMethod); + auto inferredShapes = InferOutputShapes( { GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape(), GetInputSlot(1).GetConnection()->GetTensorInfo().GetShape(), - GetInputSlot(2).GetConnection()->GetTensorInfo().GetShape()} - ); + GetInputSlot(2).GetConnection()->GetTensorInfo().GetShape() + }); ARMNN_ASSERT(inferredShapes.size() == 4); @@ -206,10 +208,7 @@ void LstmLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferen ARMNN_ASSERT_MSG(m_CifgParameters.m_InputGateBias != nullptr, "LstmLayer: m_CifgParameters.m_InputGateBias should not be null."); - ConditionalThrowIfNotEqual<LayerValidationException>( - "LstmLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.", - GetOutputSlot(0).GetTensorInfo().GetShape(), - inferredShapes[0]); + ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "LstmLayer"); } else { @@ -220,10 +219,7 @@ void LstmLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferen ARMNN_ASSERT_MSG(m_CifgParameters.m_InputGateBias == nullptr, "LstmLayer: m_CifgParameters.m_InputGateBias should not have a value when CIFG is enabled."); - ConditionalThrowIfNotEqual<LayerValidationException>( - "LstmLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.", - GetOutputSlot(0).GetTensorInfo().GetShape(), - inferredShapes[0]); + ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "LstmLayer"); } if (m_Param.m_ProjectionEnabled) @@ -246,18 +242,12 @@ void 
LstmLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferen "LstmLayer: m_PeepholeParameters.m_CellToOutputWeights should not be null."); } - ConditionalThrowIfNotEqual<LayerValidationException>( - "LstmLayer: TensorShape set on OutputSlot[1] does not match the inferred shape.", - GetOutputSlot(1).GetTensorInfo().GetShape(), - inferredShapes[1]); - ConditionalThrowIfNotEqual<LayerValidationException>( - "LstmLayer: TensorShape set on OutputSlot[2] does not match the inferred shape.", - GetOutputSlot(2).GetTensorInfo().GetShape(), - inferredShapes[2]); - ConditionalThrowIfNotEqual<LayerValidationException>( - "LstmLayer: TensorShape set on OutputSlot[3] does not match the inferred shape.", - GetOutputSlot(3).GetTensorInfo().GetShape(), - inferredShapes[3]); + ValidateAndCopyShape( + GetOutputSlot(1).GetTensorInfo().GetShape(), inferredShapes[1], shapeInferenceMethod, "LstmLayer", 1); + ValidateAndCopyShape( + GetOutputSlot(2).GetTensorInfo().GetShape(), inferredShapes[2], shapeInferenceMethod, "LstmLayer", 2); + ValidateAndCopyShape( + GetOutputSlot(3).GetTensorInfo().GetShape(), inferredShapes[3], shapeInferenceMethod, "LstmLayer", 3); if (m_Param.m_LayerNormEnabled) { diff --git a/src/armnn/layers/MaximumLayer.cpp b/src/armnn/layers/MaximumLayer.cpp index bfc42e9ac4..ab7bf88f5f 100644 --- a/src/armnn/layers/MaximumLayer.cpp +++ b/src/armnn/layers/MaximumLayer.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd. All rights reserved. +// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. // SPDX-License-Identifier: MIT // diff --git a/src/armnn/layers/MeanLayer.cpp b/src/armnn/layers/MeanLayer.cpp index 9cefd7d1f8..da635661e1 100644 --- a/src/armnn/layers/MeanLayer.cpp +++ b/src/armnn/layers/MeanLayer.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd. All rights reserved. +// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. 
// SPDX-License-Identifier: MIT // @@ -40,10 +40,12 @@ MeanLayer* MeanLayer::Clone(Graph& graph) const void MeanLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { - IgnoreUnused(shapeInferenceMethod); - VerifyLayerConnections(1, CHECK_LOCATION()); + const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape(); + + VerifyShapeInferenceType(outputShape, shapeInferenceMethod); + const TensorInfo& input = GetInputSlot(0).GetConnection()->GetTensorInfo(); ARMNN_ASSERT_MSG(input.GetNumDimensions() > 0 && input.GetNumDimensions() <= 4, @@ -95,10 +97,7 @@ void MeanLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferen } const TensorShape& inferredShape = TensorShape(outputRank, dimSizes.data()); - ConditionalThrowIfNotEqual<LayerValidationException>( - "MeanLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.", - GetOutputSlot(0).GetTensorInfo().GetShape(), - inferredShape); + ValidateAndCopyShape(outputShape, inferredShape, shapeInferenceMethod, "MeanLayer"); } void MeanLayer::Accept(ILayerVisitor& visitor) const diff --git a/src/armnn/layers/MemCopyLayer.cpp b/src/armnn/layers/MemCopyLayer.cpp index c087290682..b4fe68bd8d 100644 --- a/src/armnn/layers/MemCopyLayer.cpp +++ b/src/armnn/layers/MemCopyLayer.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd. All rights reserved. +// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. 
// SPDX-License-Identifier: MIT // #include "MemCopyLayer.hpp" @@ -35,18 +35,17 @@ std::unique_ptr<IWorkload> MemCopyLayer::CreateWorkload(const IWorkloadFactory& void MemCopyLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { - IgnoreUnused(shapeInferenceMethod); - VerifyLayerConnections(1, CHECK_LOCATION()); + const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape(); + + VerifyShapeInferenceType(outputShape, shapeInferenceMethod); + auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() }); ARMNN_ASSERT(inferredShapes.size() == 1); - ConditionalThrowIfNotEqual<LayerValidationException>( - "MemCopyLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.", - GetOutputSlot(0).GetTensorInfo().GetShape(), - inferredShapes[0]); + ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "MemCopyLayer"); } void MemCopyLayer::Accept(ILayerVisitor& visitor) const diff --git a/src/armnn/layers/MemImportLayer.cpp b/src/armnn/layers/MemImportLayer.cpp index 02092f4b4c..a356f054dc 100644 --- a/src/armnn/layers/MemImportLayer.cpp +++ b/src/armnn/layers/MemImportLayer.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd. All rights reserved. +// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. 
// SPDX-License-Identifier: MIT // #include "MemImportLayer.hpp" @@ -35,18 +35,17 @@ std::unique_ptr<IWorkload> MemImportLayer::CreateWorkload(const IWorkloadFactory void MemImportLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { - IgnoreUnused(shapeInferenceMethod); - VerifyLayerConnections(1, CHECK_LOCATION()); + const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape(); + + VerifyShapeInferenceType(outputShape, shapeInferenceMethod); + auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() }); ARMNN_ASSERT(inferredShapes.size() == 1); - ConditionalThrowIfNotEqual<LayerValidationException>( - "MemImportLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.", - GetOutputSlot(0).GetTensorInfo().GetShape(), - inferredShapes[0]); + ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "MemImportLayer"); } void MemImportLayer::Accept(ILayerVisitor& visitor) const diff --git a/src/armnn/layers/MergeLayer.cpp b/src/armnn/layers/MergeLayer.cpp index b05eb68d25..d8351c6c40 100644 --- a/src/armnn/layers/MergeLayer.cpp +++ b/src/armnn/layers/MergeLayer.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd. All rights reserved. +// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. 
// SPDX-License-Identifier: MIT // #include "MergeLayer.hpp" @@ -29,10 +29,12 @@ MergeLayer* MergeLayer::Clone(Graph& graph) const void MergeLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { - IgnoreUnused(shapeInferenceMethod); - VerifyLayerConnections(2, CHECK_LOCATION()); + const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape(); + + VerifyShapeInferenceType(outputShape, shapeInferenceMethod); + std::vector<TensorShape> inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape(), GetInputSlot(1).GetConnection()->GetTensorInfo().GetShape(), @@ -40,10 +42,7 @@ void MergeLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInfere ARMNN_ASSERT(inferredShapes.size() == 1); - ConditionalThrowIfNotEqual<LayerValidationException>( - "MergeLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.", - GetOutputSlot(0).GetTensorInfo().GetShape(), - inferredShapes[0]); + ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "MergeLayer"); } std::vector<TensorShape> MergeLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const diff --git a/src/armnn/layers/MinimumLayer.cpp b/src/armnn/layers/MinimumLayer.cpp index fb54c3d7ae..9154d788d5 100644 --- a/src/armnn/layers/MinimumLayer.cpp +++ b/src/armnn/layers/MinimumLayer.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd. All rights reserved. +// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. // SPDX-License-Identifier: MIT // diff --git a/src/armnn/layers/MultiplicationLayer.cpp b/src/armnn/layers/MultiplicationLayer.cpp index dd0303a567..f02ee57a2c 100644 --- a/src/armnn/layers/MultiplicationLayer.cpp +++ b/src/armnn/layers/MultiplicationLayer.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd. All rights reserved. +// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. 
// SPDX-License-Identifier: MIT // diff --git a/src/armnn/layers/NormalizationLayer.cpp b/src/armnn/layers/NormalizationLayer.cpp index 9011ece821..e8176cec22 100644 --- a/src/armnn/layers/NormalizationLayer.cpp +++ b/src/armnn/layers/NormalizationLayer.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd. All rights reserved. +// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. // SPDX-License-Identifier: MIT // #include "NormalizationLayer.hpp" @@ -31,18 +31,17 @@ NormalizationLayer* NormalizationLayer::Clone(Graph& graph) const void NormalizationLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { - IgnoreUnused(shapeInferenceMethod); - VerifyLayerConnections(1, CHECK_LOCATION()); + const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape(); + + VerifyShapeInferenceType(outputShape, shapeInferenceMethod); + auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() }); ARMNN_ASSERT(inferredShapes.size() == 1); - ConditionalThrowIfNotEqual<LayerValidationException>( - "NormalizationLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.", - GetOutputSlot(0).GetTensorInfo().GetShape(), - inferredShapes[0]); + ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "NormalizationLayer"); } void NormalizationLayer::Accept(ILayerVisitor& visitor) const diff --git a/src/armnn/layers/OutputLayer.cpp b/src/armnn/layers/OutputLayer.cpp index 5ff91880f8..db76244ad1 100644 --- a/src/armnn/layers/OutputLayer.cpp +++ b/src/armnn/layers/OutputLayer.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd. All rights reserved. +// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. 
// SPDX-License-Identifier: MIT // #include "OutputLayer.hpp" diff --git a/src/armnn/layers/PadLayer.cpp b/src/armnn/layers/PadLayer.cpp index 690318d308..296bfa901e 100644 --- a/src/armnn/layers/PadLayer.cpp +++ b/src/armnn/layers/PadLayer.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd. All rights reserved. +// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. // SPDX-License-Identifier: MIT // diff --git a/src/armnn/layers/PermuteLayer.cpp b/src/armnn/layers/PermuteLayer.cpp index a585e66538..b9380aa44b 100644 --- a/src/armnn/layers/PermuteLayer.cpp +++ b/src/armnn/layers/PermuteLayer.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd. All rights reserved. +// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. // SPDX-License-Identifier: MIT // @@ -42,18 +42,17 @@ std::vector<TensorShape> PermuteLayer::InferOutputShapes(const std::vector<Tenso void PermuteLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { - IgnoreUnused(shapeInferenceMethod); - VerifyLayerConnections(1, CHECK_LOCATION()); + const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape(); + + VerifyShapeInferenceType(outputShape, shapeInferenceMethod); + auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() }); ARMNN_ASSERT(inferredShapes.size() == 1); - ConditionalThrowIfNotEqual<LayerValidationException>( - "PermuteLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.", - GetOutputSlot(0).GetTensorInfo().GetShape(), - inferredShapes[0]); + ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "PermuteLayer"); } void PermuteLayer::Accept(ILayerVisitor& visitor) const diff --git a/src/armnn/layers/Pooling2dLayer.cpp b/src/armnn/layers/Pooling2dLayer.cpp index defed9758b..bed49ee059 100644 --- a/src/armnn/layers/Pooling2dLayer.cpp +++ b/src/armnn/layers/Pooling2dLayer.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd. 
All rights reserved. +// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. // SPDX-License-Identifier: MIT // @@ -102,18 +102,17 @@ std::vector<TensorShape> Pooling2dLayer::InferOutputShapes(const std::vector<Ten void Pooling2dLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { - IgnoreUnused(shapeInferenceMethod); - VerifyLayerConnections(1, CHECK_LOCATION()); + const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape(); + + VerifyShapeInferenceType(outputShape, shapeInferenceMethod); + auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() }); ARMNN_ASSERT(inferredShapes.size() == 1); - ConditionalThrowIfNotEqual<LayerValidationException>( - "Pooling2dLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.", - GetOutputSlot(0).GetTensorInfo().GetShape(), - inferredShapes[0]); + ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "Pooling2dLayer"); } void Pooling2dLayer::Accept(ILayerVisitor& visitor) const diff --git a/src/armnn/layers/PreCompiledLayer.cpp b/src/armnn/layers/PreCompiledLayer.cpp index 577e19f3cc..e043fac432 100644 --- a/src/armnn/layers/PreCompiledLayer.cpp +++ b/src/armnn/layers/PreCompiledLayer.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd. All rights reserved. +// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. // SPDX-License-Identifier: MIT // diff --git a/src/armnn/layers/PreluLayer.cpp b/src/armnn/layers/PreluLayer.cpp index 2527cb91be..fcf50f2590 100644 --- a/src/armnn/layers/PreluLayer.cpp +++ b/src/armnn/layers/PreluLayer.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd. All rights reserved. +// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. 
// SPDX-License-Identifier: MIT // @@ -96,10 +96,12 @@ std::vector<TensorShape> PreluLayer::InferOutputShapes(const std::vector<TensorS void PreluLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { - IgnoreUnused(shapeInferenceMethod); - VerifyLayerConnections(2, CHECK_LOCATION()); + const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape(); + + VerifyShapeInferenceType(outputShape, shapeInferenceMethod); + std::vector<TensorShape> inferredShapes = InferOutputShapes( { GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape(), @@ -108,10 +110,7 @@ void PreluLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInfere ARMNN_ASSERT(inferredShapes.size() == 1); - ConditionalThrowIfNotEqual<LayerValidationException>( - "PreluLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.", - GetOutputSlot(0).GetTensorInfo().GetShape(), - inferredShapes[0]); + ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "PreluLayer"); } void PreluLayer::Accept(ILayerVisitor& visitor) const diff --git a/src/armnn/layers/QLstmLayer.cpp b/src/armnn/layers/QLstmLayer.cpp index c5155d7bf9..321d985fed 100644 --- a/src/armnn/layers/QLstmLayer.cpp +++ b/src/armnn/layers/QLstmLayer.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2020 Arm Ltd. All rights reserved. +// Copyright © 2020 Arm Ltd and Contributors. All rights reserved. 
// SPDX-License-Identifier: MIT // #include "QLstmLayer.hpp" @@ -167,15 +167,17 @@ std::vector<TensorShape> QLstmLayer::InferOutputShapes(const std::vector<TensorS void QLstmLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { - IgnoreUnused(shapeInferenceMethod); - VerifyLayerConnections(3, CHECK_LOCATION()); + const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape(); + + VerifyShapeInferenceType(outputShape, shapeInferenceMethod); + auto inferredShapes = InferOutputShapes( { GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape(), // input GetInputSlot(1).GetConnection()->GetTensorInfo().GetShape(), // previousOutputIn - GetInputSlot(2).GetConnection()->GetTensorInfo().GetShape() // previousCellStateIn + GetInputSlot(2).GetConnection()->GetTensorInfo().GetShape() // previousCellStateIn }); ARMNN_ASSERT(inferredShapes.size() == 3); @@ -209,10 +211,7 @@ void QLstmLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInfere ARMNN_ASSERT_MSG(m_CifgParameters.m_InputGateBias != nullptr, "QLstmLayer: m_CifgParameters.m_InputGateBias should not be null."); - ConditionalThrowIfNotEqual<LayerValidationException>( - "QLstmLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.", - GetOutputSlot(0).GetTensorInfo().GetShape(), - inferredShapes[0]); + ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "QLstmLayer"); } else { @@ -224,10 +223,7 @@ void QLstmLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInfere ARMNN_ASSERT_MSG(m_CifgParameters.m_InputGateBias == nullptr, "QLstmLayer: m_CifgParameters.m_InputGateBias should not have a value when CIFG is enabled."); - ConditionalThrowIfNotEqual<LayerValidationException>( - "QLstmLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.", - GetOutputSlot(0).GetTensorInfo().GetShape(), - inferredShapes[0]); + ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, 
"QLstmLayer"); } if (m_Param.m_ProjectionEnabled) @@ -250,14 +246,10 @@ void QLstmLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInfere "QLstmLayer: m_PeepholeParameters.m_CellToOutputWeights should not be null."); } - ConditionalThrowIfNotEqual<LayerValidationException>( - "QLstmLayer: TensorShape set on OutputSlot[1] does not match the inferred shape.", - GetOutputSlot(1).GetTensorInfo().GetShape(), - inferredShapes[1]); - ConditionalThrowIfNotEqual<LayerValidationException>( - "QLstmLayer: TensorShape set on OutputSlot[2] does not match the inferred shape.", - GetOutputSlot(2).GetTensorInfo().GetShape(), - inferredShapes[2]); + ValidateAndCopyShape( + GetOutputSlot(1).GetTensorInfo().GetShape(), inferredShapes[1], shapeInferenceMethod, "QLstmLayer", 1); + ValidateAndCopyShape( + GetOutputSlot(2).GetTensorInfo().GetShape(), inferredShapes[2], shapeInferenceMethod, "QLstmLayer", 2); if (m_Param.m_LayerNormEnabled) { diff --git a/src/armnn/layers/QuantizeLayer.cpp b/src/armnn/layers/QuantizeLayer.cpp index 5cfac25e4b..990d2b4b88 100644 --- a/src/armnn/layers/QuantizeLayer.cpp +++ b/src/armnn/layers/QuantizeLayer.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd. All rights reserved. +// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. 
// SPDX-License-Identifier: MIT // @@ -31,16 +31,15 @@ Layer* QuantizeLayer::Clone(Graph& graph) const void QuantizeLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { - IgnoreUnused(shapeInferenceMethod); - VerifyLayerConnections(1, CHECK_LOCATION()); + const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape(); + + VerifyShapeInferenceType(outputShape, shapeInferenceMethod); + auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() }); - ConditionalThrowIfNotEqual<LayerValidationException>( - "QuantizeLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.", - GetOutputSlot(0).GetTensorInfo().GetShape(), - inferredShapes[0]); + ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "QuantizeLayer"); } void QuantizeLayer::Accept(ILayerVisitor& visitor) const diff --git a/src/armnn/layers/QuantizedLstmLayer.cpp b/src/armnn/layers/QuantizedLstmLayer.cpp index e26857e01e..432d50dc26 100644 --- a/src/armnn/layers/QuantizedLstmLayer.cpp +++ b/src/armnn/layers/QuantizedLstmLayer.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd. All rights reserved. +// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. 
// SPDX-License-Identifier: MIT // #include "QuantizedLstmLayer.hpp" @@ -93,10 +93,12 @@ std::vector<TensorShape> QuantizedLstmLayer::InferOutputShapes(const std::vector void QuantizedLstmLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { - IgnoreUnused(shapeInferenceMethod); - VerifyLayerConnections(3, CHECK_LOCATION()); + const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape(); + + VerifyShapeInferenceType(outputShape, shapeInferenceMethod); + auto inferredShapes = InferOutputShapes( { GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape(), // input @@ -135,15 +137,13 @@ void QuantizedLstmLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod sha "QuantizedLstmLayer: m_QuantizedLstmParameters.m_OutputGateBias should not be null."); // Check output TensorShape(s) match inferred shape - ConditionalThrowIfNotEqual<LayerValidationException>( - "QuantizedLstmLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.", - GetOutputSlot(0).GetTensorInfo().GetShape(), - inferredShapes[0]); - - ConditionalThrowIfNotEqual<LayerValidationException>( - "QuantizedLstmLayer: TensorShape set on OutputSlot[1] does not match the inferred shape.", - GetOutputSlot(1).GetTensorInfo().GetShape(), - inferredShapes[1]); + ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "QuantizedLstmLayer"); + + ValidateAndCopyShape(GetOutputSlot(1).GetTensorInfo().GetShape(), + inferredShapes[1], + shapeInferenceMethod, + "QuantizedLstmLayer", + 1); } Layer::ConstantTensors QuantizedLstmLayer::GetConstantTensorsByRef() diff --git a/src/armnn/layers/RankLayer.cpp b/src/armnn/layers/RankLayer.cpp index f1a24b1633..62f77df16f 100644 --- a/src/armnn/layers/RankLayer.cpp +++ b/src/armnn/layers/RankLayer.cpp @@ -34,11 +34,12 @@ void RankLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferen IgnoreUnused(shapeInferenceMethod); VerifyLayerConnections(1, CHECK_LOCATION()); - 
ConditionalThrowIfNotEqual<LayerValidationException>( - "Rank: TensorShape set on OutputSlot[0] does not match the inferred shape.", - GetOutputSlot(0).GetTensorInfo().GetShape(), {TensorShape{Dimensionality::Scalar}}); -} + const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape(); + const TensorShape inferredShape = TensorShape(Dimensionality::Scalar); + VerifyShapeInferenceType(outputShape, shapeInferenceMethod); + ValidateAndCopyShape(outputShape, inferredShape, shapeInferenceMethod, "RankLayer"); +} void RankLayer::Accept(ILayerVisitor& visitor) const { visitor.VisitRankLayer(this, GetName()); diff --git a/src/armnn/layers/ReshapeLayer.cpp b/src/armnn/layers/ReshapeLayer.cpp index 0257ca9b15..3f955a57b0 100644 --- a/src/armnn/layers/ReshapeLayer.cpp +++ b/src/armnn/layers/ReshapeLayer.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd. All rights reserved. +// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. // SPDX-License-Identifier: MIT // #include "ReshapeLayer.hpp" @@ -38,18 +38,17 @@ std::vector<TensorShape> ReshapeLayer::InferOutputShapes(const std::vector<Tenso void ReshapeLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { - IgnoreUnused(shapeInferenceMethod); - VerifyLayerConnections(1, CHECK_LOCATION()); - auto inferredShapes = InferOutputShapes({ }); + const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape(); + + VerifyShapeInferenceType(outputShape, shapeInferenceMethod); + + auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() }); ARMNN_ASSERT(inferredShapes.size() == 1); - ConditionalThrowIfNotEqual<LayerValidationException>( - "ReshapeLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.", - GetOutputSlot(0).GetTensorInfo().GetShape(), - inferredShapes[0]); + ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "ReshapeLayer"); } void ReshapeLayer::Accept(ILayerVisitor& 
visitor) const diff --git a/src/armnn/layers/ResizeLayer.cpp b/src/armnn/layers/ResizeLayer.cpp index b07eb9a7d6..ab8430ac00 100644 --- a/src/armnn/layers/ResizeLayer.cpp +++ b/src/armnn/layers/ResizeLayer.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd. All rights reserved. +// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. // SPDX-License-Identifier: MIT // @@ -60,18 +60,17 @@ std::vector<TensorShape> ResizeLayer::InferOutputShapes(const std::vector<Tensor void ResizeLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { - IgnoreUnused(shapeInferenceMethod); - VerifyLayerConnections(1, CHECK_LOCATION()); + const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape(); + + VerifyShapeInferenceType(outputShape, shapeInferenceMethod); + auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() }); ARMNN_ASSERT(inferredShapes.size() == 1); - ConditionalThrowIfNotEqual<LayerValidationException>( - "ResizeLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.", - GetOutputSlot(0).GetTensorInfo().GetShape(), - inferredShapes[0]); + ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "ResizeLayer"); } void ResizeLayer::Accept(ILayerVisitor& visitor) const diff --git a/src/armnn/layers/RsqrtLayer.cpp b/src/armnn/layers/RsqrtLayer.cpp index b3aecb27a5..a68b5a4766 100644 --- a/src/armnn/layers/RsqrtLayer.cpp +++ b/src/armnn/layers/RsqrtLayer.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd. All rights reserved. +// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. 
// SPDX-License-Identifier: MIT // @@ -32,18 +32,17 @@ RsqrtLayer* RsqrtLayer::Clone(Graph& graph) const void RsqrtLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { - IgnoreUnused(shapeInferenceMethod); - VerifyLayerConnections(1, CHECK_LOCATION()); + const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape(); + + VerifyShapeInferenceType(outputShape, shapeInferenceMethod); + auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() }); ARMNN_ASSERT(inferredShapes.size() == 1); - ConditionalThrowIfNotEqual<LayerValidationException>( - "RsqrtLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.", - GetOutputSlot(0).GetTensorInfo().GetShape(), - inferredShapes[0]); + ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "RsqrtLayer"); } void RsqrtLayer::Accept(ILayerVisitor& visitor) const diff --git a/src/armnn/layers/SliceLayer.cpp b/src/armnn/layers/SliceLayer.cpp index 2aa32e3b44..a31f6037e1 100644 --- a/src/armnn/layers/SliceLayer.cpp +++ b/src/armnn/layers/SliceLayer.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2019 Arm Ltd. All rights reserved. +// Copyright © 2019 Arm Ltd and Contributors. All rights reserved. 
// SPDX-License-Identifier: MIT // @@ -35,18 +35,17 @@ SliceLayer* SliceLayer::Clone(Graph& graph) const void SliceLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { - IgnoreUnused(shapeInferenceMethod); - VerifyLayerConnections(1, CHECK_LOCATION()); + const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape(); + + VerifyShapeInferenceType(outputShape, shapeInferenceMethod); + auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() }); ARMNN_ASSERT(inferredShapes.size() == 1); - ConditionalThrowIfNotEqual<LayerValidationException>( - "SliceLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.", - GetOutputSlot(0).GetTensorInfo().GetShape(), - inferredShapes[0]); + ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "SliceLayer"); } std::vector<TensorShape> SliceLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const diff --git a/src/armnn/layers/SoftmaxLayer.cpp b/src/armnn/layers/SoftmaxLayer.cpp index 56c3792c1b..3da2cb2b00 100644 --- a/src/armnn/layers/SoftmaxLayer.cpp +++ b/src/armnn/layers/SoftmaxLayer.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd. All rights reserved. +// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. 
// SPDX-License-Identifier: MIT // #include "SoftmaxLayer.hpp" @@ -31,18 +31,17 @@ SoftmaxLayer* SoftmaxLayer::Clone(Graph& graph) const void SoftmaxLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { - IgnoreUnused(shapeInferenceMethod); - VerifyLayerConnections(1, CHECK_LOCATION()); + const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape(); + + VerifyShapeInferenceType(outputShape, shapeInferenceMethod); + auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() }); ARMNN_ASSERT(inferredShapes.size() == 1); - ConditionalThrowIfNotEqual<LayerValidationException>( - "SoftmaxLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.", - GetOutputSlot(0).GetTensorInfo().GetShape(), - inferredShapes[0]); + ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "SoftmaxLayer"); } void SoftmaxLayer::Accept(ILayerVisitor& visitor) const diff --git a/src/armnn/layers/SpaceToBatchNdLayer.cpp b/src/armnn/layers/SpaceToBatchNdLayer.cpp index fbc3ca755d..4eba06691c 100644 --- a/src/armnn/layers/SpaceToBatchNdLayer.cpp +++ b/src/armnn/layers/SpaceToBatchNdLayer.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd. All rights reserved. +// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. 
// SPDX-License-Identifier: MIT // @@ -68,19 +68,18 @@ std::vector<TensorShape> SpaceToBatchNdLayer::InferOutputShapes(const std::vecto void SpaceToBatchNdLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { - IgnoreUnused(shapeInferenceMethod); - VerifyLayerConnections(1, CHECK_LOCATION()); + const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape(); + + VerifyShapeInferenceType(outputShape, shapeInferenceMethod); + std::vector<TensorShape> inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() }); ARMNN_ASSERT(inferredShapes.size() == 1); - ConditionalThrowIfNotEqual<LayerValidationException>( - "SpaceToBatchNdLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.", - GetOutputSlot(0).GetTensorInfo().GetShape(), - inferredShapes[0]); + ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "SpaceToBatchNdLayer"); } void SpaceToBatchNdLayer::Accept(ILayerVisitor& visitor) const diff --git a/src/armnn/layers/SpaceToDepthLayer.cpp b/src/armnn/layers/SpaceToDepthLayer.cpp index 5c8e2d4551..4695d812bc 100644 --- a/src/armnn/layers/SpaceToDepthLayer.cpp +++ b/src/armnn/layers/SpaceToDepthLayer.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd. All rights reserved. +// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. 
// SPDX-License-Identifier: MIT // @@ -61,19 +61,18 @@ std::vector<TensorShape> SpaceToDepthLayer::InferOutputShapes(const std::vector< void SpaceToDepthLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { - IgnoreUnused(shapeInferenceMethod); - VerifyLayerConnections(1, CHECK_LOCATION()); + const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape(); + + VerifyShapeInferenceType(outputShape, shapeInferenceMethod); + std::vector<TensorShape> inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() }); ARMNN_ASSERT(inferredShapes.size() == 1); - ConditionalThrowIfNotEqual<LayerValidationException>( - "SpaceToDepthLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.", - GetOutputSlot(0).GetTensorInfo().GetShape(), - inferredShapes[0]); + ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "SpaceToDepthLayer"); } void SpaceToDepthLayer::Accept(ILayerVisitor& visitor) const diff --git a/src/armnn/layers/SplitterLayer.cpp b/src/armnn/layers/SplitterLayer.cpp index 9455c88041..60dc9611e8 100644 --- a/src/armnn/layers/SplitterLayer.cpp +++ b/src/armnn/layers/SplitterLayer.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd. All rights reserved. +// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. 
// SPDX-License-Identifier: MIT // #include "SplitterLayer.hpp" @@ -141,7 +141,10 @@ std::vector<TensorShape> SplitterLayer::InferOutputShapes(const std::vector<Tens void SplitterLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { - IgnoreUnused(shapeInferenceMethod); + std::for_each(BeginOutputSlots(), EndOutputSlots(), [&](OutputSlot& outputSlot) + { + VerifyShapeInferenceType(outputSlot.GetTensorInfo().GetShape(), shapeInferenceMethod); + }); std::vector<TensorShape> views; for (unsigned int viewIdx = 0; viewIdx < m_Param.GetNumViews(); viewIdx++) @@ -156,10 +159,11 @@ void SplitterLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInf for (unsigned int viewIdx = 0; viewIdx < m_Param.GetNumViews(); viewIdx++) { - ConditionalThrowIfNotEqual<LayerValidationException>( - "SplitterLayer: View sizes must match output tensor shapes.", - GetOutputSlot(viewIdx).GetTensorInfo().GetShape(), - inferredShapes[viewIdx]); + ValidateAndCopyShape(GetOutputSlot(viewIdx).GetTensorInfo().GetShape(), + inferredShapes[viewIdx], + shapeInferenceMethod, + "SplitterLayer", + viewIdx); } } diff --git a/src/armnn/layers/StackLayer.cpp b/src/armnn/layers/StackLayer.cpp index 6e81890290..f5d761bdc5 100644 --- a/src/armnn/layers/StackLayer.cpp +++ b/src/armnn/layers/StackLayer.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd. All rights reserved. +// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. // SPDX-License-Identifier: MIT // #include "StackLayer.hpp" @@ -60,8 +60,6 @@ std::vector<TensorShape> StackLayer::InferOutputShapes(const std::vector<TensorS void StackLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { - IgnoreUnused(shapeInferenceMethod); - // Validates Stack layer. 
ConditionalThrowIfNotEqual<LayerValidationException>( "StackLayer: Num Input Slots must match Num Inputs.", @@ -70,6 +68,10 @@ void StackLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInfere VerifyLayerConnections(m_Param.m_NumInputs, CHECK_LOCATION()); + const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape(); + + VerifyShapeInferenceType(outputShape, shapeInferenceMethod); + // Constructs and validates input shapes std::vector<TensorShape> inputShapes; for (unsigned int i = 0; i < GetNumInputSlots(); ++i) @@ -88,10 +90,7 @@ void StackLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInfere ARMNN_ASSERT(inferredShapes.size() == 1); - ConditionalThrowIfNotEqual<LayerValidationException>( - "StackLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.", - GetOutputSlot(0).GetTensorInfo().GetShape(), - inferredShapes[0]); + ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "StackLayer"); } void StackLayer::Accept(ILayerVisitor& visitor) const diff --git a/src/armnn/layers/StandInLayer.cpp b/src/armnn/layers/StandInLayer.cpp index d79caf613c..623f4a5b3f 100644 --- a/src/armnn/layers/StandInLayer.cpp +++ b/src/armnn/layers/StandInLayer.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2019 Arm Ltd. All rights reserved. +// Copyright © 2019 Arm Ltd and Contributors. All rights reserved. // SPDX-License-Identifier: MIT // diff --git a/src/armnn/layers/StridedSliceLayer.cpp b/src/armnn/layers/StridedSliceLayer.cpp index fbe9815c06..fc9df856ec 100644 --- a/src/armnn/layers/StridedSliceLayer.cpp +++ b/src/armnn/layers/StridedSliceLayer.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd. All rights reserved. +// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. 
// SPDX-License-Identifier: MIT // #include "StridedSliceLayer.hpp" @@ -96,18 +96,17 @@ std::vector<TensorShape> StridedSliceLayer::InferOutputShapes( void StridedSliceLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { - IgnoreUnused(shapeInferenceMethod); - VerifyLayerConnections(1, CHECK_LOCATION()); + const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape(); + + VerifyShapeInferenceType(outputShape, shapeInferenceMethod); + auto inferredShapes = InferOutputShapes({GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape()}); ARMNN_ASSERT(inferredShapes.size() == 1); - ConditionalThrowIfNotEqual<LayerValidationException>( - "StridedSlice: TensorShape set on OutputSlot[0] does not match the inferred shape.", - GetOutputSlot(0).GetTensorInfo().GetShape(), - inferredShapes[0]); + ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "StridedSliceLayer"); } void StridedSliceLayer::Accept(ILayerVisitor& visitor) const diff --git a/src/armnn/layers/SubtractionLayer.cpp b/src/armnn/layers/SubtractionLayer.cpp index 0797742aea..82c96428a5 100644 --- a/src/armnn/layers/SubtractionLayer.cpp +++ b/src/armnn/layers/SubtractionLayer.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd. All rights reserved. +// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. // SPDX-License-Identifier: MIT // diff --git a/src/armnn/layers/SwitchLayer.cpp b/src/armnn/layers/SwitchLayer.cpp index d408de89e7..b763b0804c 100644 --- a/src/armnn/layers/SwitchLayer.cpp +++ b/src/armnn/layers/SwitchLayer.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd. All rights reserved. +// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. 
// SPDX-License-Identifier: MIT // #include "SwitchLayer.hpp" @@ -29,28 +29,25 @@ SwitchLayer* SwitchLayer::Clone(Graph& graph) const void SwitchLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { - IgnoreUnused(shapeInferenceMethod); - VerifyLayerConnections(2, CHECK_LOCATION()); + const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape(); + + VerifyShapeInferenceType(outputShape, shapeInferenceMethod); + ARMNN_ASSERT_MSG(GetNumOutputSlots() == 2, "SwitchLayer: The layer should return 2 outputs."); // Assuming first input is the Input and second input is the Constant std::vector<TensorShape> inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape(), - GetInputSlot(1).GetConnection()->GetTensorInfo().GetShape() }); + GetInputSlot(1).GetConnection()->GetTensorInfo().GetShape()}); ARMNN_ASSERT(inferredShapes.size() == 2); - ConditionalThrowIfNotEqual<LayerValidationException>( - "SwitchLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.", - GetOutputSlot(0).GetTensorInfo().GetShape(), - inferredShapes[0]); + ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "SwitchLayer"); - ConditionalThrowIfNotEqual<LayerValidationException>( - "SwitchLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.", - GetOutputSlot(1).GetTensorInfo().GetShape(), - inferredShapes[0]); + ValidateAndCopyShape( + GetOutputSlot(1).GetTensorInfo().GetShape(), inferredShapes[1], shapeInferenceMethod, "SwitchLayer", 1); } void SwitchLayer::Accept(ILayerVisitor& visitor) const diff --git a/src/armnn/layers/TransposeConvolution2dLayer.cpp b/src/armnn/layers/TransposeConvolution2dLayer.cpp index 8a264253e0..7074be9659 100644 --- a/src/armnn/layers/TransposeConvolution2dLayer.cpp +++ b/src/armnn/layers/TransposeConvolution2dLayer.cpp @@ -1,13 +1,11 @@ // -// Copyright © 2017 Arm Ltd. All rights reserved. 
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. // SPDX-License-Identifier: MIT // #include "TransposeConvolution2dLayer.hpp" #include "LayerCloneBase.hpp" -#include <armnn/TypesUtils.hpp> - #include <armnnUtils/DataLayoutIndexed.hpp> #include <backendsCommon/CpuTensorHandle.hpp> @@ -105,10 +103,12 @@ std::vector<TensorShape> TransposeConvolution2dLayer::InferOutputShapes( void TransposeConvolution2dLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { - IgnoreUnused(shapeInferenceMethod); - VerifyLayerConnections(1, CHECK_LOCATION()); + const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape(); + + VerifyShapeInferenceType(outputShape, shapeInferenceMethod); + ARMNN_ASSERT_MSG(m_Weight != nullptr, "TransposeConvolution2dLayer: Weight data cannot be null."); std::vector<TensorShape> expectedOutputShape; @@ -127,10 +127,7 @@ void TransposeConvolution2dLayer::ValidateTensorShapesFromInputs(ShapeInferenceM ARMNN_ASSERT(expectedOutputShape.size() == 1); - ConditionalThrowIfNotEqual<LayerValidationException>( - "TransposeConvolution2dLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.", - GetOutputSlot(0).GetTensorInfo().GetShape(), - expectedOutputShape[0]); + ValidateAndCopyShape(outputShape, expectedOutputShape[0], shapeInferenceMethod, "TransposeConvolution2dLayer"); } Layer::ConstantTensors TransposeConvolution2dLayer::GetConstantTensorsByRef() diff --git a/src/armnn/layers/TransposeLayer.cpp b/src/armnn/layers/TransposeLayer.cpp index 90f8d1a7db..7dfb003019 100644 --- a/src/armnn/layers/TransposeLayer.cpp +++ b/src/armnn/layers/TransposeLayer.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2020 Arm Ltd. All rights reserved. +// Copyright © 2020 Arm Ltd and Contributors. All rights reserved. 
// SPDX-License-Identifier: MIT // @@ -42,18 +42,17 @@ std::vector<TensorShape> TransposeLayer::InferOutputShapes(const std::vector<Ten void TransposeLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { - IgnoreUnused(shapeInferenceMethod); - VerifyLayerConnections(1, CHECK_LOCATION()); + const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape(); + + VerifyShapeInferenceType(outputShape, shapeInferenceMethod); + auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() }); ARMNN_ASSERT(inferredShapes.size() == 1); - ConditionalThrowIfNotEqual<LayerValidationException>( - "TransposeLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.", - GetOutputSlot(0).GetTensorInfo().GetShape(), - inferredShapes[0]); + ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "TransposeLayer"); } void TransposeLayer::Accept(ILayerVisitor& visitor) const |