From 87d0bda9b49d9df4455f1887027e5ead2527c27e Mon Sep 17 00:00:00 2001 From: Finn Williams Date: Fri, 3 Jul 2020 10:12:03 +0100 Subject: IVGCVSW-4929 Implement ShapeInferenceMethod in all Layers Signed-off-by: Finn Williams Change-Id: I2c2d99f97cf89814140b057a9f93f41b364197f5 --- CMakeLists.txt | 1 + src/armnn/Layer.cpp | 63 ++ src/armnn/Layer.hpp | 8 + src/armnn/Tensor.cpp | 1 - src/armnn/layers/AbsLayer.cpp | 12 +- src/armnn/layers/ActivationLayer.cpp | 13 +- src/armnn/layers/AdditionLayer.cpp | 2 +- src/armnn/layers/ArgMinMaxLayer.cpp | 13 +- src/armnn/layers/BatchNormalizationLayer.cpp | 13 +- src/armnn/layers/BatchToSpaceNdLayer.cpp | 12 +- src/armnn/layers/ComparisonLayer.cpp | 13 +- src/armnn/layers/ConcatLayer.cpp | 13 +- src/armnn/layers/ConstantLayer.cpp | 17 +- src/armnn/layers/ConvertBf16ToFp32Layer.cpp | 13 +- src/armnn/layers/ConvertFp16ToFp32Layer.cpp | 13 +- src/armnn/layers/ConvertFp32ToBf16Layer.cpp | 12 +- src/armnn/layers/ConvertFp32ToFp16Layer.cpp | 12 +- src/armnn/layers/Convolution2dLayer.cpp | 13 +- src/armnn/layers/DebugLayer.cpp | 13 +- src/armnn/layers/DepthToSpaceLayer.cpp | 13 +- src/armnn/layers/DepthwiseConvolution2dLayer.cpp | 13 +- src/armnn/layers/DequantizeLayer.cpp | 13 +- src/armnn/layers/DetectionPostProcessLayer.cpp | 40 +- src/armnn/layers/DivisionLayer.cpp | 2 +- src/armnn/layers/ElementwiseBaseLayer.cpp | 14 +- src/armnn/layers/ElementwiseUnaryLayer.cpp | 13 +- src/armnn/layers/FakeQuantizationLayer.cpp | 13 +- src/armnn/layers/FillLayer.cpp | 10 +- src/armnn/layers/FloorLayer.cpp | 14 +- src/armnn/layers/FullyConnectedLayer.cpp | 17 +- src/armnn/layers/GatherLayer.cpp | 11 +- src/armnn/layers/InputLayer.cpp | 2 +- src/armnn/layers/InstanceNormalizationLayer.cpp | 13 +- src/armnn/layers/L2NormalizationLayer.cpp | 13 +- src/armnn/layers/LogSoftmaxLayer.cpp | 13 +- src/armnn/layers/LstmLayer.cpp | 40 +- src/armnn/layers/MaximumLayer.cpp | 2 +- src/armnn/layers/MeanLayer.cpp | 13 +- src/armnn/layers/MemCopyLayer.cpp | 13 +- 
src/armnn/layers/MemImportLayer.cpp | 13 +- src/armnn/layers/MergeLayer.cpp | 13 +- src/armnn/layers/MinimumLayer.cpp | 2 +- src/armnn/layers/MultiplicationLayer.cpp | 2 +- src/armnn/layers/NormalizationLayer.cpp | 13 +- src/armnn/layers/OutputLayer.cpp | 2 +- src/armnn/layers/PadLayer.cpp | 2 +- src/armnn/layers/PermuteLayer.cpp | 13 +- src/armnn/layers/Pooling2dLayer.cpp | 13 +- src/armnn/layers/PreCompiledLayer.cpp | 2 +- src/armnn/layers/PreluLayer.cpp | 13 +- src/armnn/layers/QLstmLayer.cpp | 32 +- src/armnn/layers/QuantizeLayer.cpp | 13 +- src/armnn/layers/QuantizedLstmLayer.cpp | 24 +- src/armnn/layers/RankLayer.cpp | 9 +- src/armnn/layers/ReshapeLayer.cpp | 15 +- src/armnn/layers/ResizeLayer.cpp | 13 +- src/armnn/layers/RsqrtLayer.cpp | 13 +- src/armnn/layers/SliceLayer.cpp | 13 +- src/armnn/layers/SoftmaxLayer.cpp | 13 +- src/armnn/layers/SpaceToBatchNdLayer.cpp | 13 +- src/armnn/layers/SpaceToDepthLayer.cpp | 13 +- src/armnn/layers/SplitterLayer.cpp | 16 +- src/armnn/layers/StackLayer.cpp | 13 +- src/armnn/layers/StandInLayer.cpp | 2 +- src/armnn/layers/StridedSliceLayer.cpp | 13 +- src/armnn/layers/SubtractionLayer.cpp | 2 +- src/armnn/layers/SwitchLayer.cpp | 21 +- src/armnn/layers/TransposeConvolution2dLayer.cpp | 15 +- src/armnn/layers/TransposeLayer.cpp | 13 +- src/armnn/test/FlowControl.cpp | 7 +- src/armnn/test/ShapeInferenceTests.cpp | 694 +++++++++++++++++++++++ 71 files changed, 1157 insertions(+), 437 deletions(-) create mode 100644 src/armnn/test/ShapeInferenceTests.cpp diff --git a/CMakeLists.txt b/CMakeLists.txt index 2586d22f0c..13b47655c8 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -664,6 +664,7 @@ if(BUILD_UNIT_TESTS) src/armnn/test/OptionalTest.cpp src/armnn/test/ProfilerTests.cpp src/armnn/test/ProfilingEventTest.cpp + src/armnn/test/ShapeInferenceTests.cpp src/armnn/test/SubgraphViewTests.cpp src/armnn/test/TensorHandleStrategyTest.cpp src/armnn/test/TensorHelpers.hpp diff --git a/src/armnn/Layer.cpp b/src/armnn/Layer.cpp index 
12ab035079..692ee32acd 100644 --- a/src/armnn/Layer.cpp +++ b/src/armnn/Layer.cpp @@ -396,6 +396,69 @@ std::vector Layer::InferOutputShapes(const std::vector return inputShapes; } +void Layer::ValidateAndCopyShape(const TensorShape& outputShape, + const TensorShape& inferredShape, + const ShapeInferenceMethod shapeInferenceMethod, + const std::string& layerName, + const unsigned int outputSlotIndex) +{ + if (shapeInferenceMethod == ShapeInferenceMethod::ValidateOnly) + { + ConditionalThrowIfNotEqual( + layerName + ": TensorShape set on OutputSlot[0] does not match the inferred shape.", + outputShape, + inferredShape); + return; + } + + if (outputShape.GetDimensionality() == Dimensionality::Specified) + { + for (unsigned int i = 0; i < outputShape.GetNumDimensions(); ++i) + { + if (outputShape.GetDimensionSpecificity(i) && outputShape[i] != inferredShape[i]) + { + std::stringstream ss; + ss << layerName << ": TensorShape set on OutputSlot[" << outputSlotIndex << + "] does not match the inferred shape at dimension index ["; + ss << i << "] " << outputShape << " != " << inferredShape; + throw LayerValidationException(ss.str()); + } + } + } + + TensorInfo info = GetOutputSlot(outputSlotIndex).GetTensorInfo(); + + armnn::TensorInfo inferredTensorInfo(inferredShape, + info.GetDataType(), + info.GetQuantizationScale(), + info.GetQuantizationOffset()); + + GetOutputSlot(outputSlotIndex).SetTensorInfo(inferredTensorInfo); +} + +void Layer::VerifyShapeInferenceType(const TensorShape& outputShape, ShapeInferenceMethod shapeInferenceMethod) +{ + if (shapeInferenceMethod == ShapeInferenceMethod::ValidateOnly) + { + ConditionalThrow( + outputShape.GetDimensionality() != Dimensionality::NotSpecified, + "Dimensionality can not be NotSpecified while using ShapeInferenceMethod::ValidateOnly"); + + ConditionalThrow( + outputShape.AreAllDimensionsSpecified(), + "Unspecified dimension while using ShapeInferenceMethod::ValidateOnly"); + } + else + { + if (outputShape.GetDimensionality() 
== Dimensionality::Specified) + { + ConditionalThrow( + !outputShape.AreAllDimensionsSpecified(), + "No unspecified dimension while using ShapeInferenceMethod::InferAndValidate"); + } + } +} + void Layer::SerializeLayerParameters(ParameterStringifyFunction& fn) const { std::string layerType = GetLayerTypeAsCString(m_Type); diff --git a/src/armnn/Layer.hpp b/src/armnn/Layer.hpp index 303de052fc..f1954b9d07 100644 --- a/src/armnn/Layer.hpp +++ b/src/armnn/Layer.hpp @@ -347,6 +347,14 @@ protected: CollectWorkloadOutputs(dataCollector); } + void ValidateAndCopyShape(const TensorShape& outputShape, + const TensorShape& inferredShape, + const ShapeInferenceMethod shapeInferenceMethod, + const std::string& layerName, + const unsigned int outputSlotIndex = 0); + + void VerifyShapeInferenceType(const TensorShape& outputShape, ShapeInferenceMethod shapeInferenceMethod); + /// Helper function to reduce duplication in *Layer::CreateWorkload. template WorkloadInfo PrepInfoAndDesc(QueueDescriptor& descriptor) const diff --git a/src/armnn/Tensor.cpp b/src/armnn/Tensor.cpp index 80aa0a920f..c2dd1d3ac0 100644 --- a/src/armnn/Tensor.cpp +++ b/src/armnn/Tensor.cpp @@ -233,7 +233,6 @@ void TensorShape::SetNumDimensions(unsigned int numDimensions, bool initDimensio void TensorShape::SetDimensionSize(unsigned int i, unsigned int dimensionSize) { CheckScalar(); - CheckUnspecifiedNumDimensions(); CheckDimensionIndex(i); m_Dimensions[i] = dimensionSize; diff --git a/src/armnn/layers/AbsLayer.cpp b/src/armnn/layers/AbsLayer.cpp index 6f7141551e..ccee524858 100644 --- a/src/armnn/layers/AbsLayer.cpp +++ b/src/armnn/layers/AbsLayer.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd. All rights reserved. +// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. 
// SPDX-License-Identifier: MIT // @@ -32,18 +32,16 @@ AbsLayer* AbsLayer::Clone(Graph& graph) const void AbsLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { - IgnoreUnused(shapeInferenceMethod); - VerifyLayerConnections(1, CHECK_LOCATION()); + const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape(); + + VerifyShapeInferenceType(outputShape, shapeInferenceMethod); auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() }); ARMNN_ASSERT(inferredShapes.size() == 1); - ConditionalThrowIfNotEqual( - "AbsLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.", - GetOutputSlot(0).GetTensorInfo().GetShape(), - inferredShapes[0]); + ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "AbsLayer"); } void AbsLayer::Accept(ILayerVisitor& visitor) const diff --git a/src/armnn/layers/ActivationLayer.cpp b/src/armnn/layers/ActivationLayer.cpp index 1aed59b781..c6443b6997 100644 --- a/src/armnn/layers/ActivationLayer.cpp +++ b/src/armnn/layers/ActivationLayer.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd. All rights reserved. +// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. 
// SPDX-License-Identifier: MIT // #include "ActivationLayer.hpp" @@ -30,18 +30,17 @@ ActivationLayer* ActivationLayer::Clone(Graph& graph) const void ActivationLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { - IgnoreUnused(shapeInferenceMethod); - VerifyLayerConnections(1, CHECK_LOCATION()); + const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape(); + + VerifyShapeInferenceType(outputShape, shapeInferenceMethod); + auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() }); ARMNN_ASSERT(inferredShapes.size() == 1); - ConditionalThrowIfNotEqual( - "ActivationLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.", - GetOutputSlot(0).GetTensorInfo().GetShape(), - inferredShapes[0]); + ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "ActivationLayer"); } void ActivationLayer::Accept(ILayerVisitor& visitor) const diff --git a/src/armnn/layers/AdditionLayer.cpp b/src/armnn/layers/AdditionLayer.cpp index 911d4e0488..b27f450f73 100644 --- a/src/armnn/layers/AdditionLayer.cpp +++ b/src/armnn/layers/AdditionLayer.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd. All rights reserved. +// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. // SPDX-License-Identifier: MIT // diff --git a/src/armnn/layers/ArgMinMaxLayer.cpp b/src/armnn/layers/ArgMinMaxLayer.cpp index 288192f98d..e288d16232 100644 --- a/src/armnn/layers/ArgMinMaxLayer.cpp +++ b/src/armnn/layers/ArgMinMaxLayer.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd. All rights reserved. +// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. 
// SPDX-License-Identifier: MIT // @@ -71,18 +71,17 @@ std::vector ArgMinMaxLayer::InferOutputShapes(const std::vectorGetTensorInfo().GetShape() }); ARMNN_ASSERT(inferredShapes.size() == 1); - ConditionalThrowIfNotEqual( - "ArgMinMaxLayer: TensorShape set on OutputSlot does not match the inferred shape.", - GetOutputSlot(0).GetTensorInfo().GetShape(), - inferredShapes[0]); + ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "ArgMinMaxLayer"); } void ArgMinMaxLayer::Accept(ILayerVisitor& visitor) const diff --git a/src/armnn/layers/BatchNormalizationLayer.cpp b/src/armnn/layers/BatchNormalizationLayer.cpp index 9fcc30cbba..fa589dbc75 100644 --- a/src/armnn/layers/BatchNormalizationLayer.cpp +++ b/src/armnn/layers/BatchNormalizationLayer.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd. All rights reserved. +// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. // SPDX-License-Identifier: MIT // #include "BatchNormalizationLayer.hpp" @@ -50,18 +50,17 @@ BatchNormalizationLayer* BatchNormalizationLayer::Clone(Graph& graph) const void BatchNormalizationLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { - IgnoreUnused(shapeInferenceMethod); - VerifyLayerConnections(1, CHECK_LOCATION()); + const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape(); + + VerifyShapeInferenceType(outputShape, shapeInferenceMethod); + auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() }); ARMNN_ASSERT(inferredShapes.size() == 1); - ConditionalThrowIfNotEqual( - "BatchNormalizationLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.", - GetOutputSlot(0).GetTensorInfo().GetShape(), - inferredShapes[0]); + ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "BatchNormalizationLayer"); } diff --git a/src/armnn/layers/BatchToSpaceNdLayer.cpp b/src/armnn/layers/BatchToSpaceNdLayer.cpp index 8341b85dfd..fe99e9ebff 
100644 --- a/src/armnn/layers/BatchToSpaceNdLayer.cpp +++ b/src/armnn/layers/BatchToSpaceNdLayer.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd. All rights reserved. +// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. // SPDX-License-Identifier: MIT // @@ -43,17 +43,17 @@ BatchToSpaceNdLayer* BatchToSpaceNdLayer::Clone(Graph& graph) const void BatchToSpaceNdLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { - IgnoreUnused(shapeInferenceMethod); - VerifyLayerConnections(1, CHECK_LOCATION()); + const TensorShape &outputShape = GetOutputSlot(0).GetTensorInfo().GetShape(); + + VerifyShapeInferenceType(outputShape, shapeInferenceMethod); + auto inferredShapes = InferOutputShapes({GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape()}); ARMNN_ASSERT(inferredShapes.size() == 1); - ConditionalThrowIfNotEqual( - "BatchToSpaceLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.", - GetOutputSlot(0).GetTensorInfo().GetShape(),inferredShapes[0]); + ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "BatchToSpaceNdLayer"); } std::vector BatchToSpaceNdLayer::InferOutputShapes(const std::vector& inputShapes) const diff --git a/src/armnn/layers/ComparisonLayer.cpp b/src/armnn/layers/ComparisonLayer.cpp index 483d2e1c72..4dd3781bdd 100644 --- a/src/armnn/layers/ComparisonLayer.cpp +++ b/src/armnn/layers/ComparisonLayer.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2019 Arm Ltd. All rights reserved. +// Copyright © 2019 Arm Ltd and Contributors. All rights reserved. 
// SPDX-License-Identifier: MIT // @@ -57,20 +57,19 @@ std::vector ComparisonLayer::InferOutputShapes(const std::vector inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape(), GetInputSlot(1).GetConnection()->GetTensorInfo().GetShape() }); ARMNN_ASSERT(inferredShapes.size() == 1); - ConditionalThrowIfNotEqual( - "ComparisonLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.", - GetOutputSlot(0).GetTensorInfo().GetShape(), - inferredShapes[0]); + ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "ComparisonLayer"); } void ComparisonLayer::Accept(ILayerVisitor& visitor) const diff --git a/src/armnn/layers/ConcatLayer.cpp b/src/armnn/layers/ConcatLayer.cpp index 9a0672b38d..4f0aa539a1 100644 --- a/src/armnn/layers/ConcatLayer.cpp +++ b/src/armnn/layers/ConcatLayer.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd. All rights reserved. +// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. 
// SPDX-License-Identifier: MIT // #include "ConcatLayer.hpp" @@ -244,8 +244,6 @@ std::vector ConcatLayer::InferOutputShapes(const std::vector( "ConcatLayer: Num Inputs must match num views.", @@ -254,6 +252,10 @@ void ConcatLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInfer VerifyLayerConnections(m_Param.GetNumViews(), CHECK_LOCATION()); + const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape(); + + VerifyShapeInferenceType(outputShape, shapeInferenceMethod); + std::vector inputShapes; for (unsigned int i = 0; i < GetNumInputSlots(); ++i) { @@ -264,10 +266,7 @@ void ConcatLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInfer ARMNN_ASSERT(inferredShapes.size() == 1); - ConditionalThrowIfNotEqual( - "ConcatLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.", - GetOutputSlot(0).GetTensorInfo().GetShape(), - inferredShapes[0]); + ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "ConcatLayer"); } void ConcatLayer::Accept(ILayerVisitor& visitor) const diff --git a/src/armnn/layers/ConstantLayer.cpp b/src/armnn/layers/ConstantLayer.cpp index d354accd2f..ff4c57c431 100644 --- a/src/armnn/layers/ConstantLayer.cpp +++ b/src/armnn/layers/ConstantLayer.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd. All rights reserved. +// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. // SPDX-License-Identifier: MIT // #include "ConstantLayer.hpp" @@ -46,10 +46,19 @@ void ConstantLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInf // Get the output shape from the value of the constant layer. 
TensorShape const& outShape = m_LayerOutput->GetTensorInfo().GetShape(); + + ConditionalThrow( + outShape.GetDimensionality() != Dimensionality::NotSpecified, + "Constant layer m_LayerOutput output shape can not be Dimensionality::NotSpecified"); + + ConditionalThrow( + outShape.AreAllDimensionsSpecified(), + "Constant layer m_LayerOutput output shape can not have an unspecified dimension"); + ConditionalThrowIfNotEqual( - "ConstantLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.", - GetOutputSlot(0).GetTensorInfo().GetShape(), - outShape); + "ConstantLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.", + GetOutputSlot(0).GetTensorInfo().GetShape(), + outShape); } void ConstantLayer::Accept(ILayerVisitor& visitor) const diff --git a/src/armnn/layers/ConvertBf16ToFp32Layer.cpp b/src/armnn/layers/ConvertBf16ToFp32Layer.cpp index b53986a497..250ecfa133 100644 --- a/src/armnn/layers/ConvertBf16ToFp32Layer.cpp +++ b/src/armnn/layers/ConvertBf16ToFp32Layer.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2020 Arm Ltd. All rights reserved. +// Copyright © 2020 Arm Ltd and Contributors. All rights reserved. 
// SPDX-License-Identifier: MIT // @@ -32,18 +32,17 @@ ConvertBf16ToFp32Layer* ConvertBf16ToFp32Layer::Clone(Graph& graph) const void ConvertBf16ToFp32Layer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { - IgnoreUnused(shapeInferenceMethod); - VerifyLayerConnections(1, CHECK_LOCATION()); + const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape(); + + VerifyShapeInferenceType(outputShape, shapeInferenceMethod); + auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() }); ARMNN_ASSERT(inferredShapes.size() == 1); - ConditionalThrowIfNotEqual( - "ConvertBf16ToFp32Layer: TensorShape set on OutputSlot[0] does not match the inferred shape.", - GetOutputSlot(0).GetTensorInfo().GetShape(), - inferredShapes[0]); + ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "ConvertBf16ToFp32Layer"); } void ConvertBf16ToFp32Layer::Accept(ILayerVisitor& visitor) const diff --git a/src/armnn/layers/ConvertFp16ToFp32Layer.cpp b/src/armnn/layers/ConvertFp16ToFp32Layer.cpp index 30f9e6340e..f86397fb01 100644 --- a/src/armnn/layers/ConvertFp16ToFp32Layer.cpp +++ b/src/armnn/layers/ConvertFp16ToFp32Layer.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd. All rights reserved. +// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. 
// SPDX-License-Identifier: MIT // @@ -32,18 +32,17 @@ ConvertFp16ToFp32Layer* ConvertFp16ToFp32Layer::Clone(Graph& graph) const void ConvertFp16ToFp32Layer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { - IgnoreUnused(shapeInferenceMethod); - VerifyLayerConnections(1, CHECK_LOCATION()); + const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape(); + + VerifyShapeInferenceType(outputShape, shapeInferenceMethod); + auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() }); ARMNN_ASSERT(inferredShapes.size() == 1); - ConditionalThrowIfNotEqual( - "ConvertFp16ToFp32Layer: TensorShape set on OutputSlot[0] does not match the inferred shape.", - GetOutputSlot(0).GetTensorInfo().GetShape(), - inferredShapes[0]); + ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "ConvertFp16ToFp32Layer"); } void ConvertFp16ToFp32Layer::Accept(ILayerVisitor& visitor) const diff --git a/src/armnn/layers/ConvertFp32ToBf16Layer.cpp b/src/armnn/layers/ConvertFp32ToBf16Layer.cpp index 9f523ae417..15052455e4 100644 --- a/src/armnn/layers/ConvertFp32ToBf16Layer.cpp +++ b/src/armnn/layers/ConvertFp32ToBf16Layer.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2020 Arm Ltd. All rights reserved. +// Copyright © 2020 Arm Ltd and Contributors. All rights reserved. 
// SPDX-License-Identifier: MIT // @@ -32,18 +32,18 @@ ConvertFp32ToBf16Layer* ConvertFp32ToBf16Layer::Clone(Graph& graph) const void ConvertFp32ToBf16Layer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { - IgnoreUnused(shapeInferenceMethod); VerifyLayerConnections(1, CHECK_LOCATION()); + const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape(); + + VerifyShapeInferenceType(outputShape, shapeInferenceMethod); + auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() }); ARMNN_ASSERT(inferredShapes.size() == 1); - ConditionalThrowIfNotEqual( - "ConvertFp32ToBf16Layer: TensorShape set on OutputSlot[0] does not match the inferred shape.", - GetOutputSlot(0).GetTensorInfo().GetShape(), - inferredShapes[0]); + ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "ConvertFp32ToBf16Layer"); } void ConvertFp32ToBf16Layer::Accept(ILayerVisitor& visitor) const diff --git a/src/armnn/layers/ConvertFp32ToFp16Layer.cpp b/src/armnn/layers/ConvertFp32ToFp16Layer.cpp index 7ff98ed898..0a126e2284 100644 --- a/src/armnn/layers/ConvertFp32ToFp16Layer.cpp +++ b/src/armnn/layers/ConvertFp32ToFp16Layer.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd. All rights reserved. +// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. 
// SPDX-License-Identifier: MIT // #include "ConvertFp32ToFp16Layer.hpp" @@ -31,18 +31,18 @@ ConvertFp32ToFp16Layer* ConvertFp32ToFp16Layer::Clone(Graph& graph) const void ConvertFp32ToFp16Layer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { - IgnoreUnused(shapeInferenceMethod); VerifyLayerConnections(1, CHECK_LOCATION()); + const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape(); + + VerifyShapeInferenceType(outputShape, shapeInferenceMethod); + auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() }); ARMNN_ASSERT(inferredShapes.size() == 1); - ConditionalThrowIfNotEqual( - "ConvertFp32ToFp16Layer: TensorShape set on OutputSlot[0] does not match the inferred shape.", - GetOutputSlot(0).GetTensorInfo().GetShape(), - inferredShapes[0]); + ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "ConvertFp32ToFp16Layer"); } void ConvertFp32ToFp16Layer::Accept(ILayerVisitor& visitor) const diff --git a/src/armnn/layers/Convolution2dLayer.cpp b/src/armnn/layers/Convolution2dLayer.cpp index 462d3554d3..a1535ea7cc 100644 --- a/src/armnn/layers/Convolution2dLayer.cpp +++ b/src/armnn/layers/Convolution2dLayer.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd. All rights reserved. +// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. 
// SPDX-License-Identifier: MIT // @@ -114,10 +114,12 @@ std::vector Convolution2dLayer::InferOutputShapes(const std::vector void Convolution2dLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { - IgnoreUnused(shapeInferenceMethod); - VerifyLayerConnections(1, CHECK_LOCATION()); + const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape(); + + VerifyShapeInferenceType(outputShape, shapeInferenceMethod); + // check if we m_Weight data is not nullptr ARMNN_ASSERT_MSG(m_Weight != nullptr, "Convolution2dLayer: Weights data should not be null."); @@ -127,10 +129,7 @@ void Convolution2dLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod sha ARMNN_ASSERT(inferredShapes.size() == 1); - ConditionalThrowIfNotEqual( - "Convolution2dLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.", - GetOutputSlot(0).GetTensorInfo().GetShape(), - inferredShapes[0]); + ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "Convolution2dLayer"); } Layer::ConstantTensors Convolution2dLayer::GetConstantTensorsByRef() diff --git a/src/armnn/layers/DebugLayer.cpp b/src/armnn/layers/DebugLayer.cpp index 3422de6ad3..40bc8fe54b 100644 --- a/src/armnn/layers/DebugLayer.cpp +++ b/src/armnn/layers/DebugLayer.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd. All rights reserved. +// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. 
// SPDX-License-Identifier: MIT // #include "DebugLayer.hpp" @@ -36,19 +36,18 @@ DebugLayer* DebugLayer::Clone(Graph& graph) const void DebugLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { - IgnoreUnused(shapeInferenceMethod); - VerifyLayerConnections(1, CHECK_LOCATION()); + const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape(); + + VerifyShapeInferenceType(outputShape, shapeInferenceMethod); + std::vector inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() }); ARMNN_ASSERT(inferredShapes.size() == 1); - ConditionalThrowIfNotEqual( - "DebugLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.", - GetOutputSlot(0).GetTensorInfo().GetShape(), - inferredShapes[0]); + ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "DebugLayer"); } void DebugLayer::Accept(ILayerVisitor& visitor) const diff --git a/src/armnn/layers/DepthToSpaceLayer.cpp b/src/armnn/layers/DepthToSpaceLayer.cpp index a3344841c9..4b7c41d317 100644 --- a/src/armnn/layers/DepthToSpaceLayer.cpp +++ b/src/armnn/layers/DepthToSpaceLayer.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2019 Arm Ltd. All rights reserved. +// Copyright © 2019 Arm Ltd and Contributors. All rights reserved. 
// SPDX-License-Identifier: MIT // @@ -59,19 +59,18 @@ std::vector DepthToSpaceLayer::InferOutputShapes(const std::vector< void DepthToSpaceLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { - IgnoreUnused(shapeInferenceMethod); - VerifyLayerConnections(1, CHECK_LOCATION()); + const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape(); + + VerifyShapeInferenceType(outputShape, shapeInferenceMethod); + std::vector inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() }); ARMNN_ASSERT(inferredShapes.size() == 1); - ConditionalThrowIfNotEqual( - "DepthToSpaceLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.", - GetOutputSlot(0).GetTensorInfo().GetShape(), - inferredShapes[0]); + ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "DepthToSpaceLayer"); } void DepthToSpaceLayer::Accept(ILayerVisitor& visitor) const diff --git a/src/armnn/layers/DepthwiseConvolution2dLayer.cpp b/src/armnn/layers/DepthwiseConvolution2dLayer.cpp index 7efb307091..98d9e82f7f 100644 --- a/src/armnn/layers/DepthwiseConvolution2dLayer.cpp +++ b/src/armnn/layers/DepthwiseConvolution2dLayer.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd. All rights reserved. +// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. // SPDX-License-Identifier: MIT // @@ -121,10 +121,12 @@ DepthwiseConvolution2dLayer::InferOutputShapes(const std::vector& i void DepthwiseConvolution2dLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { - IgnoreUnused(shapeInferenceMethod); - VerifyLayerConnections(1, CHECK_LOCATION()); + const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape(); + + VerifyShapeInferenceType(outputShape, shapeInferenceMethod); + // on this level constant data should not be released.. 
ARMNN_ASSERT_MSG(m_Weight != nullptr, "DepthwiseConvolution2dLayer: Weights data should not be null."); @@ -135,10 +137,7 @@ void DepthwiseConvolution2dLayer::ValidateTensorShapesFromInputs(ShapeInferenceM ARMNN_ASSERT(inferredShapes.size() == 1); - ConditionalThrowIfNotEqual( - "DepthwiseConvolution2dLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.", - GetOutputSlot(0).GetTensorInfo().GetShape(), - inferredShapes[0]); + ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "DepthwiseConvolution2dLayer"); } Layer::ConstantTensors DepthwiseConvolution2dLayer::GetConstantTensorsByRef() diff --git a/src/armnn/layers/DequantizeLayer.cpp b/src/armnn/layers/DequantizeLayer.cpp index e0c3d0ec16..79ef0cba18 100644 --- a/src/armnn/layers/DequantizeLayer.cpp +++ b/src/armnn/layers/DequantizeLayer.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd. All rights reserved. +// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. // SPDX-License-Identifier: MIT // #include "DequantizeLayer.hpp" @@ -31,19 +31,18 @@ DequantizeLayer* DequantizeLayer::Clone(Graph& graph) const void DequantizeLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { - IgnoreUnused(shapeInferenceMethod); - VerifyLayerConnections(1, CHECK_LOCATION()); + const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape(); + + VerifyShapeInferenceType(outputShape, shapeInferenceMethod); + std::vector inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() }); ARMNN_ASSERT(inferredShapes.size() == 1); - ConditionalThrowIfNotEqual( - "DequantizeLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.", - GetOutputSlot(0).GetTensorInfo().GetShape(), - inferredShapes[0]); + ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "DequantizeLayer"); } void DequantizeLayer::Accept(ILayerVisitor& visitor) const diff --git 
a/src/armnn/layers/DetectionPostProcessLayer.cpp b/src/armnn/layers/DetectionPostProcessLayer.cpp index 2deca322ce..fddf86f573 100644 --- a/src/armnn/layers/DetectionPostProcessLayer.cpp +++ b/src/armnn/layers/DetectionPostProcessLayer.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd. All rights reserved. +// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. // SPDX-License-Identifier: MIT // @@ -36,10 +36,12 @@ DetectionPostProcessLayer* DetectionPostProcessLayer::Clone(Graph& graph) const void DetectionPostProcessLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { - IgnoreUnused(shapeInferenceMethod); - VerifyLayerConnections(2, CHECK_LOCATION()); + const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape(); + + VerifyShapeInferenceType(outputShape, shapeInferenceMethod); + // on this level constant data should not be released. ARMNN_ASSERT_MSG(m_Anchors != nullptr, "DetectionPostProcessLayer: Anchors data should not be null."); @@ -51,22 +53,22 @@ void DetectionPostProcessLayer::ValidateTensorShapesFromInputs(ShapeInferenceMet const TensorShape& inferredDetectionScores = TensorShape({ 1, detectedBoxes }); const TensorShape& inferredNumberDetections = TensorShape({ 1 }); - ConditionalThrowIfNotEqual( - "DetectionPostProcessLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.", - GetOutputSlot(0).GetTensorInfo().GetShape(), - inferredDetectionBoxes); - ConditionalThrowIfNotEqual( - "DetectionPostProcessLayer: TensorShape set on OutputSlot[1] does not match the inferred shape.", - GetOutputSlot(1).GetTensorInfo().GetShape(), - inferredDetectionScores); - ConditionalThrowIfNotEqual( - "DetectionPostProcessLayer: TensorShape set on OutputSlot[2] does not match the inferred shape.", - GetOutputSlot(2).GetTensorInfo().GetShape(), - inferredDetectionScores); - ConditionalThrowIfNotEqual( - "DetectionPostProcessLayer: TensorShape set on OutputSlot[3] does not match the inferred 
shape.", - GetOutputSlot(3).GetTensorInfo().GetShape(), - inferredNumberDetections); + ValidateAndCopyShape(outputShape, inferredDetectionBoxes, shapeInferenceMethod, "DetectionPostProcessLayer"); + + ValidateAndCopyShape(GetOutputSlot(1).GetTensorInfo().GetShape(), + inferredDetectionScores, + shapeInferenceMethod, + "DetectionPostProcessLayer", 1); + + ValidateAndCopyShape(GetOutputSlot(2).GetTensorInfo().GetShape(), + inferredDetectionScores, + shapeInferenceMethod, + "DetectionPostProcessLayer", 2); + + ValidateAndCopyShape(GetOutputSlot(3).GetTensorInfo().GetShape(), + inferredNumberDetections, + shapeInferenceMethod, + "DetectionPostProcessLayer", 3); } Layer::ConstantTensors DetectionPostProcessLayer::GetConstantTensorsByRef() diff --git a/src/armnn/layers/DivisionLayer.cpp b/src/armnn/layers/DivisionLayer.cpp index b27d894512..193b96b6ee 100644 --- a/src/armnn/layers/DivisionLayer.cpp +++ b/src/armnn/layers/DivisionLayer.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd. All rights reserved. +// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. // SPDX-License-Identifier: MIT // diff --git a/src/armnn/layers/ElementwiseBaseLayer.cpp b/src/armnn/layers/ElementwiseBaseLayer.cpp index 44bbd0b839..e063293815 100644 --- a/src/armnn/layers/ElementwiseBaseLayer.cpp +++ b/src/armnn/layers/ElementwiseBaseLayer.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd. All rights reserved. +// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. 
// SPDX-License-Identifier: MIT // @@ -49,10 +49,12 @@ std::vector ElementwiseBaseLayer::InferOutputShapes(const std::vect void ElementwiseBaseLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { - IgnoreUnused(shapeInferenceMethod); - VerifyLayerConnections(2, CHECK_LOCATION()); + const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape(); + + VerifyShapeInferenceType(outputShape, shapeInferenceMethod); + auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape(), GetInputSlot(1).GetConnection()->GetTensorInfo().GetShape() @@ -60,11 +62,7 @@ void ElementwiseBaseLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod s ARMNN_ASSERT(inferredShapes.size() == 1); - std::string msg = GetLayerTypeAsCString(GetType()); - msg += "Layer: TensorShape set on OutputSlot[0] does not match the inferred shape."; - ConditionalThrowIfNotEqual(msg, - GetOutputSlot(0).GetTensorInfo().GetShape(), - inferredShapes[0]); + ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, GetLayerTypeAsCString(GetType())); } } // namespace armnn diff --git a/src/armnn/layers/ElementwiseUnaryLayer.cpp b/src/armnn/layers/ElementwiseUnaryLayer.cpp index 0908f391fb..5592c2070e 100644 --- a/src/armnn/layers/ElementwiseUnaryLayer.cpp +++ b/src/armnn/layers/ElementwiseUnaryLayer.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2019 Arm Ltd. All rights reserved. +// Copyright © 2019 Arm Ltd and Contributors. All rights reserved. 
// SPDX-License-Identifier: MIT // @@ -42,18 +42,17 @@ std::vector ElementwiseUnaryLayer::InferOutputShapes(const std::vec void ElementwiseUnaryLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { - IgnoreUnused(shapeInferenceMethod); - VerifyLayerConnections(1, CHECK_LOCATION()); + const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape(); + + VerifyShapeInferenceType(outputShape, shapeInferenceMethod); + std::vector inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape()}); ARMNN_ASSERT(inferredShapes.size() == 1); - ConditionalThrowIfNotEqual( - "ElementwiseUnaryLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.", - GetOutputSlot(0).GetTensorInfo().GetShape(), - inferredShapes[0]); + ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, GetLayerTypeAsCString(GetType())); } void ElementwiseUnaryLayer::Accept(ILayerVisitor& visitor) const diff --git a/src/armnn/layers/FakeQuantizationLayer.cpp b/src/armnn/layers/FakeQuantizationLayer.cpp index 7ed6d7527b..3a1d0d1a50 100644 --- a/src/armnn/layers/FakeQuantizationLayer.cpp +++ b/src/armnn/layers/FakeQuantizationLayer.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd. All rights reserved. +// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. 
// SPDX-License-Identifier: MIT // #include "FakeQuantizationLayer.hpp" @@ -31,18 +31,17 @@ FakeQuantizationLayer* FakeQuantizationLayer::Clone(Graph& graph) const void FakeQuantizationLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { - IgnoreUnused(shapeInferenceMethod); - VerifyLayerConnections(1, CHECK_LOCATION()); + const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape(); + + VerifyShapeInferenceType(outputShape, shapeInferenceMethod); + auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() }); ARMNN_ASSERT(inferredShapes.size() == 1); - ConditionalThrowIfNotEqual( - "FakeQuantizationLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.", - GetOutputSlot(0).GetTensorInfo().GetShape(), - inferredShapes[0]); + ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "FakeQuantizationLayer"); } void FakeQuantizationLayer::Accept(ILayerVisitor& visitor) const diff --git a/src/armnn/layers/FillLayer.cpp b/src/armnn/layers/FillLayer.cpp index 688486b368..174fcf72bb 100644 --- a/src/armnn/layers/FillLayer.cpp +++ b/src/armnn/layers/FillLayer.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2020 Arm Ltd. All rights reserved. +// Copyright © 2020 Arm Ltd and Contributors. All rights reserved. 
// SPDX-License-Identifier: MIT // #include "FillLayer.hpp" @@ -31,11 +31,13 @@ FillLayer* FillLayer::Clone(Graph& graph) const void FillLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { - IgnoreUnused(shapeInferenceMethod); - VerifyLayerConnections(1, CHECK_LOCATION()); - auto inferredShapes = InferOutputShapes( { GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() }); + const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape(); + + VerifyShapeInferenceType(outputShape, shapeInferenceMethod); + + auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() }); ARMNN_ASSERT(inferredShapes.size() == 1); diff --git a/src/armnn/layers/FloorLayer.cpp b/src/armnn/layers/FloorLayer.cpp index 9e46ebf5c2..04d847ee10 100644 --- a/src/armnn/layers/FloorLayer.cpp +++ b/src/armnn/layers/FloorLayer.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd. All rights reserved. +// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. 
// SPDX-License-Identifier: MIT // #include "FloorLayer.hpp" @@ -31,18 +31,16 @@ FloorLayer* FloorLayer::Clone(Graph& graph) const void FloorLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { - IgnoreUnused(shapeInferenceMethod); - VerifyLayerConnections(1, CHECK_LOCATION()); - auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() }); + const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape(); + VerifyShapeInferenceType(outputShape, shapeInferenceMethod); + + auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() }); ARMNN_ASSERT(inferredShapes.size() == 1); - ConditionalThrowIfNotEqual( - "FloorLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.", - GetOutputSlot(0).GetTensorInfo().GetShape(), - inferredShapes[0]); + ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "FloorLayer"); } void FloorLayer::Accept(ILayerVisitor& visitor) const diff --git a/src/armnn/layers/FullyConnectedLayer.cpp b/src/armnn/layers/FullyConnectedLayer.cpp index bd947b7678..174459b565 100644 --- a/src/armnn/layers/FullyConnectedLayer.cpp +++ b/src/armnn/layers/FullyConnectedLayer.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd. All rights reserved. +// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. 
// SPDX-License-Identifier: MIT // #include "FullyConnectedLayer.hpp" @@ -65,21 +65,20 @@ void FullyConnectedLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod sh { IgnoreUnused(shapeInferenceMethod); - VerifyLayerConnections(1, CHECK_LOCATION()); + const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape(); + + VerifyShapeInferenceType(outputShape, shapeInferenceMethod); // check if we m_Weight data is not nullptr ARMNN_ASSERT_MSG(m_Weight != nullptr, "FullyConnectedLayer: Weights data should not be null."); - auto inferredShapes = InferOutputShapes({ - GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape(), - m_Weight->GetTensorInfo().GetShape() }); + auto inferredShapes = InferOutputShapes({GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape(), + m_Weight->GetTensorInfo().GetShape() }); ARMNN_ASSERT(inferredShapes.size() == 1); + ARMNN_ASSERT(inferredShapes[0].GetDimensionality() == Dimensionality::Specified); - ConditionalThrowIfNotEqual( - "FullyConnectedLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.", - GetOutputSlot(0).GetTensorInfo().GetShape(), - inferredShapes[0]); + ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "FullyConnectedLayer"); } Layer::ConstantTensors FullyConnectedLayer::GetConstantTensorsByRef() diff --git a/src/armnn/layers/GatherLayer.cpp b/src/armnn/layers/GatherLayer.cpp index 3e85d25dac..52bf4324a2 100644 --- a/src/armnn/layers/GatherLayer.cpp +++ b/src/armnn/layers/GatherLayer.cpp @@ -31,10 +31,12 @@ GatherLayer* GatherLayer::Clone(Graph& graph) const void GatherLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { - IgnoreUnused(shapeInferenceMethod); - VerifyLayerConnections(2, CHECK_LOCATION()); + const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape(); + + VerifyShapeInferenceType(outputShape, shapeInferenceMethod); + const TensorInfo& params = GetInputSlot(0).GetConnection()->GetTensorInfo(); 
const TensorInfo& indices = GetInputSlot(1).GetConnection()->GetTensorInfo(); @@ -66,10 +68,7 @@ void GatherLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInfer const TensorShape& inferredShape = TensorShape(outputDim, dimSizes.data()); - ConditionalThrowIfNotEqual( - "GatherLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.", - GetOutputSlot(0).GetTensorInfo().GetShape(), - inferredShape); + ValidateAndCopyShape(outputShape, inferredShape, shapeInferenceMethod, "GatherLayer"); } void GatherLayer::Accept(ILayerVisitor& visitor) const diff --git a/src/armnn/layers/InputLayer.cpp b/src/armnn/layers/InputLayer.cpp index 42ce153346..5a78ecc981 100644 --- a/src/armnn/layers/InputLayer.cpp +++ b/src/armnn/layers/InputLayer.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd. All rights reserved. +// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. // SPDX-License-Identifier: MIT // #include "InputLayer.hpp" diff --git a/src/armnn/layers/InstanceNormalizationLayer.cpp b/src/armnn/layers/InstanceNormalizationLayer.cpp index f79e423c8c..4f753e21bf 100644 --- a/src/armnn/layers/InstanceNormalizationLayer.cpp +++ b/src/armnn/layers/InstanceNormalizationLayer.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2019 Arm Ltd. All rights reserved. +// Copyright © 2019 Arm Ltd and Contributors. All rights reserved. 
// SPDX-License-Identifier: MIT // #include "InstanceNormalizationLayer.hpp" @@ -31,18 +31,17 @@ InstanceNormalizationLayer* InstanceNormalizationLayer::Clone(Graph& graph) cons void InstanceNormalizationLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { - IgnoreUnused(shapeInferenceMethod); - VerifyLayerConnections(1, CHECK_LOCATION()); + const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape(); + + VerifyShapeInferenceType(outputShape, shapeInferenceMethod); + auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() }); ARMNN_ASSERT(inferredShapes.size() == 1); - ConditionalThrowIfNotEqual( - "InstanceNormalizationLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.", - GetOutputSlot(0).GetTensorInfo().GetShape(), - inferredShapes[0]); + ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "InstanceNormalizationLayer"); } void InstanceNormalizationLayer::Accept(ILayerVisitor& visitor) const diff --git a/src/armnn/layers/L2NormalizationLayer.cpp b/src/armnn/layers/L2NormalizationLayer.cpp index 4a2945efbe..d4ac8019cf 100644 --- a/src/armnn/layers/L2NormalizationLayer.cpp +++ b/src/armnn/layers/L2NormalizationLayer.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd. All rights reserved. +// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. 
// SPDX-License-Identifier: MIT // #include "L2NormalizationLayer.hpp" @@ -31,18 +31,17 @@ L2NormalizationLayer* L2NormalizationLayer::Clone(Graph& graph) const void L2NormalizationLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { - IgnoreUnused(shapeInferenceMethod); - VerifyLayerConnections(1, CHECK_LOCATION()); + const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape(); + + VerifyShapeInferenceType(outputShape, shapeInferenceMethod); + auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() }); ARMNN_ASSERT(inferredShapes.size() == 1); - ConditionalThrowIfNotEqual( - "L2NormalizationLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.", - GetOutputSlot(0).GetTensorInfo().GetShape(), - inferredShapes[0]); + ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "L2NormalizationLayer"); } void L2NormalizationLayer::Accept(ILayerVisitor& visitor) const diff --git a/src/armnn/layers/LogSoftmaxLayer.cpp b/src/armnn/layers/LogSoftmaxLayer.cpp index ab05fd24f3..f834ccef9d 100644 --- a/src/armnn/layers/LogSoftmaxLayer.cpp +++ b/src/armnn/layers/LogSoftmaxLayer.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2019 Arm Ltd. All rights reserved. +// Copyright © 2019 Arm Ltd and Contributors. All rights reserved. 
// SPDX-License-Identifier: MIT // @@ -31,17 +31,16 @@ LogSoftmaxLayer* LogSoftmaxLayer::Clone(Graph& graph) const void LogSoftmaxLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { - IgnoreUnused(shapeInferenceMethod); - VerifyLayerConnections(1, CHECK_LOCATION()); + const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape(); + + VerifyShapeInferenceType(outputShape, shapeInferenceMethod); + auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() }); ARMNN_ASSERT(inferredShapes.size() == 1); - ConditionalThrowIfNotEqual( - "LogSoftmaxLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.", - GetOutputSlot(0).GetTensorInfo().GetShape(), - inferredShapes[0]); + ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "LogSoftmaxLayer"); } void LogSoftmaxLayer::Accept(ILayerVisitor& visitor) const diff --git a/src/armnn/layers/LstmLayer.cpp b/src/armnn/layers/LstmLayer.cpp index af708e4e06..44f5d1f40b 100644 --- a/src/armnn/layers/LstmLayer.cpp +++ b/src/armnn/layers/LstmLayer.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd. All rights reserved. +// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. 
// SPDX-License-Identifier: MIT // #include "LstmLayer.hpp" @@ -165,15 +165,17 @@ std::vector LstmLayer::InferOutputShapes(const std::vectorGetTensorInfo().GetShape(), GetInputSlot(1).GetConnection()->GetTensorInfo().GetShape(), - GetInputSlot(2).GetConnection()->GetTensorInfo().GetShape()} - ); + GetInputSlot(2).GetConnection()->GetTensorInfo().GetShape() + }); ARMNN_ASSERT(inferredShapes.size() == 4); @@ -206,10 +208,7 @@ void LstmLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferen ARMNN_ASSERT_MSG(m_CifgParameters.m_InputGateBias != nullptr, "LstmLayer: m_CifgParameters.m_InputGateBias should not be null."); - ConditionalThrowIfNotEqual( - "LstmLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.", - GetOutputSlot(0).GetTensorInfo().GetShape(), - inferredShapes[0]); + ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "LstmLayer"); } else { @@ -220,10 +219,7 @@ void LstmLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferen ARMNN_ASSERT_MSG(m_CifgParameters.m_InputGateBias == nullptr, "LstmLayer: m_CifgParameters.m_InputGateBias should not have a value when CIFG is enabled."); - ConditionalThrowIfNotEqual( - "LstmLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.", - GetOutputSlot(0).GetTensorInfo().GetShape(), - inferredShapes[0]); + ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "LstmLayer"); } if (m_Param.m_ProjectionEnabled) @@ -246,18 +242,12 @@ void LstmLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferen "LstmLayer: m_PeepholeParameters.m_CellToOutputWeights should not be null."); } - ConditionalThrowIfNotEqual( - "LstmLayer: TensorShape set on OutputSlot[1] does not match the inferred shape.", - GetOutputSlot(1).GetTensorInfo().GetShape(), - inferredShapes[1]); - ConditionalThrowIfNotEqual( - "LstmLayer: TensorShape set on OutputSlot[2] does not match the inferred shape.", - 
GetOutputSlot(2).GetTensorInfo().GetShape(), - inferredShapes[2]); - ConditionalThrowIfNotEqual( - "LstmLayer: TensorShape set on OutputSlot[3] does not match the inferred shape.", - GetOutputSlot(3).GetTensorInfo().GetShape(), - inferredShapes[3]); + ValidateAndCopyShape( + GetOutputSlot(1).GetTensorInfo().GetShape(), inferredShapes[1], shapeInferenceMethod, "LstmLayer", 1); + ValidateAndCopyShape( + GetOutputSlot(2).GetTensorInfo().GetShape(), inferredShapes[2], shapeInferenceMethod, "LstmLayer", 2); + ValidateAndCopyShape( + GetOutputSlot(3).GetTensorInfo().GetShape(), inferredShapes[3], shapeInferenceMethod, "LstmLayer", 3); if (m_Param.m_LayerNormEnabled) { diff --git a/src/armnn/layers/MaximumLayer.cpp b/src/armnn/layers/MaximumLayer.cpp index bfc42e9ac4..ab7bf88f5f 100644 --- a/src/armnn/layers/MaximumLayer.cpp +++ b/src/armnn/layers/MaximumLayer.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd. All rights reserved. +// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. // SPDX-License-Identifier: MIT // diff --git a/src/armnn/layers/MeanLayer.cpp b/src/armnn/layers/MeanLayer.cpp index 9cefd7d1f8..da635661e1 100644 --- a/src/armnn/layers/MeanLayer.cpp +++ b/src/armnn/layers/MeanLayer.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd. All rights reserved. +// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. 
// SPDX-License-Identifier: MIT // @@ -40,10 +40,12 @@ MeanLayer* MeanLayer::Clone(Graph& graph) const void MeanLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { - IgnoreUnused(shapeInferenceMethod); - VerifyLayerConnections(1, CHECK_LOCATION()); + const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape(); + + VerifyShapeInferenceType(outputShape, shapeInferenceMethod); + const TensorInfo& input = GetInputSlot(0).GetConnection()->GetTensorInfo(); ARMNN_ASSERT_MSG(input.GetNumDimensions() > 0 && input.GetNumDimensions() <= 4, @@ -95,10 +97,7 @@ void MeanLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferen } const TensorShape& inferredShape = TensorShape(outputRank, dimSizes.data()); - ConditionalThrowIfNotEqual( - "MeanLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.", - GetOutputSlot(0).GetTensorInfo().GetShape(), - inferredShape); + ValidateAndCopyShape(outputShape, inferredShape, shapeInferenceMethod, "MeanLayer"); } void MeanLayer::Accept(ILayerVisitor& visitor) const diff --git a/src/armnn/layers/MemCopyLayer.cpp b/src/armnn/layers/MemCopyLayer.cpp index c087290682..b4fe68bd8d 100644 --- a/src/armnn/layers/MemCopyLayer.cpp +++ b/src/armnn/layers/MemCopyLayer.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd. All rights reserved. +// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. 
// SPDX-License-Identifier: MIT // #include "MemCopyLayer.hpp" @@ -35,18 +35,17 @@ std::unique_ptr MemCopyLayer::CreateWorkload(const IWorkloadFactory& void MemCopyLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { - IgnoreUnused(shapeInferenceMethod); - VerifyLayerConnections(1, CHECK_LOCATION()); + const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape(); + + VerifyShapeInferenceType(outputShape, shapeInferenceMethod); + auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() }); ARMNN_ASSERT(inferredShapes.size() == 1); - ConditionalThrowIfNotEqual( - "MemCopyLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.", - GetOutputSlot(0).GetTensorInfo().GetShape(), - inferredShapes[0]); + ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "MemCopyLayer"); } void MemCopyLayer::Accept(ILayerVisitor& visitor) const diff --git a/src/armnn/layers/MemImportLayer.cpp b/src/armnn/layers/MemImportLayer.cpp index 02092f4b4c..a356f054dc 100644 --- a/src/armnn/layers/MemImportLayer.cpp +++ b/src/armnn/layers/MemImportLayer.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd. All rights reserved. +// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. 
// SPDX-License-Identifier: MIT // #include "MemImportLayer.hpp" @@ -35,18 +35,17 @@ std::unique_ptr MemImportLayer::CreateWorkload(const IWorkloadFactory void MemImportLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { - IgnoreUnused(shapeInferenceMethod); - VerifyLayerConnections(1, CHECK_LOCATION()); + const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape(); + + VerifyShapeInferenceType(outputShape, shapeInferenceMethod); + auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() }); ARMNN_ASSERT(inferredShapes.size() == 1); - ConditionalThrowIfNotEqual( - "MemImportLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.", - GetOutputSlot(0).GetTensorInfo().GetShape(), - inferredShapes[0]); + ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "MemImportLayer"); } void MemImportLayer::Accept(ILayerVisitor& visitor) const diff --git a/src/armnn/layers/MergeLayer.cpp b/src/armnn/layers/MergeLayer.cpp index b05eb68d25..d8351c6c40 100644 --- a/src/armnn/layers/MergeLayer.cpp +++ b/src/armnn/layers/MergeLayer.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd. All rights reserved. +// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. 
// SPDX-License-Identifier: MIT // #include "MergeLayer.hpp" @@ -29,10 +29,12 @@ MergeLayer* MergeLayer::Clone(Graph& graph) const void MergeLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { - IgnoreUnused(shapeInferenceMethod); - VerifyLayerConnections(2, CHECK_LOCATION()); + const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape(); + + VerifyShapeInferenceType(outputShape, shapeInferenceMethod); + std::vector inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape(), GetInputSlot(1).GetConnection()->GetTensorInfo().GetShape(), @@ -40,10 +42,7 @@ void MergeLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInfere ARMNN_ASSERT(inferredShapes.size() == 1); - ConditionalThrowIfNotEqual( - "MergeLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.", - GetOutputSlot(0).GetTensorInfo().GetShape(), - inferredShapes[0]); + ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "MergeLayer"); } std::vector MergeLayer::InferOutputShapes(const std::vector& inputShapes) const diff --git a/src/armnn/layers/MinimumLayer.cpp b/src/armnn/layers/MinimumLayer.cpp index fb54c3d7ae..9154d788d5 100644 --- a/src/armnn/layers/MinimumLayer.cpp +++ b/src/armnn/layers/MinimumLayer.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd. All rights reserved. +// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. // SPDX-License-Identifier: MIT // diff --git a/src/armnn/layers/MultiplicationLayer.cpp b/src/armnn/layers/MultiplicationLayer.cpp index dd0303a567..f02ee57a2c 100644 --- a/src/armnn/layers/MultiplicationLayer.cpp +++ b/src/armnn/layers/MultiplicationLayer.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd. All rights reserved. +// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. 
// SPDX-License-Identifier: MIT // diff --git a/src/armnn/layers/NormalizationLayer.cpp b/src/armnn/layers/NormalizationLayer.cpp index 9011ece821..e8176cec22 100644 --- a/src/armnn/layers/NormalizationLayer.cpp +++ b/src/armnn/layers/NormalizationLayer.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd. All rights reserved. +// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. // SPDX-License-Identifier: MIT // #include "NormalizationLayer.hpp" @@ -31,18 +31,17 @@ NormalizationLayer* NormalizationLayer::Clone(Graph& graph) const void NormalizationLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { - IgnoreUnused(shapeInferenceMethod); - VerifyLayerConnections(1, CHECK_LOCATION()); + const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape(); + + VerifyShapeInferenceType(outputShape, shapeInferenceMethod); + auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() }); ARMNN_ASSERT(inferredShapes.size() == 1); - ConditionalThrowIfNotEqual( - "NormalizationLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.", - GetOutputSlot(0).GetTensorInfo().GetShape(), - inferredShapes[0]); + ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "NormalizationLayer"); } void NormalizationLayer::Accept(ILayerVisitor& visitor) const diff --git a/src/armnn/layers/OutputLayer.cpp b/src/armnn/layers/OutputLayer.cpp index 5ff91880f8..db76244ad1 100644 --- a/src/armnn/layers/OutputLayer.cpp +++ b/src/armnn/layers/OutputLayer.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd. All rights reserved. +// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. 
// SPDX-License-Identifier: MIT // #include "OutputLayer.hpp" diff --git a/src/armnn/layers/PadLayer.cpp b/src/armnn/layers/PadLayer.cpp index 690318d308..296bfa901e 100644 --- a/src/armnn/layers/PadLayer.cpp +++ b/src/armnn/layers/PadLayer.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd. All rights reserved. +// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. // SPDX-License-Identifier: MIT // diff --git a/src/armnn/layers/PermuteLayer.cpp b/src/armnn/layers/PermuteLayer.cpp index a585e66538..b9380aa44b 100644 --- a/src/armnn/layers/PermuteLayer.cpp +++ b/src/armnn/layers/PermuteLayer.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd. All rights reserved. +// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. // SPDX-License-Identifier: MIT // @@ -42,18 +42,17 @@ std::vector PermuteLayer::InferOutputShapes(const std::vectorGetTensorInfo().GetShape() }); ARMNN_ASSERT(inferredShapes.size() == 1); - ConditionalThrowIfNotEqual( - "PermuteLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.", - GetOutputSlot(0).GetTensorInfo().GetShape(), - inferredShapes[0]); + ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "PermuteLayer"); } void PermuteLayer::Accept(ILayerVisitor& visitor) const diff --git a/src/armnn/layers/Pooling2dLayer.cpp b/src/armnn/layers/Pooling2dLayer.cpp index defed9758b..bed49ee059 100644 --- a/src/armnn/layers/Pooling2dLayer.cpp +++ b/src/armnn/layers/Pooling2dLayer.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd. All rights reserved. +// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. 
// SPDX-License-Identifier: MIT // @@ -102,18 +102,17 @@ std::vector Pooling2dLayer::InferOutputShapes(const std::vectorGetTensorInfo().GetShape() }); ARMNN_ASSERT(inferredShapes.size() == 1); - ConditionalThrowIfNotEqual( - "Pooling2dLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.", - GetOutputSlot(0).GetTensorInfo().GetShape(), - inferredShapes[0]); + ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "Pooling2dLayer"); } void Pooling2dLayer::Accept(ILayerVisitor& visitor) const diff --git a/src/armnn/layers/PreCompiledLayer.cpp b/src/armnn/layers/PreCompiledLayer.cpp index 577e19f3cc..e043fac432 100644 --- a/src/armnn/layers/PreCompiledLayer.cpp +++ b/src/armnn/layers/PreCompiledLayer.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd. All rights reserved. +// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. // SPDX-License-Identifier: MIT // diff --git a/src/armnn/layers/PreluLayer.cpp b/src/armnn/layers/PreluLayer.cpp index 2527cb91be..fcf50f2590 100644 --- a/src/armnn/layers/PreluLayer.cpp +++ b/src/armnn/layers/PreluLayer.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd. All rights reserved. +// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. 
// SPDX-License-Identifier: MIT // @@ -96,10 +96,12 @@ std::vector PreluLayer::InferOutputShapes(const std::vector inferredShapes = InferOutputShapes( { GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape(), @@ -108,10 +110,7 @@ void PreluLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInfere ARMNN_ASSERT(inferredShapes.size() == 1); - ConditionalThrowIfNotEqual( - "PreluLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.", - GetOutputSlot(0).GetTensorInfo().GetShape(), - inferredShapes[0]); + ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "PreluLayer"); } void PreluLayer::Accept(ILayerVisitor& visitor) const diff --git a/src/armnn/layers/QLstmLayer.cpp b/src/armnn/layers/QLstmLayer.cpp index c5155d7bf9..321d985fed 100644 --- a/src/armnn/layers/QLstmLayer.cpp +++ b/src/armnn/layers/QLstmLayer.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2020 Arm Ltd. All rights reserved. +// Copyright © 2020 Arm Ltd and Contributors. All rights reserved. 
// SPDX-License-Identifier: MIT // #include "QLstmLayer.hpp" @@ -167,15 +167,17 @@ std::vector QLstmLayer::InferOutputShapes(const std::vectorGetTensorInfo().GetShape(), // input GetInputSlot(1).GetConnection()->GetTensorInfo().GetShape(), // previousOutputIn - GetInputSlot(2).GetConnection()->GetTensorInfo().GetShape() // previousCellStateIn + GetInputSlot(2).GetConnection()->GetTensorInfo().GetShape() // previousCellStateIn }); ARMNN_ASSERT(inferredShapes.size() == 3); @@ -209,10 +211,7 @@ void QLstmLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInfere ARMNN_ASSERT_MSG(m_CifgParameters.m_InputGateBias != nullptr, "QLstmLayer: m_CifgParameters.m_InputGateBias should not be null."); - ConditionalThrowIfNotEqual( - "QLstmLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.", - GetOutputSlot(0).GetTensorInfo().GetShape(), - inferredShapes[0]); + ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "QLstmLayer"); } else { @@ -224,10 +223,7 @@ void QLstmLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInfere ARMNN_ASSERT_MSG(m_CifgParameters.m_InputGateBias == nullptr, "QLstmLayer: m_CifgParameters.m_InputGateBias should not have a value when CIFG is enabled."); - ConditionalThrowIfNotEqual( - "QLstmLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.", - GetOutputSlot(0).GetTensorInfo().GetShape(), - inferredShapes[0]); + ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "QLstmLayer"); } if (m_Param.m_ProjectionEnabled) @@ -250,14 +246,10 @@ void QLstmLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInfere "QLstmLayer: m_PeepholeParameters.m_CellToOutputWeights should not be null."); } - ConditionalThrowIfNotEqual( - "QLstmLayer: TensorShape set on OutputSlot[1] does not match the inferred shape.", - GetOutputSlot(1).GetTensorInfo().GetShape(), - inferredShapes[1]); - ConditionalThrowIfNotEqual( - "QLstmLayer: TensorShape set on 
OutputSlot[2] does not match the inferred shape.", - GetOutputSlot(2).GetTensorInfo().GetShape(), - inferredShapes[2]); + ValidateAndCopyShape( + GetOutputSlot(1).GetTensorInfo().GetShape(), inferredShapes[1], shapeInferenceMethod, "QLstmLayer", 1); + ValidateAndCopyShape( + GetOutputSlot(2).GetTensorInfo().GetShape(), inferredShapes[2], shapeInferenceMethod, "QLstmLayer", 2); if (m_Param.m_LayerNormEnabled) { diff --git a/src/armnn/layers/QuantizeLayer.cpp b/src/armnn/layers/QuantizeLayer.cpp index 5cfac25e4b..990d2b4b88 100644 --- a/src/armnn/layers/QuantizeLayer.cpp +++ b/src/armnn/layers/QuantizeLayer.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd. All rights reserved. +// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. // SPDX-License-Identifier: MIT // @@ -31,16 +31,15 @@ Layer* QuantizeLayer::Clone(Graph& graph) const void QuantizeLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { - IgnoreUnused(shapeInferenceMethod); - VerifyLayerConnections(1, CHECK_LOCATION()); + const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape(); + + VerifyShapeInferenceType(outputShape, shapeInferenceMethod); + auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() }); - ConditionalThrowIfNotEqual( - "QuantizeLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.", - GetOutputSlot(0).GetTensorInfo().GetShape(), - inferredShapes[0]); + ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "QuantizeLayer"); } void QuantizeLayer::Accept(ILayerVisitor& visitor) const diff --git a/src/armnn/layers/QuantizedLstmLayer.cpp b/src/armnn/layers/QuantizedLstmLayer.cpp index e26857e01e..432d50dc26 100644 --- a/src/armnn/layers/QuantizedLstmLayer.cpp +++ b/src/armnn/layers/QuantizedLstmLayer.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd. All rights reserved. +// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. 
// SPDX-License-Identifier: MIT // #include "QuantizedLstmLayer.hpp" @@ -93,10 +93,12 @@ std::vector QuantizedLstmLayer::InferOutputShapes(const std::vector void QuantizedLstmLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { - IgnoreUnused(shapeInferenceMethod); - VerifyLayerConnections(3, CHECK_LOCATION()); + const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape(); + + VerifyShapeInferenceType(outputShape, shapeInferenceMethod); + auto inferredShapes = InferOutputShapes( { GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape(), // input @@ -135,15 +137,13 @@ void QuantizedLstmLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod sha "QuantizedLstmLayer: m_QuantizedLstmParameters.m_OutputGateBias should not be null."); // Check output TensorShape(s) match inferred shape - ConditionalThrowIfNotEqual( - "QuantizedLstmLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.", - GetOutputSlot(0).GetTensorInfo().GetShape(), - inferredShapes[0]); - - ConditionalThrowIfNotEqual( - "QuantizedLstmLayer: TensorShape set on OutputSlot[1] does not match the inferred shape.", - GetOutputSlot(1).GetTensorInfo().GetShape(), - inferredShapes[1]); + ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "QuantizedLstmLayer"); + + ValidateAndCopyShape(GetOutputSlot(1).GetTensorInfo().GetShape(), + inferredShapes[1], + shapeInferenceMethod, + "QuantizedLstmLayer", + 1); } Layer::ConstantTensors QuantizedLstmLayer::GetConstantTensorsByRef() diff --git a/src/armnn/layers/RankLayer.cpp b/src/armnn/layers/RankLayer.cpp index f1a24b1633..62f77df16f 100644 --- a/src/armnn/layers/RankLayer.cpp +++ b/src/armnn/layers/RankLayer.cpp @@ -34,11 +34,12 @@ void RankLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferen IgnoreUnused(shapeInferenceMethod); VerifyLayerConnections(1, CHECK_LOCATION()); - ConditionalThrowIfNotEqual( - "Rank: TensorShape set on OutputSlot[0] does not 
match the inferred shape.", - GetOutputSlot(0).GetTensorInfo().GetShape(), {TensorShape{Dimensionality::Scalar}}); -} + const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape(); + const TensorShape inferredShape = TensorShape(Dimensionality::Scalar); + VerifyShapeInferenceType(outputShape, shapeInferenceMethod); + ValidateAndCopyShape(outputShape, inferredShape, shapeInferenceMethod, "RankLayer"); +} void RankLayer::Accept(ILayerVisitor& visitor) const { visitor.VisitRankLayer(this, GetName()); diff --git a/src/armnn/layers/ReshapeLayer.cpp b/src/armnn/layers/ReshapeLayer.cpp index 0257ca9b15..3f955a57b0 100644 --- a/src/armnn/layers/ReshapeLayer.cpp +++ b/src/armnn/layers/ReshapeLayer.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd. All rights reserved. +// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. // SPDX-License-Identifier: MIT // #include "ReshapeLayer.hpp" @@ -38,18 +38,17 @@ std::vector ReshapeLayer::InferOutputShapes(const std::vectorGetTensorInfo().GetShape() }); ARMNN_ASSERT(inferredShapes.size() == 1); - ConditionalThrowIfNotEqual( - "ReshapeLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.", - GetOutputSlot(0).GetTensorInfo().GetShape(), - inferredShapes[0]); + ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "ReshapeLayer"); } void ReshapeLayer::Accept(ILayerVisitor& visitor) const diff --git a/src/armnn/layers/ResizeLayer.cpp b/src/armnn/layers/ResizeLayer.cpp index b07eb9a7d6..ab8430ac00 100644 --- a/src/armnn/layers/ResizeLayer.cpp +++ b/src/armnn/layers/ResizeLayer.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd. All rights reserved. +// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. 
// SPDX-License-Identifier: MIT // @@ -60,18 +60,17 @@ std::vector ResizeLayer::InferOutputShapes(const std::vectorGetTensorInfo().GetShape() }); ARMNN_ASSERT(inferredShapes.size() == 1); - ConditionalThrowIfNotEqual( - "ResizeLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.", - GetOutputSlot(0).GetTensorInfo().GetShape(), - inferredShapes[0]); + ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "ResizeLayer"); } void ResizeLayer::Accept(ILayerVisitor& visitor) const diff --git a/src/armnn/layers/RsqrtLayer.cpp b/src/armnn/layers/RsqrtLayer.cpp index b3aecb27a5..a68b5a4766 100644 --- a/src/armnn/layers/RsqrtLayer.cpp +++ b/src/armnn/layers/RsqrtLayer.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd. All rights reserved. +// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. // SPDX-License-Identifier: MIT // @@ -32,18 +32,17 @@ RsqrtLayer* RsqrtLayer::Clone(Graph& graph) const void RsqrtLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { - IgnoreUnused(shapeInferenceMethod); - VerifyLayerConnections(1, CHECK_LOCATION()); + const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape(); + + VerifyShapeInferenceType(outputShape, shapeInferenceMethod); + auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() }); ARMNN_ASSERT(inferredShapes.size() == 1); - ConditionalThrowIfNotEqual( - "RsqrtLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.", - GetOutputSlot(0).GetTensorInfo().GetShape(), - inferredShapes[0]); + ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "RsqrtLayer"); } void RsqrtLayer::Accept(ILayerVisitor& visitor) const diff --git a/src/armnn/layers/SliceLayer.cpp b/src/armnn/layers/SliceLayer.cpp index 2aa32e3b44..a31f6037e1 100644 --- a/src/armnn/layers/SliceLayer.cpp +++ b/src/armnn/layers/SliceLayer.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2019 Arm 
Ltd. All rights reserved. +// Copyright © 2019 Arm Ltd and Contributors. All rights reserved. // SPDX-License-Identifier: MIT // @@ -35,18 +35,17 @@ SliceLayer* SliceLayer::Clone(Graph& graph) const void SliceLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { - IgnoreUnused(shapeInferenceMethod); - VerifyLayerConnections(1, CHECK_LOCATION()); + const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape(); + + VerifyShapeInferenceType(outputShape, shapeInferenceMethod); + auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() }); ARMNN_ASSERT(inferredShapes.size() == 1); - ConditionalThrowIfNotEqual( - "SliceLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.", - GetOutputSlot(0).GetTensorInfo().GetShape(), - inferredShapes[0]); + ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "SliceLayer"); } std::vector SliceLayer::InferOutputShapes(const std::vector& inputShapes) const diff --git a/src/armnn/layers/SoftmaxLayer.cpp b/src/armnn/layers/SoftmaxLayer.cpp index 56c3792c1b..3da2cb2b00 100644 --- a/src/armnn/layers/SoftmaxLayer.cpp +++ b/src/armnn/layers/SoftmaxLayer.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd. All rights reserved. +// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. 
// SPDX-License-Identifier: MIT // #include "SoftmaxLayer.hpp" @@ -31,18 +31,17 @@ SoftmaxLayer* SoftmaxLayer::Clone(Graph& graph) const void SoftmaxLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { - IgnoreUnused(shapeInferenceMethod); - VerifyLayerConnections(1, CHECK_LOCATION()); + const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape(); + + VerifyShapeInferenceType(outputShape, shapeInferenceMethod); + auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() }); ARMNN_ASSERT(inferredShapes.size() == 1); - ConditionalThrowIfNotEqual( - "SoftmaxLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.", - GetOutputSlot(0).GetTensorInfo().GetShape(), - inferredShapes[0]); + ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "SoftmaxLayer"); } void SoftmaxLayer::Accept(ILayerVisitor& visitor) const diff --git a/src/armnn/layers/SpaceToBatchNdLayer.cpp b/src/armnn/layers/SpaceToBatchNdLayer.cpp index fbc3ca755d..4eba06691c 100644 --- a/src/armnn/layers/SpaceToBatchNdLayer.cpp +++ b/src/armnn/layers/SpaceToBatchNdLayer.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd. All rights reserved. +// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. 
// SPDX-License-Identifier: MIT // @@ -68,19 +68,18 @@ std::vector SpaceToBatchNdLayer::InferOutputShapes(const std::vecto void SpaceToBatchNdLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { - IgnoreUnused(shapeInferenceMethod); - VerifyLayerConnections(1, CHECK_LOCATION()); + const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape(); + + VerifyShapeInferenceType(outputShape, shapeInferenceMethod); + std::vector inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() }); ARMNN_ASSERT(inferredShapes.size() == 1); - ConditionalThrowIfNotEqual( - "SpaceToBatchNdLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.", - GetOutputSlot(0).GetTensorInfo().GetShape(), - inferredShapes[0]); + ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "SpaceToBatchNdLayer"); } void SpaceToBatchNdLayer::Accept(ILayerVisitor& visitor) const diff --git a/src/armnn/layers/SpaceToDepthLayer.cpp b/src/armnn/layers/SpaceToDepthLayer.cpp index 5c8e2d4551..4695d812bc 100644 --- a/src/armnn/layers/SpaceToDepthLayer.cpp +++ b/src/armnn/layers/SpaceToDepthLayer.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd. All rights reserved. +// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. 
// SPDX-License-Identifier: MIT // @@ -61,19 +61,18 @@ std::vector SpaceToDepthLayer::InferOutputShapes(const std::vector< void SpaceToDepthLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { - IgnoreUnused(shapeInferenceMethod); - VerifyLayerConnections(1, CHECK_LOCATION()); + const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape(); + + VerifyShapeInferenceType(outputShape, shapeInferenceMethod); + std::vector inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() }); ARMNN_ASSERT(inferredShapes.size() == 1); - ConditionalThrowIfNotEqual( - "SpaceToDepthLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.", - GetOutputSlot(0).GetTensorInfo().GetShape(), - inferredShapes[0]); + ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "SpaceToDepthLayer"); } void SpaceToDepthLayer::Accept(ILayerVisitor& visitor) const diff --git a/src/armnn/layers/SplitterLayer.cpp b/src/armnn/layers/SplitterLayer.cpp index 9455c88041..60dc9611e8 100644 --- a/src/armnn/layers/SplitterLayer.cpp +++ b/src/armnn/layers/SplitterLayer.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd. All rights reserved. +// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. 
// SPDX-License-Identifier: MIT // #include "SplitterLayer.hpp" @@ -141,7 +141,10 @@ std::vector SplitterLayer::InferOutputShapes(const std::vector views; for (unsigned int viewIdx = 0; viewIdx < m_Param.GetNumViews(); viewIdx++) @@ -156,10 +159,11 @@ void SplitterLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInf for (unsigned int viewIdx = 0; viewIdx < m_Param.GetNumViews(); viewIdx++) { - ConditionalThrowIfNotEqual( - "SplitterLayer: View sizes must match output tensor shapes.", - GetOutputSlot(viewIdx).GetTensorInfo().GetShape(), - inferredShapes[viewIdx]); + ValidateAndCopyShape(GetOutputSlot(viewIdx).GetTensorInfo().GetShape(), + inferredShapes[viewIdx], + shapeInferenceMethod, + "SplitterLayer", + viewIdx); } } diff --git a/src/armnn/layers/StackLayer.cpp b/src/armnn/layers/StackLayer.cpp index 6e81890290..f5d761bdc5 100644 --- a/src/armnn/layers/StackLayer.cpp +++ b/src/armnn/layers/StackLayer.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd. All rights reserved. +// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. 
// SPDX-License-Identifier: MIT // #include "StackLayer.hpp" @@ -60,8 +60,6 @@ std::vector StackLayer::InferOutputShapes(const std::vector( "StackLayer: Num Input Slots must match Num Inputs.", @@ -70,6 +68,10 @@ void StackLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInfere VerifyLayerConnections(m_Param.m_NumInputs, CHECK_LOCATION()); + const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape(); + + VerifyShapeInferenceType(outputShape, shapeInferenceMethod); + // Constructs and validates input shapes std::vector inputShapes; for (unsigned int i = 0; i < GetNumInputSlots(); ++i) @@ -88,10 +90,7 @@ void StackLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInfere ARMNN_ASSERT(inferredShapes.size() == 1); - ConditionalThrowIfNotEqual( - "StackLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.", - GetOutputSlot(0).GetTensorInfo().GetShape(), - inferredShapes[0]); + ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "StackLayer"); } void StackLayer::Accept(ILayerVisitor& visitor) const diff --git a/src/armnn/layers/StandInLayer.cpp b/src/armnn/layers/StandInLayer.cpp index d79caf613c..623f4a5b3f 100644 --- a/src/armnn/layers/StandInLayer.cpp +++ b/src/armnn/layers/StandInLayer.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2019 Arm Ltd. All rights reserved. +// Copyright © 2019 Arm Ltd and Contributors. All rights reserved. // SPDX-License-Identifier: MIT // diff --git a/src/armnn/layers/StridedSliceLayer.cpp b/src/armnn/layers/StridedSliceLayer.cpp index fbe9815c06..fc9df856ec 100644 --- a/src/armnn/layers/StridedSliceLayer.cpp +++ b/src/armnn/layers/StridedSliceLayer.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd. All rights reserved. +// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. 
// SPDX-License-Identifier: MIT // #include "StridedSliceLayer.hpp" @@ -96,18 +96,17 @@ std::vector StridedSliceLayer::InferOutputShapes( void StridedSliceLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { - IgnoreUnused(shapeInferenceMethod); - VerifyLayerConnections(1, CHECK_LOCATION()); + const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape(); + + VerifyShapeInferenceType(outputShape, shapeInferenceMethod); + auto inferredShapes = InferOutputShapes({GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape()}); ARMNN_ASSERT(inferredShapes.size() == 1); - ConditionalThrowIfNotEqual( - "StridedSlice: TensorShape set on OutputSlot[0] does not match the inferred shape.", - GetOutputSlot(0).GetTensorInfo().GetShape(), - inferredShapes[0]); + ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "StridedSliceLayer"); } void StridedSliceLayer::Accept(ILayerVisitor& visitor) const diff --git a/src/armnn/layers/SubtractionLayer.cpp b/src/armnn/layers/SubtractionLayer.cpp index 0797742aea..82c96428a5 100644 --- a/src/armnn/layers/SubtractionLayer.cpp +++ b/src/armnn/layers/SubtractionLayer.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd. All rights reserved. +// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. // SPDX-License-Identifier: MIT // diff --git a/src/armnn/layers/SwitchLayer.cpp b/src/armnn/layers/SwitchLayer.cpp index d408de89e7..b763b0804c 100644 --- a/src/armnn/layers/SwitchLayer.cpp +++ b/src/armnn/layers/SwitchLayer.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd. All rights reserved. +// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. 
// SPDX-License-Identifier: MIT // #include "SwitchLayer.hpp" @@ -29,28 +29,25 @@ SwitchLayer* SwitchLayer::Clone(Graph& graph) const void SwitchLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { - IgnoreUnused(shapeInferenceMethod); - VerifyLayerConnections(2, CHECK_LOCATION()); + const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape(); + + VerifyShapeInferenceType(outputShape, shapeInferenceMethod); + ARMNN_ASSERT_MSG(GetNumOutputSlots() == 2, "SwitchLayer: The layer should return 2 outputs."); // Assuming first input is the Input and second input is the Constant std::vector inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape(), - GetInputSlot(1).GetConnection()->GetTensorInfo().GetShape() }); + GetInputSlot(1).GetConnection()->GetTensorInfo().GetShape()}); ARMNN_ASSERT(inferredShapes.size() == 2); - ConditionalThrowIfNotEqual( - "SwitchLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.", - GetOutputSlot(0).GetTensorInfo().GetShape(), - inferredShapes[0]); + ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "SwitchLayer"); - ConditionalThrowIfNotEqual( - "SwitchLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.", - GetOutputSlot(1).GetTensorInfo().GetShape(), - inferredShapes[0]); + ValidateAndCopyShape( + GetOutputSlot(1).GetTensorInfo().GetShape(), inferredShapes[1], shapeInferenceMethod, "SwitchLayer", 1); } void SwitchLayer::Accept(ILayerVisitor& visitor) const diff --git a/src/armnn/layers/TransposeConvolution2dLayer.cpp b/src/armnn/layers/TransposeConvolution2dLayer.cpp index 8a264253e0..7074be9659 100644 --- a/src/armnn/layers/TransposeConvolution2dLayer.cpp +++ b/src/armnn/layers/TransposeConvolution2dLayer.cpp @@ -1,13 +1,11 @@ // -// Copyright © 2017 Arm Ltd. All rights reserved. +// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. 
// SPDX-License-Identifier: MIT // #include "TransposeConvolution2dLayer.hpp" #include "LayerCloneBase.hpp" -#include - #include #include @@ -105,10 +103,12 @@ std::vector TransposeConvolution2dLayer::InferOutputShapes( void TransposeConvolution2dLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { - IgnoreUnused(shapeInferenceMethod); - VerifyLayerConnections(1, CHECK_LOCATION()); + const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape(); + + VerifyShapeInferenceType(outputShape, shapeInferenceMethod); + ARMNN_ASSERT_MSG(m_Weight != nullptr, "TransposeConvolution2dLayer: Weight data cannot be null."); std::vector expectedOutputShape; @@ -127,10 +127,7 @@ void TransposeConvolution2dLayer::ValidateTensorShapesFromInputs(ShapeInferenceM ARMNN_ASSERT(expectedOutputShape.size() == 1); - ConditionalThrowIfNotEqual( - "TransposeConvolution2dLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.", - GetOutputSlot(0).GetTensorInfo().GetShape(), - expectedOutputShape[0]); + ValidateAndCopyShape(outputShape, expectedOutputShape[0], shapeInferenceMethod, "TransposeConvolution2dLayer"); } Layer::ConstantTensors TransposeConvolution2dLayer::GetConstantTensorsByRef() diff --git a/src/armnn/layers/TransposeLayer.cpp b/src/armnn/layers/TransposeLayer.cpp index 90f8d1a7db..7dfb003019 100644 --- a/src/armnn/layers/TransposeLayer.cpp +++ b/src/armnn/layers/TransposeLayer.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2020 Arm Ltd. All rights reserved. +// Copyright © 2020 Arm Ltd and Contributors. All rights reserved. 
// SPDX-License-Identifier: MIT // @@ -42,18 +42,17 @@ std::vector TransposeLayer::InferOutputShapes(const std::vectorGetTensorInfo().GetShape() }); ARMNN_ASSERT(inferredShapes.size() == 1); - ConditionalThrowIfNotEqual( - "TransposeLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.", - GetOutputSlot(0).GetTensorInfo().GetShape(), - inferredShapes[0]); + ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "TransposeLayer"); } void TransposeLayer::Accept(ILayerVisitor& visitor) const diff --git a/src/armnn/test/FlowControl.cpp b/src/armnn/test/FlowControl.cpp index 3bc993b33b..6198ca8a24 100644 --- a/src/armnn/test/FlowControl.cpp +++ b/src/armnn/test/FlowControl.cpp @@ -28,13 +28,18 @@ BOOST_AUTO_TEST_CASE(ErrorOnLoadNetwork) std::vector falseData = {0}; ConstTensor falseTensor(armnn::TensorInfo({1}, armnn::DataType::Boolean), falseData); - IConnectableLayer* constLayer = net->AddConstantLayer(falseTensor, "const"); + IConnectableLayer* constLayer = net->AddConstantLayer(falseTensor, "const"); constLayer->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo({1}, armnn::DataType::Boolean)); IConnectableLayer* input = net->AddInputLayer(0); + input->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo({1}, armnn::DataType::Boolean)); IConnectableLayer* switchLayer = net->AddSwitchLayer("switch"); + switchLayer->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo({1}, armnn::DataType::Boolean)); + switchLayer->GetOutputSlot(1).SetTensorInfo(armnn::TensorInfo({1}, armnn::DataType::Boolean)); + IConnectableLayer* mergeLayer = net->AddMergeLayer("merge"); + mergeLayer->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo({1}, armnn::DataType::Boolean)); IConnectableLayer* output = net->AddOutputLayer(0); diff --git a/src/armnn/test/ShapeInferenceTests.cpp b/src/armnn/test/ShapeInferenceTests.cpp new file mode 100644 index 0000000000..21df1f0e13 --- /dev/null +++ b/src/armnn/test/ShapeInferenceTests.cpp @@ -0,0 +1,694 @@ +// +// Copyright © 
2020 Arm Ltd and Contributors. All rights reserved. +// SPDX-License-Identifier: MIT +// + +#include + +#include +#include +#include +#include +#include +#include + +#include + +BOOST_AUTO_TEST_SUITE(ShapeInferenceTests) +using namespace armnn; +namespace +{ + +constexpr const bool maskPermutations[6][4] = {{false, false, false, false}, + {true, false, false, false}, + {false, true, false, false}, + {false, false, true, false}, + {false, false, false, true}, + {true, true, true, true}}; + +template +LayerT* BuildGraph(Graph* graph, const std::vector& inputShapes, Args &&... args) +{ + auto layer = graph->AddLayer(std::forward(args)...); + + uint32_t inputCount = 0; + for (auto inputShape : inputShapes) + { + TensorInfo inputTensorInfo(inputShape, DataType::Float32); + + auto input = graph->AddLayer(static_cast(inputCount), "input"); + input->GetOutputSlot().SetTensorInfo(inputTensorInfo); + input->GetOutputSlot().Connect(layer->GetInputSlot(inputCount)); + inputCount++; + } + + return layer; +} + +template +void RunShapeInferenceTest(LayerT* const layer, + const std::vector> dimensionSizeLists) +{ + std::vector numDimensions; + std::vector expectedOutputShapes; + + for (auto dimensionSizeList : dimensionSizeLists) + { + numDimensions.emplace_back(dimensionSizeList.size()); + expectedOutputShapes.emplace_back(TensorShape(dimensionSizeList)); + } + + const unsigned int outputSize = layer->GetNumOutputSlots(); + + const auto runTestWithMask = [&](const bool maskPermutations[], ShapeInferenceMethod shapeInferenceMethod) + { + for (unsigned int i = 0; i < outputSize; ++i) + { + layer->GetOutputSlot(i).SetTensorInfo({{numDimensions[i], dimensionSizeLists[i].begin(), maskPermutations}, + DataType::Float32}); + } + + layer->ValidateTensorShapesFromInputs(shapeInferenceMethod); + + for (unsigned int i = 0; i < outputSize; ++i) + { + BOOST_CHECK(layer->GetOutputSlot(i).GetTensorInfo().GetShape() == expectedOutputShapes[i]); + } + }; + + // Test inference with 
Dimensionality::NotSpecified + for (unsigned int j = 0; j < outputSize; ++j) + { + layer->GetOutputSlot(j).SetTensorInfo({TensorShape(Dimensionality::NotSpecified), DataType::Float32}); + } + + BOOST_CHECK_THROW( + layer->ValidateTensorShapesFromInputs(ShapeInferenceMethod::ValidateOnly), LayerValidationException); + + layer->ValidateTensorShapesFromInputs(ShapeInferenceMethod::InferAndValidate); + + for (unsigned int i = 0; i < outputSize; ++i) + { + BOOST_CHECK(layer->GetOutputSlot(i).GetTensorInfo().GetShape() == expectedOutputShapes[i]); + } + + // Test inference with Dimensionality::Specified and various combinations of dimensions of unknown size + for (unsigned int i = 0; i <= numDimensions[0]; ++i) + { + runTestWithMask(maskPermutations[i], ShapeInferenceMethod::InferAndValidate); + } + + // maskPermutations[5] equates to all dimensions being known + runTestWithMask(maskPermutations[5], ShapeInferenceMethod::ValidateOnly); + + BOOST_CHECK_THROW( + runTestWithMask(maskPermutations[5], ShapeInferenceMethod::InferAndValidate), LayerValidationException); +} + +template +void CreateGraphAndRunTest(const std::vector& inputShapes, + const std::vector> dimensionSizeLists, + Args &&... 
args) +{ + Graph graph; + + auto layer = BuildGraph(&graph, inputShapes, std::forward(args)...); + + RunShapeInferenceTest(layer, dimensionSizeLists); +} + +BOOST_AUTO_TEST_CASE(AbsTest) +{ + ActivationDescriptor descriptor; + descriptor.m_Function = ActivationFunction::Abs; + CreateGraphAndRunTest({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, descriptor, "activation"); +} + +BOOST_AUTO_TEST_CASE(AdditionTest) +{ + CreateGraphAndRunTest({{ 5, 7, 6, 2 }, { 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, "add"); +} + +BOOST_AUTO_TEST_CASE(ArgMinMaxTest) +{ + armnn::ArgMinMaxDescriptor descriptor; + descriptor.m_Function = ArgMinMaxFunction::Min; + descriptor.m_Axis = 1; + + CreateGraphAndRunTest({{ 1, 3, 2, 4 }}, {{ 1, 2, 4 }}, descriptor, "argMinMax"); +} + +BOOST_AUTO_TEST_CASE(BatchNormalizationTest) +{ + BatchNormalizationDescriptor descriptor; + CreateGraphAndRunTest({{ 1, 2, 3, 2 }}, {{ 1, 2, 3, 2 }}, descriptor, "batchNorm"); +} + +BOOST_AUTO_TEST_CASE(BatchToSpaceNdTest) +{ + BatchToSpaceNdDescriptor descriptor; + + std::vector blockShape {2, 2}; + std::vector> crops = {{0, 0}, {0, 0}}; + + descriptor.m_BlockShape = blockShape; + descriptor.m_Crops = crops; + descriptor.m_DataLayout = DataLayout::NHWC; + + CreateGraphAndRunTest({{ 4, 2, 2, 1 }}, {{ 1, 4, 4, 1 }}, descriptor, "batchtospacend"); +} + +BOOST_AUTO_TEST_CASE(ComparisionTest) +{ + ComparisonDescriptor descriptor; + descriptor.m_Operation = ComparisonOperation::Equal; + CreateGraphAndRunTest({{ 5, 7, 6, 2 }, { 5, 7, 6, 2 }}, + {{ 5, 7, 6, 2 }}, + descriptor, + "comparision"); +} + +BOOST_AUTO_TEST_CASE(ConcatTest) +{ + ConcatDescriptor descriptor(2, 3); + + descriptor.SetViewOriginCoord(0, 0, 0); + descriptor.SetViewOriginCoord(1, 0, 1); + + CreateGraphAndRunTest({{ 1, 2, 1 }, { 1, 2, 1 }}, {{ 2, 2, 1 }}, descriptor, "concat"); +} + +BOOST_AUTO_TEST_CASE(ConstantTesst) +{ + Graph graph; + TensorShape outputShape{ 1, 1, 3, 3 }; + auto layer = BuildGraph(&graph, {}, "constant"); + + const float Datum = 0.0f; + ConstTensor 
output0({outputShape, DataType::Float32}, &Datum); + layer->m_LayerOutput = std::make_unique(output0); + + layer->GetOutputSlot(0).SetTensorInfo({{1, 1, 3, 3}, DataType::Float32}); + + layer->ValidateTensorShapesFromInputs(ShapeInferenceMethod::ValidateOnly); + + BOOST_CHECK(layer->GetOutputSlot(0).GetTensorInfo().GetShape() == outputShape); +} + +BOOST_AUTO_TEST_CASE(ConvertBf16ToFp32Test) +{ + CreateGraphAndRunTest({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, "floor"); +} + +BOOST_AUTO_TEST_CASE(ConvertFp16ToBf16Test) +{ + const TensorShape tensorShape{5, 7, 6, 2}; + CreateGraphAndRunTest({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, "floor"); +} + +BOOST_AUTO_TEST_CASE(ConvertFp16ToFp32Test) +{ + CreateGraphAndRunTest({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, "floor"); +} + +BOOST_AUTO_TEST_CASE(ConvertFp32ToFp16Test) +{ + CreateGraphAndRunTest({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, "floor"); +} + +BOOST_AUTO_TEST_CASE(Convolution2dTest) +{ + const TensorShape inputShape{1, 1, 10, 10}; + + Graph graph; + + Convolution2dDescriptor descriptor; + + descriptor.m_PadLeft = 0; + descriptor.m_PadTop = 0; + descriptor.m_PadRight = 0; + descriptor.m_PadBottom = 0; + descriptor.m_StrideX = 1; + descriptor.m_StrideY = 1; + descriptor.m_DilationX = 3; + descriptor.m_DilationY = 3; + + auto layer = BuildGraph(&graph, + {inputShape}, + descriptor, + "conv2d"); + + const float Datum = 0.0f; + ConstTensor weights({{1, 1, 3, 3}, DataType::Float32}, &Datum); + layer->m_Weight = std::make_unique(weights); + + RunShapeInferenceTest(layer, {{ 1, 1, 4, 4 }}); +} + +BOOST_AUTO_TEST_CASE(DebugLayerTest) +{ + const TensorShape tensorShape; + CreateGraphAndRunTest({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, "debug"); +} + +BOOST_AUTO_TEST_CASE(DepthToSpaceTest) +{ + DepthToSpaceDescriptor descriptor; + + descriptor.m_BlockSize = 2; + descriptor.m_DataLayout = DataLayout::NHWC; + + CreateGraphAndRunTest({{ 1, 1, 1, 8}}, {{ 1, 2, 2, 2 }}, descriptor, "depthtospace"); +} + +BOOST_AUTO_TEST_CASE(DepthwiseConvolutionTest) +{ + 
DepthwiseConvolution2dDescriptor descriptor; + + descriptor.m_StrideX = 2; + descriptor.m_StrideY = 1; + descriptor.m_PadLeft = 0; + descriptor.m_PadRight = 0; + descriptor.m_PadTop = 1; + descriptor.m_PadBottom = 1; + descriptor.m_DilationX = 0; + descriptor.m_DilationY = 0; + descriptor.m_DataLayout = DataLayout::NHWC; + descriptor.m_BiasEnabled = false; + + Graph graph; + + auto layer = BuildGraph(&graph, + {{ 8, 16, 2, 1 }}, + descriptor, + "depthwiseconv2d"); + + const float Datum = 0.0f; + ConstTensor weights({{ 2, 5, 3, 2 }, DataType::Float32}, &Datum); + layer->m_Weight = std::make_unique(weights); + + RunShapeInferenceTest(layer, {{ 8, 18, 1, 2 }}); +} + +BOOST_AUTO_TEST_CASE(DequantizeTest) +{ + const TensorShape tensorShape{5, 7, 6, 2}; + CreateGraphAndRunTest({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, "dequantize"); +} + +BOOST_AUTO_TEST_CASE(DetectionPostProcessTest) +{ + const TensorShape detectionBoxesInfo{ 1, 3, 4 }; + const TensorShape detectionScoresInfo{ 1, 3, 4 }; + const TensorShape detectionClassesInfo{ 1, 3, 4 }; + + armnn::DetectionPostProcessDescriptor descriptor; + descriptor.m_UseRegularNms = true; + descriptor.m_MaxDetections = 3; + descriptor.m_MaxClassesPerDetection = 1; + descriptor.m_DetectionsPerClass =1; + descriptor.m_NmsScoreThreshold = 0.0; + descriptor.m_NmsIouThreshold = 0.5; + descriptor.m_NumClasses = 2; + descriptor.m_ScaleY = 10.0; + descriptor.m_ScaleX = 10.0; + descriptor.m_ScaleH = 5.0; + descriptor.m_ScaleW = 5.0; + + const float Datum = 0.0f; + ConstTensor anchorsTensor({{1, 1, 3, 3}, DataType::Float32}, &Datum); + + Graph graph; + + auto layer = BuildGraph(&graph, + {detectionBoxesInfo, detectionScoresInfo}, + descriptor, + "detectionpostprocess"); + + layer->m_Anchors = std::make_unique(anchorsTensor); + + RunShapeInferenceTest(layer, {{ 1, 3, 4 }, { 1, 3 }, { 1, 3 }, { 1 }}); +} + +BOOST_AUTO_TEST_CASE(FakeQuantizationTest) +{ + FakeQuantizationDescriptor descriptor; + descriptor.m_Max = 1; + descriptor.m_Min = 1; + 
CreateGraphAndRunTest({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, descriptor, "fakequantization"); +} + +BOOST_AUTO_TEST_CASE(FloorTest) +{ + const TensorShape tensorShape{5, 7, 6, 2}; + CreateGraphAndRunTest({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, "floor"); +} + +BOOST_AUTO_TEST_CASE(FullyConnectedTest) +{ + Graph graph; + + const unsigned int inputWidth = 3u; + const unsigned int inputHeight = 2u; + const unsigned int inputChannels = 1u; + const unsigned int outputChannels = 2u; + + auto layer = BuildGraph(&graph, + {{1, inputChannels, inputHeight, inputWidth}}, + FullyConnectedDescriptor(), + "fc"); + + + const float Datum = 0.0f; + ConstTensor weights({{inputChannels, outputChannels}, DataType::Float32}, &Datum); + layer->m_Weight = std::make_unique(weights); + + RunShapeInferenceTest(layer, {{ 1, outputChannels }}); +} + +BOOST_AUTO_TEST_CASE(GatherTest) +{ + CreateGraphAndRunTest({{ 7, 6, 2}, {2,3}}, {{ 2, 3, 6, 2 }}, GatherDescriptor(), "gather"); +} + +BOOST_AUTO_TEST_CASE(InstanceNormalizationTest) +{ + const TensorShape tensorShape{5, 7, 6, 2}; + + CreateGraphAndRunTest({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, + InstanceNormalizationDescriptor(), + "instancenorm"); +} + +BOOST_AUTO_TEST_CASE(L2NormalizationTest) +{ + const TensorShape tensorShape{5, 7, 6, 2}; + + CreateGraphAndRunTest({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, + L2NormalizationDescriptor(), + "l2norm"); +} + +BOOST_AUTO_TEST_CASE(LogSoftMaxTest) +{ + const TensorShape tensorShape{5, 7, 6, 2}; + + CreateGraphAndRunTest({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, LogSoftmaxDescriptor(), "logsoftmax"); +} + +BOOST_AUTO_TEST_CASE(LstmTest) +{ + const TensorShape inputShape{2, 5}; + const TensorShape inputCellState{2, 20}; + const TensorShape expectedOutputShape{2, 20}; + + LstmDescriptor descriptor; + + descriptor.m_ActivationFunc = 4; + descriptor.m_CifgEnabled = false; + descriptor.m_PeepholeEnabled = false; + descriptor.m_ProjectionEnabled = false; + + Graph graph; + auto layer = BuildGraph(&graph, {inputShape, 
inputCellState, inputCellState}, descriptor, "lstm"); + + float Datum = 0.0f; + ConstTensor constTensor({{ 2, 5, 3, 2 }, DataType::Float32}, &Datum); + + layer->m_BasicParameters.m_InputToCellWeights = std::make_unique(constTensor); + layer->m_BasicParameters.m_InputToForgetWeights = std::make_unique(constTensor); + layer->m_BasicParameters.m_CellBias = std::make_unique(constTensor); + layer->m_BasicParameters.m_ForgetGateBias = std::make_unique(constTensor); + layer->m_CifgParameters.m_InputGateBias = std::make_unique(constTensor); + layer->m_BasicParameters.m_OutputGateBias = std::make_unique(constTensor); + layer->m_BasicParameters.m_RecurrentToForgetWeights = std::make_unique(constTensor); + layer->m_BasicParameters.m_RecurrentToCellWeights = std::make_unique(constTensor); + layer->m_BasicParameters.m_InputToOutputWeights = std::make_unique(constTensor); + layer->m_BasicParameters.m_RecurrentToOutputWeights = std::make_unique(constTensor); + layer->m_CifgParameters.m_RecurrentToInputWeights = std::make_unique(constTensor); + layer->m_CifgParameters.m_InputToInputWeights = std::make_unique(constTensor); + + RunShapeInferenceTest(layer, {{2, 80}, {2, 20}, {2, 20}, {2, 20}}); +} + +BOOST_AUTO_TEST_CASE(MeanLayerTest) +{ + MeanDescriptor descriptor; + descriptor.m_Axis = {0}; + + CreateGraphAndRunTest({{ 5, 7, 6, 2 }}, {{ 7, 6, 2 }}, descriptor, "mean"); +} + +BOOST_AUTO_TEST_CASE(MemCopyTest) +{ + CreateGraphAndRunTest({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, "memcopy"); +} + +BOOST_AUTO_TEST_CASE(MemImportTest) +{ + CreateGraphAndRunTest({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, "memomport"); +} + +BOOST_AUTO_TEST_CASE(MergeTest) +{ + const TensorShape tensorShape{ 5, 7, 6, 2 }; + CreateGraphAndRunTest({ { 5, 7, 6, 2 }, { 5, 7, 6, 2 } }, {{ 5, 7, 6, 2 }}, "merge"); +} + +BOOST_AUTO_TEST_CASE(NormalizationTest) +{ + const TensorShape tensorShape{5, 7, 6, 2}; + + CreateGraphAndRunTest({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, NormalizationDescriptor(), "l2norm"); +} + 
+BOOST_AUTO_TEST_CASE(PermuteTest) +{ + PermuteDescriptor descriptor; + descriptor.m_DimMappings = {0U, 2U, 3U, 1U}; + + CreateGraphAndRunTest({{ 1, 2, 2, 3 }}, {{ 1, 3, 2, 2 }}, descriptor, "permute"); +} + +BOOST_AUTO_TEST_CASE(Pooling2dTest) +{ + armnn::Pooling2dDescriptor descriptor; + descriptor.m_PoolType = armnn::PoolingAlgorithm::Max; + descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3; + descriptor.m_StrideX = 2; + descriptor.m_StrideY = 4; + descriptor.m_PadLeft = descriptor.m_PadRight = 3; + descriptor.m_PadTop = descriptor.m_PadBottom = 0; + descriptor.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor; + descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude; + + CreateGraphAndRunTest({{ 1, 2, 8, 13 }}, {{ 1, 2, 2, 8 }}, descriptor, "pooling2d"); +} + +BOOST_AUTO_TEST_CASE(QLstmTest) +{ + const TensorShape inputShape{2, 5}; + const TensorShape inputCellState{2, 20}; + const TensorShape expectedOutputShape{2, 20}; + + QLstmDescriptor descriptor; + + descriptor.m_CifgEnabled = false; + descriptor.m_PeepholeEnabled = false; + descriptor.m_ProjectionEnabled = false; + + Graph graph; + auto layer = BuildGraph(&graph, {inputShape, inputCellState, inputCellState}, descriptor, "qlstm"); + + float Datum = 0.0f; + ConstTensor constTensor({{ 2, 5, 3, 2 }, DataType::Float32}, &Datum); + + layer->m_BasicParameters.m_InputToCellWeights = std::make_unique(constTensor); + layer->m_BasicParameters.m_InputToForgetWeights = std::make_unique(constTensor); + layer->m_BasicParameters.m_CellBias = std::make_unique(constTensor); + layer->m_BasicParameters.m_ForgetGateBias = std::make_unique(constTensor); + layer->m_CifgParameters.m_InputGateBias = std::make_unique(constTensor); + layer->m_BasicParameters.m_OutputGateBias = std::make_unique(constTensor); + layer->m_BasicParameters.m_RecurrentToForgetWeights = std::make_unique(constTensor); + layer->m_BasicParameters.m_RecurrentToCellWeights = std::make_unique(constTensor); + 
layer->m_BasicParameters.m_InputToOutputWeights = std::make_unique(constTensor); + layer->m_BasicParameters.m_RecurrentToOutputWeights = std::make_unique(constTensor); + layer->m_CifgParameters.m_RecurrentToInputWeights = std::make_unique(constTensor); + layer->m_CifgParameters.m_InputToInputWeights = std::make_unique(constTensor); + + RunShapeInferenceTest(layer, {{2, 20}, {2, 20}, {2, 20}}); +} + +BOOST_AUTO_TEST_CASE(QuantizedLstmTest) +{ + const TensorShape inputShape{2, 5}; + const TensorShape inputCellState{2, 20}; + const TensorShape expectedOutputShape{2, 20}; + + Graph graph; + auto layer = BuildGraph(&graph, {inputShape, inputCellState, inputCellState}, "quatizedlstm"); + + float Datum = 0.0f; + ConstTensor constTensor({{ 2, 5, 3, 2 }, DataType::Float32}, &Datum); + + layer->m_QuantizedLstmParameters.m_InputToCellWeights = std::make_unique(constTensor); + layer->m_QuantizedLstmParameters.m_InputToForgetWeights = std::make_unique(constTensor); + layer->m_QuantizedLstmParameters.m_CellBias = std::make_unique(constTensor); + layer->m_QuantizedLstmParameters.m_ForgetGateBias = std::make_unique(constTensor); + layer->m_QuantizedLstmParameters.m_InputGateBias = std::make_unique(constTensor); + layer->m_QuantizedLstmParameters.m_OutputGateBias = std::make_unique(constTensor); + layer->m_QuantizedLstmParameters.m_RecurrentToForgetWeights = std::make_unique(constTensor); + layer->m_QuantizedLstmParameters.m_RecurrentToCellWeights = std::make_unique(constTensor); + layer->m_QuantizedLstmParameters.m_InputToOutputWeights = std::make_unique(constTensor); + layer->m_QuantizedLstmParameters.m_RecurrentToOutputWeights = std::make_unique(constTensor); + layer->m_QuantizedLstmParameters.m_RecurrentToInputWeights = std::make_unique(constTensor); + layer->m_QuantizedLstmParameters.m_InputToInputWeights = std::make_unique(constTensor); + + RunShapeInferenceTest(layer, {{2, 20}, {2, 20}, {2, 20}}); +} + +BOOST_AUTO_TEST_CASE(QuantizeTest) +{ + const TensorShape tensorShape { 
5, 4, 7, 6 }; + CreateGraphAndRunTest({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, "mean"); +} + +BOOST_AUTO_TEST_CASE(RankTest) +{ + // due to rank having a scalar output we need a custom test + const TensorShape expectedOutputs(Dimensionality::Scalar); + + Graph graph; + auto layer = BuildGraph(&graph, {{ 1, 1, 1, 1 }}, "rank"); + + layer->GetOutputSlot(0).SetTensorInfo({TensorShape(Dimensionality::NotSpecified), DataType::Float32}); + + BOOST_CHECK_THROW( + layer->ValidateTensorShapesFromInputs(ShapeInferenceMethod::ValidateOnly), LayerValidationException); + + layer->ValidateTensorShapesFromInputs(ShapeInferenceMethod::InferAndValidate); + + BOOST_CHECK(layer->GetOutputSlot(0).GetTensorInfo().GetShape() == expectedOutputs); + + layer->GetOutputSlot(0).SetTensorInfo({TensorShape(Dimensionality::Scalar), DataType::Float32}); + + layer->ValidateTensorShapesFromInputs(ShapeInferenceMethod::ValidateOnly); + + BOOST_CHECK(layer->GetOutputSlot(0).GetTensorInfo().GetShape() == expectedOutputs); +} + +BOOST_AUTO_TEST_CASE(ReshapeTest) +{ + ReshapeDescriptor descriptor; + + descriptor.m_TargetShape = { 1, 1, 1, 8 }; + + CreateGraphAndRunTest({{ 2, 2, 2, 2 }}, {{ 1, 1, 1, 8 }}, descriptor, "reshape"); +} + +BOOST_AUTO_TEST_CASE(ResizeTest) +{ + ResizeDescriptor descriptor; + + descriptor.m_TargetHeight = 6; + descriptor.m_TargetWidth = 2; + + CreateGraphAndRunTest({{ 1, 7, 6, 2 }}, {{ 1, 7, 6, 2 }}, descriptor, "resize"); +} + +BOOST_AUTO_TEST_CASE(SliceTest) +{ + SliceDescriptor descriptor; + descriptor.m_Begin = { 1, 0, 1, 2 }; + descriptor.m_Size = { 2, 1, 2, 3 }; + + CreateGraphAndRunTest({{ 3, 2, 3, 5 }}, {{ 2, 1, 2, 3 }}, descriptor, "mean"); +} + +BOOST_AUTO_TEST_CASE(SpaceToBatchNdTest) +{ + SpaceToBatchNdDescriptor descriptor; + + std::vector blockShape {2, 2}; + std::vector> padlist = {{0, 0}, {0, 0}}; + + descriptor.m_BlockShape = blockShape; + descriptor.m_PadList = padlist; + descriptor.m_DataLayout = DataLayout::NHWC; + + CreateGraphAndRunTest({{ 1, 4, 4, 1 }}, {{ 4, 
2, 2, 1 }}, descriptor, "spacetobatchnd"); +} + +BOOST_AUTO_TEST_CASE(SpaceToDepth) +{ + SpaceToDepthDescriptor descriptor; + + descriptor.m_BlockSize = 2; + descriptor.m_DataLayout = DataLayout::NHWC; + + CreateGraphAndRunTest({{ 1, 2, 2, 2 }}, {{ 1, 1, 1, 8}}, descriptor, "spacetodepth"); +} + +BOOST_AUTO_TEST_CASE(SplitterTest) +{ + SplitterDescriptor descriptor(2, 3); + + descriptor.SetViewSize(0, 0, 1); + descriptor.SetViewSize(0, 1, 2); + descriptor.SetViewSize(0, 2, 2); + + descriptor.SetViewSize(1, 0, 1); + descriptor.SetViewSize(1, 1, 2); + descriptor.SetViewSize(1, 2, 2); + + CreateGraphAndRunTest({{ 2, 2, 2 }}, {{ 1, 2, 2 }, { 1, 2, 2 }}, descriptor, "splitter"); +} + +BOOST_AUTO_TEST_CASE(StackTest) +{ + StackDescriptor descriptor; + + descriptor.m_Axis = 0; + descriptor.m_NumInputs = 2; + descriptor.m_InputShape = { 3, 2, 3 }; + + CreateGraphAndRunTest({{ 3, 2, 3 }, { 3, 2, 3 }}, {{ 2, 3, 2, 3 }}, descriptor, "stack"); +} + +BOOST_AUTO_TEST_CASE(StridedSliceTest) +{ + StridedSliceDescriptor descriptor; + + descriptor.m_Begin = {0, 0, 0, 0}; + descriptor.m_End = {3, 2, 3, 1}; + descriptor.m_Stride = {2, 2, 2, 1}; + + CreateGraphAndRunTest({{ 3, 2, 3, 1 }}, {{ 2, 1, 2, 1 }}, descriptor, "stridedslice"); +} + +BOOST_AUTO_TEST_CASE(Switchtest) +{ + CreateGraphAndRunTest({{ 3, 2, 3, 1 }, { 3, 2, 3, 1 }}, {{ 3, 2, 3, 1 }, { 3, 2, 3, 1 }}, "switch"); +} + +BOOST_AUTO_TEST_CASE(TransposeConvolution2dTest) +{ + StridedSliceDescriptor descriptor; + + descriptor.m_Begin = {0, 0, 0, 0}; + descriptor.m_End = {3, 2, 3, 1}; + descriptor.m_Stride = {2, 2, 2, 1}; + + CreateGraphAndRunTest({{ 3, 2, 3, 1 }}, {{ 2, 1, 2, 1 }}, descriptor, "t"); +} + +BOOST_AUTO_TEST_CASE(TransposeTest) +{ + armnn::TransposeDescriptor descriptor; + descriptor.m_DimMappings = {0U, 3U, 1U, 2U}; + + CreateGraphAndRunTest({{ 1, 2, 2, 3 }}, {{ 1, 3, 2, 2 }}, descriptor, "stridedslice"); +} + +BOOST_AUTO_TEST_SUITE_END() +} \ No newline at end of file -- cgit v1.2.1