From c8724c7b9ff663538bd32ad789dbcc3e1aa88637 Mon Sep 17 00:00:00 2001
From: James Conroy
Date: Tue, 8 Oct 2019 15:41:34 +0100
Subject: IVGCVSW-3944 Add ArgMinMax output shape validation

Signed-off-by: James Conroy
Change-Id: I469895da158b062cd19248832525fa21527f7d41
---
 src/armnn/layers/ArgMinMaxLayer.cpp          | 39 ++++++++++
 src/armnn/layers/ArgMinMaxLayer.hpp          |  5 ++
 src/armnn/test/InferOutputTests.cpp          |  6 ++
 src/armnn/test/InferOutputTests.hpp          | 86 ++++++++++++++++++++++
 src/backends/backendsCommon/WorkloadData.cpp | 36 +++++++++
 .../test/layerTests/ArgMinMaxTestImpl.cpp    |  4 +-
 src/backends/neon/test/NeonLayerTests.cpp    |  2 -
 7 files changed, 174 insertions(+), 4 deletions(-)

diff --git a/src/armnn/layers/ArgMinMaxLayer.cpp b/src/armnn/layers/ArgMinMaxLayer.cpp
index aad95eb0cf..bfd71d519b 100644
--- a/src/armnn/layers/ArgMinMaxLayer.cpp
+++ b/src/armnn/layers/ArgMinMaxLayer.cpp
@@ -6,6 +6,8 @@
 
 #include "LayerCloneBase.hpp"
 
+#include <armnnUtils/TensorUtils.hpp>
+
 #include 
 #include 
 #include 
@@ -30,6 +32,43 @@ ArgMinMaxLayer* ArgMinMaxLayer::Clone(Graph& graph) const
     return CloneBase<ArgMinMaxLayer>(graph, m_Param, GetName());
 }
 
+std::vector<TensorShape> ArgMinMaxLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
+{
+    BOOST_ASSERT(inputShapes.size() == 1);
+
+    TensorShape inputShape = inputShapes[0];
+    auto inputNumDimensions = inputShape.GetNumDimensions();
+
+    auto axis = m_Param.m_Axis;
+    auto unsignedAxis = armnnUtils::GetUnsignedAxis(inputNumDimensions, axis);
+
+    BOOST_ASSERT(unsignedAxis <= inputNumDimensions);
+
+    // 1D input shape results in scalar output
+    if (inputShape.GetNumDimensions() == 1)
+    {
+        std::vector<unsigned int> tensorDimensions(1, 1);
+        TensorShape outputShape(1, tensorDimensions.data());
+
+        return std::vector<TensorShape>({ outputShape });
+    }
+
+    std::vector<unsigned int> tensorDimensions(inputNumDimensions - 1, 0);
+    for (unsigned int i = 0; i < unsignedAxis; ++i)
+    {
+        tensorDimensions[i] = inputShape[i];
+    }
+
+    for (unsigned int i = unsignedAxis + 1; i < inputNumDimensions; ++i)
+    {
+        tensorDimensions[i - 1] = inputShape[i];
+    }
+
+    TensorShape outputShape = TensorShape(inputNumDimensions - 1, tensorDimensions.data());
+
+    return std::vector<TensorShape>({ outputShape });
+}
+
 void ArgMinMaxLayer::ValidateTensorShapesFromInputs()
 {
     VerifyLayerConnections(1, CHECK_LOCATION());
diff --git a/src/armnn/layers/ArgMinMaxLayer.hpp b/src/armnn/layers/ArgMinMaxLayer.hpp
index ca1337f065..43ea056c9e 100644
--- a/src/armnn/layers/ArgMinMaxLayer.hpp
+++ b/src/armnn/layers/ArgMinMaxLayer.hpp
@@ -25,6 +25,11 @@ public:
     /// @param [in] graph The graph into which this layer is being cloned.
     ArgMinMaxLayer* Clone(Graph& graph) const override;
 
+    /// Infers the output shape from a given input shape and axis parameter.
+    /// @param [in] inputShapes The vector of input shapes for ArgMinMax.
+    /// @return A vector of inferred output shapes.
+    std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
+
     /// Check if the input tensor shape(s)
     /// will lead to a valid configuration of @ref ArgMinMaxLayer.
     void ValidateTensorShapesFromInputs() override;
diff --git a/src/armnn/test/InferOutputTests.cpp b/src/armnn/test/InferOutputTests.cpp
index 8606745623..3293cef0f7 100644
--- a/src/armnn/test/InferOutputTests.cpp
+++ b/src/armnn/test/InferOutputTests.cpp
@@ -11,6 +11,12 @@
 
 BOOST_AUTO_TEST_SUITE(LayerValidateOutput)
 
+// ArgMinMax
+ARMNN_SIMPLE_TEST_CASE(ArgMinMaxInferOutputShape4d, ArgMinMaxInferOutputShape4dTest)
+ARMNN_SIMPLE_TEST_CASE(ArgMinMaxInferOutputShape3d, ArgMinMaxInferOutputShape3dTest)
+ARMNN_SIMPLE_TEST_CASE(ArgMinMaxInferOutputShape2d, ArgMinMaxInferOutputShape2dTest)
+ARMNN_SIMPLE_TEST_CASE(ArgMinMaxInferOutputShape1d, ArgMinMaxInferOutputShape1dTest)
+
 // BatchToSpace
 ARMNN_SIMPLE_TEST_CASE(BatchToSpaceInferOutputShape, BatchToSpaceInferOutputShapeTest)
 
diff --git a/src/armnn/test/InferOutputTests.hpp b/src/armnn/test/InferOutputTests.hpp
index c428a9db61..feb2125656 100644
--- a/src/armnn/test/InferOutputTests.hpp
+++ b/src/armnn/test/InferOutputTests.hpp
@@ -10,6 +10,7 @@
 
 #include 
 #include 
+#include <layers/ArgMinMaxLayer.hpp>
 #include 
 #include 
 #include 
@@ -18,6 +19,91 @@
 #include 
 #include 
 
+void ArgMinMaxInferOutputShapeImpl(const armnn::ArgMinMaxDescriptor descriptor,
+                                   const std::vector<armnn::TensorShape>& inputShapes,
+                                   std::vector<armnn::TensorShape>& outputShapes)
+{
+    armnn::Graph graph;
+    auto argMinMaxLayer = graph.AddLayer<armnn::ArgMinMaxLayer>(descriptor, "argMinMax");
+    outputShapes = argMinMaxLayer->InferOutputShapes(inputShapes);
+}
+
+void ArgMinMaxInferOutputShape4dTest()
+{
+    armnn::Graph graph;
+    armnn::ArgMinMaxDescriptor descriptor;
+    descriptor.m_Axis = 2;
+
+    const std::vector<armnn::TensorShape> inputShapes
+    {
+        { 1, 3, 2, 4 }
+    };
+
+    std::vector<armnn::TensorShape> outputShapes;
+    BOOST_CHECK_NO_THROW(ArgMinMaxInferOutputShapeImpl(descriptor, inputShapes, outputShapes));
+
+    armnn::TensorShape expectedOutputShape( { 1, 3, 4 } );
+    BOOST_CHECK(outputShapes.size() == 1);
+    BOOST_CHECK(outputShapes[0] == expectedOutputShape);
+}
+
+void ArgMinMaxInferOutputShape3dTest()
+{
+    armnn::Graph graph;
+    armnn::ArgMinMaxDescriptor descriptor;
+    descriptor.m_Axis = 0;
+
+    const std::vector<armnn::TensorShape> inputShapes
+    {
+        { 1, 3, 2 }
+    };
+
+    std::vector<armnn::TensorShape> outputShapes;
+    BOOST_CHECK_NO_THROW(ArgMinMaxInferOutputShapeImpl(descriptor, inputShapes, outputShapes));
+
+    armnn::TensorShape expectedOutputShape( { 3, 2 } );
+    BOOST_CHECK(outputShapes.size() == 1);
+    BOOST_CHECK(outputShapes[0] == expectedOutputShape);
+}
+
+void ArgMinMaxInferOutputShape2dTest()
+{
+    armnn::Graph graph;
+    armnn::ArgMinMaxDescriptor descriptor;
+    descriptor.m_Axis = 1;
+
+    const std::vector<armnn::TensorShape> inputShapes
+    {
+        { 3, 2 }
+    };
+
+    std::vector<armnn::TensorShape> outputShapes;
+    BOOST_CHECK_NO_THROW(ArgMinMaxInferOutputShapeImpl(descriptor, inputShapes, outputShapes));
+
+    armnn::TensorShape expectedOutputShape( { 3 } );
+    BOOST_CHECK(outputShapes.size() == 1);
+    BOOST_CHECK(outputShapes[0] == expectedOutputShape);
+}
+
+void ArgMinMaxInferOutputShape1dTest()
+{
+    armnn::Graph graph;
+    armnn::ArgMinMaxDescriptor descriptor;
+    descriptor.m_Axis = 0;
+
+    const std::vector<armnn::TensorShape> inputShapes
+    {
+        { 5 }
+    };
+
+    std::vector<armnn::TensorShape> outputShapes;
+    BOOST_CHECK_NO_THROW(ArgMinMaxInferOutputShapeImpl(descriptor, inputShapes, outputShapes));
+
+    armnn::TensorShape expectedOutputShape( { 1 } );
+    BOOST_CHECK(outputShapes.size() == 1);
+    BOOST_CHECK(outputShapes[0] == expectedOutputShape);
+}
+
 void BatchToSpaceInferOutputShapeTest()
 {
     armnn::Graph graph;
diff --git a/src/backends/backendsCommon/WorkloadData.cpp b/src/backends/backendsCommon/WorkloadData.cpp
index 89277d798f..ea0e5c82b8 100644
--- a/src/backends/backendsCommon/WorkloadData.cpp
+++ b/src/backends/backendsCommon/WorkloadData.cpp
@@ -15,6 +15,7 @@
 
 #include 
 #include 
+#include <armnnUtils/TensorUtils.hpp>
 
 using namespace armnnUtils;
 
@@ -485,6 +486,41 @@
     };
 
     ValidateDataTypes(inputTensorInfo, supportedInputTypes, descriptorName);
+
+    auto inputShape = inputTensorInfo.GetShape();
+    auto outputShape = outputTensorInfo.GetShape();
+
+    auto inputNumDimensions = inputShape.GetNumDimensions();
+    auto unsignedAxis = armnnUtils::GetUnsignedAxis(inputNumDimensions, m_Parameters.m_Axis);
+
+    const std::string outputShapeError{": Output tensor shape does not match shape inferred from input tensor."};
+
+    // 1D input shape results in scalar output shape
+    if (inputShape.GetNumDimensions() == 1)
+    {
+        if (outputShape.GetNumDimensions() != 1 && outputShape[0] != 1)
+        {
+            throw InvalidArgumentException(descriptorName + outputShapeError);
+        }
+    }
+    else
+    {
+        for (unsigned int i = 0; i < unsignedAxis; ++i)
+        {
+            if (outputShape[i] != inputShape[i])
+            {
+                throw InvalidArgumentException(descriptorName + outputShapeError);
+            }
+        }
+
+        for (auto i = unsignedAxis + 1; i < inputNumDimensions; ++i)
+        {
+            if (outputShape[i - 1] != inputShape[i])
+            {
+                throw InvalidArgumentException(descriptorName + outputShapeError);
+            }
+        }
+    }
 }
 
 void SoftmaxQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
diff --git a/src/backends/backendsCommon/test/layerTests/ArgMinMaxTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ArgMinMaxTestImpl.cpp
index e023d60bf0..be7ef4e32e 100644
--- a/src/backends/backendsCommon/test/layerTests/ArgMinMaxTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/ArgMinMaxTestImpl.cpp
@@ -190,7 +190,7 @@ LayerTestResult<int32_t, 3> ArgMaxHeightTest(
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
     const armnn::TensorShape inputShape{ 1, 3, 2, 4};
-    const armnn::TensorShape outputShape{ 3, 1, 4 };
+    const armnn::TensorShape outputShape{ 1, 3, 4 };
 
     armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
     armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Signed32);
@@ -219,7 +219,7 @@ LayerTestResult<int32_t, 3> ArgMinWidthTest(
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
     const armnn::TensorShape inputShape{ 1, 3, 2, 4};
-    const armnn::TensorShape outputShape{ 3, 2, 1 };
+    const armnn::TensorShape outputShape{ 1, 3, 2 };
 
     armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
     armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Signed32);
diff --git a/src/backends/neon/test/NeonLayerTests.cpp b/src/backends/neon/test/NeonLayerTests.cpp
index 1d8aa11085..920fb0baf2 100644
--- a/src/backends/neon/test/NeonLayerTests.cpp
+++ b/src/backends/neon/test/NeonLayerTests.cpp
@@ -923,8 +923,6 @@ ARMNN_AUTO_TEST_CASE(ArgMinFloat32, ArgMinSimpleTest<DataType::Float32>)
 ARMNN_AUTO_TEST_CASE(ArgMaxFloat32, ArgMaxSimpleTest<DataType::Float32>)
 ARMNN_AUTO_TEST_CASE(ArgMinChannel, ArgMinChannelTest<DataType::Float32>)
 ARMNN_AUTO_TEST_CASE(ArgMaxChannel, ArgMaxChannelTest<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE(ArgMaxHeight, ArgMaxHeightTest<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE(ArgMinWidth, ArgMinWidthTest<DataType::Float32>)
 
 #if defined(ARMNNREF_ENABLED)
 
-- 
cgit v1.2.1
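
The hunks above apply one rule in two places (ArgMinMaxLayer::InferOutputShapes and
ArgMinMaxQueueDescriptor::Validate): the dimension selected by the wrapped axis is
dropped from the input shape, and a 1D input collapses to a single-element output.
The following is a minimal standalone sketch of that rule for illustration only; it
is not part of the patch, and the helper name and the use of std::vector instead of
armnn::TensorShape are assumptions made for brevity.

#include <cassert>
#include <iostream>
#include <vector>

// Illustrative helper: compute the ArgMin/ArgMax output shape by removing the
// (already wrapped, non-negative) axis from the input shape.
std::vector<unsigned int> InferArgMinMaxShape(const std::vector<unsigned int>& inputShape,
                                              unsigned int axis)
{
    assert(axis < inputShape.size());

    // A 1D input reduces to a single element, mirroring the layer's special case.
    if (inputShape.size() == 1)
    {
        return { 1u };
    }

    std::vector<unsigned int> outputShape;
    for (unsigned int i = 0; i < inputShape.size(); ++i)
    {
        if (i != axis)
        {
            outputShape.push_back(inputShape[i]);
        }
    }
    return outputShape;
}

int main()
{
    // { 1, 3, 2, 4 } reduced over axis 2 -> { 1, 3, 4 }, matching the corrected
    // expected output shape in ArgMaxHeightTest.
    for (unsigned int dim : InferArgMinMaxShape({ 1, 3, 2, 4 }, 2))
    {
        std::cout << dim << ' ';
    }
    std::cout << '\n';
    return 0;
}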