aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--src/armnn/layers/ArgMinMaxLayer.cpp39
-rw-r--r--src/armnn/layers/ArgMinMaxLayer.hpp5
-rw-r--r--src/armnn/test/InferOutputTests.cpp6
-rw-r--r--src/armnn/test/InferOutputTests.hpp86
-rw-r--r--src/backends/backendsCommon/WorkloadData.cpp36
-rw-r--r--src/backends/backendsCommon/test/layerTests/ArgMinMaxTestImpl.cpp4
-rw-r--r--src/backends/neon/test/NeonLayerTests.cpp2
7 files changed, 174 insertions, 4 deletions
diff --git a/src/armnn/layers/ArgMinMaxLayer.cpp b/src/armnn/layers/ArgMinMaxLayer.cpp
index aad95eb0cf..bfd71d519b 100644
--- a/src/armnn/layers/ArgMinMaxLayer.cpp
+++ b/src/armnn/layers/ArgMinMaxLayer.cpp
@@ -6,6 +6,8 @@
#include "LayerCloneBase.hpp"
+#include <TensorUtils.hpp>
+
#include <armnn/TypesUtils.hpp>
#include <backendsCommon/WorkloadData.hpp>
#include <backendsCommon/WorkloadFactory.hpp>
@@ -30,6 +32,43 @@ ArgMinMaxLayer* ArgMinMaxLayer::Clone(Graph& graph) const
return CloneBase<ArgMinMaxLayer>(graph, m_Param, GetName());
}
+std::vector<TensorShape> ArgMinMaxLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
+{
+ BOOST_ASSERT(inputShapes.size() == 1);
+
+ TensorShape inputShape = inputShapes[0];
+ auto inputNumDimensions = inputShape.GetNumDimensions();
+
+ auto axis = m_Param.m_Axis;
+ auto unsignedAxis = armnnUtils::GetUnsignedAxis(inputNumDimensions, axis);
+
+ BOOST_ASSERT(unsignedAxis <= inputNumDimensions);
+
+ // 1D input shape results in scalar output
+ if (inputShape.GetNumDimensions() == 1)
+ {
+ std::vector<unsigned int> tensorDimensions(1, 1);
+ TensorShape outputShape(1, tensorDimensions.data());
+
+ return std::vector<TensorShape>({ outputShape });
+ }
+
+ std::vector<unsigned int> tensorDimensions(inputNumDimensions - 1, 0);
+ for (unsigned int i = 0; i < unsignedAxis; ++i)
+ {
+ tensorDimensions[i] = inputShape[i];
+ }
+
+ for (unsigned int i = unsignedAxis + 1; i < inputNumDimensions; ++i)
+ {
+ tensorDimensions[i - 1] = inputShape[i];
+ }
+
+ TensorShape outputShape = TensorShape(inputNumDimensions - 1, tensorDimensions.data());
+
+ return std::vector<TensorShape>({ outputShape });
+}
+
void ArgMinMaxLayer::ValidateTensorShapesFromInputs()
{
VerifyLayerConnections(1, CHECK_LOCATION());
diff --git a/src/armnn/layers/ArgMinMaxLayer.hpp b/src/armnn/layers/ArgMinMaxLayer.hpp
index ca1337f065..43ea056c9e 100644
--- a/src/armnn/layers/ArgMinMaxLayer.hpp
+++ b/src/armnn/layers/ArgMinMaxLayer.hpp
@@ -25,6 +25,11 @@ public:
/// @param [in] graph The graph into which this layer is being cloned.
ArgMinMaxLayer* Clone(Graph& graph) const override;
+ /// Infers the output shape from a given input shape and axis parameter.
+ /// @param [in] inputShapes The vector of input shapes for ArgMinMax.
+ /// @return A vector of inferred output shapes.
+ std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
+
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref ArgMinMaxLayer.
void ValidateTensorShapesFromInputs() override;
diff --git a/src/armnn/test/InferOutputTests.cpp b/src/armnn/test/InferOutputTests.cpp
index 8606745623..3293cef0f7 100644
--- a/src/armnn/test/InferOutputTests.cpp
+++ b/src/armnn/test/InferOutputTests.cpp
@@ -11,6 +11,12 @@
BOOST_AUTO_TEST_SUITE(LayerValidateOutput)
+// ArgMinMax
+ARMNN_SIMPLE_TEST_CASE(ArgMinMaxInferOutputShape4d, ArgMinMaxInferOutputShape4dTest)
+ARMNN_SIMPLE_TEST_CASE(ArgMinMaxInferOutputShape3d, ArgMinMaxInferOutputShape3dTest)
+ARMNN_SIMPLE_TEST_CASE(ArgMinMaxInferOutputShape2d, ArgMinMaxInferOutputShape2dTest)
+ARMNN_SIMPLE_TEST_CASE(ArgMinMaxInferOutputShape1d, ArgMinMaxInferOutputShape1dTest)
+
// BatchToSpace
ARMNN_SIMPLE_TEST_CASE(BatchToSpaceInferOutputShape, BatchToSpaceInferOutputShapeTest)
diff --git a/src/armnn/test/InferOutputTests.hpp b/src/armnn/test/InferOutputTests.hpp
index c428a9db61..feb2125656 100644
--- a/src/armnn/test/InferOutputTests.hpp
+++ b/src/armnn/test/InferOutputTests.hpp
@@ -10,6 +10,7 @@
#include <armnn/ArmNN.hpp>
#include <Graph.hpp>
+#include <layers/ArgMinMaxLayer.hpp>
#include <layers/BatchToSpaceNdLayer.hpp>
#include <layers/SpaceToDepthLayer.hpp>
#include <layers/PreluLayer.hpp>
@@ -18,6 +19,91 @@
#include <boost/algorithm/string.hpp>
#include <boost/test/unit_test.hpp>
+void ArgMinMaxInferOutputShapeImpl(const armnn::ArgMinMaxDescriptor descriptor,
+ const std::vector<armnn::TensorShape>& inputShapes,
+ std::vector<armnn::TensorShape>& outputShapes)
+{
+ armnn::Graph graph;
+ auto argMinMaxLayer = graph.AddLayer<armnn::ArgMinMaxLayer>(descriptor, "argMinMax");
+ outputShapes = argMinMaxLayer->InferOutputShapes(inputShapes);
+}
+
+void ArgMinMaxInferOutputShape4dTest()
+{
+ armnn::Graph graph;
+ armnn::ArgMinMaxDescriptor descriptor;
+ descriptor.m_Axis = 2;
+
+ const std::vector<armnn::TensorShape> inputShapes
+ {
+ { 1, 3, 2, 4 }
+ };
+
+ std::vector<armnn::TensorShape> outputShapes;
+ BOOST_CHECK_NO_THROW(ArgMinMaxInferOutputShapeImpl(descriptor, inputShapes, outputShapes));
+
+ armnn::TensorShape expectedOutputShape( { 1, 3, 4 } );
+ BOOST_CHECK(outputShapes.size() == 1);
+ BOOST_CHECK(outputShapes[0] == expectedOutputShape);
+}
+
+void ArgMinMaxInferOutputShape3dTest()
+{
+ armnn::Graph graph;
+ armnn::ArgMinMaxDescriptor descriptor;
+ descriptor.m_Axis = 0;
+
+ const std::vector<armnn::TensorShape> inputShapes
+ {
+ { 1, 3, 2 }
+ };
+
+ std::vector<armnn::TensorShape> outputShapes;
+ BOOST_CHECK_NO_THROW(ArgMinMaxInferOutputShapeImpl(descriptor, inputShapes, outputShapes));
+
+ armnn::TensorShape expectedOutputShape( { 3, 2 } );
+ BOOST_CHECK(outputShapes.size() == 1);
+ BOOST_CHECK(outputShapes[0] == expectedOutputShape);
+}
+
+void ArgMinMaxInferOutputShape2dTest()
+{
+ armnn::Graph graph;
+ armnn::ArgMinMaxDescriptor descriptor;
+ descriptor.m_Axis = 1;
+
+ const std::vector<armnn::TensorShape> inputShapes
+ {
+ { 3, 2 }
+ };
+
+ std::vector<armnn::TensorShape> outputShapes;
+ BOOST_CHECK_NO_THROW(ArgMinMaxInferOutputShapeImpl(descriptor, inputShapes, outputShapes));
+
+ armnn::TensorShape expectedOutputShape( { 3 } );
+ BOOST_CHECK(outputShapes.size() == 1);
+ BOOST_CHECK(outputShapes[0] == expectedOutputShape);
+}
+
+void ArgMinMaxInferOutputShape1dTest()
+{
+ armnn::Graph graph;
+ armnn::ArgMinMaxDescriptor descriptor;
+ descriptor.m_Axis = 0;
+
+ const std::vector<armnn::TensorShape> inputShapes
+ {
+ { 5 }
+ };
+
+ std::vector<armnn::TensorShape> outputShapes;
+ BOOST_CHECK_NO_THROW(ArgMinMaxInferOutputShapeImpl(descriptor, inputShapes, outputShapes));
+
+ armnn::TensorShape expectedOutputShape( { 1 } );
+ BOOST_CHECK(outputShapes.size() == 1);
+ BOOST_CHECK(outputShapes[0] == expectedOutputShape);
+}
+
void BatchToSpaceInferOutputShapeTest()
{
armnn::Graph graph;
diff --git a/src/backends/backendsCommon/WorkloadData.cpp b/src/backends/backendsCommon/WorkloadData.cpp
index 89277d798f..ea0e5c82b8 100644
--- a/src/backends/backendsCommon/WorkloadData.cpp
+++ b/src/backends/backendsCommon/WorkloadData.cpp
@@ -15,6 +15,7 @@
#include <boost/format.hpp>
#include <boost/numeric/conversion/cast.hpp>
+#include <TensorUtils.hpp>
using namespace armnnUtils;
@@ -485,6 +486,41 @@ void ArgMinMaxQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
};
ValidateDataTypes(inputTensorInfo, supportedInputTypes, descriptorName);
+
+ auto inputShape = inputTensorInfo.GetShape();
+ auto outputShape = outputTensorInfo.GetShape();
+
+ auto inputNumDimensions = inputShape.GetNumDimensions();
+ auto unsignedAxis = armnnUtils::GetUnsignedAxis(inputNumDimensions, m_Parameters.m_Axis);
+
+ const std::string outputShapeError{": Output tensor shape does not match shape inferred from input tensor."};
+
+ // 1D input shape results in scalar output shape
+ if (inputShape.GetNumDimensions() == 1)
+ {
+        if (outputShape.GetNumDimensions() != 1 || outputShape[0] != 1)
+ {
+ throw InvalidArgumentException(descriptorName + outputShapeError);
+ }
+ }
+ else
+ {
+ for (unsigned int i = 0; i < unsignedAxis; ++i)
+ {
+ if (outputShape[i] != inputShape[i])
+ {
+ throw InvalidArgumentException(descriptorName + outputShapeError);
+ }
+ }
+
+ for (auto i = unsignedAxis + 1; i < inputNumDimensions; ++i)
+ {
+ if (outputShape[i - 1] != inputShape[i])
+ {
+ throw InvalidArgumentException(descriptorName + outputShapeError);
+ }
+ }
+ }
}
void SoftmaxQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
diff --git a/src/backends/backendsCommon/test/layerTests/ArgMinMaxTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ArgMinMaxTestImpl.cpp
index e023d60bf0..be7ef4e32e 100644
--- a/src/backends/backendsCommon/test/layerTests/ArgMinMaxTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/ArgMinMaxTestImpl.cpp
@@ -190,7 +190,7 @@ LayerTestResult<int32_t, 3> ArgMaxHeightTest(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
const armnn::TensorShape inputShape{ 1, 3, 2, 4};
- const armnn::TensorShape outputShape{ 3, 1, 4 };
+ const armnn::TensorShape outputShape{ 1, 3, 4 };
armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Signed32);
@@ -219,7 +219,7 @@ LayerTestResult<int32_t, 3> ArgMinWidthTest(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
const armnn::TensorShape inputShape{ 1, 3, 2, 4};
- const armnn::TensorShape outputShape{ 3, 2, 1 };
+ const armnn::TensorShape outputShape{ 1, 3, 2 };
armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Signed32);
diff --git a/src/backends/neon/test/NeonLayerTests.cpp b/src/backends/neon/test/NeonLayerTests.cpp
index 1d8aa11085..920fb0baf2 100644
--- a/src/backends/neon/test/NeonLayerTests.cpp
+++ b/src/backends/neon/test/NeonLayerTests.cpp
@@ -923,8 +923,6 @@ ARMNN_AUTO_TEST_CASE(ArgMinFloat32, ArgMinSimpleTest<DataType::Float32>)
ARMNN_AUTO_TEST_CASE(ArgMaxFloat32, ArgMaxSimpleTest<DataType::Float32>)
ARMNN_AUTO_TEST_CASE(ArgMinChannel, ArgMinChannelTest<DataType::Float32>)
ARMNN_AUTO_TEST_CASE(ArgMaxChannel, ArgMaxChannelTest<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE(ArgMaxHeight, ArgMaxHeightTest<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE(ArgMinWidth, ArgMinWidthTest<DataType::Float32>)
#if defined(ARMNNREF_ENABLED)