ArmNN 23.08
ArgMinMaxLayer.cpp
1 //
2 // Copyright © 2017,2019-2023 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
6 #include "ArgMinMaxLayer.hpp"
7 #include "LayerCloneBase.hpp"
8 
9 #include <armnn/TypesUtils.hpp>
10 
11 #include <armnnUtils/TensorUtils.hpp>
12 
13 #include <armnn/backends/WorkloadData.hpp>
14 #include <armnn/backends/WorkloadFactory.hpp>
15 
16 namespace armnn
17 {
18 
19 ArgMinMaxLayer::ArgMinMaxLayer(const ArgMinMaxDescriptor& param, const char* name)
20  : LayerWithParameters(1, 1, LayerType::ArgMinMax, param, name)
21 {
22 }
23 
24 std::unique_ptr<IWorkload> ArgMinMaxLayer::CreateWorkload(const IWorkloadFactory& factory) const
25 {
26  ArgMinMaxQueueDescriptor descriptor;
27  SetAdditionalInfo(descriptor);
28 
29  return factory.CreateWorkload(LayerType::ArgMinMax, descriptor, PrepInfoAndDesc(descriptor));
30 }
31 
32 ArgMinMaxLayer* ArgMinMaxLayer::Clone(Graph& graph) const
33 {
34  return CloneBase<ArgMinMaxLayer>(graph, m_Param, GetName());
35 }
36 
37 std::vector<TensorShape> ArgMinMaxLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
38 {
39  ARMNN_ASSERT(inputShapes.size() == 1);
40 
41  TensorShape inputShape = inputShapes[0];
42  auto inputNumDimensions = inputShape.GetNumDimensions();
43 
44  auto axis = m_Param.m_Axis;
45  auto unsignedAxis = armnnUtils::GetUnsignedAxis(inputNumDimensions, axis);
46 
47  ARMNN_ASSERT(unsignedAxis <= inputNumDimensions);
48 
49  // 1D input shape results in scalar output
50  if (inputShape.GetNumDimensions() == 1)
51  {
52  std::vector<unsigned int> tensorDimensions(1, 1);
53  TensorShape outputShape(1, tensorDimensions.data());
54 
55  return std::vector<TensorShape>({ outputShape });
56  }
57 
58  std::vector<unsigned int> tensorDimensions(inputNumDimensions - 1, 0);
59  for (unsigned int i = 0; i < unsignedAxis; ++i)
60  {
61  tensorDimensions[i] = inputShape[i];
62  }
63 
64  for (unsigned int i = unsignedAxis + 1; i < inputNumDimensions; ++i)
65  {
66  tensorDimensions[i - 1] = inputShape[i];
67  }
68 
69  TensorShape outputShape = TensorShape(inputNumDimensions - 1, tensorDimensions.data());
70 
71  return std::vector<TensorShape>({ outputShape });
72 }
73 
74 void ArgMinMaxLayer::ValidateTensorShapesFromInputs()
75 {
76  VerifyLayerConnections(1, CHECK_LOCATION());
77 
78  const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
79 
80  VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
81 
82  auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetTensorInfo().GetShape() });
83 
84  ARMNN_ASSERT(inferredShapes.size() == 1);
85 
86  ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "ArgMinMaxLayer");
87 }
88 
89 void ArgMinMaxLayer::ExecuteStrategy(IStrategy& strategy) const
90 {
91  strategy.ExecuteStrategy(this, GetParameters(), {}, GetName());
92 }
93 
94 } // namespace armnn
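
For orientation, a usage sketch that is not part of the generated file above: the binding ids, layer names and shapes below are example values chosen for illustration. The layer is normally added through the public INetwork API, and the shape inference implemented in InferOutputShapes removes the reduced axis, so a { 2, 3, 4 } Float32 input reduced along axis 1 yields a { 2, 4 } tensor of indices (Signed32 being the descriptor's default output type).

#include <armnn/INetwork.hpp>
#include <armnn/Descriptors.hpp>
#include <armnn/Tensor.hpp>
#include <armnn/Types.hpp>

// Sketch only: names, binding ids and shapes are example values.
armnn::INetworkPtr BuildArgMaxNetworkSketch()
{
    armnn::INetworkPtr network = armnn::INetwork::Create();

    armnn::ArgMinMaxDescriptor desc;
    desc.m_Function = armnn::ArgMinMaxFunction::Max; // or ArgMinMaxFunction::Min
    desc.m_Axis     = 1;                             // this dimension is removed from the output

    armnn::IConnectableLayer* input  = network->AddInputLayer(0, "input");
    armnn::IConnectableLayer* argMax = network->AddArgMinMaxLayer(desc, "argmax");
    armnn::IConnectableLayer* output = network->AddOutputLayer(0, "output");

    // Reducing a { 2, 3, 4 } Float32 tensor along axis 1 yields a { 2, 4 } tensor of indices.
    input->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo({ 2, 3, 4 }, armnn::DataType::Float32));
    argMax->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo({ 2, 4 }, armnn::DataType::Signed32));

    input->GetOutputSlot(0).Connect(argMax->GetInputSlot(0));
    argMax->GetOutputSlot(0).Connect(output->GetInputSlot(0));

    return network;
}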
ARMNN_ASSERT
#define ARMNN_ASSERT(COND)
Definition: Assert.hpp:14
armnn::ArgMinMaxLayer::Clone
ArgMinMaxLayer * Clone(Graph &graph) const override
Creates a dynamically-allocated copy of this layer.
Definition: ArgMinMaxLayer.cpp:32
armnn::ArgMinMaxLayer::CreateWorkload
virtual std::unique_ptr< IWorkload > CreateWorkload(const IWorkloadFactory &factory) const override
Makes a workload for the ArgMinMax type.
Definition: ArgMinMaxLayer.cpp:24
armnn::ArgMinMaxLayer::ArgMinMaxLayer
ArgMinMaxLayer(const ArgMinMaxDescriptor &param, const char *name)
Constructor to create an ArgMinMaxLayer.
Definition: ArgMinMaxLayer.cpp:19
armnn::OutputSlot::GetTensorInfo
const TensorInfo & GetTensorInfo() const override
Definition: Layer.cpp:92
WorkloadData.hpp
armnn::ArgMinMaxDescriptor
An ArgMinMaxDescriptor for ArgMinMaxLayer.
Definition: Descriptors.hpp:67
armnnUtils::GetUnsignedAxis
unsigned int GetUnsignedAxis(const unsigned int inputDimension, const int axis)
Definition: TensorUtils.cpp:221
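
InferOutputShapes uses GetUnsignedAxis to map the descriptor's possibly negative m_Axis into the [0, rank) range before the copy loops run. A minimal sketch of that wrap-around convention, assuming the usual negative-axis behaviour (the actual implementation lives in TensorUtils.cpp; NormaliseAxisSketch is a name invented for this example):

// Sketch only: assumes the common wrap-around convention for negative axes.
unsigned int NormaliseAxisSketch(unsigned int inputDimension, int axis)
{
    // For inputDimension == 3: axis -1 -> 2, axis 0 -> 0, axis 2 -> 2.
    return axis >= 0 ? static_cast<unsigned int>(axis)
                     : static_cast<unsigned int>(axis + static_cast<int>(inputDimension));
}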
ArgMinMaxLayer.hpp
TypesUtils.hpp
CHECK_LOCATION
#define CHECK_LOCATION()
Definition: Exceptions.hpp:203
armnn::Layer::ValidateAndCopyShape
void ValidateAndCopyShape(const TensorShape &outputShape, const TensorShape &inferredShape, const ShapeInferenceMethod shapeInferenceMethod, const std::string &layerName, const unsigned int outputSlotIndex=0)
Definition: Layer.cpp:435
armnn::Layer::GetOutputSlot
const OutputSlot & GetOutputSlot(unsigned int index=0) const override
Get the const output slot handle by slot index.
Definition: Layer.hpp:339
armnn::ArgMinMaxLayer
This layer represents an ArgMinMax operation.
Definition: ArgMinMaxLayer.hpp:14
armnn::IStrategy
Definition: IStrategy.hpp:16
armnn::Layer::GetInputSlot
const InputSlot & GetInputSlot(unsigned int index) const override
Get a const input slot handle by slot index.
Definition: Layer.hpp:337
armnn::LayerWithParameters< ArgMinMaxDescriptor >::GetParameters
const ArgMinMaxDescriptor & GetParameters() const override
Definition: LayerWithParameters.hpp:19
WorkloadFactory.hpp
armnn::LayerWithParameters
Definition: LayerWithParameters.hpp:14
armnn::Layer::GetName
const char * GetName() const override
Returns the name of the layer.
Definition: Layer.hpp:332
TensorUtils.hpp
armnn::InputSlot::GetTensorInfo
const TensorInfo & GetTensorInfo() const override
Gets the TensorInfo for this InputSlot.
Definition: Layer.cpp:592
armnn::TensorShape
Definition: Tensor.hpp:20
armnn::LayerWithParameters< ArgMinMaxDescriptor >::m_Param
ArgMinMaxDescriptor m_Param
The parameters for the layer (not including tensor-valued weights etc.).
Definition: LayerWithParameters.hpp:52
armnn::TensorShape::GetNumDimensions
unsigned int GetNumDimensions() const
Function that returns the tensor rank.
Definition: Tensor.cpp:174
armnn::ArgMinMax
void ArgMinMax(Decoder< float > &in, OUT *out, const TensorInfo &inputTensorInfo, const TensorInfo &outputTensorInfo, ArgMinMaxFunction function, int axis)
Definition: ArgMinMax.cpp:16
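
The reference kernel above reads its input through a Decoder and supports an arbitrary axis. As a plain C++ sketch of the reduction it performs (the helper name and the flattened [outer, axisSize] layout are assumptions made for this example), an arg-max over the innermost axis can be written as:

#include <cstddef>
#include <cstdint>
#include <vector>

// Sketch only: arg-max over the last axis of a row-major [outer, axisSize] buffer.
std::vector<int32_t> ArgMaxLastAxisSketch(const std::vector<float>& in,
                                          std::size_t outer, std::size_t axisSize)
{
    std::vector<int32_t> out(outer, 0);
    for (std::size_t o = 0; o < outer; ++o)
    {
        const float* row = in.data() + o * axisSize;
        std::size_t best = 0;
        for (std::size_t i = 1; i < axisSize; ++i)
        {
            if (row[i] > row[best]) { best = i; } // use '<' for an arg-min
        }
        out[o] = static_cast<int32_t>(best);
    }
    return out;
}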
armnn::LayerWithParameters< ArgMinMaxDescriptor >::PrepInfoAndDesc
WorkloadInfo PrepInfoAndDesc(QueueDescriptor &descriptor) const
Helper function to reduce duplication in *Layer::CreateWorkload.
Definition: LayerWithParameters.hpp:44
armnn::IWorkloadFactory
Definition: WorkloadFactory.hpp:22
armnn::Layer::VerifyShapeInferenceType
void VerifyShapeInferenceType(const TensorShape &outputShape, ShapeInferenceMethod shapeInferenceMethod)
Definition: Layer.cpp:504
armnn::Layer::SetAdditionalInfo
void SetAdditionalInfo(QueueDescriptor &descriptor) const
Definition: Layer.cpp:287
armnn::ArgMinMaxLayer::ValidateTensorShapesFromInputs
void ValidateTensorShapesFromInputs() override
Check if the input tensor shape(s) will lead to a valid configuration of ArgMinMaxLayer.
Definition: ArgMinMaxLayer.cpp:74
armnn::ArgMinMaxLayer::InferOutputShapes
std::vector< TensorShape > InferOutputShapes(const std::vector< TensorShape > &inputShapes) const override
Infers the output shape from a given input shape and axis parameter.
Definition: ArgMinMaxLayer.cpp:37
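
A minimal sketch of the same axis-removal arithmetic on plain vectors (DropAxisSketch is a name invented for this example; the real method operates on TensorShape, as shown at lines 37-72 of the listing above):

#include <cassert>
#include <vector>

// Sketch only: drop the reduced axis from a shape, mirroring InferOutputShapes.
std::vector<unsigned int> DropAxisSketch(const std::vector<unsigned int>& inputShape,
                                         unsigned int axis)
{
    assert(axis < inputShape.size());
    if (inputShape.size() == 1)
    {
        return { 1u }; // a 1-D input reduces to a scalar-like shape
    }
    std::vector<unsigned int> outputShape;
    for (unsigned int i = 0; i < inputShape.size(); ++i)
    {
        if (i != axis) { outputShape.push_back(inputShape[i]); }
    }
    return outputShape; // e.g. { 4, 10, 6 } with axis 1 -> { 4, 6 }
}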
armnn::TensorInfo::GetShape
const TensorShape & GetShape() const
Definition: Tensor.hpp:191
armnn
Copyright (c) 2021 ARM Limited and Contributors.
Definition: 01_00_quick_start.dox:6
armnn::LayerType::ArgMinMax
@ ArgMinMax
armnn::ArgMinMaxDescriptor::m_Axis
int m_Axis
Axis to reduce across the input tensor.
Definition: Descriptors.hpp:83
armnn::ArgMinMaxLayer::ExecuteStrategy
void ExecuteStrategy(IStrategy &strategy) const override
Apply a visitor to this layer.
Definition: ArgMinMaxLayer.cpp:89
armnn::Layer::VerifyLayerConnections
void VerifyLayerConnections(unsigned int expectedConnections, const CheckLocation &location) const
Definition: Layer.cpp:391
armnn::ArgMinMaxQueueDescriptor
Definition: WorkloadData.hpp:163
armnn::Layer::m_ShapeInferenceMethod
ShapeInferenceMethod m_ShapeInferenceMethod
Definition: Layer.hpp:441
armnn::LayerType
LayerType
When adding a new layer, adapt also the LastLayer enum value in the enum class LayerType below.
Definition: Types.hpp:483
armnn::Graph
Definition: Graph.hpp:30
armnn::IWorkloadFactory::CreateWorkload
virtual std::unique_ptr< IWorkload > CreateWorkload(LayerType type, const QueueDescriptor &descriptor, const WorkloadInfo &info) const =0
Backends should implement their own CreateWorkload function with a switch statement.
armnn::IStrategy::ExecuteStrategy
virtual void ExecuteStrategy(const IConnectableLayer *layer, const armnn::BaseDescriptor &descriptor, const std::vector< armnn::ConstTensor > &constants, const char *name, const armnn::LayerBindingId id=0)=0
LayerCloneBase.hpp