ArmNN 22.11
ArgMinMaxLayer.cpp
//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "ArgMinMaxLayer.hpp"
#include "LayerCloneBase.hpp"

#include <armnn/TypesUtils.hpp>

#include <armnnUtils/TensorUtils.hpp>

#include <armnn/backends/WorkloadData.hpp>
#include <armnn/backends/WorkloadFactory.hpp>

namespace armnn
{

ArgMinMaxLayer::ArgMinMaxLayer(const ArgMinMaxDescriptor& param, const char* name)
    : LayerWithParameters(1, 1, LayerType::ArgMinMax, param, name)
{
}

std::unique_ptr<IWorkload> ArgMinMaxLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
    ArgMinMaxQueueDescriptor descriptor;
    SetAdditionalInfo(descriptor);

    return factory.CreateWorkload(LayerType::ArgMinMax, descriptor, PrepInfoAndDesc(descriptor));
}

ArgMinMaxLayer* ArgMinMaxLayer::Clone(Graph& graph) const
{
    return CloneBase<ArgMinMaxLayer>(graph, m_Param, GetName());
}

std::vector<TensorShape> ArgMinMaxLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
    ARMNN_ASSERT(inputShapes.size() == 1);

    TensorShape inputShape = inputShapes[0];
    auto inputNumDimensions = inputShape.GetNumDimensions();

    auto axis = m_Param.m_Axis;
    auto unsignedAxis = armnnUtils::GetUnsignedAxis(inputNumDimensions, axis);

    ARMNN_ASSERT(unsignedAxis <= inputNumDimensions);

    // 1D input shape results in scalar output
    if (inputShape.GetNumDimensions() == 1)
    {
        std::vector<unsigned int> tensorDimensions(1, 1);
        TensorShape outputShape(1, tensorDimensions.data());

        return std::vector<TensorShape>({ outputShape });
    }

    std::vector<unsigned int> tensorDimensions(inputNumDimensions - 1, 0);
    for (unsigned int i = 0; i < unsignedAxis; ++i)
    {
        tensorDimensions[i] = inputShape[i];
    }

    for (unsigned int i = unsignedAxis + 1; i < inputNumDimensions; ++i)
    {
        tensorDimensions[i - 1] = inputShape[i];
    }

    TensorShape outputShape = TensorShape(inputNumDimensions - 1, tensorDimensions.data());

    return std::vector<TensorShape>({ outputShape });
}

void ArgMinMaxLayer::ValidateTensorShapesFromInputs()
{
    VerifyLayerConnections(1, CHECK_LOCATION());

    const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();

    VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);

    auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });

    ARMNN_ASSERT(inferredShapes.size() == 1);

    ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "ArgMinMaxLayer");
}

void ArgMinMaxLayer::ExecuteStrategy(IStrategy& strategy) const
{
    strategy.ExecuteStrategy(this, GetParameters(), {}, GetName());
}

} // namespace armnn
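The shape inference in InferOutputShapes above drops the reduced axis from the input shape, and a 1D input collapses to the scalar shape [ 1 ]. The following is a minimal standalone sketch of that arithmetic in plain C++, not ArmNN code; the helper name InferArgMinMaxShape is made up for illustration. For example, an NHWC input of shape [ 1, 20, 20, 3 ] reduced over axis 3 gives [ 1, 20, 20 ].

// Standalone sketch mirroring the logic of ArgMinMaxLayer::InferOutputShapes:
// keep every dimension except the reduced axis; a 1D input becomes { 1 }.
// The helper name InferArgMinMaxShape is hypothetical, for illustration only.
#include <cassert>
#include <iostream>
#include <vector>

std::vector<unsigned int> InferArgMinMaxShape(const std::vector<unsigned int>& inputShape,
                                              unsigned int axis)
{
    assert(!inputShape.empty() && axis < inputShape.size());

    // A 1D input shape results in a scalar output, represented here as { 1 }.
    if (inputShape.size() == 1)
    {
        return { 1 };
    }

    std::vector<unsigned int> outputShape;
    outputShape.reserve(inputShape.size() - 1);
    for (unsigned int i = 0; i < inputShape.size(); ++i)
    {
        if (i != axis)
        {
            outputShape.push_back(inputShape[i]); // copy every dimension except the reduced one
        }
    }
    return outputShape;
}

int main()
{
    // [1, 20, 20, 3] reduced over axis 3 -> prints "1 20 20"
    for (unsigned int dim : InferArgMinMaxShape({ 1, 20, 20, 3 }, 3))
    {
        std::cout << dim << ' ';
    }
    std::cout << '\n';
    return 0;
}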
Referenced symbols:
- ArgMinMaxDescriptor m_Param: The parameters for the layer (not including tensor-valued weights etc.).
- const TensorShape& GetShape() const (defined in Tensor.hpp:191)
- ArgMinMaxLayer* Clone(Graph& graph) const override: Creates a dynamically-allocated copy of this layer.
- void ArgMinMax(Decoder<float>& in, OUT* out, const TensorInfo& inputTensorInfo, const TensorInfo& outputTensorInfo, ArgMinMaxFunction function, int axis) (defined in ArgMinMax.cpp:16)
- void VerifyShapeInferenceType(const TensorShape& outputShape, ShapeInferenceMethod shapeInferenceMethod) (defined in Layer.cpp:491)
- namespace armnn: Copyright (c) 2021 ARM Limited and Contributors.
- const ArgMinMaxDescriptor& GetParameters() const override
- const IOutputSlot* GetConnection() const override (defined in Layer.hpp:206)
- ArgMinMaxLayer(const ArgMinMaxDescriptor& param, const char* name): Constructor to create an ArgMinMaxLayer.
- void ValidateAndCopyShape(const TensorShape& outputShape, const TensorShape& inferredShape, const ShapeInferenceMethod shapeInferenceMethod, const std::string& layerName, const unsigned int outputSlotIndex = 0) (defined in Layer.cpp:422)
- void VerifyLayerConnections(unsigned int expectedConnections, const CheckLocation& location) const (defined in Layer.cpp:378)
- const InputSlot& GetInputSlot(unsigned int index) const override: Get a const input slot handle by slot index. (defined in Layer.hpp:324)
- ArgMinMaxDescriptor: An ArgMinMaxDescriptor for ArgMinMaxLayer. (defined in Descriptors.hpp:67)
- unsigned int GetUnsignedAxis(const unsigned int inputDimension, const int axis)
- ArgMinMaxLayer: This layer represents an ArgMinMax operation.
- #define ARMNN_ASSERT(COND) (defined in Assert.hpp:14)
- virtual void ExecuteStrategy(const IConnectableLayer* layer, const armnn::BaseDescriptor& descriptor, const std::vector<armnn::ConstTensor>& constants, const char* name, const armnn::LayerBindingId id = 0) = 0
- virtual std::unique_ptr<IWorkload> CreateWorkload(const IWorkloadFactory& factory) const override: Makes a workload for the ArgMinMax type.
- #define CHECK_LOCATION() (defined in Exceptions.hpp:203)
- void SetAdditionalInfo(QueueDescriptor& descriptor) const (defined in Layer.cpp:274)
- unsigned int GetNumDimensions() const: Function that returns the tensor rank. (defined in Tensor.cpp:174)
- WorkloadInfo PrepInfoAndDesc(QueueDescriptor& descriptor) const: Helper function to reduce duplication in *Layer::CreateWorkload.
- const OutputSlot& GetOutputSlot(unsigned int index = 0) const override: Get the const output slot handle by slot index. (defined in Layer.hpp:326)
- virtual const TensorInfo& GetTensorInfo() const = 0
- void ValidateTensorShapesFromInputs() override: Checks whether the input tensor shape(s) will lead to a valid configuration of ArgMinMaxLayer.
- int m_Axis: Axis to reduce across the input tensor. (defined in Descriptors.hpp:83)
- const char* GetName() const override: Returns the name of the layer. (defined in Layer.hpp:319)
- std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override: Infers the output shape from a given input shape and axis parameter.
- virtual std::unique_ptr<IWorkload> CreateWorkload(LayerType type, const QueueDescriptor& descriptor, const WorkloadInfo& info) const
- const TensorInfo& GetTensorInfo() const override (defined in Layer.cpp:92)
- void ExecuteStrategy(IStrategy& strategy) const override: Apply a visitor to this layer.
- ShapeInferenceMethod m_ShapeInferenceMethod (defined in Layer.hpp:423)
- LayerType: When adding a new layer, adapt also the LastLayer enum value in the enum class LayerType below... (defined in Types.hpp:468)
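For context, here is a hedged sketch of how an ArgMinMax layer is normally reached from ArmNN's public API: fill in an ArgMinMaxDescriptor (m_Function selects Min or Max, m_Axis selects the dimension to reduce) and pass it to INetwork::AddArgMinMaxLayer. The tensor shapes, binding ids, layer names and the Signed32 output type below are illustrative assumptions, not anything mandated by this file.

// Hedged usage sketch: building a tiny network that contains one ArgMinMax layer.
// Shapes, binding ids and names are illustrative only.
#include <armnn/Descriptors.hpp>
#include <armnn/INetwork.hpp>
#include <armnn/Tensor.hpp>
#include <armnn/Types.hpp>

int main()
{
    using namespace armnn;

    INetworkPtr network = INetwork::Create();

    // Take the index of the maximum value along the last axis of an NHWC tensor.
    ArgMinMaxDescriptor descriptor;
    descriptor.m_Function = ArgMinMaxFunction::Max;
    descriptor.m_Axis     = 3;

    IConnectableLayer* input  = network->AddInputLayer(0, "input");
    IConnectableLayer* argMax = network->AddArgMinMaxLayer(descriptor, "argmax");
    IConnectableLayer* output = network->AddOutputLayer(0, "output");

    input->GetOutputSlot(0).Connect(argMax->GetInputSlot(0));
    argMax->GetOutputSlot(0).Connect(output->GetInputSlot(0));

    // The ArgMinMax output drops the reduced axis: [1, 20, 20, 3] -> [1, 20, 20],
    // holding integer indices (assumed Signed32 here).
    input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 20, 20, 3 }, DataType::Float32));
    argMax->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 20, 20 }, DataType::Signed32));

    return 0;
}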