// ArmNN 21.02 — NeonArgMinMaxWorkload.cpp (Doxygen source listing)
//
// Copyright © 2019 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "NeonArgMinMaxWorkload.hpp"
#include "NeonWorkloadUtils.hpp"

#include <aclCommon/ArmComputeTensorUtils.hpp>

#include <armnnUtils/TensorUtils.hpp>

#include <armnn/utility/NumericCast.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>

#include <arm_compute/runtime/NEON/functions/NEArgMinMaxLayer.h>

namespace
{

/// Translate an ArmNN axis index into the Arm Compute Library convention.
/// ACL numbers dimensions starting from the fastest-changing one, so the
/// ArmNN index is mirrored across the total dimension count.
unsigned int CalcAclAxis(unsigned int numDimensions, unsigned int axisIndex)
{
    return numDimensions - (axisIndex + 1);
}

} //namespace
27 
namespace armnn
{
30 
32  const TensorInfo& output,
33  const ArgMinMaxDescriptor& descriptor)
34 {
35  const arm_compute::TensorInfo aclInput = armcomputetensorutils::BuildArmComputeTensorInfo(input);
36  const arm_compute::TensorInfo aclOutput = armcomputetensorutils::BuildArmComputeTensorInfo(output);
37 
38  auto numDims = input.GetNumDimensions();
39  auto unsignedAxis = armnnUtils::GetUnsignedAxis(numDims, descriptor.m_Axis);
40  int aclAxis = armnn::numeric_cast<int>(CalcAclAxis(numDims, unsignedAxis));
41 
42  if (descriptor.m_Function == ArgMinMaxFunction::Max)
43  {
44  return arm_compute::NEArgMinMaxLayer::validate(&aclInput, aclAxis, &aclOutput,
45  arm_compute::ReductionOperation::ARG_IDX_MAX);
46  }
47  else
48  {
49  return arm_compute::NEArgMinMaxLayer::validate(&aclInput, aclAxis, &aclOutput,
50  arm_compute::ReductionOperation::ARG_IDX_MIN);
51  }
52 }
53 
54 
56  const WorkloadInfo& info)
57  : BaseWorkload<ArgMinMaxQueueDescriptor>(descriptor, info)
58 {
59  arm_compute::ITensor& input = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
60  arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
61 
62  auto numDims = info.m_InputTensorInfos[0].GetNumDimensions();
63  auto unsignedAxis = armnnUtils::GetUnsignedAxis(numDims, m_Data.m_Parameters.m_Axis);
64  int aclAxis = armnn::numeric_cast<int>(CalcAclAxis(numDims, unsignedAxis));
65 
66  auto layer = std::make_unique<arm_compute::NEArgMinMaxLayer>();
67 
69  {
70  layer->configure(&input, aclAxis, &output, arm_compute::ReductionOperation::ARG_IDX_MAX);
71  }
72  else
73  {
74  layer->configure(&input, aclAxis, &output, arm_compute::ReductionOperation::ARG_IDX_MIN);
75  }
76 
77  m_ArgMinMaxLayer.reset(layer.release());
78 }
79 
81 {
82  ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonArgMinMaxWorkload_Execute");
83  m_ArgMinMaxLayer->run();
84 }
85 
} //namespace armnn
87 
/* Doxygen cross-references for the symbols used in this listing:
 * - const ArgMinMaxQueueDescriptor m_Data                     (Workload.hpp:46)
 * - #define ARMNN_SCOPED_PROFILING_EVENT_NEON(name)
 * - ArgMinMaxFunction m_Function — specify whether the function is to find
 *   Min or Max                                                (Descriptors.hpp:70)
 * - Copyright (c) 2021 ARM Limited and Contributors.
 * - std::vector<TensorInfo> m_InputTensorInfos
 * - ArgMinMaxDescriptor — descriptor for ArgMinMaxLayer       (Descriptors.hpp:56)
 * - unsigned int GetUnsignedAxis(const unsigned int inputDimension, const int axis)
 * - Status — enumeration                                      (Types.hpp:26)
 * - arm_compute::Status NeonArgMinMaxWorkloadValidate(const TensorInfo& input,
 *       const TensorInfo& output, const ArgMinMaxDescriptor& descriptor)
 * - std::vector<ITensorHandle*> m_Outputs
 * - std::enable_if_t<std::is_unsigned<Source>::value && std::is_unsigned<Dest>::value, Dest>
 *       numeric_cast(Source source)                           (NumericCast.hpp:35)
 * - WorkloadInfo — contains information about inputs and outputs to a layer.
 * - int m_Axis — axis to reduce across the input tensor       (Descriptors.hpp:72)
 * - std::vector<ITensorHandle*> m_Inputs
 * - NeonArgMinMaxWorkload(const ArgMinMaxQueueDescriptor& descriptor,
 *       const WorkloadInfo& info)
 * - virtual void Execute() const override
 * - unsigned int GetNumDimensions() const                     (Tensor.hpp:191)
 */