ArmNN 20.05 — source listing of NeonArgMinMaxWorkload.cpp.
Go to the documentation of this file.
1 //
2 // Copyright © 2019 Arm Ltd. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
7 #include "NeonWorkloadUtils.hpp"
8 
10 
12 
15 
16 #include <arm_compute/runtime/NEON/functions/NEArgMinMaxLayer.h>
17 
namespace
{
// Translate an armnn axis index (counted from the outermost dimension)
// into the ACL convention, which counts axes from the innermost
// dimension: aclAxis = (rank - 1) - armnnAxis.
unsigned int CalcAclAxis(unsigned int numDimensions, unsigned int axisIndex)
{
    const unsigned int highestDimIndex = numDimensions - 1;
    return highestDimIndex - axisIndex;
}

} //namespace
26 
27 namespace armnn
28 {
29 
31  const TensorInfo& output,
32  const ArgMinMaxDescriptor& descriptor)
33 {
34  const arm_compute::TensorInfo aclInput = armcomputetensorutils::BuildArmComputeTensorInfo(input);
35  const arm_compute::TensorInfo aclOutput = armcomputetensorutils::BuildArmComputeTensorInfo(output);
36 
37  auto numDims = input.GetNumDimensions();
38  auto unsignedAxis = armnnUtils::GetUnsignedAxis(numDims, descriptor.m_Axis);
39  int aclAxis = boost::numeric_cast<int>(CalcAclAxis(numDims, unsignedAxis));
40 
41  if (descriptor.m_Function == ArgMinMaxFunction::Max)
42  {
43  return arm_compute::NEArgMinMaxLayer::validate(&aclInput, aclAxis, &aclOutput,
44  arm_compute::ReductionOperation::ARG_IDX_MAX);
45  }
46  else
47  {
48  return arm_compute::NEArgMinMaxLayer::validate(&aclInput, aclAxis, &aclOutput,
49  arm_compute::ReductionOperation::ARG_IDX_MIN);
50  }
51 }
52 
53 
55  const WorkloadInfo& info)
56  : BaseWorkload<ArgMinMaxQueueDescriptor>(descriptor, info)
57 {
58  arm_compute::ITensor& input = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
59  arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
60 
61  auto numDims = info.m_InputTensorInfos[0].GetNumDimensions();
62  auto unsignedAxis = armnnUtils::GetUnsignedAxis(numDims, m_Data.m_Parameters.m_Axis);
63  int aclAxis = boost::numeric_cast<int>(CalcAclAxis(numDims, unsignedAxis));
64 
65  auto layer = std::make_unique<arm_compute::NEArgMinMaxLayer>();
66 
68  {
69  layer->configure(&input, aclAxis, &output, arm_compute::ReductionOperation::ARG_IDX_MAX);
70  }
71  else
72  {
73  layer->configure(&input, aclAxis, &output, arm_compute::ReductionOperation::ARG_IDX_MIN);
74  }
75 
76  m_ArgMinMaxLayer.reset(layer.release());
77 }
78 
80 {
81  ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonArgMinMaxWorkload_Execute");
82  m_ArgMinMaxLayer->run();
83 }
84 
85 } //namespace armnn
86 
const ArgMinMaxQueueDescriptor m_Data
Definition: Workload.hpp:46
#define ARMNN_SCOPED_PROFILING_EVENT_NEON(name)
ArgMinMaxFunction m_Function
Specify if the function is to find Min or Max.
Definition: Descriptors.hpp:64
Copyright (c) 2020 ARM Limited.
std::vector< TensorInfo > m_InputTensorInfos
An ArgMinMaxDescriptor for ArgMinMaxLayer.
Definition: Descriptors.hpp:51
unsigned int GetUnsignedAxis(const unsigned int inputDimension, const int axis)
Status: enumeration
Definition: Types.hpp:26
std::enable_if_t< std::is_unsigned< Source >::value &&std::is_unsigned< Dest >::value, Dest > numeric_cast(Source source)
Definition: NumericCast.hpp:33
arm_compute::Status NeonArgMinMaxWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const ArgMinMaxDescriptor &descriptor)
std::vector< ITensorHandle * > m_Outputs
Contains information about inputs and outputs to a layer.
int m_Axis
Axis to reduce across the input tensor.
Definition: Descriptors.hpp:66
std::vector< ITensorHandle * > m_Inputs
NeonArgMinMaxWorkload(const ArgMinMaxQueueDescriptor &descriptor, const WorkloadInfo &info)
virtual void Execute() const override
unsigned int GetNumDimensions() const
Definition: Tensor.hpp:92