ArmNN
 20.02
NeonArgMinMaxWorkload.cpp
Go to the documentation of this file.
1 //
2 // Copyright © 2019 Arm Ltd. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
7 #include "NeonWorkloadUtils.hpp"
8 
10 
12 
14 
15 #include <arm_compute/runtime/NEON/functions/NEArgMinMaxLayer.h>
16 
namespace
{
// Maps an ArmNN axis index (dimensions counted from the outermost) onto the
// ACL convention, which counts dimensions from the innermost outwards.
unsigned int CalcAclAxis(unsigned int numDimensions, unsigned int axisIndex)
{
    const unsigned int highestDimIndex = numDimensions - 1;
    return highestDimIndex - axisIndex;
}

} //namespace
25 
26 namespace armnn
27 {
28 
30  const TensorInfo& output,
31  const ArgMinMaxDescriptor& descriptor)
32 {
33  const arm_compute::TensorInfo aclInput = armcomputetensorutils::BuildArmComputeTensorInfo(input);
34  const arm_compute::TensorInfo aclOutput = armcomputetensorutils::BuildArmComputeTensorInfo(output);
35 
36  auto numDims = input.GetNumDimensions();
37  auto unsignedAxis = armnnUtils::GetUnsignedAxis(numDims, descriptor.m_Axis);
38  int aclAxis = boost::numeric_cast<int>(CalcAclAxis(numDims, unsignedAxis));
39 
40  if (descriptor.m_Function == ArgMinMaxFunction::Max)
41  {
42  return arm_compute::NEArgMinMaxLayer::validate(&aclInput, aclAxis, &aclOutput,
43  arm_compute::ReductionOperation::ARG_IDX_MAX);
44  }
45  else
46  {
47  return arm_compute::NEArgMinMaxLayer::validate(&aclInput, aclAxis, &aclOutput,
48  arm_compute::ReductionOperation::ARG_IDX_MIN);
49  }
50 }
51 
52 
54  const WorkloadInfo& info)
55  : BaseWorkload<ArgMinMaxQueueDescriptor>(descriptor, info)
56 {
57  arm_compute::ITensor& input = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
58  arm_compute::ITensor& output = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
59 
60  auto numDims = info.m_InputTensorInfos[0].GetNumDimensions();
61  auto unsignedAxis = armnnUtils::GetUnsignedAxis(numDims, m_Data.m_Parameters.m_Axis);
62  int aclAxis = boost::numeric_cast<int>(CalcAclAxis(numDims, unsignedAxis));
63 
64  auto layer = std::make_unique<arm_compute::NEArgMinMaxLayer>();
65 
67  {
68  layer->configure(&input, aclAxis, &output, arm_compute::ReductionOperation::ARG_IDX_MAX);
69  }
70  else
71  {
72  layer->configure(&input, aclAxis, &output, arm_compute::ReductionOperation::ARG_IDX_MIN);
73  }
74 
75  m_ArgMinMaxLayer.reset(layer.release());
76 }
77 
79 {
80  ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonArgMinMaxWorkload_Execute");
81  m_ArgMinMaxLayer->run();
82 }
83 
84 } //namespace armnn
85 
const ArgMinMaxQueueDescriptor m_Data
Definition: Workload.hpp:46
#define ARMNN_SCOPED_PROFILING_EVENT_NEON(name)
ArgMinMaxFunction m_Function
Specify if the function is to find Min or Max.
Definition: Descriptors.hpp:56
Copyright (c) 2020 ARM Limited.
std::vector< TensorInfo > m_InputTensorInfos
An ArgMinMaxDescriptor for ArgMinMaxLayer.
Definition: Descriptors.hpp:43
unsigned int GetUnsignedAxis(const unsigned int inputDimension, const int axis)
Status
enumeration
Definition: Types.hpp:26
std::enable_if_t< std::is_unsigned< Source >::value &&std::is_unsigned< Dest >::value, Dest > numeric_cast(Source source)
Definition: NumericCast.hpp:33
arm_compute::Status NeonArgMinMaxWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const ArgMinMaxDescriptor &descriptor)
std::vector< ITensorHandle * > m_Outputs
Contains information about inputs and outputs to a layer.
int m_Axis
Axis to reduce across the input tensor.
Definition: Descriptors.hpp:58
std::vector< ITensorHandle * > m_Inputs
NeonArgMinMaxWorkload(const ArgMinMaxQueueDescriptor &descriptor, const WorkloadInfo &info)
virtual void Execute() const override
unsigned int GetNumDimensions() const
Definition: Tensor.hpp:92