ArmNN
 20.02
ClArgMinMaxWorkload.cpp
//
// Copyright © 2019 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "ClArgMinMaxWorkload.hpp"
#include "ClWorkloadUtils.hpp"

#include <aclCommon/ArmComputeTensorUtils.hpp>

#include <armnnUtils/TensorUtils.hpp>

#include <cl/ClTensorHandle.hpp>
#include <cl/ClLayerSupport.hpp>

namespace
{
unsigned int CalcAclAxis(unsigned int numDimensions, unsigned int axisIndex)
{
    return (numDimensions - axisIndex) - 1;
}

} //namespace
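// Note on the axis convention (illustrative example, values assumed): ArmNN counts
// axes from the outermost dimension, while the Compute Library counts from the
// innermost one, so CalcAclAxis reverses the index. For a 4-dimensional tensor,
// an ArmNN axis of 1 therefore maps to Compute Library axis (4 - 1) - 1 = 2.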

namespace armnn
{

arm_compute::Status ClArgMinMaxWorkloadValidate(const TensorInfo& input,
                                                const TensorInfo& output,
                                                const ArgMinMaxDescriptor& descriptor)
{
    const arm_compute::TensorInfo aclInput  = armcomputetensorutils::BuildArmComputeTensorInfo(input);
    const arm_compute::TensorInfo aclOutput = armcomputetensorutils::BuildArmComputeTensorInfo(output);

    auto numDims = input.GetNumDimensions();
    auto unsignedAxis = armnnUtils::GetUnsignedAxis(numDims, descriptor.m_Axis);
    int aclAxis = boost::numeric_cast<int>(CalcAclAxis(numDims, unsignedAxis));

    if (descriptor.m_Function == ArgMinMaxFunction::Max)
    {
        return arm_compute::CLArgMinMaxLayer::validate(&aclInput, aclAxis, &aclOutput,
                                                       arm_compute::ReductionOperation::ARG_IDX_MAX);
    }
    else
    {
        return arm_compute::CLArgMinMaxLayer::validate(&aclInput, aclAxis, &aclOutput,
                                                       arm_compute::ReductionOperation::ARG_IDX_MIN);
    }
}

ClArgMinMaxWorkload::ClArgMinMaxWorkload(const ArgMinMaxQueueDescriptor& descriptor,
                                         const WorkloadInfo& info)
    : BaseWorkload<ArgMinMaxQueueDescriptor>(descriptor, info)
{
    arm_compute::ICLTensor& input  = static_cast<IClTensorHandle*>(this->m_Data.m_Inputs[0])->GetTensor();
    arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(this->m_Data.m_Outputs[0])->GetTensor();

    auto numDims = info.m_InputTensorInfos[0].GetNumDimensions();
    auto unsignedAxis = armnnUtils::GetUnsignedAxis(numDims, m_Data.m_Parameters.m_Axis);
    int aclAxis = boost::numeric_cast<int>(CalcAclAxis(numDims, unsignedAxis));

    if (m_Data.m_Parameters.m_Function == ArgMinMaxFunction::Max)
    {
        m_ArgMinMaxLayer.configure(&input, aclAxis, &output, arm_compute::ReductionOperation::ARG_IDX_MAX);
    }
    else
    {
        m_ArgMinMaxLayer.configure(&input, aclAxis, &output, arm_compute::ReductionOperation::ARG_IDX_MIN);
    }
}
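// Execute() below runs the configured CLArgMinMaxLayer through RunClFunction(),
// which enqueues the Compute Library kernels on the CL command queue, inside a
// scoped CL profiling event.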
void ClArgMinMaxWorkload::Execute() const
{
    ARMNN_SCOPED_PROFILING_EVENT_CL("ClArgMinMaxWorkload_Execute");
    RunClFunction(m_ArgMinMaxLayer, CHECK_LOCATION());
}

} //namespace armnn
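A minimal usage sketch follows. It is not part of the listing above: the shapes, the axis, and the helper name IsClArgMaxSupportedExample are illustrative assumptions, and it additionally assumes <armnn/Tensor.hpp> and <armnn/Descriptors.hpp> are available. It shows the usual order of operations in the CL backend, where the configuration is validated against the Compute Library before a workload would be built and executed.

// Illustrative sketch (assumed values and helper name): ask the Compute Library
// whether an ArgMax over the channel axis of an NCHW float tensor is supported.
bool IsClArgMaxSupportedExample()
{
    armnn::TensorInfo inputInfo({ 1, 3, 224, 224 }, armnn::DataType::Float32);
    armnn::TensorInfo outputInfo({ 1, 224, 224 }, armnn::DataType::Signed32);

    armnn::ArgMinMaxDescriptor argMinMaxDesc;
    argMinMaxDesc.m_Function = armnn::ArgMinMaxFunction::Max;
    argMinMaxDesc.m_Axis     = 1;

    arm_compute::Status aclStatus =
        armnn::ClArgMinMaxWorkloadValidate(inputInfo, outputInfo, argMinMaxDesc);

    // When validation succeeds, the backend constructs the workload from a
    // populated ArgMinMaxQueueDescriptor and WorkloadInfo and calls Execute();
    // that step is omitted here because it needs live OpenCL tensor handles.
    return aclStatus.error_code() == arm_compute::ErrorCode::OK;
}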