ArmNN 22.02
ClDivisionWorkload.cpp
//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "ClDivisionWorkload.hpp"

#include <aclCommon/ArmComputeUtils.hpp>
#include <backendsCommon/TensorHandle.hpp>

#include <cl/ClTensorHandle.hpp>

#include "ClWorkloadUtils.hpp"

namespace armnn
{

// Ask the Compute Library whether it can perform this division, with an optional fused activation.
arm_compute::Status ClDivisionWorkloadValidate(const TensorInfo& input0,
                                               const TensorInfo& input1,
                                               const TensorInfo& output,
                                               const ActivationDescriptor* activationDescriptor)
{
    const arm_compute::TensorInfo aclInput1 = armcomputetensorutils::BuildArmComputeTensorInfo(input0);
    const arm_compute::TensorInfo aclInput2 = armcomputetensorutils::BuildArmComputeTensorInfo(input1);
    const arm_compute::TensorInfo aclOutput = armcomputetensorutils::BuildArmComputeTensorInfo(output);

    const arm_compute::ActivationLayerInfo activationInfo = ConvertActivationDescriptorToAclActivationLayerInfo(
        activationDescriptor);

    return arm_compute::CLArithmeticDivision::validate(&aclInput1, &aclInput2, &aclOutput, activationInfo);
}

// Wire the workload's CL tensor handles into an ACL arithmetic-division function and configure it.
ClDivisionWorkload::ClDivisionWorkload(const DivisionQueueDescriptor& descriptor,
                                       const WorkloadInfo& info,
                                       const arm_compute::CLCompileContext& clCompileContext)
    : ClBaseWorkload<DivisionQueueDescriptor>(descriptor, info)
{
    m_Data.ValidateInputsOutputs("ClDivisionWorkload", 2, 1);

    arm_compute::ICLTensor& input0 = static_cast<IClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
    arm_compute::ICLTensor& input1 = static_cast<IClTensorHandle*>(m_Data.m_Inputs[1])->GetTensor();
    arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();

    // Any fused activation is carried in the descriptor's additional info.
    const arm_compute::ActivationLayerInfo activationInfo = ConvertAdditionalInfoToAclActivationLayerInfo(descriptor);

    {
        ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClDivisionWorkload_configure");
        m_ArithmeticDivision.configure(clCompileContext, &input0, &input1, &output, activationInfo);
    }
}

// Run the configured ACL function; the event is profiled under this workload's GUID.
void ClDivisionWorkload::Execute() const
{
    ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClDivisionWorkload_Execute", this->GetGuid());
    RunClFunction(m_ArithmeticDivision, CHECK_LOCATION());
}

} //namespace armnn
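
ClDivisionWorkloadValidate wraps the Compute Library's own validation, so it can be called as a stand-alone support check before any workload is created. Below is a minimal sketch of such a check; the helper name IsDivisionSupportedOnCl and the choice of no fused activation (nullptr descriptor) are illustrative assumptions, not part of the ArmNN sources.

#include <armnn/Tensor.hpp>

#include "ClDivisionWorkload.hpp"

// Hypothetical helper: returns true if the CL backend can execute an
// element-wise division with the given tensor shapes and data types.
bool IsDivisionSupportedOnCl(const armnn::TensorInfo& input0,
                             const armnn::TensorInfo& input1,
                             const armnn::TensorInfo& output)
{
    // nullptr means no fused activation is requested.
    const arm_compute::Status status =
        armnn::ClDivisionWorkloadValidate(input0, input1, output, nullptr);

    // arm_compute::Status carries an error code and a description;
    // ErrorCode::OK means the division is supported.
    return status.error_code() == arm_compute::ErrorCode::OK;
}

Within ArmNN itself, an equivalent validation call is made by the CL backend's layer-support code before the workload factory instantiates ClDivisionWorkload.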