ArmNN 23.08
ClMaximumWorkload.cpp
//
// Copyright © 2018,2020-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "ClMaximumWorkload.hpp"

#include "ClWorkloadUtils.hpp"

#include <aclCommon/ArmComputeUtils.hpp>
#include <aclCommon/ArmComputeTensorUtils.hpp>

#include <armnn/backends/TensorHandle.hpp>

#include <cl/ClLayerSupport.hpp>
#include <cl/ClTensorHandle.hpp>

namespace armnn
{

using namespace armcomputetensorutils;

arm_compute::Status ClMaximumWorkloadValidate(const TensorInfo& input0,
                                              const TensorInfo& input1,
                                              const TensorInfo& output)
{
    const arm_compute::TensorInfo aclInput0Info = BuildArmComputeTensorInfo(input0);
    const arm_compute::TensorInfo aclInput1Info = BuildArmComputeTensorInfo(input1);
    const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output);

    const arm_compute::Status aclStatus = arm_compute::CLElementwiseMax::validate(&aclInput0Info,
                                                                                  &aclInput1Info,
                                                                                  &aclOutputInfo);

    return aclStatus;
}
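On the CL backend, support queries typically forward tensor infos to a validate helper like the one above before any workload object is built. The sketch below shows a hypothetical call site, not code from this file: the helper name IsMaximumSupportedOnCl is invented for illustration, and only arm_compute::Status::error_code() and error_description() are assumed from Compute Library.

    #include "ClMaximumWorkload.hpp"          // declaration of ClMaximumWorkloadValidate
    #include <armnn/Tensor.hpp>
    #include <arm_compute/core/Error.h>
    #include <string>

    // Hypothetical helper: ask Compute Library whether an elementwise maximum
    // with these tensor shapes/data types can run on the CL (GPU) backend.
    bool IsMaximumSupportedOnCl(const armnn::TensorInfo& input0,
                                const armnn::TensorInfo& input1,
                                const armnn::TensorInfo& output,
                                std::string& reasonIfUnsupported)
    {
        const arm_compute::Status status = armnn::ClMaximumWorkloadValidate(input0, input1, output);
        if (status.error_code() != arm_compute::ErrorCode::OK)
        {
            reasonIfUnsupported = status.error_description();
            return false;
        }
        return true;
    }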

ClMaximumWorkload::ClMaximumWorkload(const MaximumQueueDescriptor& descriptor,
                                     const WorkloadInfo& info,
                                     const arm_compute::CLCompileContext& clCompileContext)
    : ClBaseWorkload<MaximumQueueDescriptor>(descriptor, info)
{
    m_Data.ValidateInputsOutputs("ClMaximumWorkload", 2, 1);

    arm_compute::ICLTensor& input0 = static_cast<IClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
    arm_compute::ICLTensor& input1 = static_cast<IClTensorHandle*>(m_Data.m_Inputs[1])->GetTensor();
    arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();

    {
        ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClMaximumWorkload_configure");
        m_MaximumLayer.configure(clCompileContext, &input0, &input1, &output);
    }
}
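The m_MaximumLayer member configured above is declared in the companion header, ClMaximumWorkload.hpp. A rough sketch of that declaration, reconstructed from how this file uses it rather than copied from the header, would be:

    // Illustrative sketch of the class declaration, not the verbatim header.
    class ClMaximumWorkload : public ClBaseWorkload<MaximumQueueDescriptor>
    {
    public:
        ClMaximumWorkload(const MaximumQueueDescriptor& descriptor,
                          const WorkloadInfo& info,
                          const arm_compute::CLCompileContext& clCompileContext);
        void Execute() const override;

    private:
        // Assumed mutable: Execute() is const, while Compute Library function
        // objects expose a non-const run().
        mutable arm_compute::CLElementwiseMax m_MaximumLayer;
    };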

void ClMaximumWorkload::Execute() const
{
    ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClMaximumWorkload_Execute");
    RunClFunction(m_MaximumLayer, CHECK_LOCATION());
}

} // namespace armnn
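For reference, the Compute Library function this workload wraps can also be driven directly, outside of ArmNN. The following is a minimal standalone sketch of arm_compute::CLElementwiseMax using the same validate/configure/run sequence as the workload above; it assumes a working OpenCL device with default CLScheduler initialisation, and the shapes, data types, and absence of real input data are placeholders.

    #include <arm_compute/core/Error.h>
    #include <arm_compute/core/TensorInfo.h>
    #include <arm_compute/core/Types.h>
    #include <arm_compute/runtime/CL/CLScheduler.h>
    #include <arm_compute/runtime/CL/CLTensor.h>
    #include <arm_compute/runtime/CL/functions/CLElementwiseOperations.h>

    int main()
    {
        using namespace arm_compute;

        // Initialise the default OpenCL context/queue used by CL functions.
        CLScheduler::get().default_init();

        // Two inputs and one output with matching shape and data type.
        CLTensor input0, input1, output;
        const TensorShape shape(16U, 16U);
        input0.allocator()->init(TensorInfo(shape, 1, DataType::F32));
        input1.allocator()->init(TensorInfo(shape, 1, DataType::F32));
        output.allocator()->init(TensorInfo(shape, 1, DataType::F32));

        // The same static check ClMaximumWorkloadValidate() performs above.
        const Status status = CLElementwiseMax::validate(input0.info(), input1.info(), output.info());
        if (status.error_code() != ErrorCode::OK)
        {
            return 1;
        }

        // Configure once, then run; the workload does the same via configure()
        // in its constructor and RunClFunction() in Execute().
        CLElementwiseMax maxLayer;
        maxLayer.configure(&input0, &input1, &output);

        input0.allocator()->allocate();
        input1.allocator()->allocate();
        output.allocator()->allocate();

        maxLayer.run();
        CLScheduler::get().sync();
        return 0;
    }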