ArmNN 24.02
ClDequantizeWorkload.cpp
//
// Copyright © 2019-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "ClDequantizeWorkload.hpp"
#include "ClWorkloadUtils.hpp"

#include <armnn/backends/TensorHandle.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>
#include <aclCommon/ArmComputeTensorUtils.hpp>

#include <arm_compute/core/Types.h>

#include <cl/ClLayerSupport.hpp>
#include <cl/ClTensorHandle.hpp>

namespace armnn
{
using namespace armcomputetensorutils;

arm_compute::Status ClDequantizeWorkloadValidate(const TensorInfo& input, const TensorInfo& output)
{
    const arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(input);
    const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output);

    return arm_compute::CLDequantizationLayer::validate(&aclInputInfo, &aclOutputInfo);
}
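
// Usage sketch (not part of this file): a caller, such as the CL backend's layer-support
// check, would typically build the two TensorInfos and query ClDequantizeWorkloadValidate
// before creating the workload. The shape and quantisation parameters below are
// illustrative only.
//
//     armnn::TensorInfo inputInfo({ 1, 4 }, armnn::DataType::QAsymmU8, 0.5f, 10);
//     armnn::TensorInfo outputInfo({ 1, 4 }, armnn::DataType::Float32);
//     arm_compute::Status status = armnn::ClDequantizeWorkloadValidate(inputInfo, outputInfo);
//     bool supported = (status.error_code() == arm_compute::ErrorCode::OK);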

ClDequantizeWorkload::ClDequantizeWorkload(const DequantizeQueueDescriptor& descriptor,
                                           const WorkloadInfo& workloadInfo,
                                           const arm_compute::CLCompileContext& clCompileContext)
                                           : ClBaseWorkload<DequantizeQueueDescriptor>(descriptor, workloadInfo)
{
    m_Data.ValidateInputsOutputs("ClDequantizeWorkload", 1, 1);

    arm_compute::ICLTensor& input = armnn::PolymorphicPointerDowncast<IClTensorHandle>(
            m_Data.m_Inputs[0])->GetTensor();

    arm_compute::ICLTensor& output = armnn::PolymorphicPointerDowncast<IClTensorHandle>(
            m_Data.m_Outputs[0])->GetTensor();

    m_Layer.reset(new arm_compute::CLDequantizationLayer());
    {
        ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClDequantizeWorkload_configure");
        m_Layer->configure(clCompileContext, &input, &output);
    }
    m_Layer->prepare();
}
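
// Construction sketch (assumption, not taken from this file): the CL workload factory
// normally populates the queue descriptor and WorkloadInfo before invoking this
// constructor. inputHandle and outputHandle are hypothetical IClTensorHandle pointers
// obtained from the CL backend's tensor handle factory; inputInfo/outputInfo are the
// TensorInfos used in the validate sketch above.
//
//     armnn::DequantizeQueueDescriptor descriptor;
//     descriptor.m_Inputs  = { inputHandle };   // quantised input tensor handle
//     descriptor.m_Outputs = { outputHandle };  // float output tensor handle
//     armnn::WorkloadInfo info;
//     info.m_InputTensorInfos  = { inputInfo };
//     info.m_OutputTensorInfos = { outputInfo };
//     auto workload = std::make_unique<armnn::ClDequantizeWorkload>(
//         descriptor, info, arm_compute::CLCompileContext{});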

void ClDequantizeWorkload::Execute() const
{
    if (m_Layer)
    {
        ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClDequantizeWorkload_Execute");
        m_Layer->run();
    }
}

} // namespace armnn
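
For context, a minimal execution sketch, assuming a workload built as in the construction sketch above and an OpenCL runtime that has already been initialised (ArmNN's ClContextControl normally does this; a standalone test might call arm_compute::CLScheduler::get().default_init()):

    workload->Execute();                     // runs the configured CLDequantizationLayer
    arm_compute::CLScheduler::get().sync();  // optional: wait for the queued OpenCL work to finish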