ArmNN 24.02
ClBatchNormalizationFloatWorkload Class Reference

#include <ClBatchNormalizationFloatWorkload.hpp>

Inheritance diagram for ClBatchNormalizationFloatWorkload:
Collaboration diagram for ClBatchNormalizationFloatWorkload:

Public Member Functions

 ClBatchNormalizationFloatWorkload (const BatchNormalizationQueueDescriptor &descriptor, const WorkloadInfo &info, const arm_compute::CLCompileContext &clCompileContext)
 
void Execute () const override
 
void ReplaceInputTensorHandle (ITensorHandle *tensorHandle, unsigned int slot) override
 
void ReplaceOutputTensorHandle (ITensorHandle *tensorHandle, unsigned int slot) override
 
- Public Member Functions inherited from TypedWorkload< QueueDescriptor, DataTypes >
 TypedWorkload (const QueueDescriptor &descriptor, const WorkloadInfo &info)
 
- Public Member Functions inherited from BaseWorkload< QueueDescriptor >
 BaseWorkload (const QueueDescriptor &descriptor, const WorkloadInfo &info)
 
virtual const std::string & GetName () const override
 
void ExecuteAsync (ExecutionData &executionData) override
 
void PostAllocationConfigure () override
 
const QueueDescriptor & GetData () const
 
arm::pipe::ProfilingGuid GetGuid () const final
 
virtual bool SupportsTensorHandleReplacement () const override
 
- Public Member Functions inherited from IWorkload
virtual ~IWorkload ()
 
virtual void RegisterDebugCallback (const DebugCallbackFunction &)
 
virtual armnn::Optional< armnn::MemoryRequirements > GetMemoryRequirements ()
 

Additional Inherited Members

- Protected Attributes inherited from BaseWorkload< QueueDescriptor >
QueueDescriptor m_Data
 
const arm::pipe::ProfilingGuid m_Guid
 
const std::string m_Name
 

Detailed Description

Definition at line 25 of file ClBatchNormalizationFloatWorkload.hpp.
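For orientation, below is a minimal, hypothetical sketch of how such a workload might be constructed and run, using only the members documented on this page. The helper function name, the tensor-handle parameters and the chosen parameter values are assumptions for illustration; in ArmNN the descriptor, WorkloadInfo and tensor handles are normally produced by the graph loader and the CL workload factory rather than built by hand.

#include "ClBatchNormalizationFloatWorkload.hpp"

// Hypothetical helper for illustration only: the handles would normally come from
// the CL tensor-handle factory, and the constant-tensor handles from the loaded network.
void RunBatchNormOnce(armnn::ITensorHandle* inputHandle,
                      armnn::ITensorHandle* outputHandle,
                      const armnn::ConstTensorHandle* meanHandle,
                      const armnn::ConstTensorHandle* varianceHandle,
                      const armnn::ConstTensorHandle* betaHandle,
                      const armnn::ConstTensorHandle* gammaHandle,
                      const armnn::WorkloadInfo& info)
{
    armnn::BatchNormalizationQueueDescriptor descriptor;
    descriptor.m_Parameters.m_Eps        = 0.001f;                // assumed epsilon value
    descriptor.m_Parameters.m_DataLayout = armnn::DataLayout::NCHW;
    descriptor.m_Mean     = meanHandle;
    descriptor.m_Variance = varianceHandle;
    descriptor.m_Beta     = betaHandle;
    descriptor.m_Gamma    = gammaHandle;
    descriptor.m_Inputs.push_back(inputHandle);                   // exactly one input
    descriptor.m_Outputs.push_back(outputHandle);                 // exactly one output

    // A default-constructed compile context is assumed here; backends typically share one.
    arm_compute::CLCompileContext clCompileContext;

    armnn::ClBatchNormalizationFloatWorkload workload(descriptor, info, clCompileContext);
    workload.Execute();   // enqueues the configured CL batch-normalisation function
}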

Constructor & Destructor Documentation

◆ ClBatchNormalizationFloatWorkload()

ClBatchNormalizationFloatWorkload ( const BatchNormalizationQueueDescriptor &  descriptor,
const WorkloadInfo &  info,
const arm_compute::CLCompileContext &  clCompileContext 
)

Definition at line 54 of file ClBatchNormalizationFloatWorkload.cpp.

    : FloatWorkload<BatchNormalizationQueueDescriptor>(descriptor, info)
{
    // Report Profiling Details
    ARMNN_REPORT_PROFILING_WORKLOAD_DESC("ClBatchNormalizationWorkload_Construct",
                                         descriptor.m_Parameters,
                                         info,
                                         this->GetGuid());

    m_Mean = std::make_unique<arm_compute::CLTensor>();
    BuildArmComputeTensor(*m_Mean, m_Data.m_Mean->GetTensorInfo());

    m_Variance = std::make_unique<arm_compute::CLTensor>();
    BuildArmComputeTensor(*m_Variance, m_Data.m_Variance->GetTensorInfo());

    m_Gamma = std::make_unique<arm_compute::CLTensor>();
    BuildArmComputeTensor(*m_Gamma, m_Data.m_Gamma->GetTensorInfo());

    m_Beta = std::make_unique<arm_compute::CLTensor>();
    BuildArmComputeTensor(*m_Beta, m_Data.m_Beta->GetTensorInfo());

    m_Data.ValidateInputsOutputs("ClBatchNormalizationFloatWorkload", 1, 1);

    arm_compute::ICLTensor& input  = static_cast<IClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
    arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();

    arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout);
    input.info()->set_data_layout(aclDataLayout);
    output.info()->set_data_layout(aclDataLayout);

    const arm_compute::ActivationLayerInfo activationInfo = ConvertAdditionalInfoToAclActivationLayerInfo(descriptor);

    {
        ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClBatchNormalizationFloatWorkload_configure");
        m_Layer.configure(clCompileContext,
                          &input,
                          &output,
                          m_Mean.get(),
                          m_Variance.get(),
                          m_Beta.get(),
                          m_Gamma.get(),
                          m_Data.m_Parameters.m_Eps,
                          activationInfo);
    }

    InitializeArmComputeClTensorData(*m_Mean, m_Data.m_Mean);
    InitializeArmComputeClTensorData(*m_Variance, m_Data.m_Variance);
    InitializeArmComputeClTensorData(*m_Beta, m_Data.m_Beta);
    InitializeArmComputeClTensorData(*m_Gamma, m_Data.m_Gamma);

    // Force Compute Library to perform the necessary copying and reshaping, after which
    // delete all the input tensors that will no longer be needed
    m_Layer.prepare();
    FreeUnusedTensors();
}

References ARMNN_REPORT_PROFILING_WORKLOAD_DESC, armnn::info, and QueueDescriptorWithParameters< LayerDescriptor >::m_Parameters.

Member Function Documentation

◆ Execute()

void Execute ( ) const
override virtual

Implements IWorkload.

Definition at line 113 of file ClBatchNormalizationFloatWorkload.cpp.

{
    ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClBatchNormalizationFloatWorkload_Execute");
    RunClFunction(m_Layer, CHECK_LOCATION());
}

References ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID, CHECK_LOCATION, and armnn::RunClFunction().

◆ ReplaceInputTensorHandle()

void ReplaceInputTensorHandle ( ITensorHandle *  tensorHandle,
unsigned int  slot 
)
override virtual

Reimplemented from BaseWorkload< QueueDescriptor >.

Definition at line 127 of file ClBatchNormalizationFloatWorkload.cpp.

{
    ITensorHandle* backupHandle = this->m_Data.m_Inputs[slot];
    this->m_Data.m_Inputs[slot] = tensorHandle;
    try
    {
        Reconfigure();
    }
    catch(armnn::UnimplementedException& e)
    {
        // Cannot reconfigure, revert the slot back and throw the exception.
        this->m_Data.m_Inputs[slot] = backupHandle;
        throw e;
    }
}

References BaseWorkload< QueueDescriptor >::m_Data, and QueueDescriptor::m_Inputs.
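As a caller-side illustration (not part of this class), the replacement above might be driven as in the following hedged sketch. The helper name, the slot index and the handle argument are assumptions; the revert-and-rethrow behaviour matches the listing above.

// Hypothetical caller-side helper for illustration only.
void TrySwapInput(armnn::ClBatchNormalizationFloatWorkload& workload,
                  armnn::ITensorHandle* newHandle)
{
    if (workload.SupportsTensorHandleReplacement())
    {
        try
        {
            workload.ReplaceInputTensorHandle(newHandle, /*slot=*/0);
        }
        catch (const armnn::UnimplementedException&)
        {
            // The workload reverted the slot to its previous handle before rethrowing,
            // so it can still be executed with the original input tensor.
        }
    }
}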

◆ ReplaceOutputTensorHandle()

void ReplaceOutputTensorHandle ( ITensorHandle *  tensorHandle,
unsigned int  slot 
)
override virtual

Reimplemented from BaseWorkload< QueueDescriptor >.

Definition at line 144 of file ClBatchNormalizationFloatWorkload.cpp.

{
    ITensorHandle* backupHandle = this->m_Data.m_Inputs[slot];
    this->m_Data.m_Inputs[slot] = tensorHandle;
    try
    {
        Reconfigure();
    }
    catch(armnn::UnimplementedException& e)
    {
        // Cannot reconfigure, revert the slot back and throw the exception.
        this->m_Data.m_Inputs[slot] = backupHandle;
        throw e;
    }
}

References BaseWorkload< QueueDescriptor >::m_Data, and QueueDescriptor::m_Inputs.


The documentation for this class was generated from the following files:
ClBatchNormalizationFloatWorkload.hpp
ClBatchNormalizationFloatWorkload.cpp