ArmNN 22.02
ClLstmFloatWorkload Class Reference

#include <ClLstmFloatWorkload.hpp>

Inheritance diagram for ClLstmFloatWorkload:
ClLstmFloatWorkload → TypedWorkload< QueueDescriptor, DataTypes > → BaseWorkload< QueueDescriptor > → IWorkload

Public Member Functions

 ClLstmFloatWorkload (const LstmQueueDescriptor &descriptor, const WorkloadInfo &info, const arm_compute::CLCompileContext &clCompileContext)
 
void Execute () const override
 
void ReplaceInputTensorHandle (ITensorHandle *tensorHandle, unsigned int slot) override
 
void ReplaceOutputTensorHandle (ITensorHandle *tensorHandle, unsigned int slot) override
 
- Public Member Functions inherited from TypedWorkload< QueueDescriptor, DataTypes >
 TypedWorkload (const QueueDescriptor &descriptor, const WorkloadInfo &info)
 
- Public Member Functions inherited from BaseWorkload< QueueDescriptor >
 BaseWorkload (const QueueDescriptor &descriptor, const WorkloadInfo &info)
 
void ExecuteAsync (WorkingMemDescriptor &workingMemDescriptor) override
 
void PostAllocationConfigure () override
 
const QueueDescriptor & GetData () const
 
profiling::ProfilingGuid GetGuid () const final
 
virtual bool SupportsTensorHandleReplacement () const override
 
- Public Member Functions inherited from IWorkload
virtual ~IWorkload ()
 
virtual void RegisterDebugCallback (const DebugCallbackFunction &)
 

Additional Inherited Members

- Protected Attributes inherited from BaseWorkload< QueueDescriptor >
QueueDescriptor m_Data
 
const profiling::ProfilingGuid m_Guid
 

Detailed Description

Definition at line 18 of file ClLstmFloatWorkload.hpp.
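
ClLstmFloatWorkload executes a single float32 LSTM cell on the GPU backend through Arm Compute Library's CLLSTMLayer. The constructor translates the weights and optional features of the LstmQueueDescriptor (CIFG, projection, peepholes, layer normalization) into CL tensors and configures the ACL function once up front; Execute() then only runs the prepared function.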

Constructor & Destructor Documentation

◆ ClLstmFloatWorkload()

ClLstmFloatWorkload ( const LstmQueueDescriptor &  descriptor,
const WorkloadInfo &  info,
const arm_compute::CLCompileContext &  clCompileContext 
)

Definition at line 22 of file ClLstmFloatWorkload.cpp.

References ARMNN_REPORT_PROFILING_WORKLOAD_DESC, and QueueDescriptorWithParameters< LayerDescriptor >::m_Parameters.

25  : FloatWorkload<LstmQueueDescriptor>(descriptor, info)
26 {
27  // Report Profiling Details
28  ARMNN_REPORT_PROFILING_WORKLOAD_DESC("ClLstmFloatWorkload_Construct",
29  descriptor.m_Parameters,
30  info,
31  this->GetGuid());
32 
33  arm_compute::LSTMParams<arm_compute::ICLTensor> lstm_param;
34 
35  // Basic parameters
36  m_InputToForgetWeightsTensor = std::make_unique<arm_compute::CLTensor>();
37  BuildArmComputeTensor(*m_InputToForgetWeightsTensor, m_Data.m_InputToForgetWeights->GetTensorInfo());
38 
39  m_InputToCellWeightsTensor = std::make_unique<arm_compute::CLTensor>();
40  BuildArmComputeTensor(*m_InputToCellWeightsTensor, m_Data.m_InputToCellWeights->GetTensorInfo());
41 
42  m_InputToOutputWeightsTensor = std::make_unique<arm_compute::CLTensor>();
43  BuildArmComputeTensor(*m_InputToOutputWeightsTensor, m_Data.m_InputToOutputWeights->GetTensorInfo());
44 
45  m_RecurrentToForgetWeightsTensor = std::make_unique<arm_compute::CLTensor>();
46  BuildArmComputeTensor(*m_RecurrentToForgetWeightsTensor, m_Data.m_RecurrentToForgetWeights->GetTensorInfo());
47 
48  m_RecurrentToCellWeightsTensor = std::make_unique<arm_compute::CLTensor>();
49  BuildArmComputeTensor(*m_RecurrentToCellWeightsTensor, m_Data.m_RecurrentToCellWeights->GetTensorInfo());
50 
51  m_RecurrentToOutputWeightsTensor = std::make_unique<arm_compute::CLTensor>();
52  BuildArmComputeTensor(*m_RecurrentToOutputWeightsTensor, m_Data.m_RecurrentToOutputWeights->GetTensorInfo());
53 
54  m_ForgetGateBiasTensor = std::make_unique<arm_compute::CLTensor>();
55  BuildArmComputeTensor(*m_ForgetGateBiasTensor, m_Data.m_ForgetGateBias->GetTensorInfo());
56 
57  m_CellBiasTensor = std::make_unique<arm_compute::CLTensor>();
58  BuildArmComputeTensor(*m_CellBiasTensor, m_Data.m_CellBias->GetTensorInfo());
59 
60  m_OutputGateBiasTensor = std::make_unique<arm_compute::CLTensor>();
61  BuildArmComputeTensor(*m_OutputGateBiasTensor, m_Data.m_OutputGateBias->GetTensorInfo());
62 
63  // for future reference: check the AndroidNN API for the logic here
64  if (!m_Data.m_Parameters.m_CifgEnabled)
65  {
66  m_InputToInputWeightsTensor = std::make_unique<arm_compute::CLTensor>();
67  BuildArmComputeTensor(*m_InputToInputWeightsTensor, m_Data.m_InputToInputWeights->GetTensorInfo());
68 
69  m_RecurrentToInputWeightsTensor = std::make_unique<arm_compute::CLTensor>();
70  BuildArmComputeTensor(*m_RecurrentToInputWeightsTensor, m_Data.m_RecurrentToInputWeights->GetTensorInfo());
71 
72  m_CellToInputWeightsTensor = std::make_unique<arm_compute::CLTensor>();
73  if (m_Data.m_CellToInputWeights != nullptr)
74  {
75  BuildArmComputeTensor(*m_CellToInputWeightsTensor, m_Data.m_CellToInputWeights->GetTensorInfo());
76  }
77 
78  m_InputGateBiasTensor = std::make_unique<arm_compute::CLTensor>();
79  BuildArmComputeTensor(*m_InputGateBiasTensor, m_Data.m_InputGateBias->GetTensorInfo());
80 
81  lstm_param.set_cifg_params(m_InputToInputWeightsTensor.get(),
82  m_RecurrentToInputWeightsTensor.get(),
83  m_Data.m_CellToInputWeights != nullptr ? m_CellToInputWeightsTensor.get() : nullptr,
84  m_InputGateBiasTensor.get());
85  }
86 
87  if (m_Data.m_Parameters.m_ProjectionEnabled)
88  {
89  m_ProjectionWeightsTensor = std::make_unique<arm_compute::CLTensor>();
90  BuildArmComputeTensor(*m_ProjectionWeightsTensor, m_Data.m_ProjectionWeights->GetTensorInfo());
91 
92  m_ProjectionBiasTensor = std::make_unique<arm_compute::CLTensor>();
93  if (m_Data.m_ProjectionBias != nullptr)
94  {
95  BuildArmComputeTensor(*m_ProjectionBiasTensor, m_Data.m_ProjectionBias->GetTensorInfo());
96  }
97 
98  lstm_param.set_projection_params(m_ProjectionWeightsTensor.get(),
99  m_Data.m_ProjectionBias != nullptr ? m_ProjectionBiasTensor.get() : nullptr);
100  }
101 
102  if (m_Data.m_Parameters.m_PeepholeEnabled)
103  {
104  m_CellToForgetWeightsTensor = std::make_unique<arm_compute::CLTensor>();
105  BuildArmComputeTensor(*m_CellToForgetWeightsTensor, m_Data.m_CellToForgetWeights->GetTensorInfo());
106 
107  m_CellToOutputWeightsTensor = std::make_unique<arm_compute::CLTensor>();
108  BuildArmComputeTensor(*m_CellToOutputWeightsTensor, m_Data.m_CellToOutputWeights->GetTensorInfo());
109 
110  lstm_param.set_peephole_params(m_CellToForgetWeightsTensor.get(), m_CellToOutputWeightsTensor.get());
111  }
112 
113  if (m_Data.m_Parameters.m_LayerNormEnabled)
114  {
115  m_InputLayerNormWeightsTensor = std::make_unique<arm_compute::CLTensor>();
116  m_ForgetLayerNormWeightsTensor = std::make_unique<arm_compute::CLTensor>();
117  m_CellLayerNormWeightsTensor = std::make_unique<arm_compute::CLTensor>();
118  m_OutputLayerNormWeightsTensor = std::make_unique<arm_compute::CLTensor>();
119 
120  if (!m_Data.m_Parameters.m_CifgEnabled)
121  {
122  BuildArmComputeTensor(*m_InputLayerNormWeightsTensor, m_Data.m_InputLayerNormWeights->GetTensorInfo());
123  }
124  BuildArmComputeTensor(*m_ForgetLayerNormWeightsTensor, m_Data.m_ForgetLayerNormWeights->GetTensorInfo());
125  BuildArmComputeTensor(*m_CellLayerNormWeightsTensor, m_Data.m_CellLayerNormWeights->GetTensorInfo());
126  BuildArmComputeTensor(*m_OutputLayerNormWeightsTensor, m_Data.m_OutputLayerNormWeights->GetTensorInfo());
127 
128  lstm_param.set_layer_normalization_params(m_Data.m_Parameters.m_CifgEnabled ? nullptr :
129  m_InputLayerNormWeightsTensor.get(),
130  m_ForgetLayerNormWeightsTensor.get(),
131  m_CellLayerNormWeightsTensor.get(),
132  m_OutputLayerNormWeightsTensor.get());
133  }
134 
135  const arm_compute::ICLTensor& input = static_cast<IClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
136  const arm_compute::ICLTensor& output_state_in = static_cast<IClTensorHandle*>(m_Data.m_Inputs[1])->GetTensor();
137  arm_compute::ICLTensor& cell_state_in = static_cast<IClTensorHandle*>(m_Data.m_Inputs[2])->GetTensor();
138 
139  arm_compute::ICLTensor& output_state_out = static_cast<IClTensorHandle*>(m_Data.m_Outputs[1])->GetTensor();
140  arm_compute::ICLTensor& cell_state_out = static_cast<IClTensorHandle*>(m_Data.m_Outputs[2])->GetTensor();
141  arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(m_Data.m_Outputs[3])->GetTensor();
142 
143  // Get the batch_size and the num_units from the cellStateIn dimensions
144  const TensorInfo& inputTensorInfo = info.m_InputTensorInfos[2];
145  const unsigned int batch_size = armnn::numeric_cast<unsigned int>(inputTensorInfo.GetShape()[0]);
146  const unsigned int num_units = armnn::numeric_cast<unsigned int>(inputTensorInfo.GetShape()[1]);
147 
148  m_ScratchBuffer = std::make_unique<arm_compute::CLTensor>();
149  if (m_Data.m_Parameters.m_CifgEnabled)
150  {
151  // 2D tensor with dimensions [num_units * 3, batch_size] with CIFG
152  armnn::TensorInfo scratchBuffer1({ batch_size, num_units * 3 }, DataType::Float32);
153  BuildArmComputeTensor(*m_ScratchBuffer, scratchBuffer1);
154  }
155  else
156  {
157  // scratch_buffer [num_units * 4, batch_size] without CIFG
158  armnn::TensorInfo scratchBuffer2({ batch_size, num_units * 4 }, DataType::Float32);
159  BuildArmComputeTensor(*m_ScratchBuffer, scratchBuffer2);
160  }
161 
162  float cell_threshold = m_Data.m_Parameters.m_ClippingThresCell;
163  float projection_threshold = m_Data.m_Parameters.m_ClippingThresProj;
164 
165  // for preparing the object for the class ActivationLayerInfo, we need to consider 5 situations
166  arm_compute::ActivationLayerInfo activationLayerInfo;
167  if (m_Data.m_Parameters.m_ActivationFunc == 0)
168  {
169  // no activation, do nothing
170  }
171  else if (m_Data.m_Parameters.m_ActivationFunc == 1)
172  {
173  activationLayerInfo = arm_compute::ActivationLayerInfo(
174  arm_compute::ActivationLayerInfo::ActivationFunction::RELU);
175  }
176  else if (m_Data.m_Parameters.m_ActivationFunc == 3)
177  {
178  activationLayerInfo = arm_compute::ActivationLayerInfo(
179  arm_compute::ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.0);
180  }
181  else if (m_Data.m_Parameters.m_ActivationFunc == 4)
182  {
183  activationLayerInfo = arm_compute::ActivationLayerInfo(
184  arm_compute::ActivationLayerInfo::ActivationFunction::TANH, 1.0, 1.0);
185  }
186  else if (m_Data.m_Parameters.m_ActivationFunc == 6)
187  {
188  activationLayerInfo = arm_compute::ActivationLayerInfo(
189  arm_compute::ActivationLayerInfo::ActivationFunction::LOGISTIC);
190  }
191  else
192  {
193  throw armnn::Exception("Wrong Type of Activation Function!");
194  }
195 
196  {
197  ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClLstmFloatWorkload_configure");
198  m_LstmLayer.configure(clCompileContext, &input, m_InputToForgetWeightsTensor.get(),
199  m_InputToCellWeightsTensor.get(), m_InputToOutputWeightsTensor.get(),
200  m_RecurrentToForgetWeightsTensor.get(), m_RecurrentToCellWeightsTensor.get(),
201  m_RecurrentToOutputWeightsTensor.get(), m_ForgetGateBiasTensor.get(),
202  m_CellBiasTensor.get(), m_OutputGateBiasTensor.get(), &output_state_in,
203  &cell_state_in, m_ScratchBuffer.get(), &output_state_out,
204  &cell_state_out, &output, lstm_param, activationLayerInfo,
205  cell_threshold, projection_threshold);
206  }
207 
208  armcomputetensorutils::InitialiseArmComputeTensorEmpty(*m_ScratchBuffer);
209 
210  InitializeArmComputeClTensorData(*m_InputToForgetWeightsTensor, m_Data.m_InputToForgetWeights);
211  InitializeArmComputeClTensorData(*m_InputToCellWeightsTensor, m_Data.m_InputToCellWeights);
212  InitializeArmComputeClTensorData(*m_InputToOutputWeightsTensor, m_Data.m_InputToOutputWeights);
213  InitializeArmComputeClTensorData(*m_RecurrentToForgetWeightsTensor, m_Data.m_RecurrentToForgetWeights);
214  InitializeArmComputeClTensorData(*m_RecurrentToCellWeightsTensor, m_Data.m_RecurrentToCellWeights);
215  InitializeArmComputeClTensorData(*m_RecurrentToOutputWeightsTensor, m_Data.m_RecurrentToOutputWeights);
216  InitializeArmComputeClTensorData(*m_ForgetGateBiasTensor, m_Data.m_ForgetGateBias);
217  InitializeArmComputeClTensorData(*m_CellBiasTensor, m_Data.m_CellBias);
218  InitializeArmComputeClTensorData(*m_OutputGateBiasTensor, m_Data.m_OutputGateBias);
219 
220  if (!m_Data.m_Parameters.m_CifgEnabled)
221  {
222  InitializeArmComputeClTensorData(*m_InputToInputWeightsTensor, m_Data.m_InputToInputWeights);
223  InitializeArmComputeClTensorData(*m_RecurrentToInputWeightsTensor, m_Data.m_RecurrentToInputWeights);
224  if (m_Data.m_CellToInputWeights != nullptr)
225  {
226  InitializeArmComputeClTensorData(*m_CellToInputWeightsTensor, m_Data.m_CellToInputWeights);
227  }
228  InitializeArmComputeClTensorData(*m_InputGateBiasTensor, m_Data.m_InputGateBias);
229  }
230 
231  if (m_Data.m_Parameters.m_ProjectionEnabled)
232  {
233  InitializeArmComputeClTensorData(*m_ProjectionWeightsTensor, m_Data.m_ProjectionWeights);
234  if (m_Data.m_ProjectionBias != nullptr)
235  {
236  InitializeArmComputeClTensorData(*m_ProjectionBiasTensor, m_Data.m_ProjectionBias);
237  }
238  }
239 
240  if (m_Data.m_Parameters.m_PeepholeEnabled)
241  {
242  InitializeArmComputeClTensorData(*m_CellToForgetWeightsTensor, m_Data.m_CellToForgetWeights);
243  InitializeArmComputeClTensorData(*m_CellToOutputWeightsTensor, m_Data.m_CellToOutputWeights);
244  }
245 
246  if (m_Data.m_Parameters.m_LayerNormEnabled)
247  {
248  if (!m_Data.m_Parameters.m_CifgEnabled)
249  {
250  InitializeArmComputeClTensorData(*m_InputLayerNormWeightsTensor, m_Data.m_InputLayerNormWeights);
251  }
252 
253  InitializeArmComputeClTensorData(*m_ForgetLayerNormWeightsTensor, m_Data.m_ForgetLayerNormWeights);
254  InitializeArmComputeClTensorData(*m_CellLayerNormWeightsTensor, m_Data.m_CellLayerNormWeights);
255  InitializeArmComputeClTensorData(*m_OutputLayerNormWeightsTensor, m_Data.m_OutputLayerNormWeights);
256  }
257 
258  // Force Compute Library to perform the necessary copying and reshaping, after which
259  // delete all the input tensors that will no longer be needed
260  m_LstmLayer.prepare();
261  FreeUnusedTensors();
262 }

Member Function Documentation

◆ Execute()

void Execute ( ) const
override virtual

Implements IWorkload.

Definition at line 264 of file ClLstmFloatWorkload.cpp.

References ARMNN_SCOPED_PROFILING_EVENT_CL_GUID, CHECK_LOCATION, BaseWorkload< QueueDescriptor >::GetGuid(), and armnn::RunClFunction().

265 {
266  ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClLstmFloatWorkload_Execute", this->GetGuid());
267  RunClFunction(m_LstmLayer, CHECK_LOCATION());
268 }
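
Execute() is the cheap per-inference entry point: tensor building and kernel preparation happened in the constructor, so this call only runs the configured CLLSTMLayer. RunClFunction wraps the run and rethrows any cl::Error as an ArmNN exception tagged with the CHECK_LOCATION() context. A usage sketch, assuming a workload list built by the backend (the loop is illustrative, not ArmNN's actual scheduler code):

// workloads: std::vector<std::unique_ptr<armnn::IWorkload>>
for (auto& workload : workloads)
{
    workload->Execute(); // for this class, runs the prepared CLLSTMLayer
}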

◆ ReplaceInputTensorHandle()

void ReplaceInputTensorHandle ( ITensorHandle *  tensorHandle,
unsigned int  slot 
)
override virtual

Reimplemented from BaseWorkload< QueueDescriptor >.

Definition at line 449 of file ClLstmFloatWorkload.cpp.

References BaseWorkload< QueueDescriptor >::m_Data, and QueueDescriptor::m_Inputs.

450 {
451  ITensorHandle* backupHandle = this->m_Data.m_Inputs[slot];
452  this->m_Data.m_Inputs[slot] = tensorHandle;
453  try
454  {
455  Reconfigure();
456  }
457  catch(armnn::UnimplementedException& e)
458  {
459  // Cannot reconfigure, revert the slot back and throw the exception.
460  this->m_Data.m_Inputs[slot] = backupHandle;
461  throw e;
462  }
463 }
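
Note the rollback pattern: the replacement handle is installed before Reconfigure() is attempted, and if that throws an UnimplementedException the backup handle is restored before the exception is rethrown, leaving the workload in its previous working state. ReplaceOutputTensorHandle() below follows the same pattern.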

◆ ReplaceOutputTensorHandle()

void ReplaceOutputTensorHandle ( ITensorHandle *  tensorHandle,
unsigned int  slot 
)
override virtual

Reimplemented from BaseWorkload< QueueDescriptor >.

Definition at line 466 of file ClLstmFloatWorkload.cpp.

References BaseWorkload< QueueDescriptor >::m_Data, and QueueDescriptor::m_Inputs.

467 {
468  ITensorHandle* backupHandle = this->m_Data.m_Inputs[slot];
469  this->m_Data.m_Inputs[slot] = tensorHandle;
470  try
471  {
472  Reconfigure();
473  }
474  catch(armnn::UnimplementedException& e)
475  {
476  // Cannot reconfigure, revert the slot back and throw the exception.
477  this->m_Data.m_Inputs[slot] = backupHandle;
478  throw e;
479  }
480 }

The documentation for this class was generated from the following files:
ClLstmFloatWorkload.hpp
ClLstmFloatWorkload.cpp