ArmNN 21.08
ClQLstmWorkload Class Reference

#include <ClQLstmWorkload.hpp>

Inheritance diagram for ClQLstmWorkload:
ClQLstmWorkload derives from BaseWorkload< QLstmQueueDescriptor >, which derives from IWorkload.

Public Member Functions

 ClQLstmWorkload (const QLstmQueueDescriptor &descriptor, const WorkloadInfo &info, const arm_compute::CLCompileContext &clCompileContext)
 
virtual void Execute () const override
 
- Public Member Functions inherited from BaseWorkload< QLstmQueueDescriptor >
 BaseWorkload (const QLstmQueueDescriptor &descriptor, const WorkloadInfo &info)
 
void ExecuteAsync (WorkingMemDescriptor &workingMemDescriptor) override
 
void PostAllocationConfigure () override
 
const QLstmQueueDescriptor & GetData () const
 
profiling::ProfilingGuid GetGuid () const final
 
- Public Member Functions inherited from IWorkload
virtual ~IWorkload ()
 
virtual void RegisterDebugCallback (const DebugCallbackFunction &)
 

Additional Inherited Members

- Protected Attributes inherited from BaseWorkload< QLstmQueueDescriptor >
QLstmQueueDescriptor m_Data
 
const profiling::ProfilingGuid m_Guid
 

Detailed Description

Definition at line 19 of file ClQLstmWorkload.hpp.
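
ClQLstmWorkload is the OpenCL (GpuAcc) backend workload for the quantized LSTM (QLSTM) layer. As the listings below show, it wraps an arm_compute::CLQLSTMLayer: the constructor builds the ACL weight tensors and configures the layer once, and Execute() simply runs it.

The sketch below illustrates the intended lifecycle only; it assumes the QLstmQueueDescriptor and WorkloadInfo arrive fully populated (weights, inputs, outputs) from the ArmNN runtime, which normally creates this workload through the CL workload factory rather than directly.

#include <ClQLstmWorkload.hpp>

// Minimal lifecycle sketch. Assumption: `descriptor` and `info` are fully
// populated elsewhere by the runtime; calling this with empty descriptors
// would fail. The function name is illustrative only.
void RunQLstmOnGpu(const armnn::QLstmQueueDescriptor& descriptor,
                   const armnn::WorkloadInfo& info,
                   const arm_compute::CLCompileContext& clCompileContext)
{
    // Construction configures the underlying arm_compute::CLQLSTMLayer once.
    armnn::ClQLstmWorkload workload(descriptor, info, clCompileContext);

    // Each inference then just runs the already-configured layer.
    workload.Execute();
}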

Constructor & Destructor Documentation

◆ ClQLstmWorkload()

ClQLstmWorkload ( const QLstmQueueDescriptor &  descriptor,
const WorkloadInfo &  info,
const arm_compute::CLCompileContext &  clCompileContext 
)

Definition at line 17 of file ClQLstmWorkload.cpp.

References ARMNN_REPORT_PROFILING_WORKLOAD_DESC, and QueueDescriptorWithParameters< LayerDescriptor >::m_Parameters.

20  : BaseWorkload<QLstmQueueDescriptor>(descriptor, info)
21 {
22  // Report Profiling Details
23  ARMNN_REPORT_PROFILING_WORKLOAD_DESC("ClQLstmWorkload_Construct",
24  descriptor.m_Parameters,
25  info,
26  this->GetGuid());
27 
28  arm_compute::LSTMParams<arm_compute::ICLTensor> qLstmParams;
29 
30  // Mandatory params
31  m_InputToForgetWeightsTensor = std::make_unique<arm_compute::CLTensor>();
32  BuildArmComputeTensor(*m_InputToForgetWeightsTensor, m_Data.m_InputToForgetWeights->GetTensorInfo());
33 
34  m_InputToCellWeightsTensor = std::make_unique<arm_compute::CLTensor>();
35  BuildArmComputeTensor(*m_InputToCellWeightsTensor, m_Data.m_InputToCellWeights->GetTensorInfo());
36 
37  m_InputToOutputWeightsTensor = std::make_unique<arm_compute::CLTensor>();
38  BuildArmComputeTensor(*m_InputToOutputWeightsTensor, m_Data.m_InputToOutputWeights->GetTensorInfo());
39 
40  m_RecurrentToForgetWeightsTensor = std::make_unique<arm_compute::CLTensor>();
41  BuildArmComputeTensor(*m_RecurrentToForgetWeightsTensor, m_Data.m_RecurrentToForgetWeights->GetTensorInfo());
42 
43  m_RecurrentToCellWeightsTensor = std::make_unique<arm_compute::CLTensor>();
44  BuildArmComputeTensor(*m_RecurrentToCellWeightsTensor, m_Data.m_RecurrentToCellWeights->GetTensorInfo());
45 
46  m_RecurrentToOutputWeightsTensor = std::make_unique<arm_compute::CLTensor>();
47  BuildArmComputeTensor(*m_RecurrentToOutputWeightsTensor, m_Data.m_RecurrentToOutputWeights->GetTensorInfo());
48 
49  m_ForgetGateBiasTensor = std::make_unique<arm_compute::CLTensor>();
50  BuildArmComputeTensor(*m_ForgetGateBiasTensor, m_Data.m_ForgetGateBias->GetTensorInfo());
51 
52  m_CellBiasTensor = std::make_unique<arm_compute::CLTensor>();
53  BuildArmComputeTensor(*m_CellBiasTensor, m_Data.m_CellBias->GetTensorInfo());
54 
55  m_OutputGateBiasTensor = std::make_unique<arm_compute::CLTensor>();
56  BuildArmComputeTensor(*m_OutputGateBiasTensor, m_Data.m_OutputGateBias->GetTensorInfo());
57 
58  // Create tensors for optional params if they are enabled
59  if (m_Data.m_Parameters.m_PeepholeEnabled)
60  {
61  m_CellToInputWeightsTensor = std::make_unique<arm_compute::CLTensor>();
62 
63  if (!m_Data.m_Parameters.m_CifgEnabled)
64  {
65  // In ACL this is categorised as a CIFG param and not a Peephole param
66  BuildArmComputeTensor(*m_CellToInputWeightsTensor, m_Data.m_CellToInputWeights->GetTensorInfo());
67  }
68 
69  m_CellToForgetWeightsTensor = std::make_unique<arm_compute::CLTensor>();
70  BuildArmComputeTensor(*m_CellToForgetWeightsTensor, m_Data.m_CellToForgetWeights->GetTensorInfo());
71 
72  m_CellToOutputWeightsTensor = std::make_unique<arm_compute::CLTensor>();
73  BuildArmComputeTensor(*m_CellToOutputWeightsTensor, m_Data.m_CellToOutputWeights->GetTensorInfo());
74 
75  // Set Peephole params
76  qLstmParams.set_peephole_params(m_CellToForgetWeightsTensor.get(),
77  m_CellToOutputWeightsTensor.get());
78  }
79 
80  if (m_Data.m_Parameters.m_ProjectionEnabled)
81  {
82  m_ProjectionWeightsTensor = std::make_unique<arm_compute::CLTensor>();
83  BuildArmComputeTensor(*m_ProjectionWeightsTensor, m_Data.m_ProjectionWeights->GetTensorInfo());
84 
85  m_ProjectionBiasTensor = std::make_unique<arm_compute::CLTensor>();
86  if (m_Data.m_ProjectionBias != nullptr)
87  {
88  BuildArmComputeTensor(*m_ProjectionBiasTensor, m_Data.m_ProjectionBias->GetTensorInfo());
89  }
90 
91  // Set projection params
92  qLstmParams.set_projection_params(
93  m_ProjectionWeightsTensor.get(),
94  m_Data.m_ProjectionBias != nullptr ? m_ProjectionBiasTensor.get() : nullptr);
95  }
96 
97  if (m_Data.m_Parameters.m_LayerNormEnabled)
98  {
99  m_InputLayerNormWeightsTensor = std::make_unique<arm_compute::CLTensor>();
100 
101  if (!m_Data.m_Parameters.m_CifgEnabled)
102  {
103  BuildArmComputeTensor(*m_InputLayerNormWeightsTensor, m_Data.m_InputLayerNormWeights->GetTensorInfo());
104  }
105 
106  m_ForgetLayerNormWeightsTensor = std::make_unique<arm_compute::CLTensor>();
107  BuildArmComputeTensor(*m_ForgetLayerNormWeightsTensor, m_Data.m_ForgetLayerNormWeights->GetTensorInfo());
108 
109  m_CellLayerNormWeightsTensor = std::make_unique<arm_compute::CLTensor>();
110  BuildArmComputeTensor(*m_CellLayerNormWeightsTensor, m_Data.m_CellLayerNormWeights->GetTensorInfo());
111 
112  m_OutputLayerNormWeightsTensor = std::make_unique<arm_compute::CLTensor>();
113  BuildArmComputeTensor(*m_OutputLayerNormWeightsTensor, m_Data.m_OutputLayerNormWeights->GetTensorInfo());
114 
115  // Set layer norm params
116  qLstmParams.set_layer_normalization_params(
117  m_Data.m_InputLayerNormWeights != nullptr ? m_InputLayerNormWeightsTensor.get() : nullptr,
118  m_ForgetLayerNormWeightsTensor.get(),
119  m_CellLayerNormWeightsTensor.get(),
120  m_OutputLayerNormWeightsTensor.get());
121  }
122 
123  if (!m_Data.m_Parameters.m_CifgEnabled)
124  {
125  m_InputToInputWeightsTensor = std::make_unique<arm_compute::CLTensor>();
126  BuildArmComputeTensor(*m_InputToInputWeightsTensor, m_Data.m_InputToInputWeights->GetTensorInfo());
127 
128  m_RecurrentToInputWeightsTensor = std::make_unique<arm_compute::CLTensor>();
129  BuildArmComputeTensor(*m_RecurrentToInputWeightsTensor, m_Data.m_RecurrentToInputWeights->GetTensorInfo());
130 
131  m_InputGateBiasTensor = std::make_unique<arm_compute::CLTensor>();
132  BuildArmComputeTensor(*m_InputGateBiasTensor, m_Data.m_InputGateBias->GetTensorInfo());
133 
134  // Set CIFG params
135  qLstmParams.set_cifg_params(
136  m_InputToInputWeightsTensor.get(),
137  m_RecurrentToInputWeightsTensor.get(),
138  m_Data.m_CellToInputWeights != nullptr ? m_CellToInputWeightsTensor.get() : nullptr,
139  m_InputGateBiasTensor.get());
140  }
141 
142  // Input/Output tensors
143  const arm_compute::ICLTensor& input = static_cast<IClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
144  arm_compute::ICLTensor& outputStateIn = static_cast<IClTensorHandle*>(m_Data.m_Inputs[1])->GetTensor();
145  arm_compute::ICLTensor& cellStateIn = static_cast<IClTensorHandle*>(m_Data.m_Inputs[2])->GetTensor();
146 
147  arm_compute::ICLTensor& outputStateOut = static_cast<IClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
148  arm_compute::ICLTensor& cellStateOut = static_cast<IClTensorHandle*>(m_Data.m_Outputs[1])->GetTensor();
149  arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(m_Data.m_Outputs[2])->GetTensor();
150 
151  // Set scalar descriptor params
152  qLstmParams.set_cell_clip_params(m_Data.m_Parameters.m_CellClip);
153  qLstmParams.set_projection_clip_params(m_Data.m_Parameters.m_ProjectionClip);
154  qLstmParams.set_hidden_state_params(m_Data.m_Parameters.m_HiddenStateZeroPoint,
155  m_Data.m_Parameters.m_HiddenStateScale);
156  qLstmParams.set_matmul_scale_params(m_Data.m_Parameters.m_InputIntermediateScale,
157  m_Data.m_Parameters.m_ForgetIntermediateScale,
158  m_Data.m_Parameters.m_CellIntermediateScale,
159  m_Data.m_Parameters.m_OutputIntermediateScale);
160 
161  // QLSTM CL configure
162  m_QLstmLayer.configure(clCompileContext,
163  &input,
164  m_InputToForgetWeightsTensor.get(),
165  m_InputToCellWeightsTensor.get(),
166  m_InputToOutputWeightsTensor.get(),
167  m_RecurrentToForgetWeightsTensor.get(),
168  m_RecurrentToCellWeightsTensor.get(),
169  m_RecurrentToOutputWeightsTensor.get(),
170  m_ForgetGateBiasTensor.get(),
171  m_CellBiasTensor.get(),
172  m_OutputGateBiasTensor.get(),
173  &cellStateIn,
174  &outputStateIn,
175  &cellStateOut,
176  &outputStateOut,
177  &output,
178  qLstmParams);
179 
180  // Initialise ACL tensor data for mandatory params
181  InitializeArmComputeClTensorData(*m_InputToForgetWeightsTensor, m_Data.m_InputToForgetWeights);
182  InitializeArmComputeClTensorData(*m_InputToCellWeightsTensor, m_Data.m_InputToCellWeights);
183  InitializeArmComputeClTensorData(*m_InputToOutputWeightsTensor, m_Data.m_InputToOutputWeights);
184 
185  InitializeArmComputeClTensorData(*m_RecurrentToForgetWeightsTensor, m_Data.m_RecurrentToForgetWeights);
186  InitializeArmComputeClTensorData(*m_RecurrentToCellWeightsTensor, m_Data.m_RecurrentToCellWeights);
187  InitializeArmComputeClTensorData(*m_RecurrentToOutputWeightsTensor, m_Data.m_RecurrentToOutputWeights);
188 
189  InitializeArmComputeClTensorData(*m_ForgetGateBiasTensor, m_Data.m_ForgetGateBias);
190  InitializeArmComputeClTensorData(*m_CellBiasTensor, m_Data.m_CellBias);
191  InitializeArmComputeClTensorData(*m_OutputGateBiasTensor, m_Data.m_OutputGateBias);
192 
193  // Initialise ACL tensor data for optional params
194  if (!m_Data.m_Parameters.m_CifgEnabled)
195  {
196  InitializeArmComputeClTensorData(*m_InputToInputWeightsTensor, m_Data.m_InputToInputWeights);
197  InitializeArmComputeClTensorData(*m_RecurrentToInputWeightsTensor, m_Data.m_RecurrentToInputWeights);
198  InitializeArmComputeClTensorData(*m_InputGateBiasTensor, m_Data.m_InputGateBias);
199  }
200 
201  if (m_Data.m_Parameters.m_ProjectionEnabled)
202  {
203  InitializeArmComputeClTensorData(*m_ProjectionWeightsTensor, m_Data.m_ProjectionWeights);
204 
205  if (m_Data.m_ProjectionBias != nullptr)
206  {
207  InitializeArmComputeClTensorData(*m_ProjectionBiasTensor, m_Data.m_ProjectionBias);
208  }
209  }
210 
211  if (m_Data.m_Parameters.m_PeepholeEnabled)
212  {
213  if (!m_Data.m_Parameters.m_CifgEnabled)
214  {
215  InitializeArmComputeClTensorData(*m_CellToInputWeightsTensor, m_Data.m_CellToInputWeights);
216  }
217 
218  InitializeArmComputeClTensorData(*m_CellToForgetWeightsTensor, m_Data.m_CellToForgetWeights);
219  InitializeArmComputeClTensorData(*m_CellToOutputWeightsTensor, m_Data.m_CellToOutputWeights);
220  }
221 
222  if (m_Data.m_Parameters.m_LayerNormEnabled)
223  {
224  if (!m_Data.m_Parameters.m_CifgEnabled)
225  {
226  InitializeArmComputeClTensorData(*m_InputLayerNormWeightsTensor, m_Data.m_InputLayerNormWeights);
227  }
228  InitializeArmComputeClTensorData(*m_ForgetLayerNormWeightsTensor, m_Data.m_ForgetLayerNormWeights);
229  InitializeArmComputeClTensorData(*m_CellLayerNormWeightsTensor, m_Data.m_CellLayerNormWeights);
230  InitializeArmComputeClTensorData(*m_OutputLayerNormWeightsTensor, m_Data.m_OutputLayerNormWeights);
231  }
232 
233  m_QLstmLayer.prepare();
234 
235  FreeUnusedTensors();
236 }
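
Note that the constructor follows the configure-then-run pattern common to the ArmNN CL workloads: the arm_compute::CLQLSTMLayer is configured and prepared once at construction time, FreeUnusedTensors() then releases any staging CLTensor objects that ACL no longer needs after preparation, and Execute() only has to call run().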

Member Function Documentation

◆ Execute()

virtual void Execute ( ) const override

Implements IWorkload.

Definition at line 238 of file ClQLstmWorkload.cpp.

References ARMNN_SCOPED_PROFILING_EVENT_CL_GUID, and BaseWorkload< QLstmQueueDescriptor >::GetGuid().

239 {
240  ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClQuantizedLstmWorkload_Execute", this->GetGuid());
241  m_QLstmLayer.run();
242 }

The documentation for this class was generated from the following files:

ClQLstmWorkload.hpp
ClQLstmWorkload.cpp