ArmNN 21.02
ClQLstmWorkload Class Reference

#include <ClQLstmWorkload.hpp>

Inheritance diagram for ClQLstmWorkload:
ClQLstmWorkload derives from BaseWorkload< QLstmQueueDescriptor >, which derives from IWorkload.

Public Member Functions

 ClQLstmWorkload (const QLstmQueueDescriptor &descriptor, const WorkloadInfo &info, const arm_compute::CLCompileContext &clCompileContext)
 
virtual void Execute () const override
 
- Public Member Functions inherited from BaseWorkload< QLstmQueueDescriptor >
 BaseWorkload (const QLstmQueueDescriptor &descriptor, const WorkloadInfo &info)
 
void PostAllocationConfigure () override
 
const QLstmQueueDescriptor & GetData () const
 
profiling::ProfilingGuid GetGuid () const final
 
- Public Member Functions inherited from IWorkload
virtual ~IWorkload ()
 
virtual void RegisterDebugCallback (const DebugCallbackFunction &)
 

Additional Inherited Members

- Protected Attributes inherited from BaseWorkload< QLstmQueueDescriptor >
const QLstmQueueDescriptor m_Data
 
const profiling::ProfilingGuid m_Guid
 

Detailed Description

Definition at line 19 of file ClQLstmWorkload.hpp.
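
ClQLstmWorkload is the GPU (OpenCL) workload for ArmNN's quantized LSTM (QLstm) layer: the constructor builds arm_compute::CLTensor copies of the weights and biases described by the QLstmQueueDescriptor and configures an arm_compute::CLQLSTMLayer, and Execute() runs that layer. The fragment below is a minimal sketch of driving the workload directly; it is illustrative only (not taken from the ArmNN sources) and assumes the descriptor and workload info have already been populated with valid tensor handles and QLstmDescriptor parameters, as the CL workload factory would normally do.

#include <ClQLstmWorkload.hpp>

void RunQLstm(const armnn::QLstmQueueDescriptor& descriptor,   // assumed fully populated
              const armnn::WorkloadInfo& info)                 // assumed to match the descriptor
{
    // The CL runtime must already be set up before CL functions are configured,
    // e.g. via arm_compute::CLScheduler::get().default_init(). The compile context
    // is normally supplied by the CL backend; a default-constructed one is only a
    // placeholder for this sketch.
    arm_compute::CLCompileContext clCompileContext;

    // Builds the ACL weight/bias tensors, configures CLQLSTMLayer and uploads the constant data.
    armnn::ClQLstmWorkload workload(descriptor, info, clCompileContext);

    // Enqueues the configured kernels; may be called repeatedly for new input data.
    workload.Execute();
}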

Constructor & Destructor Documentation

◆ ClQLstmWorkload()

ClQLstmWorkload ( const QLstmQueueDescriptor &  descriptor,
                  const WorkloadInfo &  info,
                  const arm_compute::CLCompileContext &  clCompileContext )

Definition at line 17 of file ClQLstmWorkload.cpp.

20  : BaseWorkload<QLstmQueueDescriptor>(descriptor, info)
21 {
22  arm_compute::LSTMParams<arm_compute::ICLTensor> qLstmParams;
23 
24  // Mandatory params
25  m_InputToForgetWeightsTensor = std::make_unique<arm_compute::CLTensor>();
26  BuildArmComputeTensor(*m_InputToForgetWeightsTensor, m_Data.m_InputToForgetWeights->GetTensorInfo());
27 
28  m_InputToCellWeightsTensor = std::make_unique<arm_compute::CLTensor>();
29  BuildArmComputeTensor(*m_InputToCellWeightsTensor, m_Data.m_InputToCellWeights->GetTensorInfo());
30 
31  m_InputToOutputWeightsTensor = std::make_unique<arm_compute::CLTensor>();
32  BuildArmComputeTensor(*m_InputToOutputWeightsTensor, m_Data.m_InputToOutputWeights->GetTensorInfo());
33 
34  m_RecurrentToForgetWeightsTensor = std::make_unique<arm_compute::CLTensor>();
35  BuildArmComputeTensor(*m_RecurrentToForgetWeightsTensor, m_Data.m_RecurrentToForgetWeights->GetTensorInfo());
36 
37  m_RecurrentToCellWeightsTensor = std::make_unique<arm_compute::CLTensor>();
38  BuildArmComputeTensor(*m_RecurrentToCellWeightsTensor, m_Data.m_RecurrentToCellWeights->GetTensorInfo());
39 
40  m_RecurrentToOutputWeightsTensor = std::make_unique<arm_compute::CLTensor>();
41  BuildArmComputeTensor(*m_RecurrentToOutputWeightsTensor, m_Data.m_RecurrentToOutputWeights->GetTensorInfo());
42 
43  m_ForgetGateBiasTensor = std::make_unique<arm_compute::CLTensor>();
44  BuildArmComputeTensor(*m_ForgetGateBiasTensor, m_Data.m_ForgetGateBias->GetTensorInfo());
45 
46  m_CellBiasTensor = std::make_unique<arm_compute::CLTensor>();
47  BuildArmComputeTensor(*m_CellBiasTensor, m_Data.m_CellBias->GetTensorInfo());
48 
49  m_OutputGateBiasTensor = std::make_unique<arm_compute::CLTensor>();
50  BuildArmComputeTensor(*m_OutputGateBiasTensor, m_Data.m_OutputGateBias->GetTensorInfo());
51 
52  // Create tensors for optional params if they are enabled
53  if (m_Data.m_Parameters.m_PeepholeEnabled)
54  {
55  m_CellToInputWeightsTensor = std::make_unique<arm_compute::CLTensor>();
56 
57  if (!m_Data.m_Parameters.m_CifgEnabled)
58  {
59  // In ACL this is categorised as a CIFG param and not a Peephole param
60  BuildArmComputeTensor(*m_CellToInputWeightsTensor, m_Data.m_CellToInputWeights->GetTensorInfo());
61  }
62 
63  m_CellToForgetWeightsTensor = std::make_unique<arm_compute::CLTensor>();
64  BuildArmComputeTensor(*m_CellToForgetWeightsTensor, m_Data.m_CellToForgetWeights->GetTensorInfo());
65 
66  m_CellToOutputWeightsTensor = std::make_unique<arm_compute::CLTensor>();
67  BuildArmComputeTensor(*m_CellToOutputWeightsTensor, m_Data.m_CellToOutputWeights->GetTensorInfo());
68 
69  // Set Peephole params
70  qLstmParams.set_peephole_params(m_CellToForgetWeightsTensor.get(),
71  m_CellToOutputWeightsTensor.get());
72  }
73 
74  if (m_Data.m_Parameters.m_ProjectionEnabled)
75  {
76  m_ProjectionWeightsTensor = std::make_unique<arm_compute::CLTensor>();
77  BuildArmComputeTensor(*m_ProjectionWeightsTensor, m_Data.m_ProjectionWeights->GetTensorInfo());
78 
79  m_ProjectionBiasTensor = std::make_unique<arm_compute::CLTensor>();
80  if (m_Data.m_ProjectionBias != nullptr)
81  {
82  BuildArmComputeTensor(*m_ProjectionBiasTensor, m_Data.m_ProjectionBias->GetTensorInfo());
83  }
84 
85  // Set projection params
86  qLstmParams.set_projection_params(
87  m_ProjectionWeightsTensor.get(),
88  m_Data.m_ProjectionBias != nullptr ? m_ProjectionBiasTensor.get() : nullptr);
89  }
90 
91  if (m_Data.m_Parameters.m_LayerNormEnabled)
92  {
93  m_InputLayerNormWeightsTensor = std::make_unique<arm_compute::CLTensor>();
94 
95  if (!m_Data.m_Parameters.m_CifgEnabled)
96  {
97  BuildArmComputeTensor(*m_InputLayerNormWeightsTensor, m_Data.m_InputLayerNormWeights->GetTensorInfo());
98  }
99 
100  m_ForgetLayerNormWeightsTensor = std::make_unique<arm_compute::CLTensor>();
101  BuildArmComputeTensor(*m_ForgetLayerNormWeightsTensor, m_Data.m_ForgetLayerNormWeights->GetTensorInfo());
102 
103  m_CellLayerNormWeightsTensor = std::make_unique<arm_compute::CLTensor>();
104  BuildArmComputeTensor(*m_CellLayerNormWeightsTensor, m_Data.m_CellLayerNormWeights->GetTensorInfo());
105 
106  m_OutputLayerNormWeightsTensor = std::make_unique<arm_compute::CLTensor>();
107  BuildArmComputeTensor(*m_OutputLayerNormWeightsTensor, m_Data.m_OutputLayerNormWeights->GetTensorInfo());
108 
109  // Set layer norm params
110  qLstmParams.set_layer_normalization_params(
111  m_Data.m_InputLayerNormWeights != nullptr ? m_InputLayerNormWeightsTensor.get() : nullptr,
112  m_ForgetLayerNormWeightsTensor.get(),
113  m_CellLayerNormWeightsTensor.get(),
114  m_OutputLayerNormWeightsTensor.get());
115  }
116 
117  if (!m_Data.m_Parameters.m_CifgEnabled)
118  {
119  m_InputToInputWeightsTensor = std::make_unique<arm_compute::CLTensor>();
120  BuildArmComputeTensor(*m_InputToInputWeightsTensor, m_Data.m_InputToInputWeights->GetTensorInfo());
121 
122  m_RecurrentToInputWeightsTensor = std::make_unique<arm_compute::CLTensor>();
123  BuildArmComputeTensor(*m_RecurrentToInputWeightsTensor, m_Data.m_RecurrentToInputWeights->GetTensorInfo());
124 
125  m_InputGateBiasTensor = std::make_unique<arm_compute::CLTensor>();
126  BuildArmComputeTensor(*m_InputGateBiasTensor, m_Data.m_InputGateBias->GetTensorInfo());
127 
128  // Set CIFG params
129  qLstmParams.set_cifg_params(
130  m_InputToInputWeightsTensor.get(),
131  m_RecurrentToInputWeightsTensor.get(),
132  m_Data.m_CellToInputWeights != nullptr ? m_CellToInputWeightsTensor.get() : nullptr,
133  m_InputGateBiasTensor.get());
134  }
135 
136  // Input/Output tensors
137  const arm_compute::ICLTensor& input = static_cast<IClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
138  arm_compute::ICLTensor& outputStateIn = static_cast<IClTensorHandle*>(m_Data.m_Inputs[1])->GetTensor();
139  arm_compute::ICLTensor& cellStateIn = static_cast<IClTensorHandle*>(m_Data.m_Inputs[2])->GetTensor();
140 
141  arm_compute::ICLTensor& outputStateOut = static_cast<IClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
142  arm_compute::ICLTensor& cellStateOut = static_cast<IClTensorHandle*>(m_Data.m_Outputs[1])->GetTensor();
143  arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(m_Data.m_Outputs[2])->GetTensor();
144 
145  // Set scalar descriptor params
146  qLstmParams.set_cell_clip_params(m_Data.m_Parameters.m_CellClip);
147  qLstmParams.set_projection_clip_params(m_Data.m_Parameters.m_ProjectionClip);
148  qLstmParams.set_hidden_state_params(m_Data.m_Parameters.m_HiddenStateZeroPoint,
149                                      m_Data.m_Parameters.m_HiddenStateScale);
150  qLstmParams.set_matmul_scale_params(m_Data.m_Parameters.m_InputIntermediateScale,
151                                      m_Data.m_Parameters.m_ForgetIntermediateScale,
152                                      m_Data.m_Parameters.m_CellIntermediateScale,
153                                      m_Data.m_Parameters.m_OutputIntermediateScale);
154 
155  // QLSTM CL configure
156  m_QLstmLayer.configure(clCompileContext,
157  &input,
158  m_InputToForgetWeightsTensor.get(),
159  m_InputToCellWeightsTensor.get(),
160  m_InputToOutputWeightsTensor.get(),
161  m_RecurrentToForgetWeightsTensor.get(),
162  m_RecurrentToCellWeightsTensor.get(),
163  m_RecurrentToOutputWeightsTensor.get(),
164  m_ForgetGateBiasTensor.get(),
165  m_CellBiasTensor.get(),
166  m_OutputGateBiasTensor.get(),
167  &cellStateIn,
168  &outputStateIn,
169  &cellStateOut,
170  &outputStateOut,
171  &output,
172  qLstmParams);
173 
174  // Initialise ACL tensor data for mandatory params
175  InitializeArmComputeClTensorData(*m_InputToForgetWeightsTensor, m_Data.m_InputToForgetWeights);
176  InitializeArmComputeClTensorData(*m_InputToCellWeightsTensor, m_Data.m_InputToCellWeights);
177  InitializeArmComputeClTensorData(*m_InputToOutputWeightsTensor, m_Data.m_InputToOutputWeights);
178 
179  InitializeArmComputeClTensorData(*m_RecurrentToForgetWeightsTensor, m_Data.m_RecurrentToForgetWeights);
180  InitializeArmComputeClTensorData(*m_RecurrentToCellWeightsTensor, m_Data.m_RecurrentToCellWeights);
181  InitializeArmComputeClTensorData(*m_RecurrentToOutputWeightsTensor, m_Data.m_RecurrentToOutputWeights);
182 
183  InitializeArmComputeClTensorData(*m_ForgetGateBiasTensor, m_Data.m_ForgetGateBias);
184  InitializeArmComputeClTensorData(*m_CellBiasTensor, m_Data.m_CellBias);
185  InitializeArmComputeClTensorData(*m_OutputGateBiasTensor, m_Data.m_OutputGateBias);
186 
187  // Initialise ACL tensor data for optional params
188  if (!m_Data.m_Parameters.m_CifgEnabled)
189  {
190  InitializeArmComputeClTensorData(*m_InputToInputWeightsTensor, m_Data.m_InputToInputWeights);
191  InitializeArmComputeClTensorData(*m_RecurrentToInputWeightsTensor, m_Data.m_RecurrentToInputWeights);
192  InitializeArmComputeClTensorData(*m_InputGateBiasTensor, m_Data.m_InputGateBias);
193  }
194 
195  if (m_Data.m_Parameters.m_ProjectionEnabled)
196  {
197  InitializeArmComputeClTensorData(*m_ProjectionWeightsTensor, m_Data.m_ProjectionWeights);
198 
199  if (m_Data.m_ProjectionBias != nullptr)
200  {
201  InitializeArmComputeClTensorData(*m_ProjectionBiasTensor, m_Data.m_ProjectionBias);
202  }
203  }
204 
205  if (m_Data.m_Parameters.m_PeepholeEnabled)
206  {
207  if (!m_Data.m_Parameters.m_CifgEnabled)
208  {
209  InitializeArmComputeClTensorData(*m_CellToInputWeightsTensor, m_Data.m_CellToInputWeights);
210  }
211 
212  InitializeArmComputeClTensorData(*m_CellToForgetWeightsTensor, m_Data.m_CellToForgetWeights);
213  InitializeArmComputeClTensorData(*m_CellToOutputWeightsTensor, m_Data.m_CellToOutputWeights);
214  }
215 
216  if (m_Data.m_Parameters.m_LayerNormEnabled)
217  {
218  if (!m_Data.m_Parameters.m_CifgEnabled)
219  {
220  InitializeArmComputeClTensorData(*m_InputLayerNormWeightsTensor, m_Data.m_InputLayerNormWeights);
221  }
222  InitializeArmComputeClTensorData(*m_ForgetLayerNormWeightsTensor, m_Data.m_ForgetLayerNormWeights);
223  InitializeArmComputeClTensorData(*m_CellLayerNormWeightsTensor, m_Data.m_CellLayerNormWeights);
224  InitializeArmComputeClTensorData(*m_OutputLayerNormWeightsTensor, m_Data.m_OutputLayerNormWeights);
225  }
226 
227  m_QLstmLayer.prepare();
228 
229  FreeUnusedTensors();
230 }
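
Taken together, the constructor follows the standard pattern for ArmNN CL workloads. It builds an arm_compute::CLTensor for every weight and bias named in the descriptor, registers the optional groups with arm_compute::LSTMParams only when the corresponding QLstmDescriptor flag allows it (m_PeepholeEnabled, m_ProjectionEnabled, m_LayerNormEnabled, or CIFG disabled), and configures the CLQLSTMLayer once against the input, output and state tensor handles. The constant data is then copied into the CL tensors with InitializeArmComputeClTensorData, and prepare() plus FreeUnusedTensors() release the staging tensors that the configured layer no longer needs before the first Execute().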

Member Function Documentation

◆ Execute()

virtual void Execute () const override

Implements IWorkload.

Definition at line 232 of file ClQLstmWorkload.cpp.

233 {
234  m_QLstmLayer.run();
235 }
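
Execute() delegates to CLQLSTMLayer::run(), so each call simply enqueues the kernels that were configured and prepared in the constructor; no weight re-upload or re-configuration happens per invocation.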

The documentation for this class was generated from the following files:

ClQLstmWorkload.hpp
ClQLstmWorkload.cpp