ArmNN
 20.05
NeonQLstmWorkload.cpp
Go to the documentation of this file.
1 //
2 // Copyright © 2020 Arm Ltd. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
#include "NeonQLstmWorkload.hpp"
#include "NeonWorkloadUtils.hpp"

#include <aclCommon/ArmComputeTensorUtils.hpp>

#include <neon/NeonTensorHandle.hpp>
13 namespace armnn
14 {
15 using namespace armcomputetensorutils;
16 
18  : BaseWorkload<QLstmQueueDescriptor>(descriptor, info)
19 {
20  arm_compute::LSTMParams<arm_compute::ITensor> qLstmParams;
21 
22  // Mandatory tensors
23  m_InputToForgetWeightsTensor = std::make_unique<arm_compute::Tensor>();
24  BuildArmComputeTensor(*m_InputToForgetWeightsTensor, m_Data.m_InputToForgetWeights->GetTensorInfo());
25 
26  m_InputToCellWeightsTensor = std::make_unique<arm_compute::Tensor>();
27  BuildArmComputeTensor(*m_InputToCellWeightsTensor, m_Data.m_InputToCellWeights->GetTensorInfo());
28 
29  m_InputToOutputWeightsTensor = std::make_unique<arm_compute::Tensor>();
30  BuildArmComputeTensor(*m_InputToOutputWeightsTensor, m_Data.m_InputToOutputWeights->GetTensorInfo());
31 
32  m_RecurrentToForgetWeightsTensor = std::make_unique<arm_compute::Tensor>();
33  BuildArmComputeTensor(*m_RecurrentToForgetWeightsTensor, m_Data.m_RecurrentToForgetWeights->GetTensorInfo());
34 
35  m_RecurrentToCellWeightsTensor = std::make_unique<arm_compute::Tensor>();
36  BuildArmComputeTensor(*m_RecurrentToCellWeightsTensor, m_Data.m_RecurrentToCellWeights->GetTensorInfo());
37 
38  m_RecurrentToOutputWeightsTensor = std::make_unique<arm_compute::Tensor>();
39  BuildArmComputeTensor(*m_RecurrentToOutputWeightsTensor, m_Data.m_RecurrentToOutputWeights->GetTensorInfo());
40 
41  m_ForgetGateBiasTensor = std::make_unique<arm_compute::Tensor>();
42  BuildArmComputeTensor(*m_ForgetGateBiasTensor, m_Data.m_ForgetGateBias->GetTensorInfo());
43 
44  m_CellBiasTensor = std::make_unique<arm_compute::Tensor>();
45  BuildArmComputeTensor(*m_CellBiasTensor, m_Data.m_CellBias->GetTensorInfo());
46 
47  m_OutputGateBiasTensor = std::make_unique<arm_compute::Tensor>();
48  BuildArmComputeTensor(*m_OutputGateBiasTensor, m_Data.m_OutputGateBias->GetTensorInfo());
49 
50  // Create tensors for optional params if they are enabled
52  {
53  m_CellToInputWeightsTensor = std::make_unique<arm_compute::Tensor>();
54 
56  {
57  // In ACL this is categorised as a CIFG param and not a Peephole param
58  BuildArmComputeTensor(*m_CellToInputWeightsTensor, m_Data.m_CellToInputWeights->GetTensorInfo());
59  }
60 
61  m_CellToForgetWeightsTensor = std::make_unique<arm_compute::Tensor>();
62  BuildArmComputeTensor(*m_CellToForgetWeightsTensor, m_Data.m_CellToForgetWeights->GetTensorInfo());
63 
64  m_CellToOutputWeightsTensor = std::make_unique<arm_compute::Tensor>();
65  BuildArmComputeTensor(*m_CellToOutputWeightsTensor, m_Data.m_CellToOutputWeights->GetTensorInfo());
66 
67  // Set Peephole params
68  qLstmParams.set_peephole_params(m_CellToForgetWeightsTensor.get(),
69  m_CellToOutputWeightsTensor.get());
70  }
71 
73  {
74  m_ProjectionWeightsTensor = std::make_unique<arm_compute::Tensor>();
75  BuildArmComputeTensor(*m_ProjectionWeightsTensor, m_Data.m_ProjectionWeights->GetTensorInfo());
76 
77  m_ProjectionBiasTensor = std::make_unique<arm_compute::Tensor>();
78  if (m_Data.m_ProjectionBias != nullptr)
79  {
80  BuildArmComputeTensor(*m_ProjectionBiasTensor, m_Data.m_ProjectionBias->GetTensorInfo());
81  }
82 
83  // Set projection params
84  qLstmParams.set_projection_params(
85  m_ProjectionWeightsTensor.get(),
86  m_Data.m_ProjectionBias != nullptr ? m_ProjectionBiasTensor.get() : nullptr);
87  }
88 
90  {
91  m_InputLayerNormWeightsTensor = std::make_unique<arm_compute::Tensor>();
92 
94  {
95  BuildArmComputeTensor(*m_InputLayerNormWeightsTensor, m_Data.m_InputLayerNormWeights->GetTensorInfo());
96  }
97 
98  m_ForgetLayerNormWeightsTensor = std::make_unique<arm_compute::Tensor>();
99  BuildArmComputeTensor(*m_ForgetLayerNormWeightsTensor, m_Data.m_ForgetLayerNormWeights->GetTensorInfo());
100 
101  m_CellLayerNormWeightsTensor = std::make_unique<arm_compute::Tensor>();
102  BuildArmComputeTensor(*m_CellLayerNormWeightsTensor, m_Data.m_CellLayerNormWeights->GetTensorInfo());
103 
104  m_OutputLayerNormWeightsTensor = std::make_unique<arm_compute::Tensor>();
105  BuildArmComputeTensor(*m_OutputLayerNormWeightsTensor, m_Data.m_OutputLayerNormWeights->GetTensorInfo());
106 
107  // Set layer norm params
108  qLstmParams.set_layer_normalization_params(
109  m_Data.m_InputLayerNormWeights != nullptr ? m_InputLayerNormWeightsTensor.get() : nullptr,
110  m_ForgetLayerNormWeightsTensor.get(),
111  m_CellLayerNormWeightsTensor.get(),
112  m_OutputLayerNormWeightsTensor.get());
113  }
114 
116  {
117  m_InputToInputWeightsTensor = std::make_unique<arm_compute::Tensor>();
118  BuildArmComputeTensor(*m_InputToInputWeightsTensor, m_Data.m_InputToInputWeights->GetTensorInfo());
119 
120  m_RecurrentToInputWeightsTensor = std::make_unique<arm_compute::Tensor>();
121  BuildArmComputeTensor(*m_RecurrentToInputWeightsTensor, m_Data.m_RecurrentToInputWeights->GetTensorInfo());
122 
123  m_InputGateBiasTensor = std::make_unique<arm_compute::Tensor>();
124  BuildArmComputeTensor(*m_InputGateBiasTensor, m_Data.m_InputGateBias->GetTensorInfo());
125 
126  // Set CIFG params
127  qLstmParams.set_cifg_params(
128  m_InputToInputWeightsTensor.get(),
129  m_RecurrentToInputWeightsTensor.get(),
130  m_Data.m_CellToInputWeights != nullptr ? m_CellToInputWeightsTensor.get() : nullptr,
131  m_InputGateBiasTensor.get());
132  }
133 
134  // Input/output tensors
135  const arm_compute::ITensor& input = static_cast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
136  const arm_compute::ITensor& outputStateIn = static_cast<IAclTensorHandle*>(m_Data.m_Inputs[1])->GetTensor();
137  const arm_compute::ITensor& cellStateIn = static_cast<IAclTensorHandle*>(m_Data.m_Inputs[2])->GetTensor();
138 
139  arm_compute::ITensor& outputStateOut = static_cast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
140  arm_compute::ITensor& cellStateOut = static_cast<IAclTensorHandle*>(m_Data.m_Outputs[1])->GetTensor();
141  arm_compute::ITensor& output = static_cast<IAclTensorHandle*>(m_Data.m_Outputs[2])->GetTensor();
142 
143 
144  // Set scalar descriptor params
145  qLstmParams.set_cell_clip_params(m_Data.m_Parameters.m_CellClip);
146  qLstmParams.set_projection_clip_params(m_Data.m_Parameters.m_ProjectionClip);
147  qLstmParams.set_hidden_state_params(m_Data.m_Parameters.m_HiddenStateZeroPoint,
149  qLstmParams.set_matmul_scale_params(m_Data.m_Parameters.m_InputIntermediateScale,
153 
154  // QLSTM NEON configure
155  m_QLstmLayer.configure(&input,
156  m_InputToForgetWeightsTensor.get(),
157  m_InputToCellWeightsTensor.get(),
158  m_InputToOutputWeightsTensor.get(),
159  m_RecurrentToForgetWeightsTensor.get(),
160  m_RecurrentToCellWeightsTensor.get(),
161  m_RecurrentToOutputWeightsTensor.get(),
162  m_ForgetGateBiasTensor.get(),
163  m_CellBiasTensor.get(),
164  m_OutputGateBiasTensor.get(),
165  &cellStateIn,
166  &outputStateIn,
167  &cellStateOut,
168  &outputStateOut,
169  &output,
170  qLstmParams);
171 
172  // Initialise ACL tensor data for mandatory params
173  InitializeArmComputeTensorData(*m_InputToForgetWeightsTensor, m_Data.m_InputToForgetWeights);
174  InitializeArmComputeTensorData(*m_InputToCellWeightsTensor, m_Data.m_InputToCellWeights);
175  InitializeArmComputeTensorData(*m_InputToOutputWeightsTensor, m_Data.m_InputToOutputWeights);
176 
177  InitializeArmComputeTensorData(*m_RecurrentToForgetWeightsTensor, m_Data.m_RecurrentToForgetWeights);
178  InitializeArmComputeTensorData(*m_RecurrentToCellWeightsTensor, m_Data.m_RecurrentToCellWeights);
179  InitializeArmComputeTensorData(*m_RecurrentToOutputWeightsTensor, m_Data.m_RecurrentToOutputWeights);
180 
181  InitializeArmComputeTensorData(*m_ForgetGateBiasTensor, m_Data.m_ForgetGateBias);
183  InitializeArmComputeTensorData(*m_OutputGateBiasTensor, m_Data.m_OutputGateBias);
184 
185  // Initialise ACL tensor data for optional params
187  {
188  InitializeArmComputeTensorData(*m_InputToInputWeightsTensor, m_Data.m_InputToInputWeights);
189  InitializeArmComputeTensorData(*m_RecurrentToInputWeightsTensor, m_Data.m_RecurrentToInputWeights);
190  InitializeArmComputeTensorData(*m_InputGateBiasTensor, m_Data.m_InputGateBias);
191  }
192 
194  {
195  InitializeArmComputeTensorData(*m_ProjectionWeightsTensor, m_Data.m_ProjectionWeights);
196 
197  if (m_Data.m_ProjectionBias != nullptr)
198  {
199  InitializeArmComputeTensorData(*m_ProjectionBiasTensor, m_Data.m_ProjectionBias);
200  }
201  }
202 
204  {
206  {
207  InitializeArmComputeTensorData(*m_CellToInputWeightsTensor, m_Data.m_CellToInputWeights);
208  }
209 
210  InitializeArmComputeTensorData(*m_CellToForgetWeightsTensor, m_Data.m_CellToForgetWeights);
211  InitializeArmComputeTensorData(*m_CellToOutputWeightsTensor, m_Data.m_CellToOutputWeights);
212  }
213 
215  {
217  {
218  InitializeArmComputeTensorData(*m_InputLayerNormWeightsTensor, m_Data.m_InputLayerNormWeights);
219  }
220 
221  InitializeArmComputeTensorData(*m_ForgetLayerNormWeightsTensor, m_Data.m_ForgetLayerNormWeights);
222  InitializeArmComputeTensorData(*m_CellLayerNormWeightsTensor, m_Data.m_CellLayerNormWeights);
223  InitializeArmComputeTensorData(*m_OutputLayerNormWeightsTensor, m_Data.m_OutputLayerNormWeights);
224  }
225 
226  // QLSTM NEON prepare
227  m_QLstmLayer.prepare();
228 
229  FreeUnusedTensors();
230 }
231 
233 {
234  m_QLstmLayer.run();
235 }
236 
238  const TensorInfo& cellStateIn,
239  const TensorInfo& outputStateIn,
240  const TensorInfo& cellStateOut,
241  const TensorInfo& outputStateOut,
242  const TensorInfo& output,
243  const QLstmDescriptor& descriptor,
244  const LstmInputParamsInfo& paramsInfo)
245 {
246  arm_compute::LSTMParams<arm_compute::ITensorInfo> aclParamsInfo;
247 
248  // Input/Output tensor info
249  const arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(input);
250  const arm_compute::TensorInfo aclOutputStateInInfo = BuildArmComputeTensorInfo(outputStateIn);
251  const arm_compute::TensorInfo aclCellStateInInfo = BuildArmComputeTensorInfo(cellStateIn);
252 
253  const arm_compute::TensorInfo aclOutputStateOutInfo = BuildArmComputeTensorInfo(outputStateOut);
254  const arm_compute::TensorInfo aclCellStateOutInfo = BuildArmComputeTensorInfo(cellStateOut);
255  const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output);
256 
257  // Mandatory tensor info
258  const arm_compute::TensorInfo aclInputToForgetWeightsInfo
259  = BuildArmComputeTensorInfo(paramsInfo.GetInputToForgetWeights());
260  const arm_compute::TensorInfo aclInputToCellWeightsInfo
261  = BuildArmComputeTensorInfo(paramsInfo.GetInputToCellWeights());
262  const arm_compute::TensorInfo aclInputToOutputWeightsInfo
263  = BuildArmComputeTensorInfo(paramsInfo.GetInputToOutputWeights());
264  const arm_compute::TensorInfo aclRecurrentToForgetWeightsInfo
265  = BuildArmComputeTensorInfo(paramsInfo.GetRecurrentToForgetWeights());
266  const arm_compute::TensorInfo aclRecurrentToCellWeightsInfo
267  = BuildArmComputeTensorInfo(paramsInfo.GetRecurrentToCellWeights());
268  const arm_compute::TensorInfo aclRecurrentToOutputWeightsInfo
269  = BuildArmComputeTensorInfo(paramsInfo.GetRecurrentToOutputWeights());
270  const arm_compute::TensorInfo aclForgetGateBiasInfo
271  = BuildArmComputeTensorInfo(paramsInfo.GetForgetGateBias());
272  const arm_compute::TensorInfo aclCellBiasInfo
273  = BuildArmComputeTensorInfo(paramsInfo.GetCellBias());
274  const arm_compute::TensorInfo aclOutputGateBiasInfo
275  = BuildArmComputeTensorInfo(paramsInfo.GetOutputGateBias());
276 
277  // Optional tensor info
278  arm_compute::TensorInfo aclInputToInputWeightsInfo;
279  arm_compute::TensorInfo aclRecurrentToInputWeightsInfo;
280 
281  arm_compute::TensorInfo aclCellToInputWeightsInfo;
282  arm_compute::TensorInfo aclCellToForgetWeightsInfo;
283  arm_compute::TensorInfo aclCellToOutputWeightsInfo;
284 
285  arm_compute::TensorInfo aclInputGateBiasInfo;
286 
287  arm_compute::TensorInfo aclProjectionWeightsInfo;
288  arm_compute::TensorInfo aclProjectionBiasInfo;
289 
290  arm_compute::TensorInfo aclInputLayerNormWeightsInfo;
291  arm_compute::TensorInfo aclForgetLayerNormWeightsInfo;
292  arm_compute::TensorInfo aclCellLayerNormWeightsInfo;
293  arm_compute::TensorInfo aclOutputLayerNormWeightsInfo;
294 
295 
296  // Create tensor info for optional params if they are enabled
297  if (descriptor.m_PeepholeEnabled)
298  {
299  if (!descriptor.m_CifgEnabled)
300  {
301  aclCellToInputWeightsInfo = BuildArmComputeTensorInfo(paramsInfo.GetCellToInputWeights());
302  }
303 
304  aclCellToForgetWeightsInfo = BuildArmComputeTensorInfo(paramsInfo.GetCellToForgetWeights());
305  aclCellToOutputWeightsInfo = BuildArmComputeTensorInfo(paramsInfo.GetCellToOutputWeights());
306 
307  // Set peephole params info
308  aclParamsInfo.set_peephole_params(&aclCellToForgetWeightsInfo,
309  &aclCellToOutputWeightsInfo);
310  }
311 
312  if (descriptor.m_ProjectionEnabled)
313  {
314  aclProjectionWeightsInfo = BuildArmComputeTensorInfo(paramsInfo.GetProjectionWeights());
315 
316  if (paramsInfo.m_ProjectionBias != nullptr)
317  {
318  aclProjectionBiasInfo = BuildArmComputeTensorInfo(paramsInfo.GetProjectionBias());
319  }
320 
321  // Set projection params info
322  aclParamsInfo.set_projection_params(
323  &aclProjectionWeightsInfo,
324  paramsInfo.m_ProjectionBias != nullptr ? &aclProjectionBiasInfo : nullptr);
325  }
326 
327 
328 
329  if (descriptor.m_LayerNormEnabled)
330  {
331  if (!descriptor.m_CifgEnabled)
332  {
333  aclInputLayerNormWeightsInfo = BuildArmComputeTensorInfo(paramsInfo.GetInputLayerNormWeights());
334 
335  }
336 
337  aclForgetLayerNormWeightsInfo = BuildArmComputeTensorInfo(paramsInfo.GetForgetLayerNormWeights());
338  aclCellLayerNormWeightsInfo = BuildArmComputeTensorInfo(paramsInfo.GetCellLayerNormWeights());
339  aclOutputLayerNormWeightsInfo = BuildArmComputeTensorInfo(paramsInfo.GetOutputLayerNormWeights());
340 
341  // Set layer norm params info
342  aclParamsInfo.set_layer_normalization_params(
343  paramsInfo.m_InputLayerNormWeights != nullptr ? &aclInputLayerNormWeightsInfo : nullptr,
344  &aclForgetLayerNormWeightsInfo,
345  &aclCellLayerNormWeightsInfo,
346  &aclOutputLayerNormWeightsInfo);
347  }
348 
349  if (!descriptor.m_CifgEnabled)
350  {
351  aclInputToInputWeightsInfo = BuildArmComputeTensorInfo(paramsInfo.GetInputToInputWeights());
352  aclRecurrentToInputWeightsInfo = BuildArmComputeTensorInfo(paramsInfo.GetRecurrentToInputWeights());
353  aclInputGateBiasInfo = BuildArmComputeTensorInfo(paramsInfo.GetInputGateBias());
354 
355  // Set CIFG params info
356  aclParamsInfo.set_cifg_params(
357  &aclInputToInputWeightsInfo,
358  &aclRecurrentToInputWeightsInfo,
359  paramsInfo.m_CellToInputWeights != nullptr ? &aclCellToInputWeightsInfo : nullptr,
360  &aclInputGateBiasInfo);
361  }
362 
363  // Set scalar descriptor params
364  aclParamsInfo.set_cell_clip_params(descriptor.m_CellClip);
365  aclParamsInfo.set_projection_clip_params(descriptor.m_ProjectionClip);
366  aclParamsInfo.set_hidden_state_params(descriptor.m_HiddenStateZeroPoint, descriptor.m_HiddenStateScale);
367  aclParamsInfo.set_matmul_scale_params(descriptor.m_InputIntermediateScale,
368  descriptor.m_ForgetIntermediateScale,
369  descriptor.m_CellIntermediateScale,
370  descriptor.m_OutputIntermediateScale);
371 
372  // QLSTM NEON validate
373  return arm_compute::NEQLSTMLayer::validate(&aclInputInfo,
374  &aclInputToForgetWeightsInfo,
375  &aclInputToCellWeightsInfo,
376  &aclInputToOutputWeightsInfo,
377  &aclRecurrentToForgetWeightsInfo,
378  &aclRecurrentToCellWeightsInfo,
379  &aclRecurrentToOutputWeightsInfo,
380  &aclForgetGateBiasInfo,
381  &aclCellBiasInfo,
382  &aclOutputGateBiasInfo,
383  &aclCellStateInInfo,
384  &aclOutputStateInInfo,
385  &aclCellStateOutInfo,
386  &aclOutputStateOutInfo,
387  &aclOutputInfo,
388  aclParamsInfo);
389 }
390 
391 void NeonQLstmWorkload::FreeUnusedTensors()
392 {
393  FreeTensorIfUnused(m_InputToInputWeightsTensor);
394  FreeTensorIfUnused(m_InputToForgetWeightsTensor);
395  FreeTensorIfUnused(m_InputToCellWeightsTensor);
396  FreeTensorIfUnused(m_InputToOutputWeightsTensor);
397 
398  FreeTensorIfUnused(m_RecurrentToInputWeightsTensor);
399  FreeTensorIfUnused(m_RecurrentToForgetWeightsTensor);
400  FreeTensorIfUnused(m_RecurrentToCellWeightsTensor);
401  FreeTensorIfUnused(m_RecurrentToOutputWeightsTensor);
402 
403  FreeTensorIfUnused(m_CellToInputWeightsTensor);
404  FreeTensorIfUnused(m_CellToForgetWeightsTensor);
405  FreeTensorIfUnused(m_CellToOutputWeightsTensor);
406 
407  FreeTensorIfUnused(m_InputGateBiasTensor);
408  FreeTensorIfUnused(m_ForgetGateBiasTensor);
409  FreeTensorIfUnused(m_CellBiasTensor);
410  FreeTensorIfUnused(m_OutputGateBiasTensor);
411 
412  FreeTensorIfUnused(m_ProjectionWeightsTensor);
413  FreeTensorIfUnused(m_ProjectionBiasTensor);
414 
415  FreeTensorIfUnused(m_InputLayerNormWeightsTensor);
416  FreeTensorIfUnused(m_ForgetLayerNormWeightsTensor);
417  FreeTensorIfUnused(m_CellLayerNormWeightsTensor);
418  FreeTensorIfUnused(m_OutputLayerNormWeightsTensor);
419 }
420 
421 } //namespace armnn
const ConstCpuTensorHandle * m_CellToForgetWeights
const TensorInfo * m_InputLayerNormWeights
Definition: LstmParams.hpp:106
const TensorInfo & GetRecurrentToCellWeights() const
Definition: LstmParams.hpp:145
NeonQLstmWorkload(const QLstmQueueDescriptor &descriptor, const WorkloadInfo &info)
const ConstCpuTensorHandle * m_ProjectionWeights
const TensorInfo & GetCellBias() const
Definition: LstmParams.hpp:173
const TensorInfo & GetRecurrentToInputWeights() const
Definition: LstmParams.hpp:137
const TensorInfo & GetCellLayerNormWeights() const
Definition: LstmParams.hpp:197
const TensorInfo & GetRecurrentToOutputWeights() const
Definition: LstmParams.hpp:149
bool m_PeepholeEnabled
Enable/disable peephole.
const ConstCpuTensorHandle * m_ProjectionBias
float m_HiddenStateScale
Hidden State quantization scale.
const ConstCpuTensorHandle * m_ForgetLayerNormWeights
const QLstmQueueDescriptor m_Data
Definition: Workload.hpp:46
float m_OutputIntermediateScale
Output intermediate quantization scale.
const TensorInfo & GetCellToInputWeights() const
Definition: LstmParams.hpp:153
const ConstCpuTensorHandle * m_CellLayerNormWeights
const ConstCpuTensorHandle * m_RecurrentToCellWeights
const ConstCpuTensorHandle * m_RecurrentToInputWeights
arm_compute::Status NeonQLstmWorkloadValidate(const TensorInfo &input, const TensorInfo &cellStateIn, const TensorInfo &outputStateIn, const TensorInfo &cellStateOut, const TensorInfo &outputStateOut, const TensorInfo &output, const QLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo)
const ConstCpuTensorHandle * m_OutputGateBias
const ConstCpuTensorHandle * m_CellBias
Copyright (c) 2020 ARM Limited.
const TensorInfo & GetCellToForgetWeights() const
Definition: LstmParams.hpp:157
const TensorInfo & GetForgetLayerNormWeights() const
Definition: LstmParams.hpp:193
virtual void Execute() const override
const TensorInfo & GetCellToOutputWeights() const
Definition: LstmParams.hpp:161
const ConstCpuTensorHandle * m_CellToOutputWeights
const TensorInfo & GetInputToCellWeights() const
Definition: LstmParams.hpp:129
const ConstCpuTensorHandle * m_OutputLayerNormWeights
bool m_LayerNormEnabled
Enable/disable layer normalization.
const ConstCpuTensorHandle * m_InputToForgetWeights
const TensorInfo & GetInputToOutputWeights() const
Definition: LstmParams.hpp:133
float m_ProjectionClip
Clipping threshold value for the projection.
float m_InputIntermediateScale
Input intermediate quantization scale.
const TensorInfo * m_ProjectionBias
Definition: LstmParams.hpp:105
Status
enumeration
Definition: Types.hpp:26
A QLstmDescriptor for the QLstmLayer.
const TensorInfo * m_CellToInputWeights
Definition: LstmParams.hpp:97
const TensorInfo & GetRecurrentToForgetWeights() const
Definition: LstmParams.hpp:141
float m_ForgetIntermediateScale
Forget intermediate quantization scale.
void InitializeArmComputeTensorData(arm_compute::Tensor &tensor, const ConstCpuTensorHandle *handle)
const ConstCpuTensorHandle * m_CellToInputWeights
const TensorInfo & GetInputToInputWeights() const
Definition: LstmParams.hpp:121
const TensorInfo & GetOutputLayerNormWeights() const
Definition: LstmParams.hpp:201
float m_CellClip
Clipping threshold value for the cell state.
const ConstCpuTensorHandle * m_RecurrentToOutputWeights
const TensorInfo & GetForgetGateBias() const
Definition: LstmParams.hpp:169
std::vector< ITensorHandle * > m_Outputs
bool m_ProjectionEnabled
Enable/disable the projection layer.
const ConstCpuTensorHandle * m_InputGateBias
const TensorInfo & GetInputGateBias() const
Definition: LstmParams.hpp:165
const TensorInfo & GetProjectionWeights() const
Definition: LstmParams.hpp:181
const TensorInfo & GetInputToForgetWeights() const
Definition: LstmParams.hpp:125
Contains information about inputs and outputs to a layer.
const TensorInfo & GetInputLayerNormWeights() const
Definition: LstmParams.hpp:189
std::vector< ITensorHandle * > m_Inputs
const ConstCpuTensorHandle * m_InputLayerNormWeights
const ConstCpuTensorHandle * m_RecurrentToForgetWeights
const TensorInfo & GetOutputGateBias() const
Definition: LstmParams.hpp:177
const ConstCpuTensorHandle * m_ForgetGateBias
const TensorInfo & GetProjectionBias() const
Definition: LstmParams.hpp:185
float m_CellIntermediateScale
Cell intermediate quantization scale.
const ConstCpuTensorHandle * m_InputToOutputWeights
bool m_CifgEnabled
Enable/disable CIFG (coupled input & forget gate).
const TensorInfo & GetTensorInfo() const
const ConstCpuTensorHandle * m_InputToInputWeights
int32_t m_HiddenStateZeroPoint
Hidden State zero point.
const ConstCpuTensorHandle * m_InputToCellWeights