From 6f92c8e9f8bb38dcf5dccf8deeff5112ecd8e37c Mon Sep 17 00:00:00 2001
From: Nikhil Raj
Date: Wed, 22 Nov 2023 11:41:15 +0000
Subject: Update Doxygen for 23.11

Signed-off-by: Nikhil Raj
Change-Id: I47cd933f5002cb94a73aa97689d7b3d9c93cb849
---
 23.11/_neon_lstm_float_workload_8cpp_source.html | 629 +++++++++++++++++++++++
 1 file changed, 629 insertions(+)
 create mode 100644 23.11/_neon_lstm_float_workload_8cpp_source.html

Arm NN: src/backends/neon/workloads/NeonLstmFloatWorkload.cpp Source File
NeonLstmFloatWorkload.cpp
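The listing below is the Neon (CPU) backend's float LSTM workload, which wraps Arm Compute Library's NELSTMLayer. The constructor builds ACL tensors for the LSTM weights and biases, configures the layer and uploads the weight data; Execute() then simply runs the configured layer, and NeonLstmFloatWorkloadValidate() lets callers check support for a given configuration up front.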
//
// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "NeonLstmFloatWorkload.hpp"
#include "NeonWorkloadUtils.hpp"

#include <aclCommon/ArmComputeTensorUtils.hpp>
#include <aclCommon/ArmComputeUtils.hpp>

#include <armnn/utility/NumericCast.hpp>

#include "neon/NeonTensorHandle.hpp"

namespace armnn
{
using namespace armcomputetensorutils;

NeonLstmFloatWorkload::NeonLstmFloatWorkload(const LstmQueueDescriptor& descriptor, const WorkloadInfo& info)
    : FloatWorkload<LstmQueueDescriptor>(descriptor, info)
{
    // Report Profiling Details
    ARMNN_REPORT_PROFILING_WORKLOAD_DESC("NeonLstmFloatWorkload_Construct",
                                         descriptor.m_Parameters,
                                         info,
                                         GetGuid());

    arm_compute::LSTMParams<arm_compute::ITensor> lstm_param;

    // Basic parameters
    m_InputToForgetWeightsTensor = std::make_unique<arm_compute::Tensor>();
    BuildArmComputeTensor(*m_InputToForgetWeightsTensor, m_Data.m_InputToForgetWeights->GetTensorInfo());

    m_InputToCellWeightsTensor = std::make_unique<arm_compute::Tensor>();
    BuildArmComputeTensor(*m_InputToCellWeightsTensor, m_Data.m_InputToCellWeights->GetTensorInfo());

    m_InputToOutputWeightsTensor = std::make_unique<arm_compute::Tensor>();
    BuildArmComputeTensor(*m_InputToOutputWeightsTensor, m_Data.m_InputToOutputWeights->GetTensorInfo());

    m_RecurrentToForgetWeightsTensor = std::make_unique<arm_compute::Tensor>();
    BuildArmComputeTensor(*m_RecurrentToForgetWeightsTensor, m_Data.m_RecurrentToForgetWeights->GetTensorInfo());

    m_RecurrentToCellWeightsTensor = std::make_unique<arm_compute::Tensor>();
    BuildArmComputeTensor(*m_RecurrentToCellWeightsTensor, m_Data.m_RecurrentToCellWeights->GetTensorInfo());

    m_RecurrentToOutputWeightsTensor = std::make_unique<arm_compute::Tensor>();
    BuildArmComputeTensor(*m_RecurrentToOutputWeightsTensor, m_Data.m_RecurrentToOutputWeights->GetTensorInfo());

    m_ForgetGateBiasTensor = std::make_unique<arm_compute::Tensor>();
    BuildArmComputeTensor(*m_ForgetGateBiasTensor, m_Data.m_ForgetGateBias->GetTensorInfo());

    m_CellBiasTensor = std::make_unique<arm_compute::Tensor>();
    BuildArmComputeTensor(*m_CellBiasTensor, m_Data.m_CellBias->GetTensorInfo());

    m_OutputGateBiasTensor = std::make_unique<arm_compute::Tensor>();
    BuildArmComputeTensor(*m_OutputGateBiasTensor, m_Data.m_OutputGateBias->GetTensorInfo());

    // For the logic behind the optional CIFG parameters below, see the Android NN API LSTM operation.
    if (!m_Data.m_Parameters.m_CifgEnabled)
    {
        m_InputToInputWeightsTensor = std::make_unique<arm_compute::Tensor>();
        BuildArmComputeTensor(*m_InputToInputWeightsTensor, m_Data.m_InputToInputWeights->GetTensorInfo());

        m_RecurrentToInputWeightsTensor = std::make_unique<arm_compute::Tensor>();
        BuildArmComputeTensor(*m_RecurrentToInputWeightsTensor, m_Data.m_RecurrentToInputWeights->GetTensorInfo());

        m_CellToInputWeightsTensor = std::make_unique<arm_compute::Tensor>();
        if (m_Data.m_CellToInputWeights != nullptr)
        {
            BuildArmComputeTensor(*m_CellToInputWeightsTensor, m_Data.m_CellToInputWeights->GetTensorInfo());
        }

        m_InputGateBiasTensor = std::make_unique<arm_compute::Tensor>();
        BuildArmComputeTensor(*m_InputGateBiasTensor, m_Data.m_InputGateBias->GetTensorInfo());

        lstm_param.set_cifg_params(m_InputToInputWeightsTensor.get(),
                                   m_RecurrentToInputWeightsTensor.get(),
                                   m_Data.m_CellToInputWeights != nullptr ? m_CellToInputWeightsTensor.get() : nullptr,
                                   m_InputGateBiasTensor.get());
    }

    if (m_Data.m_Parameters.m_ProjectionEnabled)
    {
        m_ProjectionWeightsTensor = std::make_unique<arm_compute::Tensor>();
        BuildArmComputeTensor(*m_ProjectionWeightsTensor, m_Data.m_ProjectionWeights->GetTensorInfo());

        m_ProjectionBiasTensor = std::make_unique<arm_compute::Tensor>();
        if (m_Data.m_ProjectionBias != nullptr)
        {
            BuildArmComputeTensor(*m_ProjectionBiasTensor, m_Data.m_ProjectionBias->GetTensorInfo());
        }

        lstm_param.set_projection_params(m_ProjectionWeightsTensor.get(),
                                         m_Data.m_ProjectionBias != nullptr ? m_ProjectionBiasTensor.get() : nullptr);
    }

    if (m_Data.m_Parameters.m_PeepholeEnabled)
    {
        m_CellToForgetWeightsTensor = std::make_unique<arm_compute::Tensor>();
        BuildArmComputeTensor(*m_CellToForgetWeightsTensor, m_Data.m_CellToForgetWeights->GetTensorInfo());

        m_CellToOutputWeightsTensor = std::make_unique<arm_compute::Tensor>();
        BuildArmComputeTensor(*m_CellToOutputWeightsTensor, m_Data.m_CellToOutputWeights->GetTensorInfo());

        lstm_param.set_peephole_params(m_CellToForgetWeightsTensor.get(), m_CellToOutputWeightsTensor.get());
    }

    if (m_Data.m_Parameters.m_LayerNormEnabled)
    {
        m_InputLayerNormWeightsTensor = std::make_unique<arm_compute::Tensor>();
        if (!m_Data.m_Parameters.m_CifgEnabled)
        {
            BuildArmComputeTensor(*m_InputLayerNormWeightsTensor, m_Data.m_InputLayerNormWeights->GetTensorInfo());
        }

        m_ForgetLayerNormWeightsTensor = std::make_unique<arm_compute::Tensor>();
        BuildArmComputeTensor(*m_ForgetLayerNormWeightsTensor, m_Data.m_ForgetLayerNormWeights->GetTensorInfo());

        m_CellLayerNormWeightsTensor = std::make_unique<arm_compute::Tensor>();
        BuildArmComputeTensor(*m_CellLayerNormWeightsTensor, m_Data.m_CellLayerNormWeights->GetTensorInfo());

        m_OutputLayerNormWeightsTensor = std::make_unique<arm_compute::Tensor>();
        BuildArmComputeTensor(*m_OutputLayerNormWeightsTensor, m_Data.m_OutputLayerNormWeights->GetTensorInfo());

        lstm_param.set_layer_normalization_params(m_Data.m_Parameters.m_CifgEnabled ?
                                                  nullptr : m_InputLayerNormWeightsTensor.get(),
                                                  m_ForgetLayerNormWeightsTensor.get(),
                                                  m_CellLayerNormWeightsTensor.get(),
                                                  m_OutputLayerNormWeightsTensor.get());
    }

    const arm_compute::ITensor& input           = static_cast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
    const arm_compute::ITensor& output_state_in = static_cast<IAclTensorHandle*>(m_Data.m_Inputs[1])->GetTensor();
    const arm_compute::ITensor& cell_state_in   = static_cast<IAclTensorHandle*>(m_Data.m_Inputs[2])->GetTensor();

    arm_compute::ITensor& output_state_out = static_cast<IAclTensorHandle*>(m_Data.m_Outputs[1])->GetTensor();
    arm_compute::ITensor& cell_state_out   = static_cast<IAclTensorHandle*>(m_Data.m_Outputs[2])->GetTensor();
    arm_compute::ITensor& output           = static_cast<IAclTensorHandle*>(m_Data.m_Outputs[3])->GetTensor();

    // Get the batch_size and the num_units from the cellStateIn dimensions
    const TensorInfo& inputTensorInfo = info.m_InputTensorInfos[2];
    const unsigned int batch_size = armnn::numeric_cast<unsigned int>(inputTensorInfo.GetShape()[0]);
    const unsigned int num_units  = armnn::numeric_cast<unsigned int>(inputTensorInfo.GetShape()[1]);

    m_ScratchBuffer = std::make_unique<arm_compute::Tensor>();
    if (m_Data.m_Parameters.m_CifgEnabled)
    {
        // 2D tensor with dimensions [num_units * 3, batch_size] with CIFG
        armnn::TensorInfo scratchBuffer1({ batch_size, num_units * 3 }, DataType::Float32);
        BuildArmComputeTensor(*m_ScratchBuffer, scratchBuffer1);
    }
    else
    {
        // scratch_buffer [num_units * 4, batch_size] without CIFG
        armnn::TensorInfo scratchBuffer2({ batch_size, num_units * 4 }, DataType::Float32);
        BuildArmComputeTensor(*m_ScratchBuffer, scratchBuffer2);
    }
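    // Sizing note: a non-CIFG LSTM step produces four gate pre-activations per cell
    // (input, forget, cell candidate and output), hence num_units * 4 scratch values per
    // batch entry. With CIFG the input gate is derived from the forget gate
    // (input_gate = 1 - forget_gate), so only three slices are needed. For example, with
    // batch_size = 2 and num_units = 128 the scratch shape is { 2, 384 } with CIFG
    // enabled and { 2, 512 } without.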

    float cell_threshold       = m_Data.m_Parameters.m_ClippingThresCell;
    float projection_threshold = m_Data.m_Parameters.m_ClippingThresProj;

    // Convert the LSTM activation function (one of the five supported settings) to its ACL equivalent.
    arm_compute::ActivationLayerInfo activationLayerInfo =
        ConvertLstmActivationFuncToAclLayerInfo(m_Data.m_Parameters.m_ActivationFunc);

    m_LstmLayer.configure(&input, m_InputToForgetWeightsTensor.get(), m_InputToCellWeightsTensor.get(),
                          m_InputToOutputWeightsTensor.get(), m_RecurrentToForgetWeightsTensor.get(),
                          m_RecurrentToCellWeightsTensor.get(), m_RecurrentToOutputWeightsTensor.get(),
                          m_ForgetGateBiasTensor.get(), m_CellBiasTensor.get(), m_OutputGateBiasTensor.get(),
                          &output_state_in, &cell_state_in, m_ScratchBuffer.get(), &output_state_out,
                          &cell_state_out, &output, lstm_param, activationLayerInfo,
                          cell_threshold, projection_threshold);

    armcomputetensorutils::InitialiseArmComputeTensorEmpty(*m_ScratchBuffer);

    InitializeArmComputeTensorData(*m_InputToForgetWeightsTensor, m_Data.m_InputToForgetWeights);
    InitializeArmComputeTensorData(*m_InputToCellWeightsTensor, m_Data.m_InputToCellWeights);
    InitializeArmComputeTensorData(*m_InputToOutputWeightsTensor, m_Data.m_InputToOutputWeights);
    InitializeArmComputeTensorData(*m_RecurrentToForgetWeightsTensor, m_Data.m_RecurrentToForgetWeights);
    InitializeArmComputeTensorData(*m_RecurrentToCellWeightsTensor, m_Data.m_RecurrentToCellWeights);
    InitializeArmComputeTensorData(*m_RecurrentToOutputWeightsTensor, m_Data.m_RecurrentToOutputWeights);
    InitializeArmComputeTensorData(*m_ForgetGateBiasTensor, m_Data.m_ForgetGateBias);
    InitializeArmComputeTensorData(*m_CellBiasTensor, m_Data.m_CellBias);
    InitializeArmComputeTensorData(*m_OutputGateBiasTensor, m_Data.m_OutputGateBias);

    if (!m_Data.m_Parameters.m_CifgEnabled)
    {
        InitializeArmComputeTensorData(*m_InputToInputWeightsTensor, m_Data.m_InputToInputWeights);
        InitializeArmComputeTensorData(*m_RecurrentToInputWeightsTensor, m_Data.m_RecurrentToInputWeights);
        if (m_Data.m_CellToInputWeights != nullptr)
        {
            InitializeArmComputeTensorData(*m_CellToInputWeightsTensor, m_Data.m_CellToInputWeights);
        }
        InitializeArmComputeTensorData(*m_InputGateBiasTensor, m_Data.m_InputGateBias);
    }

    if (m_Data.m_Parameters.m_ProjectionEnabled)
    {
        InitializeArmComputeTensorData(*m_ProjectionWeightsTensor, m_Data.m_ProjectionWeights);
        if (m_Data.m_ProjectionBias != nullptr)
        {
            InitializeArmComputeTensorData(*m_ProjectionBiasTensor, m_Data.m_ProjectionBias);
        }
    }

    if (m_Data.m_Parameters.m_PeepholeEnabled)
    {
        InitializeArmComputeTensorData(*m_CellToForgetWeightsTensor, m_Data.m_CellToForgetWeights);
        InitializeArmComputeTensorData(*m_CellToOutputWeightsTensor, m_Data.m_CellToOutputWeights);
    }

    if (m_Data.m_Parameters.m_LayerNormEnabled)
    {
        if (!m_Data.m_Parameters.m_CifgEnabled)
        {
            InitializeArmComputeTensorData(*m_InputLayerNormWeightsTensor, m_Data.m_InputLayerNormWeights);
        }
        InitializeArmComputeTensorData(*m_ForgetLayerNormWeightsTensor, m_Data.m_ForgetLayerNormWeights);
        InitializeArmComputeTensorData(*m_CellLayerNormWeightsTensor, m_Data.m_CellLayerNormWeights);
        InitializeArmComputeTensorData(*m_OutputLayerNormWeightsTensor, m_Data.m_OutputLayerNormWeights);
    }

    // Force Compute Library to perform the necessary copying and reshaping; afterwards,
    // delete all the input tensors that will no longer be needed.
    m_LstmLayer.prepare();
    FreeUnusedTensors();
}
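// All one-time work (configure(), weight upload, prepare()) happens in the constructor
// above, so Execute() only has to launch the already-configured kernels.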

void NeonLstmFloatWorkload::Execute() const
{
    ARMNN_SCOPED_PROFILING_EVENT_NEON_NAME_GUID("NeonLstmFloatWorkload_Execute");
    m_LstmLayer.run();
}

arm_compute::Status NeonLstmFloatWorkloadValidate(const TensorInfo& input,
                                                  const TensorInfo& outputStateIn,
                                                  const TensorInfo& cellStateIn,
                                                  const TensorInfo& scratchBuffer,
                                                  const TensorInfo& outputStateOut,
                                                  const TensorInfo& cellStateOut,
                                                  const TensorInfo& output,
                                                  const LstmDescriptor& descriptor,
                                                  const LstmInputParamsInfo& paramsInfo)
{
    arm_compute::LSTMParams<arm_compute::ITensorInfo> lstm_params_info;

    // The inputs and outputs
    const arm_compute::TensorInfo aclInputInfo          = BuildArmComputeTensorInfo(input);
    const arm_compute::TensorInfo aclOutputStateInInfo  = BuildArmComputeTensorInfo(outputStateIn);
    const arm_compute::TensorInfo aclCellStateInInfo    = BuildArmComputeTensorInfo(cellStateIn);
    const arm_compute::TensorInfo aclScratchBufferInfo  = BuildArmComputeTensorInfo(scratchBuffer);
    const arm_compute::TensorInfo aclOutputStateOutInfo = BuildArmComputeTensorInfo(outputStateOut);
    const arm_compute::TensorInfo aclCellStateOutInfo   = BuildArmComputeTensorInfo(cellStateOut);
    const arm_compute::TensorInfo aclOutputInfo         = BuildArmComputeTensorInfo(output);

    // Basic parameters
    const arm_compute::TensorInfo aclInputToForgetWeightsInfo
        = BuildArmComputeTensorInfo(paramsInfo.GetInputToForgetWeights());
    const arm_compute::TensorInfo aclInputToCellWeightsInfo
        = BuildArmComputeTensorInfo(paramsInfo.GetInputToCellWeights());
    const arm_compute::TensorInfo aclInputToOutputWeightsInfo
        = BuildArmComputeTensorInfo(paramsInfo.GetInputToOutputWeights());
    const arm_compute::TensorInfo aclRecurrentToForgetWeightsInfo
        = BuildArmComputeTensorInfo(paramsInfo.GetRecurrentToForgetWeights());
    const arm_compute::TensorInfo aclRecurrentToCellWeightsInfo
        = BuildArmComputeTensorInfo(paramsInfo.GetRecurrentToCellWeights());
    const arm_compute::TensorInfo aclRecurrentToOutputWeightsInfo
        = BuildArmComputeTensorInfo(paramsInfo.GetRecurrentToOutputWeights());
    const arm_compute::TensorInfo aclForgetGateBiasInfo
        = BuildArmComputeTensorInfo(paramsInfo.GetForgetGateBias());
    const arm_compute::TensorInfo aclCellBiasInfo
        = BuildArmComputeTensorInfo(paramsInfo.GetCellBias());
    const arm_compute::TensorInfo aclOutputGateBiasInfo
        = BuildArmComputeTensorInfo(paramsInfo.GetOutputGateBias());

    arm_compute::TensorInfo aclInputToInputWeightsInfo;
    arm_compute::TensorInfo aclRecurrentToInputWeightsInfo;
    arm_compute::TensorInfo aclCellToInputWeightsInfo;
    arm_compute::TensorInfo aclInputGateBiasInfo;
    arm_compute::TensorInfo aclProjectionWeightsInfo;
    arm_compute::TensorInfo aclProjectionBiasInfo;
    arm_compute::TensorInfo aclCellToForgetWeightsInfo;
    arm_compute::TensorInfo aclCellToOutputWeightsInfo;

    arm_compute::TensorInfo aclInputLayerNormWeightsInfo;
    arm_compute::TensorInfo aclForgetLayerNormWeightsInfo;
    arm_compute::TensorInfo aclCellLayerNormWeightsInfo;
    arm_compute::TensorInfo aclOutputLayerNormWeightsInfo;

    if (!descriptor.m_CifgEnabled)
    {
        if (descriptor.m_PeepholeEnabled)
        {
            aclCellToInputWeightsInfo = BuildArmComputeTensorInfo(paramsInfo.GetCellToInputWeights());
        }
        aclInputToInputWeightsInfo     = BuildArmComputeTensorInfo(paramsInfo.GetInputToInputWeights());
        aclRecurrentToInputWeightsInfo = BuildArmComputeTensorInfo(paramsInfo.GetRecurrentToInputWeights());
        aclInputGateBiasInfo           = BuildArmComputeTensorInfo(paramsInfo.GetInputGateBias());

        lstm_params_info.set_cifg_params(&aclInputToInputWeightsInfo, &aclRecurrentToInputWeightsInfo,
                                         descriptor.m_PeepholeEnabled ? &aclCellToInputWeightsInfo : nullptr,
                                         &aclInputGateBiasInfo);
    }

    if (descriptor.m_ProjectionEnabled)
    {
        if (paramsInfo.m_ProjectionBias != nullptr)
        {
            aclProjectionBiasInfo = BuildArmComputeTensorInfo(paramsInfo.GetProjectionBias());
        }
        aclProjectionWeightsInfo = BuildArmComputeTensorInfo(paramsInfo.GetProjectionWeights());

        lstm_params_info.set_projection_params(&aclProjectionWeightsInfo,
                                               paramsInfo.m_ProjectionBias != nullptr ?
                                               &aclProjectionBiasInfo : nullptr);
    }

    if (descriptor.m_PeepholeEnabled)
    {
        aclCellToForgetWeightsInfo = BuildArmComputeTensorInfo(paramsInfo.GetCellToForgetWeights());
        aclCellToOutputWeightsInfo = BuildArmComputeTensorInfo(paramsInfo.GetCellToOutputWeights());

        lstm_params_info.set_peephole_params(&aclCellToForgetWeightsInfo, &aclCellToOutputWeightsInfo);
    }

    if (descriptor.m_LayerNormEnabled)
    {
        if (!descriptor.m_CifgEnabled)
        {
            aclInputLayerNormWeightsInfo = BuildArmComputeTensorInfo(paramsInfo.GetInputLayerNormWeights());
        }
        aclForgetLayerNormWeightsInfo = BuildArmComputeTensorInfo(paramsInfo.GetForgetLayerNormWeights());
        aclCellLayerNormWeightsInfo   = BuildArmComputeTensorInfo(paramsInfo.GetCellLayerNormWeights());
        aclOutputLayerNormWeightsInfo = BuildArmComputeTensorInfo(paramsInfo.GetOutputLayerNormWeights());

        lstm_params_info.set_layer_normalization_params(descriptor.m_CifgEnabled ?
                                                        nullptr : &aclInputLayerNormWeightsInfo,
                                                        &aclForgetLayerNormWeightsInfo,
                                                        &aclCellLayerNormWeightsInfo,
                                                        &aclOutputLayerNormWeightsInfo);
    }

    float cell_threshold       = descriptor.m_ClippingThresCell;
    float projection_threshold = descriptor.m_ClippingThresProj;

    // Convert the LSTM activation function (one of the five supported settings) to its ACL equivalent.
    arm_compute::ActivationLayerInfo activationLayerInfo =
        ConvertLstmActivationFuncToAclLayerInfo(descriptor.m_ActivationFunc);

    return arm_compute::NELSTMLayer::validate(&aclInputInfo,
                                              &aclInputToForgetWeightsInfo,
                                              &aclInputToCellWeightsInfo,
                                              &aclInputToOutputWeightsInfo,
                                              &aclRecurrentToForgetWeightsInfo,
                                              &aclRecurrentToCellWeightsInfo,
                                              &aclRecurrentToOutputWeightsInfo,
                                              &aclForgetGateBiasInfo,
                                              &aclCellBiasInfo,
                                              &aclOutputGateBiasInfo,
                                              &aclOutputStateInInfo,
                                              &aclCellStateInInfo,
                                              &aclScratchBufferInfo,
                                              &aclOutputStateOutInfo,
                                              &aclCellStateOutInfo,
                                              &aclOutputInfo,
                                              lstm_params_info,
                                              activationLayerInfo,
                                              cell_threshold,
                                              projection_threshold);
}
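// NeonLstmFloatWorkloadValidate mirrors the configure() call in the constructor: it lets
// the backend's layer-support checks ask Compute Library whether a given LSTM
// configuration is supported before any tensors are allocated or a workload is built.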

void NeonLstmFloatWorkload::FreeUnusedTensors()
{
    FreeTensorIfUnused(m_InputToInputWeightsTensor);
    FreeTensorIfUnused(m_InputToForgetWeightsTensor);
    FreeTensorIfUnused(m_InputToCellWeightsTensor);
    FreeTensorIfUnused(m_InputToOutputWeightsTensor);
    FreeTensorIfUnused(m_RecurrentToInputWeightsTensor);
    FreeTensorIfUnused(m_RecurrentToForgetWeightsTensor);
    FreeTensorIfUnused(m_RecurrentToCellWeightsTensor);
    FreeTensorIfUnused(m_RecurrentToOutputWeightsTensor);
    FreeTensorIfUnused(m_CellToInputWeightsTensor);
    FreeTensorIfUnused(m_CellToForgetWeightsTensor);
    FreeTensorIfUnused(m_CellToOutputWeightsTensor);
    FreeTensorIfUnused(m_InputGateBiasTensor);
    FreeTensorIfUnused(m_ForgetGateBiasTensor);
    FreeTensorIfUnused(m_CellBiasTensor);
    FreeTensorIfUnused(m_OutputGateBiasTensor);
    FreeTensorIfUnused(m_ProjectionWeightsTensor);
    FreeTensorIfUnused(m_ProjectionBiasTensor);
    FreeTensorIfUnused(m_ScratchBuffer);
    FreeTensorIfUnused(m_InputLayerNormWeightsTensor);
    FreeTensorIfUnused(m_ForgetLayerNormWeightsTensor);
    FreeTensorIfUnused(m_CellLayerNormWeightsTensor);
    FreeTensorIfUnused(m_OutputLayerNormWeightsTensor);
}

void NeonLstmFloatWorkload::ReplaceInputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot)
{
    ITensorHandle* backupHandle = this->m_Data.m_Inputs[slot];
    this->m_Data.m_Inputs[slot] = tensorHandle;
    try
    {
        Reconfigure();
    }
    catch(armnn::UnimplementedException& e)
    {
        // Cannot reconfigure, revert the slot back and throw the exception.
        this->m_Data.m_Inputs[slot] = backupHandle;
        throw e;
    }
}

// Replace output tensor handle with the given TensorHandle
void NeonLstmFloatWorkload::ReplaceOutputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot)
{
    ITensorHandle* backupHandle = this->m_Data.m_Outputs[slot];
    this->m_Data.m_Outputs[slot] = tensorHandle;
    try
    {
        Reconfigure();
    }
    catch(armnn::UnimplementedException& e)
    {
        // Cannot reconfigure, revert the slot back and throw the exception.
        this->m_Data.m_Outputs[slot] = backupHandle;
        throw e;
    }
}

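// Because Reconfigure() below is unimplemented for this workload, the two
// Replace*TensorHandle overrides above always restore the original handle and rethrow;
// in practice, tensor-handle replacement is unsupported here.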
void NeonLstmFloatWorkload::Reconfigure()
{
    throw armnn::UnimplementedException("Reconfigure not implemented for this workload");
}

} // namespace armnn
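As a rough usage sketch (not part of the original file): the expected pattern is to query NeonLstmFloatWorkloadValidate() before constructing the workload. MakeNeonLstmWorkload is a hypothetical helper name, and the descriptor, workload info and parameter plumbing are assumed to have been populated by the caller.

// Illustrative sketch only: assumes 'descriptor', 'info' and 'paramsInfo' are fully
// populated and that the tensor order matches the indices used by the workload
// (inputs: input, outputStateIn, cellStateIn; outputs: scratchBuffer, outputStateOut,
// cellStateOut, output).
#include <memory>

#include "NeonLstmFloatWorkload.hpp"

std::unique_ptr<armnn::NeonLstmFloatWorkload> MakeNeonLstmWorkload(
    const armnn::LstmQueueDescriptor& descriptor,
    const armnn::WorkloadInfo& info,
    const armnn::LstmInputParamsInfo& paramsInfo)
{
    // Ask Compute Library up front whether this LSTM configuration is supported.
    const arm_compute::Status status =
        armnn::NeonLstmFloatWorkloadValidate(info.m_InputTensorInfos[0],  // input
                                             info.m_InputTensorInfos[1],  // outputStateIn
                                             info.m_InputTensorInfos[2],  // cellStateIn
                                             info.m_OutputTensorInfos[0], // scratchBuffer
                                             info.m_OutputTensorInfos[1], // outputStateOut
                                             info.m_OutputTensorInfos[2], // cellStateOut
                                             info.m_OutputTensorInfos[3], // output
                                             descriptor.m_Parameters,
                                             paramsInfo);
    if (status.error_code() != arm_compute::ErrorCode::OK)
    {
        return nullptr; // Unsupported configuration: let another backend handle the layer.
    }

    // Construction runs configure(), uploads the weights and calls prepare(); Execute()
    // then just runs the configured NELSTMLayer on the bound tensor handles.
    auto workload = std::make_unique<armnn::NeonLstmFloatWorkload>(descriptor, info);
    workload->Execute();
    return workload;
}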