ArmNN 21.08
NeonLstmFloatWorkload Class Reference

#include <NeonLstmFloatWorkload.hpp>

Inheritance diagram for NeonLstmFloatWorkload:
NeonLstmFloatWorkload → TypedWorkload< QueueDescriptor, DataTypes > → BaseWorkload< QueueDescriptor > → IWorkload

Public Member Functions

 NeonLstmFloatWorkload (const LstmQueueDescriptor &descriptor, const WorkloadInfo &info)
 
virtual void Execute () const override
 
- Public Member Functions inherited from TypedWorkload< QueueDescriptor, DataTypes >
 TypedWorkload (const QueueDescriptor &descriptor, const WorkloadInfo &info)
 
- Public Member Functions inherited from BaseWorkload< QueueDescriptor >
 BaseWorkload (const QueueDescriptor &descriptor, const WorkloadInfo &info)
 
void ExecuteAsync (WorkingMemDescriptor &workingMemDescriptor) override
 
void PostAllocationConfigure () override
 
const QueueDescriptor & GetData () const
 
profiling::ProfilingGuid GetGuid () const final
 
- Public Member Functions inherited from IWorkload
virtual ~IWorkload ()
 
virtual void RegisterDebugCallback (const DebugCallbackFunction &)
 

Additional Inherited Members

- Protected Attributes inherited from BaseWorkload< QueueDescriptor >
QueueDescriptor m_Data
 
const profiling::ProfilingGuid m_Guid
 

Detailed Description

Definition at line 19 of file NeonLstmFloatWorkload.hpp.
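
The header itself is not reproduced on this page, but the shape of the declaration can be inferred from the listings below. A minimal sketch, assuming the usual Neon workload layout (member set abbreviated; not a verbatim copy of NeonLstmFloatWorkload.hpp):

    // Hedged sketch of the declaration at line 19 of NeonLstmFloatWorkload.hpp;
    // inferred from the constructor and Execute() listings below, not copied verbatim.
    class NeonLstmFloatWorkload : public FloatWorkload<LstmQueueDescriptor>
    {
    public:
        NeonLstmFloatWorkload(const LstmQueueDescriptor& descriptor, const WorkloadInfo& info);
        virtual void Execute() const override;

    private:
        // The Arm Compute Library function object that performs the LSTM; run() in Execute().
        mutable arm_compute::NELSTMLayer m_LstmLayer;

        // One owned ACL tensor per weight/bias, built and filled in the constructor, e.g.:
        std::unique_ptr<arm_compute::Tensor> m_InputToForgetWeightsTensor;
        std::unique_ptr<arm_compute::Tensor> m_ScratchBuffer;
        // ... plus the optional CIFG, projection, peephole and layer-norm tensors.

        void FreeUnusedTensors();
    };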

Constructor & Destructor Documentation

◆ NeonLstmFloatWorkload()

NeonLstmFloatWorkload ( const LstmQueueDescriptor & descriptor,
const WorkloadInfo & info
)

Definition at line 19 of file NeonLstmFloatWorkload.cpp.

References ARMNN_REPORT_PROFILING_WORKLOAD_DESC, and QueueDescriptorWithParameters< LayerDescriptor >::m_Parameters.

    : FloatWorkload<LstmQueueDescriptor>(descriptor, info)
{
    // Report Profiling Details
    ARMNN_REPORT_PROFILING_WORKLOAD_DESC("NeonLstmFloatWorkload_Construct",
                                         descriptor.m_Parameters,
                                         info,
                                         this->GetGuid());

    arm_compute::LSTMParams<arm_compute::ITensor> lstm_param;

    // Basic parameters
    m_InputToForgetWeightsTensor = std::make_unique<arm_compute::Tensor>();
    BuildArmComputeTensor(*m_InputToForgetWeightsTensor, m_Data.m_InputToForgetWeights->GetTensorInfo());

    m_InputToCellWeightsTensor = std::make_unique<arm_compute::Tensor>();
    BuildArmComputeTensor(*m_InputToCellWeightsTensor, m_Data.m_InputToCellWeights->GetTensorInfo());

    m_InputToOutputWeightsTensor = std::make_unique<arm_compute::Tensor>();
    BuildArmComputeTensor(*m_InputToOutputWeightsTensor, m_Data.m_InputToOutputWeights->GetTensorInfo());

    m_RecurrentToForgetWeightsTensor = std::make_unique<arm_compute::Tensor>();
    BuildArmComputeTensor(*m_RecurrentToForgetWeightsTensor, m_Data.m_RecurrentToForgetWeights->GetTensorInfo());

    m_RecurrentToCellWeightsTensor = std::make_unique<arm_compute::Tensor>();
    BuildArmComputeTensor(*m_RecurrentToCellWeightsTensor, m_Data.m_RecurrentToCellWeights->GetTensorInfo());

    m_RecurrentToOutputWeightsTensor = std::make_unique<arm_compute::Tensor>();
    BuildArmComputeTensor(*m_RecurrentToOutputWeightsTensor, m_Data.m_RecurrentToOutputWeights->GetTensorInfo());

    m_ForgetGateBiasTensor = std::make_unique<arm_compute::Tensor>();
    BuildArmComputeTensor(*m_ForgetGateBiasTensor, m_Data.m_ForgetGateBias->GetTensorInfo());

    m_CellBiasTensor = std::make_unique<arm_compute::Tensor>();
    BuildArmComputeTensor(*m_CellBiasTensor, m_Data.m_CellBias->GetTensorInfo());

    m_OutputGateBiasTensor = std::make_unique<arm_compute::Tensor>();
    BuildArmComputeTensor(*m_OutputGateBiasTensor, m_Data.m_OutputGateBias->GetTensorInfo());

    // for future reference: check the AndroidNN API for the logic here
    if (!m_Data.m_Parameters.m_CifgEnabled)
    {
        m_InputToInputWeightsTensor = std::make_unique<arm_compute::Tensor>();
        BuildArmComputeTensor(*m_InputToInputWeightsTensor, m_Data.m_InputToInputWeights->GetTensorInfo());

        m_RecurrentToInputWeightsTensor = std::make_unique<arm_compute::Tensor>();
        BuildArmComputeTensor(*m_RecurrentToInputWeightsTensor, m_Data.m_RecurrentToInputWeights->GetTensorInfo());

        m_CellToInputWeightsTensor = std::make_unique<arm_compute::Tensor>();
        if (m_Data.m_CellToInputWeights != nullptr)
        {
            BuildArmComputeTensor(*m_CellToInputWeightsTensor, m_Data.m_CellToInputWeights->GetTensorInfo());
        }

        m_InputGateBiasTensor = std::make_unique<arm_compute::Tensor>();
        BuildArmComputeTensor(*m_InputGateBiasTensor, m_Data.m_InputGateBias->GetTensorInfo());

        lstm_param.set_cifg_params(m_InputToInputWeightsTensor.get(),
                                   m_RecurrentToInputWeightsTensor.get(),
                                   m_Data.m_CellToInputWeights != nullptr ? m_CellToInputWeightsTensor.get() : nullptr,
                                   m_InputGateBiasTensor.get());
    }

    if (m_Data.m_Parameters.m_ProjectionEnabled)
    {
        m_ProjectionWeightsTensor = std::make_unique<arm_compute::Tensor>();
        BuildArmComputeTensor(*m_ProjectionWeightsTensor, m_Data.m_ProjectionWeights->GetTensorInfo());

        m_ProjectionBiasTensor = std::make_unique<arm_compute::Tensor>();
        if (m_Data.m_ProjectionBias != nullptr)
        {
            BuildArmComputeTensor(*m_ProjectionBiasTensor, m_Data.m_ProjectionBias->GetTensorInfo());
        }

        lstm_param.set_projection_params(m_ProjectionWeightsTensor.get(),
                                         m_Data.m_ProjectionBias != nullptr ? m_ProjectionBiasTensor.get() : nullptr);
    }

    if (m_Data.m_Parameters.m_PeepholeEnabled)
    {
        m_CellToForgetWeightsTensor = std::make_unique<arm_compute::Tensor>();
        BuildArmComputeTensor(*m_CellToForgetWeightsTensor, m_Data.m_CellToForgetWeights->GetTensorInfo());

        m_CellToOutputWeightsTensor = std::make_unique<arm_compute::Tensor>();
        BuildArmComputeTensor(*m_CellToOutputWeightsTensor, m_Data.m_CellToOutputWeights->GetTensorInfo());

        lstm_param.set_peephole_params(m_CellToForgetWeightsTensor.get(), m_CellToOutputWeightsTensor.get());
    }

    if (m_Data.m_Parameters.m_LayerNormEnabled)
    {
        m_InputLayerNormWeightsTensor = std::make_unique<arm_compute::Tensor>();
        if (!m_Data.m_Parameters.m_CifgEnabled)
        {
            BuildArmComputeTensor(*m_InputLayerNormWeightsTensor, m_Data.m_InputLayerNormWeights->GetTensorInfo());
        }

        m_ForgetLayerNormWeightsTensor = std::make_unique<arm_compute::Tensor>();
        BuildArmComputeTensor(*m_ForgetLayerNormWeightsTensor, m_Data.m_ForgetLayerNormWeights->GetTensorInfo());

        m_CellLayerNormWeightsTensor = std::make_unique<arm_compute::Tensor>();
        BuildArmComputeTensor(*m_CellLayerNormWeightsTensor, m_Data.m_CellLayerNormWeights->GetTensorInfo());

        m_OutputLayerNormWeightsTensor = std::make_unique<arm_compute::Tensor>();
        BuildArmComputeTensor(*m_OutputLayerNormWeightsTensor, m_Data.m_OutputLayerNormWeights->GetTensorInfo());

        lstm_param.set_layer_normalization_params(m_Data.m_Parameters.m_CifgEnabled ?
                                                  nullptr : m_InputLayerNormWeightsTensor.get(),
                                                  m_ForgetLayerNormWeightsTensor.get(),
                                                  m_CellLayerNormWeightsTensor.get(),
                                                  m_OutputLayerNormWeightsTensor.get());
    }

    const arm_compute::ITensor& input           = static_cast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
    const arm_compute::ITensor& output_state_in = static_cast<IAclTensorHandle*>(m_Data.m_Inputs[1])->GetTensor();
    const arm_compute::ITensor& cell_state_in   = static_cast<IAclTensorHandle*>(m_Data.m_Inputs[2])->GetTensor();

    arm_compute::ITensor& output_state_out = static_cast<IAclTensorHandle*>(m_Data.m_Outputs[1])->GetTensor();
    arm_compute::ITensor& cell_state_out   = static_cast<IAclTensorHandle*>(m_Data.m_Outputs[2])->GetTensor();
    arm_compute::ITensor& output           = static_cast<IAclTensorHandle*>(m_Data.m_Outputs[3])->GetTensor();

    // Get the batch_size and the num_units from the cellStateIn dimensions
    const TensorInfo& inputTensorInfo = info.m_InputTensorInfos[2];
    const unsigned int batch_size = armnn::numeric_cast<unsigned int>(inputTensorInfo.GetShape()[0]);
    const unsigned int num_units  = armnn::numeric_cast<unsigned int>(inputTensorInfo.GetShape()[1]);

    m_ScratchBuffer = std::make_unique<arm_compute::Tensor>();
    if (m_Data.m_Parameters.m_CifgEnabled)
    {
        // 2D tensor with dimensions [num_units * 3, batch_size] with CIFG
        armnn::TensorInfo scratchBuffer1({ batch_size, num_units * 3 }, DataType::Float32);
        BuildArmComputeTensor(*m_ScratchBuffer, scratchBuffer1);
    }
    else
    {
        // scratch_buffer [num_units * 4, batch_size] without CIFG
        armnn::TensorInfo scratchBuffer2({ batch_size, num_units * 4 }, DataType::Float32);
        BuildArmComputeTensor(*m_ScratchBuffer, scratchBuffer2);
    }

    float cell_threshold = m_Data.m_Parameters.m_ClippingThresCell;
    float projection_threshold = m_Data.m_Parameters.m_ClippingThresProj;

    // for preparing the object for the class ActivationLayerInfo, we need to consider 5 situations
    arm_compute::ActivationLayerInfo activationLayerInfo;
    if (m_Data.m_Parameters.m_ActivationFunc == 0)
    {
        // no activation, do nothing
    }
    else if (m_Data.m_Parameters.m_ActivationFunc == 1)
    {
        activationLayerInfo = arm_compute::ActivationLayerInfo(
            arm_compute::ActivationLayerInfo::ActivationFunction::RELU);
    }
    else if (m_Data.m_Parameters.m_ActivationFunc == 3)
    {
        activationLayerInfo = arm_compute::ActivationLayerInfo(
            arm_compute::ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.0);
    }
    else if (m_Data.m_Parameters.m_ActivationFunc == 4)
    {
        activationLayerInfo = arm_compute::ActivationLayerInfo(
            arm_compute::ActivationLayerInfo::ActivationFunction::TANH, 1.0, 1.0);
    }
    else if (m_Data.m_Parameters.m_ActivationFunc == 6)
    {
        activationLayerInfo = arm_compute::ActivationLayerInfo(
            arm_compute::ActivationLayerInfo::ActivationFunction::LOGISTIC);
    }
    else
    {
        throw armnn::Exception("Wrong Type of Activation Function!");
    }

    m_LstmLayer.configure(&input, m_InputToForgetWeightsTensor.get(), m_InputToCellWeightsTensor.get(),
                          m_InputToOutputWeightsTensor.get(), m_RecurrentToForgetWeightsTensor.get(),
                          m_RecurrentToCellWeightsTensor.get(), m_RecurrentToOutputWeightsTensor.get(),
                          m_ForgetGateBiasTensor.get(), m_CellBiasTensor.get(), m_OutputGateBiasTensor.get(),
                          &output_state_in, &cell_state_in, m_ScratchBuffer.get(), &output_state_out,
                          &cell_state_out, &output, lstm_param, activationLayerInfo,
                          cell_threshold, projection_threshold);

    armcomputetensorutils::InitialiseArmComputeTensorEmpty(*m_ScratchBuffer);

    InitializeArmComputeTensorData(*m_InputToForgetWeightsTensor, m_Data.m_InputToForgetWeights);
    InitializeArmComputeTensorData(*m_InputToCellWeightsTensor, m_Data.m_InputToCellWeights);
    InitializeArmComputeTensorData(*m_InputToOutputWeightsTensor, m_Data.m_InputToOutputWeights);
    InitializeArmComputeTensorData(*m_RecurrentToForgetWeightsTensor, m_Data.m_RecurrentToForgetWeights);
    InitializeArmComputeTensorData(*m_RecurrentToCellWeightsTensor, m_Data.m_RecurrentToCellWeights);
    InitializeArmComputeTensorData(*m_RecurrentToOutputWeightsTensor, m_Data.m_RecurrentToOutputWeights);
    InitializeArmComputeTensorData(*m_ForgetGateBiasTensor, m_Data.m_ForgetGateBias);
    InitializeArmComputeTensorData(*m_CellBiasTensor, m_Data.m_CellBias);
    InitializeArmComputeTensorData(*m_OutputGateBiasTensor, m_Data.m_OutputGateBias);

    if (!m_Data.m_Parameters.m_CifgEnabled)
    {
        InitializeArmComputeTensorData(*m_InputToInputWeightsTensor, m_Data.m_InputToInputWeights);
        InitializeArmComputeTensorData(*m_RecurrentToInputWeightsTensor, m_Data.m_RecurrentToInputWeights);
        if (m_Data.m_CellToInputWeights != nullptr)
        {
            InitializeArmComputeTensorData(*m_CellToInputWeightsTensor, m_Data.m_CellToInputWeights);
        }
        InitializeArmComputeTensorData(*m_InputGateBiasTensor, m_Data.m_InputGateBias);
    }

    if (m_Data.m_Parameters.m_ProjectionEnabled)
    {
        InitializeArmComputeTensorData(*m_ProjectionWeightsTensor, m_Data.m_ProjectionWeights);
        if (m_Data.m_ProjectionBias != nullptr)
        {
            InitializeArmComputeTensorData(*m_ProjectionBiasTensor, m_Data.m_ProjectionBias);
        }
    }

    if (m_Data.m_Parameters.m_PeepholeEnabled)
    {
        InitializeArmComputeTensorData(*m_CellToForgetWeightsTensor, m_Data.m_CellToForgetWeights);
        InitializeArmComputeTensorData(*m_CellToOutputWeightsTensor, m_Data.m_CellToOutputWeights);
    }

    if (m_Data.m_Parameters.m_LayerNormEnabled)
    {
        if (!m_Data.m_Parameters.m_CifgEnabled)
        {
            InitializeArmComputeTensorData(*m_InputLayerNormWeightsTensor, m_Data.m_InputLayerNormWeights);
        }
        InitializeArmComputeTensorData(*m_ForgetLayerNormWeightsTensor, m_Data.m_ForgetLayerNormWeights);
        InitializeArmComputeTensorData(*m_CellLayerNormWeightsTensor, m_Data.m_CellLayerNormWeights);
        InitializeArmComputeTensorData(*m_OutputLayerNormWeightsTensor, m_Data.m_OutputLayerNormWeights);
    }

    // Force Compute Library to perform the necessary copying and reshaping, after which
    // delete all the input tensors that will no longer be needed
    m_LstmLayer.prepare();
    FreeUnusedTensors();
}
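
Two details in the listing above deserve a note. First, the scratch buffer: with CIFG enabled the input gate is coupled to the forget gate, so the Compute Library needs intermediate storage for only three gates instead of four. (The code comments and the armnn::TensorInfo list the two dimensions in opposite orders; the TensorInfo form { batch_size, num_units * 3 } is what is actually built.) A small illustrative helper, as a sketch; MakeScratchBufferInfo is our name, the constructor inlines this logic:

    // Illustrative only: how the scratch buffer TensorInfo above is derived.
    #include <armnn/Tensor.hpp>
    #include <armnn/Types.hpp>

    armnn::TensorInfo MakeScratchBufferInfo(unsigned int batchSize,
                                            unsigned int numUnits,
                                            bool cifgEnabled)
    {
        // Under CIFG the input gate is derived from the forget gate, so ACL needs
        // intermediate storage for 3 gates rather than 4.
        const unsigned int numGates = cifgEnabled ? 3u : 4u;
        return armnn::TensorInfo({ batchSize, numUnits * numGates }, armnn::DataType::Float32);
    }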
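Second, the activation handling: the LSTM descriptor carries the activation as an integer code (following the AndroidNN convention), and the constructor maps codes 0, 1, 3, 4 and 6 onto ACL ActivationLayerInfo values, rejecting anything else. The same mapping, factored into a free function as a sketch (the helper name is ours; a default-constructed ActivationLayerInfo is disabled, which is how "no activation" is expressed):

    // Sketch: the constructor's 0/1/3/4/6 activation mapping as a standalone helper.
    #include <arm_compute/core/Types.h>
    #include <armnn/Exceptions.hpp>
    #include <cstdint>

    arm_compute::ActivationLayerInfo ConvertLstmActivation(uint32_t activationFunc)
    {
        using AF = arm_compute::ActivationLayerInfo::ActivationFunction;
        switch (activationFunc)
        {
            case 0: return {};                                                      // no activation
            case 1: return arm_compute::ActivationLayerInfo(AF::RELU);              // ReLU
            case 3: return arm_compute::ActivationLayerInfo(AF::BOUNDED_RELU, 6.0f); // ReLU6
            case 4: return arm_compute::ActivationLayerInfo(AF::TANH, 1.0f, 1.0f);   // tanh
            case 6: return arm_compute::ActivationLayerInfo(AF::LOGISTIC);          // sigmoid
            default: throw armnn::Exception("Wrong Type of Activation Function!");
        }
    }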

Member Function Documentation

◆ Execute()

void Execute ( ) const override virtual

Implements IWorkload.

Definition at line 274 of file NeonLstmFloatWorkload.cpp.

References ARMNN_SCOPED_PROFILING_EVENT_NEON_GUID, and BaseWorkload< QueueDescriptor >::GetGuid().

{
    ARMNN_SCOPED_PROFILING_EVENT_NEON_GUID("NeonLstmFloatWorkload_Execute", this->GetGuid());
    m_LstmLayer.run();
}
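
Because configure() and prepare() already ran in the constructor, Execute() reduces to a single run() call on the Compute Library function. A minimal, hypothetical sketch of how a caller drives workloads through the IWorkload interface (the function and variable names here are ours, not ArmNN's runtime code):

    // Hypothetical sketch: workloads are executed polymorphically via IWorkload.
    #include <memory>
    #include <vector>

    void RunInference(const std::vector<std::unique_ptr<armnn::IWorkload>>& workloads)
    {
        for (const auto& workload : workloads)
        {
            // For NeonLstmFloatWorkload this dispatches to m_LstmLayer.run().
            workload->Execute();
        }
    }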

The documentation for this class was generated from the following files:

NeonLstmFloatWorkload.hpp
NeonLstmFloatWorkload.cpp