ArmNN 22.08 - UnidirectionalSequenceLstmLayer.cpp
//
// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "UnidirectionalSequenceLstmLayer.hpp"

#include "LayerCloneBase.hpp"

#include <armnn/LstmParams.hpp>
#include <armnn/TypesUtils.hpp>
#include <armnn/backends/TensorHandle.hpp>
#include <armnn/backends/WorkloadFactory.hpp>

namespace armnn
{

UnidirectionalSequenceLstmLayer::UnidirectionalSequenceLstmLayer(const LstmDescriptor& param, const char* name)
    : LayerWithParameters(3, 3, LayerType::UnidirectionalSequenceLstm, param, name)
{
}

std::unique_ptr<IWorkload> UnidirectionalSequenceLstmLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
    UnidirectionalSequenceLstmQueueDescriptor descriptor;

    // Basic parameters
    descriptor.m_InputToForgetWeights = m_BasicParameters.m_InputToForgetWeights.get();
    descriptor.m_InputToCellWeights = m_BasicParameters.m_InputToCellWeights.get();
    descriptor.m_InputToOutputWeights = m_BasicParameters.m_InputToOutputWeights.get();
    descriptor.m_RecurrentToForgetWeights = m_BasicParameters.m_RecurrentToForgetWeights.get();
    descriptor.m_RecurrentToCellWeights = m_BasicParameters.m_RecurrentToCellWeights.get();
    descriptor.m_RecurrentToOutputWeights = m_BasicParameters.m_RecurrentToOutputWeights.get();
    descriptor.m_ForgetGateBias = m_BasicParameters.m_ForgetGateBias.get();
    descriptor.m_CellBias = m_BasicParameters.m_CellBias.get();
    descriptor.m_OutputGateBias = m_BasicParameters.m_OutputGateBias.get();

    // Cifg parameters
    if (!m_Param.m_CifgEnabled)
    {
        descriptor.m_InputToInputWeights = m_CifgParameters.m_InputToInputWeights.get();
        descriptor.m_RecurrentToInputWeights = m_CifgParameters.m_RecurrentToInputWeights.get();
        descriptor.m_InputGateBias = m_CifgParameters.m_InputGateBias.get();
    }

    // Projection parameters
    if (m_Param.m_ProjectionEnabled)
    {
        descriptor.m_ProjectionWeights = m_ProjectionParameters.m_ProjectionWeights.get();
        descriptor.m_ProjectionBias = m_ProjectionParameters.m_ProjectionBias.get();
    }

    // Peephole parameters
    if (m_Param.m_PeepholeEnabled)
    {
        if (!m_Param.m_CifgEnabled)
        {
            descriptor.m_CellToInputWeights = m_PeepholeParameters.m_CellToInputWeights.get();
        }
        descriptor.m_CellToForgetWeights = m_PeepholeParameters.m_CellToForgetWeights.get();
        descriptor.m_CellToOutputWeights = m_PeepholeParameters.m_CellToOutputWeights.get();
    }

    // Layer normalisation parameters
    if (m_Param.m_LayerNormEnabled)
    {
        if (!m_Param.m_CifgEnabled)
        {
            descriptor.m_InputLayerNormWeights = m_LayerNormParameters.m_InputLayerNormWeights.get();
        }
        descriptor.m_ForgetLayerNormWeights = m_LayerNormParameters.m_ForgetLayerNormWeights.get();
        descriptor.m_CellLayerNormWeights = m_LayerNormParameters.m_CellLayerNormWeights.get();
        descriptor.m_OutputLayerNormWeights = m_LayerNormParameters.m_OutputLayerNormWeights.get();
    }

    SetAdditionalInfo(descriptor);

    return factory.CreateWorkload(LayerType::UnidirectionalSequenceLstm, descriptor, PrepInfoAndDesc(descriptor));
}

UnidirectionalSequenceLstmLayer* UnidirectionalSequenceLstmLayer::Clone(Graph& graph) const
{
    auto layer = CloneBase<UnidirectionalSequenceLstmLayer>(graph, m_Param, GetName());

    layer->m_BasicParameters.m_InputToForgetWeights = m_BasicParameters.m_InputToForgetWeights ?
            m_BasicParameters.m_InputToForgetWeights : nullptr;
    layer->m_BasicParameters.m_InputToCellWeights = m_BasicParameters.m_InputToCellWeights ?
            m_BasicParameters.m_InputToCellWeights : nullptr;
    layer->m_BasicParameters.m_InputToOutputWeights = m_BasicParameters.m_InputToOutputWeights ?
            m_BasicParameters.m_InputToOutputWeights : nullptr;
    layer->m_BasicParameters.m_RecurrentToForgetWeights = m_BasicParameters.m_RecurrentToForgetWeights ?
            m_BasicParameters.m_RecurrentToForgetWeights : nullptr;
    layer->m_BasicParameters.m_RecurrentToCellWeights = m_BasicParameters.m_RecurrentToCellWeights ?
            m_BasicParameters.m_RecurrentToCellWeights : nullptr;
    layer->m_BasicParameters.m_RecurrentToOutputWeights = m_BasicParameters.m_RecurrentToOutputWeights ?
            m_BasicParameters.m_RecurrentToOutputWeights : nullptr;
    layer->m_BasicParameters.m_ForgetGateBias = m_BasicParameters.m_ForgetGateBias ?
            m_BasicParameters.m_ForgetGateBias : nullptr;
    layer->m_BasicParameters.m_CellBias = m_BasicParameters.m_CellBias ?
            m_BasicParameters.m_CellBias : nullptr;
    layer->m_BasicParameters.m_OutputGateBias = m_BasicParameters.m_OutputGateBias ?
            m_BasicParameters.m_OutputGateBias : nullptr;

    if (!m_Param.m_CifgEnabled)
    {
        layer->m_CifgParameters.m_InputToInputWeights = m_CifgParameters.m_InputToInputWeights ?
                m_CifgParameters.m_InputToInputWeights : nullptr;
        layer->m_CifgParameters.m_RecurrentToInputWeights = m_CifgParameters.m_RecurrentToInputWeights ?
                m_CifgParameters.m_RecurrentToInputWeights : nullptr;
        layer->m_CifgParameters.m_InputGateBias = m_CifgParameters.m_InputGateBias ?
                m_CifgParameters.m_InputGateBias : nullptr;
    }

    if (m_Param.m_ProjectionEnabled)
    {
        layer->m_ProjectionParameters.m_ProjectionWeights = m_ProjectionParameters.m_ProjectionWeights ?
                m_ProjectionParameters.m_ProjectionWeights : nullptr;
        layer->m_ProjectionParameters.m_ProjectionBias = m_ProjectionParameters.m_ProjectionBias ?
                m_ProjectionParameters.m_ProjectionBias : nullptr;
    }

    if (m_Param.m_PeepholeEnabled)
    {
        if (!m_Param.m_CifgEnabled)
        {
            layer->m_PeepholeParameters.m_CellToInputWeights = m_PeepholeParameters.m_CellToInputWeights ?
                    m_PeepholeParameters.m_CellToInputWeights : nullptr;
        }
        layer->m_PeepholeParameters.m_CellToForgetWeights = m_PeepholeParameters.m_CellToForgetWeights ?
                m_PeepholeParameters.m_CellToForgetWeights : nullptr;
        layer->m_PeepholeParameters.m_CellToOutputWeights = m_PeepholeParameters.m_CellToOutputWeights ?
                m_PeepholeParameters.m_CellToOutputWeights : nullptr;
    }

    if (m_Param.m_LayerNormEnabled)
    {
        layer->m_LayerNormParameters.m_InputLayerNormWeights = m_LayerNormParameters.m_InputLayerNormWeights ?
                m_LayerNormParameters.m_InputLayerNormWeights : nullptr;
        layer->m_LayerNormParameters.m_ForgetLayerNormWeights = m_LayerNormParameters.m_ForgetLayerNormWeights ?
                m_LayerNormParameters.m_ForgetLayerNormWeights : nullptr;
        layer->m_LayerNormParameters.m_CellLayerNormWeights = m_LayerNormParameters.m_CellLayerNormWeights ?
                m_LayerNormParameters.m_CellLayerNormWeights : nullptr;
        layer->m_LayerNormParameters.m_OutputLayerNormWeights = m_LayerNormParameters.m_OutputLayerNormWeights ?
                m_LayerNormParameters.m_OutputLayerNormWeights : nullptr;
    }

    return std::move(layer);
}

std::vector<TensorShape> UnidirectionalSequenceLstmLayer::InferOutputShapes(
    const std::vector<TensorShape>& inputShapes) const
{
    ARMNN_ASSERT(inputShapes.size() == 3);

    // Get input values for validation
    unsigned int outputSize = inputShapes[1][1];

    std::vector<TensorShape> outShapes;
    if (m_Param.m_TimeMajor)
    {
        outShapes.push_back(TensorShape({inputShapes[0][0], inputShapes[0][1], outputSize}));
    }
    else
    {
        outShapes.push_back(TensorShape({inputShapes[0][0], inputShapes[0][1], outputSize}));
    }
    return outShapes;
}
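
// Illustration (assuming the usual [batchSize, timeSteps, inputSize] batch-major and
// [timeSteps, batchSize, inputSize] time-major layouts): with m_TimeMajor == false and
// inputShapes == { {2, 10, 5}, {2, 16}, {2, 16} }, outputSize = inputShapes[1][1] = 16 and the
// single inferred output shape is [2, 10, 16]; with m_TimeMajor == true and input {10, 2, 5} the
// same expression yields [10, 2, 16]. The two branches above are identical because the output
// keeps the first two dimensions of the input in either layout.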

void UnidirectionalSequenceLstmLayer::ValidateTensorShapesFromInputs()
{
    VerifyLayerConnections(3, CHECK_LOCATION());

    const TensorShape& outputShape = GetOutputSlot(2).GetTensorInfo().GetShape();

    VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);

    auto inferredShapes = InferOutputShapes( {
        GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape(),
        GetInputSlot(1).GetConnection()->GetTensorInfo().GetShape(),
        GetInputSlot(2).GetConnection()->GetTensorInfo().GetShape()
    });

    ARMNN_ASSERT(inferredShapes.size() == 1);

    // Check if the weights are nullptr
    ARMNN_ASSERT_MSG(m_BasicParameters.m_InputToForgetWeights != nullptr,
                     "UnidirectionalSequenceLstmLayer: m_BasicParameters.m_InputToForgetWeights should not be null.");
    ARMNN_ASSERT_MSG(m_BasicParameters.m_InputToCellWeights != nullptr,
                     "UnidirectionalSequenceLstmLayer: m_BasicParameters.m_InputToCellWeights should not be null.");
    ARMNN_ASSERT_MSG(m_BasicParameters.m_InputToOutputWeights != nullptr,
                     "UnidirectionalSequenceLstmLayer: m_BasicParameters.m_InputToOutputWeights should not be null.");
    ARMNN_ASSERT_MSG(m_BasicParameters.m_RecurrentToForgetWeights != nullptr,
                     "UnidirectionalSequenceLstmLayer: m_BasicParameters.m_RecurrentToForgetWeights "
                     "should not be null.");
    ARMNN_ASSERT_MSG(m_BasicParameters.m_RecurrentToCellWeights != nullptr,
                     "UnidirectionalSequenceLstmLayer: m_BasicParameters.m_RecurrentToCellWeights should not be null.");
    ARMNN_ASSERT_MSG(m_BasicParameters.m_RecurrentToOutputWeights != nullptr,
                     "UnidirectionalSequenceLstmLayer: m_BasicParameters.m_RecurrentToOutputWeights "
                     "should not be null.");
    ARMNN_ASSERT_MSG(m_BasicParameters.m_ForgetGateBias != nullptr,
                     "UnidirectionalSequenceLstmLayer: m_BasicParameters.m_ForgetGateBias should not be null.");
    ARMNN_ASSERT_MSG(m_BasicParameters.m_CellBias != nullptr,
                     "UnidirectionalSequenceLstmLayer: m_BasicParameters.m_CellBias should not be null.");
    ARMNN_ASSERT_MSG(m_BasicParameters.m_OutputGateBias != nullptr,
                     "UnidirectionalSequenceLstmLayer: m_BasicParameters.m_OutputGateBias should not be null.");

    if (!m_Param.m_CifgEnabled)
    {
        ARMNN_ASSERT_MSG(m_CifgParameters.m_InputToInputWeights != nullptr,
                         "UnidirectionalSequenceLstmLayer: m_CifgParameters.m_InputToInputWeights should not be null.");
        ARMNN_ASSERT_MSG(m_CifgParameters.m_RecurrentToInputWeights != nullptr,
                         "UnidirectionalSequenceLstmLayer: m_CifgParameters.m_RecurrentToInputWeights "
                         "should not be null.");
        ARMNN_ASSERT_MSG(m_CifgParameters.m_InputGateBias != nullptr,
                         "UnidirectionalSequenceLstmLayer: m_CifgParameters.m_InputGateBias should not be null.");
    }
    else
    {
        ARMNN_ASSERT_MSG(m_CifgParameters.m_InputToInputWeights == nullptr,
            "UnidirectionalSequenceLstmLayer: m_CifgParameters.m_InputToInputWeights should not have a value "
            "when CIFG is enabled.");
        ARMNN_ASSERT_MSG(m_CifgParameters.m_RecurrentToInputWeights == nullptr,
            "UnidirectionalSequenceLstmLayer: m_CifgParameters.m_RecurrentToInputWeights should not have a value "
            "when CIFG is enabled.");
        ARMNN_ASSERT_MSG(m_CifgParameters.m_InputGateBias == nullptr,
            "UnidirectionalSequenceLstmLayer: m_CifgParameters.m_InputGateBias should not have a value "
            "when CIFG is enabled.");
    }

    if (m_Param.m_ProjectionEnabled)
    {
        ARMNN_ASSERT_MSG(m_ProjectionParameters.m_ProjectionWeights != nullptr,
                         "UnidirectionalSequenceLstmLayer: m_ProjectionParameters.m_ProjectionWeights "
                         "should not be null.");
    }

    if (m_Param.m_PeepholeEnabled)
    {
        if (!m_Param.m_CifgEnabled)
        {
            ARMNN_ASSERT_MSG(m_PeepholeParameters.m_CellToInputWeights != nullptr,
                             "UnidirectionalSequenceLstmLayer: m_PeepholeParameters.m_CellToInputWeights "
                             "should not be null "
                             "when Peephole is enabled and CIFG is disabled.");
        }
        ARMNN_ASSERT_MSG(m_PeepholeParameters.m_CellToForgetWeights != nullptr,
                         "UnidirectionalSequenceLstmLayer: m_PeepholeParameters.m_CellToForgetWeights "
                         "should not be null.");
        ARMNN_ASSERT_MSG(m_PeepholeParameters.m_CellToOutputWeights != nullptr,
                         "UnidirectionalSequenceLstmLayer: m_PeepholeParameters.m_CellToOutputWeights "
                         "should not be null.");
    }

    if (m_Param.m_LayerNormEnabled)
    {
        if (!m_Param.m_CifgEnabled)
        {
            ARMNN_ASSERT_MSG(m_LayerNormParameters.m_InputLayerNormWeights != nullptr,
                             "UnidirectionalSequenceLstmLayer: m_LayerNormParameters.m_InputLayerNormWeights "
                             "should not be null.");
        }
        ARMNN_ASSERT_MSG(m_LayerNormParameters.m_ForgetLayerNormWeights != nullptr,
                         "UnidirectionalSequenceLstmLayer: m_LayerNormParameters.m_ForgetLayerNormWeights "
                         "should not be null.");
        ARMNN_ASSERT_MSG(m_LayerNormParameters.m_CellLayerNormWeights != nullptr,
                         "UnidirectionalSequenceLstmLayer: m_LayerNormParameters.m_CellLayerNormWeights "
                         "should not be null.");
        ARMNN_ASSERT_MSG(m_LayerNormParameters.m_OutputLayerNormWeights != nullptr,
                         "UnidirectionalSequenceLstmLayer: m_LayerNormParameters.m_OutputLayerNormWeights "
                         "should not be null.");
    }

    ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "UnidirectionalSequenceLstmLayer");
}

Layer::ConstantTensors UnidirectionalSequenceLstmLayer::GetConstantTensorsByRef()
{
    // For API stability DO NOT ALTER order and add new members to the end of vector
    return {m_BasicParameters.m_InputToForgetWeights, m_BasicParameters.m_InputToCellWeights,
            m_BasicParameters.m_InputToOutputWeights, m_BasicParameters.m_RecurrentToForgetWeights,
            m_BasicParameters.m_RecurrentToCellWeights, m_BasicParameters.m_RecurrentToOutputWeights,
            m_BasicParameters.m_ForgetGateBias, m_BasicParameters.m_CellBias, m_BasicParameters.m_OutputGateBias,
            // Cifg parameters
            m_CifgParameters.m_InputToInputWeights, m_CifgParameters.m_RecurrentToInputWeights,
            m_CifgParameters.m_InputGateBias,
            // Projection parameters
            m_ProjectionParameters.m_ProjectionWeights, m_ProjectionParameters.m_ProjectionBias,
            // Peephole parameters
            m_PeepholeParameters.m_CellToInputWeights, m_PeepholeParameters.m_CellToForgetWeights,
            m_PeepholeParameters.m_CellToOutputWeights,
            // Layer normalisation parameters
            m_LayerNormParameters.m_InputLayerNormWeights, m_LayerNormParameters.m_ForgetLayerNormWeights,
            m_LayerNormParameters.m_CellLayerNormWeights, m_LayerNormParameters.m_OutputLayerNormWeights};
}

void UnidirectionalSequenceLstmLayer::ExecuteStrategy(IStrategy& strategy) const
{
    std::vector<ConstTensor> constTensors;

    LstmDescriptor descriptor = GetParameters();

    ManagedConstTensorHandle managedInputToForgetWeights(m_BasicParameters.m_InputToForgetWeights);
    ManagedConstTensorHandle managedInputToCellWeights(m_BasicParameters.m_InputToCellWeights);
    ManagedConstTensorHandle managedInputToOutputWeights(m_BasicParameters.m_InputToOutputWeights);
    ManagedConstTensorHandle managedRecurrentToForgetWeights(m_BasicParameters.m_RecurrentToForgetWeights);
    ManagedConstTensorHandle managedRecurrentToCellWeights(m_BasicParameters.m_RecurrentToCellWeights);
    ManagedConstTensorHandle managedRecurrentToOutputWeights(m_BasicParameters.m_RecurrentToOutputWeights);
    ManagedConstTensorHandle managedForgetGateBias(m_BasicParameters.m_ForgetGateBias);
    ManagedConstTensorHandle managedCellBias(m_BasicParameters.m_CellBias);
    ManagedConstTensorHandle managedOutputGateBias(m_BasicParameters.m_OutputGateBias);

    // Cifg parameters
    ManagedConstTensorHandle managedInputToInputWeights(m_CifgParameters.m_InputToInputWeights);
    ManagedConstTensorHandle managedRecurrentToInputWeights(m_CifgParameters.m_RecurrentToInputWeights);
    ManagedConstTensorHandle managedInputGateBias(m_CifgParameters.m_InputGateBias);

    // Projection parameters
    ManagedConstTensorHandle managedProjectionWeights(m_ProjectionParameters.m_ProjectionWeights);
    ManagedConstTensorHandle managedProjectionBias(m_ProjectionParameters.m_ProjectionBias);

    // Peephole parameters
    ManagedConstTensorHandle managedCellToInputWeights(m_PeepholeParameters.m_CellToInputWeights);
    ManagedConstTensorHandle managedCellToForgetWeights(m_PeepholeParameters.m_CellToForgetWeights);
    ManagedConstTensorHandle managedCellToOutputWeights(m_PeepholeParameters.m_CellToOutputWeights);

    // Layer normalisation parameters
    ManagedConstTensorHandle managedInputLayerNormWeights(m_LayerNormParameters.m_InputLayerNormWeights);
    ManagedConstTensorHandle managedForgetLayerNormWeights(m_LayerNormParameters.m_ForgetLayerNormWeights);
    ManagedConstTensorHandle managedCellLayerNormWeights(m_LayerNormParameters.m_CellLayerNormWeights);
    ManagedConstTensorHandle managedOutputLayerNormWeights(m_LayerNormParameters.m_OutputLayerNormWeights);

    // First add mandatory/basic parameters
    if (m_BasicParameters.m_InputToForgetWeights != nullptr)
    {
        constTensors.emplace_back(ConstTensor(managedInputToForgetWeights.GetTensorInfo(),
                                              managedInputToForgetWeights.Map()));
    }
    if (m_BasicParameters.m_InputToCellWeights != nullptr)
    {
        constTensors.emplace_back(ConstTensor(managedInputToCellWeights.GetTensorInfo(),
                                              managedInputToCellWeights.Map()));
    }
    if (m_BasicParameters.m_InputToOutputWeights != nullptr)
    {
        constTensors.emplace_back(ConstTensor(managedInputToOutputWeights.GetTensorInfo(),
                                              managedInputToOutputWeights.Map()));
    }
    if (m_BasicParameters.m_RecurrentToForgetWeights != nullptr)
    {
        constTensors.emplace_back(ConstTensor(
                managedRecurrentToForgetWeights.GetTensorInfo(),
                managedRecurrentToForgetWeights.Map()));
    }
    if (m_BasicParameters.m_RecurrentToCellWeights != nullptr)
    {
        constTensors.emplace_back(ConstTensor(
                managedRecurrentToCellWeights.GetTensorInfo(),
                managedRecurrentToCellWeights.Map()));
    }
    if (m_BasicParameters.m_RecurrentToOutputWeights != nullptr)
    {
        constTensors.emplace_back(ConstTensor(
                managedRecurrentToOutputWeights.GetTensorInfo(),
                managedRecurrentToOutputWeights.Map()));
    }
    if (m_BasicParameters.m_ForgetGateBias != nullptr)
    {
        constTensors.emplace_back(ConstTensor(managedForgetGateBias.GetTensorInfo(),
                                              managedForgetGateBias.Map()));
    }
    if (m_BasicParameters.m_CellBias != nullptr)
    {
        constTensors.emplace_back(ConstTensor(managedCellBias.GetTensorInfo(),
                                              managedCellBias.Map()));
    }
    if (m_BasicParameters.m_OutputGateBias != nullptr)
    {
        constTensors.emplace_back(ConstTensor(managedOutputGateBias.GetTensorInfo(),
                                              managedOutputGateBias.Map()));
    }

    // Add cifg parameters
    if (!descriptor.m_CifgEnabled)
    {
        if (m_CifgParameters.m_InputToInputWeights != nullptr)
        {
            constTensors.emplace_back(ConstTensor(managedInputToInputWeights.GetTensorInfo(),
                                                  managedInputToInputWeights.Map()));
        }
        if (m_CifgParameters.m_RecurrentToInputWeights != nullptr)
        {
            constTensors.emplace_back(ConstTensor(
                    managedRecurrentToInputWeights.GetTensorInfo(),
                    managedRecurrentToInputWeights.Map()));
        }
        if (m_CifgParameters.m_InputGateBias != nullptr)
        {
            constTensors.emplace_back(ConstTensor(managedInputGateBias.GetTensorInfo(),
                                                  managedInputGateBias.Map()));
        }
    }

    // Add peephole parameters
    if (descriptor.m_PeepholeEnabled)
    {
        if (!descriptor.m_CifgEnabled)
        {
            if (m_PeepholeParameters.m_CellToInputWeights != nullptr)
            {
                constTensors.emplace_back(ConstTensor(managedCellToInputWeights.GetTensorInfo(),
                                                      managedCellToInputWeights.Map()));
            }
        }
        if (m_PeepholeParameters.m_CellToForgetWeights != nullptr)
        {
            constTensors.emplace_back(ConstTensor(managedCellToForgetWeights.GetTensorInfo(),
                                                  managedCellToForgetWeights.Map()));
        }
        if (m_PeepholeParameters.m_CellToOutputWeights != nullptr)
        {
            constTensors.emplace_back(ConstTensor(managedCellToOutputWeights.GetTensorInfo(),
                                                  managedCellToOutputWeights.Map()));
        }
    }

    // Add projection parameters
    if (descriptor.m_ProjectionEnabled)
    {
        if (m_ProjectionParameters.m_ProjectionWeights != nullptr)
        {
            constTensors.emplace_back(ConstTensor(managedProjectionWeights.GetTensorInfo(),
                                                  managedProjectionWeights.Map()));
        }
        if (m_ProjectionParameters.m_ProjectionBias != nullptr)
        {
            constTensors.emplace_back(ConstTensor(managedProjectionBias.GetTensorInfo(),
                                                  managedProjectionBias.Map()));
        }
    }

    // Add norm parameters
    if (descriptor.m_LayerNormEnabled)
    {
        if (!descriptor.m_CifgEnabled)
        {
            if (m_LayerNormParameters.m_InputLayerNormWeights != nullptr)
            {
                constTensors.emplace_back(ConstTensor(managedInputLayerNormWeights.GetTensorInfo(),
                                                      managedInputLayerNormWeights.Map()));
            }
        }
        if (m_LayerNormParameters.m_ForgetLayerNormWeights != nullptr)
        {
            constTensors.emplace_back(ConstTensor(managedForgetLayerNormWeights.GetTensorInfo(),
                                                  managedForgetLayerNormWeights.Map()));
        }
        if (m_LayerNormParameters.m_CellLayerNormWeights != nullptr)
        {
            constTensors.emplace_back(ConstTensor(managedCellLayerNormWeights.GetTensorInfo(),
                                                  managedCellLayerNormWeights.Map()));
        }
        if (m_LayerNormParameters.m_OutputLayerNormWeights != nullptr)
        {
            constTensors.emplace_back(ConstTensor(managedOutputLayerNormWeights.GetTensorInfo(),
                                                  managedOutputLayerNormWeights.Map()));
        }
    }

    strategy.ExecuteStrategy(this, GetParameters(), constTensors, GetName());
}

} // namespace armnn
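
For context, the layer above is normally created through the public graph-building API rather than instantiated directly. The sketch below shows one plausible way to add it from application code; it assumes the INetwork::AddUnidirectionalSequenceLstmLayer overload taking an UnidirectionalSequenceLstmDescriptor and LstmInputParams, uses zero-filled placeholder weights, and omits backend optimization and execution. Tensor shapes follow the member descriptions in armnn/LstmParams.hpp and should be verified against the target backend.

#include <armnn/Descriptors.hpp>
#include <armnn/INetwork.hpp>
#include <armnn/LstmParams.hpp>
#include <armnn/Tensor.hpp>
#include <armnn/Types.hpp>

#include <vector>

int main()
{
    using namespace armnn;

    constexpr unsigned int inputSize  = 4;
    constexpr unsigned int numUnits   = 8;
    constexpr unsigned int outputSize = 8; // no projection, so outputSize == numUnits

    // Placeholder weight storage; real code would load trained values instead of zeros.
    std::vector<std::vector<float>> storage;
    auto MakeConst = [&storage](const TensorShape& shape)
    {
        TensorInfo info(shape, DataType::Float32, 0.0f, 0, /*isConstant=*/true);
        storage.emplace_back(info.GetNumElements(), 0.0f);
        return ConstTensor(info, storage.back().data());
    };

    // Shapes as described in LstmParams.hpp: input weights [input_size, num_units],
    // recurrent weights [output_size, num_units], biases [num_units].
    ConstTensor inputToForgetWeights     = MakeConst(TensorShape({inputSize, numUnits}));
    ConstTensor inputToCellWeights       = MakeConst(TensorShape({inputSize, numUnits}));
    ConstTensor inputToOutputWeights     = MakeConst(TensorShape({inputSize, numUnits}));
    ConstTensor recurrentToForgetWeights = MakeConst(TensorShape({outputSize, numUnits}));
    ConstTensor recurrentToCellWeights   = MakeConst(TensorShape({outputSize, numUnits}));
    ConstTensor recurrentToOutputWeights = MakeConst(TensorShape({outputSize, numUnits}));
    ConstTensor forgetGateBias           = MakeConst(TensorShape({numUnits}));
    ConstTensor cellBias                 = MakeConst(TensorShape({numUnits}));
    ConstTensor outputGateBias           = MakeConst(TensorShape({numUnits}));

    // With CIFG enabled, the nine basic tensors above are all the layer requires.
    LstmInputParams params;
    params.m_InputToForgetWeights     = &inputToForgetWeights;
    params.m_InputToCellWeights       = &inputToCellWeights;
    params.m_InputToOutputWeights     = &inputToOutputWeights;
    params.m_RecurrentToForgetWeights = &recurrentToForgetWeights;
    params.m_RecurrentToCellWeights   = &recurrentToCellWeights;
    params.m_RecurrentToOutputWeights = &recurrentToOutputWeights;
    params.m_ForgetGateBias           = &forgetGateBias;
    params.m_CellBias                 = &cellBias;
    params.m_OutputGateBias           = &outputGateBias;

    UnidirectionalSequenceLstmDescriptor descriptor;
    descriptor.m_CifgEnabled    = true;  // couple input and forget gates, no m_CifgParameters needed
    descriptor.m_TimeMajor      = false; // input laid out as [batchSize, timeSteps, inputSize]
    descriptor.m_ActivationFunc = 4;     // TfLite-style activation encoding; 4 is commonly tanh

    INetworkPtr network = INetwork::Create();
    IConnectableLayer* lstm = network->AddUnidirectionalSequenceLstmLayer(descriptor, params, "uniSeqLstm");

    // The layer expects three inputs (input, outputStateIn, cellStateIn) and exposes three
    // outputs; slot 2 carries the per-timestep output shape validated in the layer code above.
    return lstm != nullptr ? 0 : 1;
}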