UnidirectionalSequenceLstmLayer.cpp (ArmNN 22.05)
//
// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "UnidirectionalSequenceLstmLayer.hpp"

#include "LayerCloneBase.hpp"

#include <armnn/LstmParams.hpp>
#include <armnn/TypesUtils.hpp>
#include <armnn/backends/TensorHandle.hpp>
#include <armnn/backends/WorkloadFactory.hpp>

namespace armnn
{

UnidirectionalSequenceLstmLayer::UnidirectionalSequenceLstmLayer(const LstmDescriptor& param, const char* name)
    : LayerWithParameters(3, 3, LayerType::UnidirectionalSequenceLstm, param, name)
{
}

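// CreateWorkload gathers the layer's constant tensors into a
// UnidirectionalSequenceLstmQueueDescriptor. The basic parameters are always
// wired up; the optional sets (CIFG, projection, peephole, layer normalisation)
// are only populated when the corresponding descriptor flag enables them, and
// the remaining descriptor pointers are left unset. The backend's
// IWorkloadFactory then builds the actual workload from the descriptor and the
// workload info produced by PrepInfoAndDesc().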
std::unique_ptr<IWorkload> UnidirectionalSequenceLstmLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
    UnidirectionalSequenceLstmQueueDescriptor descriptor;

    // Basic parameters
    descriptor.m_InputToForgetWeights = m_BasicParameters.m_InputToForgetWeights.get();
    descriptor.m_InputToCellWeights = m_BasicParameters.m_InputToCellWeights.get();
    descriptor.m_InputToOutputWeights = m_BasicParameters.m_InputToOutputWeights.get();
    descriptor.m_RecurrentToForgetWeights = m_BasicParameters.m_RecurrentToForgetWeights.get();
    descriptor.m_RecurrentToCellWeights = m_BasicParameters.m_RecurrentToCellWeights.get();
    descriptor.m_RecurrentToOutputWeights = m_BasicParameters.m_RecurrentToOutputWeights.get();
    descriptor.m_ForgetGateBias = m_BasicParameters.m_ForgetGateBias.get();
    descriptor.m_CellBias = m_BasicParameters.m_CellBias.get();
    descriptor.m_OutputGateBias = m_BasicParameters.m_OutputGateBias.get();

    // Cifg parameters
    if (!m_Param.m_CifgEnabled)
    {
        descriptor.m_InputToInputWeights = m_CifgParameters.m_InputToInputWeights.get();
        descriptor.m_RecurrentToInputWeights = m_CifgParameters.m_RecurrentToInputWeights.get();
        descriptor.m_InputGateBias = m_CifgParameters.m_InputGateBias.get();
    }

    // Projection parameters
    if (m_Param.m_ProjectionEnabled)
    {
        descriptor.m_ProjectionWeights = m_ProjectionParameters.m_ProjectionWeights.get();
        descriptor.m_ProjectionBias = m_ProjectionParameters.m_ProjectionBias.get();
    }

    // Peephole parameters
    if (m_Param.m_PeepholeEnabled)
    {
        if (!m_Param.m_CifgEnabled)
        {
            descriptor.m_CellToInputWeights = m_PeepholeParameters.m_CellToInputWeights.get();
        }
        descriptor.m_CellToForgetWeights = m_PeepholeParameters.m_CellToForgetWeights.get();
        descriptor.m_CellToOutputWeights = m_PeepholeParameters.m_CellToOutputWeights.get();
    }

    // Layer normalisation parameters
    if (m_Param.m_LayerNormEnabled)
    {
        if (!m_Param.m_CifgEnabled)
        {
            descriptor.m_InputLayerNormWeights = m_LayerNormParameters.m_InputLayerNormWeights.get();
        }
        descriptor.m_ForgetLayerNormWeights = m_LayerNormParameters.m_ForgetLayerNormWeights.get();
        descriptor.m_CellLayerNormWeights = m_LayerNormParameters.m_CellLayerNormWeights.get();
        descriptor.m_OutputLayerNormWeights = m_LayerNormParameters.m_OutputLayerNormWeights.get();
    }

    SetAdditionalInfo(descriptor);

    return factory.CreateWorkload(LayerType::UnidirectionalSequenceLstm, descriptor, PrepInfoAndDesc(descriptor));
}

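// Clone duplicates the layer itself but shares its constant tensors: each
// member is a std::shared_ptr<ConstTensorHandle>, so the copies below bump a
// reference count rather than copying any weight data.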
UnidirectionalSequenceLstmLayer* UnidirectionalSequenceLstmLayer::Clone(Graph& graph) const
{
    auto layer = CloneBase<UnidirectionalSequenceLstmLayer>(graph, m_Param, GetName());

    layer->m_BasicParameters.m_InputToForgetWeights = m_BasicParameters.m_InputToForgetWeights ?
            m_BasicParameters.m_InputToForgetWeights : nullptr;
    layer->m_BasicParameters.m_InputToCellWeights = m_BasicParameters.m_InputToCellWeights ?
            m_BasicParameters.m_InputToCellWeights : nullptr;
    layer->m_BasicParameters.m_InputToOutputWeights = m_BasicParameters.m_InputToOutputWeights ?
            m_BasicParameters.m_InputToOutputWeights : nullptr;
    layer->m_BasicParameters.m_RecurrentToForgetWeights = m_BasicParameters.m_RecurrentToForgetWeights ?
            m_BasicParameters.m_RecurrentToForgetWeights : nullptr;
    layer->m_BasicParameters.m_RecurrentToCellWeights = m_BasicParameters.m_RecurrentToCellWeights ?
            m_BasicParameters.m_RecurrentToCellWeights : nullptr;
    layer->m_BasicParameters.m_RecurrentToOutputWeights = m_BasicParameters.m_RecurrentToOutputWeights ?
            m_BasicParameters.m_RecurrentToOutputWeights : nullptr;
    layer->m_BasicParameters.m_ForgetGateBias = m_BasicParameters.m_ForgetGateBias ?
            m_BasicParameters.m_ForgetGateBias : nullptr;
    layer->m_BasicParameters.m_CellBias = m_BasicParameters.m_CellBias ?
            m_BasicParameters.m_CellBias : nullptr;
    layer->m_BasicParameters.m_OutputGateBias = m_BasicParameters.m_OutputGateBias ?
            m_BasicParameters.m_OutputGateBias : nullptr;

    if (!m_Param.m_CifgEnabled)
    {
        layer->m_CifgParameters.m_InputToInputWeights = m_CifgParameters.m_InputToInputWeights ?
                m_CifgParameters.m_InputToInputWeights : nullptr;
        layer->m_CifgParameters.m_RecurrentToInputWeights = m_CifgParameters.m_RecurrentToInputWeights ?
                m_CifgParameters.m_RecurrentToInputWeights : nullptr;
        layer->m_CifgParameters.m_InputGateBias = m_CifgParameters.m_InputGateBias ?
                m_CifgParameters.m_InputGateBias : nullptr;
    }

    if (m_Param.m_ProjectionEnabled)
    {
        layer->m_ProjectionParameters.m_ProjectionWeights = m_ProjectionParameters.m_ProjectionWeights ?
                m_ProjectionParameters.m_ProjectionWeights : nullptr;
        layer->m_ProjectionParameters.m_ProjectionBias = m_ProjectionParameters.m_ProjectionBias ?
                m_ProjectionParameters.m_ProjectionBias : nullptr;
    }

    if (m_Param.m_PeepholeEnabled)
    {
        if (!m_Param.m_CifgEnabled)
        {
            layer->m_PeepholeParameters.m_CellToInputWeights = m_PeepholeParameters.m_CellToInputWeights ?
                    m_PeepholeParameters.m_CellToInputWeights : nullptr;
        }
        layer->m_PeepholeParameters.m_CellToForgetWeights = m_PeepholeParameters.m_CellToForgetWeights ?
                m_PeepholeParameters.m_CellToForgetWeights : nullptr;
        layer->m_PeepholeParameters.m_CellToOutputWeights = m_PeepholeParameters.m_CellToOutputWeights ?
                m_PeepholeParameters.m_CellToOutputWeights : nullptr;
    }

    if (m_Param.m_LayerNormEnabled)
    {
        layer->m_LayerNormParameters.m_InputLayerNormWeights = m_LayerNormParameters.m_InputLayerNormWeights ?
                m_LayerNormParameters.m_InputLayerNormWeights : nullptr;
        layer->m_LayerNormParameters.m_ForgetLayerNormWeights = m_LayerNormParameters.m_ForgetLayerNormWeights ?
                m_LayerNormParameters.m_ForgetLayerNormWeights : nullptr;
        layer->m_LayerNormParameters.m_CellLayerNormWeights = m_LayerNormParameters.m_CellLayerNormWeights ?
                m_LayerNormParameters.m_CellLayerNormWeights : nullptr;
        layer->m_LayerNormParameters.m_OutputLayerNormWeights = m_LayerNormParameters.m_OutputLayerNormWeights ?
                m_LayerNormParameters.m_OutputLayerNormWeights : nullptr;
    }

    return std::move(layer);
}

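// Shape inference: input 0 is the sequence tensor, either [timeSteps, batch, inputSize]
// (time major) or [batch, timeSteps, inputSize] (batch major); input 1 is the output
// state with shape [batch, outputSize]. The output keeps the first two dimensions of
// input 0 and swaps inputSize for outputSize, which is why both branches below build
// the same TensorShape expression.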
std::vector<TensorShape> UnidirectionalSequenceLstmLayer::InferOutputShapes(
    const std::vector<TensorShape>& inputShapes) const
{
    ARMNN_ASSERT(inputShapes.size() == 3);

    // Get input values for validation
    unsigned int outputSize = inputShapes[1][1];

    std::vector<TensorShape> outShapes;
    if (m_Param.m_TimeMajor)
    {
        outShapes.push_back(TensorShape({inputShapes[0][0], inputShapes[0][1], outputSize}));
    }
    else
    {
        outShapes.push_back(TensorShape({inputShapes[0][0], inputShapes[0][1], outputSize}));
    }
    return outShapes;
}

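// Validation checks three things: the layer has the expected three input
// connections, the inferred output shape is compatible with the shape already
// set on output slot 2, and every constant tensor required by the descriptor
// flags is present (while the CIFG-only tensors must be absent when CIFG is
// enabled).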
void UnidirectionalSequenceLstmLayer::ValidateTensorShapesFromInputs()
{
    VerifyLayerConnections(3, CHECK_LOCATION());

    const TensorShape& outputShape = GetOutputSlot(2).GetTensorInfo().GetShape();

    VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);

    auto inferredShapes = InferOutputShapes( {
        GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape(),
        GetInputSlot(1).GetConnection()->GetTensorInfo().GetShape(),
        GetInputSlot(2).GetConnection()->GetTensorInfo().GetShape()
    });

    ARMNN_ASSERT(inferredShapes.size() == 1);

    // Check if the weights are nullptr
    ARMNN_ASSERT_MSG(m_BasicParameters.m_InputToForgetWeights != nullptr,
                     "UnidirectionalSequenceLstmLayer: m_BasicParameters.m_InputToForgetWeights should not be null.");
    ARMNN_ASSERT_MSG(m_BasicParameters.m_InputToCellWeights != nullptr,
                     "UnidirectionalSequenceLstmLayer: m_BasicParameters.m_InputToCellWeights should not be null.");
    ARMNN_ASSERT_MSG(m_BasicParameters.m_InputToOutputWeights != nullptr,
                     "UnidirectionalSequenceLstmLayer: m_BasicParameters.m_InputToOutputWeights should not be null.");
    ARMNN_ASSERT_MSG(m_BasicParameters.m_RecurrentToForgetWeights != nullptr,
                     "UnidirectionalSequenceLstmLayer: m_BasicParameters.m_RecurrentToForgetWeights "
                     "should not be null.");
    ARMNN_ASSERT_MSG(m_BasicParameters.m_RecurrentToCellWeights != nullptr,
                     "UnidirectionalSequenceLstmLayer: m_BasicParameters.m_RecurrentToCellWeights should not be null.");
    ARMNN_ASSERT_MSG(m_BasicParameters.m_RecurrentToOutputWeights != nullptr,
                     "UnidirectionalSequenceLstmLayer: m_BasicParameters.m_RecurrentToOutputWeights "
                     "should not be null.");
    ARMNN_ASSERT_MSG(m_BasicParameters.m_ForgetGateBias != nullptr,
                     "UnidirectionalSequenceLstmLayer: m_BasicParameters.m_ForgetGateBias should not be null.");
    ARMNN_ASSERT_MSG(m_BasicParameters.m_CellBias != nullptr,
                     "UnidirectionalSequenceLstmLayer: m_BasicParameters.m_CellBias should not be null.");
    ARMNN_ASSERT_MSG(m_BasicParameters.m_OutputGateBias != nullptr,
                     "UnidirectionalSequenceLstmLayer: m_BasicParameters.m_OutputGateBias should not be null.");

    if (!m_Param.m_CifgEnabled)
    {
        ARMNN_ASSERT_MSG(m_CifgParameters.m_InputToInputWeights != nullptr,
                         "UnidirectionalSequenceLstmLayer: m_CifgParameters.m_InputToInputWeights should not be null.");
        ARMNN_ASSERT_MSG(m_CifgParameters.m_RecurrentToInputWeights != nullptr,
                         "UnidirectionalSequenceLstmLayer: m_CifgParameters.m_RecurrentToInputWeights "
                         "should not be null.");
        ARMNN_ASSERT_MSG(m_CifgParameters.m_InputGateBias != nullptr,
                         "UnidirectionalSequenceLstmLayer: m_CifgParameters.m_InputGateBias should not be null.");
    }
    else
    {
        ARMNN_ASSERT_MSG(m_CifgParameters.m_InputToInputWeights == nullptr,
            "UnidirectionalSequenceLstmLayer: m_CifgParameters.m_InputToInputWeights should not have a value "
            "when CIFG is enabled.");
        ARMNN_ASSERT_MSG(m_CifgParameters.m_RecurrentToInputWeights == nullptr,
            "UnidirectionalSequenceLstmLayer: m_CifgParameters.m_RecurrentToInputWeights should not have a value "
            "when CIFG is enabled.");
        ARMNN_ASSERT_MSG(m_CifgParameters.m_InputGateBias == nullptr,
            "UnidirectionalSequenceLstmLayer: m_CifgParameters.m_InputGateBias should not have a value "
            "when CIFG is enabled.");
    }

    if (m_Param.m_ProjectionEnabled)
    {
        ARMNN_ASSERT_MSG(m_ProjectionParameters.m_ProjectionWeights != nullptr,
                         "UnidirectionalSequenceLstmLayer: m_ProjectionParameters.m_ProjectionWeights "
                         "should not be null.");
    }

    if (m_Param.m_PeepholeEnabled)
    {
        if (!m_Param.m_CifgEnabled)
        {
            ARMNN_ASSERT_MSG(m_PeepholeParameters.m_CellToInputWeights != nullptr,
                             "UnidirectionalSequenceLstmLayer: m_PeepholeParameters.m_CellToInputWeights "
                             "should not be null "
                             "when Peephole is enabled and CIFG is disabled.");
        }
        ARMNN_ASSERT_MSG(m_PeepholeParameters.m_CellToForgetWeights != nullptr,
                         "UnidirectionalSequenceLstmLayer: m_PeepholeParameters.m_CellToForgetWeights "
                         "should not be null.");
        ARMNN_ASSERT_MSG(m_PeepholeParameters.m_CellToOutputWeights != nullptr,
                         "UnidirectionalSequenceLstmLayer: m_PeepholeParameters.m_CellToOutputWeights "
                         "should not be null.");
    }

    if (m_Param.m_LayerNormEnabled)
    {
        if (!m_Param.m_CifgEnabled)
        {
            ARMNN_ASSERT_MSG(m_LayerNormParameters.m_InputLayerNormWeights != nullptr,
                             "UnidirectionalSequenceLstmLayer: m_LayerNormParameters.m_InputLayerNormWeights "
                             "should not be null.");
        }
        ARMNN_ASSERT_MSG(m_LayerNormParameters.m_ForgetLayerNormWeights != nullptr,
                         "UnidirectionalSequenceLstmLayer: m_LayerNormParameters.m_ForgetLayerNormWeights "
                         "should not be null.");
        ARMNN_ASSERT_MSG(m_LayerNormParameters.m_CellLayerNormWeights != nullptr,
                         "UnidirectionalSequenceLstmLayer: m_LayerNormParameters.m_CellLayerNormWeights "
                         "should not be null.");
        ARMNN_ASSERT_MSG(m_LayerNormParameters.m_OutputLayerNormWeights != nullptr,
                         "UnidirectionalSequenceLstmLayer: m_LayerNormParameters.m_OutputLayerNormWeights "
                         "should not be null.");
    }

    ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "UnidirectionalSequenceLstmLayer");
}

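// GetConstantTensorsByRef hands out references to the shared_ptr members so
// graph-level passes can inspect or replace the constant tensors without
// knowing the concrete layer type. Entries for disabled parameter sets are
// simply empty handles.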
Layer::ConstantTensors UnidirectionalSequenceLstmLayer::GetConstantTensorsByRef()
{
    // For API stability DO NOT ALTER order and add new members to the end of vector
    return {m_BasicParameters.m_InputToForgetWeights,
            m_BasicParameters.m_InputToCellWeights,
            m_BasicParameters.m_InputToOutputWeights,
            m_BasicParameters.m_RecurrentToForgetWeights,
            m_BasicParameters.m_RecurrentToCellWeights,
            m_BasicParameters.m_RecurrentToOutputWeights,
            m_BasicParameters.m_ForgetGateBias,
            m_BasicParameters.m_CellBias,
            m_BasicParameters.m_OutputGateBias,
            // Cifg parameters
            m_CifgParameters.m_InputToInputWeights,
            m_CifgParameters.m_RecurrentToInputWeights,
            m_CifgParameters.m_InputGateBias,
            // Projection parameters
            m_ProjectionParameters.m_ProjectionWeights,
            m_ProjectionParameters.m_ProjectionBias,
            // Peephole parameters
            m_PeepholeParameters.m_CellToInputWeights,
            m_PeepholeParameters.m_CellToForgetWeights,
            m_PeepholeParameters.m_CellToOutputWeights,
            // Layer normalisation parameters
            m_LayerNormParameters.m_InputLayerNormWeights,
            m_LayerNormParameters.m_ForgetLayerNormWeights,
            m_LayerNormParameters.m_CellLayerNormWeights,
            m_LayerNormParameters.m_OutputLayerNormWeights};
}

ARMNN_NO_DEPRECATE_WARN_BEGIN
void UnidirectionalSequenceLstmLayer::Accept(ILayerVisitor& visitor) const
{
    IgnoreUnused(visitor);
    throw armnn::Exception("UnidirectionalSequenceLstmLayer: VisitUnidirectionalSequenceLstmLayer is not implemented");
}
ARMNN_NO_DEPRECATE_WARN_END

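// ExecuteStrategy is the replacement for the deprecated visitor API. It maps
// every constant tensor that is actually set, packages them as ConstTensors in
// a fixed order, and hands them to the IStrategy together with the descriptor
// and the layer name.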
void UnidirectionalSequenceLstmLayer::ExecuteStrategy(IStrategy& strategy) const
{
    std::vector<ConstTensor> constTensors;

    LstmDescriptor descriptor = GetParameters();

    ManagedConstTensorHandle managedInputToForgetWeights(m_BasicParameters.m_InputToForgetWeights);
    ManagedConstTensorHandle managedInputToCellWeights(m_BasicParameters.m_InputToCellWeights);
    ManagedConstTensorHandle managedInputToOutputWeights(m_BasicParameters.m_InputToOutputWeights);
    ManagedConstTensorHandle managedRecurrentToForgetWeights(m_BasicParameters.m_RecurrentToForgetWeights);
    ManagedConstTensorHandle managedRecurrentToCellWeights(m_BasicParameters.m_RecurrentToCellWeights);
    ManagedConstTensorHandle managedRecurrentToOutputWeights(m_BasicParameters.m_RecurrentToOutputWeights);
    ManagedConstTensorHandle managedForgetGateBias(m_BasicParameters.m_ForgetGateBias);
    ManagedConstTensorHandle managedCellBias(m_BasicParameters.m_CellBias);
    ManagedConstTensorHandle managedOutputGateBias(m_BasicParameters.m_OutputGateBias);

    // Cifg parameters
    ManagedConstTensorHandle managedInputToInputWeights(m_CifgParameters.m_InputToInputWeights);
    ManagedConstTensorHandle managedRecurrentToInputWeights(m_CifgParameters.m_RecurrentToInputWeights);
    ManagedConstTensorHandle managedInputGateBias(m_CifgParameters.m_InputGateBias);

    // Projection parameters
    ManagedConstTensorHandle managedProjectionWeights(m_ProjectionParameters.m_ProjectionWeights);
    ManagedConstTensorHandle managedProjectionBias(m_ProjectionParameters.m_ProjectionBias);

    // Peephole parameters
    ManagedConstTensorHandle managedCellToInputWeights(m_PeepholeParameters.m_CellToInputWeights);
    ManagedConstTensorHandle managedCellToForgetWeights(m_PeepholeParameters.m_CellToForgetWeights);
    ManagedConstTensorHandle managedCellToOutputWeights(m_PeepholeParameters.m_CellToOutputWeights);

    // Layer normalisation parameters
    ManagedConstTensorHandle managedInputLayerNormWeights(m_LayerNormParameters.m_InputLayerNormWeights);
    ManagedConstTensorHandle managedForgetLayerNormWeights(m_LayerNormParameters.m_ForgetLayerNormWeights);
    ManagedConstTensorHandle managedCellLayerNormWeights(m_LayerNormParameters.m_CellLayerNormWeights);
    ManagedConstTensorHandle managedOutputLayerNormWeights(m_LayerNormParameters.m_OutputLayerNormWeights);

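    // Note: ManagedConstTensorHandle is an RAII wrapper: Map() maps the
    // underlying memory on first use and the destructor unmaps it when the
    // handle goes out of scope at the end of this function, so no explicit
    // Unmap() calls are needed below.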
    // First add mandatory/basic parameters
    if (m_BasicParameters.m_InputToForgetWeights != nullptr)
    {
        constTensors.emplace_back(ConstTensor(managedInputToForgetWeights.GetTensorInfo(),
                                              managedInputToForgetWeights.Map()));
    }
    if (m_BasicParameters.m_InputToCellWeights != nullptr)
    {
        constTensors.emplace_back(ConstTensor(managedInputToCellWeights.GetTensorInfo(),
                                              managedInputToCellWeights.Map()));
    }
    if (m_BasicParameters.m_InputToOutputWeights != nullptr)
    {
        constTensors.emplace_back(ConstTensor(managedInputToOutputWeights.GetTensorInfo(),
                                              managedInputToOutputWeights.Map()));
    }
    if (m_BasicParameters.m_RecurrentToForgetWeights != nullptr)
    {
        constTensors.emplace_back(ConstTensor(managedRecurrentToForgetWeights.GetTensorInfo(),
                                              managedRecurrentToForgetWeights.Map()));
    }
    if (m_BasicParameters.m_RecurrentToCellWeights != nullptr)
    {
        constTensors.emplace_back(ConstTensor(managedRecurrentToCellWeights.GetTensorInfo(),
                                              managedRecurrentToCellWeights.Map()));
    }
    if (m_BasicParameters.m_RecurrentToOutputWeights != nullptr)
    {
        constTensors.emplace_back(ConstTensor(managedRecurrentToOutputWeights.GetTensorInfo(),
                                              managedRecurrentToOutputWeights.Map()));
    }
    if (m_BasicParameters.m_ForgetGateBias != nullptr)
    {
        constTensors.emplace_back(ConstTensor(managedForgetGateBias.GetTensorInfo(),
                                              managedForgetGateBias.Map()));
    }
    if (m_BasicParameters.m_CellBias != nullptr)
    {
        constTensors.emplace_back(ConstTensor(managedCellBias.GetTensorInfo(),
                                              managedCellBias.Map()));
    }
    if (m_BasicParameters.m_OutputGateBias != nullptr)
    {
        constTensors.emplace_back(ConstTensor(managedOutputGateBias.GetTensorInfo(),
                                              managedOutputGateBias.Map()));
    }

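    // The optional tensors below are appended in a fixed order (CIFG, peephole,
    // projection, layer normalisation), gated by the descriptor flags, so a
    // strategy receiving the vector can rebuild the full parameter set from the
    // descriptor alone.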

    // Add cifg parameters
    if (!descriptor.m_CifgEnabled)
    {
        if (m_CifgParameters.m_InputToInputWeights != nullptr)
        {
            constTensors.emplace_back(ConstTensor(managedInputToInputWeights.GetTensorInfo(),
                                                  managedInputToInputWeights.Map()));
        }
        if (m_CifgParameters.m_RecurrentToInputWeights != nullptr)
        {
            constTensors.emplace_back(ConstTensor(managedRecurrentToInputWeights.GetTensorInfo(),
                                                  managedRecurrentToInputWeights.Map()));
        }
        if (m_CifgParameters.m_InputGateBias != nullptr)
        {
            constTensors.emplace_back(ConstTensor(managedInputGateBias.GetTensorInfo(),
                                                  managedInputGateBias.Map()));
        }
    }

    // Add peephole parameters
    if (descriptor.m_PeepholeEnabled)
    {
        if (!descriptor.m_CifgEnabled)
        {
            if (m_PeepholeParameters.m_CellToInputWeights != nullptr)
            {
                constTensors.emplace_back(ConstTensor(managedCellToInputWeights.GetTensorInfo(),
                                                      managedCellToInputWeights.Map()));
            }
        }
        if (m_PeepholeParameters.m_CellToForgetWeights != nullptr)
        {
            constTensors.emplace_back(ConstTensor(managedCellToForgetWeights.GetTensorInfo(),
                                                  managedCellToForgetWeights.Map()));
        }
        if (m_PeepholeParameters.m_CellToOutputWeights != nullptr)
        {
            constTensors.emplace_back(ConstTensor(managedCellToOutputWeights.GetTensorInfo(),
                                                  managedCellToOutputWeights.Map()));
        }
    }

    // Add projection parameters
    if (descriptor.m_ProjectionEnabled)
    {
        if (m_ProjectionParameters.m_ProjectionWeights != nullptr)
        {
            constTensors.emplace_back(ConstTensor(managedProjectionWeights.GetTensorInfo(),
                                                  managedProjectionWeights.Map()));
        }
        if (m_ProjectionParameters.m_ProjectionBias != nullptr)
        {
            constTensors.emplace_back(ConstTensor(managedProjectionBias.GetTensorInfo(),
                                                  managedProjectionBias.Map()));
        }
    }

    // Add norm parameters
    if (descriptor.m_LayerNormEnabled)
    {
        if (!descriptor.m_CifgEnabled)
        {
            if (m_LayerNormParameters.m_InputLayerNormWeights != nullptr)
            {
                constTensors.emplace_back(ConstTensor(managedInputLayerNormWeights.GetTensorInfo(),
                                                      managedInputLayerNormWeights.Map()));
            }
        }
        if (m_LayerNormParameters.m_ForgetLayerNormWeights != nullptr)
        {
            constTensors.emplace_back(ConstTensor(managedForgetLayerNormWeights.GetTensorInfo(),
                                                  managedForgetLayerNormWeights.Map()));
        }
        if (m_LayerNormParameters.m_CellLayerNormWeights != nullptr)
        {
            constTensors.emplace_back(ConstTensor(managedCellLayerNormWeights.GetTensorInfo(),
                                                  managedCellLayerNormWeights.Map()));
        }
        if (m_LayerNormParameters.m_OutputLayerNormWeights != nullptr)
        {
            constTensors.emplace_back(ConstTensor(managedOutputLayerNormWeights.GetTensorInfo(),
                                                  managedOutputLayerNormWeights.Map()));
        }
    }

    strategy.ExecuteStrategy(this, GetParameters(), constTensors, GetName());
}

} // namespace armnn