QuantizedLstmLayer.cpp (ArmNN 21.02)
//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "QuantizedLstmLayer.hpp"

#include "LayerCloneBase.hpp"

#include <armnn/QuantizedLstmParams.hpp>
#include <armnn/TypesUtils.hpp>
#include <backendsCommon/CpuTensorHandle.hpp>
#include <backendsCommon/WorkloadFactory.hpp>

namespace armnn
{

QuantizedLstmLayer::QuantizedLstmLayer(const char* name)
    : Layer(3, 2, LayerType::QuantizedLstm, name)
{
}

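// Creates the QuantizedLstm workload for this layer: the queue descriptor is populated with
// raw (non-owning) pointers to the twelve constant tensors held in m_QuantizedLstmParameters
// before being handed to the workload factory. Ownership of the tensors stays with the layer.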
std::unique_ptr<IWorkload> QuantizedLstmLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
    QuantizedLstmQueueDescriptor descriptor;

    // QuantizedLstmLayer parameters - there are no optional params
    descriptor.m_InputToInputWeights  = m_QuantizedLstmParameters.m_InputToInputWeights.get();
    descriptor.m_InputToForgetWeights = m_QuantizedLstmParameters.m_InputToForgetWeights.get();
    descriptor.m_InputToCellWeights   = m_QuantizedLstmParameters.m_InputToCellWeights.get();
    descriptor.m_InputToOutputWeights = m_QuantizedLstmParameters.m_InputToOutputWeights.get();

    descriptor.m_RecurrentToInputWeights  = m_QuantizedLstmParameters.m_RecurrentToInputWeights.get();
    descriptor.m_RecurrentToForgetWeights = m_QuantizedLstmParameters.m_RecurrentToForgetWeights.get();
    descriptor.m_RecurrentToCellWeights   = m_QuantizedLstmParameters.m_RecurrentToCellWeights.get();
    descriptor.m_RecurrentToOutputWeights = m_QuantizedLstmParameters.m_RecurrentToOutputWeights.get();

    descriptor.m_InputGateBias  = m_QuantizedLstmParameters.m_InputGateBias.get();
    descriptor.m_ForgetGateBias = m_QuantizedLstmParameters.m_ForgetGateBias.get();
    descriptor.m_CellBias       = m_QuantizedLstmParameters.m_CellBias.get();
    descriptor.m_OutputGateBias = m_QuantizedLstmParameters.m_OutputGateBias.get();

    SetAdditionalInfo(descriptor);

    return factory.CreateQuantizedLstm(descriptor, PrepInfoAndDesc(descriptor));
}

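// Clones this layer into the given graph. Each parameter tensor is deep-copied into a new
// ScopedCpuTensorHandle (or left null), so the clone owns independent weight and bias storage.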
QuantizedLstmLayer* QuantizedLstmLayer::Clone(Graph& graph) const
{
    auto layer = CloneBase<QuantizedLstmLayer>(graph, GetName());

    layer->m_QuantizedLstmParameters.m_InputToInputWeights = m_QuantizedLstmParameters.m_InputToInputWeights ?
        std::make_unique<ScopedCpuTensorHandle>(*m_QuantizedLstmParameters.m_InputToInputWeights) : nullptr;
    layer->m_QuantizedLstmParameters.m_InputToForgetWeights = m_QuantizedLstmParameters.m_InputToForgetWeights ?
        std::make_unique<ScopedCpuTensorHandle>(*m_QuantizedLstmParameters.m_InputToForgetWeights) : nullptr;
    layer->m_QuantizedLstmParameters.m_InputToCellWeights = m_QuantizedLstmParameters.m_InputToCellWeights ?
        std::make_unique<ScopedCpuTensorHandle>(*m_QuantizedLstmParameters.m_InputToCellWeights) : nullptr;
    layer->m_QuantizedLstmParameters.m_InputToOutputWeights = m_QuantizedLstmParameters.m_InputToOutputWeights ?
        std::make_unique<ScopedCpuTensorHandle>(*m_QuantizedLstmParameters.m_InputToOutputWeights) : nullptr;

    layer->m_QuantizedLstmParameters.m_RecurrentToInputWeights = m_QuantizedLstmParameters.m_RecurrentToInputWeights ?
        std::make_unique<ScopedCpuTensorHandle>(*m_QuantizedLstmParameters.m_RecurrentToInputWeights) : nullptr;
    layer->m_QuantizedLstmParameters.m_RecurrentToForgetWeights = m_QuantizedLstmParameters.m_RecurrentToForgetWeights
        ? std::make_unique<ScopedCpuTensorHandle>(*m_QuantizedLstmParameters.m_RecurrentToForgetWeights) : nullptr;
    layer->m_QuantizedLstmParameters.m_RecurrentToCellWeights = m_QuantizedLstmParameters.m_RecurrentToCellWeights ?
        std::make_unique<ScopedCpuTensorHandle>(*m_QuantizedLstmParameters.m_RecurrentToCellWeights) : nullptr;
    layer->m_QuantizedLstmParameters.m_RecurrentToOutputWeights = m_QuantizedLstmParameters.m_RecurrentToOutputWeights
        ? std::make_unique<ScopedCpuTensorHandle>(*m_QuantizedLstmParameters.m_RecurrentToOutputWeights) : nullptr;

    layer->m_QuantizedLstmParameters.m_InputGateBias = m_QuantizedLstmParameters.m_InputGateBias ?
        std::make_unique<ScopedCpuTensorHandle>(*m_QuantizedLstmParameters.m_InputGateBias) : nullptr;
    layer->m_QuantizedLstmParameters.m_ForgetGateBias = m_QuantizedLstmParameters.m_ForgetGateBias ?
        std::make_unique<ScopedCpuTensorHandle>(*m_QuantizedLstmParameters.m_ForgetGateBias) : nullptr;
    layer->m_QuantizedLstmParameters.m_CellBias = m_QuantizedLstmParameters.m_CellBias ?
        std::make_unique<ScopedCpuTensorHandle>(*m_QuantizedLstmParameters.m_CellBias) : nullptr;
    layer->m_QuantizedLstmParameters.m_OutputGateBias = m_QuantizedLstmParameters.m_OutputGateBias ?
        std::make_unique<ScopedCpuTensorHandle>(*m_QuantizedLstmParameters.m_OutputGateBias) : nullptr;

    return std::move(layer);
}

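// Both outputs, cellStateOut and output, have shape [numBatches, outputSize]: numBatches is
// taken from the first input shape and outputSize from previousCellStateIn (the second input).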
std::vector<TensorShape> QuantizedLstmLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
    ARMNN_ASSERT(inputShapes.size() == 3);

    // Get input values for validation
    unsigned int numBatches = inputShapes[0][0];
    unsigned int outputSize = inputShapes[1][1];

    std::vector<TensorShape> outShapes;
    outShapes.push_back(TensorShape({numBatches, outputSize})); // cellStateOut
    outShapes.push_back(TensorShape({numBatches, outputSize})); // output

    return outShapes;
}

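// Verifies the three input connections, asserts that all twelve parameter tensors are
// present, and checks both output slots against the shapes inferred from the inputs.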
void QuantizedLstmLayer::ValidateTensorShapesFromInputs()
{
    VerifyLayerConnections(3, CHECK_LOCATION());

    const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();

    VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);

    auto inferredShapes = InferOutputShapes(
    {
        GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape(), // input
        GetInputSlot(1).GetConnection()->GetTensorInfo().GetShape(), // previousCellStateIn
        GetInputSlot(2).GetConnection()->GetTensorInfo().GetShape()  // previousOutputIn
    });

    ARMNN_ASSERT(inferredShapes.size() == 2);

    // Check weights and bias for nullptr
    ARMNN_ASSERT_MSG(m_QuantizedLstmParameters.m_InputToInputWeights != nullptr,
                     "QuantizedLstmLayer: m_QuantizedLstmParameters.m_InputToInputWeights should not be null.");
    ARMNN_ASSERT_MSG(m_QuantizedLstmParameters.m_InputToForgetWeights != nullptr,
                     "QuantizedLstmLayer: m_QuantizedLstmParameters.m_InputToForgetWeights should not be null.");
    ARMNN_ASSERT_MSG(m_QuantizedLstmParameters.m_InputToCellWeights != nullptr,
                     "QuantizedLstmLayer: m_QuantizedLstmParameters.m_InputToCellWeights should not be null.");
    ARMNN_ASSERT_MSG(m_QuantizedLstmParameters.m_InputToOutputWeights != nullptr,
                     "QuantizedLstmLayer: m_QuantizedLstmParameters.m_InputToOutputWeights should not be null.");

    ARMNN_ASSERT_MSG(m_QuantizedLstmParameters.m_RecurrentToInputWeights != nullptr,
                     "QuantizedLstmLayer: m_QuantizedLstmParameters.m_RecurrentToInputWeights should not be null.");
    ARMNN_ASSERT_MSG(m_QuantizedLstmParameters.m_RecurrentToForgetWeights != nullptr,
                     "QuantizedLstmLayer: m_QuantizedLstmParameters.m_RecurrentToForgetWeights should not be null.");
    ARMNN_ASSERT_MSG(m_QuantizedLstmParameters.m_RecurrentToCellWeights != nullptr,
                     "QuantizedLstmLayer: m_QuantizedLstmParameters.m_RecurrentToCellWeights should not be null.");
    ARMNN_ASSERT_MSG(m_QuantizedLstmParameters.m_RecurrentToOutputWeights != nullptr,
                     "QuantizedLstmLayer: m_QuantizedLstmParameters.m_RecurrentToOutputWeights should not be null.");

    ARMNN_ASSERT_MSG(m_QuantizedLstmParameters.m_InputGateBias != nullptr,
                     "QuantizedLstmLayer: m_QuantizedLstmParameters.m_InputGateBias should not be null.");
    ARMNN_ASSERT_MSG(m_QuantizedLstmParameters.m_ForgetGateBias != nullptr,
                     "QuantizedLstmLayer: m_QuantizedLstmParameters.m_ForgetGateBias should not be null.");
    ARMNN_ASSERT_MSG(m_QuantizedLstmParameters.m_CellBias != nullptr,
                     "QuantizedLstmLayer: m_QuantizedLstmParameters.m_CellBias should not be null.");
    ARMNN_ASSERT_MSG(m_QuantizedLstmParameters.m_OutputGateBias != nullptr,
                     "QuantizedLstmLayer: m_QuantizedLstmParameters.m_OutputGateBias should not be null.");

    // Check output TensorShape(s) match inferred shape
    ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "QuantizedLstmLayer");

    ValidateAndCopyShape(GetOutputSlot(1).GetTensorInfo().GetShape(),
                         inferredShapes[1],
                         m_ShapeInferenceMethod,
                         "QuantizedLstmLayer",
                         1);
}

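// Returns references to the layer's owned parameter tensor handles, grouped as InputToX
// weights, RecurrentToX weights, then gate biases, so callers can inspect or replace them.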
Layer::ConstantTensors QuantizedLstmLayer::GetConstantTensorsByRef()
{
    return
    {
        m_QuantizedLstmParameters.m_InputToInputWeights,
        m_QuantizedLstmParameters.m_InputToForgetWeights,
        m_QuantizedLstmParameters.m_InputToCellWeights,
        m_QuantizedLstmParameters.m_InputToOutputWeights,

        m_QuantizedLstmParameters.m_RecurrentToInputWeights,
        m_QuantizedLstmParameters.m_RecurrentToForgetWeights,
        m_QuantizedLstmParameters.m_RecurrentToCellWeights,
        m_QuantizedLstmParameters.m_RecurrentToOutputWeights,

        m_QuantizedLstmParameters.m_InputGateBias,
        m_QuantizedLstmParameters.m_ForgetGateBias,
        m_QuantizedLstmParameters.m_CellBias,
        m_QuantizedLstmParameters.m_OutputGateBias
    };
}

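// Presents the parameters to an ILayerVisitor. Each ScopedCpuTensorHandle is wrapped in a
// stack-local ConstTensor (via Map(true)), so the pointers stored in QuantizedLstmInputParams
// are only valid for the duration of the VisitQuantizedLstmLayer() call.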
void QuantizedLstmLayer::Accept(ILayerVisitor& visitor) const
{
    QuantizedLstmInputParams inputParams;

    // InputToX weight tensors
    ConstTensor inputToInputWeightsTensor;
    if (m_QuantizedLstmParameters.m_InputToInputWeights != nullptr)
    {
        ConstTensor inputToInputWeightsTensorCopy(m_QuantizedLstmParameters.m_InputToInputWeights->GetTensorInfo(),
                                                  m_QuantizedLstmParameters.m_InputToInputWeights->Map(true));
        inputToInputWeightsTensor = inputToInputWeightsTensorCopy;
        inputParams.m_InputToInputWeights = &inputToInputWeightsTensor;
    }

    ConstTensor inputToForgetWeightsTensor;
    if (m_QuantizedLstmParameters.m_InputToForgetWeights != nullptr)
    {
        ConstTensor inputToForgetWeightsTensorCopy(m_QuantizedLstmParameters.m_InputToForgetWeights->GetTensorInfo(),
                                                   m_QuantizedLstmParameters.m_InputToForgetWeights->Map(true));
        inputToForgetWeightsTensor = inputToForgetWeightsTensorCopy;
        inputParams.m_InputToForgetWeights = &inputToForgetWeightsTensor;
    }

    ConstTensor inputToCellWeightsTensor;
    if (m_QuantizedLstmParameters.m_InputToCellWeights != nullptr)
    {
        ConstTensor inputToCellWeightsTensorCopy(m_QuantizedLstmParameters.m_InputToCellWeights->GetTensorInfo(),
                                                 m_QuantizedLstmParameters.m_InputToCellWeights->Map(true));
        inputToCellWeightsTensor = inputToCellWeightsTensorCopy;
        inputParams.m_InputToCellWeights = &inputToCellWeightsTensor;
    }

    ConstTensor inputToOutputWeightsTensor;
    if (m_QuantizedLstmParameters.m_InputToOutputWeights != nullptr)
    {
        ConstTensor inputToOutputWeightsTensorCopy(m_QuantizedLstmParameters.m_InputToOutputWeights->GetTensorInfo(),
                                                   m_QuantizedLstmParameters.m_InputToOutputWeights->Map(true));
        inputToOutputWeightsTensor = inputToOutputWeightsTensorCopy;
        inputParams.m_InputToOutputWeights = &inputToOutputWeightsTensor;
    }

    // RecurrentToX weight tensors
    ConstTensor recurrentToInputWeightsTensor;
    if (m_QuantizedLstmParameters.m_RecurrentToInputWeights != nullptr)
    {
        ConstTensor recurrentToInputWeightsTensorCopy(
                m_QuantizedLstmParameters.m_RecurrentToInputWeights->GetTensorInfo(),
                m_QuantizedLstmParameters.m_RecurrentToInputWeights->Map(true));
        recurrentToInputWeightsTensor = recurrentToInputWeightsTensorCopy;
        inputParams.m_RecurrentToInputWeights = &recurrentToInputWeightsTensor;
    }

    ConstTensor recurrentToForgetWeightsTensor;
    if (m_QuantizedLstmParameters.m_RecurrentToForgetWeights != nullptr)
    {
        ConstTensor recurrentToForgetWeightsTensorCopy(
                m_QuantizedLstmParameters.m_RecurrentToForgetWeights->GetTensorInfo(),
                m_QuantizedLstmParameters.m_RecurrentToForgetWeights->Map(true));
        recurrentToForgetWeightsTensor = recurrentToForgetWeightsTensorCopy;
        inputParams.m_RecurrentToForgetWeights = &recurrentToForgetWeightsTensor;
    }

    ConstTensor recurrentToCellWeightsTensor;
    if (m_QuantizedLstmParameters.m_RecurrentToCellWeights != nullptr)
    {
        ConstTensor recurrentToCellWeightsTensorCopy(
                m_QuantizedLstmParameters.m_RecurrentToCellWeights->GetTensorInfo(),
                m_QuantizedLstmParameters.m_RecurrentToCellWeights->Map(true));
        recurrentToCellWeightsTensor = recurrentToCellWeightsTensorCopy;
        inputParams.m_RecurrentToCellWeights = &recurrentToCellWeightsTensor;
    }

    ConstTensor recurrentToOutputWeightsTensor;
    if (m_QuantizedLstmParameters.m_RecurrentToOutputWeights != nullptr)
    {
        ConstTensor recurrentToOutputWeightsTensorCopy(
                m_QuantizedLstmParameters.m_RecurrentToOutputWeights->GetTensorInfo(),
                m_QuantizedLstmParameters.m_RecurrentToOutputWeights->Map(true));
        recurrentToOutputWeightsTensor = recurrentToOutputWeightsTensorCopy;
        inputParams.m_RecurrentToOutputWeights = &recurrentToOutputWeightsTensor;
    }

    // Bias tensors
    ConstTensor inputGateBiasTensor;
    if (m_QuantizedLstmParameters.m_InputGateBias != nullptr)
    {
        ConstTensor inputGateBiasTensorCopy(m_QuantizedLstmParameters.m_InputGateBias->GetTensorInfo(),
                                            m_QuantizedLstmParameters.m_InputGateBias->Map(true));
        inputGateBiasTensor = inputGateBiasTensorCopy;
        inputParams.m_InputGateBias = &inputGateBiasTensor;
    }

    ConstTensor forgetGateBiasTensor;
    if (m_QuantizedLstmParameters.m_ForgetGateBias != nullptr)
    {
        ConstTensor forgetGateBiasTensorCopy(m_QuantizedLstmParameters.m_ForgetGateBias->GetTensorInfo(),
                                             m_QuantizedLstmParameters.m_ForgetGateBias->Map(true));
        forgetGateBiasTensor = forgetGateBiasTensorCopy;
        inputParams.m_ForgetGateBias = &forgetGateBiasTensor;
    }

    ConstTensor cellBiasTensor;
    if (m_QuantizedLstmParameters.m_CellBias != nullptr)
    {
        ConstTensor cellBiasTensorCopy(m_QuantizedLstmParameters.m_CellBias->GetTensorInfo(),
                                       m_QuantizedLstmParameters.m_CellBias->Map(true));
        cellBiasTensor = cellBiasTensorCopy;
        inputParams.m_CellBias = &cellBiasTensor;
    }

    ConstTensor outputGateBiasTensor;
    if (m_QuantizedLstmParameters.m_OutputGateBias != nullptr)
    {
        ConstTensor outputGateBiasCopy(m_QuantizedLstmParameters.m_OutputGateBias->GetTensorInfo(),
                                       m_QuantizedLstmParameters.m_OutputGateBias->Map(true));
        outputGateBiasTensor = outputGateBiasCopy;
        inputParams.m_OutputGateBias = &outputGateBiasTensor;
    }

    visitor.VisitQuantizedLstmLayer(this, inputParams, GetName());
}

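// Strategy counterpart of Accept(): collects whichever parameter tensors are present into a
// flat std::vector<ConstTensor> and forwards them with an empty BaseDescriptor.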
void QuantizedLstmLayer::ExecuteStrategy(IStrategy& strategy) const
{
    std::vector<ConstTensor> constTensors;

    // InputToX weight tensors
    if (m_QuantizedLstmParameters.m_InputToInputWeights != nullptr)
    {
        constTensors.emplace_back(ConstTensor(m_QuantizedLstmParameters.m_InputToInputWeights->GetTensorInfo(),
                                              m_QuantizedLstmParameters.m_InputToInputWeights->Map(true)));
    }

    if (m_QuantizedLstmParameters.m_InputToForgetWeights != nullptr)
    {
        constTensors.emplace_back(ConstTensor(m_QuantizedLstmParameters.m_InputToForgetWeights->GetTensorInfo(),
                                              m_QuantizedLstmParameters.m_InputToForgetWeights->Map(true)));
    }

    if (m_QuantizedLstmParameters.m_InputToCellWeights != nullptr)
    {
        constTensors.emplace_back(ConstTensor(m_QuantizedLstmParameters.m_InputToCellWeights->GetTensorInfo(),
                                              m_QuantizedLstmParameters.m_InputToCellWeights->Map(true)));
    }

    if (m_QuantizedLstmParameters.m_InputToOutputWeights != nullptr)
    {
        constTensors.emplace_back(ConstTensor(m_QuantizedLstmParameters.m_InputToOutputWeights->GetTensorInfo(),
                                              m_QuantizedLstmParameters.m_InputToOutputWeights->Map(true)));
    }

    // RecurrentToX weight tensors
    if (m_QuantizedLstmParameters.m_RecurrentToInputWeights != nullptr)
    {
        constTensors.emplace_back(ConstTensor(
                m_QuantizedLstmParameters.m_RecurrentToInputWeights->GetTensorInfo(),
                m_QuantizedLstmParameters.m_RecurrentToInputWeights->Map(true)));
    }

    if (m_QuantizedLstmParameters.m_RecurrentToForgetWeights != nullptr)
    {
        constTensors.emplace_back(ConstTensor(
                m_QuantizedLstmParameters.m_RecurrentToForgetWeights->GetTensorInfo(),
                m_QuantizedLstmParameters.m_RecurrentToForgetWeights->Map(true)));
    }

    if (m_QuantizedLstmParameters.m_RecurrentToCellWeights != nullptr)
    {
        constTensors.emplace_back(ConstTensor(
                m_QuantizedLstmParameters.m_RecurrentToCellWeights->GetTensorInfo(),
                m_QuantizedLstmParameters.m_RecurrentToCellWeights->Map(true)));
    }

    if (m_QuantizedLstmParameters.m_RecurrentToOutputWeights != nullptr)
    {
        constTensors.emplace_back(ConstTensor(
                m_QuantizedLstmParameters.m_RecurrentToOutputWeights->GetTensorInfo(),
                m_QuantizedLstmParameters.m_RecurrentToOutputWeights->Map(true)));
    }

    // Bias tensors
    if (m_QuantizedLstmParameters.m_InputGateBias != nullptr)
    {
        constTensors.emplace_back(ConstTensor(m_QuantizedLstmParameters.m_InputGateBias->GetTensorInfo(),
                                              m_QuantizedLstmParameters.m_InputGateBias->Map(true)));
    }

    if (m_QuantizedLstmParameters.m_ForgetGateBias != nullptr)
    {
        constTensors.emplace_back(ConstTensor(m_QuantizedLstmParameters.m_ForgetGateBias->GetTensorInfo(),
                                              m_QuantizedLstmParameters.m_ForgetGateBias->Map(true)));
    }

    if (m_QuantizedLstmParameters.m_CellBias != nullptr)
    {
        constTensors.emplace_back(ConstTensor(m_QuantizedLstmParameters.m_CellBias->GetTensorInfo(),
                                              m_QuantizedLstmParameters.m_CellBias->Map(true)));
    }

    if (m_QuantizedLstmParameters.m_OutputGateBias != nullptr)
    {
        constTensors.emplace_back(ConstTensor(m_QuantizedLstmParameters.m_OutputGateBias->GetTensorInfo(),
                                              m_QuantizedLstmParameters.m_OutputGateBias->Map(true)));
    }

    strategy.ExecuteStrategy(this, BaseDescriptor(), constTensors, GetName());
}

} // namespace armnn
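A minimal usage sketch, not part of the original file. It assumes the public INetwork API as it stood in ArmNN 21.02; the binding IDs and the "qLstm" name are illustrative. The three input slots correspond to the Layer(3, 2, ...) construction above: slot 0 is input, slot 1 is previousCellStateIn, slot 2 is previousOutputIn.

    // Illustrative only - params must first be populated with the twelve ConstTensor pointers.
    armnn::INetworkPtr network = armnn::INetwork::Create();
    armnn::QuantizedLstmInputParams params;
    armnn::IConnectableLayer* input     = network->AddInputLayer(0);
    armnn::IConnectableLayer* cellState = network->AddInputLayer(1);
    armnn::IConnectableLayer* outputIn  = network->AddInputLayer(2);
    armnn::IConnectableLayer* lstm      = network->AddQuantizedLstmLayer(params, "qLstm");
    input->GetOutputSlot(0).Connect(lstm->GetInputSlot(0));     // input
    cellState->GetOutputSlot(0).Connect(lstm->GetInputSlot(1)); // previousCellStateIn
    outputIn->GetOutputSlot(0).Connect(lstm->GetInputSlot(2));  // previousOutputIn

For numBatches = 2 and outputSize = 4, InferOutputShapes({{2, 10}, {2, 4}, {2, 4}}) returns {{2, 4}, {2, 4}}: one shape for cellStateOut and one for output.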