QuantizedLstmLayer* QuantizedLstmLayer::Clone(Graph& graph) const
{
    auto layer = CloneBase<QuantizedLstmLayer>(graph, GetName());
    // ... each m_QuantizedLstmParameters handle is copied across to the clone ...
    return std::move(layer);
}
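Because every entry in m_QuantizedLstmParameters is a std::shared_ptr<ConstTensorHandle> (see the reference list at the end of this section), cloning shares the constant weight data rather than deep-copying it. A minimal sketch of the copy step elided above:

    // Inside Clone(): shared_ptr copy semantics mean the clone and the
    // original layer reference the same immutable constant storage.
    layer->m_QuantizedLstmParameters.m_InputToInputWeights =
        m_QuantizedLstmParameters.m_InputToInputWeights;
    // ... and likewise for the other eleven weight/bias handles ...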
std::vector<TensorShape> QuantizedLstmLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
    ARMNN_ASSERT(inputShapes.size() == 3);
    // inputShapes: { input, previousCellStateIn, previousOutputIn }
    unsigned int numBatches = inputShapes[0][0];
    unsigned int outputSize = inputShapes[1][1];

    std::vector<TensorShape> outShapes;
    outShapes.push_back(TensorShape({numBatches, outputSize})); // cellStateOut
    outShapes.push_back(TensorShape({numBatches, outputSize})); // output

    return outShapes;
}
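To make the shape arithmetic concrete, here is a worked example with hypothetical dimensions (numBatches = 2, inputSize = 4, outputSize = 16; the values are illustrative only):

#include <armnn/Tensor.hpp>
#include <iostream>
#include <vector>

int main()
{
    // Hypothetical input shapes, for illustration only:
    //   input:               [numBatches, inputSize]  = [2, 4]
    //   previousCellStateIn: [numBatches, outputSize] = [2, 16]
    //   previousOutputIn:    [numBatches, outputSize] = [2, 16]
    std::vector<armnn::TensorShape> inputShapes = { {2, 4}, {2, 16}, {2, 16} };

    // InferOutputShapes reads numBatches from inputShapes[0][0] and
    // outputSize from inputShapes[1][1] ...
    unsigned int numBatches = inputShapes[0][0];
    unsigned int outputSize = inputShapes[1][1];

    // ... so both inferred outputs, cellStateOut and output, are [2, 16].
    std::cout << numBatches << " x " << outputSize << "\n"; // prints "2 x 16"
    return 0;
}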
115 "QuantizedLstmLayer: m_QuantizedLstmParameters.m_InputToInputWeights should not be null.");
117 "QuantizedLstmLayer: m_QuantizedLstmParameters.m_InputToForgetWeights should not be null.");
119 "QuantizedLstmLayer: m_QuantizedLstmParameters.m_InputToCellWeights should not be null.");
121 "QuantizedLstmLayer: m_QuantizedLstmParameters.m_InputToOutputWeights should not be null.");
124 "QuantizedLstmLayer: m_QuantizedLstmParameters.m_RecurrentToInputWeights should not be null.");
126 "QuantizedLstmLayer: m_QuantizedLstmParameters.m_RecurrentToForgetWeights should not be null.");
128 "QuantizedLstmLayer: m_QuantizedLstmParameters.m_RecurrentToCellWeights should not be null.");
130 "QuantizedLstmLayer: m_QuantizedLstmParameters.m_RecurrentToOutputWeights should not be null.");
133 "QuantizedLstmLayer: m_QuantizedLstmParameters.m_InputGateBias should not be null.");
135 "QuantizedLstmLayer: m_QuantizedLstmParameters.m_ForgetGateBias should not be null.");
137 "QuantizedLstmLayer: m_QuantizedLstmParameters.m_CellBias should not be null.");
139 "QuantizedLstmLayer: m_QuantizedLstmParameters.m_OutputGateBias should not be null.");
147 "QuantizedLstmLayer",
void QuantizedLstmLayer::Accept(ILayerVisitor& visitor) const
{
    QuantizedLstmInputParams inputParams;

    // Each constant handle is mapped via a ManagedConstTensorHandle (which
    // unmaps again when it goes out of scope) and exposed to the visitor as
    // a ConstTensor. The same pattern repeats for all twelve parameters.
    ManagedConstTensorHandle managedInputToInputWeights(m_QuantizedLstmParameters.m_InputToInputWeights);
    ConstTensor inputToInputWeightsTensor;
    if (m_QuantizedLstmParameters.m_InputToInputWeights != nullptr)
    {
        ConstTensor inputToInputWeightsTensorCopy(managedInputToInputWeights.GetTensorInfo(),
                                                  managedInputToInputWeights.Map());
        inputToInputWeightsTensor = inputToInputWeightsTensorCopy;
        inputParams.m_InputToInputWeights = &inputToInputWeightsTensor;
    }
    // ... likewise for m_InputToForgetWeights, m_InputToCellWeights,
    //     m_InputToOutputWeights, the four m_RecurrentTo*Weights, and
    //     m_InputGateBias, m_ForgetGateBias, m_CellBias, m_OutputGateBias ...

    visitor.VisitQuantizedLstmLayer(this, inputParams, GetName());
}
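A minimal consumer sketch for this deprecated visitor path, assuming armnn::LayerVisitorBase<armnn::VisitorNoThrowPolicy> is available to supply no-op defaults for every other layer type:

#include <armnn/LayerVisitorBase.hpp>
#include <armnn/QuantizedLstmParams.hpp>
#include <iostream>

// Sketch: a visitor that inspects only QuantizedLstm layers.
class QuantizedLstmInspector : public armnn::LayerVisitorBase<armnn::VisitorNoThrowPolicy>
{
public:
    void VisitQuantizedLstmLayer(const armnn::IConnectableLayer* /*layer*/,
                                 const armnn::QuantizedLstmInputParams& params,
                                 const char* name) override
    {
        std::cout << "QuantizedLstm: " << (name ? name : "<unnamed>") << "\n";
        if (params.m_InputToInputWeights != nullptr)
        {
            std::cout << "  inputToInputWeights elements: "
                      << params.m_InputToInputWeights->GetNumElements() << "\n";
        }
    }
};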
void QuantizedLstmLayer::ExecuteStrategy(IStrategy& strategy) const
{
    std::vector<ConstTensor> constTensors;

    // Map each non-null constant handle and append it to constTensors in a
    // fixed order; the pattern repeats for all twelve parameters.
    ManagedConstTensorHandle managedInputToInputWeights(m_QuantizedLstmParameters.m_InputToInputWeights);
    if (m_QuantizedLstmParameters.m_InputToInputWeights != nullptr)
    {
        constTensors.emplace_back(ConstTensor(managedInputToInputWeights.GetTensorInfo(),
                                              managedInputToInputWeights.Map()));
    }
    // ... likewise for the remaining InputTo* and RecurrentTo* weights and
    //     the four gate biases ...

    strategy.ExecuteStrategy(this, BaseDescriptor(), constTensors, GetName());
}
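The strategy path hands the consumer the layer itself, an empty BaseDescriptor, the mapped constants and the layer name. A minimal consumer sketch, assuming the IStrategy interface declared in armnn/IStrategy.hpp with the pure-virtual signature listed in the reference section below:

#include <armnn/IStrategy.hpp>
#include <iostream>

// Sketch: counts the constant tensors each visited layer carries.
class ConstantCounter : public armnn::IStrategy
{
public:
    void ExecuteStrategy(const armnn::IConnectableLayer* /*layer*/,
                         const armnn::BaseDescriptor& /*descriptor*/,
                         const std::vector<armnn::ConstTensor>& constants,
                         const char* name,
                         const armnn::LayerBindingId /*id*/) override
    {
        std::cout << (name ? name : "<unnamed>") << ": "
                  << constants.size() << " constant tensor(s)\n";
    }
};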
Referenced symbols (briefs collected from the surrounding headers):

QuantizedLstmLayer: this layer represents a QuantizedLstm operation.
- QuantizedLstmLayer(const char* name): constructor to create a QuantizedLstmLayer.
- QuantizedLstmLayer* Clone(Graph& graph) const override: creates a dynamically-allocated copy of this layer.
- virtual std::unique_ptr<IWorkload> CreateWorkload(const IWorkloadFactory& factory) const override: makes a workload for the QuantizedLstm type.
- std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override: by default returns inputShapes if the number of inputs equals the number of outputs; otherwise infers the output shapes from the given input shapes and layer properties.
- void ValidateTensorShapesFromInputs() override: checks whether the input tensor shape(s) will lead to a valid configuration of QuantizedLstmLayer.
- Layer::ConstantTensors GetConstantTensorsByRef() override: retrieves the handles to the constant values stored by the layer.
- ARMNN_NO_DEPRECATE_WARN_BEGIN void Accept(ILayerVisitor& visitor) const override: applies a visitor to this layer.
- ARMNN_NO_DEPRECATE_WARN_END void ExecuteStrategy(IStrategy& strategy) const override: applies a visitor (strategy) to this layer.
- QuantizedLstmParameters m_QuantizedLstmParameters: the layer's constant parameters (see below).
- const char* GetName() const override: returns the name of the layer.
- const InputSlot& GetInputSlot(unsigned int index) const override: gets a const input slot handle by slot index.
- const OutputSlot& GetOutputSlot(unsigned int index = 0) const override: gets the const output slot handle by slot index.

QuantizedLstmParameters: every member is a std::shared_ptr<ConstTensorHandle> pointing at immutable constant data.
- m_InputToInputWeights, m_InputToForgetWeights, m_InputToCellWeights, m_InputToOutputWeights: 2D weights tensors with dimensions [outputSize, inputSize] (QAsymm8).
- m_RecurrentToInputWeights, m_RecurrentToForgetWeights, m_RecurrentToCellWeights, m_RecurrentToOutputWeights: 2D weights tensors with dimensions [outputSize, outputSize] (QAsymm8).
- m_InputGateBias, m_ForgetGateBias, m_CellBias, m_OutputGateBias: 1D bias tensors with dimensions [outputSize] (int32).

QuantizedLstmQueueDescriptor: mirrors the same twelve parameters as const ConstTensorHandle* members (m_InputToInputWeights through m_OutputGateBias).

Related declarations:
- ConstTensor: a tensor defined by a TensorInfo (shape and data type) and an immutable backing store.
- Layer::ConstantTensors: std::vector<std::reference_wrapper<std::shared_ptr<ConstTensorHandle>>>.
- ManagedConstTensorHandle: RAII managed resource that unmaps its MemoryArea once out of scope; provides const void* Map(bool blocking = true).
- GetTensorInfo() accessors on OutputSlot, ITensorHandle and ConstTensorHandle, plus the free helper const TensorInfo& GetTensorInfo(const ITensorHandle* tensorHandle).
- const TensorShape& GetShape() const
- BaseDescriptor: base class for all descriptors.
- virtual void IStrategy::ExecuteStrategy(const armnn::IConnectableLayer* layer, const armnn::BaseDescriptor& descriptor, const std::vector<armnn::ConstTensor>& constants, const char* name, const armnn::LayerBindingId id = 0) = 0
- void VerifyLayerConnections(unsigned int expectedConnections, const CheckLocation& location) const
- void VerifyShapeInferenceType(const TensorShape& outputShape, ShapeInferenceMethod shapeInferenceMethod)
- void ValidateAndCopyShape(const TensorShape& outputShape, const TensorShape& inferredShape, const ShapeInferenceMethod shapeInferenceMethod, const std::string& layerName, const unsigned int outputSlotIndex = 0)
- WorkloadInfo PrepInfoAndDesc(QueueDescriptor& descriptor) const: helper function to reduce duplication in *LayerCreateWorkload.
- void SetAdditionalInfo(QueueDescriptor& descriptor) const
- virtual std::unique_ptr<IWorkload> CreateWorkload(LayerType type, const QueueDescriptor& descriptor, const WorkloadInfo& info) const
- ShapeInferenceMethod m_ShapeInferenceMethod
- LayerType: when adding a new layer, also adapt the LastLayer value in the LayerType enum class.
- ARMNN_ASSERT(COND) and ARMNN_ASSERT_MSG(COND, MSG): assertion macros used by the validation above.
- ARMNN_NO_DEPRECATE_WARN_BEGIN / ARMNN_NO_DEPRECATE_WARN_END: suppress deprecation warnings around the deprecated Accept path.
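Putting it together, a hedged sketch of how an application typically creates this layer through the public INetwork API; inputToInputWeights and the layer name here are placeholders, and the twelve ConstTensor objects are assumed to have been built elsewhere:

#include <armnn/INetwork.hpp>
#include <armnn/QuantizedLstmParams.hpp>

// Sketch: inputToInputWeights etc. are pre-built armnn::ConstTensor objects
// (QAsymm8 weights, Signed32 biases) created elsewhere.
armnn::INetworkPtr network = armnn::INetwork::Create();

armnn::QuantizedLstmInputParams params;
params.m_InputToInputWeights = &inputToInputWeights; // [outputSize, inputSize]
// ... set the remaining eleven weight/bias pointers the same way ...

armnn::IConnectableLayer* lstm =
    network->AddQuantizedLstmLayer(params, "quantizedLstm");
// The layer's three inputs are then connected to input, previousCellStateIn
// and previousOutputIn, and its two outputs to cellStateOut and output.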