ArmNN 22.02
UnidirectionalSequenceLstmQueueDescriptor Struct Reference

#include <WorkloadData.hpp>

Inheritance diagram for UnidirectionalSequenceLstmQueueDescriptor (base to derived):
QueueDescriptor → QueueDescriptorWithParameters< LstmDescriptor > → UnidirectionalSequenceLstmQueueDescriptor

Public Member Functions

 UnidirectionalSequenceLstmQueueDescriptor ()
 
void Validate (const WorkloadInfo &workloadInfo) const
 
- Public Member Functions inherited from QueueDescriptorWithParameters< LstmDescriptor >
virtual ~QueueDescriptorWithParameters ()=default
 
- Public Member Functions inherited from QueueDescriptor
virtual ~QueueDescriptor ()=default
 
void ValidateInputsOutputs (const std::string &descName, unsigned int numExpectedIn, unsigned int numExpectedOut) const
 
template<typename T >
const T * GetAdditionalInformation () const
 

Public Attributes

const ConstTensorHandle * m_InputToInputWeights
 
const ConstTensorHandle * m_InputToForgetWeights
 
const ConstTensorHandle * m_InputToCellWeights
 
const ConstTensorHandle * m_InputToOutputWeights
 
const ConstTensorHandle * m_RecurrentToInputWeights
 
const ConstTensorHandle * m_RecurrentToForgetWeights
 
const ConstTensorHandle * m_RecurrentToCellWeights
 
const ConstTensorHandle * m_RecurrentToOutputWeights
 
const ConstTensorHandle * m_CellToInputWeights
 
const ConstTensorHandle * m_CellToForgetWeights
 
const ConstTensorHandle * m_CellToOutputWeights
 
const ConstTensorHandle * m_InputGateBias
 
const ConstTensorHandle * m_ForgetGateBias
 
const ConstTensorHandle * m_CellBias
 
const ConstTensorHandle * m_OutputGateBias
 
const ConstTensorHandle * m_ProjectionWeights
 
const ConstTensorHandle * m_ProjectionBias
 
const ConstTensorHandle * m_InputLayerNormWeights
 
const ConstTensorHandle * m_ForgetLayerNormWeights
 
const ConstTensorHandle * m_CellLayerNormWeights
 
const ConstTensorHandle * m_OutputLayerNormWeights
 
- Public Attributes inherited from QueueDescriptorWithParameters< LstmDescriptor >
LstmDescriptor m_Parameters
 
- Public Attributes inherited from QueueDescriptor
std::vector< ITensorHandle * > m_Inputs
 
std::vector< ITensorHandle * > m_Outputs
 
void * m_AdditionalInfoObject
 

Additional Inherited Members

- Protected Member Functions inherited from QueueDescriptorWithParameters< LstmDescriptor >
 QueueDescriptorWithParameters ()=default
 
 QueueDescriptorWithParameters (QueueDescriptorWithParameters const &)=default
 
QueueDescriptorWithParameters & operator= (QueueDescriptorWithParameters const &)=default
 
- Protected Member Functions inherited from QueueDescriptor
 QueueDescriptor ()
 
 QueueDescriptor (QueueDescriptor const &)=default
 
QueueDescriptor & operator= (QueueDescriptor const &)=default
 

Detailed Description

Definition at line 714 of file WorkloadData.hpp.
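
The descriptor carries the constant weight and bias tensor handles for a single-direction sequence LSTM workload. The constructor sets every handle to nullptr; Validate() then requires only the members that the enabled features in m_Parameters (CIFG, peephole, projection, layer normalisation) call for. The sketch below is illustrative rather than taken from the ArmNN sources: it populates the mandatory members of a CIFG-style Float32 LSTM, assuming the caller has already created the ConstTensorHandle objects elsewhere.

#include <WorkloadData.hpp>

// Illustrative sketch: wire up the mandatory members for a CIFG-LSTM
// (no input gate, no peephole, no projection, no layer normalisation).
// The ConstTensorHandle arguments are assumed to be built by the caller.
armnn::UnidirectionalSequenceLstmQueueDescriptor MakeCifgLstmDescriptor(
    const armnn::ConstTensorHandle* inputToForgetWeights,
    const armnn::ConstTensorHandle* inputToCellWeights,
    const armnn::ConstTensorHandle* inputToOutputWeights,
    const armnn::ConstTensorHandle* recurrentToForgetWeights,
    const armnn::ConstTensorHandle* recurrentToCellWeights,
    const armnn::ConstTensorHandle* recurrentToOutputWeights,
    const armnn::ConstTensorHandle* forgetGateBias,
    const armnn::ConstTensorHandle* cellBias,
    const armnn::ConstTensorHandle* outputGateBias)
{
    armnn::UnidirectionalSequenceLstmQueueDescriptor descriptor;
    descriptor.m_Parameters.m_CifgEnabled = true;   // coupled input & forget gate
    descriptor.m_Parameters.m_TimeMajor   = false;  // inputs laid out [batch, time, feature]

    descriptor.m_InputToForgetWeights     = inputToForgetWeights;
    descriptor.m_InputToCellWeights       = inputToCellWeights;
    descriptor.m_InputToOutputWeights     = inputToOutputWeights;
    descriptor.m_RecurrentToForgetWeights = recurrentToForgetWeights;
    descriptor.m_RecurrentToCellWeights   = recurrentToCellWeights;
    descriptor.m_RecurrentToOutputWeights = recurrentToOutputWeights;
    descriptor.m_ForgetGateBias           = forgetGateBias;
    descriptor.m_CellBias                 = cellBias;
    descriptor.m_OutputGateBias           = outputGateBias;
    return descriptor;
}

Validate() is then called with a WorkloadInfo describing the three inputs and the single output; see the member function documentation below.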

Constructor & Destructor Documentation

◆ UnidirectionalSequenceLstmQueueDescriptor()

Definition at line 716 of file WorkloadData.hpp.

716  UnidirectionalSequenceLstmQueueDescriptor()
717  : m_InputToInputWeights(nullptr)
718  , m_InputToForgetWeights(nullptr)
719  , m_InputToCellWeights(nullptr)
720  , m_InputToOutputWeights(nullptr)
721  , m_RecurrentToInputWeights(nullptr)
722  , m_RecurrentToForgetWeights(nullptr)
723  , m_RecurrentToCellWeights(nullptr)
724  , m_RecurrentToOutputWeights(nullptr)
725  , m_CellToInputWeights(nullptr)
726  , m_CellToForgetWeights(nullptr)
727  , m_CellToOutputWeights(nullptr)
728  , m_InputGateBias(nullptr)
729  , m_ForgetGateBias(nullptr)
730  , m_CellBias(nullptr)
731  , m_OutputGateBias(nullptr)
732  , m_ProjectionWeights(nullptr)
733  , m_ProjectionBias(nullptr)
734  , m_InputLayerNormWeights(nullptr)
735  , m_ForgetLayerNormWeights(nullptr)
736  , m_CellLayerNormWeights(nullptr)
737  , m_OutputLayerNormWeights(nullptr)
738  {
739  }

Member Function Documentation

◆ Validate()

void Validate (const WorkloadInfo &workloadInfo) const

Definition at line 3816 of file WorkloadData.cpp.

References armnn::Float32, WorkloadInfo::m_InputTensorInfos, and WorkloadInfo::m_OutputTensorInfos.

3817 {
3818  // Modified from LstmQueueDescriptor::Validate to support UnidirectionalSequenceLstm
3819 
3820  const std::string descriptorName{"UnidirectionalSequenceLstmQueueDescriptor"};
3821 
3822  // check dimensions of all inputs and outputs
3823  if (workloadInfo.m_InputTensorInfos.size() != 3)
3824  {
3825  throw InvalidArgumentException(descriptorName + ": Invalid number of inputs.");
3826  }
3827  if (workloadInfo.m_OutputTensorInfos.size() != 1)
3828  {
3829  throw InvalidArgumentException(descriptorName + ": Invalid number of outputs.");
3830  }
3831 
3832  std::vector<DataType> supportedTypes =
3833  {
3834      DataType::Float32
3835  };
3836 
3837  // check for supported type of one input and match them with all the other input and output
3838  ValidateDataTypes(workloadInfo.m_InputTensorInfos[0], supportedTypes, descriptorName);
3839 
3840  // type matches all other inputs
3841  for (uint32_t i = 1u; i < workloadInfo.m_InputTensorInfos.size(); ++i)
3842  {
3843  ValidateTensorDataTypesMatch(workloadInfo.m_InputTensorInfos[0],
3844  workloadInfo.m_InputTensorInfos[i],
3845  descriptorName,
3846  "input_0",
3847  "input_" + std::to_string(i));
3848  }
3849  // type matches all other outputs
3850  for (uint32_t i = 0u; i < workloadInfo.m_OutputTensorInfos.size(); ++i)
3851  {
3852  ValidateTensorDataTypesMatch(workloadInfo.m_InputTensorInfos[0],
3853  workloadInfo.m_OutputTensorInfos[i],
3854  "LstmQueueDescriptor",
3855  "input_0",
3856  "output_" + std::to_string(i));
3857  }
3858 
3859  // Making sure clipping parameters have valid values.
3860  // == 0 means no clipping
3861  // > 0 means clipping
3862  if (m_Parameters.m_ClippingThresCell < 0.0f)
3863  {
3864  throw InvalidArgumentException(descriptorName + ": negative cell clipping threshold is invalid");
3865  }
3866  if (m_Parameters.m_ClippingThresProj < 0.0f)
3867  {
3868  throw InvalidArgumentException(descriptorName + ": negative projection clipping threshold is invalid");
3869  }
3870 
3871  unsigned int batchIndx = 0;
3872  unsigned int inputIndx = 1;
3873  uint32_t timeStep = 1;
3874  unsigned int timeIndx = 1;
3875  inputIndx = 2;
3876  if (m_Parameters.m_TimeMajor)
3877  {
3878  batchIndx = 1;
3879  timeIndx = 0;
3880 
3881  }
3882  timeStep = workloadInfo.m_InputTensorInfos[0].GetShape()[timeIndx];
3883 
3884  // Inferring batch size, number of outputs and number of cells from the inputs.
3885  const uint32_t n_input = workloadInfo.m_InputTensorInfos[0].GetShape()[inputIndx];
3886  const uint32_t n_batch = workloadInfo.m_InputTensorInfos[0].GetShape()[batchIndx];
3887  ValidatePointer(m_InputToOutputWeights, "Null pointer check", "InputToOutputWeights");
3888  const uint32_t n_cell = m_InputToOutputWeights->GetShape()[0];
3889  ValidatePointer(m_RecurrentToOutputWeights, "Null pointer check", "RecurrentToOutputWeights");
3890  const uint32_t n_output = m_RecurrentToOutputWeights->GetShape()[1];
3891 
3892  // input tensor
3893  ValidateTensorNumDimNumElem(workloadInfo.m_InputTensorInfos[0], 3, (timeStep * n_batch * n_input),
3894  descriptorName + " input_0");
3895  // outputStateInTensor
3896  ValidateTensorNumDimNumElem(workloadInfo.m_InputTensorInfos[1], 2, (n_batch * n_output),
3897  descriptorName + " input_1");
3898  // outputStateInTensor
3899  ValidateTensorNumDimNumElem(workloadInfo.m_InputTensorInfos[2], 2, (n_batch * n_cell),
3900  descriptorName + " input_2");
3901 
3902  // outputTensor
3903  ValidateTensorNumDimNumElem(workloadInfo.m_OutputTensorInfos[0], 3, (timeStep * n_batch * n_output),
3904  descriptorName + " output_0");
3905 
3906  // check that dimensions of inputs/outputs and QueueDescriptor data match with each other
3907  if ( m_InputToInputWeights )
3908  {
3909  ValidateTensorNumDimNumElem(m_InputToInputWeights->GetTensorInfo(), 2,
3910  (n_cell * n_input), "InputLayerNormWeights");
3911  }
3912 
3913  ValidatePointer(m_InputToForgetWeights, "Null pointer check", "InputToForgetWeights");
3914  ValidateTensorNumDimNumElem(m_InputToForgetWeights->GetTensorInfo(), 2,
3915  (n_cell * n_input), "InputToForgetWeights");
3916 
3917  ValidatePointer(m_InputToCellWeights, "Null pointer check", "InputToCellWeights");
3918  ValidateTensorNumDimNumElem(m_InputToCellWeights->GetTensorInfo(), 2,
3919  (n_cell * n_input), "InputToCellWeights");
3920 
3921  if (m_RecurrentToInputWeights)
3922  {
3923  ValidateTensorNumDimNumElem(m_RecurrentToInputWeights->GetTensorInfo(), 2,
3924  (n_cell * n_output), "RecurrentToInputWeights");
3925  }
3926 
3927  ValidatePointer(m_RecurrentToForgetWeights, "Null pointer check", "RecurrentToForgetWeights");
3928  ValidateTensorNumDimNumElem(m_RecurrentToForgetWeights->GetTensorInfo(), 2,
3929  (n_cell * n_output), "RecurrentToForgetWeights");
3930 
3931  ValidatePointer(m_RecurrentToCellWeights, "Null pointer check", "RecurrentToCellWeights");
3932  ValidateTensorNumDimNumElem(m_RecurrentToCellWeights->GetTensorInfo(), 2,
3933  (n_cell * n_output), "RecurrentToCellWeights");
3934 
3935  // Make sure the input-gate's parameters are either both present (regular
3936  // LSTM) or not at all (CIFG-LSTM). And CifgEnable is set accordingly.
3937  bool cifg_weights_all_or_none = ((m_InputToInputWeights && m_RecurrentToInputWeights &&
3938                                    !m_Parameters.m_CifgEnabled) ||
3939                                   (!m_InputToInputWeights && !m_RecurrentToInputWeights &&
3940                                    m_Parameters.m_CifgEnabled));
3941  if (!cifg_weights_all_or_none)
3942  {
3943  throw InvalidArgumentException(descriptorName + ": Input-Gate's parameters InputToInputWeights and "
3944  "RecurrentToInputWeights must either both be present (regular LSTM) "
3945  "or both not present (CIFG-LSTM). In addition CifgEnable must be set "
3946  "accordingly.");
3947  }
3948 
3949  if ( m_CellToInputWeights )
3950  {
3951  ValidateTensorNumDimNumElem(m_CellToInputWeights->GetTensorInfo(), 1,
3952  n_cell, "CellToInputWeights");
3953  }
3954  if ( m_CellToForgetWeights )
3955  {
3956  ValidateTensorNumDimNumElem(m_CellToForgetWeights->GetTensorInfo(), 1,
3957  n_cell, "CellToForgetWeights");
3958  }
3959  if ( m_CellToOutputWeights )
3960  {
3961  ValidateTensorNumDimNumElem(m_CellToOutputWeights->GetTensorInfo(), 1,
3962  n_cell, "CellToOutputWeights");
3963  }
3964 
3965  // Making sure the peephole weights are there all or none. And PeepholeEnable is set accordingly.
3966  bool peephole_weights_all_or_none =
3967      (((m_CellToInputWeights || m_Parameters.m_CifgEnabled) && m_CellToForgetWeights
3968      && m_CellToOutputWeights && m_Parameters.m_PeepholeEnabled)
3969      || (!m_CellToInputWeights && !m_CellToForgetWeights
3970      && !m_CellToOutputWeights && !m_Parameters.m_PeepholeEnabled));
3971  if (!peephole_weights_all_or_none)
3972  {
3973  throw InvalidArgumentException(descriptorName + ": Invalid combination of peephole parameters.");
3974  }
3975 
3976  // Make sure the input gate bias is present only when not a CIFG-LSTM.
3977  if (m_Parameters.m_CifgEnabled)
3978  {
3979  if (m_InputGateBias)
3980  {
3981  throw InvalidArgumentException(descriptorName + ": InputGateBias is present and CIFG-LSTM is enabled.");
3982  }
3983  }
3984  else
3985  {
3986  if (!m_InputGateBias)
3987  {
3988  throw InvalidArgumentException(descriptorName + ": If CIFG-LSTM is disabled InputGateBias "
3989  "must be present.");
3990  }
3991  ValidateTensorNumDimNumElem(m_InputGateBias->GetTensorInfo(), 1,
3992  n_cell, "InputGateBias");
3993  }
3994 
3995  ValidatePointer(m_ForgetGateBias, "Null pointer check", "ForgetGateBias");
3996  ValidateTensorNumDimNumElem(m_ForgetGateBias->GetTensorInfo(), 1, n_cell, "ForgetGateBias");
3997 
3998  ValidatePointer(m_CellBias, "Null pointer check", "CellBias");
3999  ValidateTensorNumDimNumElem(m_CellBias->GetTensorInfo(), 1, n_cell, "CellBias");
4000 
4001  ValidatePointer(m_OutputGateBias, "Null pointer check", "OutputGateBias");
4002  ValidateTensorNumDimNumElem(m_OutputGateBias->GetTensorInfo(), 1, n_cell, "OutputGateBias");
4003 
4004  if (m_ProjectionWeights)
4005  {
4006  ValidateTensorNumDimNumElem(m_ProjectionWeights->GetTensorInfo(), 2,
4007  (n_cell * n_output), "ProjectionWeights");
4008  }
4009  if (m_ProjectionBias)
4010  {
4011  ValidateTensorNumDimNumElem(m_ProjectionBias->GetTensorInfo(), 1, n_output, "ProjectionBias");
4012  }
4013 
4014  // Making sure the projection tensors are consistent:
4015  // 1) If projection weight is not present, then projection bias should not be
4016  // present.
4017  // 2) If projection weight is present, then projection bias is optional.
4018  bool projecton_tensors_consistent = ((!m_ProjectionWeights && !m_ProjectionBias &&
4019                                        !m_Parameters.m_ProjectionEnabled)
4020                                       || (m_ProjectionWeights && !m_ProjectionBias &&
4021                                           m_Parameters.m_ProjectionEnabled)
4022                                       || (m_ProjectionWeights && m_ProjectionBias &&
4023                                           m_Parameters.m_ProjectionEnabled));
4024  if (!projecton_tensors_consistent)
4025  {
4026  throw InvalidArgumentException(descriptorName + ": Projection tensors are inconsistent.");
4027  }
4028 
4029  // The four layer normalization weights either all have values or none of them have values. Additionally, if
4030  // CIFG is used, input layer normalization weights tensor is omitted and the other layer normalization weights
4031  // either all have values or none of them have values. Layer normalization is used when the values of all the
4032  // layer normalization weights are present
4033  if (m_InputLayerNormWeights)
4034  {
4035  ValidateTensorNumDimNumElem(m_InputLayerNormWeights->GetTensorInfo(), 1, n_cell, "InputLayerNormWeights");
4036  }
4037  if (m_ForgetLayerNormWeights)
4038  {
4039  ValidateTensorNumDimNumElem(m_ForgetLayerNormWeights->GetTensorInfo(), 1, n_cell, "ForgetLayerNormWeights");
4040  }
4041  if (m_CellLayerNormWeights)
4042  {
4043  ValidateTensorNumDimNumElem(m_CellLayerNormWeights->GetTensorInfo(), 1, n_cell, "CellLayerNormWeights");
4044  }
4045  if (m_OutputLayerNormWeights)
4046  {
4047  ValidateTensorNumDimNumElem(m_OutputLayerNormWeights->GetTensorInfo(), 1, n_cell, "OutputLayerNormWeights");
4048  }
4049 
4050  if (m_Parameters.m_LayerNormEnabled)
4051  {
4052      if (!m_Parameters.m_CifgEnabled)
4053      {
4054          if (!m_InputLayerNormWeights)
4055          {
4056  throw InvalidArgumentException(descriptorName + ": Layer normalisation is enabled and CIFG-LSTM is "
4057  "disabled but InputLayerNormWeights are not present");
4058  }
4059  ValidateTensorNumDimNumElem(m_InputLayerNormWeights->GetTensorInfo(),
4060  1, n_cell, "InputLayerNormWeights");
4061  }
4062  else if (m_InputLayerNormWeights)
4063  {
4064  throw InvalidArgumentException(descriptorName + ":InputLayerNormWeights are present while CIFG is "
4065  "enabled");
4066  }
4067 
4068  ValidatePointer(m_ForgetLayerNormWeights, "Null pointer check layer normalisation enabled",
4069  "ForgetLayerNormWeights");
4070  ValidateTensorNumDimNumElem(m_ForgetLayerNormWeights->GetTensorInfo(), 1, n_cell, "ForgetLayerNormWeights");
4071 
4072  ValidatePointer(m_OutputLayerNormWeights, "Null pointer check layer normalisation enabled",
4073  "OutputLayerNormWeights");
4074  ValidateTensorNumDimNumElem(m_OutputLayerNormWeights->GetTensorInfo(), 1, n_cell, "OutputLayerNormWeights");
4075 
4076  ValidatePointer(m_CellLayerNormWeights, "Null pointer check layer normalisation enabled",
4077  "CellLayerNormWeights");
4078  ValidateTensorNumDimNumElem(m_CellLayerNormWeights->GetTensorInfo(), 1, n_cell, "CellLayerNormWeights");
4079  }
4080  else if (m_InputLayerNormWeights || m_ForgetLayerNormWeights || m_CellLayerNormWeights || m_OutputLayerNormWeights)
4081  {
4082  throw InvalidArgumentException(descriptorName + ": Layer normalisation is disabled but one or more layer "
4083  "normalisation weights are present.");
4084  }
4085 }
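
As a usage illustration, the shapes below satisfy the dimension and element-count checks above for the batch-major case (m_Parameters.m_TimeMajor == false). The dimensions and the helper name MakeExampleWorkloadInfo are hypothetical, chosen only for this example; this is not code from the ArmNN sources.

#include <WorkloadData.hpp>

// Hypothetical dimensions: batch of 2, sequence length 5, 3 input features,
// 4 cells and 4 outputs (no projection layer).
armnn::WorkloadInfo MakeExampleWorkloadInfo()
{
    using namespace armnn;

    const unsigned int n_batch  = 2;
    const unsigned int timeStep = 5;
    const unsigned int n_input  = 3;
    const unsigned int n_cell   = 4;
    const unsigned int n_output = 4;

    WorkloadInfo info;
    // input_0: input sequence, 3D with timeStep * n_batch * n_input elements
    info.m_InputTensorInfos.push_back(TensorInfo({ n_batch, timeStep, n_input }, DataType::Float32));
    // input_1: output state in, 2D with n_batch * n_output elements
    info.m_InputTensorInfos.push_back(TensorInfo({ n_batch, n_output }, DataType::Float32));
    // input_2: cell state in, 2D with n_batch * n_cell elements
    info.m_InputTensorInfos.push_back(TensorInfo({ n_batch, n_cell }, DataType::Float32));
    // output_0: output sequence, 3D with timeStep * n_batch * n_output elements
    info.m_OutputTensorInfos.push_back(TensorInfo({ n_batch, timeStep, n_output }, DataType::Float32));
    return info;
}

With m_TimeMajor set to true, the batch and time dimensions of input_0 and output_0 swap, i.e. [timeStep, n_batch, n_input] and [timeStep, n_batch, n_output].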

Member Data Documentation

◆ m_CellBias

◆ m_CellLayerNormWeights

◆ m_CellToForgetWeights

◆ m_CellToInputWeights

◆ m_CellToOutputWeights

◆ m_ForgetGateBias

◆ m_ForgetLayerNormWeights

◆ m_InputGateBias

◆ m_InputLayerNormWeights

◆ m_InputToCellWeights

◆ m_InputToForgetWeights

◆ m_InputToInputWeights

◆ m_InputToOutputWeights

◆ m_OutputGateBias

◆ m_OutputLayerNormWeights

◆ m_ProjectionBias

◆ m_ProjectionWeights

◆ m_RecurrentToCellWeights

◆ m_RecurrentToForgetWeights

◆ m_RecurrentToInputWeights

◆ m_RecurrentToOutputWeights


The documentation for this struct was generated from the following files:
WorkloadData.hpp
WorkloadData.cpp