ArmNN
 21.11
UnidirectionalSequenceLstmQueueDescriptor Struct Reference

#include <WorkloadData.hpp>

Inheritance diagram for UnidirectionalSequenceLstmQueueDescriptor:
UnidirectionalSequenceLstmQueueDescriptor derives from QueueDescriptorWithParameters< LstmDescriptor >, which derives from QueueDescriptor.

Public Member Functions

 UnidirectionalSequenceLstmQueueDescriptor ()
 
void Validate (const WorkloadInfo &workloadInfo) const
 
- Public Member Functions inherited from QueueDescriptor
void ValidateInputsOutputs (const std::string &descName, unsigned int numExpectedIn, unsigned int numExpectedOut) const
 
template<typename T >
const T * GetAdditionalInformation () const
 

Public Attributes

const ConstTensorHandle * m_InputToInputWeights
 
const ConstTensorHandle * m_InputToForgetWeights
 
const ConstTensorHandle * m_InputToCellWeights
 
const ConstTensorHandle * m_InputToOutputWeights
 
const ConstTensorHandle * m_RecurrentToInputWeights
 
const ConstTensorHandle * m_RecurrentToForgetWeights
 
const ConstTensorHandle * m_RecurrentToCellWeights
 
const ConstTensorHandle * m_RecurrentToOutputWeights
 
const ConstTensorHandle * m_CellToInputWeights
 
const ConstTensorHandle * m_CellToForgetWeights
 
const ConstTensorHandle * m_CellToOutputWeights
 
const ConstTensorHandle * m_InputGateBias
 
const ConstTensorHandle * m_ForgetGateBias
 
const ConstTensorHandle * m_CellBias
 
const ConstTensorHandle * m_OutputGateBias
 
const ConstTensorHandle * m_ProjectionWeights
 
const ConstTensorHandle * m_ProjectionBias
 
const ConstTensorHandle * m_InputLayerNormWeights
 
const ConstTensorHandle * m_ForgetLayerNormWeights
 
const ConstTensorHandle * m_CellLayerNormWeights
 
const ConstTensorHandle * m_OutputLayerNormWeights
 
- Public Attributes inherited from QueueDescriptorWithParameters< LstmDescriptor >
LstmDescriptor m_Parameters
 
- Public Attributes inherited from QueueDescriptor
std::vector< ITensorHandle * > m_Inputs
 
std::vector< ITensorHandle * > m_Outputs
 
void * m_AdditionalInfoObject
 

Additional Inherited Members

- Protected Member Functions inherited from QueueDescriptorWithParameters< LstmDescriptor >
 ~QueueDescriptorWithParameters ()=default
 
 QueueDescriptorWithParameters ()=default
 
 QueueDescriptorWithParameters (QueueDescriptorWithParameters const &)=default
 
QueueDescriptorWithParameters & operator= (QueueDescriptorWithParameters const &)=default
 
- Protected Member Functions inherited from QueueDescriptor
 ~QueueDescriptor ()=default
 
 QueueDescriptor ()
 
 QueueDescriptor (QueueDescriptor const &)=default
 
QueueDescriptor & operator= (QueueDescriptor const &)=default
 

Detailed Description

Queue descriptor for the UnidirectionalSequenceLstm workload. It carries the LSTM weight and bias tensors as ConstTensorHandle pointers, the layer parameters (LstmDescriptor) inherited through QueueDescriptorWithParameters< LstmDescriptor >, and the runtime input/output handles inherited from QueueDescriptor.

Definition at line 709 of file WorkloadData.hpp.
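
The sketch below is a rough, non-authoritative illustration of how such a descriptor might be filled in for the simplest configuration (CIFG enabled, no peephole, projection or layer normalisation), so that only the mandatory weight and bias handles are set and every other pointer keeps its nullptr default. It assumes armnn::ScopedTensorHandle and ConstTensor as found in the ArmNN 21.11 source tree (backendsCommon/TensorHandle.hpp); the MakeWeightHandle helper, the storage vector and all sizes are made up for illustration.

#include <armnn/Tensor.hpp>
#include <backendsCommon/TensorHandle.hpp>
#include <backendsCommon/WorkloadData.hpp>

#include <memory>
#include <vector>

using namespace armnn;

// Illustrative helper: wraps zero-initialised Float32 data of the given shape
// in a ScopedTensorHandle so the descriptor can point at it.
std::unique_ptr<ScopedTensorHandle> MakeWeightHandle(const TensorShape& shape)
{
    TensorInfo info(shape, DataType::Float32);
    std::vector<float> data(info.GetNumElements(), 0.0f);
    return std::make_unique<ScopedTensorHandle>(ConstTensor(info, data.data()));
}

void PopulateDescriptor(UnidirectionalSequenceLstmQueueDescriptor& desc,
                        std::vector<std::unique_ptr<ScopedTensorHandle>>& storage)
{
    const unsigned int numUnits   = 4; // n_cell
    const unsigned int inputSize  = 3; // n_input
    const unsigned int outputSize = 4; // n_output (== numUnits when projection is disabled)

    desc.m_Parameters.m_CifgEnabled       = true;  // coupled input/forget gate: input-gate tensors stay nullptr
    desc.m_Parameters.m_PeepholeEnabled   = false;
    desc.m_Parameters.m_ProjectionEnabled = false;
    desc.m_Parameters.m_LayerNormEnabled  = false;
    desc.m_Parameters.m_TimeMajor         = false; // input shaped [batch, time, inputSize]

    // Keep the handles alive in 'storage' and hand out raw pointers.
    auto addHandle = [&storage](const TensorShape& shape) -> const ConstTensorHandle*
    {
        storage.push_back(MakeWeightHandle(shape));
        return storage.back().get();
    };

    // Mandatory weights and biases only; optional members keep their nullptr default.
    desc.m_InputToForgetWeights     = addHandle({numUnits, inputSize});
    desc.m_InputToCellWeights       = addHandle({numUnits, inputSize});
    desc.m_InputToOutputWeights     = addHandle({numUnits, inputSize});
    desc.m_RecurrentToForgetWeights = addHandle({numUnits, outputSize});
    desc.m_RecurrentToCellWeights   = addHandle({numUnits, outputSize});
    desc.m_RecurrentToOutputWeights = addHandle({numUnits, outputSize});
    desc.m_ForgetGateBias           = addHandle({numUnits});
    desc.m_CellBias                 = addHandle({numUnits});
    desc.m_OutputGateBias           = addHandle({numUnits});
}

Validate(), documented below, is the check that would catch any mismatch between these handles, m_Parameters and the runtime tensor shapes passed in via WorkloadInfo.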

Constructor & Destructor Documentation

◆ UnidirectionalSequenceLstmQueueDescriptor()

Definition at line 711 of file WorkloadData.hpp.

711  UnidirectionalSequenceLstmQueueDescriptor()
712  : m_InputToInputWeights(nullptr)
713  , m_InputToForgetWeights(nullptr)
714  , m_InputToCellWeights(nullptr)
715  , m_InputToOutputWeights(nullptr)
716  , m_RecurrentToInputWeights(nullptr)
717  , m_RecurrentToForgetWeights(nullptr)
718  , m_RecurrentToCellWeights(nullptr)
719  , m_RecurrentToOutputWeights(nullptr)
720  , m_CellToInputWeights(nullptr)
721  , m_CellToForgetWeights(nullptr)
722  , m_CellToOutputWeights(nullptr)
723  , m_InputGateBias(nullptr)
724  , m_ForgetGateBias(nullptr)
725  , m_CellBias(nullptr)
726  , m_OutputGateBias(nullptr)
727  , m_ProjectionWeights(nullptr)
728  , m_ProjectionBias(nullptr)
729  , m_InputLayerNormWeights(nullptr)
730  , m_ForgetLayerNormWeights(nullptr)
731  , m_CellLayerNormWeights(nullptr)
732  , m_OutputLayerNormWeights(nullptr)
733  {
734  }

Member Function Documentation

◆ Validate()

void Validate ( const WorkloadInfo & workloadInfo ) const

Definition at line 3788 of file WorkloadData.cpp.

References armnn::Float32, WorkloadInfo::m_InputTensorInfos, and WorkloadInfo::m_OutputTensorInfos.

3789 {
3790  // Modified from LstmQueueDescriptor::Validate to support UnidirectionalSequenceLstm
3791 
3792  const std::string descriptorName{"UnidirectionalSequenceLstmQueueDescriptor"};
3793 
3794  // check dimensions of all inputs and outputs
3795  if (workloadInfo.m_InputTensorInfos.size() != 3)
3796  {
3797  throw InvalidArgumentException(descriptorName + ": Invalid number of inputs.");
3798  }
3799  if (workloadInfo.m_OutputTensorInfos.size() != 1)
3800  {
3801  throw InvalidArgumentException(descriptorName + ": Invalid number of outputs.");
3802  }
3803 
3804  std::vector<DataType> supportedTypes =
3805  {
3806  DataType::Float32
3807  };
3808 
3809  // check for supported type of one input and match them with all the other input and output
3810  ValidateDataTypes(workloadInfo.m_InputTensorInfos[0], supportedTypes, descriptorName);
3811 
3812  // type matches all other inputs
3813  for (uint32_t i = 1u; i < workloadInfo.m_InputTensorInfos.size(); ++i)
3814  {
3815  ValidateTensorDataTypesMatch(workloadInfo.m_InputTensorInfos[0],
3816  workloadInfo.m_InputTensorInfos[i],
3817  descriptorName,
3818  "input_0",
3819  "input_" + std::to_string(i));
3820  }
3821  // type matches all other outputs
3822  for (uint32_t i = 0u; i < workloadInfo.m_OutputTensorInfos.size(); ++i)
3823  {
3824  ValidateTensorDataTypesMatch(workloadInfo.m_InputTensorInfos[0],
3825  workloadInfo.m_OutputTensorInfos[i],
3826  "LstmQueueDescriptor",
3827  "input_0",
3828  "output_" + std::to_string(i));
3829  }
3830 
3831  // Making sure clipping parameters have valid values.
3832  // == 0 means no clipping
3833  // > 0 means clipping
3834  if (m_Parameters.m_ClippingThresCell < 0.0f)
3835  {
3836  throw InvalidArgumentException(descriptorName + ": negative cell clipping threshold is invalid");
3837  }
3838  if (m_Parameters.m_ClippingThresProj < 0.0f)
3839  {
3840  throw InvalidArgumentException(descriptorName + ": negative projection clipping threshold is invalid");
3841  }
3842 
3843  unsigned int batchIndx = 0;
3844  unsigned int inputIndx = 1;
3845  uint32_t timeStep = 1;
3846  unsigned int timeIndx = 1;
3847  inputIndx = 2;
3848  if (m_Parameters.m_TimeMajor)
3849  {
3850  batchIndx = 1;
3851  timeIndx = 0;
3852 
3853  }
3854  timeStep = workloadInfo.m_InputTensorInfos[0].GetShape()[timeIndx];
3855 
3856  // Inferring batch size, number of outputs and number of cells from the inputs.
3857  const uint32_t n_input = workloadInfo.m_InputTensorInfos[0].GetShape()[inputIndx];
3858  const uint32_t n_batch = workloadInfo.m_InputTensorInfos[0].GetShape()[batchIndx];
3859  ValidatePointer(m_InputToOutputWeights, "Null pointer check", "InputToOutputWeights");
3860  const uint32_t n_cell = m_InputToOutputWeights->GetShape()[0];
3861  ValidatePointer(m_RecurrentToOutputWeights, "Null pointer check", "RecurrentToOutputWeights");
3862  const uint32_t n_output = m_RecurrentToOutputWeights->GetShape()[1];
3863 
3864  // input tensor
3865  ValidateTensorNumDimNumElem(workloadInfo.m_InputTensorInfos[0], 3, (timeStep * n_batch * n_input),
3866  descriptorName + " input_0");
3867  // outputStateInTensor
3868  ValidateTensorNumDimNumElem(workloadInfo.m_InputTensorInfos[1], 2, (n_batch * n_output),
3869  descriptorName + " input_1");
3870  // outputStateInTensor
3871  ValidateTensorNumDimNumElem(workloadInfo.m_InputTensorInfos[2], 2, (n_batch * n_cell),
3872  descriptorName + " input_2");
3873 
3874  // outputTensor
3875  ValidateTensorNumDimNumElem(workloadInfo.m_OutputTensorInfos[0], 3, (timeStep * n_batch * n_output),
3876  descriptorName + " output_0");
3877 
3878  // check that dimensions of inputs/outputs and QueueDescriptor data match with each other
3879  if ( m_InputToInputWeights )
3880  {
3881  ValidateTensorNumDimNumElem(m_InputToInputWeights->GetTensorInfo(), 2,
3882  (n_cell * n_input), "InputLayerNormWeights");
3883  }
3884 
3885  ValidatePointer(m_InputToForgetWeights, "Null pointer check", "InputToForgetWeights");
3886  ValidateTensorNumDimNumElem(m_InputToForgetWeights->GetTensorInfo(), 2,
3887  (n_cell * n_input), "InputToForgetWeights");
3888 
3889  ValidatePointer(m_InputToCellWeights, "Null pointer check", "InputToCellWeights");
3890  ValidateTensorNumDimNumElem(m_InputToCellWeights->GetTensorInfo(), 2,
3891  (n_cell * n_input), "InputToCellWeights");
3892 
3893  if (m_RecurrentToInputWeights)
3894  {
3895  ValidateTensorNumDimNumElem(m_RecurrentToInputWeights->GetTensorInfo(), 2,
3896  (n_cell * n_output), "RecurrentToInputWeights");
3897  }
3898 
3899  ValidatePointer(m_RecurrentToForgetWeights, "Null pointer check", "RecurrentToForgetWeights");
3900  ValidateTensorNumDimNumElem(m_RecurrentToForgetWeights->GetTensorInfo(), 2,
3901  (n_cell * n_output), "RecurrentToForgetWeights");
3902 
3903  ValidatePointer(m_RecurrentToCellWeights, "Null pointer check", "RecurrentToCellWeights");
3904  ValidateTensorNumDimNumElem(m_RecurrentToCellWeights->GetTensorInfo(), 2,
3905  (n_cell * n_output), "RecurrentToCellWeights");
3906 
3907  // Make sure the input-gate's parameters are either both present (regular
3908  // LSTM) or not at all (CIFG-LSTM). And CifgEnable is set accordingly.
3909  bool cifg_weights_all_or_none = ((m_InputToInputWeights && m_RecurrentToInputWeights &&
3910  !m_Parameters.m_CifgEnabled) ||
3911  (!m_InputToInputWeights && !m_RecurrentToInputWeights &&
3912  m_Parameters.m_CifgEnabled));
3913  if (!cifg_weights_all_or_none)
3914  {
3915  throw InvalidArgumentException(descriptorName + ": Input-Gate's parameters InputToInputWeights and "
3916  "RecurrentToInputWeights must either both be present (regular LSTM) "
3917  "or both not present (CIFG-LSTM). In addition CifgEnable must be set "
3918  "accordingly.");
3919  }
3920 
3921  if ( m_CellToInputWeights )
3922  {
3923  ValidateTensorNumDimNumElem(m_CellToInputWeights->GetTensorInfo(), 1,
3924  n_cell, "CellToInputWeights");
3925  }
3926  if ( m_CellToForgetWeights )
3927  {
3928  ValidateTensorNumDimNumElem(m_CellToForgetWeights->GetTensorInfo(), 1,
3929  n_cell, "CellToForgetWeights");
3930  }
3931  if ( m_CellToOutputWeights )
3932  {
3933  ValidateTensorNumDimNumElem(m_CellToOutputWeights->GetTensorInfo(), 1,
3934  n_cell, "CellToOutputWeights");
3935  }
3936 
3937  // Making sure the peephole weights are there all or none. And PeepholeEnable is set accordingly.
3938  bool peephole_weights_all_or_none =
3939  (((m_CellToInputWeights || m_Parameters.m_CifgEnabled) && m_CellToForgetWeights
3940  && m_CellToOutputWeights && m_Parameters.m_PeepholeEnabled)
3941  || (!m_CellToInputWeights && !m_CellToForgetWeights
3942  && !m_CellToOutputWeights && !m_Parameters.m_PeepholeEnabled));
3943  if (!peephole_weights_all_or_none)
3944  {
3945  throw InvalidArgumentException(descriptorName + ": Invalid combination of peephole parameters.");
3946  }
3947 
3948  // Make sure the input gate bias is present only when not a CIFG-LSTM.
3949  if (m_Parameters.m_CifgEnabled)
3950  {
3951  if (m_InputGateBias)
3952  {
3953  throw InvalidArgumentException(descriptorName + ": InputGateBias is present and CIFG-LSTM is enabled.");
3954  }
3955  }
3956  else
3957  {
3958  if (!m_InputGateBias)
3959  {
3960  throw InvalidArgumentException(descriptorName + ": If CIFG-LSTM is disabled InputGateBias "
3961  "must be present.");
3962  }
3963  ValidateTensorNumDimNumElem(m_InputGateBias->GetTensorInfo(), 1,
3964  n_cell, "InputGateBias");
3965  }
3966 
3967  ValidatePointer(m_ForgetGateBias, "Null pointer check", "ForgetGateBias");
3968  ValidateTensorNumDimNumElem(m_ForgetGateBias->GetTensorInfo(), 1, n_cell, "ForgetGateBias");
3969 
3970  ValidatePointer(m_CellBias, "Null pointer check", "CellBias");
3971  ValidateTensorNumDimNumElem(m_CellBias->GetTensorInfo(), 1, n_cell, "CellBias");
3972 
3973  ValidatePointer(m_OutputGateBias, "Null pointer check", "OutputGateBias");
3974  ValidateTensorNumDimNumElem(m_OutputGateBias->GetTensorInfo(), 1, n_cell, "OutputGateBias");
3975 
3976  if (m_ProjectionWeights)
3977  {
3978  ValidateTensorNumDimNumElem(m_ProjectionWeights->GetTensorInfo(), 2,
3979  (n_cell * n_output), "ProjectionWeights");
3980  }
3981  if (m_ProjectionBias)
3982  {
3983  ValidateTensorNumDimNumElem(m_ProjectionBias->GetTensorInfo(), 1, n_output, "ProjectionBias");
3984  }
3985 
3986  // Making sure the projection tensors are consistent:
3987  // 1) If projection weight is not present, then projection bias should not be
3988  // present.
3989  // 2) If projection weight is present, then projection bias is optional.
3990  bool projecton_tensors_consistent = ((!m_ProjectionWeights && !m_ProjectionBias &&
3991  !m_Parameters.m_ProjectionEnabled)
3992  || (m_ProjectionWeights && !m_ProjectionBias &&
3993  m_Parameters.m_ProjectionEnabled)
3994  || (m_ProjectionWeights && m_ProjectionBias &&
3995  m_Parameters.m_ProjectionEnabled));
3996  if (!projecton_tensors_consistent)
3997  {
3998  throw InvalidArgumentException(descriptorName + ": Projection tensors are inconsistent.");
3999  }
4000 
4001  // The four layer normalization weights either all have values or none of them have values. Additionally, if
4002  // CIFG is used, input layer normalization weights tensor is omitted and the other layer normalization weights
4003  // either all have values or none of them have values. Layer normalization is used when the values of all the
4004  // layer normalization weights are present
4005  if (m_InputLayerNormWeights)
4006  {
4007  ValidateTensorNumDimNumElem(m_InputLayerNormWeights->GetTensorInfo(), 1, n_cell, "InputLayerNormWeights");
4008  }
4009  if (m_ForgetLayerNormWeights)
4010  {
4011  ValidateTensorNumDimNumElem(m_ForgetLayerNormWeights->GetTensorInfo(), 1, n_cell, "ForgetLayerNormWeights");
4012  }
4013  if (m_CellLayerNormWeights)
4014  {
4015  ValidateTensorNumDimNumElem(m_CellLayerNormWeights->GetTensorInfo(), 1, n_cell, "CellLayerNormWeights");
4016  }
4017  if (m_OutputLayerNormWeights)
4018  {
4019  ValidateTensorNumDimNumElem(m_OutputLayerNormWeights->GetTensorInfo(), 1, n_cell, "OutputLayerNormWeights");
4020  }
4021 
4022  if (m_Parameters.m_LayerNormEnabled)
4023  {
4024  if (!m_Parameters.m_CifgEnabled)
4025  {
4026  if (!m_InputLayerNormWeights)
4027  {
4028  throw InvalidArgumentException(descriptorName + ": Layer normalisation is enabled and CIFG-LSTM is "
4029  "disabled but InputLayerNormWeights are not present");
4030  }
4031  ValidateTensorNumDimNumElem(m_InputLayerNormWeights->GetTensorInfo(),
4032  1, n_cell, "InputLayerNormWeights");
4033  }
4034  else if (m_InputLayerNormWeights)
4035  {
4036  throw InvalidArgumentException(descriptorName + ":InputLayerNormWeights are present while CIFG is "
4037  "enabled");
4038  }
4039 
4040  ValidatePointer(m_ForgetLayerNormWeights, "Null pointer check layer normalisation enabled",
4041  "ForgetLayerNormWeights");
4042  ValidateTensorNumDimNumElem(m_ForgetLayerNormWeights->GetTensorInfo(), 1, n_cell, "ForgetLayerNormWeights");
4043 
4044  ValidatePointer(m_OutputLayerNormWeights, "Null pointer check layer normalisation enabled",
4045  "OutputLayerNormWeights");
4046  ValidateTensorNumDimNumElem(m_OutputLayerNormWeights->GetTensorInfo(), 1, n_cell, "OutputLayerNormWeights");
4047 
4048  ValidatePointer(m_CellLayerNormWeights, "Null pointer check layer normalisation enabled",
4049  "CellLayerNormWeights");
4050  ValidateTensorNumDimNumElem(m_CellLayerNormWeights->GetTensorInfo(), 1, n_cell, "CellLayerNormWeights");
4051  }
4052  else if (m_InputLayerNormWeights || m_ForgetLayerNormWeights || m_CellLayerNormWeights || m_OutputLayerNormWeights)
4053  {
4054  throw InvalidArgumentException(descriptorName + ": Layer normalisation is disabled but one or more layer "
4055  "normalisation weights are present.");
4056  }
4057 }
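
To tie the checks above to concrete shapes: with m_TimeMajor == false, input_0 is the input sequence [batch, time, inputSize], input_1 the output state [batch, outputSize], input_2 the cell state [batch, numUnits], and output_0 the output sequence [batch, time, outputSize], all Float32. The hedged sketch below builds a matching WorkloadInfo and calls Validate() on a descriptor populated as in the sketch in the Detailed Description; the helper name and all sizes are illustrative only.

#include <armnn/Tensor.hpp>
#include <backendsCommon/WorkloadData.hpp>

using namespace armnn;

// Builds a WorkloadInfo whose shapes satisfy Validate() for the
// batch-major case (m_TimeMajor == false).
WorkloadInfo MakeSequenceLstmWorkloadInfo(unsigned int batchSize,
                                          unsigned int timeSteps,
                                          unsigned int inputSize,
                                          unsigned int numUnits,
                                          unsigned int outputSize)
{
    WorkloadInfo info;
    // input_0: input sequence [batch, time, inputSize]
    info.m_InputTensorInfos.push_back(TensorInfo({batchSize, timeSteps, inputSize}, DataType::Float32));
    // input_1: output state in [batch, outputSize]
    info.m_InputTensorInfos.push_back(TensorInfo({batchSize, outputSize}, DataType::Float32));
    // input_2: cell state in [batch, numUnits]
    info.m_InputTensorInfos.push_back(TensorInfo({batchSize, numUnits}, DataType::Float32));
    // output_0: output sequence [batch, time, outputSize]
    info.m_OutputTensorInfos.push_back(TensorInfo({batchSize, timeSteps, outputSize}, DataType::Float32));
    return info;
}

// desc is assumed to be populated as in the earlier sketch. Validate throws
// InvalidArgumentException if shapes, types or parameter flags are inconsistent.
void CheckDescriptor(const UnidirectionalSequenceLstmQueueDescriptor& desc)
{
    desc.Validate(MakeSequenceLstmWorkloadInfo(2, 5, 3, 4, 4));
}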

Member Data Documentation

Each of the following members is a const ConstTensorHandle * that the default constructor initialises to nullptr; the optional ones stay nullptr when the corresponding feature is disabled in m_Parameters:

m_CellBias, m_CellLayerNormWeights, m_CellToForgetWeights, m_CellToInputWeights, m_CellToOutputWeights, m_ForgetGateBias, m_ForgetLayerNormWeights, m_InputGateBias, m_InputLayerNormWeights, m_InputToCellWeights, m_InputToForgetWeights, m_InputToInputWeights, m_InputToOutputWeights, m_OutputGateBias, m_OutputLayerNormWeights, m_ProjectionBias, m_ProjectionWeights, m_RecurrentToCellWeights, m_RecurrentToForgetWeights, m_RecurrentToInputWeights, m_RecurrentToOutputWeights

The documentation for this struct was generated from the following files:

WorkloadData.hpp
WorkloadData.cpp