ArmNN 21.08
UnidirectionalSequenceLstmQueueDescriptor Struct Reference

#include <WorkloadData.hpp>

Inheritance diagram for UnidirectionalSequenceLstmQueueDescriptor:
UnidirectionalSequenceLstmQueueDescriptor → QueueDescriptorWithParameters< LstmDescriptor > → QueueDescriptor

Public Member Functions

 UnidirectionalSequenceLstmQueueDescriptor ()
 
void Validate (const WorkloadInfo &workloadInfo) const
 
- Public Member Functions inherited from QueueDescriptor
void ValidateInputsOutputs (const std::string &descName, unsigned int numExpectedIn, unsigned int numExpectedOut) const
 
template<typename T >
const T * GetAdditionalInformation () const
 

Public Attributes

const ConstTensorHandle * m_InputToInputWeights
 
const ConstTensorHandle * m_InputToForgetWeights
 
const ConstTensorHandle * m_InputToCellWeights
 
const ConstTensorHandle * m_InputToOutputWeights
 
const ConstTensorHandle * m_RecurrentToInputWeights
 
const ConstTensorHandle * m_RecurrentToForgetWeights
 
const ConstTensorHandle * m_RecurrentToCellWeights
 
const ConstTensorHandle * m_RecurrentToOutputWeights
 
const ConstTensorHandle * m_CellToInputWeights
 
const ConstTensorHandle * m_CellToForgetWeights
 
const ConstTensorHandle * m_CellToOutputWeights
 
const ConstTensorHandle * m_InputGateBias
 
const ConstTensorHandle * m_ForgetGateBias
 
const ConstTensorHandle * m_CellBias
 
const ConstTensorHandle * m_OutputGateBias
 
const ConstTensorHandle * m_ProjectionWeights
 
const ConstTensorHandle * m_ProjectionBias
 
const ConstTensorHandle * m_InputLayerNormWeights
 
const ConstTensorHandle * m_ForgetLayerNormWeights
 
const ConstTensorHandle * m_CellLayerNormWeights
 
const ConstTensorHandle * m_OutputLayerNormWeights
 
- Public Attributes inherited from QueueDescriptorWithParameters< LstmDescriptor >
LstmDescriptor m_Parameters
 
- Public Attributes inherited from QueueDescriptor
std::vector< ITensorHandle * > m_Inputs
 
std::vector< ITensorHandle * > m_Outputs
 
void * m_AdditionalInfoObject
 

Additional Inherited Members

- Protected Member Functions inherited from QueueDescriptorWithParameters< LstmDescriptor >
 ~QueueDescriptorWithParameters ()=default
 
 QueueDescriptorWithParameters ()=default
 
 QueueDescriptorWithParameters (QueueDescriptorWithParameters const &)=default
 
QueueDescriptorWithParameters & operator= (QueueDescriptorWithParameters const &)=default
 
- Protected Member Functions inherited from QueueDescriptor
 ~QueueDescriptor ()=default
 
 QueueDescriptor ()
 
 QueueDescriptor (QueueDescriptor const &)=default
 
QueueDescriptor & operator= (QueueDescriptor const &)=default
 

Detailed Description

Definition at line 698 of file WorkloadData.hpp.
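
The descriptor is typically filled in by the backend layer just before a workload is created. The sketch below shows one way the struct could be populated for a plain Float32 LSTM (non-CIFG, no peephole, projection or layer normalisation) and then checked with Validate(). The sizes, the MakeHandle helper and the backendsCommon include paths are illustrative assumptions based on the 21.08 source layout, not part of this API.

// Minimal sketch, not a definitive usage pattern; include paths assume the
// 21.08 src/backends/backendsCommon layout and may differ in your build.
#include <armnn/Descriptors.hpp>
#include <armnn/Tensor.hpp>
#include <backendsCommon/TensorHandle.hpp>   // armnn::ScopedTensorHandle
#include <backendsCommon/WorkloadData.hpp>   // UnidirectionalSequenceLstmQueueDescriptor
#include <backendsCommon/WorkloadInfo.hpp>   // armnn::WorkloadInfo

#include <memory>

int main()
{
    using namespace armnn;

    // Illustrative sizes: batch-major input of shape [n_batch, timeSteps, n_input].
    const unsigned int nBatch = 1, timeSteps = 3, nInput = 2, nCell = 4, nOutput = 4;

    // Validate() only reads the TensorInfo of each handle, so handles built from a
    // TensorInfo alone (no data) are enough for this sketch.
    auto MakeHandle = [](std::initializer_list<unsigned int> dims)
    {
        return std::make_unique<ScopedTensorHandle>(TensorInfo(TensorShape(dims), DataType::Float32));
    };

    UnidirectionalSequenceLstmQueueDescriptor desc;

    // Input weights: [n_cell, n_input]; recurrent weights: [n_cell, n_output]; biases: [n_cell].
    auto inToIn   = MakeHandle({nCell, nInput});   auto recToIn   = MakeHandle({nCell, nOutput});
    auto inToFg   = MakeHandle({nCell, nInput});   auto recToFg   = MakeHandle({nCell, nOutput});
    auto inToCell = MakeHandle({nCell, nInput});   auto recToCell = MakeHandle({nCell, nOutput});
    auto inToOut  = MakeHandle({nCell, nInput});   auto recToOut  = MakeHandle({nCell, nOutput});
    auto inBias   = MakeHandle({nCell});           auto fgBias    = MakeHandle({nCell});
    auto cellBias = MakeHandle({nCell});           auto outBias   = MakeHandle({nCell});

    desc.m_InputToInputWeights      = inToIn.get();
    desc.m_InputToForgetWeights     = inToFg.get();
    desc.m_InputToCellWeights       = inToCell.get();
    desc.m_InputToOutputWeights     = inToOut.get();
    desc.m_RecurrentToInputWeights  = recToIn.get();
    desc.m_RecurrentToForgetWeights = recToFg.get();
    desc.m_RecurrentToCellWeights   = recToCell.get();
    desc.m_RecurrentToOutputWeights = recToOut.get();
    desc.m_InputGateBias            = inBias.get();
    desc.m_ForgetGateBias           = fgBias.get();
    desc.m_CellBias                 = cellBias.get();
    desc.m_OutputGateBias           = outBias.get();

    // Regular (non-CIFG) LSTM on batch-major data.
    desc.m_Parameters.m_CifgEnabled = false;
    desc.m_Parameters.m_TimeMajor   = false;

    // input_0, outputStateIn, cellStateIn and output_0, as expected by Validate().
    WorkloadInfo info;
    info.m_InputTensorInfos  = { TensorInfo(TensorShape({nBatch, timeSteps, nInput}), DataType::Float32),
                                 TensorInfo(TensorShape({nBatch, nOutput}), DataType::Float32),
                                 TensorInfo(TensorShape({nBatch, nCell}), DataType::Float32) };
    info.m_OutputTensorInfos = { TensorInfo(TensorShape({nBatch, timeSteps, nOutput}), DataType::Float32) };

    desc.Validate(info);   // throws InvalidArgumentException if anything above is inconsistent
    return 0;
}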

Constructor & Destructor Documentation

◆ UnidirectionalSequenceLstmQueueDescriptor()

Definition at line 700 of file WorkloadData.hpp.

700  UnidirectionalSequenceLstmQueueDescriptor()
701  : m_InputToInputWeights(nullptr)
702  , m_InputToForgetWeights(nullptr)
703  , m_InputToCellWeights(nullptr)
704  , m_InputToOutputWeights(nullptr)
705  , m_RecurrentToInputWeights(nullptr)
706  , m_RecurrentToForgetWeights(nullptr)
707  , m_RecurrentToCellWeights(nullptr)
708  , m_RecurrentToOutputWeights(nullptr)
709  , m_CellToInputWeights(nullptr)
710  , m_CellToForgetWeights(nullptr)
711  , m_CellToOutputWeights(nullptr)
712  , m_InputGateBias(nullptr)
713  , m_ForgetGateBias(nullptr)
714  , m_CellBias(nullptr)
715  , m_OutputGateBias(nullptr)
716  , m_ProjectionWeights(nullptr)
717  , m_ProjectionBias(nullptr)
718  , m_InputLayerNormWeights(nullptr)
719  , m_ForgetLayerNormWeights(nullptr)
720  , m_CellLayerNormWeights(nullptr)
721  , m_OutputLayerNormWeights(nullptr)
722  {
723  }
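Because the constructor leaves every handle null, optional tensors are simply never assigned. As a small sketch (the helper name is illustrative, not from the library), a CIFG configuration leaves the input-gate tensors at their default nullptr and records that choice in the inherited m_Parameters, which is what the all-or-none check in Validate() expects:

// Sketch: configure an existing descriptor for CIFG; optional input-gate tensors stay nullptr.
#include <backendsCommon/WorkloadData.hpp>   // assumed 21.08 include path

void ConfigureAsCifg(armnn::UnidirectionalSequenceLstmQueueDescriptor& desc)
{
    desc.m_Parameters.m_CifgEnabled = true;   // matches the CIFG all-or-none check in Validate()
    // m_InputToInputWeights, m_RecurrentToInputWeights and m_InputGateBias remain nullptr;
    // only the forget/cell/output weights and biases still need handles before Validate().
}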

Member Function Documentation

◆ Validate()

void Validate ( const WorkloadInfo & workloadInfo ) const

Definition at line 3719 of file WorkloadData.cpp.

References armnn::Float32, WorkloadInfo::m_InputTensorInfos, and WorkloadInfo::m_OutputTensorInfos.

3719  void UnidirectionalSequenceLstmQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
3720 {
3721  // Modified from LstmQueueDescriptor::Validate to support UnidirectionalSequenceLstm
3722 
3723  const std::string descriptorName{"UnidirectionalSequenceLstmQueueDescriptor"};
3724 
3725  // check dimensions of all inputs and outputs
3726  if (workloadInfo.m_InputTensorInfos.size() != 3)
3727  {
3728  throw InvalidArgumentException(descriptorName + ": Invalid number of inputs.");
3729  }
3730  if (workloadInfo.m_OutputTensorInfos.size() != 1)
3731  {
3732  throw InvalidArgumentException(descriptorName + ": Invalid number of outputs.");
3733  }
3734 
3735  std::vector<DataType> supportedTypes =
3736  {
3737      DataType::Float32
3738  };
3739 
3740  // check for supported type of one input and match them with all the other input and output
3741  ValidateDataTypes(workloadInfo.m_InputTensorInfos[0], supportedTypes, descriptorName);
3742 
3743  // type matches all other inputs
3744  for (uint32_t i = 1u; i < workloadInfo.m_InputTensorInfos.size(); ++i)
3745  {
3746  ValidateTensorDataTypesMatch(workloadInfo.m_InputTensorInfos[0],
3747  workloadInfo.m_InputTensorInfos[i],
3748  descriptorName,
3749  "input_0",
3750  "input_" + std::to_string(i));
3751  }
3752  // type matches all other outputs
3753  for (uint32_t i = 0u; i < workloadInfo.m_OutputTensorInfos.size(); ++i)
3754  {
3755  ValidateTensorDataTypesMatch(workloadInfo.m_InputTensorInfos[0],
3756  workloadInfo.m_OutputTensorInfos[i],
3757  "LstmQueueDescriptor",
3758  "input_0",
3759  "output_" + std::to_string(i));
3760  }
3761 
3762  // Making sure clipping parameters have valid values.
3763  // == 0 means no clipping
3764  // > 0 means clipping
3765  if (m_Parameters.m_ClippingThresCell < 0.0f)
3766  {
3767  throw InvalidArgumentException(descriptorName + ": negative cell clipping threshold is invalid");
3768  }
3769  if (m_Parameters.m_ClippingThresProj < 0.0f)
3770  {
3771  throw InvalidArgumentException(descriptorName + ": negative projection clipping threshold is invalid");
3772  }
3773 
3774  unsigned int batchIndx = 0;
3775  unsigned int inputIndx = 1;
3776  uint32_t timeStep = 1;
3777  unsigned int timeIndx = 1;
3778  inputIndx = 2;
3779  if (m_Parameters.m_TimeMajor)
3780  {
3781  batchIndx = 1;
3782  timeIndx = 0;
3783 
3784  }
3785  timeStep = workloadInfo.m_InputTensorInfos[0].GetShape()[timeIndx];
3786 
3787  // Inferring batch size, number of outputs and number of cells from the inputs.
3788  const uint32_t n_input = workloadInfo.m_InputTensorInfos[0].GetShape()[inputIndx];
3789  const uint32_t n_batch = workloadInfo.m_InputTensorInfos[0].GetShape()[batchIndx];
3790  ValidatePointer(m_InputToOutputWeights, "Null pointer check", "InputToOutputWeights");
3791  const uint32_t n_cell = m_InputToOutputWeights->GetShape()[0];
3792  ValidatePointer(m_RecurrentToOutputWeights, "Null pointer check", "RecurrentToOutputWeights");
3793  const uint32_t n_output = m_RecurrentToOutputWeights->GetShape()[1];
3794 
3795  // input tensor
3796  ValidateTensorNumDimNumElem(workloadInfo.m_InputTensorInfos[0], 3, (timeStep * n_batch * n_input),
3797  descriptorName + " input_0");
3798  // outputStateInTensor
3799  ValidateTensorNumDimNumElem(workloadInfo.m_InputTensorInfos[1], 2, (n_batch * n_output),
3800  descriptorName + " input_1");
3801  // outputStateInTensor
3802  ValidateTensorNumDimNumElem(workloadInfo.m_InputTensorInfos[2], 2, (n_batch * n_cell),
3803  descriptorName + " input_2");
3804 
3805  // outputTensor
3806  ValidateTensorNumDimNumElem(workloadInfo.m_OutputTensorInfos[0], 3, (timeStep * n_batch * n_output),
3807  descriptorName + " output_0");
3808 
3809  // check that dimensions of inputs/outputs and QueueDescriptor data match with each other
3810  if ( m_InputToInputWeights )
3811  {
3812  ValidateTensorNumDimNumElem(m_InputToInputWeights->GetTensorInfo(), 2,
3813  (n_cell * n_input), "InputLayerNormWeights");
3814  }
3815 
3816  ValidatePointer(m_InputToForgetWeights, "Null pointer check", "InputToForgetWeights");
3817  ValidateTensorNumDimNumElem(m_InputToForgetWeights->GetTensorInfo(), 2,
3818  (n_cell * n_input), "InputToForgetWeights");
3819 
3820  ValidatePointer(m_InputToCellWeights, "Null pointer check", "InputToCellWeights");
3821  ValidateTensorNumDimNumElem(m_InputToCellWeights->GetTensorInfo(), 2,
3822  (n_cell * n_input), "InputToCellWeights");
3823 
3824  if (m_RecurrentToInputWeights)
3825  {
3826  ValidateTensorNumDimNumElem(m_RecurrentToInputWeights->GetTensorInfo(), 2,
3827  (n_cell * n_output), "RecurrentToInputWeights");
3828  }
3829 
3830  ValidatePointer(m_RecurrentToForgetWeights, "Null pointer check", "RecurrentToForgetWeights");
3831  ValidateTensorNumDimNumElem(m_RecurrentToForgetWeights->GetTensorInfo(), 2,
3832  (n_cell * n_output), "RecurrentToForgetWeights");
3833 
3834  ValidatePointer(m_RecurrentToCellWeights, "Null pointer check", "RecurrentToCellWeights");
3835  ValidateTensorNumDimNumElem(m_RecurrentToCellWeights->GetTensorInfo(), 2,
3836  (n_cell * n_output), "RecurrentToCellWeights");
3837 
3838  // Make sure the input-gate's parameters are either both present (regular
3839  // LSTM) or not at all (CIFG-LSTM). And CifgEnable is set accordingly.
3840  bool cifg_weights_all_or_none = ((m_InputToInputWeights && m_RecurrentToInputWeights &&
3841                                   !m_Parameters.m_CifgEnabled) ||
3842                                   (!m_InputToInputWeights && !m_RecurrentToInputWeights &&
3843                                   m_Parameters.m_CifgEnabled));
3844  if (!cifg_weights_all_or_none)
3845  {
3846  throw InvalidArgumentException(descriptorName + ": Input-Gate's parameters InputToInputWeights and "
3847  "RecurrentToInputWeights must either both be present (regular LSTM) "
3848  "or both not present (CIFG-LSTM). In addition CifgEnable must be set "
3849  "accordingly.");
3850  }
3851 
3852  if ( m_CellToInputWeights )
3853  {
3854  ValidateTensorNumDimNumElem(m_CellToInputWeights->GetTensorInfo(), 1,
3855  n_cell, "CellToInputWeights");
3856  }
3857  if ( m_CellToForgetWeights )
3858  {
3859  ValidateTensorNumDimNumElem(m_CellToForgetWeights->GetTensorInfo(), 1,
3860  n_cell, "CellToForgetWeights");
3861  }
3862  if ( m_CellToOutputWeights )
3863  {
3864  ValidateTensorNumDimNumElem(m_CellToOutputWeights->GetTensorInfo(), 1,
3865  n_cell, "CellToOutputWeights");
3866  }
3867 
3868  // Making sure the peephole weights are there all or none. And PeepholeEnable is set accordingly.
3869  bool peephole_weights_all_or_none =
3870      (((m_CellToInputWeights || m_Parameters.m_CifgEnabled) && m_CellToForgetWeights
3871      && m_CellToOutputWeights && m_Parameters.m_PeepholeEnabled)
3872      || (!m_CellToInputWeights && !m_CellToForgetWeights
3873      && !m_CellToOutputWeights && !m_Parameters.m_PeepholeEnabled));
3874  if (!peephole_weights_all_or_none)
3875  {
3876  throw InvalidArgumentException(descriptorName + ": Invalid combination of peephole parameters.");
3877  }
3878 
3879  // Make sure the input gate bias is present only when not a CIFG-LSTM.
3880  if (m_Parameters.m_CifgEnabled)
3881  {
3882  if (m_InputGateBias)
3883  {
3884  throw InvalidArgumentException(descriptorName + ": InputGateBias is present and CIFG-LSTM is enabled.");
3885  }
3886  }
3887  else
3888  {
3889  if (!m_InputGateBias)
3890  {
3891  throw InvalidArgumentException(descriptorName + ": If CIFG-LSTM is disabled InputGateBias "
3892  "must be present.");
3893  }
3894  ValidateTensorNumDimNumElem(m_InputGateBias->GetTensorInfo(), 1,
3895  n_cell, "InputGateBias");
3896  }
3897 
3898  ValidatePointer(m_ForgetGateBias, "Null pointer check", "ForgetGateBias");
3899  ValidateTensorNumDimNumElem(m_ForgetGateBias->GetTensorInfo(), 1, n_cell, "ForgetGateBias");
3900 
3901  ValidatePointer(m_CellBias, "Null pointer check", "CellBias");
3902  ValidateTensorNumDimNumElem(m_CellBias->GetTensorInfo(), 1, n_cell, "CellBias");
3903 
3904  ValidatePointer(m_OutputGateBias, "Null pointer check", "OutputGateBias");
3905  ValidateTensorNumDimNumElem(m_OutputGateBias->GetTensorInfo(), 1, n_cell, "OutputGateBias");
3906 
3907  if (m_ProjectionWeights)
3908  {
3909  ValidateTensorNumDimNumElem(m_ProjectionWeights->GetTensorInfo(), 2,
3910  (n_cell * n_output), "ProjectionWeights");
3911  }
3912  if (m_ProjectionBias)
3913  {
3914  ValidateTensorNumDimNumElem(m_ProjectionBias->GetTensorInfo(), 1, n_output, "ProjectionBias");
3915  }
3916 
3917  // Making sure the projection tensors are consistent:
3918  // 1) If projection weight is not present, then projection bias should not be
3919  // present.
3920  // 2) If projection weight is present, then projection bias is optional.
3921  bool projecton_tensors_consistent = ((!m_ProjectionWeights && !m_ProjectionBias &&
3922                                       !m_Parameters.m_ProjectionEnabled)
3923                                       || (m_ProjectionWeights && !m_ProjectionBias &&
3924                                       m_Parameters.m_ProjectionEnabled)
3925                                       || (m_ProjectionWeights && m_ProjectionBias &&
3926                                       m_Parameters.m_ProjectionEnabled));
3927  if (!projecton_tensors_consistent)
3928  {
3929  throw InvalidArgumentException(descriptorName + ": Projection tensors are inconsistent.");
3930  }
3931 
3932  // The four layer normalization weights either all have values or none of them have values. Additionally, if
3933  // CIFG is used, input layer normalization weights tensor is omitted and the other layer normalization weights
3934  // either all have values or none of them have values. Layer normalization is used when the values of all the
3935  // layer normalization weights are present
3936  if (m_InputLayerNormWeights)
3937  {
3938  ValidateTensorNumDimNumElem(m_InputLayerNormWeights->GetTensorInfo(), 1, n_cell, "InputLayerNormWeights");
3939  }
3940  if (m_ForgetLayerNormWeights)
3941  {
3942  ValidateTensorNumDimNumElem(m_ForgetLayerNormWeights->GetTensorInfo(), 1, n_cell, "ForgetLayerNormWeights");
3943  }
3944  if (m_CellLayerNormWeights)
3945  {
3946  ValidateTensorNumDimNumElem(m_CellLayerNormWeights->GetTensorInfo(), 1, n_cell, "CellLayerNormWeights");
3947  }
3948  if (m_OutputLayerNormWeights)
3949  {
3950  ValidateTensorNumDimNumElem(m_OutputLayerNormWeights->GetTensorInfo(), 1, n_cell, "OutputLayerNormWeights");
3951  }
3952 
3953  if (m_Parameters.m_LayerNormEnabled)
3954  {
3955      if (!m_Parameters.m_CifgEnabled)
3956      {
3957          if (!m_InputLayerNormWeights)
3958          {
3959  throw InvalidArgumentException(descriptorName + ": Layer normalisation is enabled and CIFG-LSTM is "
3960  "disabled but InputLayerNormWeights are not present");
3961  }
3962  ValidateTensorNumDimNumElem(m_InputLayerNormWeights->GetTensorInfo(),
3963  1, n_cell, "InputLayerNormWeights");
3964  }
3965  else if (m_InputLayerNormWeights)
3966  {
3967  throw InvalidArgumentException(descriptorName + ":InputLayerNormWeights are present while CIFG is "
3968  "enabled");
3969  }
3970 
3971  ValidatePointer(m_ForgetLayerNormWeights, "Null pointer check layer normalisation enabled",
3972  "ForgetLayerNormWeights");
3973  ValidateTensorNumDimNumElem(m_ForgetLayerNormWeights->GetTensorInfo(), 1, n_cell, "ForgetLayerNormWeights");
3974 
3975  ValidatePointer(m_OutputLayerNormWeights, "Null pointer check layer normalisation enabled",
3976  "OutputLayerNormWeights");
3977  ValidateTensorNumDimNumElem(m_OutputLayerNormWeights->GetTensorInfo(), 1, n_cell, "OutputLayerNormWeights");
3978 
3979  ValidatePointer(m_CellLayerNormWeights, "Null pointer check layer normalisation enabled",
3980  "CellLayerNormWeights");
3981  ValidateTensorNumDimNumElem(m_CellLayerNormWeights->GetTensorInfo(), 1, n_cell, "CellLayerNormWeights");
3982  }
3983  else if (m_InputLayerNormWeights || m_ForgetLayerNormWeights || m_CellLayerNormWeights || m_OutputLayerNormWeights)
3984  {
3985  throw InvalidArgumentException(descriptorName + ": Layer normalisation is disabled but one or more layer "
3986  "normalisation weights are present.");
3987  }
3988 }
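
As a sketch of the failure path (the shapes are illustrative and the include paths assume the 21.08 backendsCommon layout), supplying a WorkloadInfo with the wrong number of inputs makes Validate() throw armnn::InvalidArgumentException before any tensor or parameter checks run:

#include <armnn/Exceptions.hpp>
#include <armnn/Tensor.hpp>
#include <backendsCommon/WorkloadData.hpp>   // assumed 21.08 include path
#include <backendsCommon/WorkloadInfo.hpp>

#include <iostream>

int main()
{
    using namespace armnn;

    UnidirectionalSequenceLstmQueueDescriptor desc;   // no weight handles set yet

    // Only one input is supplied, but the workload expects input, outputStateIn and cellStateIn.
    WorkloadInfo info;
    info.m_InputTensorInfos  = { TensorInfo(TensorShape({1, 3, 2}), DataType::Float32) };
    info.m_OutputTensorInfos = { TensorInfo(TensorShape({1, 3, 4}), DataType::Float32) };

    try
    {
        desc.Validate(info);
    }
    catch (const InvalidArgumentException& e)
    {
        // Prints "UnidirectionalSequenceLstmQueueDescriptor: Invalid number of inputs."
        std::cerr << e.what() << std::endl;
    }
    return 0;
}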

Member Data Documentation

◆ m_CellBias

◆ m_CellLayerNormWeights

◆ m_CellToForgetWeights

◆ m_CellToInputWeights

◆ m_CellToOutputWeights

◆ m_ForgetGateBias

◆ m_ForgetLayerNormWeights

◆ m_InputGateBias

◆ m_InputLayerNormWeights

◆ m_InputToCellWeights

◆ m_InputToForgetWeights

◆ m_InputToInputWeights

◆ m_InputToOutputWeights

◆ m_OutputGateBias

◆ m_OutputLayerNormWeights

◆ m_ProjectionBias

◆ m_ProjectionWeights

◆ m_RecurrentToCellWeights

◆ m_RecurrentToForgetWeights

◆ m_RecurrentToInputWeights

◆ m_RecurrentToOutputWeights


The documentation for this struct was generated from the following files:

WorkloadData.hpp
WorkloadData.cpp