18 : NeonBaseWorkload<QLstmQueueDescriptor>(descriptor,
info)
22 descriptor.m_Parameters,
26 arm_compute::LSTMParams<arm_compute::ITensor> qLstmParams;
29 m_InputToForgetWeightsTensor = std::make_unique<arm_compute::Tensor>();
32 m_InputToCellWeightsTensor = std::make_unique<arm_compute::Tensor>();
35 m_InputToOutputWeightsTensor = std::make_unique<arm_compute::Tensor>();
38 m_RecurrentToForgetWeightsTensor = std::make_unique<arm_compute::Tensor>();
41 m_RecurrentToCellWeightsTensor = std::make_unique<arm_compute::Tensor>();
44 m_RecurrentToOutputWeightsTensor = std::make_unique<arm_compute::Tensor>();
47 m_ForgetGateBiasTensor = std::make_unique<arm_compute::Tensor>();
50 m_CellBiasTensor = std::make_unique<arm_compute::Tensor>();
53 m_OutputGateBiasTensor = std::make_unique<arm_compute::Tensor>();
59 m_CellToInputWeightsTensor = std::make_unique<arm_compute::Tensor>();
67 m_CellToForgetWeightsTensor = std::make_unique<arm_compute::Tensor>();
70 m_CellToOutputWeightsTensor = std::make_unique<arm_compute::Tensor>();
74 qLstmParams.set_peephole_params(m_CellToForgetWeightsTensor.get(),
75 m_CellToOutputWeightsTensor.get());
80 m_ProjectionWeightsTensor = std::make_unique<arm_compute::Tensor>();
83 m_ProjectionBiasTensor = std::make_unique<arm_compute::Tensor>();
90 qLstmParams.set_projection_params(
91 m_ProjectionWeightsTensor.get(),
97 m_InputLayerNormWeightsTensor = std::make_unique<arm_compute::Tensor>();
104 m_ForgetLayerNormWeightsTensor = std::make_unique<arm_compute::Tensor>();
107 m_CellLayerNormWeightsTensor = std::make_unique<arm_compute::Tensor>();
110 m_OutputLayerNormWeightsTensor = std::make_unique<arm_compute::Tensor>();
114 qLstmParams.set_layer_normalization_params(
116 m_ForgetLayerNormWeightsTensor.get(),
117 m_CellLayerNormWeightsTensor.get(),
118 m_OutputLayerNormWeightsTensor.get());
123 m_InputToInputWeightsTensor = std::make_unique<arm_compute::Tensor>();
126 m_RecurrentToInputWeightsTensor = std::make_unique<arm_compute::Tensor>();
129 m_InputGateBiasTensor = std::make_unique<arm_compute::Tensor>();
133 qLstmParams.set_cifg_params(
134 m_InputToInputWeightsTensor.get(),
135 m_RecurrentToInputWeightsTensor.get(),
137 m_InputGateBiasTensor.get());
141 const arm_compute::ITensor& input =
static_cast<IAclTensorHandle*
>(
m_Data.
m_Inputs[0])->GetTensor();
142 arm_compute::ITensor& outputStateIn =
static_cast<IAclTensorHandle*
>(
m_Data.
m_Inputs[1])->GetTensor();
143 const arm_compute::ITensor& cellStateIn =
static_cast<IAclTensorHandle*
>(
m_Data.
m_Inputs[2])->GetTensor();
145 arm_compute::ITensor& outputStateOut =
static_cast<IAclTensorHandle*
>(
m_Data.
m_Outputs[0])->GetTensor();
146 arm_compute::ITensor& cellStateOut =
static_cast<IAclTensorHandle*
>(
m_Data.
m_Outputs[1])->GetTensor();
147 arm_compute::ITensor& output =
static_cast<IAclTensorHandle*
>(
m_Data.
m_Outputs[2])->GetTensor();
160 m_QLstmLayer.configure(&input,
161 m_InputToForgetWeightsTensor.get(),
162 m_InputToCellWeightsTensor.get(),
163 m_InputToOutputWeightsTensor.get(),
164 m_RecurrentToForgetWeightsTensor.get(),
165 m_RecurrentToCellWeightsTensor.get(),
166 m_RecurrentToOutputWeightsTensor.get(),
167 m_ForgetGateBiasTensor.get(),
168 m_CellBiasTensor.get(),
169 m_OutputGateBiasTensor.get(),
232 m_QLstmLayer.prepare();
const ConstTensorHandle * m_CellLayerNormWeights
const ConstTensorHandle * m_ProjectionWeights
const ConstTensorHandle * m_ForgetGateBias
const ConstTensorHandle * m_InputToOutputWeights
bool m_PeepholeEnabled
Enable/disable the peephole connections (cell-state-to-gate weights).
float m_HiddenStateScale
Hidden State quantization scale.
const ConstTensorHandle * m_InputToInputWeights
const ConstTensorHandle * m_CellToOutputWeights
float m_OutputIntermediateScale
Output intermediate quantization scale.
const ConstTensorHandle * m_CellToInputWeights
LayerDescriptor m_Parameters
const ConstTensorHandle * m_ForgetLayerNormWeights
const TensorInfo & GetTensorInfo() const
bool m_LayerNormEnabled
Enable/disable layer normalization.
QLstmQueueDescriptor m_Data
float m_ProjectionClip
Clipping threshold value for the projection.
const ConstTensorHandle * m_InputToForgetWeights
float m_InputIntermediateScale
Input intermediate quantization scale.
const ConstTensorHandle * m_CellBias
const ConstTensorHandle * m_InputLayerNormWeights
float m_ForgetIntermediateScale
Forget intermediate quantization scale.
const ConstTensorHandle * m_InputToCellWeights
float m_CellClip
Clipping threshold value for the cell state.
const ConstTensorHandle * m_CellToForgetWeights
const ConstTensorHandle * m_ProjectionBias
std::vector< ITensorHandle * > m_Outputs
const ConstTensorHandle * m_RecurrentToCellWeights
bool m_ProjectionEnabled
Enable/disable the projection layer.
const ConstTensorHandle * m_InputGateBias
#define ARMNN_REPORT_PROFILING_WORKLOAD_DESC(name, desc, infos, guid)
void InitializeArmComputeTensorData(arm_compute::Tensor &tensor, const ConstTensorHandle *handle)
const ConstTensorHandle * m_OutputGateBias
std::vector< ITensorHandle * > m_Inputs
const ConstTensorHandle * m_OutputLayerNormWeights
float m_CellIntermediateScale
Cell intermediate quantization scale.
const ConstTensorHandle * m_RecurrentToOutputWeights
const ConstTensorHandle * m_RecurrentToInputWeights
bool m_CifgEnabled
Enable/disable CIFG (coupled input & forget gate).
const ConstTensorHandle * m_RecurrentToForgetWeights
int32_t m_HiddenStateZeroPoint
Hidden State zero point.