25 : FloatWorkload<LstmQueueDescriptor>(descriptor,
info)
29 descriptor.m_Parameters,
33 arm_compute::LSTMParams<arm_compute::ICLTensor> lstm_param;
36 m_InputToForgetWeightsTensor = std::make_unique<arm_compute::CLTensor>();
37 BuildArmComputeTensor(*m_InputToForgetWeightsTensor,
m_Data.m_InputToForgetWeights->GetTensorInfo());
39 m_InputToCellWeightsTensor = std::make_unique<arm_compute::CLTensor>();
40 BuildArmComputeTensor(*m_InputToCellWeightsTensor,
m_Data.m_InputToCellWeights->GetTensorInfo());
42 m_InputToOutputWeightsTensor = std::make_unique<arm_compute::CLTensor>();
43 BuildArmComputeTensor(*m_InputToOutputWeightsTensor,
m_Data.m_InputToOutputWeights->GetTensorInfo());
45 m_RecurrentToForgetWeightsTensor = std::make_unique<arm_compute::CLTensor>();
46 BuildArmComputeTensor(*m_RecurrentToForgetWeightsTensor,
m_Data.m_RecurrentToForgetWeights->GetTensorInfo());
48 m_RecurrentToCellWeightsTensor = std::make_unique<arm_compute::CLTensor>();
49 BuildArmComputeTensor(*m_RecurrentToCellWeightsTensor,
m_Data.m_RecurrentToCellWeights->GetTensorInfo());
51 m_RecurrentToOutputWeightsTensor = std::make_unique<arm_compute::CLTensor>();
52 BuildArmComputeTensor(*m_RecurrentToOutputWeightsTensor,
m_Data.m_RecurrentToOutputWeights->GetTensorInfo());
54 m_ForgetGateBiasTensor = std::make_unique<arm_compute::CLTensor>();
55 BuildArmComputeTensor(*m_ForgetGateBiasTensor,
m_Data.m_ForgetGateBias->GetTensorInfo());
57 m_CellBiasTensor = std::make_unique<arm_compute::CLTensor>();
58 BuildArmComputeTensor(*m_CellBiasTensor,
m_Data.m_CellBias->GetTensorInfo());
60 m_OutputGateBiasTensor = std::make_unique<arm_compute::CLTensor>();
61 BuildArmComputeTensor(*m_OutputGateBiasTensor,
m_Data.m_OutputGateBias->GetTensorInfo());
64 if (!
m_Data.m_Parameters.m_CifgEnabled)
66 m_InputToInputWeightsTensor = std::make_unique<arm_compute::CLTensor>();
67 BuildArmComputeTensor(*m_InputToInputWeightsTensor,
m_Data.m_InputToInputWeights->GetTensorInfo());
69 m_RecurrentToInputWeightsTensor = std::make_unique<arm_compute::CLTensor>();
70 BuildArmComputeTensor(*m_RecurrentToInputWeightsTensor,
m_Data.m_RecurrentToInputWeights->GetTensorInfo());
72 m_CellToInputWeightsTensor = std::make_unique<arm_compute::CLTensor>();
73 if (
m_Data.m_CellToInputWeights !=
nullptr)
75 BuildArmComputeTensor(*m_CellToInputWeightsTensor,
m_Data.m_CellToInputWeights->GetTensorInfo());
78 m_InputGateBiasTensor = std::make_unique<arm_compute::CLTensor>();
79 BuildArmComputeTensor(*m_InputGateBiasTensor,
m_Data.m_InputGateBias->GetTensorInfo());
81 lstm_param.set_cifg_params(m_InputToInputWeightsTensor.get(),
82 m_RecurrentToInputWeightsTensor.get(),
83 m_Data.m_CellToInputWeights !=
nullptr ? m_CellToInputWeightsTensor.get() :
nullptr,
84 m_InputGateBiasTensor.get());
87 if (
m_Data.m_Parameters.m_ProjectionEnabled)
89 m_ProjectionWeightsTensor = std::make_unique<arm_compute::CLTensor>();
90 BuildArmComputeTensor(*m_ProjectionWeightsTensor,
m_Data.m_ProjectionWeights->GetTensorInfo());
92 m_ProjectionBiasTensor = std::make_unique<arm_compute::CLTensor>();
93 if (
m_Data.m_ProjectionBias !=
nullptr)
95 BuildArmComputeTensor(*m_ProjectionBiasTensor,
m_Data.m_ProjectionBias->GetTensorInfo());
98 lstm_param.set_projection_params(m_ProjectionWeightsTensor.get(),
99 m_Data.m_ProjectionBias !=
nullptr ? m_ProjectionBiasTensor.get() :
nullptr);
102 if (
m_Data.m_Parameters.m_PeepholeEnabled)
104 m_CellToForgetWeightsTensor = std::make_unique<arm_compute::CLTensor>();
105 BuildArmComputeTensor(*m_CellToForgetWeightsTensor,
m_Data.m_CellToForgetWeights->GetTensorInfo());
107 m_CellToOutputWeightsTensor = std::make_unique<arm_compute::CLTensor>();
108 BuildArmComputeTensor(*m_CellToOutputWeightsTensor,
m_Data.m_CellToOutputWeights->GetTensorInfo());
110 lstm_param.set_peephole_params(m_CellToForgetWeightsTensor.get(), m_CellToOutputWeightsTensor.get());
113 if (
m_Data.m_Parameters.m_LayerNormEnabled)
115 m_InputLayerNormWeightsTensor = std::make_unique<arm_compute::CLTensor>();
116 m_ForgetLayerNormWeightsTensor = std::make_unique<arm_compute::CLTensor>();
117 m_CellLayerNormWeightsTensor = std::make_unique<arm_compute::CLTensor>();
118 m_OutputLayerNormWeightsTensor = std::make_unique<arm_compute::CLTensor>();
120 if (!
m_Data.m_Parameters.m_CifgEnabled)
122 BuildArmComputeTensor(*m_InputLayerNormWeightsTensor,
m_Data.m_InputLayerNormWeights->GetTensorInfo());
124 BuildArmComputeTensor(*m_ForgetLayerNormWeightsTensor,
m_Data.m_ForgetLayerNormWeights->GetTensorInfo());
125 BuildArmComputeTensor(*m_CellLayerNormWeightsTensor,
m_Data.m_CellLayerNormWeights->GetTensorInfo());
126 BuildArmComputeTensor(*m_OutputLayerNormWeightsTensor,
m_Data.m_OutputLayerNormWeights->GetTensorInfo());
128 lstm_param.set_layer_normalization_params(
m_Data.m_Parameters.m_CifgEnabled ?
nullptr :
129 m_InputLayerNormWeightsTensor.get(),
130 m_ForgetLayerNormWeightsTensor.get(),
131 m_CellLayerNormWeightsTensor.get(),
132 m_OutputLayerNormWeightsTensor.get());
135 const arm_compute::ICLTensor& input =
static_cast<IClTensorHandle*
>(
m_Data.
m_Inputs[0])->GetTensor();
136 const arm_compute::ICLTensor& output_state_in =
static_cast<IClTensorHandle*
>(
m_Data.
m_Inputs[1])->GetTensor();
137 arm_compute::ICLTensor& cell_state_in =
static_cast<IClTensorHandle*
>(
m_Data.
m_Inputs[2])->GetTensor();
139 arm_compute::ICLTensor& output_state_out =
static_cast<IClTensorHandle*
>(
m_Data.
m_Outputs[1])->GetTensor();
140 arm_compute::ICLTensor& cell_state_out =
static_cast<IClTensorHandle*
>(
m_Data.
m_Outputs[2])->GetTensor();
141 arm_compute::ICLTensor& output =
static_cast<IClTensorHandle*
>(
m_Data.
m_Outputs[3])->GetTensor();
144 const TensorInfo& inputTensorInfo =
info.m_InputTensorInfos[2];
145 const unsigned int batch_size =
armnn::numeric_cast<
unsigned int>(inputTensorInfo.GetShape()[0]);
146 const unsigned int num_units =
armnn::numeric_cast<
unsigned int>(inputTensorInfo.GetShape()[1]);
148 m_ScratchBuffer = std::make_unique<arm_compute::CLTensor>();
149 if (
m_Data.m_Parameters.m_CifgEnabled)
153 BuildArmComputeTensor(*m_ScratchBuffer, scratchBuffer1);
159 BuildArmComputeTensor(*m_ScratchBuffer, scratchBuffer2);
162 float cell_threshold =
m_Data.m_Parameters.m_ClippingThresCell;
163 float projection_threshold =
m_Data.m_Parameters.m_ClippingThresProj;
166 arm_compute::ActivationLayerInfo activationLayerInfo;
167 if (
m_Data.m_Parameters.m_ActivationFunc == 0)
171 else if (
m_Data.m_Parameters.m_ActivationFunc == 1)
173 activationLayerInfo = arm_compute::ActivationLayerInfo(
174 arm_compute::ActivationLayerInfo::ActivationFunction::RELU);
176 else if (
m_Data.m_Parameters.m_ActivationFunc == 3)
178 activationLayerInfo = arm_compute::ActivationLayerInfo(
179 arm_compute::ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.0);
181 else if (
m_Data.m_Parameters.m_ActivationFunc == 4)
183 activationLayerInfo = arm_compute::ActivationLayerInfo(
184 arm_compute::ActivationLayerInfo::ActivationFunction::TANH, 1.0, 1.0);
186 else if (
m_Data.m_Parameters.m_ActivationFunc == 6)
188 activationLayerInfo = arm_compute::ActivationLayerInfo(
189 arm_compute::ActivationLayerInfo::ActivationFunction::LOGISTIC);
196 m_LstmLayer.configure(clCompileContext, &input, m_InputToForgetWeightsTensor.get(),
197 m_InputToCellWeightsTensor.get(), m_InputToOutputWeightsTensor.get(),
198 m_RecurrentToForgetWeightsTensor.get(), m_RecurrentToCellWeightsTensor.get(),
199 m_RecurrentToOutputWeightsTensor.get(), m_ForgetGateBiasTensor.get(),
200 m_CellBiasTensor.get(), m_OutputGateBiasTensor.get(), &output_state_in,
201 &cell_state_in, m_ScratchBuffer.get(), &output_state_out,
202 &cell_state_out, &output, lstm_param, activationLayerInfo,
203 cell_threshold, projection_threshold);
205 armcomputetensorutils::InitialiseArmComputeTensorEmpty(*m_ScratchBuffer);
217 if (!
m_Data.m_Parameters.m_CifgEnabled)
221 if (
m_Data.m_CellToInputWeights !=
nullptr)
228 if (
m_Data.m_Parameters.m_ProjectionEnabled)
231 if (
m_Data.m_ProjectionBias !=
nullptr)
237 if (
m_Data.m_Parameters.m_PeepholeEnabled)
243 if (
m_Data.m_Parameters.m_LayerNormEnabled)
245 if (!
m_Data.m_Parameters.m_CifgEnabled)
257 m_LstmLayer.prepare();
std::vector< ITensorHandle * > m_Outputs
Base class for all ArmNN exceptions, so that users can filter their catch handlers to just ArmNN-originated errors.
#define ARMNN_REPORT_PROFILING_WORKLOAD_DESC(name, desc, infos, guid)
std::enable_if_t< std::is_unsigned< Source >::value &&std::is_unsigned< Dest >::value, Dest > numeric_cast(Source source)
void InitializeArmComputeClTensorData(arm_compute::CLTensor &clTensor, const ConstTensorHandle *handle)
std::vector< ITensorHandle * > m_Inputs