// ClLstmFloatWorkload constructor (fragment) — builds Arm Compute Library (ACL)
// CL tensors for every LSTM weight/bias carried in the LstmQueueDescriptor and
// then configures the ACL CLLSTMLayer with them.
// NOTE(review): this listing is an extraction fragment. The constructor
// signature precedes the first visible line, each line is prefixed with its
// original-file line number, and several original lines are elided (opening/
// closing braces, the scratch-buffer TensorInfo declarations, and the
// InitializeArmComputeClTensorData calls near the end). Comments below only
// describe what the visible code shows; elided behavior is flagged as such.
// Delegate to the FloatWorkload base with the queue descriptor and workload info.
23 : FloatWorkload<LstmQueueDescriptor>(descriptor,
info)
// Holder for the *optional* LSTM tensors (CIFG, projection, peephole,
// layer-norm); populated conditionally below and passed to configure().
25 arm_compute::LSTMParams<arm_compute::ICLTensor> lstm_param;
// --- Mandatory weights and biases -----------------------------------------
// For each, allocate an empty CL tensor and shape it from the corresponding
// ArmNN TensorInfo. Only metadata is set here; no data is uploaded yet
// (the InitializeArmComputeClTensorData calls appear to be in the elided
// lines after configure() — TODO confirm against the full source).
28 m_InputToForgetWeightsTensor = std::make_unique<arm_compute::CLTensor>();
29 BuildArmComputeTensor(*m_InputToForgetWeightsTensor,
m_Data.m_InputToForgetWeights->GetTensorInfo());
31 m_InputToCellWeightsTensor = std::make_unique<arm_compute::CLTensor>();
32 BuildArmComputeTensor(*m_InputToCellWeightsTensor,
m_Data.m_InputToCellWeights->GetTensorInfo());
34 m_InputToOutputWeightsTensor = std::make_unique<arm_compute::CLTensor>();
35 BuildArmComputeTensor(*m_InputToOutputWeightsTensor,
m_Data.m_InputToOutputWeights->GetTensorInfo());
37 m_RecurrentToForgetWeightsTensor = std::make_unique<arm_compute::CLTensor>();
38 BuildArmComputeTensor(*m_RecurrentToForgetWeightsTensor,
m_Data.m_RecurrentToForgetWeights->GetTensorInfo());
40 m_RecurrentToCellWeightsTensor = std::make_unique<arm_compute::CLTensor>();
41 BuildArmComputeTensor(*m_RecurrentToCellWeightsTensor,
m_Data.m_RecurrentToCellWeights->GetTensorInfo());
43 m_RecurrentToOutputWeightsTensor = std::make_unique<arm_compute::CLTensor>();
44 BuildArmComputeTensor(*m_RecurrentToOutputWeightsTensor,
m_Data.m_RecurrentToOutputWeights->GetTensorInfo());
46 m_ForgetGateBiasTensor = std::make_unique<arm_compute::CLTensor>();
47 BuildArmComputeTensor(*m_ForgetGateBiasTensor,
m_Data.m_ForgetGateBias->GetTensorInfo());
49 m_CellBiasTensor = std::make_unique<arm_compute::CLTensor>();
50 BuildArmComputeTensor(*m_CellBiasTensor,
m_Data.m_CellBias->GetTensorInfo());
52 m_OutputGateBiasTensor = std::make_unique<arm_compute::CLTensor>();
53 BuildArmComputeTensor(*m_OutputGateBiasTensor,
m_Data.m_OutputGateBias->GetTensorInfo());
// --- CIFG-disabled path ----------------------------------------------------
// When CIFG (coupled input/forget gate) is OFF, the input gate has its own
// weights and bias, which are registered via set_cifg_params().
56 if (!
m_Data.m_Parameters.m_CifgEnabled)
58 m_InputToInputWeightsTensor = std::make_unique<arm_compute::CLTensor>();
59 BuildArmComputeTensor(*m_InputToInputWeightsTensor,
m_Data.m_InputToInputWeights->GetTensorInfo());
61 m_RecurrentToInputWeightsTensor = std::make_unique<arm_compute::CLTensor>();
62 BuildArmComputeTensor(*m_RecurrentToInputWeightsTensor,
m_Data.m_RecurrentToInputWeights->GetTensorInfo());
// Cell-to-input (peephole) weights are optional even with CIFG off: the
// CLTensor object is always created, but only shaped if the descriptor
// actually carries the weights.
64 m_CellToInputWeightsTensor = std::make_unique<arm_compute::CLTensor>();
65 if (
m_Data.m_CellToInputWeights !=
nullptr)
67 BuildArmComputeTensor(*m_CellToInputWeightsTensor,
m_Data.m_CellToInputWeights->GetTensorInfo());
70 m_InputGateBiasTensor = std::make_unique<arm_compute::CLTensor>();
71 BuildArmComputeTensor(*m_InputGateBiasTensor,
m_Data.m_InputGateBias->GetTensorInfo());
// Pass nullptr for cell-to-input weights when absent, mirroring the check
// above, so ACL knows that peephole input connection is not used.
73 lstm_param.set_cifg_params(m_InputToInputWeightsTensor.get(),
74 m_RecurrentToInputWeightsTensor.get(),
75 m_Data.m_CellToInputWeights !=
nullptr ? m_CellToInputWeightsTensor.get() :
nullptr,
76 m_InputGateBiasTensor.get());
// --- Projection layer (optional) -------------------------------------------
79 if (
m_Data.m_Parameters.m_ProjectionEnabled)
81 m_ProjectionWeightsTensor = std::make_unique<arm_compute::CLTensor>();
82 BuildArmComputeTensor(*m_ProjectionWeightsTensor,
m_Data.m_ProjectionWeights->GetTensorInfo());
// Projection bias is optional within the projection layer; tensor object is
// created unconditionally, shaped only when the bias is present.
84 m_ProjectionBiasTensor = std::make_unique<arm_compute::CLTensor>();
85 if (
m_Data.m_ProjectionBias !=
nullptr)
87 BuildArmComputeTensor(*m_ProjectionBiasTensor,
m_Data.m_ProjectionBias->GetTensorInfo());
90 lstm_param.set_projection_params(m_ProjectionWeightsTensor.get(),
91 m_Data.m_ProjectionBias !=
nullptr ? m_ProjectionBiasTensor.get() :
nullptr);
// --- Peephole connections (optional) ---------------------------------------
94 if (
m_Data.m_Parameters.m_PeepholeEnabled)
96 m_CellToForgetWeightsTensor = std::make_unique<arm_compute::CLTensor>();
97 BuildArmComputeTensor(*m_CellToForgetWeightsTensor,
m_Data.m_CellToForgetWeights->GetTensorInfo());
99 m_CellToOutputWeightsTensor = std::make_unique<arm_compute::CLTensor>();
100 BuildArmComputeTensor(*m_CellToOutputWeightsTensor,
m_Data.m_CellToOutputWeights->GetTensorInfo());
102 lstm_param.set_peephole_params(m_CellToForgetWeightsTensor.get(), m_CellToOutputWeightsTensor.get());
// --- Layer normalization (optional) ----------------------------------------
// All four layer-norm CLTensor objects are created, but the input layer-norm
// tensor is only shaped (and only passed to ACL) when CIFG is disabled,
// because with CIFG there is no independent input gate to normalize.
105 if (
m_Data.m_Parameters.m_LayerNormEnabled)
107 m_InputLayerNormWeightsTensor = std::make_unique<arm_compute::CLTensor>();
108 m_ForgetLayerNormWeightsTensor = std::make_unique<arm_compute::CLTensor>();
109 m_CellLayerNormWeightsTensor = std::make_unique<arm_compute::CLTensor>();
110 m_OutputLayerNormWeightsTensor = std::make_unique<arm_compute::CLTensor>();
112 if (!
m_Data.m_Parameters.m_CifgEnabled)
114 BuildArmComputeTensor(*m_InputLayerNormWeightsTensor,
m_Data.m_InputLayerNormWeights->GetTensorInfo())
116 BuildArmComputeTensor(*m_ForgetLayerNormWeightsTensor,
m_Data.m_ForgetLayerNormWeights->GetTensorInfo());
117 BuildArmComputeTensor(*m_CellLayerNormWeightsTensor,
m_Data.m_CellLayerNormWeights->GetTensorInfo());
118 BuildArmComputeTensor(*m_OutputLayerNormWeightsTensor,
m_Data.m_OutputLayerNormWeights->GetTensorInfo());
120 lstm_param.set_layer_normalization_params(
m_Data.m_Parameters.m_CifgEnabled ?
nullptr :
121 m_InputLayerNormWeightsTensor.get(),
122 m_ForgetLayerNormWeightsTensor.get(),
123 m_CellLayerNormWeightsTensor.get(),
124 m_OutputLayerNormWeightsTensor.get());
// --- Bind workload I/O handles to ACL tensors ------------------------------
// Inputs:  [0] = input, [1] = output state in, [2] = cell state in.
// Outputs: [1] = output state out, [2] = cell state out, [3] = output.
// NOTE(review): m_Outputs[0] (scratch buffer slot — presumably) is not bound
// here; a dedicated m_ScratchBuffer is created below instead. TODO confirm.
127 const arm_compute::ICLTensor& input =
static_cast<IClTensorHandle*
>(
m_Data.
m_Inputs[0])->GetTensor();
128 const arm_compute::ICLTensor& output_state_in =
static_cast<IClTensorHandle*
>(
m_Data.
m_Inputs[1])->GetTensor();
129 arm_compute::ICLTensor& cell_state_in =
static_cast<IClTensorHandle*
>(
m_Data.
m_Inputs[2])->GetTensor();
131 arm_compute::ICLTensor& output_state_out =
static_cast<IClTensorHandle*
>(
m_Data.
m_Outputs[1])->GetTensor();
132 arm_compute::ICLTensor& cell_state_out =
static_cast<IClTensorHandle*
>(
m_Data.
m_Outputs[2])->GetTensor();
133 arm_compute::ICLTensor& output =
static_cast<IClTensorHandle*
>(
m_Data.
m_Outputs[3])->GetTensor();
// --- Scratch buffer sizing -------------------------------------------------
// batch_size and num_units are read from input tensor info [2]'s shape
// (dims [0] and [1] respectively); numeric_cast guards the conversion to
// unsigned int.
136 const TensorInfo& inputTensorInfo =
info.m_InputTensorInfos[2];
137 const unsigned int batch_size =
armnn::numeric_cast<
unsigned int>(inputTensorInfo.GetShape()[0]);
138 const unsigned int num_units =
armnn::numeric_cast<
unsigned int>(inputTensorInfo.GetShape()[1]);
140 m_ScratchBuffer = std::make_unique<arm_compute::CLTensor>();
// NOTE(review): the declarations of scratchBuffer1/scratchBuffer2 (and the
// else branch) sit in elided lines 142-150. Based on what is visible, the
// scratch buffer is shaped differently depending on CIFG — presumably
// 3*num_units vs 4*num_units columns per batch — TODO confirm in full source.
141 if (
m_Data.m_Parameters.m_CifgEnabled)
145 BuildArmComputeTensor(*m_ScratchBuffer, scratchBuffer1);
151 BuildArmComputeTensor(*m_ScratchBuffer, scratchBuffer2);
// Clipping thresholds for cell state and projection, forwarded verbatim to
// ACL's configure() below.
154 float cell_threshold =
m_Data.m_Parameters.m_ClippingThresCell;
155 float projection_threshold =
m_Data.m_Parameters.m_ClippingThresProj;
// --- Map ArmNN activation-function code to an ACL ActivationLayerInfo ------
// Visible mapping: 0 -> (body elided; activationLayerInfo stays default-
// constructed), 1 -> RELU, 3 -> BOUNDED_RELU(6.0), 4 -> TANH(1.0, 1.0),
// 6 -> LOGISTIC. NOTE(review): any final else / error branch (original lines
// 182-186) is elided from this listing — TODO confirm unsupported codes are
// rejected there.
158 arm_compute::ActivationLayerInfo activationLayerInfo;
159 if (
m_Data.m_Parameters.m_ActivationFunc == 0)
163 else if (
m_Data.m_Parameters.m_ActivationFunc == 1)
165 activationLayerInfo = arm_compute::ActivationLayerInfo(
166 arm_compute::ActivationLayerInfo::ActivationFunction::RELU);
168 else if (
m_Data.m_Parameters.m_ActivationFunc == 3)
170 activationLayerInfo = arm_compute::ActivationLayerInfo(
171 arm_compute::ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.0);
173 else if (
m_Data.m_Parameters.m_ActivationFunc == 4)
175 activationLayerInfo = arm_compute::ActivationLayerInfo(
176 arm_compute::ActivationLayerInfo::ActivationFunction::TANH, 1.0, 1.0);
178 else if (
m_Data.m_Parameters.m_ActivationFunc == 6)
180 activationLayerInfo = arm_compute::ActivationLayerInfo(
181 arm_compute::ActivationLayerInfo::ActivationFunction::LOGISTIC);
// --- Configure the ACL LSTM layer with everything assembled above ----------
188 m_LstmLayer.configure(&input, m_InputToForgetWeightsTensor.get(), m_InputToCellWeightsTensor.get(),
189 m_InputToOutputWeightsTensor.get(), m_RecurrentToForgetWeightsTensor.get(),
190 m_RecurrentToCellWeightsTensor.get(), m_RecurrentToOutputWeightsTensor.get(),
191 m_ForgetGateBiasTensor.get(), m_CellBiasTensor.get(), m_OutputGateBiasTensor.get(),
192 &output_state_in, &cell_state_in, m_ScratchBuffer.get(), &output_state_out,
193 &cell_state_out, &output, lstm_param, activationLayerInfo,
194 cell_threshold, projection_threshold);
// Allocate the scratch buffer's CL memory (contents left uninitialized).
196 armcomputetensorutils::InitialiseArmComputeTensorEmpty(*m_ScratchBuffer);
// --- Upload weight/bias data -----------------------------------------------
// NOTE(review): the statements inside each of the following conditionals are
// elided (original lines ~197-247). Judging by the Doxygen tooltip appended
// to this listing, they call InitializeArmComputeClTensorData() to copy each
// descriptor-held constant tensor into its CLTensor, gated by the same flags
// used when the tensors were built above — TODO confirm against full source.
208 if (!
m_Data.m_Parameters.m_CifgEnabled)
212 if (
m_Data.m_CellToInputWeights !=
nullptr)
219 if (
m_Data.m_Parameters.m_ProjectionEnabled)
222 if (
m_Data.m_ProjectionBias !=
nullptr)
228 if (
m_Data.m_Parameters.m_PeepholeEnabled)
234 if (
m_Data.m_Parameters.m_LayerNormEnabled)
236 if (!
m_Data.m_Parameters.m_CifgEnabled)
// Force ACL to finish its internal preparation (e.g. weight reshaping) now,
// at workload-construction time rather than on first execution.
248 m_LstmLayer.prepare();
void InitializeArmComputeClTensorData(arm_compute::CLTensor &clTensor, const ConstCpuTensorHandle *handle)
const QueueDescriptor m_Data
std::vector< ITensorHandle * > m_Outputs
Base class for all ArmNN exceptions, so that users can catch or filter for ArmNN-thrown exceptions specifically.
std::enable_if_t< std::is_unsigned< Source >::value &&std::is_unsigned< Dest >::value, Dest > numeric_cast(Source source)
std::vector< ITensorHandle * > m_Inputs