Diffstat (limited to 'arm_compute/runtime/NEON/functions/NEQLSTMLayer.h')
-rw-r--r--  arm_compute/runtime/NEON/functions/NEQLSTMLayer.h | 362
1 file changed, 199 insertions(+), 163 deletions(-)
diff --git a/arm_compute/runtime/NEON/functions/NEQLSTMLayer.h b/arm_compute/runtime/NEON/functions/NEQLSTMLayer.h
index 7c2e9bc5a1..009a4e0911 100644
--- a/arm_compute/runtime/NEON/functions/NEQLSTMLayer.h
+++ b/arm_compute/runtime/NEON/functions/NEQLSTMLayer.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020-2021 Arm Limited.
+ * Copyright (c) 2020-2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -25,15 +25,17 @@
#define ARM_COMPUTE_NEQLSTMLAYER_H
#include "arm_compute/core/Types.h"
+#include "arm_compute/runtime/common/LSTMParams.h"
#include "arm_compute/runtime/NEON/functions/NEActivationLayer.h"
#include "arm_compute/runtime/NEON/functions/NEArithmeticAddition.h"
#include "arm_compute/runtime/NEON/functions/NEArithmeticSubtraction.h"
#include "arm_compute/runtime/NEON/functions/NECopy.h"
+#include "arm_compute/runtime/NEON/functions/NEDequantizationLayer.h"
#include "arm_compute/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.h"
#include "arm_compute/runtime/NEON/functions/NEGEMMLowpOutputStage.h"
#include "arm_compute/runtime/NEON/functions/NEPixelWiseMultiplication.h"
+#include "arm_compute/runtime/NEON/functions/NEQuantizationLayer.h"
#include "arm_compute/runtime/NEON/functions/NETranspose.h"
-#include "arm_compute/runtime/common/LSTMParams.h"
#include <memory>
@@ -43,8 +45,13 @@ namespace arm_compute
class ITensor;
class ITensorInfo;
class NEQLSTMLayerNormalizationKernel;
-class NEGEMMLowpMatrixAReductionKernel;
-
+namespace cpu
+{
+namespace kernels
+{
+class CpuGemmLowpMatrixAReductionKernel;
+} // namespace kernels
+} // namespace cpu
/** Basic function to run @ref NEQLSTMLayer
*
* This function calls the following kernels:
@@ -54,8 +61,8 @@ class NEGEMMLowpMatrixAReductionKernel;
* -# @ref NEArithmeticSubtraction Elementwise subtraction
* -# @ref NECopy Copy kernel for copying output_state_out to output
* -# @ref NEGEMMLowpMatrixMultiplyCore Quantized matrix multiplication core. Accumulators are 32-bit integers
- * -# @ref NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPoint Convert 32-bit integers into QSYMM16
- * -# @ref NEGEMMLowpMatrixAReductionKernel For precomputing effective biases to use
+ * -# @ref NEGEMMLowpOutputStage Convert 32-bit integers into QSYMM16
+ * -# @ref cpu::kernels::CpuGemmLowpMatrixAReductionKernel For precomputing effective biases to use
* -# @ref NEPixelWiseMultiplication Elementwise multiplication
* -# @ref NETranspose Transpose function for reshaping the weights
* */
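
Taken together, these building blocks realise the standard LSTM cell update in the quantized domain. As a sketch (with (*) denoting elementwise multiplication; under CIFG the input gate is instead derived as 1 - forget_gate via the arithmetic subtraction against a tensor of ones, and the peephole terms are the optional cell-state contributions to the gate pre-activations):

    forget_gate      = sigmoid(W_f . x_t + R_f . h_{t-1} + b_f)
    cell_gate        = tanh   (W_c . x_t + R_c . h_{t-1} + b_c)
    input_gate       = sigmoid(W_i . x_t + R_i . h_{t-1} + b_i)
    output_gate      = sigmoid(W_o . x_t + R_o . h_{t-1} + b_o)
    cell_state_out   = forget_gate (*) cell_state_in + input_gate (*) cell_gate
    hidden           = output_gate (*) tanh(cell_state_out)
    output_state_out = clip(W_proj . hidden + b_proj) if projection is enabled, else hidden
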
@@ -123,12 +130,21 @@ public:
* projection_threshold (Optional) The clipping threshold for the output from the projection layer, such that values are bound within
* [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
*/
- void configure(const ITensor *input,
- const ITensor *input_to_forget_weights, const ITensor *input_to_cell_weights, const ITensor *input_to_output_weights,
- const ITensor *recurrent_to_forget_weights, const ITensor *recurrent_to_cell_weights, const ITensor *recurrent_to_output_weights,
- const ITensor *forget_gate_bias, const ITensor *cell_bias, const ITensor *output_gate_bias,
- const ITensor *cell_state_in, ITensor *output_state_in,
- ITensor *cell_state_out, ITensor *output_state_out, ITensor *output,
+ void configure(const ITensor *input,
+ const ITensor *input_to_forget_weights,
+ const ITensor *input_to_cell_weights,
+ const ITensor *input_to_output_weights,
+ const ITensor *recurrent_to_forget_weights,
+ const ITensor *recurrent_to_cell_weights,
+ const ITensor *recurrent_to_output_weights,
+ const ITensor *forget_gate_bias,
+ const ITensor *cell_bias,
+ const ITensor *output_gate_bias,
+ const ITensor *cell_state_in,
+ ITensor *output_state_in,
+ ITensor *cell_state_out,
+ ITensor *output_state_out,
+ ITensor *output,
const LSTMParams<ITensor> &lstm_params);
/** Static function to check if given info will lead to a valid configuration of @ref NEQLSTMLayer
@@ -173,12 +189,21 @@ public:
* [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
* @return a status
*/
- static Status validate(const ITensorInfo *input,
- const ITensorInfo *input_to_forget_weights, const ITensorInfo *input_to_cell_weights, const ITensorInfo *input_to_output_weights,
- const ITensorInfo *recurrent_to_forget_weights, const ITensorInfo *recurrent_to_cell_weights, const ITensorInfo *recurrent_to_output_weights,
- const ITensorInfo *forget_gate_bias, const ITensorInfo *cell_bias, const ITensorInfo *output_gate_bias,
- const ITensorInfo *cell_state_in, const ITensorInfo *output_state_in,
- const ITensorInfo *cell_state_out, const ITensorInfo *output_state_out, const ITensorInfo *output,
+ static Status validate(const ITensorInfo *input,
+ const ITensorInfo *input_to_forget_weights,
+ const ITensorInfo *input_to_cell_weights,
+ const ITensorInfo *input_to_output_weights,
+ const ITensorInfo *recurrent_to_forget_weights,
+ const ITensorInfo *recurrent_to_cell_weights,
+ const ITensorInfo *recurrent_to_output_weights,
+ const ITensorInfo *forget_gate_bias,
+ const ITensorInfo *cell_bias,
+ const ITensorInfo *output_gate_bias,
+ const ITensorInfo *cell_state_in,
+ const ITensorInfo *output_state_in,
+ const ITensorInfo *cell_state_out,
+ const ITensorInfo *output_state_out,
+ const ITensorInfo *output,
const LSTMParams<ITensorInfo> &lstm_params);
// Inherited methods overridden:
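
As a usage sketch for the configure()/validate() pair above (nothing below comes from this patch: the function name, shapes, scales and the CIFG/no-projection configuration are illustrative assumptions), the usual pattern is to validate on ITensorInfo descriptors first, then configure and run on allocated tensors:

    #include "arm_compute/core/Types.h"
    #include "arm_compute/runtime/NEON/functions/NEQLSTMLayer.h"
    #include "arm_compute/runtime/Tensor.h"

    using namespace arm_compute;

    void qlstm_single_step()
    {
        const unsigned int batch = 1, input_size = 32, num_units = 16;

        // Assumed QLSTM conventions: QASYMM8_SIGNED activations, QSYMM8 weights,
        // S32 biases, QSYMM16 cell state. Every scale below is made up.
        auto init = [](Tensor &t, const TensorShape &shape, DataType dt, const QuantizationInfo &qi = QuantizationInfo())
        {
            t.allocator()->init(TensorInfo(shape, 1, dt, qi));
        };

        const QuantizationInfo qact(1.f / 128, 0), qweights(1.f / 64), qcell(1.f / 32768);

        Tensor input, i2f_w, i2c_w, i2o_w, r2f_w, r2c_w, r2o_w, f_b, c_b, o_b;
        Tensor cell_in, state_in, cell_out, state_out, output;
        init(input, TensorShape(input_size, batch), DataType::QASYMM8_SIGNED, qact);
        for(Tensor *w : { &i2f_w, &i2c_w, &i2o_w })
        {
            init(*w, TensorShape(input_size, num_units), DataType::QSYMM8, qweights);
        }
        for(Tensor *w : { &r2f_w, &r2c_w, &r2o_w })
        {
            init(*w, TensorShape(num_units, num_units), DataType::QSYMM8, qweights);
        }
        for(Tensor *b : { &f_b, &c_b, &o_b })
        {
            init(*b, TensorShape(num_units), DataType::S32);
        }
        init(cell_in, TensorShape(num_units, batch), DataType::QSYMM16, qcell);
        init(cell_out, TensorShape(num_units, batch), DataType::QSYMM16, qcell);
        for(Tensor *t : { &state_in, &state_out, &output })
        {
            init(*t, TensorShape(num_units, batch), DataType::QASYMM8_SIGNED, qact);
        }

        // A default-constructed LSTMParams selects CIFG (no input gate). The
        // intermediate and hidden-state quantization parameters are required
        // by the quantized kernels; the values here are illustrative only.
        LSTMParams<ITensorInfo> pinfo;
        pinfo.set_matmul_scale_params(1.f / 4096, 1.f / 4096, 1.f / 4096, 1.f / 4096)
             .set_hidden_state_params(0, 1.f / 128);

        const Status st = NEQLSTMLayer::validate(input.info(), i2f_w.info(), i2c_w.info(), i2o_w.info(),
                                                 r2f_w.info(), r2c_w.info(), r2o_w.info(),
                                                 f_b.info(), c_b.info(), o_b.info(),
                                                 cell_in.info(), state_in.info(),
                                                 cell_out.info(), state_out.info(), output.info(), pinfo);
        if(st.error_code() != ErrorCode::OK)
        {
            return; // configuration rejected before any allocation happened
        }

        LSTMParams<ITensor> params;
        params.set_matmul_scale_params(1.f / 4096, 1.f / 4096, 1.f / 4096, 1.f / 4096)
              .set_hidden_state_params(0, 1.f / 128);

        NEQLSTMLayer qlstm;
        qlstm.configure(&input, &i2f_w, &i2c_w, &i2o_w, &r2f_w, &r2c_w, &r2o_w,
                        &f_b, &c_b, &o_b, &cell_in, &state_in,
                        &cell_out, &state_out, &output, params);

        for(Tensor *t : { &input, &i2f_w, &i2c_w, &i2o_w, &r2f_w, &r2c_w, &r2o_w, &f_b, &c_b, &o_b,
                          &cell_in, &state_in, &cell_out, &state_out, &output })
        {
            t->allocator()->allocate(); // back every tensor with memory before run()
        }

        qlstm.run();
    }
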
@@ -211,10 +236,17 @@ private:
* @param[in] mm_res_info Tensor info to be used to initialize output stage result tensor.
*
*/
- void configure_mm(NEGEMMLowpMatrixMultiplyCore &mm, NEGEMMLowpOutputStage &outstage, GEMMLowpOutputStageInfo &gemmlowp_info,
- const ITensor *mm_input, const ITensor *mm_weights, const ITensor *bias, Tensor *mm_res,
- Tensor *outstage_res, float gemmlowp_scale,
- const TensorInfo &mm_res_info, const TensorInfo &outstage_tensor_info);
+ void configure_mm(NEGEMMLowpMatrixMultiplyCore &mm,
+ NEGEMMLowpOutputStage &outstage,
+ GEMMLowpOutputStageInfo &gemmlowp_info,
+ const ITensor *mm_input,
+ const ITensor *mm_weights,
+ const ITensor *bias,
+ Tensor *mm_res,
+ Tensor *outstage_res,
+ float gemmlowp_scale,
+ const TensorInfo &mm_res_info,
+ const TensorInfo &outstage_tensor_info);
MemoryGroup _memory_group;
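
The matrix A reduction kernels exist because of the usual zero-point folding in quantized GEMM. For a dot product between asymmetric quantized activations a (offset a_off) and symmetric quantized weights w, the 32-bit accumulator expands as

    sum_k (a_k - a_off) * w_k = sum_k a_k * w_k  -  a_off * sum_k w_k

Since the weights are constant, the sums over them can be computed once at prepare() time and folded, together with the real bias, into the *_eff_bias tensors declared below; each run() is then left with only the raw integer GEMM and an output stage.
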
@@ -223,8 +255,8 @@ private:
{
static constexpr uint32_t max_dimension_supported = 2;
- ITensor *_src{ nullptr };
- ITensor *_dst{ nullptr };
+ ITensor *_src{nullptr};
+ ITensor *_dst{nullptr};
size_t _row_size{};
Window _window{};
@@ -250,70 +282,73 @@ private:
};
// Functions used
- NETranspose _transpose_input_to_forget_weights;
- NETranspose _transpose_input_to_cell_weights;
- NETranspose _transpose_input_to_output_weights;
- NETranspose _transpose_input_to_input_weights;
- NETranspose _transpose_recurrent_to_forget_weights;
- NETranspose _transpose_recurrent_to_cell_weights;
- NETranspose _transpose_recurrent_to_output_weights;
- NETranspose _transpose_recurrent_to_input_weights;
- NETranspose _transpose_projection_weights;
- std::unique_ptr<NEGEMMLowpMatrixAReductionKernel> _input_to_input_reduction;
- std::unique_ptr<NEGEMMLowpMatrixAReductionKernel> _recurrent_to_input_reduction;
- std::unique_ptr<NEGEMMLowpMatrixAReductionKernel> _input_to_forget_reduction;
- std::unique_ptr<NEGEMMLowpMatrixAReductionKernel> _recurrent_to_forget_reduction;
- std::unique_ptr<NEGEMMLowpMatrixAReductionKernel> _input_to_cell_reduction;
- std::unique_ptr<NEGEMMLowpMatrixAReductionKernel> _recurrent_to_cell_reduction;
- std::unique_ptr<NEGEMMLowpMatrixAReductionKernel> _input_to_output_reduction;
- std::unique_ptr<NEGEMMLowpMatrixAReductionKernel> _recurrent_to_output_reduction;
- std::unique_ptr<NEGEMMLowpMatrixAReductionKernel> _projection_reduction;
- NEArithmeticAddition _projection_bias_add;
- NEGEMMLowpMatrixMultiplyCore _mm_input_to_forget;
- NEGEMMLowpMatrixMultiplyCore _mm_recurrent_to_forget;
- NEPixelWiseMultiplication _pixelwise_mul_cell_to_forget;
- NEGEMMLowpOutputStage _input_to_forget_outstage;
- NEGEMMLowpOutputStage _recurrent_to_forget_outstage;
- NEGEMMLowpOutputStage _cell_to_forget_outstage;
- NEArithmeticAddition _accumulate_input_recurrent_forget;
- NEArithmeticAddition _accumulate_cell_forget;
- NEActivationLayer _forget_gate_sigmoid;
- NEGEMMLowpMatrixMultiplyCore _mm_input_to_cell;
- NEGEMMLowpOutputStage _input_to_cell_outstage;
- NEGEMMLowpMatrixMultiplyCore _mm_recurrent_to_cell;
- NEGEMMLowpOutputStage _recurrent_to_cell_outstage;
- NEArithmeticAddition _accumulate_input_recurrent_modulation;
- NEActivationLayer _cell_gate_tanh;
- NEArithmeticSubtraction _input_gate_sub;
- NEGEMMLowpMatrixMultiplyCore _mm_input_to_input;
- NEGEMMLowpOutputStage _input_to_input_outstage;
- NEGEMMLowpMatrixMultiplyCore _mm_recurrent_to_input;
- NEGEMMLowpOutputStage _recurrent_to_input_outstage;
- NEArithmeticAddition _accumulate_input_recurrent_input;
- NEPixelWiseMultiplication _pixelwise_mul_cell_to_input;
- NEGEMMLowpOutputStage _cell_to_input_outstage;
- NEArithmeticAddition _accumulate_cell_input;
- NEActivationLayer _input_gate_sigmoid;
- NEPixelWiseMultiplication _pixelwise_mul_forget_cell;
- NEPixelWiseMultiplication _pixelwise_mul_input_cell;
- NEArithmeticAddition _add_forget_cell;
- NEActivationLayer _cell_clip;
- NEGEMMLowpMatrixMultiplyCore _mm_input_to_output;
- NEGEMMLowpOutputStage _input_to_output_outstage;
- NEGEMMLowpMatrixMultiplyCore _mm_recurrent_to_output;
- NEGEMMLowpOutputStage _recurrent_to_output_outstage;
- NEArithmeticAddition _accumulate_input_recurrent_output;
- NEPixelWiseMultiplication _pixelwise_mul_cell_to_output;
- NEGEMMLowpOutputStage _cell_to_output_outstage;
- NEArithmeticAddition _accumulate_cell_to_output;
- NEActivationLayer _output_gate_sigmoid;
- NEActivationLayer _hidden_tanh;
- NEPixelWiseMultiplication _pixelwise_mul_hidden;
- NEGEMMLowpOutputStage _hidden_outstage;
- NEGEMMLowpMatrixMultiplyCore _mm_projection;
- NEGEMMLowpOutputStage _projection_outstage;
- NEArithmeticAddition _accumulate_projection;
- NEActivationLayer _projection_clip;
+
+ NEDequantizationLayer _dequantize_input_to_forget_weights;
+ NEQuantizationLayer _quantize_input_to_forget_weights;
+ NETranspose _transpose_input_to_forget_weights;
+ NETranspose _transpose_input_to_cell_weights;
+ NETranspose _transpose_input_to_output_weights;
+ NETranspose _transpose_input_to_input_weights;
+ NETranspose _transpose_recurrent_to_forget_weights;
+ NETranspose _transpose_recurrent_to_cell_weights;
+ NETranspose _transpose_recurrent_to_output_weights;
+ NETranspose _transpose_recurrent_to_input_weights;
+ NETranspose _transpose_projection_weights;
+ std::unique_ptr<cpu::kernels::CpuGemmLowpMatrixAReductionKernel> _input_to_input_reduction;
+ std::unique_ptr<cpu::kernels::CpuGemmLowpMatrixAReductionKernel> _recurrent_to_input_reduction;
+ std::unique_ptr<cpu::kernels::CpuGemmLowpMatrixAReductionKernel> _input_to_forget_reduction;
+ std::unique_ptr<cpu::kernels::CpuGemmLowpMatrixAReductionKernel> _recurrent_to_forget_reduction;
+ std::unique_ptr<cpu::kernels::CpuGemmLowpMatrixAReductionKernel> _input_to_cell_reduction;
+ std::unique_ptr<cpu::kernels::CpuGemmLowpMatrixAReductionKernel> _recurrent_to_cell_reduction;
+ std::unique_ptr<cpu::kernels::CpuGemmLowpMatrixAReductionKernel> _input_to_output_reduction;
+ std::unique_ptr<cpu::kernels::CpuGemmLowpMatrixAReductionKernel> _recurrent_to_output_reduction;
+ std::unique_ptr<cpu::kernels::CpuGemmLowpMatrixAReductionKernel> _projection_reduction;
+ NEArithmeticAddition _projection_bias_add;
+ NEGEMMLowpMatrixMultiplyCore _mm_input_to_forget;
+ NEGEMMLowpMatrixMultiplyCore _mm_recurrent_to_forget;
+ NEPixelWiseMultiplication _pixelwise_mul_cell_to_forget;
+ NEGEMMLowpOutputStage _input_to_forget_outstage;
+ NEGEMMLowpOutputStage _recurrent_to_forget_outstage;
+ NEGEMMLowpOutputStage _cell_to_forget_outstage;
+ NEArithmeticAddition _accumulate_input_recurrent_forget;
+ NEArithmeticAddition _accumulate_cell_forget;
+ NEActivationLayer _forget_gate_sigmoid;
+ NEGEMMLowpMatrixMultiplyCore _mm_input_to_cell;
+ NEGEMMLowpOutputStage _input_to_cell_outstage;
+ NEGEMMLowpMatrixMultiplyCore _mm_recurrent_to_cell;
+ NEGEMMLowpOutputStage _recurrent_to_cell_outstage;
+ NEArithmeticAddition _accumulate_input_recurrent_modulation;
+ NEActivationLayer _cell_gate_tanh;
+ NEArithmeticSubtraction _input_gate_sub;
+ NEGEMMLowpMatrixMultiplyCore _mm_input_to_input;
+ NEGEMMLowpOutputStage _input_to_input_outstage;
+ NEGEMMLowpMatrixMultiplyCore _mm_recurrent_to_input;
+ NEGEMMLowpOutputStage _recurrent_to_input_outstage;
+ NEArithmeticAddition _accumulate_input_recurrent_input;
+ NEPixelWiseMultiplication _pixelwise_mul_cell_to_input;
+ NEGEMMLowpOutputStage _cell_to_input_outstage;
+ NEArithmeticAddition _accumulate_cell_input;
+ NEActivationLayer _input_gate_sigmoid;
+ NEPixelWiseMultiplication _pixelwise_mul_forget_cell;
+ NEPixelWiseMultiplication _pixelwise_mul_input_cell;
+ NEArithmeticAddition _add_forget_cell;
+ NEActivationLayer _cell_clip;
+ NEGEMMLowpMatrixMultiplyCore _mm_input_to_output;
+ NEGEMMLowpOutputStage _input_to_output_outstage;
+ NEGEMMLowpMatrixMultiplyCore _mm_recurrent_to_output;
+ NEGEMMLowpOutputStage _recurrent_to_output_outstage;
+ NEArithmeticAddition _accumulate_input_recurrent_output;
+ NEPixelWiseMultiplication _pixelwise_mul_cell_to_output;
+ NEGEMMLowpOutputStage _cell_to_output_outstage;
+ NEArithmeticAddition _accumulate_cell_to_output;
+ NEActivationLayer _output_gate_sigmoid;
+ NEActivationLayer _hidden_tanh;
+ NEPixelWiseMultiplication _pixelwise_mul_hidden;
+ NEGEMMLowpOutputStage _hidden_outstage;
+ NEGEMMLowpMatrixMultiplyCore _mm_projection;
+ NEGEMMLowpOutputStage _projection_outstage;
+ NEArithmeticAddition _accumulate_projection;
+ NEActivationLayer _projection_clip;
TensorCopyKernel _projection_bias_copy;
TensorCopyKernel _projection_output_to_accumulate_copy;
@@ -325,19 +360,16 @@ private:
NECopy _copy_output;
// Tensor pointers
- const ITensor *_input_to_input_weights
- {
- nullptr
- };
- const ITensor *_recurrent_to_input_weights{ nullptr };
- const ITensor *_projection_bias{ nullptr };
- const ITensor *_input_to_forget_weights{ nullptr };
- const ITensor *_input_to_cell_weights{ nullptr };
- const ITensor *_input_to_output_weights{ nullptr };
- const ITensor *_recurrent_to_forget_weights{ nullptr };
- const ITensor *_recurrent_to_cell_weights{ nullptr };
- const ITensor *_recurrent_to_output_weights{ nullptr };
- const ITensor *_projection_weights{ nullptr };
+ const ITensor *_input_to_input_weights{nullptr};
+ const ITensor *_recurrent_to_input_weights{nullptr};
+ const ITensor *_projection_bias{nullptr};
+ const ITensor *_input_to_forget_weights{nullptr};
+ const ITensor *_input_to_cell_weights{nullptr};
+ const ITensor *_input_to_output_weights{nullptr};
+ const ITensor *_recurrent_to_forget_weights{nullptr};
+ const ITensor *_recurrent_to_cell_weights{nullptr};
+ const ITensor *_recurrent_to_output_weights{nullptr};
+ const ITensor *_projection_weights{nullptr};
std::array<const ITensor *, _layer_norm_count> _layer_norm_weights{};
std::array<const ITensor *, _layer_norm_count> _layer_norm_bias{};
@@ -372,63 +404,66 @@ private:
return _layer_norms[getGateIndex(g)];
}
- void configure_layer_norm(LayerNormGate g, const ITensor *in);
+ void configure_layer_norm(LayerNormGate g, const ITensor *in);
static Status validate_layer_norm(const ITensorInfo &in, const ITensorInfo &weight, const ITensorInfo &bias);
// Temporary tensors
- Tensor _input_to_forget_weights_transposed{ nullptr };
- Tensor _input_to_cell_weights_transposed{ nullptr };
- Tensor _input_to_output_weights_transposed{ nullptr };
- Tensor _input_to_input_weights_transposed{ nullptr };
- Tensor _recurrent_to_forget_weights_transposed{ nullptr };
- Tensor _recurrent_to_cell_weights_transposed{ nullptr };
- Tensor _recurrent_to_output_weights_transposed{ nullptr };
- Tensor _recurrent_to_input_weights_transposed{ nullptr };
- Tensor _projection_weights_transposed{ nullptr };
- Tensor _input_to_input_eff_bias{ nullptr };
- Tensor _recurrent_to_input_eff_bias{ nullptr };
- Tensor _input_to_forget_eff_bias{ nullptr };
- Tensor _recurrent_to_forget_eff_bias{ nullptr };
- Tensor _input_to_cell_eff_bias{ nullptr };
- Tensor _recurrent_to_cell_eff_bias{ nullptr };
- Tensor _input_to_output_eff_bias{ nullptr };
- Tensor _recurrent_to_output_eff_bias{ nullptr };
- Tensor _projection_reduction_res{ nullptr };
- Tensor _projection_eff_bias{ nullptr };
- Tensor _mm_input_to_forget_res{ nullptr };
- Tensor _mm_recurrent_to_forget_res{ nullptr };
- Tensor _mul_cell_to_forget_res{ nullptr };
- Tensor _input_to_forget_outstage_res{ nullptr };
- Tensor _cell_to_forget_outstage_res{ nullptr };
- Tensor _recurrent_to_forget_outstage_res{ nullptr };
- Tensor _forget_gate{ nullptr };
- Tensor _mm_input_to_cell_res{ nullptr };
- Tensor _input_to_cell_outstage_res{ nullptr };
- Tensor _mm_recurrent_to_cell_res{ nullptr };
- Tensor _recurrent_to_cell_outstage_res{ nullptr };
- Tensor _cell_gate{ nullptr };
- Tensor _mul_input_cell_res{ nullptr };
- Tensor _mm_input_to_input_res{ nullptr };
- Tensor _input_to_input_outstage_res{ nullptr };
- Tensor _mm_recurrent_to_input_res{ nullptr };
- Tensor _mul_cell_to_input_res{ nullptr };
- Tensor _cell_to_input_outstage_res{ nullptr };
- Tensor _recurrent_to_input_outstage_res{ nullptr };
- Tensor _input_gate{ nullptr };
- Tensor _mm_input_to_output_res{ nullptr };
- Tensor _input_to_output_outstage_res{ nullptr };
- Tensor _mm_recurrent_to_output_res{ nullptr };
- Tensor _mul_cell_to_output_res{ nullptr };
- Tensor _cell_to_output_outstage_res{ nullptr };
- Tensor _recurrent_to_output_outstage_res{ nullptr };
- Tensor _output_gate{ nullptr };
- Tensor _hidden_mul_res{ nullptr };
- Tensor _hidden_gate{ nullptr };
- Tensor _mm_projection_res{ nullptr };
- Tensor _projection_outstage_res{ nullptr };
- Tensor _projection_out_res{ nullptr };
- Tensor _projection_accumulate_res{ nullptr };
- Tensor _ones{ nullptr };
+ Tensor _input_to_forget_weights_f32{nullptr};
+ Tensor _input_to_forget_weights_symm8{nullptr};
+
+ Tensor _input_to_forget_weights_transposed{nullptr};
+ Tensor _input_to_cell_weights_transposed{nullptr};
+ Tensor _input_to_output_weights_transposed{nullptr};
+ Tensor _input_to_input_weights_transposed{nullptr};
+ Tensor _recurrent_to_forget_weights_transposed{nullptr};
+ Tensor _recurrent_to_cell_weights_transposed{nullptr};
+ Tensor _recurrent_to_output_weights_transposed{nullptr};
+ Tensor _recurrent_to_input_weights_transposed{nullptr};
+ Tensor _projection_weights_transposed{nullptr};
+ Tensor _input_to_input_eff_bias{nullptr};
+ Tensor _recurrent_to_input_eff_bias{nullptr};
+ Tensor _input_to_forget_eff_bias{nullptr};
+ Tensor _recurrent_to_forget_eff_bias{nullptr};
+ Tensor _input_to_cell_eff_bias{nullptr};
+ Tensor _recurrent_to_cell_eff_bias{nullptr};
+ Tensor _input_to_output_eff_bias{nullptr};
+ Tensor _recurrent_to_output_eff_bias{nullptr};
+ Tensor _projection_reduction_res{nullptr};
+ Tensor _projection_eff_bias{nullptr};
+ Tensor _mm_input_to_forget_res{nullptr};
+ Tensor _mm_recurrent_to_forget_res{nullptr};
+ Tensor _mul_cell_to_forget_res{nullptr};
+ Tensor _input_to_forget_outstage_res{nullptr};
+ Tensor _cell_to_forget_outstage_res{nullptr};
+ Tensor _recurrent_to_forget_outstage_res{nullptr};
+ Tensor _forget_gate{nullptr};
+ Tensor _mm_input_to_cell_res{nullptr};
+ Tensor _input_to_cell_outstage_res{nullptr};
+ Tensor _mm_recurrent_to_cell_res{nullptr};
+ Tensor _recurrent_to_cell_outstage_res{nullptr};
+ Tensor _cell_gate{nullptr};
+ Tensor _mul_input_cell_res{nullptr};
+ Tensor _mm_input_to_input_res{nullptr};
+ Tensor _input_to_input_outstage_res{nullptr};
+ Tensor _mm_recurrent_to_input_res{nullptr};
+ Tensor _mul_cell_to_input_res{nullptr};
+ Tensor _cell_to_input_outstage_res{nullptr};
+ Tensor _recurrent_to_input_outstage_res{nullptr};
+ Tensor _input_gate{nullptr};
+ Tensor _mm_input_to_output_res{nullptr};
+ Tensor _input_to_output_outstage_res{nullptr};
+ Tensor _mm_recurrent_to_output_res{nullptr};
+ Tensor _mul_cell_to_output_res{nullptr};
+ Tensor _cell_to_output_outstage_res{nullptr};
+ Tensor _recurrent_to_output_outstage_res{nullptr};
+ Tensor _output_gate{nullptr};
+ Tensor _hidden_mul_res{nullptr};
+ Tensor _hidden_gate{nullptr};
+ Tensor _mm_projection_res{nullptr};
+ Tensor _projection_outstage_res{nullptr};
+ Tensor _projection_out_res{nullptr};
+ Tensor _projection_accumulate_res{nullptr};
+ Tensor _ones{nullptr};
std::array<Tensor, _layer_norm_count> _layer_norm_output{};
inline Tensor &get_layer_norm_output(LayerNormGate g)
@@ -436,14 +471,15 @@ private:
return _layer_norm_output[getGateIndex(g)];
}
- bool _is_prepared{ false };
- bool _has_cifg{ false };
- bool _has_cell_clipping{ false };
- bool _has_projection{ false };
- bool _has_projection_clipping{ false };
- bool _has_peephole{ false };
- bool _has_layer_norm{ false };
- bool _projection_tensor_copy_required{ false };
+ bool _is_prepared{false};
+ bool _has_cifg{false};
+ bool _has_cell_clipping{false};
+ bool _has_projection{false};
+ bool _has_projection_clipping{false};
+ bool _has_peephole{false};
+ bool _has_layer_norm{false};
+ bool _projection_tensor_copy_required{false};
+ bool _convert_input_to_forget_weights_to_qsymm8{false};
};
} // namespace arm_compute
#endif /* ARM_COMPUTE_NEQLSTMLAYER_H */
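
The new members above (the NEDequantizationLayer/NEQuantizationLayer functions, the _input_to_forget_weights_f32 and _input_to_forget_weights_symm8 scratch tensors, and the _convert_input_to_forget_weights_to_qsymm8 flag) point at the substance of this patch: when input_to_forget_weights arrives as QASYMM8_SIGNED rather than QSYMM8, it is converted through a float round trip before being transposed. A minimal standalone sketch of such a round trip, assuming the function name, the caller-provided scratch tensors, and the choice of reusing the source scale:

    #include "arm_compute/runtime/NEON/functions/NEDequantizationLayer.h"
    #include "arm_compute/runtime/NEON/functions/NEQuantizationLayer.h"
    #include "arm_compute/runtime/Tensor.h"

    using namespace arm_compute;

    // Converts asymmetric signed 8-bit weights to symmetric 8-bit via an F32
    // round trip; taking the symmetric scale from the source tensor is an
    // assumption here, not something this header specifies.
    void convert_weights_to_qsymm8(const ITensor *weights_qasymm8_signed, Tensor &weights_f32, Tensor &weights_qsymm8)
    {
        const TensorShape shape = weights_qasymm8_signed->info()->tensor_shape();
        const float       scale = weights_qasymm8_signed->info()->quantization_info().uniform().scale;

        // QASYMM8_SIGNED -> F32: removes the asymmetric zero point.
        weights_f32.allocator()->init(TensorInfo(shape, 1, DataType::F32));
        NEDequantizationLayer dequantize;
        dequantize.configure(weights_qasymm8_signed, &weights_f32);

        // F32 -> QSYMM8: requantizes symmetrically (QSYMM8 has no offset).
        weights_qsymm8.allocator()->init(TensorInfo(shape, 1, DataType::QSYMM8, QuantizationInfo(scale)));
        NEQuantizationLayer quantize;
        quantize.configure(&weights_f32, &weights_qsymm8);

        weights_f32.allocator()->allocate();
        weights_qsymm8.allocator()->allocate();
        dequantize.run();
        quantize.run();
    }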