author     Michalis Spyrou <michalis.spyrou@arm.com>  2019-05-28 10:04:57 +0100
committer  Giuseppe Rossini <giuseppe.rossini@arm.com>  2019-07-16 16:08:25 +0000
commit     ba27e4467dfc04e23ce9483330be062e9aaebdc5 (patch)
tree       3bb9e113307f4358b6f52b399b43f0efa088fc1f /arm_compute
parent     d7ed672e4c4deecb7498581790b87bfe99fcf054 (diff)
download   ComputeLibrary-ba27e4467dfc04e23ce9483330be062e9aaebdc5.tar.gz
COMPMID-2236: QUANTIZED_16BIT_LSTM operator for NEON
Change-Id: I554023508e09b790ecc1bbdada529697d6c7b616
Signed-off-by: giuros01 <giuseppe.rossini@arm.com>
Reviewed-on: https://review.mlplatform.org/c/1551
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Michalis Spyrou <michalis.spyrou@arm.com>
Diffstat (limited to 'arm_compute')
-rw-r--r--  arm_compute/core/NEON/kernels/NEDequantizationLayerKernel.h  |   4
-rw-r--r--  arm_compute/core/Validate.h                                   |   4
-rw-r--r--  arm_compute/runtime/NEON/NEFunctions.h                        |   1
-rw-r--r--  arm_compute/runtime/NEON/functions/NEDequantizationLayer.h    |   4
-rw-r--r--  arm_compute/runtime/NEON/functions/NELSTMLayerQuantized.h     | 205
-rw-r--r--  arm_compute/runtime/NEON/functions/NEQuantizationLayer.h      |   4
6 files changed, 214 insertions, 8 deletions
diff --git a/arm_compute/core/NEON/kernels/NEDequantizationLayerKernel.h b/arm_compute/core/NEON/kernels/NEDequantizationLayerKernel.h
index 3320ba6889..f0a2a57d1a 100644
--- a/arm_compute/core/NEON/kernels/NEDequantizationLayerKernel.h
+++ b/arm_compute/core/NEON/kernels/NEDequantizationLayerKernel.h
@@ -52,13 +52,13 @@ public:
~NEDequantizationLayerKernel() = default;
/** Set input, output tensors.
*
- * @param[in] input Source tensor. Data type supported: QASYMM8/QSYMM8.
+ * @param[in] input Source tensor. Data type supported: QASYMM8/QSYMM8/QSYMM16.
 * @param[out] output Destination tensor with the same dimensions as input. Data type supported: F16/F32.
*/
void configure(const ITensor *input, ITensor *output);
/** Static function to check if given info will lead to a valid configuration of @ref NEDequantizationLayerKernel
*
- * @param[in] input Input tensor info. Data types supported: QASYMM8/QSYMM8.
+ * @param[in] input Input tensor info. Data types supported: QASYMM8/QSYMM8/QSYMM16.
* @param[in] output Output tensor info. Data types supported: F16/F32.
*
* @return a status
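
Usage note: the static validate() documented above can be used to probe the widened type support without allocating tensors. A minimal sketch, assuming hand-built TensorInfo objects; the shape and scale are illustrative, not taken from this patch:

    #include "arm_compute/core/Error.h"
    #include "arm_compute/core/NEON/kernels/NEDequantizationLayerKernel.h"
    #include "arm_compute/core/TensorInfo.h"

    using namespace arm_compute;

    // Returns true if the QSYMM16 -> F32 path added by this patch is accepted.
    bool qsymm16_dequant_supported()
    {
        const TensorInfo src(TensorShape(32U), 1, DataType::QSYMM16, QuantizationInfo(1.f / 32768.f));
        const TensorInfo dst(TensorShape(32U), 1, DataType::F32);
        return NEDequantizationLayerKernel::validate(&src, &dst).error_code() == ErrorCode::OK;
    }
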
diff --git a/arm_compute/core/Validate.h b/arm_compute/core/Validate.h
index dab4221a3b..37c7b50ec7 100644
--- a/arm_compute/core/Validate.h
+++ b/arm_compute/core/Validate.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2018 ARM Limited.
+ * Copyright (c) 2016-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -565,7 +565,7 @@ inline arm_compute::Status error_on_mismatching_quantization_info(const char *fu
DataType &&first_data_type = tensor_info_1->data_type();
const QuantizationInfo first_quantization_info = tensor_info_1->quantization_info();
- if(!is_data_type_quantized_asymmetric(first_data_type))
+ if(!is_data_type_quantized(first_data_type))
{
return arm_compute::Status{};
}
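
Note on this hunk: QSYMM8 and QSYMM16 are quantized but not asymmetric, so the old guard returned an empty Status early and the quantization-info mismatch check never ran for symmetric types; is_data_type_quantized() covers them as well. A small illustrative sketch (not part of this patch):

    #include "arm_compute/core/Utils.h"

    using namespace arm_compute;

    void guard_difference()
    {
        // Old guard: false for QSYMM16, so the mismatch check was skipped.
        const bool old_guard = is_data_type_quantized_asymmetric(DataType::QSYMM16);
        // New guard: true for QSYMM16, so the mismatch check now runs.
        const bool new_guard = is_data_type_quantized(DataType::QSYMM16);
        (void)old_guard;
        (void)new_guard;
    }
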
diff --git a/arm_compute/runtime/NEON/NEFunctions.h b/arm_compute/runtime/NEON/NEFunctions.h
index d44afcbb0f..b59f24eed5 100644
--- a/arm_compute/runtime/NEON/NEFunctions.h
+++ b/arm_compute/runtime/NEON/NEFunctions.h
@@ -95,6 +95,7 @@
#include "arm_compute/runtime/NEON/functions/NEIntegralImage.h"
#include "arm_compute/runtime/NEON/functions/NEL2NormalizeLayer.h"
#include "arm_compute/runtime/NEON/functions/NELSTMLayer.h"
+#include "arm_compute/runtime/NEON/functions/NELSTMLayerQuantized.h"
#include "arm_compute/runtime/NEON/functions/NELaplacianPyramid.h"
#include "arm_compute/runtime/NEON/functions/NELaplacianReconstruct.h"
#include "arm_compute/runtime/NEON/functions/NELocallyConnectedLayer.h"
diff --git a/arm_compute/runtime/NEON/functions/NEDequantizationLayer.h b/arm_compute/runtime/NEON/functions/NEDequantizationLayer.h
index 8c24b38cee..c08366e5a7 100644
--- a/arm_compute/runtime/NEON/functions/NEDequantizationLayer.h
+++ b/arm_compute/runtime/NEON/functions/NEDequantizationLayer.h
@@ -39,13 +39,13 @@ class NEDequantizationLayer : public INESimpleFunctionNoBorder
public:
/** Configure the kernel.
*
- * @param[in] input Source tensor. Data types supported: QASYMM8/QSYMM8.
+ * @param[in] input Source tensor. Data types supported: QASYMM8/QSYMM8/QSYMM16.
 * @param[out] output Destination tensor with the same dimensions as input. Data type supported: F16/F32.
*/
void configure(const ITensor *input, ITensor *output);
/** Static function to check if given info will lead to a valid configuration of @ref NEDequantizationLayer
*
- * @param[in] input Input tensor info. Data types supported: QASYMM8/QSYMM8.
+ * @param[in] input Input tensor info. Data types supported: QASYMM8/QSYMM8/QSYMM16.
* @param[in] output Output tensor info. Data type supported: F16/F32.
*
* @return a status
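
Usage note: with this change a QSYMM16 tensor can be dequantized directly through the runtime function. A minimal configure/run sketch; the shape and scale are illustrative assumptions, not values from this patch:

    #include "arm_compute/core/TensorInfo.h"
    #include "arm_compute/runtime/NEON/functions/NEDequantizationLayer.h"
    #include "arm_compute/runtime/Tensor.h"

    using namespace arm_compute;

    void dequantize_qsymm16_example()
    {
        Tensor src{};
        Tensor dst{};
        src.allocator()->init(TensorInfo(TensorShape(16U, 4U), 1, DataType::QSYMM16, QuantizationInfo(1.f / 32768.f)));
        dst.allocator()->init(TensorInfo(TensorShape(16U, 4U), 1, DataType::F32));

        NEDequantizationLayer dequant;
        dequant.configure(&src, &dst); // data type and scale are read from src's info

        src.allocator()->allocate();
        dst.allocator()->allocate();
        // ... fill src with quantized int16 values here ...

        dequant.run();
    }
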
diff --git a/arm_compute/runtime/NEON/functions/NELSTMLayerQuantized.h b/arm_compute/runtime/NEON/functions/NELSTMLayerQuantized.h
new file mode 100644
index 0000000000..b45d714990
--- /dev/null
+++ b/arm_compute/runtime/NEON/functions/NELSTMLayerQuantized.h
@@ -0,0 +1,205 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_NELSTMLAYERQUANTIZED_H__
+#define __ARM_COMPUTE_NELSTMLAYERQUANTIZED_H__
+
+#include "arm_compute/core/Types.h"
+#include "arm_compute/runtime/NEON/functions/NEActivationLayer.h"
+#include "arm_compute/runtime/NEON/functions/NEArithmeticAddition.h"
+#include "arm_compute/runtime/NEON/functions/NEConcatenateLayer.h"
+#include "arm_compute/runtime/NEON/functions/NEDequantizationLayer.h"
+#include "arm_compute/runtime/NEON/functions/NEElementwiseOperations.h"
+#include "arm_compute/runtime/NEON/functions/NEFullyConnectedLayer.h"
+#include "arm_compute/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.h"
+#include "arm_compute/runtime/NEON/functions/NEGEMMLowpOutputStage.h"
+#include "arm_compute/runtime/NEON/functions/NEPixelWiseMultiplication.h"
+#include "arm_compute/runtime/NEON/functions/NEQuantizationLayer.h"
+#include "arm_compute/runtime/NEON/functions/NESlice.h"
+#include "arm_compute/runtime/NEON/functions/NETranspose.h"
+
+#include "arm_compute/runtime/common/LSTMParams.h"
+
+namespace arm_compute
+{
+// Forward declarations
+class ITensor;
+
+/** Basic function to run @ref NELSTMLayerQuantized
+ *
+ * This function calls the following NEON functions/kernels:
+ *
+ * -# @ref NEGEMMLowpMatrixMultiplyCore Quantized matrix multiplication core. Accumulators are 32-bit integers
+ * -# @ref NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPoint Convert 32-bit integers into QSYMM16
+ * -# @ref NETranspose Matrix transpose
+ * -# @ref NEConcatenateLayer Tensor concatenation
+ * -# @ref NEActivationLayer Activation functions (tanh and logistic)
+ * -# @ref NEArithmeticAddition Elementwise addition
+ * -# @ref NEPixelWiseMultiplication Elementwise multiplication
+ * -# @ref NESlice Tensor slicing
+ * -# @ref NEDequantizationLayer Dequantize into float
+ * -# @ref NEQuantizationLayer Quantize from float
+ */
+class NELSTMLayerQuantized : public IFunction
+{
+public:
+ /** Default constructor */
+ NELSTMLayerQuantized(std::shared_ptr<IMemoryManager> memory_manager = nullptr);
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
+ NELSTMLayerQuantized(const NELSTMLayerQuantized &) = delete;
+ /** Default move constructor */
+ NELSTMLayerQuantized(NELSTMLayerQuantized &&) = default;
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
+ NELSTMLayerQuantized &operator=(const NELSTMLayerQuantized &) = delete;
+ /** Default move assignment operator */
+ NELSTMLayerQuantized &operator=(NELSTMLayerQuantized &&) = default;
+ /** Initialize function's tensors.
+ *
+ * @param[in] input Source tensor. Input is a 2D tensor with dimensions [input_size, batch_size]. Data types supported: QASYMM8.
+ * @param[in] input_to_input_weights 2D weights tensor with dimensions [input_size, output_size]. Data type supported: Same as @p input.
+ * @param[in] input_to_forget_weights 2D weights tensor with dimensions [input_size, output_size]. Data type supported: Same as @p input.
+ * @param[in] input_to_cell_weights 2D weights tensor with dimensions [input_size, output_size]. Data type supported: Same as @p input.
+ * @param[in] input_to_output_weights 2D weights tensor with dimensions [input_size, output_size]. Data type supported: Same as @p input.
+ * @param[in] recurrent_to_input_weights 2D weights tensor with dimensions [output_size, output_size]. Data type supported: Same as @p input.
+ * @param[in] recurrent_to_forget_weights 2D weights tensor with dimensions [output_size, output_size]. Data type supported: Same as @p input.
+ * @param[in] recurrent_to_cell_weights 2D weights tensor with dimensions [output_size, output_size]. Data type supported: Same as @p input.
+ * @param[in] recurrent_to_output_weights 2D weights tensor with dimensions [output_size, output_size]. Data type supported: Same as @p input.
+ * @param[in] input_gate_bias 1D weights tensor with dimensions [output_size]. Data type supported: S32.
+ * @param[in] forget_gate_bias 1D weights tensor with dimensions [output_size]. Data type supported: S32.
+ * @param[in] cell_bias 1D weights tensor with dimensions [output_size]. Data type supported: S32.
+ * @param[in] output_gate_bias 1D weights tensor with dimensions [output_size]. Data type supported: S32.
+ * @param[in] cell_state_in 2D tensor with dimensions [output_size, batch_size]. Data type supported: QSYMM16.
+ * @param[in] output_state_in 2D tensor with dimensions [output_size, batch_size]. Data type supported: Same as @p input.
+ * @param[out] cell_state_out Destination tensor. Output is a 2D tensor with dimensions [output_size, batch_size]. Data type supported: QSYMM16.
+ * @param[out] output_state_out Destination tensor. Output is a 2D tensor with dimensions [output_size, batch_size]. Data types supported: Same as @p input.
+ */
+ void configure(const ITensor *input,
+ const ITensor *input_to_input_weights, const ITensor *input_to_forget_weights, const ITensor *input_to_cell_weights, const ITensor *input_to_output_weights,
+ const ITensor *recurrent_to_input_weights, const ITensor *recurrent_to_forget_weights, const ITensor *recurrent_to_cell_weights, const ITensor *recurrent_to_output_weights,
+ const ITensor *input_gate_bias, const ITensor *forget_gate_bias, const ITensor *cell_bias, const ITensor *output_gate_bias,
+ ITensor *cell_state_in, const ITensor *output_state_in,
+ ITensor *cell_state_out, ITensor *output_state_out);
+
+ /** Static function to check if given info will lead to a valid configuration of @ref NELSTMLayerQuantized
+ *
+ * @param[in] input Source tensor info. Input is a 2D tensor info with dimensions [input_size, batch_size]. Data types supported: QASYMM8.
+ * @param[in] input_to_input_weights 2D weights tensor info with dimensions [input_size, output_size]. Data type supported: Same as @p input.
+ * @param[in] input_to_forget_weights 2D weights tensor info with dimensions [input_size, output_size]. Data type supported: Same as @p input.
+ * @param[in] input_to_cell_weights 2D weights tensor info with dimensions [input_size, output_size]. Data type supported: Same as @p input.
+ * @param[in] input_to_output_weights 2D weights tensor info with dimensions [input_size, output_size]. Data type supported: Same as @p input.
+ * @param[in] recurrent_to_input_weights 2D weights tensor info with dimensions [output_size, output_size]. Data type supported: Same as @p input.
+ * @param[in] recurrent_to_forget_weights 2D weights tensor info with dimensions [output_size, output_size]. Data type supported: Same as @p input.
+ * @param[in] recurrent_to_cell_weights 2D weights tensor info with dimensions [output_size, output_size]. Data type supported: Same as @p input.
+ * @param[in] recurrent_to_output_weights 2D weights tensor info with dimensions [output_size, output_size]. Data type supported: Same as @p input.
+ * @param[in] input_gate_bias 1D weights tensor info with dimensions [output_size]. Data type supported: S32.
+ * @param[in] forget_gate_bias 1D weights tensor info with dimensions [output_size]. Data type supported: S32.
+ * @param[in] cell_bias 1D weights tensor info with dimensions [output_size]. Data type supported: S32.
+ * @param[in] output_gate_bias 1D weights tensor info with dimensions [output_size]. Data type supported: S32.
+ * @param[in] cell_state_in 2D tensor info with dimensions [output_size, batch_size]. Data type supported: QSYMM16.
+ * @param[in] output_state_in 2D tensor info with dimensions [output_size, batch_size]. Data type supported: Same as @p input.
+ * @param[out] cell_state_out Destination tensor info. Output is a 2D tensor info with dimensions [output_size, batch_size]. Data type supported: QSYMM16.
+ * @param[out] output_state_out Destination tensor info. Output is a 2D tensor info with dimensions [output_size, batch_size]. Data types supported: Same as @p input.
+ *
+ * @return a status
+ */
+ static Status validate(const ITensorInfo *input,
+ const ITensorInfo *input_to_input_weights, const ITensorInfo *input_to_forget_weights, const ITensorInfo *input_to_cell_weights, const ITensorInfo *input_to_output_weights,
+ const ITensorInfo *recurrent_to_input_weights, const ITensorInfo *recurrent_to_forget_weights, const ITensorInfo *recurrent_to_cell_weights, const ITensorInfo *recurrent_to_output_weights,
+ const ITensorInfo *input_gate_bias, const ITensorInfo *forget_gate_bias, const ITensorInfo *cell_bias, const ITensorInfo *output_gate_bias,
+ const ITensorInfo *cell_state_in, const ITensorInfo *output_state_in,
+ const ITensorInfo *cell_state_out, const ITensorInfo *output_state_out);
+
+ // Inherited methods overridden:
+ void run() override;
+ void prepare() override;
+
+private:
+ MemoryGroup _memory_group;
+
+ // Functions used
+ NEGEMMLowpMatrixMultiplyCore _gemmlowp;
+ NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPoint _output_stage;
+ NETranspose _transpose_weights;
+ NEConcatenateLayer _concat_input_weights;
+ NEConcatenateLayer _concat_recurrent_weights;
+ NEConcatenateLayer _concat_weights;
+ NEConcatenateLayer _concat_inputs;
+ NEConcatenateLayer _concat_bias;
+ NEActivationLayer _sigmoid_forget_gate;
+ NEActivationLayer _sigmoid_input_gate;
+ NEActivationLayer _sigmoid_output_gate;
+ NEActivationLayer _tanh_modulation_gate;
+ NEActivationLayer _tanh_output_state;
+ NEArithmeticAddition _add1;
+ NEArithmeticAddition _add2;
+ NEPixelWiseMultiplication _mul1;
+ NEPixelWiseMultiplication _mul2;
+ NEPixelWiseMultiplication _mul3;
+ NESlice _slice_input_tensor;
+ NESlice _slice_forget_tensor;
+ NESlice _slice_cell_tensor;
+ NESlice _slice_output_tensor;
+ NEDequantizationLayer _dequantize;
+ NEQuantizationLayer _quantize;
+
+ // Tensor pointers
+ const ITensor *_input_to_input_weights;
+ const ITensor *_input_to_forget_weights;
+ const ITensor *_input_to_cell_weights;
+ const ITensor *_input_to_output_weights;
+ const ITensor *_recurrent_to_input_weights;
+ const ITensor *_recurrent_to_forget_weights;
+ const ITensor *_recurrent_to_cell_weights;
+ const ITensor *_recurrent_to_output_weights;
+ const ITensor *_input_gate_bias;
+ const ITensor *_forget_gate_bias;
+ const ITensor *_cell_bias;
+ const ITensor *_output_gate_bias;
+
+ // Temporary tensors
+ Tensor _recurrent_weights;
+ Tensor _input_weights;
+ Tensor _weights;
+ Tensor _input;
+ Tensor _weights_transposed;
+ Tensor _output_highp;
+ Tensor _output_lowp;
+ Tensor _bias;
+ Tensor _forget_gate_input;
+ Tensor _input_gate_input;
+ Tensor _output_gate_input;
+ Tensor _input_modulation_gate_input;
+ Tensor _forget_gate_output;
+ Tensor _input_gate_output;
+ Tensor _output_gate_output;
+ Tensor _input_modulation_gate_output;
+ Tensor _cell_state1;
+ Tensor _cell_state2;
+ Tensor _output_state_tmp;
+ Tensor _output_state_out_symm;
+ Tensor _output_state_out_f32;
+
+ bool _is_prepared;
+};
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_NELSTMLAYERQUANTIZED_H__ */
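
Usage note: a compact way to see how the seventeen arguments of this new function fit together is the static validate() entry point. The sketch below is illustrative only: the sizes and quantization parameters are invented, and the QASYMM8 input/weights with QSYMM16 cell state simply follow the doxygen above; real callers would use the quantization produced by their training or conversion pipeline.

    #include "arm_compute/core/TensorInfo.h"
    #include "arm_compute/runtime/NEON/functions/NELSTMLayerQuantized.h"

    using namespace arm_compute;

    Status check_quantized_lstm(unsigned int input_size, unsigned int output_size, unsigned int batch_size)
    {
        // Illustrative quantization parameters (not mandated by this header).
        const QuantizationInfo qin(1.f / 128.f, 128); // input and output state (QASYMM8)
        const QuantizationInfo qw(1.f / 16.f, 16);    // weights (QASYMM8)
        const QuantizationInfo qcell(1.f / 2048.f);   // cell state (QSYMM16)

        const TensorInfo in(TensorShape(input_size, batch_size), 1, DataType::QASYMM8, qin);
        const TensorInfo iw(TensorShape(input_size, output_size), 1, DataType::QASYMM8, qw);
        const TensorInfo rw(TensorShape(output_size, output_size), 1, DataType::QASYMM8, qw);
        const TensorInfo bias(TensorShape(output_size), 1, DataType::S32);
        const TensorInfo cell(TensorShape(output_size, batch_size), 1, DataType::QSYMM16, qcell);
        const TensorInfo state(TensorShape(output_size, batch_size), 1, DataType::QASYMM8, qin);

        return NELSTMLayerQuantized::validate(&in,
                                              &iw, &iw, &iw, &iw,         // input-to-* weights
                                              &rw, &rw, &rw, &rw,         // recurrent-to-* weights
                                              &bias, &bias, &bias, &bias, // gate biases (S32)
                                              &cell, &state,              // cell_state_in, output_state_in
                                              &cell, &state);             // cell_state_out, output_state_out
    }
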
diff --git a/arm_compute/runtime/NEON/functions/NEQuantizationLayer.h b/arm_compute/runtime/NEON/functions/NEQuantizationLayer.h
index 5e4b4f754c..46a62bd903 100644
--- a/arm_compute/runtime/NEON/functions/NEQuantizationLayer.h
+++ b/arm_compute/runtime/NEON/functions/NEQuantizationLayer.h
@@ -49,13 +49,13 @@ public:
/** Set the input and output tensors.
*
* @param[in] input Source tensor. The dimensions over the third will be interpreted as batches. Data types supported: F32/F16.
- * @param[out] output Destination tensor with the same dimensions of input. Data types supported: QASYMM8
+ * @param[out] output Destination tensor with the same dimensions as input. Data types supported: QASYMM8/QSYMM16
*/
void configure(const ITensor *input, ITensor *output);
/** Static function to check if given info will lead to a valid configuration of @ref NEQuantizationLayer
*
* @param[in] input Input tensor info. The dimensions over the third will be interpreted as batches. Data types supported: F32/F16.
- * @param[in] output Output tensor info. Data types supported: QASYMM8
+ * @param[in] output Output tensor info. Data types supported: QASYMM8/QSYMM16
*
* @return a status
*/
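
Usage note: the destination tensor's data type and quantization info select the quantization scheme, so targeting the new QSYMM16 output only requires initialising the output accordingly. A minimal sketch with an illustrative shape and scale:

    #include "arm_compute/core/TensorInfo.h"
    #include "arm_compute/runtime/NEON/functions/NEQuantizationLayer.h"
    #include "arm_compute/runtime/Tensor.h"

    using namespace arm_compute;

    void quantize_to_qsymm16_example()
    {
        Tensor src{};
        Tensor dst{};
        src.allocator()->init(TensorInfo(TensorShape(16U, 4U), 1, DataType::F32));
        // The output's data type and scale choose the new QSYMM16 path.
        dst.allocator()->init(TensorInfo(TensorShape(16U, 4U), 1, DataType::QSYMM16, QuantizationInfo(1.f / 32768.f)));

        NEQuantizationLayer quant;
        quant.configure(&src, &dst);

        src.allocator()->allocate();
        dst.allocator()->allocate();
        // ... fill src with float data here ...

        quant.run();
    }
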