From 5124be5d1caa70964d452cf9a8cc7c67df31fa9d Mon Sep 17 00:00:00 2001 From: Chunosov Date: Wed, 22 Nov 2017 20:42:13 +0700 Subject: COMPMID-661: Convolution quantized (#32) Change-Id: Id69df4ce98d1d89bdf9c9aa5c4d909659909b30f Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/110456 Tested-by: BSG Visual Compute Jenkins server to access repositories on http://mpd-gerrit.cambridge.arm.com Reviewed-by: Georgios Pinitas Reviewed-by: Anthony Barbier --- .../core/CL/kernels/CLWeightsReshapeKernel.h | 1 + arm_compute/core/Types.h | 52 ++++++++++++++++++++++ 2 files changed, 53 insertions(+) (limited to 'arm_compute/core') diff --git a/arm_compute/core/CL/kernels/CLWeightsReshapeKernel.h b/arm_compute/core/CL/kernels/CLWeightsReshapeKernel.h index ec8940ef7e..6c84ded49e 100644 --- a/arm_compute/core/CL/kernels/CLWeightsReshapeKernel.h +++ b/arm_compute/core/CL/kernels/CLWeightsReshapeKernel.h @@ -50,6 +50,7 @@ public: * and 5D tensor with dimensions [kernel_x, kernel_y, IFM, OFM, num_patches] if unshared. Data types supported: QS8/QS16/QASYMM8/F16/F32 * @param[in] biases The shared biases tensor to append. Bias is 1D tensor with dimensions [OFM] if shared and 2D tensor with * dimensions [OFM, num_patches] if unshared. Data types supported: Same as @p input + * @warning Appending biases to weights reshaped matrix is not supported for quantized asymmetric types. * @param[out] output The output tensor. Should be a 2D Tensor. Data types supported: Same as @p input */ void configure(const ICLTensor *input, const ICLTensor *biases, ICLTensor *output); diff --git a/arm_compute/core/Types.h b/arm_compute/core/Types.h index c77f1d4157..beaec143ef 100644 --- a/arm_compute/core/Types.h +++ b/arm_compute/core/Types.h @@ -827,6 +827,58 @@ private: const unsigned int _num_kernels; }; +/** GEMM Information class. 
This class stores the necessary information to compute GEMM functions */ +class GEMMInfo +{ +public: + /** Default constructor */ + GEMMInfo() + : _is_a_reshaped(false), _is_b_reshaped(false), _reshape_b_only_on_first_run(false) + { + } + /** Constructor + * + * @param[in] is_a_reshaped True if the matrix A has been reshaped + * @param[in] is_b_reshaped True if the matrix B has been reshaped + * @param[in] reshape_b_only_on_first_run Reshape matrix B only for the first run + */ + GEMMInfo(bool is_a_reshaped, bool is_b_reshaped, bool reshape_b_only_on_first_run) + : _is_a_reshaped(is_a_reshaped), _is_b_reshaped(is_b_reshaped), _reshape_b_only_on_first_run(reshape_b_only_on_first_run) + { + } + /** Flag which specifies if the matrix A has been reshaped + * + * @return True if the matrix A has been reshaped + */ + bool is_a_reshaped() const + { + return _is_a_reshaped; + }; + /** Flag which specifies if the matrix B has been reshaped + * + * @return True if the matrix B has been reshaped + */ + bool is_b_reshaped() const + { + return _is_b_reshaped; + }; + /** Flag which specifies if the reshape of matrix B should be executed only for the first run + * + * @note This flag could be set to TRUE when GEMM is used to accelerate convolution layer + * + * @return True if the reshape of matrix B happens only for the first run + */ + bool reshape_b_only_on_first_run() const + { + return _reshape_b_only_on_first_run; + }; + +private: + const bool _is_a_reshaped; + const bool _is_b_reshaped; + const bool _reshape_b_only_on_first_run; +}; + /** IO formatting information class*/ struct IOFormatInfo { -- cgit v1.2.1