From 164b65d3c8f61f1d6d404fb484c1998a20a2cbda Mon Sep 17 00:00:00 2001
From: Michele Di Giorgio
Date: Fri, 13 Apr 2018 14:28:08 +0100
Subject: COMPMID-1043: Rework GCGEMMMatrixMultiplyKernel interface and allow
 auto initialization of the tensors

This patch also:
- removes support for already reshaped weights in GCConvolutionLayer
- makes GCConvolutionLayer similar to CLGEMMConvolutionLayer
- enables usage of the GCGEMM function in GCConvolution instead of calling the GEMM kernels directly

Change-Id: I3e4a64335555e86e18585d38d8fda4bfdb44e265
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/127696
Tested-by: Jenkins
Reviewed-by: Anthony Barbier
---
 .../GLES_COMPUTE/functions/GCConvolutionLayer.h | 74 +++++++++++++++-------
 .../runtime/GLES_COMPUTE/functions/GCGEMM.h     | 16 +++++
 2 files changed, 67 insertions(+), 23 deletions(-)

(limited to 'arm_compute/runtime')

diff --git a/arm_compute/runtime/GLES_COMPUTE/functions/GCConvolutionLayer.h b/arm_compute/runtime/GLES_COMPUTE/functions/GCConvolutionLayer.h
index 54b17b40bb..fa29f447c8 100644
--- a/arm_compute/runtime/GLES_COMPUTE/functions/GCConvolutionLayer.h
+++ b/arm_compute/runtime/GLES_COMPUTE/functions/GCConvolutionLayer.h
@@ -27,15 +27,13 @@
 
 #include "arm_compute/core/GLES_COMPUTE/kernels/GCCol2ImKernel.h"
 #include "arm_compute/core/GLES_COMPUTE/kernels/GCFillBorderKernel.h"
-#include "arm_compute/core/GLES_COMPUTE/kernels/GCGEMMInterleave4x4Kernel.h"
-#include "arm_compute/core/GLES_COMPUTE/kernels/GCGEMMMatrixMultiplyKernel.h"
-#include "arm_compute/core/GLES_COMPUTE/kernels/GCGEMMTranspose1xWKernel.h"
 #include "arm_compute/core/GLES_COMPUTE/kernels/GCIm2ColKernel.h"
 #include "arm_compute/core/GLES_COMPUTE/kernels/GCWeightsReshapeKernel.h"
 #include "arm_compute/core/Types.h"
 #include "arm_compute/runtime/GLES_COMPUTE/GCMemoryGroup.h"
 #include "arm_compute/runtime/GLES_COMPUTE/GCTensor.h"
 #include "arm_compute/runtime/GLES_COMPUTE/functions/GCActivationLayer.h"
+#include "arm_compute/runtime/GLES_COMPUTE/functions/GCGEMM.h"
 #include "arm_compute/runtime/IFunction.h"
 
 #include <memory>
@@ -46,7 +44,6 @@ class IGCTensor;
 /** Function to reshape and transpose the weights. This function calls the following kernels:
  *
  * -# @ref GCWeightsReshapeKernel
- * -# @ref GCGEMMTranspose1xWKernel
 */
 class GCConvolutionLayerReshapeWeights : public IFunction
 {
@@ -55,22 +52,18 @@ public:
     GCConvolutionLayerReshapeWeights();
     /** Set the input and output tensors.
      *
-     * @param[in]  weights      Weights tensor. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM].
-     *                          Data type supported: F16/F32.
-     * @param[in]  biases       Biases tensor. Shared biases supported. Biases are 1D tensor with dimensions [OFM]. Data type supported: Same as @p weights.
-     * @param[out] output       Destination tensor. Data types supported: Same as @p weights.
-     * @param[in]  transpose1xW True if the weights are to undergo a 1xW transposition after reshaping (in case of GEMM operation), false otherwise.
-     *                          Data types supported: Same as @p weights.
+     * @param[in]  weights Weights tensor. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM].
+     *                     Data type supported: F16/F32.
+     * @param[in]  biases  Biases tensor. Shared biases supported. Biases are 1D tensor with dimensions [OFM]. Data type supported: Same as @p weights.
+     * @param[out] output  Destination tensor. Data types supported: Same as @p weights.
      */
-    void configure(const IGCTensor *weights, const IGCTensor *biases, IGCTensor *output, bool transpose1xW);
+    void configure(const IGCTensor *weights, const IGCTensor *biases, IGCTensor *output);
 
     // Inherited methods overridden:
     void run() override;
 
 private:
-    GCWeightsReshapeKernel   _weights_reshape_kernel;
-    GCGEMMTranspose1xWKernel _weights_transposed_kernel;
-    GCTensor                 _weights_reshaped;
-    bool                     _transpose1xW;
+    GCWeightsReshapeKernel _weights_reshape_kernel;
+    GCTensor               _weights_reshaped;
 };
 
 /** Basic function to compute the convolution layer. This function calls the following GLES kernels:
@@ -86,7 +79,14 @@ class GCConvolutionLayer : public IFunction
 public:
     /** Default constructor */
     GCConvolutionLayer(std::shared_ptr<IMemoryManager> memory_manager = nullptr);
-
+    /** Prevent instances of this class from being copied (As this class contains pointers) */
+    GCConvolutionLayer(const GCConvolutionLayer &) = delete;
+    /** Default move constructor */
+    GCConvolutionLayer(GCConvolutionLayer &&) = default;
+    /** Prevent instances of this class from being copied (As this class contains pointers) */
+    GCConvolutionLayer &operator=(const GCConvolutionLayer &) = delete;
+    /** Default move assignment operator */
+    GCConvolutionLayer &operator=(GCConvolutionLayer &&) = default;
     /** Set the input and output tensors.
      *
      * @param[in]  input        Source tensor. 3 lower dimensions represent a single input [width, height, IFM],
@@ -105,6 +105,26 @@ public:
      */
     void configure(const IGCTensor *input, const IGCTensor *weights, const IGCTensor *biases, IGCTensor *output, const PadStrideInfo &conv_info,
                    const WeightsInfo &weights_info = WeightsInfo(), const Size2D &dilation = Size2D(1U, 1U), const ActivationLayerInfo &act_info = ActivationLayerInfo());
+    /** Static function to check if given info will lead to a valid configuration of @ref GCConvolutionLayer.
+     *
+     * @param[in]  input        Source tensor. 3 lower dimensions represent a single input [width, height, IFM],
+     *                          while every optional dimension from 4 and above represent a batch of inputs.
+     *                          Data types supported: QS8/QASYMM8/QS16/F16/F32.
+     * @param[in]  weights      Weights tensor. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM]. Data type supported: Same as @p input.
+     * @param[in]  biases       Biases tensor. Shared biases supported. Biases are 1D tensor with dimensions [OFM].
+     *                          Data type supported: Should match @p input data type, except for input of QASYMM8 type where biases should be of S32 type.
+     * @param[out] output       Destination tensor. 3 lower dimensions represent a single output [width, height, OFM], while the rest represent batch of outputs.
+     *                          Data types supported: Same as @p input.
+     * @param[in]  conv_info    Contains padding and stride information described in @ref PadStrideInfo.
+     * @param[in]  weights_info Specifies if the weights tensor has been reshaped with GCWeightsReshapeKernel. If this is not part of the fully connected layer the weights
+     *                          tensor has also been transposed with GCGEMMTranspose1xWKernel. Data type supported: Same as @p input.
+     * @param[in]  dilation     (Optional) Dilation, in elements, across x and y. Defaults to (1, 1).
+     * @param[in]  act_info     (Optional) Activation layer information in case of a fused activation.
+     *
+     * @return a status
+     */
+    static Status validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info,
+                           const WeightsInfo &weights_info = WeightsInfo(), const Size2D &dilation = Size2D(1U, 1U), const ActivationLayerInfo &act_info = ActivationLayerInfo());
 
     // Inherited methods overridden:
     void run() override;
@@ -115,20 +135,30 @@ private:
      * @param input   Input tensor. Data types supported: F16/F32.
      * @param weights Weights tensor. Data type supported: Same as @p input.
      * @param output  Output tensor. Data types supported: Same as @p input,
-     * @param is_interleaved_transposed Flag that signals if matrix is interleaved transposed
      */
-    void configure_mm(const IGCTensor *input, const IGCTensor *weights, IGCTensor *output, bool is_interleaved_transposed = true);
+    void configure_mm(const IGCTensor *input, const IGCTensor *weights, IGCTensor *output);
+    /** Static function to check if given info will lead to a valid configuration of @ref GCGEMMConvolutionLayer matrix multiply routines
+     *
+     * @param[in] input   Input tensor. Data types supported: QS8/QASYMM8/QS16/F16/F32.
+     * @param[in] weights Weights tensor. Data type supported: Same as @p input.
+     * @param[in] output  Output tensor. Data types supported: Same as @p input,
+     *                    except for input of QASYMM8 type where output should be of S32 type.
+     *
+     * @return a status
+     */
+    static Status validate_mm(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *output);
 
 private:
     GCMemoryGroup                    _memory_group;
     GCConvolutionLayerReshapeWeights _reshape_weights;
     GCIm2ColKernel                   _input_im2col_kernel;
-    GCGEMMInterleave4x4Kernel        _input_interleave_kernel;
-    GCGEMMMatrixMultiplyKernel       _mm_kernel;
+    GCGEMM                           _mm_gemm;
     GCCol2ImKernel                   _output_col2im_kernel;
     GCFillBorderKernel               _fill_border;
     GCActivationLayer                _activationlayer_function;
 
+    const IGCTensor *_original_weights;
+
     GCTensor _input_im2col_reshaped;
     GCTensor _input_interleaved_reshaped;
     GCTensor _weights_reshaped;
@@ -136,9 +166,7 @@ private:
     GCTensor _gemm_output;
     GCTensor _tmp_output;
 
-    bool _append_bias;
-    bool _is_fully_connected_convolution;
-    bool _are_weights_reshaped;
+    bool _is_first_run;
     bool _is_activationlayer_enabled;
 };
 }
diff --git a/arm_compute/runtime/GLES_COMPUTE/functions/GCGEMM.h b/arm_compute/runtime/GLES_COMPUTE/functions/GCGEMM.h
index 31ad0abaa0..a1d6c8a438 100644
--- a/arm_compute/runtime/GLES_COMPUTE/functions/GCGEMM.h
+++ b/arm_compute/runtime/GLES_COMPUTE/functions/GCGEMM.h
@@ -69,6 +69,20 @@ public:
      *                       if the reshape of matrix B should happen only for the first run
      */
     void configure(const IGCTensor *a, const IGCTensor *b, const IGCTensor *c, IGCTensor *output, float alpha, float beta, const GEMMInfo &gemm_info = GEMMInfo());
+    /** Static function to check if given info will lead to a valid configuration of @ref GCGEMM.
+     *
+     * @param[in]  a         First input tensor (Matrix or Vector A). Data types supported: F16/F32
+     * @param[in]  b         Second input tensor (Matrix B). Data type supported: same as @p a.
+     * @param[in]  c         Third input tensor (Matrix C). It can be a nullptr if just the multiplication between @p a and @p b is needed. Data type supported: same as @p a.
+     * @param[out] output    Output tensor. Data type supported: same as @p a
+     * @param[in]  alpha     Weight of the matrix product
+     * @param[in]  beta      Weight of matrix C
+     * @param[in]  gemm_info (Optional) Specifies if the matrix A and/or matrix B have been reshaped and
+     *                       if the reshape of matrix B should happen only for the first run
+     *
+     * @return a status
+     */
+    static Status validate(const ITensorInfo *a, const ITensorInfo *b, const IGCTensor *c, const ITensorInfo *output, const float alpha, const float beta, const GEMMInfo &gemm_info = GEMMInfo());
 
     // Inherited methods overridden:
     void run() override;
@@ -83,6 +97,8 @@ private:
     GCTensor           _tmp_b;
     bool               _is_interleaved_transposed;
     bool               _run_addition;
+    bool               _is_first_run;
+    bool               _reshape_b_only_on_first_run;
 };
 }
-- 
cgit v1.2.1
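
Usage sketch (not part of the commit): the snippet below shows how the reworked interface might be driven end to end after this patch lands: GCConvolutionLayer::validate() first, then configure() and run(). Only the validate() and configure() signatures come from the patch above; the tensor shapes, the F32 data type and the main() scaffolding are illustrative assumptions.

// Minimal sketch, assuming illustrative shapes (224x224x32 input, 3x3 kernels,
// 64 output feature maps) and F32 data.
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/GLES_COMPUTE/GCScheduler.h"
#include "arm_compute/runtime/GLES_COMPUTE/GCTensor.h"
#include "arm_compute/runtime/GLES_COMPUTE/functions/GCConvolutionLayer.h"

using namespace arm_compute;

int main()
{
    // Create the GLES compute context before configuring any GC function.
    GCScheduler::get().default_init();

    GCTensor input, weights, biases, output;
    input.allocator()->init(TensorInfo(TensorShape(224U, 224U, 32U), 1, DataType::F32));
    weights.allocator()->init(TensorInfo(TensorShape(3U, 3U, 32U, 64U), 1, DataType::F32));
    biases.allocator()->init(TensorInfo(TensorShape(64U), 1, DataType::F32));
    output.allocator()->init(TensorInfo(TensorShape(224U, 224U, 64U), 1, DataType::F32));

    // Stride 1, padding 1: keeps the 224x224 spatial size for a 3x3 kernel.
    const PadStrideInfo conv_info(1, 1, 1, 1);

    // New with this patch: check the configuration up front via the static validate().
    const Status status = GCConvolutionLayer::validate(input.info(), weights.info(),
                                                       biases.info(), output.info(), conv_info);
    if(!bool(status))
    {
        return 1; // invalid configuration
    }

    // Weights are reshaped internally on the first run; the pre-reshaped
    // weights path was removed by this patch.
    GCConvolutionLayer conv;
    conv.configure(&input, &weights, &biases, &output, conv_info);

    // Allocate backing GLES buffers after configure(), then execute:
    // im2col -> GCGEMM -> col2im (+ fused activation if requested).
    input.allocator()->allocate();
    weights.allocator()->allocate();
    biases.allocator()->allocate();
    output.allocator()->allocate();

    conv.run();
    return 0;
}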