author | Michele Di Giorgio <michele.digiorgio@arm.com> | 2018-04-13 14:28:08 +0100
---|---|---
committer | Anthony Barbier <anthony.barbier@arm.com> | 2018-11-02 16:49:54 +0000
commit | 164b65d3c8f61f1d6d404fb484c1998a20a2cbda (patch) |
tree | b60b9f49066ca8c008726dd193e4e0bd56ac1168 /arm_compute |
parent | 0cbb927ac309e332ac6e6f1ab9170f041f0138ab (diff) |
download | ComputeLibrary-164b65d3c8f61f1d6d404fb484c1998a20a2cbda.tar.gz |
COMPMID-1043: Rework GCGEMMMatrixMultiplyKernel interface and allow auto initialization of the tensors
This patch also:
- removes support for already reshaped weights in GCConvolutionLayer
- makes GCConvolutionLayer similar to CLGEMMConvolutionLayer
- enables usage of the GCGEMM function in GCConvolution instead of calling the
GEMM kernels directly
Change-Id: I3e4a64335555e86e18585d38d8fda4bfdb44e265
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/127696
Tested-by: Jenkins <bsgcomp@arm.com>
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
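
The headline change is that GCGEMMMatrixMultiplyKernel, and the functions built on top of it (GCGEMM and GCConvolutionLayer), now expose a static `validate()` entry point next to `configure()`, and output tensors can be auto-initialized from the input shapes. The sketch below shows how a caller might use the reworked GCGEMM interface after this patch; the `run_gemm` helper, the shapes, and the data type are illustrative assumptions, not part of the patch, and it presumes the GLES compute context has already been set up.

```cpp
#include "arm_compute/core/Error.h"
#include "arm_compute/runtime/GLES_COMPUTE/GCTensor.h"
#include "arm_compute/runtime/GLES_COMPUTE/functions/GCGEMM.h"

using namespace arm_compute;

// Hypothetical helper: multiply A (32x64) by B (64x16) on the GLES compute backend.
// Assumes the GLES compute context/scheduler has already been initialised.
void run_gemm(GCTensor &a, GCTensor &b, GCTensor &dst)
{
    a.allocator()->init(TensorInfo(TensorShape(64U, 32U), 1, DataType::F32)); // Matrix A: 32 rows x 64 cols
    b.allocator()->init(TensorInfo(TensorShape(16U, 64U), 1, DataType::F32)); // Matrix B: 64 rows x 16 cols
    // With auto initialization, dst's TensorInfo can be left empty; its shape
    // is deduced from A and B when the function is configured.

    // New with this patch: check the configuration up front via the static validate().
    ARM_COMPUTE_ERROR_THROW_ON(GCGEMM::validate(a.info(), b.info(), nullptr, dst.info(), 1.f, 0.f));

    GCGEMM gemm;
    gemm.configure(&a, &b, nullptr, &dst, 1.f /* alpha */, 0.f /* beta */);

    a.allocator()->allocate();
    b.allocator()->allocate();
    dst.allocator()->allocate();

    gemm.run();
}
```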
Diffstat (limited to 'arm_compute')
-rw-r--r-- | arm_compute/core/CL/CLTypes.h | 22
-rw-r--r-- | arm_compute/core/GLES_COMPUTE/kernels/GCGEMMMatrixMultiplyKernel.h | 23
-rw-r--r-- | arm_compute/core/GPUTarget.h | 49
-rw-r--r-- | arm_compute/runtime/GLES_COMPUTE/functions/GCConvolutionLayer.h | 74
-rw-r--r-- | arm_compute/runtime/GLES_COMPUTE/functions/GCGEMM.h | 16
5 files changed, 136 insertions, 48 deletions
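
As the diffstat shows, the `GPUTarget` enum moves out of `arm_compute/core/CL/CLTypes.h` into a new backend-agnostic header, `arm_compute/core/GPUTarget.h`, so the GLES kernels can also be target-aware. A minimal sketch of consuming the relocated enum follows; the `is_bifrost` helper is illustrative (not a library API) and only assumes that `GPU_ARCH_MASK` isolates the architecture bits of a target value, as the constants in the new header suggest.

```cpp
#include "arm_compute/core/GPUTarget.h"

using arm_compute::GPUTarget;

// Illustrative helper (not part of the library): GPU_ARCH_MASK (0xF00) isolates
// the architecture nibble, so any Bifrost part (G71, G72, TNOX, ...) maps back to BIFROST.
constexpr bool is_bifrost(GPUTarget target)
{
    return (static_cast<int>(target) & static_cast<int>(GPUTarget::GPU_ARCH_MASK)) == static_cast<int>(GPUTarget::BIFROST);
}

static_assert(is_bifrost(GPUTarget::G71), "G71 (0x210) falls in the Bifrost (0x200) range");
static_assert(!is_bifrost(GPUTarget::T800), "T800 (0x130) is a Midgard part");
```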
diff --git a/arm_compute/core/CL/CLTypes.h b/arm_compute/core/CL/CLTypes.h
index ca487814a7..4a03cc9637 100644
--- a/arm_compute/core/CL/CLTypes.h
+++ b/arm_compute/core/CL/CLTypes.h
@@ -24,6 +24,8 @@
 #ifndef __ARM_COMPUTE_CL_TYPES_H__
 #define __ARM_COMPUTE_CL_TYPES_H__
 
+#include "arm_compute/core/GPUTarget.h"
+
 #include <string>
 
 namespace arm_compute
@@ -31,26 +33,6 @@ namespace arm_compute
 /** Default string for the CLKernel configuration id */
 static const std::string default_config_id = "no_config_id";
 
-/** Available GPU Targets */
-enum class GPUTarget
-{
-    UNKNOWN       = 0x101,
-    GPU_ARCH_MASK = 0xF00,
-    MIDGARD       = 0x100,
-    BIFROST       = 0x200,
-    T600          = 0x110,
-    T700          = 0x120,
-    T800          = 0x130,
-    G71           = 0x210,
-    G72           = 0x220,
-    G51           = 0x230,
-    G51BIG        = 0x231,
-    G51LIT        = 0x232,
-    TNOX          = 0x240,
-    TTRX          = 0x250,
-    TBOX          = 0x260
-};
-
 /** Available OpenCL Version */
 enum class CLVersion
 {
diff --git a/arm_compute/core/GLES_COMPUTE/kernels/GCGEMMMatrixMultiplyKernel.h b/arm_compute/core/GLES_COMPUTE/kernels/GCGEMMMatrixMultiplyKernel.h
index 3a0b22f148..cea03a9357 100644
--- a/arm_compute/core/GLES_COMPUTE/kernels/GCGEMMMatrixMultiplyKernel.h
+++ b/arm_compute/core/GLES_COMPUTE/kernels/GCGEMMMatrixMultiplyKernel.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -25,6 +25,7 @@
 #define __ARM_COMPUTE_GCGEMMMATRIXMULTIPLYKERNEL_H__
 
 #include "arm_compute/core/GLES_COMPUTE/IGCKernel.h"
+#include "arm_compute/core/GPUTarget.h"
 
 namespace arm_compute
 {
@@ -32,9 +33,6 @@ class IGCTensor;
 /** GLES Compute kernel to multiply two input matrices "A" and "B" or to multiply a vector "A" by a matrix "B". All elements of the output matrix/vector will be multiplied by alpha
  *
- * @note If the output tensor is a matrix, the implementation assumes that the input tensors @p input0 and @p input1 are both matrices and reshaped respectively with @ref GCGEMMInterleave4x4Kernel" and @ref GCGEMMTranspose1xWKernel
- * @note If the output tensor is a vector and the data type is F32, the implementation assumes that the first input tensor @p input0 is a vector and the second input tensor @p input1 a matrix. The implementation also assumes that both tensors have not been reshaped
- *
  * @attention The second input tensor must have at least 2 dimensions (matrix)
  *
  */
@@ -64,8 +62,23 @@ public:
      * @param[out] output                    Output tensor to store the result of matrix multiplication. Data type supported: same as @p input0
      * @param[in]  alpha                     Weight of the matrix product
      * @param[in]  is_interleaved_transposed (Optional) True if input0 and input1 have been reshaped respectively using @ref GCGEMMInterleave4x4Kernel and @ref GCGEMMTranspose1xWKernel
+     * @param[in]  reshape_info              (Optional) GEMM reshape info. If is_interleaved_transposed = true, this object must contain the information to understand how the matrix A and matrix B have been reshaped
+     */
+    void configure(const IGCTensor *input0, const IGCTensor *input1, IGCTensor *output, float alpha, bool is_interleaved_transposed = true, const GEMMReshapeInfo &reshape_info = GEMMReshapeInfo());
+    /** Static function to check if given info will lead to a valid configuration of @ref GCGEMMMatrixMultiplyKernel
+     *
+     * @param[in] input0                    Input tensor containing the Matrix A. Data types supported: F16/F32
+     * @param[in] input1                    Input tensor containing the Matrix B. Data type supported: same as @p input0
+     * @param[in] output                    Output tensor to store the result of matrix multiplication. Data type supported: same as @p input0
+     * @param[in] alpha                     Weight of the matrix product
+     * @param[in] is_interleaved_transposed True if input0 and input1 have been reshaped respectively using @ref GCGEMMInterleave4x4Kernel and @ref GCGEMMTranspose1xWKernel
+     * @param[in] reshape_info              GEMM reshape info. If is_interleaved_transposed = true, this object must contain the information to understand how the matrix A and matrix B have been reshaped
+     * @param[in] gpu_target                GPU Target
+     *
+     * @return a status
      */
-    void configure(const IGCTensor *input0, const IGCTensor *input1, IGCTensor *output, float alpha, bool is_interleaved_transposed = true);
+    static Status validate(const ITensorInfo *input0, const ITensorInfo *input1, const ITensorInfo *output, float alpha, bool is_interleaved_transposed, const GEMMReshapeInfo &reshape_info,
+                           GPUTarget gpu_target);
 
     // Inherited methods overridden:
     void run(const Window &window) override;
diff --git a/arm_compute/core/GPUTarget.h b/arm_compute/core/GPUTarget.h
new file mode 100644
index 0000000000..8a5ca80f49
--- /dev/null
+++ b/arm_compute/core/GPUTarget.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_GPUTARGET_H__
+#define __ARM_COMPUTE_GPUTARGET_H__
+
+namespace arm_compute
+{
+/** Available GPU Targets */
+enum class GPUTarget
+{
+    UNKNOWN       = 0x101,
+    GPU_ARCH_MASK = 0xF00,
+    MIDGARD       = 0x100,
+    BIFROST       = 0x200,
+    T600          = 0x110,
+    T700          = 0x120,
+    T800          = 0x130,
+    G71           = 0x210,
+    G72           = 0x220,
+    G51           = 0x230,
+    G51BIG        = 0x231,
+    G51LIT        = 0x232,
+    TNOX          = 0x240,
+    TTRX          = 0x250,
+    TBOX          = 0x260
+};
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_GPUTARGET_H__ */
diff --git a/arm_compute/runtime/GLES_COMPUTE/functions/GCConvolutionLayer.h b/arm_compute/runtime/GLES_COMPUTE/functions/GCConvolutionLayer.h
index 54b17b40bb..fa29f447c8 100644
--- a/arm_compute/runtime/GLES_COMPUTE/functions/GCConvolutionLayer.h
+++ b/arm_compute/runtime/GLES_COMPUTE/functions/GCConvolutionLayer.h
@@ -27,15 +27,13 @@
 #include "arm_compute/core/GLES_COMPUTE/kernels/GCCol2ImKernel.h"
 #include "arm_compute/core/GLES_COMPUTE/kernels/GCFillBorderKernel.h"
-#include "arm_compute/core/GLES_COMPUTE/kernels/GCGEMMInterleave4x4Kernel.h"
-#include "arm_compute/core/GLES_COMPUTE/kernels/GCGEMMMatrixMultiplyKernel.h"
-#include "arm_compute/core/GLES_COMPUTE/kernels/GCGEMMTranspose1xWKernel.h"
 #include "arm_compute/core/GLES_COMPUTE/kernels/GCIm2ColKernel.h"
 #include "arm_compute/core/GLES_COMPUTE/kernels/GCWeightsReshapeKernel.h"
 #include "arm_compute/core/Types.h"
 #include "arm_compute/runtime/GLES_COMPUTE/GCMemoryGroup.h"
 #include "arm_compute/runtime/GLES_COMPUTE/GCTensor.h"
 #include "arm_compute/runtime/GLES_COMPUTE/functions/GCActivationLayer.h"
+#include "arm_compute/runtime/GLES_COMPUTE/functions/GCGEMM.h"
 #include "arm_compute/runtime/IFunction.h"
 
 #include <memory>
@@ -46,7 +44,6 @@ class IGCTensor;
 /** Function to reshape and transpose the weights. This function calls the following kernels:
  * -# @ref GCWeightsReshapeKernel
- * -# @ref GCGEMMTranspose1xWKernel
  */
 class GCConvolutionLayerReshapeWeights : public IFunction
 {
@@ -55,22 +52,18 @@ public:
     GCConvolutionLayerReshapeWeights();
     /** Set the input and output tensors.
      *
-     * @param[in]  weights      Weights tensor. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM].
-     *                          Data type supported: F16/F32.
-     * @param[in]  biases       Biases tensor. Shared biases supported. Biases are 1D tensor with dimensions [OFM]. Data type supported: Same as @p weights.
-     * @param[out] output       Destination tensor. Data types supported: Same as @p weights.
-     * @param[in]  transpose1xW True if the weights are to undergo a 1xW transposition after reshaping (in case of GEMM operation), false otherwise.
-     *                          Data types supported: Same as @p weights.
+     * @param[in]  weights Weights tensor. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM].
+     *                     Data type supported: F16/F32.
+     * @param[in]  biases  Biases tensor. Shared biases supported. Biases are 1D tensor with dimensions [OFM]. Data type supported: Same as @p weights.
+     * @param[out] output  Destination tensor. Data types supported: Same as @p weights.
      */
-    void configure(const IGCTensor *weights, const IGCTensor *biases, IGCTensor *output, bool transpose1xW);
+    void configure(const IGCTensor *weights, const IGCTensor *biases, IGCTensor *output);
     // Inherited methods overridden:
     void run() override;
 
 private:
-    GCWeightsReshapeKernel   _weights_reshape_kernel;
-    GCGEMMTranspose1xWKernel _weights_transposed_kernel;
-    GCTensor                 _weights_reshaped;
-    bool                     _transpose1xW;
+    GCWeightsReshapeKernel _weights_reshape_kernel;
+    GCTensor               _weights_reshaped;
 };
 
 /** Basic function to compute the convolution layer. This function calls the following GLES kernels:
@@ -86,7 +79,14 @@ class GCConvolutionLayer : public IFunction
 public:
     /** Default constructor */
     GCConvolutionLayer(std::shared_ptr<IMemoryManager> memory_manager = nullptr);
-
+    /** Prevent instances of this class from being copied (As this class contains pointers) */
+    GCConvolutionLayer(const GCConvolutionLayer &) = delete;
+    /** Default move constructor */
+    GCConvolutionLayer(GCConvolutionLayer &&) = default;
+    /** Prevent instances of this class from being copied (As this class contains pointers) */
+    GCConvolutionLayer &operator=(const GCConvolutionLayer &) = delete;
+    /** Default move assignment operator */
+    GCConvolutionLayer &operator=(GCConvolutionLayer &&) = default;
     /** Set the input and output tensors.
      *
      * @param[in]  input        Source tensor. 3 lower dimensions represent a single input [width, height, IFM],
@@ -105,6 +105,26 @@ public:
      */
     void configure(const IGCTensor *input, const IGCTensor *weights, const IGCTensor *biases, IGCTensor *output, const PadStrideInfo &conv_info,
                    const WeightsInfo &weights_info = WeightsInfo(), const Size2D &dilation = Size2D(1U, 1U), const ActivationLayerInfo &act_info = ActivationLayerInfo());
+    /** Static function to check if given info will lead to a valid configuration of @ref GCConvolutionLayer.
+     *
+     * @param[in] input        Source tensor. 3 lower dimensions represent a single input [width, height, IFM],
+     *                         while every optional dimension from 4 and above represent a batch of inputs.
+     *                         Data types supported: QS8/QASYMM8/QS16/F16/F32.
+     * @param[in] weights      Weights tensor. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM]. Data type supported: Same as @p input.
+     * @param[in] biases       Biases tensor. Shared biases supported. Biases are 1D tensor with dimensions [OFM].
+     *                         Data type supported: Should match @p input data type, except for input of QASYMM8 type where biases should be of S32 type.
+     * @param[out] output      Destination tensor. 3 lower dimensions represent a single output [width, height, OFM], while the rest represent batch of outputs.
+     *                         Data types supported: Same as @p input.
+     * @param[in] conv_info    Contains padding and stride information described in @ref PadStrideInfo.
+     * @param[in] weights_info Specifies if the weights tensor has been reshaped with GCWeightsReshapeKernel. If this is not part of the fully connected layer the weights
+     *                         tensor has also been transposed with GCGEMMTranspose1xWKernel. Data type supported: Same as @p input.
+     * @param[in] dilation     (Optional) Dilation, in elements, across x and y. Defaults to (1, 1).
+     * @param[in] act_info     (Optional) Activation layer information in case of a fused activation.
+     *
+     * @return a status
+     */
+    static Status validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info,
+                           const WeightsInfo &weights_info = WeightsInfo(), const Size2D &dilation = Size2D(1U, 1U), const ActivationLayerInfo &act_info = ActivationLayerInfo());
 
     // Inherited methods overridden:
     void run() override;
@@ -115,20 +135,30 @@ private:
      * @param input   Input tensor. Data types supported: F16/F32.
      * @param weights Weights tensor. Data type supported: Same as @p input.
      * @param output  Output tensor. Data types supported: Same as @p input,
-     * @param is_interleaved_transposed Flag that signals if matrix is interleaved transposed
      */
-    void configure_mm(const IGCTensor *input, const IGCTensor *weights, IGCTensor *output, bool is_interleaved_transposed = true);
+    void configure_mm(const IGCTensor *input, const IGCTensor *weights, IGCTensor *output);
+    /** Static function to check if given info will lead to a valid configuration of @ref GCGEMMConvolutionLayer matrix multiply routines
+     *
+     * @param[in] input   Input tensor. Data types supported: QS8/QASYMM8/QS16/F16/F32.
+     * @param[in] weights Weights tensor. Data type supported: Same as @p input.
+     * @param[in] output  Output tensor. Data types supported: Same as @p input,
+     *                    except for input of QASYMM8 type where output should be of S32 type.
+     *
+     * @return a status
+     */
+    static Status validate_mm(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *output);
 
 private:
     GCMemoryGroup                    _memory_group;
     GCConvolutionLayerReshapeWeights _reshape_weights;
     GCIm2ColKernel                   _input_im2col_kernel;
-    GCGEMMInterleave4x4Kernel        _input_interleave_kernel;
-    GCGEMMMatrixMultiplyKernel       _mm_kernel;
+    GCGEMM                           _mm_gemm;
     GCCol2ImKernel                   _output_col2im_kernel;
     GCFillBorderKernel               _fill_border;
     GCActivationLayer                _activationlayer_function;
 
+    const IGCTensor *_original_weights;
+
     GCTensor _input_im2col_reshaped;
     GCTensor _input_interleaved_reshaped;
     GCTensor _weights_reshaped;
@@ -136,9 +166,7 @@ private:
     GCTensor _gemm_output;
     GCTensor _tmp_output;
 
-    bool _append_bias;
-    bool _is_fully_connected_convolution;
-    bool _are_weights_reshaped;
+    bool _is_first_run;
     bool _is_activationlayer_enabled;
 };
 }
diff --git a/arm_compute/runtime/GLES_COMPUTE/functions/GCGEMM.h b/arm_compute/runtime/GLES_COMPUTE/functions/GCGEMM.h
index 31ad0abaa0..a1d6c8a438 100644
--- a/arm_compute/runtime/GLES_COMPUTE/functions/GCGEMM.h
+++ b/arm_compute/runtime/GLES_COMPUTE/functions/GCGEMM.h
@@ -69,6 +69,20 @@ public:
      *                       if the reshape of matrix B should happen only for the first run
      */
     void configure(const IGCTensor *a, const IGCTensor *b, const IGCTensor *c, IGCTensor *output, float alpha, float beta, const GEMMInfo &gemm_info = GEMMInfo());
+    /** Static function to check if given info will lead to a valid configuration of @ref GCGEMM.
+     *
+     * @param[in]  a         First input tensor (Matrix or Vector A). Data types supported: F16/F32
+     * @param[in]  b         Second input tensor (Matrix B). Data type supported: same as @p a.
+     * @param[in]  c         Third input tensor (Matrix C). It can be a nullptr if just the multiplication between @p a and @p b is needed. Data type supported: same as @p a.
+     * @param[out] output    Output tensor. Data type supported: same as @p a
+     * @param[in]  alpha     Weight of the matrix product
+     * @param[in]  beta      Weight of matrix C
+     * @param[in]  gemm_info (Optional) Specifies if the matrix A and/or matrix B have been reshaped and
+     *                       if the reshape of matrix B should happen only for the first run
+     *
+     * @return a status
+     */
+    static Status validate(const ITensorInfo *a, const ITensorInfo *b, const IGCTensor *c, const ITensorInfo *output, const float alpha, const float beta, const GEMMInfo &gemm_info = GEMMInfo());
 
     // Inherited methods overridden:
     void run() override;
@@ -83,6 +97,8 @@ private:
     GCTensor _tmp_b;
     bool     _is_interleaved_transposed;
     bool     _run_addition;
+    bool     _is_first_run;
+    bool     _reshape_b_only_on_first_run;
 };
 }
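
With GCConvolutionLayer now wrapping GCGEMM instead of driving the GEMM kernels directly, a caller no longer hands in pre-reshaped weights; the function reshapes them itself on the first run. A minimal sketch of the resulting call pattern after this patch follows; the `configure_conv` helper, the 3x3 kernel, and the stride/padding values are illustrative assumptions, and the tensors are expected to have been created and initialised by the caller.

```cpp
#include "arm_compute/core/Error.h"
#include "arm_compute/runtime/GLES_COMPUTE/functions/GCConvolutionLayer.h"

using namespace arm_compute;

// Hypothetical helper: set up a 3x3 convolution with stride 1 and padding 1.
void configure_conv(GCConvolutionLayer &conv, IGCTensor *src, IGCTensor *weights, IGCTensor *biases, IGCTensor *dst)
{
    const PadStrideInfo conv_info(1 /* stride_x */, 1 /* stride_y */, 1 /* pad_x */, 1 /* pad_y */);

    // New with this patch: a static validate() mirrors configure(), so a bad
    // configuration can be rejected before any GLES resources are committed.
    const Status status = GCConvolutionLayer::validate(src->info(), weights->info(), biases->info(), dst->info(), conv_info);
    ARM_COMPUTE_ERROR_THROW_ON(status);

    // Weights are passed in their original [kernel_x, kernel_y, IFM, OFM] layout;
    // support for already reshaped weights was removed, so the default WeightsInfo() is used.
    conv.configure(src, weights, biases, dst, conv_info);
}
```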