aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorManuel Bottini <manuel.bottini@arm.com>2021-07-16 10:23:31 +0100
committerGeorgios Pinitas <georgios.pinitas@arm.com>2021-07-27 17:29:31 +0000
commitd87aded57efb2997d486ffae9102eb79def60c99 (patch)
tree960eda814ef8002cf880d1e0798583590471d6b8
parent4718706b1141d5cccb006a7f86d65c1fde6c54ff (diff)
downloadComputeLibrary-d87aded57efb2997d486ffae9102eb79def60c99.tar.gz
Port CLGEMMConvolutionLayer
Details: port CLWeightsReshapeKernel to ClWeightsReshapeKernel port CLGEMMConvolutionLayer to ClGemmConvolution Resolves: COMPMID-4515 Change-Id: I7d5b4ec72db2742f6eb9f3ffc88f717c35b4f2a3 Signed-off-by: Manuel Bottini <manuel.bottini@arm.com> Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/5983 Comments-Addressed: Arm Jenkins <bsgcomp@arm.com> Reviewed-by: Michele Di Giorgio <michele.digiorgio@arm.com> Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com> Tested-by: Arm Jenkins <bsgcomp@arm.com>
-rw-r--r--Android.bp3
-rw-r--r--arm_compute/runtime/CL/functions/CLDeconvolutionLayer.h6
-rw-r--r--arm_compute/runtime/CL/functions/CLDirectDeconvolutionLayer.h6
-rw-r--r--arm_compute/runtime/CL/functions/CLGEMMConvolutionLayer.h209
-rw-r--r--arm_compute/runtime/CL/functions/CLGEMMDeconvolutionLayer.h2
-rw-r--r--docs/user_guide/release_version_and_change_log.dox2
-rw-r--r--filelist.json5
-rw-r--r--src/core/CL/CLKernels.h1
-rw-r--r--src/core/CL/kernels/CLWeightsReshapeKernel.h121
-rw-r--r--src/core/gpu/cl/kernels/ClWeightsReshapeKernel.cpp (renamed from src/core/CL/kernels/CLWeightsReshapeKernel.cpp)79
-rw-r--r--src/core/gpu/cl/kernels/ClWeightsReshapeKernel.h93
-rw-r--r--src/runtime/CL/functions/CLDirectDeconvolutionLayer.cpp1
-rw-r--r--src/runtime/CL/functions/CLGEMMConvolutionLayer.cpp661
-rw-r--r--src/runtime/CL/functions/CLGEMMDeconvolutionLayer.cpp1
-rw-r--r--src/runtime/gpu/cl/operators/ClGemm.cpp26
-rw-r--r--src/runtime/gpu/cl/operators/ClGemm.h4
-rw-r--r--src/runtime/gpu/cl/operators/ClGemmConvolution.cpp628
-rw-r--r--src/runtime/gpu/cl/operators/ClGemmConvolution.h185
-rw-r--r--tests/validation/CL/UNIT/DynamicTensor.cpp1
-rw-r--r--tests/validation/CL/WeightsReshape.cpp20
-rw-r--r--tests/validation/fixtures/WeightsReshapeFixture.h16
21 files changed, 1060 insertions, 1010 deletions
diff --git a/Android.bp b/Android.bp
index 6507f7037a..0502e841f1 100644
--- a/Android.bp
+++ b/Android.bp
@@ -121,7 +121,6 @@ cc_library_static {
"src/core/CL/kernels/CLStackLayerKernel.cpp",
"src/core/CL/kernels/CLStridedSliceKernel.cpp",
"src/core/CL/kernels/CLTileKernel.cpp",
- "src/core/CL/kernels/CLWeightsReshapeKernel.cpp",
"src/core/CPP/CPPTypes.cpp",
"src/core/CPP/kernels/CPPBoxWithNonMaximaSuppressionLimitKernel.cpp",
"src/core/CPP/kernels/CPPNonMaximumSuppressionKernel.cpp",
@@ -367,6 +366,7 @@ cc_library_static {
"src/core/gpu/cl/kernels/ClScaleKernel.cpp",
"src/core/gpu/cl/kernels/ClSoftmaxKernel.cpp",
"src/core/gpu/cl/kernels/ClTransposeKernel.cpp",
+ "src/core/gpu/cl/kernels/ClWeightsReshapeKernel.cpp",
"src/core/gpu/cl/kernels/ClWidthConcatenate2TensorsKernel.cpp",
"src/core/gpu/cl/kernels/ClWidthConcatenate4TensorsKernel.cpp",
"src/core/gpu/cl/kernels/ClWidthConcatenateKernel.cpp",
@@ -673,6 +673,7 @@ cc_library_static {
"src/runtime/gpu/cl/operators/ClFlatten.cpp",
"src/runtime/gpu/cl/operators/ClFloor.cpp",
"src/runtime/gpu/cl/operators/ClGemm.cpp",
+ "src/runtime/gpu/cl/operators/ClGemmConvolution.cpp",
"src/runtime/gpu/cl/operators/ClGemmLowpMatrixMultiplyCore.cpp",
"src/runtime/gpu/cl/operators/ClGemmLowpOutputStage.cpp",
"src/runtime/gpu/cl/operators/ClLogicalNot.cpp",
diff --git a/arm_compute/runtime/CL/functions/CLDeconvolutionLayer.h b/arm_compute/runtime/CL/functions/CLDeconvolutionLayer.h
index 2dd4cd4bf5..8ad805492d 100644
--- a/arm_compute/runtime/CL/functions/CLDeconvolutionLayer.h
+++ b/arm_compute/runtime/CL/functions/CLDeconvolutionLayer.h
@@ -65,7 +65,7 @@ public:
* @param[in] bias (Optional) The biases have one dimension. Data type supported: Should match @p input data type, except for input of QASYMM8 and QASYMM8_SIGNED type where biases should be of S32 type
* @param[out] output Output tensor. The output has the same number of dimensions as the @p input.
* @param[in] deconv_info Contains padding and policies to be used in the deconvolution, this is described in @ref PadStrideInfo.
- * @param[in] weights_info (Optional) Weights information needed for @ref CLConvolutionLayer, specifies if the weights tensor has been reshaped with @ref CLWeightsReshapeKernel.
+ * @param[in] weights_info (Optional) Weights information needed for @ref CLConvolutionLayer, specifies if the weights tensor has been reshaped with @ref opencl::kernels::ClWeightsReshapeKernel.
*
*/
void configure(ICLTensor *input, ICLTensor *weights, const ICLTensor *bias, ICLTensor *output, const PadStrideInfo &deconv_info, const WeightsInfo &weights_info = WeightsInfo());
@@ -77,7 +77,7 @@ public:
* @param[in] bias (Optional) The biases have one dimension. Data type supported: Should match @p input data type, except for input of QASYMM8 and QASYMM8_SIGNED type where biases should be of S32 type
* @param[out] output Output tensor. The output has the same number of dimensions as the @p input.
* @param[in] deconv_info Contains padding and policies to be used in the deconvolution, this is described in @ref PadStrideInfo.
- * @param[in] weights_info (Optional) Weights information needed for @ref CLConvolutionLayer, specifies if the weights tensor has been reshaped with @ref CLWeightsReshapeKernel.
+ * @param[in] weights_info (Optional) Weights information needed for @ref CLConvolutionLayer, specifies if the weights tensor has been reshaped with @ref opencl::kernels::ClWeightsReshapeKernel.
*
*/
void configure(const CLCompileContext &compile_context, ICLTensor *input, ICLTensor *weights, const ICLTensor *bias, ICLTensor *output, const PadStrideInfo &deconv_info,
@@ -89,7 +89,7 @@ public:
* @param[in] bias (Optional) The biases have one dimension. Data type supported: Should match @p input data type, except for input of QASYMM8 and QASYMM8_SIGNED type where biases should be of S32 type
* @param[in] output Output tensor info. The output has the same number of dimensions as the @p input.
* @param[in] deconv_info Contains padding and policies to be used in the deconvolution, this is described in @ref PadStrideInfo.
- * @param[in] weights_info (Optional) Weights information needed for @ref CLConvolutionLayer, specifies if the weights tensor has been reshaped with @ref CLWeightsReshapeKernel.
+ * @param[in] weights_info (Optional) Weights information needed for @ref CLConvolutionLayer, specifies if the weights tensor has been reshaped with @ref opencl::kernels::ClWeightsReshapeKernel.
*
* @return a status
*/
diff --git a/arm_compute/runtime/CL/functions/CLDirectDeconvolutionLayer.h b/arm_compute/runtime/CL/functions/CLDirectDeconvolutionLayer.h
index 567de13508..d0a61cdd36 100644
--- a/arm_compute/runtime/CL/functions/CLDirectDeconvolutionLayer.h
+++ b/arm_compute/runtime/CL/functions/CLDirectDeconvolutionLayer.h
@@ -108,7 +108,7 @@ public:
* Data type supported: Should match @p input data type, except for input of QASYMM8 and QASYMM8_SIGNED type where biases should be of S32 type
* @param[out] output Output tensor. The output has the same number of dimensions as the @p input.
* @param[in] info Contains padding and policies to be used in the deconvolution, this is decribed in @ref PadStrideInfo.
- * @param[in] weights_info (Optional) Weights information needed for @ref CLConvolutionLayer, specifies if the weights tensor has been reshaped with @ref CLWeightsReshapeKernel.
+ * @param[in] weights_info (Optional) Weights information needed for @ref CLConvolutionLayer, specifies if the weights tensor has been reshaped with @ref opencl::kernels::ClWeightsReshapeKernel.
*
*/
void configure(ICLTensor *input, ICLTensor *weights, const ICLTensor *bias, ICLTensor *output, const PadStrideInfo &info, const WeightsInfo &weights_info = WeightsInfo());
@@ -122,7 +122,7 @@ public:
* Data type supported: Should match @p input data type, except for input of QASYMM8 and QASYMM8_SIGNED type where biases should be of S32 type
* @param[out] output Output tensor. The output has the same number of dimensions as the @p input.
* @param[in] info Contains padding and policies to be used in the deconvolution, this is decribed in @ref PadStrideInfo.
- * @param[in] weights_info (Optional) Weights information needed for @ref CLConvolutionLayer, specifies if the weights tensor has been reshaped with @ref CLWeightsReshapeKernel.
+ * @param[in] weights_info (Optional) Weights information needed for @ref CLConvolutionLayer, specifies if the weights tensor has been reshaped with @ref opencl::kernels::ClWeightsReshapeKernel.
*
*/
void configure(const CLCompileContext &compile_context, ICLTensor *input, ICLTensor *weights, const ICLTensor *bias, ICLTensor *output, const PadStrideInfo &info,
@@ -136,7 +136,7 @@ public:
* Data type supported: Should match @p input data type, except for input of QASYMM8 and QASYMM8_SIGNED type where biases should be of S32 type
* @param[in] output Output tensor info. The output has the same number of dimensions as the @p input.
* @param[in] info Contains padding and policies to be used in the deconvolution, this is decribed in @ref PadStrideInfo.
- * @param[in] weights_info (Optional) Weights information needed for @ref CLConvolutionLayer, specifies if the weights tensor has been reshaped with @ref CLWeightsReshapeKernel.
+ * @param[in] weights_info (Optional) Weights information needed for @ref CLConvolutionLayer, specifies if the weights tensor has been reshaped with @ref opencl::kernels::ClWeightsReshapeKernel.
*
* @return a status
*/
diff --git a/arm_compute/runtime/CL/functions/CLGEMMConvolutionLayer.h b/arm_compute/runtime/CL/functions/CLGEMMConvolutionLayer.h
index e262409ee7..3075465ef7 100644
--- a/arm_compute/runtime/CL/functions/CLGEMMConvolutionLayer.h
+++ b/arm_compute/runtime/CL/functions/CLGEMMConvolutionLayer.h
@@ -24,160 +24,24 @@
#ifndef ARM_COMPUTE_CLGEMMCONVOLUTIONLAYER_H
#define ARM_COMPUTE_CLGEMMCONVOLUTIONLAYER_H
-#include "arm_compute/runtime/IFunction.h"
-
-#include "arm_compute/core/CL/CLKernelLibrary.h"
-#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/CL/CLTensor.h"
-#include "arm_compute/runtime/CL/functions/CLActivationLayer.h"
-#include "arm_compute/runtime/CL/functions/CLGEMM.h"
-#include "arm_compute/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.h"
+#include "arm_compute/runtime/CL/CLTypes.h"
+#include "arm_compute/runtime/IFunction.h"
#include "arm_compute/runtime/IMemoryManager.h"
-#include "arm_compute/runtime/ITransformWeights.h"
#include "arm_compute/runtime/IWeightsManager.h"
-#include "arm_compute/runtime/MemoryGroup.h"
#include <memory>
namespace arm_compute
{
-class CLWeightsReshapeKernel;
+// Forward declarations
+class CLCompileContext;
class ICLTensor;
-namespace opencl
-{
-namespace kernels
-{
-class ClIm2ColKernel;
-class ClCol2ImKernel;
-} // namespace kernels
-} // namespace opencl
-
-/** Function to reshape and transpose the weights. This function calls the following kernels:
- * -# @ref CLWeightsReshapeKernel
- */
-class CLConvolutionLayerReshapeWeights : public IFunction
-{
-public:
- /** Constructor */
- CLConvolutionLayerReshapeWeights();
- /** Prevent instances of this class from being copied */
- CLConvolutionLayerReshapeWeights(const CLConvolutionLayerReshapeWeights &) = delete;
- /** Prevent instances of this class from being copied */
- CLConvolutionLayerReshapeWeights &operator=(const CLConvolutionLayerReshapeWeights &) = delete;
- /** Default move constructor */
- CLConvolutionLayerReshapeWeights(CLConvolutionLayerReshapeWeights &&) = default;
- /** Default move assignment operator */
- CLConvolutionLayerReshapeWeights &operator=(CLConvolutionLayerReshapeWeights &&) = default;
- /** Default destructor */
- ~CLConvolutionLayerReshapeWeights();
- /** Set the input and output tensors.
- *
- * @param[in] weights Weights tensor. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM].
- * Data type supported: QASYMM8/QASYMM8_SIGNED/QSYMM8_PER_CHANNEL/F16/F32.
- * @param[in] biases Biases tensor. Shared biases supported. Biases are 1D tensor with dimensions [OFM]. Data type supported: Same as @p weights.
- * @param[out] output Destination tensor. Data types supported: Same as @p weights.
- * @param[in] num_groups (Optional) Number of groups when performing a grouped convolution. num_groups != 1 is only supported for NCHW data layout
- */
- void configure(const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, unsigned int num_groups = 1);
- /** Set the input and output tensors.
- *
- * @param[in] compile_context The compile context to be used.
- * @param[in] weights Weights tensor. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM].
- * Data type supported: QASYMM8/QASYMM8_SIGNED/QSYMM8_PER_CHANNEL/F16/F32.
- * @param[in] biases Biases tensor. Shared biases supported. Biases are 1D tensor with dimensions [OFM]. Data type supported: Same as @p weights.
- * @param[out] output Destination tensor. Data types supported: Same as @p weights.
- * @param[in] num_groups (Optional) Number of groups when performing a grouped convolution. num_groups != 1 is only supported for NCHW data layout
- */
- void configure(const CLCompileContext &compile_context, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, unsigned int num_groups = 1);
- /** Static function to check if given info will lead to a valid configuration of @ref CLConvolutionLayerReshapeWeights
- *
- * @param[in] weights Weights tensor. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM].
- * Data type supported: QASYMM8/QASYMM8_SIGNED/QSYMM8_PER_CHANNEL/F16/F32.
- * @param[in] biases Biases tensor. Shared biases supported. Biases are 1D tensor with dimensions [OFM]. Data type supported: Same as @p weights.
- * @param[in] output Destination tensor. Data types supported: Same as @p weights.
- * @param[in] num_groups (Optional) Number of groups when performing a grouped convolution. num_groups != 1 is only supported for NCHW data layout
- *
- * @return a status
- */
- static Status validate(const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, unsigned int num_groups = 1);
- // Inherited methods overridden:
- void run() override;
-
-private:
- std::unique_ptr<CLWeightsReshapeKernel> _weights_reshape_kernel;
-};
-
-namespace weights_transformations
-{
-/** Basic function to manage the reshape weights generated from @ref CLConvolutionLayerReshapeWeights */
-class CLConvolutionLayerReshapeWeightsTransform : public ITransformWeights
-{
-public:
- /** Configures the @ref CLConvolutionLayerReshapeWeights function
- *
- * @param[in] input Input tensor. Data type supported: QASYMM8/QASYMM8_SIGNED/QSYMM8_PER_CHANNEL/F16/F32.
- * @param[in] biases Biases tensor. Data type supported: same as @p input, S32 if @p input is quantized.
- * @param[in] num_groups Number of groups when performing a grouped convolution.
- */
- void configure(const ICLTensor *input, const ICLTensor *biases, unsigned int num_groups)
- {
- configure(CLKernelLibrary::get().get_compile_context(), input, biases, num_groups);
- }
- /** Configures the @ref CLConvolutionLayerReshapeWeights function
- *
- * @param[in] compile_context The compile context to be used.
- * @param[in] input Input tensor. Data type supported: QASYMM8/QASYMM8_SIGNED/QSYMM8_PER_CHANNEL/F16/F32.
- * @param[in] biases Biases tensor. Data type supported: same as @p input, S32 if @p input is quantized.
- * @param[in] num_groups Number of groups when performing a grouped convolution.
- */
- void configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *biases, unsigned int num_groups)
- {
- _bias_bit = (biases != nullptr) ? 1 : 0;
- _num_groups = num_groups;
- _func.configure(compile_context, input, biases, &_output, num_groups);
- }
-
- //Inherited method override
- void run() override
- {
- _output.allocator()->allocate();
- _func.run();
- _reshape_run = true;
- }
-
- //Inherited method override
- ICLTensor *get_weights() override
- {
- return &_output;
- }
-
- //Inherited method override
- void release() override
- {
- _output.allocator()->free();
- }
-
- //Inherited method override
- uint32_t uid() override
- {
- return ((0x9) | (_bias_bit << 7) | (_num_groups << 8));
- }
-
-private:
- CLTensor _output{};
- CLConvolutionLayerReshapeWeights _func{};
- int32_t _bias_bit{ 0 };
- unsigned int _num_groups{ 0 };
-};
-} // namespace weights_transformations
+class ITensorInfo;
/** Basic function to compute the convolution layer. This function calls the following OpenCL kernels/functions:
*
- * -# @ref opencl::kernels::ClIm2ColKernel
- * -# @ref CLGEMM (if the data type is FP32 or FP16)
- * -# @ref CLGEMMLowpMatrixMultiplyCore (if the data type is QASYMM8/QASYMM8_SIGNED)
- * -# @ref CLGEMMLowpOutputStage with QUANTIZE_DOWN_FIXEDPOINT type of quantization (if the data type is QASYMM8/QASYMM8_SIGNED)
- * -# @ref opencl::kernels::ClCol2ImKernel (if NCHW data layout)
+ * -# @ref opencl::ClGemmConvolution
*/
class CLGEMMConvolutionLayer : public IFunction
{
@@ -282,65 +146,8 @@ public:
void prepare() override;
private:
- /** Configures the appropriate matrix multiply routine
- *
- * @param[in] compile_context The compile context to be used.
- * @param[in] input Input tensor. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32.
- * @param[in] weights Weights tensor. Data type supported: Same as @p input or QASYMM8/QSYMM8_PER_CHANNEL when @p input is QASYMM8 or
- * QASYMM8_SIGNED/QSYMM8_PER_CHANNEL when @p input is QASYMM8_SIGNED.
- * @param[in] biases Biases tensor. Shared biases supported. Biases are 1D tensor with dimensions [OFM].
- * Data type supported: Should match @p input data type, except for input of quantized type where biases should be of S32 type.
- * @param[in, out] output Output tensor. Data types supported: same as @p input.
- * @param[in] gemmlowp_output_stage GEMMLowp output stage info
- * @param[in] gemm_3d_depth Depth of GEMM 3D
- * @param[in] act_info Activation to apply after the matrix multiplication
- */
- void configure_mm(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output,
- const GEMMLowpOutputStageInfo &gemmlowp_output_stage,
- int gemm_3d_depth, const ActivationLayerInfo &act_info);
- /** Static function to check if given info will lead to a valid configuration of @ref CLGEMMConvolutionLayer matrix multiply routines
- *
- * @param[in] input Input tensor info. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32.
- * @param[in] weights Weights tensor info. Data type supported: Same as @p input or QASYMM8/QSYMM8_PER_CHANNEL when @p input is QASYMM8 or
- * QASYMM8_SIGNED/QSYMM8_PER_CHANNEL when @p input is QASYMM8_SIGNED.
- * @param[in] biases Biases tensor info. Shared biases supported. Biases are 1D tensor with dimensions [OFM].
- * Data type supported: Should match @p input data type, except for input of quantized type where biases should be of S32 type.
- * @param[in] output Output tensor info. Data types supported: same as @p input.
- * @param[in] gemmlowp_output_stage GEMMLowp output stage info
- * @param[in] gemm_3d_depth Depth of GEMM 3D
- * @param[in] skip_im2col Flag which specifies if im2col has to be skipped. i.e. 1x1 convolution with NHWC data layout.
- * @param[in] act_info Activation to apply after the matrix multiplication
- *
- * @return a status
- */
- static Status validate_mm(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const GEMMLowpOutputStageInfo &gemmlowp_output_stage,
- int gemm_3d_depth, bool skip_im2col, const ActivationLayerInfo &act_info);
-
-private:
- MemoryGroup _memory_group;
- IWeightsManager *_weights_manager;
- CLConvolutionLayerReshapeWeights _reshape_weights;
- weights_transformations::CLConvolutionLayerReshapeWeightsTransform _reshape_weights_managed;
- std::unique_ptr<opencl::kernels::ClIm2ColKernel> _im2col_kernel;
- CLGEMM _mm_gemm;
- CLGEMMLowpMatrixMultiplyCore _mm_gemmlowp;
- std::unique_ptr<opencl::kernels::ClCol2ImKernel> _col2im_kernel;
- CLActivationLayer _activationlayer_function;
-
- const ICLTensor *_original_weights;
- const ICLTensor *_input;
- const ICLTensor *_gemm_output_to_use;
- ICLTensor *_output;
-
- CLTensor _im2col_output;
- CLTensor _weights_reshaped;
- CLTensor _gemm_output;
-
- bool _skip_im2col;
- bool _skip_col2im;
- bool _is_quantized;
- bool _fuse_activation;
- bool _is_prepared;
+ struct Impl;
+ std::unique_ptr<Impl> _impl;
};
} // namespace arm_compute
#endif /* ARM_COMPUTE_CLGEMMCONVOLUTIONLAYER_H */
diff --git a/arm_compute/runtime/CL/functions/CLGEMMDeconvolutionLayer.h b/arm_compute/runtime/CL/functions/CLGEMMDeconvolutionLayer.h
index 6e482c98e7..c985738a9c 100644
--- a/arm_compute/runtime/CL/functions/CLGEMMDeconvolutionLayer.h
+++ b/arm_compute/runtime/CL/functions/CLGEMMDeconvolutionLayer.h
@@ -26,6 +26,8 @@
#include "arm_compute/runtime/CL/CLTensor.h"
#include "arm_compute/runtime/CL/functions/CLConvolutionLayer.h"
+#include "arm_compute/runtime/CL/functions/CLGEMM.h"
+#include "arm_compute/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.h"
#include "arm_compute/runtime/CL/functions/CLGEMMLowpOutputStage.h"
#include "arm_compute/runtime/CL/functions/CLPermute.h"
#include "arm_compute/runtime/CL/functions/CLReshapeLayer.h"
diff --git a/docs/user_guide/release_version_and_change_log.dox b/docs/user_guide/release_version_and_change_log.dox
index 45303e5d87..9dd5d19055 100644
--- a/docs/user_guide/release_version_and_change_log.dox
+++ b/docs/user_guide/release_version_and_change_log.dox
@@ -1313,7 +1313,7 @@ v17.06 Public major release
- CLDepthConcatenateLayerKernel / CLDepthConcatenateLayer
- CLHOGOrientationBinningKernel CLHOGBlockNormalizationKernel, CLHOGDetectorKernel / CLHOGDescriptor CLHOGDetector CLHOGGradient CLHOGMultiDetection
- CLLocallyConnectedMatrixMultiplyKernel / CLLocallyConnectedLayer
- - @ref CLWeightsReshapeKernel / @ref CLConvolutionLayerReshapeWeights
+ - CLWeightsReshapeKernel / CLConvolutionLayerReshapeWeights
- New C++ kernels:
- CPPDetectionWindowNonMaximaSuppressionKernel
- New Arm® Neon™ kernels / functions:
diff --git a/filelist.json b/filelist.json
index 56633e64d1..394ec0441a 100644
--- a/filelist.json
+++ b/filelist.json
@@ -260,7 +260,8 @@
"GEMM": {
"files": {
"operator": [
- "src/runtime/gpu/cl/operators/ClGemm.cpp"
+ "src/runtime/gpu/cl/operators/ClGemm.cpp",
+ "src/runtime/gpu/cl/operators/ClGemmConvolution.cpp"
],
"kernel": [
"src/core/gpu/cl/kernels/ClGemmMatrixMultiplyNativeKernel.cpp",
@@ -646,7 +647,7 @@
"WeightsReshape": {
"files": {
"kernel": [
- "src/core/CL/kernels/CLWeightsReshapeKernel.cpp"
+ "src/core/gpu/cl/kernels/ClWeightsReshapeKernel.cpp"
]
}
},
diff --git a/src/core/CL/CLKernels.h b/src/core/CL/CLKernels.h
index 6f6a8642e8..f9d560f1b7 100644
--- a/src/core/CL/CLKernels.h
+++ b/src/core/CL/CLKernels.h
@@ -66,6 +66,5 @@
#include "src/core/CL/kernels/CLStackLayerKernel.h"
#include "src/core/CL/kernels/CLStridedSliceKernel.h"
#include "src/core/CL/kernels/CLTileKernel.h"
-#include "src/core/CL/kernels/CLWeightsReshapeKernel.h"
#endif /* ARM_COMPUTE_CLKERNELS_H */
diff --git a/src/core/CL/kernels/CLWeightsReshapeKernel.h b/src/core/CL/kernels/CLWeightsReshapeKernel.h
deleted file mode 100644
index 9ac60a7a1a..0000000000
--- a/src/core/CL/kernels/CLWeightsReshapeKernel.h
+++ /dev/null
@@ -1,121 +0,0 @@
-/*
- * Copyright (c) 2017-2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_CLWEIGHTSRESHAPEKERNEL_H
-#define ARM_COMPUTE_CLWEIGHTSRESHAPEKERNEL_H
-
-#include "src/core/CL/ICLKernel.h"
-
-namespace arm_compute
-{
-/** OpenCL kernel to perform reshaping on the weights used by convolution and locally connected layer
- *
- * Rearranges each 3-dimensional kernel to a single row leading to a matrix with linearized kernels.
- * In combination with the @ref opencl::kernels::ClIm2ColKernel can transform a convolution to a matrix multiplication.
- *
- * For example assuming a 3D weight kernel of 3x3 dimensions and depth of 2 we have:
- * @f[
- * \left( \begin{array}{ccc}
- * a000 & a001 & a002 \\
- * a010 & a011 & a012 \\
- * a020 & a021 & a022 \\
- * \end{array} \right)
- * \left( \begin{array}{ccc}
- * a100 & a101 & a102 \\
- * a110 & a111 & a112 \\
- * a120 & a121 & a122 \\
- * \end{array} \right)
- * \rightarrow
- * \left( \begin{array}{ccccccccc}
- * a000 & a001 & a002 & a010 & a011 & a012 & a020 & a021 & a022 & a100 & a101 & a102 & a110 & a111 & a112 & a120 & a121 & a122 \\
- * \end{array} \right)
- * @f]
- */
-class CLWeightsReshapeKernel : public ICLKernel
-{
-public:
- /** Constructor.*/
- CLWeightsReshapeKernel();
- /** Prevent instances of this class from being copied (As this class contains pointers) */
- CLWeightsReshapeKernel(const CLWeightsReshapeKernel &) = delete;
- /** Prevent instances of this class from being copied (As this class contains pointers) */
- CLWeightsReshapeKernel &operator=(const CLWeightsReshapeKernel &) = delete;
- /** Allow instances of this class to be moved */
- CLWeightsReshapeKernel(CLWeightsReshapeKernel &&) = default;
- /** Allow instances of this class to be moved */
- CLWeightsReshapeKernel &operator=(CLWeightsReshapeKernel &&) = default;
- /** Default destructor */
- ~CLWeightsReshapeKernel() = default;
- /** Set the input and output of the kernel.
- *
- * @param[in] input The input tensor to convert. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM] if shared,
- * and 5D tensor with dimensions [kernel_x, kernel_y, IFM, OFM, num_patches] if unshared. Data types supported: All
- * @param[in] biases The shared biases tensor to append. Bias is 1D tensor with dimensions [OFM] if shared and 2D tensor with
- * dimensions [OFM, num_patches] if unshared. Data types supported: F16/F32, for quantized types this must be nullptr.
- * @warning Appending biases to weights reshaped matrix is not supported for quantized asymmetric types.
- * @param[out] output The output tensor. Should be a 2D Tensor if there are no groups and the weights are not shared; a 3D Tensor otherwise.
- * Data types supported: Same as @p input
- * @param[in] num_groups (Optional) Number of groups when performing a grouped convolution. num_groups != 1 is only supported for NCHW data layout
- * Number of groups greater than one are only supported for NCHW data layout, and the number of weights must be a multiple of it.
- */
- void configure(const ICLTensor *input, const ICLTensor *biases, ICLTensor *output, unsigned int num_groups = 1);
- /** Set the input and output of the kernel.
- *
- * @param[in] compile_context The compile context to be used.
- * @param[in] input The input tensor to convert. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM] if shared,
- * and 5D tensor with dimensions [kernel_x, kernel_y, IFM, OFM, num_patches] if unshared. Data types supported: All
- * @param[in] biases The shared biases tensor to append. Bias is 1D tensor with dimensions [OFM] if shared and 2D tensor with
- * dimensions [OFM, num_patches] if unshared. Data types supported: F16/F32, for quantized types this must be nullptr.
- * @warning Appending biases to weights reshaped matrix is not supported for quantized asymmetric types.
- * @param[out] output The output tensor. Should be a 2D Tensor if there are no groups and the weights are not shared; a 3D Tensor otherwise.
- * Data types supported: Same as @p input
- * @param[in] num_groups (Optional) Number of groups when performing a grouped convolution. num_groups != 1 is only supported for NCHW data layout
- * Number of groups greater than one are only supported for NCHW data layout, and the number of weights must be a multiple of it.
- */
- void configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *biases, ICLTensor *output, unsigned int num_groups = 1);
- /** Static function to check if given info will lead to a valid configuration of @ref CLWeightsReshapeKernel
- *
- * @param[in] input The input tensor to convert. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM] if shared,
- * and 5D tensor with dimensions [kernel_x, kernel_y, IFM, OFM, num_patches] if unshared. Data types supported: All
- * @param[in] biases The shared biases tensor to append. Bias is 1D tensor with dimensions [OFM] if shared and 2D tensor with
- * dimensions [OFM, num_patches] if unshared. Data types supported: F16/F32, for quantized types this must be nullptr.
- * @warning Appending biases to weights reshaped matrix is not supported for quantized asymmetric types.
- * @param[in] output The output tensor. Should be a 2D Tensor if there are no groups and the weights are not shared; a 3D Tensor otherwise.
- * Data types supported: Same as @p input
- * @param[in] num_groups (Optional) Number of groups when performing a grouped convolution. num_groups != 1 is only supported for NCHW data layout
- * Number of groups greater than one are only supported for NCHW data layout, and the number of weights must be a multiple of it.
- *
- * @return a status
- */
- static Status validate(const ITensorInfo *input, const ITensorInfo *biases, const ITensorInfo *output, unsigned int num_groups = 1);
-
- // Inherited methods overridden:
- void run(const Window &window, cl::CommandQueue &queue) override;
-
-private:
- const ICLTensor *_input;
- const ICLTensor *_biases;
- ICLTensor *_output;
-};
-} // namespace arm_compute
-#endif /*ARM_COMPUTE_CLWEIGHTSRESHAPEKERNEL_H */ \ No newline at end of file
diff --git a/src/core/CL/kernels/CLWeightsReshapeKernel.cpp b/src/core/gpu/cl/kernels/ClWeightsReshapeKernel.cpp
index 45e3505d0f..e3629f7706 100644
--- a/src/core/CL/kernels/CLWeightsReshapeKernel.cpp
+++ b/src/core/gpu/cl/kernels/ClWeightsReshapeKernel.cpp
@@ -21,18 +21,22 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#include "src/core/CL/kernels/CLWeightsReshapeKernel.h"
+#include "src/core/gpu/cl/kernels/ClWeightsReshapeKernel.h"
#include "arm_compute/core/CL/ICLTensor.h"
#include "arm_compute/core/Error.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "src/core/helpers/AutoConfiguration.h"
#include "src/core/helpers/WindowHelpers.h"
+#include "support/Cast.h"
#include "support/StringSupport.h"
namespace arm_compute
{
-using namespace arm_compute::misc::shape_calculator;
-
+using namespace misc::shape_calculator;
+namespace opencl
+{
+namespace kernels
+{
namespace
{
Status validate_arguments(const ITensorInfo *input, const ITensorInfo *biases, const ITensorInfo *output, unsigned int num_groups)
@@ -66,36 +70,23 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *biases, c
}
} // namespace
-CLWeightsReshapeKernel::CLWeightsReshapeKernel()
- : _input(nullptr), _biases(nullptr), _output(nullptr)
+ClWeightsReshapeKernel::ClWeightsReshapeKernel()
{
_type = CLKernelType::ELEMENTWISE;
}
-void CLWeightsReshapeKernel::configure(const ICLTensor *input, const ICLTensor *biases, ICLTensor *output, unsigned int num_groups)
-{
- configure(CLKernelLibrary::get().get_compile_context(), input, biases, output, num_groups);
-}
-
-void CLWeightsReshapeKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *biases, ICLTensor *output, unsigned int num_groups)
+void ClWeightsReshapeKernel::configure(const ClCompileContext &compile_context, const ITensorInfo *src, const ITensorInfo *biases, ITensorInfo *dst, unsigned int num_groups)
{
- ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
+ ARM_COMPUTE_ERROR_ON_NULLPTR(src, dst);
// Output tensor auto inizialitation if not yet initialized
- auto_init_if_empty(*output->info(), input->info()->clone()->set_tensor_shape(compute_weights_reshaped_shape(*input->info(), (biases != nullptr), num_groups)));
+ auto_init_if_empty(*dst, src->clone()->set_tensor_shape(compute_weights_reshaped_shape(*src, (biases != nullptr), num_groups)));
// Perform validation step
- ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(),
- (biases != nullptr) ? biases->info() : nullptr,
- output->info(), num_groups));
+ ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(src, biases, dst, num_groups));
+ auto padding_info = get_padding_info({ src, biases, dst });
- auto padding_info = get_padding_info({ input, biases, output });
-
- const DataType data_type = input->info()->data_type();
-
- _biases = biases;
- _output = output;
- _input = input;
+ const DataType data_type = src->data_type();
// Create build options
CLBuildOptions build_opts;
@@ -107,25 +98,29 @@ void CLWeightsReshapeKernel::configure(const CLCompileContext &compile_context,
_kernel = create_kernel(compile_context, "reshape_to_columns", build_opts.options());
// Configure window
- Window win = calculate_max_window(*input->info(), Steps());
+ Window win = calculate_max_window(*src, Steps());
ICLKernel::configure_internal(win);
ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
}
-Status CLWeightsReshapeKernel::validate(const ITensorInfo *input, const ITensorInfo *biases, const ITensorInfo *output, unsigned int num_groups)
+Status ClWeightsReshapeKernel::validate(const ITensorInfo *src, const ITensorInfo *biases, const ITensorInfo *dst, unsigned int num_groups)
{
- ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, biases, output, num_groups));
+ ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(src, biases, dst, num_groups));
return Status{};
}
-void CLWeightsReshapeKernel::run(const Window &window, cl::CommandQueue &queue)
+void ClWeightsReshapeKernel::run_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue)
{
ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
ARM_COMPUTE_ERROR_ON_MISMATCHING_WINDOWS(ICLKernel::window(), window);
+ auto src = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC));
+ auto biases = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_BIAS));
+ auto dst = utils::cast::polymorphic_downcast<ICLTensor *>(tensors.get_tensor(TensorType::ACL_DST));
+
Window out_window;
- out_window.use_tensor_dimensions(_output->info()->tensor_shape());
+ out_window.use_tensor_dimensions(dst->info()->tensor_shape());
Window in_slice = window.first_slice_window_3D();
Window out_slice = out_window.first_slice_window_2D();
@@ -134,16 +129,16 @@ void CLWeightsReshapeKernel::run(const Window &window, cl::CommandQueue &queue)
Window biases_slice;
unsigned int idx = num_arguments_per_3D_tensor() + num_arguments_per_2D_tensor();
- idx += (_biases != nullptr) ? num_arguments_per_1D_tensor() : 0;
- _kernel.setArg<cl_uint>(idx++, _input->info()->dimension(0));
- _kernel.setArg<cl_uint>(idx++, _input->info()->dimension(1));
- _kernel.setArg<cl_uint>(idx++, _input->info()->dimension(2));
- _kernel.setArg<cl_uint>(idx++, _input->info()->dimension(3));
- _kernel.setArg<cl_uint>(idx++, _output->info()->strides_in_bytes().z());
-
- if(_biases != nullptr)
+ idx += (biases != nullptr) ? num_arguments_per_1D_tensor() : 0;
+ _kernel.setArg<cl_uint>(idx++, src->info()->dimension(0));
+ _kernel.setArg<cl_uint>(idx++, src->info()->dimension(1));
+ _kernel.setArg<cl_uint>(idx++, src->info()->dimension(2));
+ _kernel.setArg<cl_uint>(idx++, src->info()->dimension(3));
+ _kernel.setArg<cl_uint>(idx++, dst->info()->strides_in_bytes().z());
+
+ if(biases != nullptr)
{
- biases_window.use_tensor_dimensions(_biases->info()->tensor_shape());
+ biases_window.use_tensor_dimensions(biases->info()->tensor_shape());
biases_slice = biases_window.first_slice_window_1D();
}
@@ -151,11 +146,11 @@ void CLWeightsReshapeKernel::run(const Window &window, cl::CommandQueue &queue)
{
// Set arguments
unsigned idx = 0;
- add_3D_tensor_argument(idx, _input, in_slice);
- add_2D_tensor_argument(idx, _output, out_slice);
- if(_biases != nullptr)
+ add_3D_tensor_argument(idx, src, in_slice);
+ add_2D_tensor_argument(idx, dst, out_slice);
+ if(biases != nullptr)
{
- add_1D_tensor_argument(idx, _biases, biases_slice);
+ add_1D_tensor_argument(idx, biases, biases_slice);
ARM_COMPUTE_UNUSED(biases_window.slide_window_slice_1D(biases_slice));
}
@@ -164,4 +159,6 @@ void CLWeightsReshapeKernel::run(const Window &window, cl::CommandQueue &queue)
}
while(window.slide_window_slice_4D(in_slice) && out_window.slide_window_slice_2D(out_slice));
}
+} // namespace kernels
+} // namespace opencl
} // namespace arm_compute
diff --git a/src/core/gpu/cl/kernels/ClWeightsReshapeKernel.h b/src/core/gpu/cl/kernels/ClWeightsReshapeKernel.h
new file mode 100644
index 0000000000..de2f2d10cc
--- /dev/null
+++ b/src/core/gpu/cl/kernels/ClWeightsReshapeKernel.h
@@ -0,0 +1,93 @@
+/*
+ * Copyright (c) 2017-2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_CL_WEIGHTSRESHAPE_KERNEL_H
+#define ARM_COMPUTE_CL_WEIGHTSRESHAPE_KERNEL_H
+
+#include "src/core/common/Macros.h"
+#include "src/core/gpu/cl/ClCompileContext.h"
+#include "src/core/gpu/cl/IClKernel.h"
+
+namespace arm_compute
+{
+namespace opencl
+{
+namespace kernels
+{
+/** OpenCL kernel to perform reshaping on the weights used by convolution and locally connected layer
+ *
+ * Rearranges each 3-dimensional kernel to a single row leading to a matrix with linearized kernels.
+ * In combination with the @ref opencl::kernels::ClIm2ColKernel can transform a convolution to a matrix multiplication.
+ *
+ * For example assuming a 3D weight kernel of 3x3 dimensions and depth of 2 we have:
+ * @f[
+ * \left( \begin{array}{ccc}
+ * a000 & a001 & a002 \\
+ * a010 & a011 & a012 \\
+ * a020 & a021 & a022 \\
+ * \end{array} \right)
+ * \left( \begin{array}{ccc}
+ * a100 & a101 & a102 \\
+ * a110 & a111 & a112 \\
+ * a120 & a121 & a122 \\
+ * \end{array} \right)
+ * \rightarrow
+ * \left( \begin{array}{ccccccccc}
+ * a000 & a001 & a002 & a010 & a011 & a012 & a020 & a021 & a022 & a100 & a101 & a102 & a110 & a111 & a112 & a120 & a121 & a122 \\
+ * \end{array} \right)
+ * @f]
+ */
+class ClWeightsReshapeKernel : public IClKernel
+{
+public:
+ ClWeightsReshapeKernel();
+ ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(ClWeightsReshapeKernel);
+ /** Set the input and output of the kernel.
+ *
+ * @param[in] compile_context The compile context to be used.
+ * @param[in] src The input tensor info to convert. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM] if shared,
+ * and 5D tensor with dimensions [kernel_x, kernel_y, IFM, OFM, num_patches] if unshared. Data types supported: All
+ * @param[in] biases The shared biases tensor info to append. Bias is 1D tensor with dimensions [OFM] if shared and 2D tensor with
+ * dimensions [OFM, num_patches] if unshared. Data types supported: F16/F32, for quantized types this must be nullptr.
+ * @warning Appending biases to weights reshaped matrix is not supported for quantized asymmetric types.
+ * @param[out] dst The output tensor info. Should be a 2D Tensor if there are no groups and the weights are not shared; a 3D Tensor otherwise.
+ *                        Data types supported: Same as @p src
+ * @param[in]  num_groups      (Optional) Number of groups when performing a grouped convolution. num_groups != 1 is only supported for NCHW data layout.
+ *                             When num_groups is greater than one, the number of weights (OFM) must be a multiple of num_groups.
+ */
+ void configure(const ClCompileContext &compile_context, const ITensorInfo *src, const ITensorInfo *biases, ITensorInfo *dst, unsigned int num_groups = 1);
+ /** Static function to check if given info will lead to a valid configuration
+ *
+ * Similar to ClWeightsReshapeKernel::configure()
+ *
+ * @return a status
+ */
+ static Status validate(const ITensorInfo *src, const ITensorInfo *biases, const ITensorInfo *dst, unsigned int num_groups = 1);
+
+ // Inherited methods overridden:
+ void run_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue) override;
+};
+} // namespace kernels
+} // namespace opencl
+} // namespace arm_compute
+#endif /*ARM_COMPUTE_CL_WEIGHTSRESHAPE_KERNEL_H */ \ No newline at end of file
diff --git a/src/runtime/CL/functions/CLDirectDeconvolutionLayer.cpp b/src/runtime/CL/functions/CLDirectDeconvolutionLayer.cpp
index 8d1a91e420..a476bb6d79 100644
--- a/src/runtime/CL/functions/CLDirectDeconvolutionLayer.cpp
+++ b/src/runtime/CL/functions/CLDirectDeconvolutionLayer.cpp
@@ -31,7 +31,6 @@
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "src/core/CL/kernels/CLDeconvolutionLayerUpsampleKernel.h"
#include "src/core/CL/kernels/CLFillBorderKernel.h"
-#include "src/core/CL/kernels/CLWeightsReshapeKernel.h"
#include "src/core/helpers/AutoConfiguration.h"
#include <memory>
diff --git a/src/runtime/CL/functions/CLGEMMConvolutionLayer.cpp b/src/runtime/CL/functions/CLGEMMConvolutionLayer.cpp
index 16735dde0e..75ca77dbe2 100644
--- a/src/runtime/CL/functions/CLGEMMConvolutionLayer.cpp
+++ b/src/runtime/CL/functions/CLGEMMConvolutionLayer.cpp
@@ -23,6 +23,7 @@
*/
#include "arm_compute/runtime/CL/functions/CLGEMMConvolutionLayer.h"
+#include "arm_compute/core/CL/CLKernelLibrary.h"
#include "arm_compute/core/PixelValue.h"
#include "arm_compute/core/Size2D.h"
#include "arm_compute/core/Utils.h"
@@ -30,10 +31,8 @@
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "arm_compute/core/utils/quantization/AsymmHelpers.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
-#include "src/core/CL/kernels/CLWeightsReshapeKernel.h"
-#include "src/core/gpu/cl/kernels/ClCol2ImKernel.h"
-#include "src/core/gpu/cl/kernels/ClIm2ColKernel.h"
-#include "src/core/helpers/AutoConfiguration.h"
+#include "src/core/helpers/MemoryHelpers.h"
+#include "src/runtime/gpu/cl/operators/ClGemmConvolution.h"
#include "support/Cast.h"
#include <cmath>
@@ -44,156 +43,30 @@ namespace arm_compute
{
using namespace arm_compute::misc::shape_calculator;
using namespace arm_compute::utils::cast;
+using namespace arm_compute::experimental;
-CLConvolutionLayerReshapeWeights::CLConvolutionLayerReshapeWeights()
- : _weights_reshape_kernel(std::make_unique<CLWeightsReshapeKernel>())
+struct CLGEMMConvolutionLayer::Impl
{
-}
-
-CLConvolutionLayerReshapeWeights::~CLConvolutionLayerReshapeWeights() = default;
-
-void CLConvolutionLayerReshapeWeights::configure(const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, unsigned int num_groups)
-{
- configure(CLKernelLibrary::get().get_compile_context(), weights, biases, output, num_groups);
-}
-
-void CLConvolutionLayerReshapeWeights::configure(const CLCompileContext &compile_context, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, unsigned int num_groups)
-{
- // Perform validation step
- ARM_COMPUTE_ERROR_ON_NULLPTR(weights, output);
- ARM_COMPUTE_ERROR_THROW_ON(CLConvolutionLayerReshapeWeights::validate(weights->info(),
- (biases != nullptr) ? biases->info() : nullptr,
- output->info(),
- num_groups));
-
- const bool append_biases = (biases != nullptr) && !is_data_type_quantized_asymmetric(weights->info()->data_type());
- const ICLTensor *biases_to_use = (append_biases) ? biases : nullptr;
-
- _weights_reshape_kernel->configure(compile_context, weights, biases_to_use, output, num_groups);
-
- output->info()->set_quantization_info(weights->info()->quantization_info());
-}
-
-Status CLConvolutionLayerReshapeWeights::validate(const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, unsigned int num_groups)
-{
- ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(weights);
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(weights, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::QSYMM8_PER_CHANNEL, DataType::F16, DataType::F32);
- ARM_COMPUTE_RETURN_ERROR_ON(weights->num_dimensions() > 4);
-
- if(biases != nullptr)
- {
- const int idx_kernels = get_data_layout_dimension_index(weights->data_layout(), DataLayoutDimension::BATCHES);
- ARM_COMPUTE_RETURN_ERROR_ON(is_data_type_quantized(weights->data_type()));
-
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(weights, biases);
- ARM_COMPUTE_RETURN_ERROR_ON(biases->dimension(0) != weights->dimension(idx_kernels));
- ARM_COMPUTE_RETURN_ERROR_ON(biases->num_dimensions() > 1);
- }
-
- if((output != nullptr) && (output->total_size() != 0))
- {
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(weights, output);
- CLWeightsReshapeKernel::validate(weights, biases, output, num_groups);
- }
-
- return Status{};
-}
-
-void CLConvolutionLayerReshapeWeights::run()
-{
- CLScheduler::get().enqueue(*_weights_reshape_kernel);
-}
+ const ITensor *weights{ nullptr };
+ std::unique_ptr<opencl::ClGemmConvolution> op{ nullptr };
+ ITensorPack run_pack{};
+ ITensorPack prep_pack{};
+ MemoryGroup memory_group{};
+ IWeightsManager *weights_manager{ nullptr };
+ MemoryRequirements aux_mem_req{};
+ WorkspaceData<CLTensor> workspace_tensors{};
+ bool is_prepared{ false };
+};
CLGEMMConvolutionLayer::CLGEMMConvolutionLayer(std::shared_ptr<IMemoryManager> memory_manager, IWeightsManager *weights_manager)
- : _memory_group(memory_manager), _weights_manager(weights_manager), _reshape_weights(), _reshape_weights_managed(), _im2col_kernel(nullptr), _mm_gemm(memory_manager, weights_manager),
- _mm_gemmlowp(memory_manager), _col2im_kernel(nullptr), _activationlayer_function(), _original_weights(nullptr), _input(nullptr), _gemm_output_to_use(nullptr), _output(nullptr), _im2col_output(),
- _weights_reshaped(), _gemm_output(), _skip_im2col(false), _skip_col2im(false), _is_quantized(false), _fuse_activation(true), _is_prepared(false)
+ : _impl(std::make_unique<Impl>())
{
+ _impl->memory_group = MemoryGroup(memory_manager);
+ _impl->weights_manager = weights_manager;
}
CLGEMMConvolutionLayer::~CLGEMMConvolutionLayer() = default;
-void CLGEMMConvolutionLayer::configure_mm(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output,
- const GEMMLowpOutputStageInfo &gemmlowp_output_stage,
- int gemm_3d_depth, const ActivationLayerInfo &act_info)
-{
- ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights);
- ARM_COMPUTE_ERROR_THROW_ON(validate_mm(input->info(), weights->info(), biases != nullptr ? biases->info() : nullptr, output->info(), gemmlowp_output_stage, gemm_3d_depth, _skip_im2col, act_info));
-
- const GEMMInfo &gemm_info = GEMMInfo(false, // is_a_reshaped
- false, // is_b_reshaped
- true, // reshape_b_only_on_first_run
- gemm_3d_depth, // depth_output_gemm3d
- _skip_im2col, // reinterpret_input_as_3d
- false, // retain_internal_weights
- gemmlowp_output_stage, // gemmlowp_output_stage
- false, // fp_mixed_precision
- false, // fast_math
- true, // broadcast_bias
- act_info); // activation_info
-
- if(_is_quantized)
- {
- // Since we need negative offsets for computing convolution, we need to change QuantizationInfo()
- // Extract and negate input and weights offset
- const QuantizationInfo input_quantization_info = input->info()->quantization_info();
- const QuantizationInfo weights_quantization_info = weights->info()->quantization_info();
-
- input->info()->set_quantization_info(QuantizationInfo(input_quantization_info.uniform().scale, -input_quantization_info.uniform().offset));
- weights->info()->set_quantization_info(QuantizationInfo(weights_quantization_info.uniform().scale, -weights_quantization_info.uniform().offset));
-
- _mm_gemmlowp.configure(compile_context, input, weights, biases, output, gemm_info);
-
- // Revert back QuantizatioInfo as input and weights could be used in other convolution layers
- input->info()->set_quantization_info(input_quantization_info);
- weights->info()->set_quantization_info(weights_quantization_info);
- }
- else
- {
- // Configure matrix multiply function
- _mm_gemm.configure(compile_context, input, weights, biases, output, 1.0f, 1.0f, gemm_info);
- }
-}
-
-Status CLGEMMConvolutionLayer::validate_mm(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output,
- const GEMMLowpOutputStageInfo &gemmlowp_output_stage, int gemm_3d_depth, bool skip_im2col, const ActivationLayerInfo &act_info)
-{
- const bool is_quantized = is_data_type_quantized_asymmetric(input->data_type());
-
- const GEMMInfo &gemm_info = GEMMInfo(false, // is_a_reshaped
- false, // is_b_reshaped
- true, // reshape_b_only_on_first_run
- gemm_3d_depth, // depth_output_gemm3d
- skip_im2col, // reinterpret_input_as_3d
- false, // retain_internal_weights
- gemmlowp_output_stage, // gemmlowp_output_stage
- false, // fp_mixed_precision
- false, // fast_math
- true, // broadcast_bias
- act_info); // activation_info
-
- if(is_quantized)
- {
- // Since we need negative offsets for computing convolution, we need to change QuantizationInfo()
- // Extract and negate input and weights offset
- const QuantizationInfo input_quantization_info = input->quantization_info();
- const QuantizationInfo weights_quantization_info = weights->quantization_info();
-
- std::unique_ptr<ITensorInfo> input_qa = input->clone();
- std::unique_ptr<ITensorInfo> weights_qa = weights->clone();
- input_qa->set_quantization_info(QuantizationInfo(input_quantization_info.uniform().scale, -input_quantization_info.uniform().offset));
- weights_qa->set_quantization_info(QuantizationInfo(weights_quantization_info.uniform().scale, -weights_quantization_info.uniform().offset));
-
- // Perform validation step on GEMMLowp
- return CLGEMMLowpMatrixMultiplyCore::validate(input_qa.get(), weights_qa.get(), biases, output, gemm_info);
- }
- else
- {
- // Perform validation step on Matrix multiply function
- return CLGEMM::validate(input, weights, biases, output, 1.0f, 1.0f, gemm_info);
- }
-}
-
void CLGEMMConvolutionLayer::configure(const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info, const WeightsInfo &weights_info,
const Size2D &dilation, const ActivationLayerInfo &act_info, unsigned int num_groups)
{
@@ -205,489 +78,61 @@ void CLGEMMConvolutionLayer::configure(const CLCompileContext &compile_context,
const WeightsInfo &weights_info, const Size2D &dilation, const ActivationLayerInfo &act_info, unsigned int num_groups)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights, output);
-
- ARM_COMPUTE_ERROR_THROW_ON(CLGEMMConvolutionLayer::validate(input->info(),
- weights->info(),
- biases != nullptr ? biases->info() : nullptr,
- output->info(),
- conv_info,
- weights_info,
- dilation,
- act_info,
- num_groups));
-
- const DataType data_type = input->info()->data_type();
- const DataLayout data_layout = input->info()->data_layout();
- const int idx_width = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
- const int idx_height = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
- const int idx_kernels = get_data_layout_dimension_index(data_layout, DataLayoutDimension::BATCHES);
-
- const unsigned int kernel_width = weights->info()->dimension(idx_width);
- const unsigned int kernel_height = weights->info()->dimension(idx_height);
- const unsigned int num_kernels = weights->info()->dimension(idx_kernels);
-
- const UniformQuantizationInfo iq_info = input->info()->quantization_info().uniform();
- const UniformQuantizationInfo oq_info = output->info()->quantization_info().uniform();
-
- _is_prepared = weights_info.retain_internal_weights();
- _original_weights = weights;
- _input = input;
- _is_quantized = is_data_type_quantized_asymmetric(input->info()->data_type());
- _skip_im2col = (data_layout == DataLayout::NHWC && kernel_width == 1 && kernel_height == 1 && conv_info.stride().first == 1 && conv_info.stride().second == 1);
- _skip_col2im = data_layout == DataLayout::NHWC;
-
- // Only for quantize there are few cases where we cannot fuse the activation function in GEMM
- _fuse_activation = true;
-
- const ICLTensor *gemm_input_to_use = input;
- ICLTensor *gemm_output_to_use = output;
-
- // Get parameters from conv_info
- unsigned int stride_x = 0;
- unsigned int stride_y = 0;
- std::tie(stride_x, stride_y) = conv_info.stride();
-
- // Get convolved dimensions
- unsigned int conv_w = 0;
- unsigned int conv_h = 0;
- std::tie(conv_w, conv_h) = scaled_dimensions(input->info()->dimension(idx_width),
- input->info()->dimension(idx_height),
- kernel_width,
- kernel_height,
- conv_info,
- dilation);
-
- unsigned int mat_weights_cols = num_kernels / num_groups;
-
- const ICLTensor *biases_to_use = biases;
- bool append_bias = false;
-
- ICLTensor *weights_to_use = &_weights_reshaped;
- if(num_groups != 1 && biases != nullptr)
- {
- // num_groups != 1 can only be for NCHW
- // Since it is missing an utility function to reshape the biases, we append the biases into the weights tensor
- biases_to_use = nullptr;
- append_bias = true;
-
- if(_weights_manager && _weights_manager->are_weights_managed(weights))
- {
- _reshape_weights_managed.configure(compile_context, weights, biases, num_groups);
- weights_to_use = utils::cast::polymorphic_downcast<ICLTensor *>(_weights_manager->acquire(weights, &_reshape_weights_managed));
- }
- else
- {
- _reshape_weights.configure(compile_context, weights, biases, &_weights_reshaped, num_groups);
- }
- }
- else
- {
- if(_weights_manager && _weights_manager->are_weights_managed(weights))
- {
- _reshape_weights_managed.configure(compile_context, weights, nullptr, num_groups);
- weights_to_use = utils::cast::polymorphic_downcast<ICLTensor *>(_weights_manager->acquire(weights, &_reshape_weights_managed));
- }
- else
- {
- _reshape_weights.configure(compile_context, weights, nullptr, &_weights_reshaped, num_groups);
- }
- }
-
- // Create tensor to store im2col reshaped inputs
- if(!_skip_im2col)
- {
- _memory_group.manage(&_im2col_output);
-
- // Configure and tune im2col. im2col output shape is auto-initialized
- _im2col_kernel = std::make_unique<opencl::kernels::ClIm2ColKernel>();
-
- // Set the GPU target for im2col
- _im2col_kernel->set_target(CLScheduler::get().target());
- _im2col_kernel->configure(compile_context, input->info(), _im2col_output.info(), Size2D(kernel_width, kernel_height), conv_info, append_bias, dilation, num_groups);
-
- // Set quantization info
- _im2col_output.info()->set_quantization_info(input->info()->quantization_info());
- CLScheduler::get().tune_kernel_static(*_im2col_kernel);
-
- // Update GEMM input
- gemm_input_to_use = &_im2col_output;
- }
-
- // Create GEMM output tensor
- if(!_skip_col2im)
- {
- TensorShape shape_gemm;
-
- // If we cannot skip col2im it means we run im2col as well
- shape_gemm = _im2col_output.info()->tensor_shape();
- shape_gemm.set(0, mat_weights_cols);
- shape_gemm.set(1, conv_w * conv_h);
-
- TensorInfo info_gemm(shape_gemm, 1, data_type);
- info_gemm.set_quantization_info(output->info()->quantization_info()).set_data_layout(input->info()->data_layout());
- _gemm_output.allocator()->init(info_gemm);
- _memory_group.manage(&_gemm_output);
-
- // Update GEMM output
- gemm_output_to_use = &_gemm_output;
- }
-
- GEMMLowpOutputStageInfo gemmlowp_output_stage;
- gemmlowp_output_stage.type = GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT;
- gemmlowp_output_stage.gemmlowp_offset = 0;
-
- // Configure output stage for quantized case
- if(_is_quantized)
- {
- const auto output_quant_info = (output->info()->total_size() == 0) ? iq_info : oq_info;
- const bool is_quantized_per_channel = is_data_type_quantized_per_channel(weights->info()->data_type());
- const unsigned int num_filters = (is_quantized_per_channel) ? num_kernels : 1;
-
- gemmlowp_output_stage.is_quantized_per_channel = is_quantized_per_channel;
-
- gemmlowp_output_stage.gemmlowp_multipliers.resize(num_filters);
- gemmlowp_output_stage.gemmlowp_shifts.resize(num_filters);
- quantization::compute_quantized_multipliers_and_shifts(input->info(),
- weights->info(),
- output->info(),
- gemmlowp_output_stage.gemmlowp_multipliers.data(),
- gemmlowp_output_stage.gemmlowp_shifts.data());
- gemmlowp_output_stage.gemmlowp_multiplier = gemmlowp_output_stage.gemmlowp_multipliers[0];
- gemmlowp_output_stage.gemmlowp_shift = gemmlowp_output_stage.gemmlowp_shifts[0];
-
- PixelValue min_val{};
- PixelValue max_val{};
- std::tie(min_val, max_val) = get_min_max(output->info()->data_type());
-
- auto min_activation = min_val.get<int32_t>();
- auto max_activation = max_val.get<int32_t>();
-
- const std::set<ActivationLayerInfo::ActivationFunction> supported_acts = { ActivationLayerInfo::ActivationFunction::RELU,
- ActivationLayerInfo::ActivationFunction::BOUNDED_RELU,
- ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU
- };
-
- if(act_info.enabled())
- {
- if(supported_acts.count(act_info.activation()) != 0)
- {
- std::tie(min_activation, max_activation) = get_quantized_activation_min_max(act_info, data_type, output_quant_info);
- }
- else
- {
- _fuse_activation = false;
- }
- }
-
- // Set the GEMMLowp output stage info
- gemmlowp_output_stage.gemmlowp_offset = output_quant_info.offset;
- gemmlowp_output_stage.gemmlowp_min_bound = min_activation;
- gemmlowp_output_stage.gemmlowp_max_bound = max_activation;
- }
-
- // Configure and tune GEMM
- // In case of NHWC, we need to run GEMM3D (gemm_3d_depth != 0) in order to avoid reshaping the output matrix
- const unsigned int gemm_3d_depth = (data_layout == DataLayout::NHWC) ? conv_h : 0;
-
- configure_mm(compile_context, gemm_input_to_use, weights_to_use, biases_to_use, gemm_output_to_use, gemmlowp_output_stage, gemm_3d_depth, act_info);
-
- if(!_skip_im2col)
- {
- _im2col_output.allocator()->allocate();
- }
-
- if(!_skip_col2im)
- {
- // Set the GPU target for col2im
- _col2im_kernel = std::make_unique<opencl::kernels::ClCol2ImKernel>();
- _col2im_kernel->set_target(CLScheduler::get().target());
- // Configure and tune Col2Im
- _col2im_kernel->configure(compile_context, gemm_output_to_use->info(), output->info(), Size2D(conv_w, conv_h), num_groups);
- CLScheduler::get().tune_kernel_static(*_col2im_kernel.get());
- _gemm_output_to_use = gemm_output_to_use;
- _output = output;
- }
-
- if(!_skip_col2im)
- {
- _gemm_output.allocator()->allocate();
- }
-
- ARM_COMPUTE_ERROR_ON_MSG((output->info()->dimension(idx_width) != conv_w) || (output->info()->dimension(idx_height) != conv_h),
- "Output shape does not match the expected one");
-
- if(!_fuse_activation)
- {
- _activationlayer_function.configure(compile_context, output, nullptr, act_info);
- }
-
- ARM_COMPUTE_UNUSED(weights_info);
+ _impl->weights = weights;
+ _impl->op = std::make_unique<opencl::ClGemmConvolution>();
+ const Conv2dInfo conv2d_info = Conv2dInfo(conv_info, dilation, act_info, false, num_groups);
+ _impl->op->configure(compile_context, input->info(), weights->info(), (biases != nullptr ? biases->info() : nullptr), output->info(), conv2d_info, weights_info);
+
+ _impl->run_pack =
+ {
+ { TensorType::ACL_SRC_0, input },
+ { TensorType::ACL_SRC_1, weights },
+ { TensorType::ACL_SRC_2, biases },
+ { TensorType::ACL_DST, output }
+ };
+ _impl->prep_pack =
+ {
+ { TensorType::ACL_SRC_1, weights },
+ { TensorType::ACL_SRC_2, biases },
+ };
+ _impl->aux_mem_req = _impl->op->workspace();
+ _impl->workspace_tensors = manage_workspace<CLTensor>(_impl->aux_mem_req, _impl->memory_group, _impl->run_pack, _impl->prep_pack);
}
Status CLGEMMConvolutionLayer::validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info,
const WeightsInfo &weights_info, const Size2D &dilation, const ActivationLayerInfo &act_info, unsigned int num_groups)
{
- ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, weights, output);
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(weights_info.are_reshaped(), "Weights already reshaped are not supported!");
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::F16, DataType::F32);
- const bool is_quantized_per_channel = is_data_type_quantized_per_channel(weights->data_type());
-
- if(!is_quantized_per_channel)
- {
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, weights);
- }
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_LAYOUT(input, weights);
- ARM_COMPUTE_RETURN_ERROR_ON_MSG((num_groups != 1) && (input->data_layout() != DataLayout::NCHW), "Grouping (num_groups != 1) with NHWC data layout is not supported");
- ARM_COMPUTE_RETURN_ERROR_ON_MSG((num_groups != 1) && (input->data_type() == DataType::QASYMM8), "Grouping (num_groups != 1) is not supported with QASYMM8");
- ARM_COMPUTE_RETURN_ERROR_ON(((input->dimension(2) / weights->dimension(2)) != num_groups) && (input->data_layout() == DataLayout::NCHW));
-
- const DataLayout data_layout = input->data_layout();
- const DataType data_type = input->data_type();
- const int idx_width = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
- const int idx_height = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
- const int idx_channel = get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL);
- const int idx_kernels = get_data_layout_dimension_index(data_layout, DataLayoutDimension::BATCHES);
-
- const unsigned int kernel_width = weights->dimension(idx_width);
- const unsigned int kernel_height = weights->dimension(idx_height);
- const unsigned int num_kernels = weights->dimension(idx_kernels);
-
- TensorInfo im2col_reshaped_info{};
- TensorInfo info_gemm{};
- TensorInfo weights_reshaped_info{};
- const ITensorInfo *gemm_input_to_use = input;
- const ITensorInfo *gemm_output_to_use = output;
- const ITensorInfo *weights_to_use = weights;
- const bool is_quantized = is_data_type_quantized_asymmetric(data_type);
- const bool skip_im2col = (data_layout == DataLayout::NHWC && kernel_width == 1 && kernel_height == 1 && conv_info.stride().first == 1 && conv_info.stride().second == 1);
- const bool skip_col2im = data_layout == DataLayout::NHWC;
- bool fuse_activation = true;
-
- ARM_COMPUTE_RETURN_ERROR_ON((weights->dimension(idx_channel) * num_groups) != input->dimension(idx_channel));
- ARM_COMPUTE_RETURN_ERROR_ON(weights->num_dimensions() > 4);
-
- // Validate biases
- if(biases != nullptr)
- {
- if(is_quantized)
- {
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(biases, 1, DataType::S32);
- }
- else
- {
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, biases);
- }
- ARM_COMPUTE_RETURN_ERROR_ON(biases->dimension(0) != weights->dimension(idx_kernels));
- ARM_COMPUTE_RETURN_ERROR_ON(biases->num_dimensions() > 1);
- }
-
- if(act_info.enabled())
- {
- ARM_COMPUTE_ERROR_ON(act_info.b() > act_info.a());
- }
-
- // Get convolved dimensions
- unsigned int conv_w = 0;
- unsigned int conv_h = 0;
-
- std::tie(conv_w, conv_h) = scaled_dimensions(input->dimension(idx_width),
- input->dimension(idx_height),
- kernel_width,
- kernel_height,
- conv_info,
- dilation);
-
- unsigned int mat_weights_cols = num_kernels / num_groups;
-
- const ITensorInfo *biases_to_use = biases;
- bool append_bias = false;
-
- if(num_groups != 1 && biases != nullptr)
- {
- // num_groups != 1 can only be for NCHW
- // Since it is missing an utility function to reshape the biases, we append the biases into the weights tensor
- biases_to_use = nullptr;
- append_bias = true;
-
- ARM_COMPUTE_RETURN_ON_ERROR(CLConvolutionLayerReshapeWeights::validate(weights, biases, nullptr, num_groups));
- weights_reshaped_info = TensorInfo(compute_weights_reshaped_shape(*weights, true, num_groups), 1, data_type);
- }
- else
- {
- ARM_COMPUTE_RETURN_ON_ERROR(CLConvolutionLayerReshapeWeights::validate(weights, nullptr, nullptr, num_groups));
- weights_reshaped_info = TensorInfo(compute_weights_reshaped_shape(*weights, false, num_groups), 1, data_type);
- }
-
- weights_to_use = &weights_reshaped_info;
-
- if(!skip_im2col)
- {
- const Size2D kernel_dims(kernel_width, kernel_height);
-
- // Output tensor auto initialization if not yet initialized
- TensorShape expected_output_shape = compute_im2col_conv_shape(input, kernel_dims, conv_info, append_bias, dilation, num_groups == 1, num_groups);
-
- auto_init_if_empty(im2col_reshaped_info, input->clone()->set_tensor_shape(expected_output_shape));
-
- ARM_COMPUTE_RETURN_ON_ERROR(opencl::kernels::ClIm2ColKernel::validate(input, &im2col_reshaped_info, kernel_dims, conv_info, append_bias, dilation, num_groups));
- gemm_input_to_use = &im2col_reshaped_info;
- }
-
- // Create GEMM output tensor
- if(!skip_col2im)
- {
- TensorShape shape_gemm;
-
- shape_gemm = gemm_input_to_use->tensor_shape();
- shape_gemm.set(0, mat_weights_cols);
- shape_gemm.set(1, conv_w * conv_h);
-
- info_gemm = TensorInfo(shape_gemm, 1, data_type);
- info_gemm.set_quantization_info(output->quantization_info()).set_data_layout(input->data_layout());
- gemm_output_to_use = &info_gemm;
- }
-
- GEMMLowpOutputStageInfo gemmlowp_output_stage;
- gemmlowp_output_stage.type = GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT;
- gemmlowp_output_stage.gemmlowp_offset = 0;
- gemmlowp_output_stage.is_quantized_per_channel = is_quantized_per_channel;
-
- if(is_quantized)
- {
- const UniformQuantizationInfo iq_info = input->quantization_info().uniform();
- const UniformQuantizationInfo oq_info = output->quantization_info().uniform();
- const auto output_quant_info = (output->total_size() == 0) ? iq_info : oq_info;
- const unsigned int num_filters = (is_quantized_per_channel) ? num_kernels : 1;
-
- gemmlowp_output_stage.gemmlowp_multipliers.resize(num_filters);
- gemmlowp_output_stage.gemmlowp_shifts.resize(num_filters);
- quantization::compute_quantized_multipliers_and_shifts(input,
- weights,
- output,
- gemmlowp_output_stage.gemmlowp_multipliers.data(),
- gemmlowp_output_stage.gemmlowp_shifts.data());
- gemmlowp_output_stage.gemmlowp_multiplier = gemmlowp_output_stage.gemmlowp_multipliers[0];
- gemmlowp_output_stage.gemmlowp_shift = gemmlowp_output_stage.gemmlowp_shifts[0];
-
- int min_activation = 0;
- int max_activation = 0;
-
- const std::set<ActivationLayerInfo::ActivationFunction> supported_acts = { ActivationLayerInfo::ActivationFunction::RELU,
- ActivationLayerInfo::ActivationFunction::BOUNDED_RELU,
- ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU
- };
-
- if(act_info.enabled())
- {
- if(supported_acts.count(act_info.activation()) != 0)
- {
- std::tie(min_activation, max_activation) = get_quantized_activation_min_max(act_info, data_type, output_quant_info);
- }
- else
- {
- fuse_activation = false;
- }
- }
-
- // Set the GEMMLowp output stage info
- gemmlowp_output_stage.gemmlowp_offset = output_quant_info.offset;
- gemmlowp_output_stage.gemmlowp_min_bound = min_activation;
- gemmlowp_output_stage.gemmlowp_max_bound = max_activation;
- }
-
- // In case of NHWC, we need to run GEMM3D (gemm_3d_depth != 0) in order to avoid reshaping the output matrix
- const unsigned int gemm_3d_depth = (data_layout == DataLayout::NHWC) ? conv_h : 0;
-
- ARM_COMPUTE_RETURN_ON_ERROR(validate_mm(gemm_input_to_use, weights_to_use, biases_to_use, gemm_output_to_use, gemmlowp_output_stage, gemm_3d_depth, skip_im2col, act_info));
-
- // Validate Col2Im
- if(!skip_col2im)
- {
- ARM_COMPUTE_RETURN_ON_ERROR(opencl::kernels::ClCol2ImKernel::validate(gemm_output_to_use, output, Size2D(conv_w, conv_h), num_groups));
- }
-
- //Validate Activation Layer
- if(!fuse_activation)
- {
- ARM_COMPUTE_RETURN_ON_ERROR(CLActivationLayer::validate(output, nullptr, act_info));
- }
-
- return Status{};
+ const Conv2dInfo conv2d_info = Conv2dInfo(conv_info, dilation, act_info, false, num_groups);
+ return opencl::ClGemmConvolution::validate(input, weights, biases, output, conv2d_info, weights_info);
}
void CLGEMMConvolutionLayer::run()
{
prepare();
-
- MemoryGroupResourceScope scope_mg(_memory_group);
-
- // Run im2col
- if(!_skip_im2col)
- {
- ITensorPack pack =
- {
- { TensorType::ACL_SRC, _input },
- { TensorType::ACL_DST, &_im2col_output }
- };
- CLScheduler::get().enqueue_op(*_im2col_kernel, pack, false);
- }
-
- // Runs CLGEMM or CLGEMMLowpMatrixMultiplyCore functions
- if(_is_quantized)
- {
- // Run gemmlowp
- _mm_gemmlowp.run();
- }
- else
- {
- // Run gemm
- _mm_gemm.run();
- }
-
- // Reshape output matrix
- if(!_skip_col2im)
- {
- ITensorPack pack =
- {
- { TensorType::ACL_SRC, _gemm_output_to_use },
- { TensorType::ACL_DST, _output }
- };
- CLScheduler::get().enqueue_op(*_col2im_kernel.get(), pack, false);
- }
-
- //Run Activation Layer if we cannot fuse in GEMM
- if(!_fuse_activation)
- {
- _activationlayer_function.run();
- }
+ MemoryGroupResourceScope scope_mg(_impl->memory_group);
+ _impl->op->run(_impl->run_pack);
}
void CLGEMMConvolutionLayer::prepare()
{
- if(!_is_prepared)
+ if(!_impl->is_prepared)
{
- ARM_COMPUTE_ERROR_ON(!_original_weights->is_used());
- if(_weights_manager && _weights_manager->are_weights_managed(_original_weights))
+ _impl->op->prepare(_impl->prep_pack);
+ auto has_reshape = std::find_if(_impl->aux_mem_req.begin(),
+ _impl->aux_mem_req.end(),
+ [](const MemoryInfo & m) -> bool { return m.lifetime == MemoryLifetime::Persistent; });
+
+ if(has_reshape != std::end(_impl->aux_mem_req))
{
- _weights_manager->run(_original_weights, &_reshape_weights_managed);
+ _impl->weights->mark_as_unused();
}
else
{
- // Run weights reshaping and mark original weights tensor as unused
- _weights_reshaped.allocator()->allocate();
- _reshape_weights.run();
- _original_weights->mark_as_unused();
- }
-
- // Prepare GEMM
- _is_quantized ? _mm_gemmlowp.prepare() : _mm_gemm.prepare();
- if(!_weights_reshaped.is_used())
- {
- _weights_reshaped.allocator()->free();
+ // Pack the B matrix to be used as the underlying GEMM performs no reshapes
+ _impl->run_pack.add_const_tensor(ACL_SRC_1, _impl->weights);
}
-
- CLScheduler::get().queue().finish();
- _is_prepared = true;
+ release_temporaries(_impl->aux_mem_req, _impl->workspace_tensors);
+ _impl->is_prepared = true;
}
}
} // namespace arm_compute
diff --git a/src/runtime/CL/functions/CLGEMMDeconvolutionLayer.cpp b/src/runtime/CL/functions/CLGEMMDeconvolutionLayer.cpp
index 7b98b524c1..126a59e9f2 100644
--- a/src/runtime/CL/functions/CLGEMMDeconvolutionLayer.cpp
+++ b/src/runtime/CL/functions/CLGEMMDeconvolutionLayer.cpp
@@ -30,7 +30,6 @@
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "src/core/CL/kernels/CLDeconvolutionReshapeOutputKernel.h"
#include "src/core/CL/kernels/CLFillBorderKernel.h"
-#include "src/core/CL/kernels/CLWeightsReshapeKernel.h"
#include <tuple>
diff --git a/src/runtime/gpu/cl/operators/ClGemm.cpp b/src/runtime/gpu/cl/operators/ClGemm.cpp
index cb0eecae4b..2792dc470d 100644
--- a/src/runtime/gpu/cl/operators/ClGemm.cpp
+++ b/src/runtime/gpu/cl/operators/ClGemm.cpp
@@ -208,6 +208,7 @@ ClGemm::ClGemm()
_tmp_b(),
_reshape_b_only_on_first_run(false),
_gemm_kernel_type(CLGEMMKernelType::NATIVE_V1),
+ _is_prepared(false),
_aux_mem(AuxTensorIdx::Count)
{
}
@@ -696,6 +697,7 @@ void ClGemm::run(ITensorPack &tensors)
}
ITensorPack gemm_reshaped_pack{ { ACL_SRC_0, lhs_reshaped.get() }, { ACL_SRC_1, rhs_reshaped.get() }, { ACL_SRC_2, src2 }, { ACL_DST, dst } };
+
if(_gemm_kernel_type == CLGEMMKernelType::RESHAPED)
{
CLScheduler::get().enqueue_op(*_mm_reshaped_kernel, gemm_reshaped_pack, true);
@@ -740,19 +742,23 @@ void ClGemm::run(ITensorPack &tensors)
void ClGemm::prepare(ITensorPack &constants)
{
- const ITensor *src1 = constants.get_const_tensor(ACL_SRC_1);
- ICLTensor *rhs_aux = utils::cast::polymorphic_downcast<ICLTensor *>(constants.get_tensor(offset_int_vec(RhsReshape)));
-
- // If memory for RHS is persistent and src1 is provided re-transform else assume that RHS is transformed
- if((_aux_mem[AuxTensorIdx::RhsReshape].lifetime == MemoryLifetime::Persistent) && (src1 != nullptr && rhs_aux != nullptr) && rhs_aux)
+ if(!_is_prepared)
{
- ARM_COMPUTE_LOG_INFO_WITH_FUNCNAME_ACL("Transforming RHS Matrix!");
+ const ITensor *src1 = constants.get_const_tensor(ACL_SRC_1);
+ ICLTensor *rhs_aux = utils::cast::polymorphic_downcast<ICLTensor *>(constants.get_tensor(offset_int_vec(RhsReshape)));
- CLAuxTensorHandler rhs_reshaped(_tmp_b, *rhs_aux);
- ARM_COMPUTE_ERROR_ON(rhs_reshaped.get()->cl_buffer().get() == nullptr);
+ // If memory for RHS is persistent and src1 is provided re-transform else assume that RHS is transformed
+ if((_aux_mem[AuxTensorIdx::RhsReshape].lifetime == MemoryLifetime::Persistent) && (src1 != nullptr && rhs_aux != nullptr) && rhs_aux)
+ {
+ ARM_COMPUTE_LOG_INFO_WITH_FUNCNAME_ACL("Transforming RHS Matrix!");
- ITensorPack reshape_rhs_pack{ { ACL_SRC, src1 }, { ACL_DST, rhs_reshaped.get() } };
- CLScheduler::get().enqueue_op(*_reshape_rhs_kernel, reshape_rhs_pack, true);
+ CLAuxTensorHandler rhs_reshaped(_tmp_b, *rhs_aux);
+ ARM_COMPUTE_ERROR_ON(rhs_reshaped.get()->cl_buffer().get() == nullptr);
+
+ ITensorPack reshape_rhs_pack{ { ACL_SRC, src1 }, { ACL_DST, rhs_reshaped.get() } };
+ CLScheduler::get().enqueue_op(*_reshape_rhs_kernel, reshape_rhs_pack, true);
+ }
+ _is_prepared = true;
}
}
diff --git a/src/runtime/gpu/cl/operators/ClGemm.h b/src/runtime/gpu/cl/operators/ClGemm.h
index aad208bdb0..254344e862 100644
--- a/src/runtime/gpu/cl/operators/ClGemm.h
+++ b/src/runtime/gpu/cl/operators/ClGemm.h
@@ -129,8 +129,8 @@ private:
TensorInfo _tmp_b;
bool _reshape_b_only_on_first_run;
CLGEMMKernelType _gemm_kernel_type;
-
- experimental::MemoryRequirements _aux_mem{};
+ bool _is_prepared;
+ experimental::MemoryRequirements _aux_mem{};
};
} // namespace opencl
} // namespace arm_compute
diff --git a/src/runtime/gpu/cl/operators/ClGemmConvolution.cpp b/src/runtime/gpu/cl/operators/ClGemmConvolution.cpp
new file mode 100644
index 0000000000..1926cbbe4d
--- /dev/null
+++ b/src/runtime/gpu/cl/operators/ClGemmConvolution.cpp
@@ -0,0 +1,628 @@
+/*
+ * Copyright (c) 2017-2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "src/runtime/gpu/cl/operators/ClGemmConvolution.h"
+
+#include "arm_compute/core/CL/ICLTensor.h"
+#include "arm_compute/core/PixelValue.h"
+#include "arm_compute/core/Size2D.h"
+#include "arm_compute/core/TensorInfo.h"
+#include "arm_compute/core/Utils.h"
+#include "arm_compute/core/Validate.h"
+#include "arm_compute/core/utils/misc/ShapeCalculator.h"
+#include "arm_compute/core/utils/quantization/AsymmHelpers.h"
+#include "arm_compute/runtime/CL/CLScheduler.h"
+#include "src/core/gpu/cl/kernels/ClActivationKernel.h"
+#include "src/core/gpu/cl/kernels/ClCol2ImKernel.h"
+#include "src/core/gpu/cl/kernels/ClIm2ColKernel.h"
+#include "src/core/gpu/cl/kernels/ClWeightsReshapeKernel.h"
+#include "src/core/helpers/AutoConfiguration.h"
+#include "src/core/helpers/MemoryHelpers.h"
+#include "src/runtime/gpu/cl/operators/ClGemm.h"
+#include "src/runtime/gpu/cl/operators/ClGemmLowpMatrixMultiplyCore.h"
+#include "src/runtime/gpu/cl/utils/ClAuxTensorHandler.h"
+#include "support/Cast.h"
+
+namespace arm_compute
+{
+using namespace experimental;
+using namespace misc::shape_calculator;
+using namespace utils::cast;
+namespace opencl
+{
+ClGemmConvolution::ClGemmConvolution()
+ : _weights_reshape_kernel(nullptr), _im2col_kernel(nullptr), _mm_gemm(nullptr), _mm_gemmlowp(nullptr), _col2im_kernel(nullptr), _activation_kernel(nullptr), _im2col_output(), _weights_reshaped(),
+ _gemm_output(), _skip_im2col(false), _skip_col2im(false), _is_quantized(false), _fuse_activation(true), _append_bias(false), _is_prepared(false), _aux_mem(AuxTensorIdx::Count)
+{
+}
+ClGemmConvolution::~ClGemmConvolution() = default;
+
+void ClGemmConvolution::configure_mm(const ClCompileContext &compile_context, const ITensorInfo *src, ITensorInfo *weights, ITensorInfo *biases, ITensorInfo *dst,
+ const GEMMLowpOutputStageInfo &gemmlowp_output_stage,
+ int gemm_3d_depth, const ActivationLayerInfo &act_info)
+{
+ ARM_COMPUTE_ERROR_ON_NULLPTR(src, weights);
+ ARM_COMPUTE_ERROR_THROW_ON(validate_mm(src, weights, biases, dst, gemmlowp_output_stage, gemm_3d_depth, _skip_im2col, act_info));
+
+ const GEMMInfo &gemm_info = GEMMInfo(false, // is_a_reshaped
+ false, // is_b_reshaped
+ true, // reshape_b_only_on_first_run
+ gemm_3d_depth, // depth_output_gemm3d
+ _skip_im2col, // reinterpret_input_as_3d
+ false, // retain_internal_weights
+ gemmlowp_output_stage, // gemmlowp_output_stage
+ false, // fast_math
+ false, // fp_mixed_precision
+ true, // broadcast_bias
+ act_info); // activation_info
+
+ TensorInfo tmp_src{ *src };
+ if(_is_quantized)
+ {
+ // Since we need negative offsets for computing convolution, we need to change QuantizationInfo()
+ // Extract and negate input and weights offset
+ const QuantizationInfo input_quantization_info = src->quantization_info();
+ const QuantizationInfo weights_quantization_info = weights->quantization_info();
+
+ tmp_src.set_quantization_info(QuantizationInfo(input_quantization_info.uniform().scale, -input_quantization_info.uniform().offset));
+ weights->set_quantization_info(QuantizationInfo(weights_quantization_info.uniform().scale, -weights_quantization_info.uniform().offset));
+
+ _mm_gemmlowp = std::make_unique<ClGemmLowpMatrixMultiplyCore>();
+ _mm_gemmlowp->configure(compile_context, &tmp_src, weights, biases, dst, gemm_info);
+
+ // Revert back QuantizationInfo as weights could be used in other convolution layers
+ weights->set_quantization_info(weights_quantization_info);
+
+ auto mm_mem_req = _mm_gemmlowp->workspace();
+ for(unsigned int cont = 0; cont < mm_mem_req.size(); ++cont)
+ {
+ _aux_mem[cont] = mm_mem_req[cont];
+ }
+ }
+ else
+ {
+ // Configure matrix multiply function
+ _mm_gemm = std::make_unique<ClGemm>();
+ _mm_gemm->configure(compile_context, &tmp_src, weights, biases, dst, 1.0f, 1.0f, gemm_info);
+ auto mm_mem_req = _mm_gemm->workspace();
+ for(unsigned int cont = 0; cont < mm_mem_req.size(); ++cont)
+ {
+ _aux_mem[cont] = mm_mem_req[cont];
+ }
+ }
+}
+
+Status ClGemmConvolution::validate_mm(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *dst,
+ const GEMMLowpOutputStageInfo &gemmlowp_output_stage, int gemm_3d_depth, bool skip_im2col, const ActivationLayerInfo &act_info)
+{
+ const bool is_quantized = is_data_type_quantized_asymmetric(src->data_type());
+
+ const GEMMInfo &gemm_info = GEMMInfo(false, // is_a_reshaped
+ false, // is_b_reshaped
+ true, // reshape_b_only_on_first_run
+ gemm_3d_depth, // depth_output_gemm3d
+ skip_im2col, // reinterpret_input_as_3d
+ false, // retain_internal_weights
+ gemmlowp_output_stage, // gemmlowp_output_stage
+ false, // fast_math
+ false, // fp_mixed_precision
+ true, // broadcast_bias
+ act_info); // activation_info
+
+ if(is_quantized)
+ {
+ // Since we need negative offsets for computing convolution, we need to change QuantizationInfo()
+ // Extract and negate input and weights offset
+ const QuantizationInfo input_quantization_info = src->quantization_info();
+ const QuantizationInfo weights_quantization_info = weights->quantization_info();
+
+ std::unique_ptr<ITensorInfo> src_qa = src->clone();
+ std::unique_ptr<ITensorInfo> weights_qa = weights->clone();
+ src_qa->set_quantization_info(QuantizationInfo(input_quantization_info.uniform().scale, -input_quantization_info.uniform().offset));
+ weights_qa->set_quantization_info(QuantizationInfo(weights_quantization_info.uniform().scale, -weights_quantization_info.uniform().offset));
+
+ // Perform validation step on GEMMLowp
+ return ClGemmLowpMatrixMultiplyCore::validate(src_qa.get(), weights_qa.get(), biases, dst, gemm_info);
+ }
+ else
+ {
+ // Perform validation step on Matrix multiply function
+ return ClGemm::validate(src, weights, biases, dst, 1.0f, 1.0f, gemm_info);
+ }
+}
+
+void ClGemmConvolution::configure(const CLCompileContext &compile_context, ITensorInfo *src, ITensorInfo *weights, ITensorInfo *biases, ITensorInfo *dst,
+ const Conv2dInfo &conv2d_info, const WeightsInfo &weights_info)
+{
+ ARM_COMPUTE_ERROR_ON_NULLPTR(src, weights, dst);
+
+ ARM_COMPUTE_ERROR_THROW_ON(ClGemmConvolution::validate(src, weights, biases, dst,
+ conv2d_info,
+ weights_info));
+
+ const DataType data_type = src->data_type();
+ const DataLayout data_layout = src->data_layout();
+ const int idx_width = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
+ const int idx_height = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
+ const int idx_kernels = get_data_layout_dimension_index(data_layout, DataLayoutDimension::BATCHES);
+
+ const unsigned int kernel_width = weights->dimension(idx_width);
+ const unsigned int kernel_height = weights->dimension(idx_height);
+ const unsigned int num_kernels = weights->dimension(idx_kernels);
+
+ const UniformQuantizationInfo iq_info = src->quantization_info().uniform();
+ const UniformQuantizationInfo oq_info = dst->quantization_info().uniform();
+
+ _is_prepared = weights_info.retain_internal_weights();
+ _is_quantized = is_data_type_quantized_asymmetric(src->data_type());
+ _skip_im2col = (data_layout == DataLayout::NHWC && kernel_width == 1 && kernel_height == 1 && conv2d_info.conv_info.stride().first == 1 && conv2d_info.conv_info.stride().second == 1);
+ _skip_col2im = data_layout == DataLayout::NHWC;
+
+ // Only for quantize there are few cases where we cannot fuse the activation function in GEMM
+ _fuse_activation = true;
+
+ const ITensorInfo *gemm_input_to_use = src;
+ ITensorInfo *gemm_output_to_use = dst;
+
+ // Get parameters from conv_info
+ unsigned int stride_x = 0;
+ unsigned int stride_y = 0;
+ std::tie(stride_x, stride_y) = conv2d_info.conv_info.stride();
+
+ // Get convolved dimensions
+ unsigned int conv_w = 0;
+ unsigned int conv_h = 0;
+ std::tie(conv_w, conv_h) = scaled_dimensions(src->dimension(idx_width),
+ src->dimension(idx_height),
+ kernel_width,
+ kernel_height,
+ conv2d_info.conv_info,
+ conv2d_info.dilation);
+
+ unsigned int mat_weights_cols = num_kernels / conv2d_info.num_groups;
+
+ ITensorInfo *biases_to_use = biases;
+ _append_bias = false;
+
+ _weights_reshape_kernel = std::make_unique<kernels::ClWeightsReshapeKernel>();
+ if(conv2d_info.num_groups != 1 && biases != nullptr)
+ {
+ // num_groups != 1 can only be for NCHW
+ // Since a utility function to reshape the biases is missing, we append the biases into the weights tensor
+ biases_to_use = nullptr;
+ _append_bias = true;
+ _weights_reshape_kernel->configure(compile_context, weights, biases, &_weights_reshaped, conv2d_info.num_groups);
+ }
+ else
+ {
+ _weights_reshape_kernel->configure(compile_context, weights, nullptr, &_weights_reshaped, conv2d_info.num_groups);
+ }
+
+ // Create tensor to store im2col reshaped inputs
+ if(!_skip_im2col)
+ {
+ // Configure and tune im2col. im2col output shape is auto-initialized
+ _im2col_kernel = std::make_unique<opencl::kernels::ClIm2ColKernel>();
+
+ // Set the GPU target for im2col
+ _im2col_kernel->set_target(CLScheduler::get().target());
+ _im2col_kernel->configure(compile_context, src, &_im2col_output, Size2D(kernel_width, kernel_height), conv2d_info.conv_info, _append_bias, conv2d_info.dilation, conv2d_info.num_groups);
+
+ // Set quantization info
+ _im2col_output.set_quantization_info(src->quantization_info());
+ CLScheduler::get().tune_kernel_static(*_im2col_kernel);
+
+ // Update GEMM input
+ gemm_input_to_use = &_im2col_output;
+ }
+
+ // Create GEMM output tensor
+ if(!_skip_col2im)
+ {
+ TensorShape shape_gemm;
+
+ // If we cannot skip col2im it means we run im2col as well
+ shape_gemm = _im2col_output.tensor_shape();
+ shape_gemm.set(0, mat_weights_cols);
+ shape_gemm.set(1, conv_w * conv_h);
+
+ _gemm_output = TensorInfo(shape_gemm, 1, data_type);
+ _gemm_output.set_quantization_info(dst->quantization_info()).set_data_layout(src->data_layout());
+
+ // Update GEMM output
+ gemm_output_to_use = &_gemm_output;
+ }
+
+ GEMMLowpOutputStageInfo gemmlowp_output_stage;
+ gemmlowp_output_stage.type = GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT;
+ gemmlowp_output_stage.gemmlowp_offset = 0;
+
+ // Configure output stage for quantized case
+ if(_is_quantized)
+ {
+ const auto output_quant_info = (dst->total_size() == 0) ? iq_info : oq_info;
+ const bool is_quantized_per_channel = is_data_type_quantized_per_channel(weights->data_type());
+ const unsigned int num_filters = (is_quantized_per_channel) ? num_kernels : 1;
+
+ gemmlowp_output_stage.is_quantized_per_channel = is_quantized_per_channel;
+
+ gemmlowp_output_stage.gemmlowp_multipliers.resize(num_filters);
+ gemmlowp_output_stage.gemmlowp_shifts.resize(num_filters);
+ quantization::compute_quantized_multipliers_and_shifts(src, weights, dst,
+ gemmlowp_output_stage.gemmlowp_multipliers.data(),
+ gemmlowp_output_stage.gemmlowp_shifts.data());
+ gemmlowp_output_stage.gemmlowp_multiplier = gemmlowp_output_stage.gemmlowp_multipliers[0];
+ gemmlowp_output_stage.gemmlowp_shift = gemmlowp_output_stage.gemmlowp_shifts[0];
+
+ PixelValue min_val{};
+ PixelValue max_val{};
+ std::tie(min_val, max_val) = get_min_max(dst->data_type());
+
+ auto min_activation = min_val.get<int32_t>();
+ auto max_activation = max_val.get<int32_t>();
+
+ const std::set<ActivationLayerInfo::ActivationFunction> supported_acts = { ActivationLayerInfo::ActivationFunction::RELU,
+ ActivationLayerInfo::ActivationFunction::BOUNDED_RELU,
+ ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU
+ };
+
+ if(conv2d_info.act_info.enabled())
+ {
+ if(supported_acts.count(conv2d_info.act_info.activation()) != 0)
+ {
+ std::tie(min_activation, max_activation) = get_quantized_activation_min_max(conv2d_info.act_info, data_type, output_quant_info);
+ }
+ else
+ {
+ _fuse_activation = false;
+ }
+ }
+
+ // Set the GEMMLowp output stage info
+ gemmlowp_output_stage.gemmlowp_offset = output_quant_info.offset;
+ gemmlowp_output_stage.gemmlowp_min_bound = min_activation;
+ gemmlowp_output_stage.gemmlowp_max_bound = max_activation;
+ }
+
+ // Configure and tune GEMM
+ // In case of NHWC, we need to run GEMM3D (gemm_3d_depth != 0) in order to avoid reshaping the output matrix
+ const unsigned int gemm_3d_depth = (data_layout == DataLayout::NHWC) ? conv_h : 0;
+
+ configure_mm(compile_context, gemm_input_to_use, &_weights_reshaped, biases_to_use, gemm_output_to_use, gemmlowp_output_stage, gemm_3d_depth, conv2d_info.act_info);
+
+ if(!_skip_col2im)
+ {
+ // Set the GPU target for col2im
+ _col2im_kernel = std::make_unique<opencl::kernels::ClCol2ImKernel>();
+ _col2im_kernel->set_target(CLScheduler::get().target());
+ // Configure and tune Col2Im
+ _col2im_kernel->configure(compile_context, gemm_output_to_use, dst, Size2D(conv_w, conv_h), conv2d_info.num_groups);
+ CLScheduler::get().tune_kernel_static(*_col2im_kernel.get());
+ }
+
+ ARM_COMPUTE_ERROR_ON_MSG((dst->dimension(idx_width) != conv_w) || (dst->dimension(idx_height) != conv_h),
+ "Output shape does not match the expected one");
+
+ if(!_fuse_activation)
+ {
+ _activation_kernel = std::make_unique<opencl::kernels::ClActivationKernel>();
+ _activation_kernel->configure(compile_context, dst, nullptr, conv2d_info.act_info);
+ }
+
+ _aux_mem[Im2ColOutput] = MemoryInfo(offset_int_vec(Im2ColOutput), MemoryLifetime::Temporary, _im2col_output.total_size());
+ _aux_mem[WeightsReshaped] = MemoryInfo(offset_int_vec(WeightsReshaped), MemoryLifetime::Persistent, _weights_reshaped.total_size());
+ _aux_mem[GemmOutput] = MemoryInfo(offset_int_vec(GemmOutput), MemoryLifetime::Temporary, _gemm_output.total_size());
+}
+
+Status ClGemmConvolution::validate(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *dst, const Conv2dInfo &conv2d_info,
+ const WeightsInfo &weights_info)
+{
+ ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, weights, dst);
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(weights_info.are_reshaped(), "Weights already reshaped are not supported!");
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::F16, DataType::F32);
+ const bool is_quantized_per_channel = is_data_type_quantized_per_channel(weights->data_type());
+
+ if(!is_quantized_per_channel)
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src, weights);
+ }
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_LAYOUT(src, weights);
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG((conv2d_info.num_groups != 1) && (src->data_layout() != DataLayout::NCHW), "Grouping (num_groups != 1) with NHWC data layout is not supported");
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG((conv2d_info.num_groups != 1) && (src->data_type() == DataType::QASYMM8), "Grouping (num_groups != 1) is not supported with QASYMM8");
+ ARM_COMPUTE_RETURN_ERROR_ON(((src->dimension(2) / weights->dimension(2)) != conv2d_info.num_groups) && (src->data_layout() == DataLayout::NCHW));
+
+ const DataLayout data_layout = src->data_layout();
+ const DataType data_type = src->data_type();
+ const int idx_width = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
+ const int idx_height = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
+ const int idx_channel = get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL);
+ const int idx_kernels = get_data_layout_dimension_index(data_layout, DataLayoutDimension::BATCHES);
+
+ const unsigned int kernel_width = weights->dimension(idx_width);
+ const unsigned int kernel_height = weights->dimension(idx_height);
+ const unsigned int num_kernels = weights->dimension(idx_kernels);
+
+ TensorInfo im2col_reshaped_info{};
+ TensorInfo info_gemm{};
+ TensorInfo weights_reshaped_info{};
+ const ITensorInfo *gemm_input_to_use = src;
+ const ITensorInfo *gemm_output_to_use = dst;
+ const ITensorInfo *weights_to_use = weights;
+ const bool is_quantized = is_data_type_quantized_asymmetric(data_type);
+ const bool skip_im2col = (data_layout == DataLayout::NHWC && kernel_width == 1 && kernel_height == 1 && conv2d_info.conv_info.stride().first == 1
+ && conv2d_info.conv_info.stride().second == 1);
+ const bool skip_col2im = data_layout == DataLayout::NHWC;
+ bool fuse_activation = true;
+
+ ARM_COMPUTE_RETURN_ERROR_ON((weights->dimension(idx_channel) * conv2d_info.num_groups) != src->dimension(idx_channel));
+ ARM_COMPUTE_RETURN_ERROR_ON(weights->num_dimensions() > 4);
+
+ // Validate biases
+ if(biases != nullptr)
+ {
+ if(is_quantized)
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(biases, 1, DataType::S32);
+ }
+ else
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src, biases);
+ }
+ ARM_COMPUTE_RETURN_ERROR_ON(biases->dimension(0) != weights->dimension(idx_kernels));
+ ARM_COMPUTE_RETURN_ERROR_ON(biases->num_dimensions() > 1);
+ }
+
+ if(conv2d_info.act_info.enabled())
+ {
+ ARM_COMPUTE_ERROR_ON(conv2d_info.act_info.b() > conv2d_info.act_info.a());
+ }
+
+ // Get convolved dimensions
+ unsigned int conv_w = 0;
+ unsigned int conv_h = 0;
+
+ std::tie(conv_w, conv_h) = scaled_dimensions(src->dimension(idx_width),
+ src->dimension(idx_height),
+ kernel_width,
+ kernel_height,
+ conv2d_info.conv_info,
+ conv2d_info.dilation);
+
+ unsigned int mat_weights_cols = num_kernels / conv2d_info.num_groups;
+
+ const ITensorInfo *biases_to_use = biases;
+ bool append_bias = false;
+
+ if(conv2d_info.num_groups != 1 && biases != nullptr)
+ {
+ // num_groups != 1 can only be for NCHW
+ // Since it is missing a utility function to reshape the biases, we append the biases into the weights tensor
+ biases_to_use = nullptr;
+ append_bias = true;
+ weights_reshaped_info = TensorInfo(compute_weights_reshaped_shape(*weights, true, conv2d_info.num_groups), 1, data_type);
+ }
+ else
+ {
+ weights_reshaped_info = TensorInfo(compute_weights_reshaped_shape(*weights, false, conv2d_info.num_groups), 1, data_type);
+ }
+
+ weights_to_use = &weights_reshaped_info;
+
+ if(!skip_im2col)
+ {
+ const Size2D kernel_dims(kernel_width, kernel_height);
+
+ // Output tensor auto initialization if not yet initialized
+ TensorShape expected_output_shape = compute_im2col_conv_shape(src, kernel_dims, conv2d_info.conv_info, append_bias, conv2d_info.dilation, conv2d_info.num_groups == 1, conv2d_info.num_groups);
+
+ auto_init_if_empty(im2col_reshaped_info, src->clone()->set_tensor_shape(expected_output_shape));
+
+ ARM_COMPUTE_RETURN_ON_ERROR(opencl::kernels::ClIm2ColKernel::validate(src, &im2col_reshaped_info, kernel_dims, conv2d_info.conv_info, append_bias, conv2d_info.dilation, conv2d_info.num_groups));
+ gemm_input_to_use = &im2col_reshaped_info;
+ }
+
+ // Create GEMM output tensor
+ if(!skip_col2im)
+ {
+ TensorShape shape_gemm;
+
+ shape_gemm = gemm_input_to_use->tensor_shape();
+ shape_gemm.set(0, mat_weights_cols);
+ shape_gemm.set(1, conv_w * conv_h);
+
+ info_gemm = TensorInfo(shape_gemm, 1, data_type);
+ info_gemm.set_quantization_info(dst->quantization_info()).set_data_layout(src->data_layout());
+ gemm_output_to_use = &info_gemm;
+ }
+
+ GEMMLowpOutputStageInfo gemmlowp_output_stage;
+ gemmlowp_output_stage.type = GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT;
+ gemmlowp_output_stage.gemmlowp_offset = 0;
+ gemmlowp_output_stage.is_quantized_per_channel = is_quantized_per_channel;
+
+ if(is_quantized)
+ {
+ const UniformQuantizationInfo iq_info = src->quantization_info().uniform();
+ const UniformQuantizationInfo oq_info = dst->quantization_info().uniform();
+ const auto output_quant_info = (dst->total_size() == 0) ? iq_info : oq_info;
+ const unsigned int num_filters = (is_quantized_per_channel) ? num_kernels : 1;
+
+ gemmlowp_output_stage.gemmlowp_multipliers.resize(num_filters);
+ gemmlowp_output_stage.gemmlowp_shifts.resize(num_filters);
+ quantization::compute_quantized_multipliers_and_shifts(src, weights, dst,
+ gemmlowp_output_stage.gemmlowp_multipliers.data(),
+ gemmlowp_output_stage.gemmlowp_shifts.data());
+ gemmlowp_output_stage.gemmlowp_multiplier = gemmlowp_output_stage.gemmlowp_multipliers[0];
+ gemmlowp_output_stage.gemmlowp_shift = gemmlowp_output_stage.gemmlowp_shifts[0];
+
+ int min_activation = 0;
+ int max_activation = 0;
+
+ const std::set<ActivationLayerInfo::ActivationFunction> supported_acts = { ActivationLayerInfo::ActivationFunction::RELU,
+ ActivationLayerInfo::ActivationFunction::BOUNDED_RELU,
+ ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU
+ };
+
+ if(conv2d_info.act_info.enabled())
+ {
+ if(supported_acts.count(conv2d_info.act_info.activation()) != 0)
+ {
+ std::tie(min_activation, max_activation) = get_quantized_activation_min_max(conv2d_info.act_info, data_type, output_quant_info);
+ }
+ else
+ {
+ fuse_activation = false;
+ }
+ }
+
+ // Set the GEMMLowp output stage info
+ gemmlowp_output_stage.gemmlowp_offset = output_quant_info.offset;
+ gemmlowp_output_stage.gemmlowp_min_bound = min_activation;
+ gemmlowp_output_stage.gemmlowp_max_bound = max_activation;
+ }
+
+ // In case of NHWC, we need to run GEMM3D (gemm_3d_depth != 0) in order to avoid reshaping the output matrix
+ const unsigned int gemm_3d_depth = (data_layout == DataLayout::NHWC) ? conv_h : 0;
+
+ ARM_COMPUTE_RETURN_ON_ERROR(validate_mm(gemm_input_to_use, weights_to_use, biases_to_use, gemm_output_to_use, gemmlowp_output_stage, gemm_3d_depth, skip_im2col, conv2d_info.act_info));
+
+ // Validate Col2Im
+ if(!skip_col2im)
+ {
+ ARM_COMPUTE_RETURN_ON_ERROR(kernels::ClCol2ImKernel::validate(gemm_output_to_use, dst, Size2D(conv_w, conv_h), conv2d_info.num_groups));
+ }
+
+ //Validate Activation Layer
+ if(!fuse_activation)
+ {
+ ARM_COMPUTE_RETURN_ON_ERROR(kernels::ClActivationKernel::validate(dst, nullptr, conv2d_info.act_info));
+ }
+
+ return Status{};
+}
+
+void ClGemmConvolution::run(ITensorPack &tensors)
+{
+ prepare(tensors);
+
+ auto src = tensors.get_const_tensor(ACL_SRC_0);
+ auto biases = tensors.get_const_tensor(ACL_SRC_2);
+ auto dst = tensors.get_tensor(ACL_DST);
+ auto gemm_input_to_use = src;
+ auto gemm_output_to_use = dst;
+
+ CLAuxTensorHandler im2col_output(offset_int_vec(Im2ColOutput), _im2col_output, tensors, false);
+ CLAuxTensorHandler gemm_output(offset_int_vec(GemmOutput), _gemm_output, tensors, false);
+ CLAuxTensorHandler weights_reshaped(offset_int_vec(WeightsReshaped), _weights_reshaped, tensors, false);
+
+ // Run im2col
+ if(!_skip_im2col)
+ {
+ ITensorPack pack =
+ {
+ { TensorType::ACL_SRC, src },
+ { TensorType::ACL_DST, im2col_output.get() }
+ };
+ CLScheduler::get().enqueue_op(*_im2col_kernel, pack, false);
+ gemm_input_to_use = im2col_output.get();
+ }
+ if(!_skip_col2im)
+ {
+ gemm_output_to_use = gemm_output.get();
+ }
+ ITensorPack pack_mm = tensors;
+ pack_mm.add_const_tensor(TensorType::ACL_SRC_0, gemm_input_to_use);
+ pack_mm.add_const_tensor(TensorType::ACL_SRC_1, weights_reshaped.get());
+ if(!_append_bias)
+ {
+ pack_mm.add_const_tensor(TensorType::ACL_SRC_2, biases);
+ }
+ pack_mm.add_tensor(TensorType::ACL_DST, gemm_output_to_use);
+ // Runs ClGemm or ClGemmLowpMatrixMultiplyCore functions
+ if(_is_quantized)
+ {
+ // Run gemmlowp
+ _mm_gemmlowp->run(pack_mm);
+ }
+ else
+ {
+ // Run gemm
+ _mm_gemm->run(pack_mm);
+ }
+
+ // Reshape output matrix
+ if(!_skip_col2im)
+ {
+ ITensorPack pack =
+ {
+ { TensorType::ACL_SRC, gemm_output_to_use },
+ { TensorType::ACL_DST, dst }
+ };
+ CLScheduler::get().enqueue_op(*_col2im_kernel.get(), pack, false);
+ }
+
+ //Run Activation Layer if we cannot fuse in GEMM
+ if(!_fuse_activation)
+ {
+ ITensorPack pack =
+ {
+ { TensorType::ACL_SRC, dst },
+ { TensorType::ACL_DST, dst }
+ };
+ CLScheduler::get().enqueue_op(*_activation_kernel.get(), pack, false);
+ }
+}
+
+void ClGemmConvolution::prepare(ITensorPack &tensors)
+{
+ if(!_is_prepared)
+ {
+ // Run weights reshaping and mark original weights tensor as unused
+ ICLTensor *weights_reshaped_p = utils::cast::polymorphic_downcast<ICLTensor *>(tensors.get_tensor(offset_int_vec(WeightsReshaped)));
+ CLAuxTensorHandler weights_reshaped(_weights_reshaped, *weights_reshaped_p);
+ auto weights = tensors.get_const_tensor(TensorType::ACL_SRC_1);
+ ITensorPack pack =
+ {
+ { TensorType::ACL_SRC, weights },
+ { TensorType::ACL_DST, weights_reshaped.get() }
+ };
+
+ if(_append_bias)
+ {
+ const auto biases = tensors.get_const_tensor(TensorType::ACL_SRC_2);
+ pack.add_const_tensor(TensorType::ACL_BIAS, biases);
+ }
+ CLScheduler::get().enqueue_op(*_weights_reshape_kernel.get(), pack, true);
+ tensors.add_const_tensor(TensorType::ACL_SRC_1, weights_reshaped.get());
+
+ // Prepare GEMM
+ _is_quantized ? _mm_gemmlowp->prepare(tensors) : _mm_gemm->prepare(tensors);
+ _is_prepared = true;
+ }
+}
+experimental::MemoryRequirements ClGemmConvolution::workspace() const
+{
+ return _aux_mem;
+}
+} // namespace opencl
+} // namespace arm_compute
diff --git a/src/runtime/gpu/cl/operators/ClGemmConvolution.h b/src/runtime/gpu/cl/operators/ClGemmConvolution.h
new file mode 100644
index 0000000000..444516eaaa
--- /dev/null
+++ b/src/runtime/gpu/cl/operators/ClGemmConvolution.h
@@ -0,0 +1,185 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_CL_GEMMCONVOLUTION_H
+#define ARM_COMPUTE_CL_GEMMCONVOLUTION_H
+
+#include "arm_compute/core/TensorInfo.h"
+#include "arm_compute/core/Types.h"
+#include "arm_compute/runtime/FunctionDescriptors.h"
+#include "src/core/gpu/cl/ClCompileContext.h"
+#include "src/runtime/gpu/cl/IClOperator.h"
+
+#include <memory>
+
+namespace arm_compute
+{
+namespace opencl
+{
+class ClGemm;
+class ClGemmLowpMatrixMultiplyCore;
+namespace kernels
+{
+class ClIm2ColKernel;
+class ClCol2ImKernel;
+class ClWeightsReshapeKernel;
+class ClActivationKernel;
+} // namespace kernels
+
+/** Basic function to compute the convolution layer. This function calls the following OpenCL kernels/functions:
+ *
+ * -# @ref opencl::kernels::ClIm2ColKernel
+ * -# @ref ClGemm (if the data type is FP32 or FP16)
+ * -# @ref ClGemmLowpMatrixMultiplyCore (if the data type is QASYMM8/QASYMM8_SIGNED)
+ * -# @ref ClGemmLowpOutputStage with QUANTIZE_DOWN_FIXEDPOINT type of quantization (if the data type is QASYMM8/QASYMM8_SIGNED)
+ * -# @ref opencl::kernels::ClCol2ImKernel (if NCHW data layout)
+ * -# @ref opencl::kernels::ClActivationKernel
+ */
+class ClGemmConvolution : public IClOperator
+{
+public:
+ /** Constructor */
+ ClGemmConvolution();
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
+ ClGemmConvolution(const ClGemmConvolution &) = delete;
+ /** Default move constructor */
+ ClGemmConvolution(ClGemmConvolution &&) = default;
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
+ ClGemmConvolution &operator=(const ClGemmConvolution &) = delete;
+ /** Default move assignment operator */
+ ClGemmConvolution &operator=(ClGemmConvolution &&) = default;
+ /** Default destructor */
+ ~ClGemmConvolution();
+ /** Set the input and output tensors.
+ *
+ * Valid data layouts:
+ * - NHWC
+ * - NCHW
+ *
+ * Valid data type configurations:
+ * |src0 |src1 |src2 |dst |
+ * |:--------------|:------------------|:--------|:--------------|
+ * |F16 |F16 |F16 |F16 |
+ * |F32 |F32 |F32 |F32 |
+ * |QASYMM8 |QASYMM8 |S32 |QASYMM8 |
+ * |QASYMM8 |QSYMM8_PER_CHANNEL |S32 |QASYMM8 |
+ * |QASYMM8_SIGNED |QASYMM8_SIGNED |S32 |QASYMM8_SIGNED |
+ * |QASYMM8_SIGNED |QSYMM8_PER_CHANNEL |S32 |QASYMM8_SIGNED |
+ *
+ * @param[in] compile_context The compile context to be used.
+ * @param[in] src Source tensor info. 3 lower dimensions represent a single input [width, height, IFM],
+ * while every optional dimension from 4 and above represent a batch of inputs.
+ * Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32.
+ * @param[in] weights Weights tensor info. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM].
+ * Data type supported: Same as @p input or QASYMM8/QSYMM8_PER_CHANNEL when @p input is QASYMM8 or QASYMM8_SIGNED/QSYMM8_PER_CHANNEL when @p input is QASYMM8_SIGNED.
+ * @param[in] biases Biases tensor info. Shared biases supported. Biases are 1D tensor with dimensions [OFM].
+ * Data type supported: Should match @p input data type, except for input of quantized type where biases should be of S32 type.
+ * @param[out] dst Destination tensor info. 3 lower dimensions represent a single output [width, height, OFM], while the rest represent batch of outputs.
+ * Data types supported: Same as @p input.
+ * @param[in] conv2d_info Contains convolution 2d info described in @ref Conv2dInfo.
+ * @param[in] weights_info Specifies if the weights tensor has been reshaped with ClWeightsReshapeKernel. If this is not part of the fully connected layer the weights
+ * tensor has also been transposed with ClGemmReshapeRhsMatrixKernel. Data type supported: Same as @p input.
+ */
+ void configure(const ClCompileContext &compile_context, ITensorInfo *src, ITensorInfo *weights, ITensorInfo *biases, ITensorInfo *dst, const Conv2dInfo &conv2d_info,
+ const WeightsInfo &weights_info = WeightsInfo());
+ /** Static function to check if given info will lead to a valid configuration
+ *
+ * Similar to ClGemmConvolution::configure()
+ *
+ * @return a status
+ */
+ static Status validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const Conv2dInfo &conv2d_info,
+ const WeightsInfo &weights_info = WeightsInfo());
+
+ // Inherited methods overridden:
+ void run(ITensorPack &tensors) override;
+ void prepare(ITensorPack &constants) override;
+ experimental::MemoryRequirements workspace() const override;
+
+private:
+ /** Configures the appropriate matrix multiply routine
+ *
+ * @param[in] compile_context The compile context to be used.
+ * @param[in] src Input tensor info. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32.
+ * @param[in] weights Weights tensor info. Data type supported: Same as @p input or QASYMM8/QSYMM8_PER_CHANNEL when @p input is QASYMM8 or
+ * QASYMM8_SIGNED/QSYMM8_PER_CHANNEL when @p input is QASYMM8_SIGNED.
+ * @param[in] biases Biases tensor info. Shared biases supported. Biases are 1D tensor with dimensions [OFM].
+ * Data type supported: Should match @p input data type, except for input of quantized type where biases should be of S32 type.
+ * @param[in, out] dst Output tensor info. Data types supported: same as @p input.
+ * @param[in] gemmlowp_output_stage GEMMLowp output stage info
+ * @param[in] gemm_3d_depth Depth of GEMM 3D
+ * @param[in] act_info Activation to apply after the matrix multiplication
+ */
+ void configure_mm(const CLCompileContext &compile_context, const ITensorInfo *src, ITensorInfo *weights, ITensorInfo *biases, ITensorInfo *dst,
+ const GEMMLowpOutputStageInfo &gemmlowp_output_stage,
+ int gemm_3d_depth, const ActivationLayerInfo &act_info);
+ /** Static function to check if given info will lead to a valid configuration of @ref ClGemmConvolution matrix multiply routines
+ *
+ * @param[in] src Input tensor info. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32.
+ * @param[in] weights Weights tensor info. Data type supported: Same as @p input or QASYMM8/QSYMM8_PER_CHANNEL when @p input is QASYMM8 or
+ * QASYMM8_SIGNED/QSYMM8_PER_CHANNEL when @p input is QASYMM8_SIGNED.
+ * @param[in] biases Biases tensor info. Shared biases supported. Biases are 1D tensor with dimensions [OFM].
+ * Data type supported: Should match @p input data type, except for input of quantized type where biases should be of S32 type.
+ * @param[in] dst Output tensor info. Data types supported: same as @p input.
+ * @param[in] gemmlowp_output_stage GEMMLowp output stage info
+ * @param[in] gemm_3d_depth Depth of GEMM 3D
+ * @param[in] skip_im2col Flag which specifies if im2col has to be skipped. i.e. 1x1 convolution with NHWC data layout.
+ * @param[in] act_info Activation to apply after the matrix multiplication
+ *
+ * @return a status
+ */
+ static Status validate_mm(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *dst, const GEMMLowpOutputStageInfo &gemmlowp_output_stage,
+ int gemm_3d_depth, bool skip_im2col, const ActivationLayerInfo &act_info);
+
+ enum AuxTensorIdx
+ {
+ // ClGemmLowpMatrixMultiplyCore has up to 7 internal tensors
+ Im2ColOutput = 8,
+ WeightsReshaped,
+ GemmOutput,
+ Count
+ };
+
+ std::unique_ptr<kernels::ClWeightsReshapeKernel> _weights_reshape_kernel;
+ std::unique_ptr<kernels::ClIm2ColKernel> _im2col_kernel;
+ std::unique_ptr<ClGemm> _mm_gemm;
+ std::unique_ptr<ClGemmLowpMatrixMultiplyCore> _mm_gemmlowp;
+ std::unique_ptr<opencl::kernels::ClCol2ImKernel> _col2im_kernel;
+ std::unique_ptr<kernels::ClActivationKernel> _activation_kernel;
+
+ TensorInfo _im2col_output;
+ TensorInfo _weights_reshaped;
+ TensorInfo _gemm_output;
+
+ bool _skip_im2col;
+ bool _skip_col2im;
+ bool _is_quantized;
+ bool _fuse_activation;
+ bool _append_bias;
+ bool _is_prepared;
+
+ experimental::MemoryRequirements _aux_mem;
+};
+} // namespace opencl
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_CL_GEMMCONVOLUTION_H */
diff --git a/tests/validation/CL/UNIT/DynamicTensor.cpp b/tests/validation/CL/UNIT/DynamicTensor.cpp
index f83a92ec2f..ac433721d8 100644
--- a/tests/validation/CL/UNIT/DynamicTensor.cpp
+++ b/tests/validation/CL/UNIT/DynamicTensor.cpp
@@ -31,7 +31,6 @@
#include "src/core/CL/kernels/CLFillBorderKernel.h"
#include "src/core/CL/kernels/CLL2NormalizeLayerKernel.h"
#include "src/core/CL/kernels/CLReductionOperationKernel.h"
-#include "src/core/CL/kernels/CLWeightsReshapeKernel.h"
#include "tests/AssetsLibrary.h"
#include "tests/CL/CLAccessor.h"
#include "tests/Globals.h"
diff --git a/tests/validation/CL/WeightsReshape.cpp b/tests/validation/CL/WeightsReshape.cpp
index d04c10cee2..93be75df98 100644
--- a/tests/validation/CL/WeightsReshape.cpp
+++ b/tests/validation/CL/WeightsReshape.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2020 Arm Limited.
+ * Copyright (c) 2018-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -22,7 +22,7 @@
* SOFTWARE.
*/
#include "arm_compute/core/Types.h"
-#include "src/core/CL/kernels/CLWeightsReshapeKernel.h"
+#include "src/core/gpu/cl/kernels/ClWeightsReshapeKernel.h"
#include "tests/CL/CLAccessor.h"
#include "tests/CL/Helper.h"
#include "tests/datasets/ShapeDatasets.h"
@@ -41,7 +41,7 @@ namespace validation
TEST_SUITE(CL)
TEST_SUITE(WeightsReshape)
-using CLWeightsReshape = CLSynthetizeFunction<CLWeightsReshapeKernel>;
+using ClWeightsReshape = ClSynthetizeOperatorWithBorder<opencl::kernels::ClWeightsReshapeKernel>;
/** Validate tests
*
@@ -87,15 +87,15 @@ framework::dataset::make("NumGroups", { 1, 1, 1, 2, 1, 2 })),
framework::dataset::make("Expected", { false, false, false, false, false, false })),
input_info, biases_info, output_info, num_groups, expected)
{
- bool status = bool(CLWeightsReshape::validate(&input_info, &biases_info, &output_info, num_groups));
+ bool status = bool(opencl::kernels::ClWeightsReshapeKernel::validate(&input_info, &biases_info, &output_info, num_groups));
ARM_COMPUTE_EXPECT(status == expected, framework::LogLevel::ERRORS);
}
template <typename T>
-using CLWeightsReshapeFixture = WeightsReshapeValidationFixture<CLTensor, CLAccessor, CLWeightsReshape, T>;
+using ClWeightsReshapeFixture = WeightsReshapeOpValidationFixture<CLTensor, CLAccessor, ClWeightsReshape, T>;
TEST_SUITE(Float)
-FIXTURE_DATA_TEST_CASE(FP32, CLWeightsReshapeFixture<float>, framework::DatasetMode::ALL, combine(combine(combine(framework::dataset::make("InputShape", { TensorShape(3U, 3U, 48U, 120U) }),
+FIXTURE_DATA_TEST_CASE(FP32, ClWeightsReshapeFixture<float>, framework::DatasetMode::ALL, combine(combine(combine(framework::dataset::make("InputShape", { TensorShape(3U, 3U, 48U, 120U) }),
framework::dataset::make("DataType", DataType::F32)),
framework::dataset::make("HasBias", { true, false })),
framework::dataset::make("NumGroups", { 1, 2 })))
@@ -104,7 +104,7 @@ FIXTURE_DATA_TEST_CASE(FP32, CLWeightsReshapeFixture<float>, framework::DatasetM
validate(CLAccessor(_target), _reference);
}
-FIXTURE_DATA_TEST_CASE(FP16, CLWeightsReshapeFixture<half>, framework::DatasetMode::ALL, combine(combine(combine(framework::dataset::make("InputShape", { TensorShape(13U, 13U, 96U, 240U) }),
+FIXTURE_DATA_TEST_CASE(FP16, ClWeightsReshapeFixture<half>, framework::DatasetMode::ALL, combine(combine(combine(framework::dataset::make("InputShape", { TensorShape(13U, 13U, 96U, 240U) }),
framework::dataset::make("DataType", DataType::F16)),
framework::dataset::make("HasBias", { true, false })),
framework::dataset::make("NumGroups", { 3, 4 })))
@@ -113,7 +113,7 @@ FIXTURE_DATA_TEST_CASE(FP16, CLWeightsReshapeFixture<half>, framework::DatasetMo
validate(CLAccessor(_target), _reference);
}
-FIXTURE_DATA_TEST_CASE(BFloat16, CLWeightsReshapeFixture<half>, framework::DatasetMode::ALL, combine(combine(combine(framework::dataset::make("InputShape", { TensorShape(9U, 9U, 96U, 240U) }),
+FIXTURE_DATA_TEST_CASE(BFloat16, ClWeightsReshapeFixture<half>, framework::DatasetMode::ALL, combine(combine(combine(framework::dataset::make("InputShape", { TensorShape(9U, 9U, 96U, 240U) }),
framework::dataset::make("DataType", DataType::BFLOAT16)),
framework::dataset::make("HasBias", { false })),
framework::dataset::make("NumGroups", { 3, 4 })))
@@ -125,7 +125,7 @@ FIXTURE_DATA_TEST_CASE(BFloat16, CLWeightsReshapeFixture<half>, framework::Datas
TEST_SUITE_END()
TEST_SUITE(Quantized)
-FIXTURE_DATA_TEST_CASE(QASYMM8, CLWeightsReshapeFixture<uint8_t>, framework::DatasetMode::ALL, combine(combine(combine(framework::dataset::make("InputShape", { TensorShape(5U, 5U, 48U, 120U) }),
+FIXTURE_DATA_TEST_CASE(QASYMM8, ClWeightsReshapeFixture<uint8_t>, framework::DatasetMode::ALL, combine(combine(combine(framework::dataset::make("InputShape", { TensorShape(5U, 5U, 48U, 120U) }),
framework::dataset::make("DataType", DataType::QASYMM8)),
framework::dataset::make("HasBias", { false })),
framework::dataset::make("NumGroups", { 1, 2 })))
@@ -134,7 +134,7 @@ FIXTURE_DATA_TEST_CASE(QASYMM8, CLWeightsReshapeFixture<uint8_t>, framework::Dat
validate(CLAccessor(_target), _reference);
}
-FIXTURE_DATA_TEST_CASE(QASYMM8_SIGNED, CLWeightsReshapeFixture<uint8_t>, framework::DatasetMode::ALL, combine(combine(combine(framework::dataset::make("InputShape", { TensorShape(5U, 5U, 48U, 120U) }),
+FIXTURE_DATA_TEST_CASE(QASYMM8_SIGNED, ClWeightsReshapeFixture<uint8_t>, framework::DatasetMode::ALL, combine(combine(combine(framework::dataset::make("InputShape", { TensorShape(5U, 5U, 48U, 120U) }),
framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)),
framework::dataset::make("HasBias", { false })),
framework::dataset::make("NumGroups", { 1, 2 })))
diff --git a/tests/validation/fixtures/WeightsReshapeFixture.h b/tests/validation/fixtures/WeightsReshapeFixture.h
index 0b3e76d677..7c7214acac 100644
--- a/tests/validation/fixtures/WeightsReshapeFixture.h
+++ b/tests/validation/fixtures/WeightsReshapeFixture.h
@@ -45,7 +45,7 @@ namespace validation
using namespace arm_compute::misc::shape_calculator;
template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
-class WeightsReshapeValidationFixture : public framework::Fixture
+class WeightsReshapeOpValidationFixture : public framework::Fixture
{
public:
template <typename...>
@@ -73,7 +73,7 @@ protected:
// Create and configure function
FunctionType weights_reshape_func;
- weights_reshape_func.configure(&src, (has_bias ? &bias : nullptr), &dst, num_groups);
+ weights_reshape_func.configure(src.info(), (has_bias ? bias.info() : nullptr), dst.info(), num_groups);
ARM_COMPUTE_ASSERT(src.info()->is_resizable());
ARM_COMPUTE_ASSERT(dst.info()->is_resizable());
@@ -99,8 +99,18 @@ protected:
fill(AccessorType(bias), 1);
}
+ arm_compute::ITensorPack pack =
+ {
+ { arm_compute::TensorType::ACL_SRC, &src },
+ { arm_compute::TensorType::ACL_DST, &dst }
+ };
+
+ if(has_bias)
+ {
+ pack.add_const_tensor(arm_compute::TensorType::ACL_BIAS, &bias);
+ }
// Compute function
- weights_reshape_func.run();
+ weights_reshape_func.run(pack);
return dst;
}