author    Michele Di Giorgio <michele.digiorgio@arm.com>  2018-11-16 16:04:25 +0000
committer Michele Di Giorgio <michele.digiorgio@arm.com>  2019-02-07 09:44:08 +0000
commit    ebc3a90721fe4a41b8e141466894d4d7185c01b7 (patch)
tree      9149764caa37edbdc6bb6c69d503d37dbb28449f /arm_compute
parent    4632e5e44e9a78b15884d0947007bb030fde0aea (diff)
COMPMID-1706: Fuse the bias addition within CLGEMM
Change-Id: I378f2023f4fa010f195f76716ac07aa86279bfae
Signed-off-by: Michele Di Giorgio <michele.digiorgio@arm.com>
Reviewed-on: https://review.mlplatform.org/280
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Gian Marco Iodice <gianmarco.iodice@arm.com>
Diffstat (limited to 'arm_compute')
-rw-r--r--  arm_compute/core/CL/kernels/CLGEMMMatrixMultiplyKernel.h   24
-rw-r--r--  arm_compute/runtime/CL/functions/CLFullyConnectedLayer.h    4
-rw-r--r--  arm_compute/runtime/CL/functions/CLGEMMConvolutionLayer.h  12
3 files changed, 25 insertions(+), 15 deletions(-)
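Before the per-file diffs, a minimal sketch of what the fused path looks like when driven through the CLGEMM function: the 1-D bias is passed as the "c" argument with beta = 1, so the broadcast addition happens inside the matrix-multiply kernel instead of as a separate pass. The shapes, M/N/K values, and the standalone main() are illustrative only, and whether the fused kernel is actually selected also depends on CLGEMM's internal heuristics:

    #include "arm_compute/core/TensorInfo.h"
    #include "arm_compute/core/TensorShape.h"
    #include "arm_compute/core/Types.h"
    #include "arm_compute/runtime/CL/CLScheduler.h"
    #include "arm_compute/runtime/CL/CLTensor.h"
    #include "arm_compute/runtime/CL/functions/CLGEMM.h"

    using namespace arm_compute;

    int main()
    {
        CLScheduler::get().default_init();

        // ACL shapes are TensorShape(width, height): A is MxK, B is KxN,
        // the bias is a 1-D vector of length N, the destination is MxN.
        const unsigned int M = 4, N = 8, K = 16;
        CLTensor a, b, bias, dst;
        a.allocator()->init(TensorInfo(TensorShape(K, M), 1, DataType::F32));
        b.allocator()->init(TensorInfo(TensorShape(N, K), 1, DataType::F32));
        bias.allocator()->init(TensorInfo(TensorShape(N), 1, DataType::F32));
        dst.allocator()->init(TensorInfo(TensorShape(N, M), 1, DataType::F32));

        // alpha = 1, beta = 1: dst = A * B + bias (bias broadcast across rows).
        CLGEMM gemm;
        gemm.configure(&a, &b, &bias, &dst, 1.f, 1.f);

        a.allocator()->allocate();
        b.allocator()->allocate();
        bias.allocator()->allocate();
        dst.allocator()->allocate();

        gemm.run();
        CLScheduler::get().sync();
        return 0;
    }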
diff --git a/arm_compute/core/CL/kernels/CLGEMMMatrixMultiplyKernel.h b/arm_compute/core/CL/kernels/CLGEMMMatrixMultiplyKernel.h
index 797bda86cf..724a7d67e6 100644
--- a/arm_compute/core/CL/kernels/CLGEMMMatrixMultiplyKernel.h
+++ b/arm_compute/core/CL/kernels/CLGEMMMatrixMultiplyKernel.h
@@ -30,12 +30,14 @@ namespace arm_compute
{
class ICLTensor;
-/** OpenCL kernel to multiply two input matrices "A" and "B" . All elements of the output matrix will be multiplied by alpha
+/** OpenCL kernel to multiply two input matrices "A" and "B" and, if provided, add a vector "C". All elements of the output matrix will be multiplied by alpha. If vector C is passed, it will be added to the matrix product (a broadcast addition will be performed).
*
 * @note If the input tensors @p input0 and @p input1 have been reshaped respectively with @ref CLGEMMReshapeLHSMatrixKernel and @ref CLGEMMReshapeRHSMatrixKernel,
* the flag @p is_interleaved_transposed must be set to true
*
- * @attention The second input tensor must have at least 2 dimensions (matrix)
+ * @attention Vector C (@p input2) must be 1D. A broadcast addition is performed.
+ *
+ * @attention @p input1 tensor must have at least 2 dimensions (matrix)
*
*/
class CLGEMMMatrixMultiplyKernel : public ICLKernel
@@ -55,21 +57,25 @@ public:
*
* @param[in] input0 Input tensor containing the Matrix A. Data types supported: F16/F32
* @param[in] input1 Input tensor containing the Matrix B. Data type supported: same as @p input0
+ * @param[in] input2 Input tensor containing the Vector C. Can be nullptr. Data type supported: same as @p input0
* @param[out] output Output tensor to store the result of matrix multiplication. Data type supported: same as @p input0
* @param[in] alpha Weight of the matrix product
+ * @param[in] beta (Optional) Weight of vector C. Default value is 0. Only beta = 1 is currently supported.
* @param[in] is_interleaved_transposed (Optional) True if input0 and input1 have been reshaped respectively using @ref CLGEMMReshapeLHSMatrixKernel and @ref CLGEMMReshapeRHSMatrixKernel
* @param[in] reshape_info (Optional) GEMM reshape info. If is_interleaved_transposed = true, this object must contain the information to understand how the matrix A and matrix B have been reshaped
* @param[in] fp_mixed_precision (Optional) Use wider accumulators (32 bit instead of 16 for FP16) to improve accuracy
*
*/
- void configure(const ICLTensor *input0, const ICLTensor *input1, ICLTensor *output, float alpha, bool is_interleaved_transposed = true, const GEMMReshapeInfo &reshape_info = GEMMReshapeInfo(),
- bool fp_mixed_precision = false);
+ void configure(const ICLTensor *input0, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, float alpha, float beta = 0.f,
+ bool is_interleaved_transposed = true, const GEMMReshapeInfo &reshape_info = GEMMReshapeInfo(), bool fp_mixed_precision = false);
/** Static function to check if given info will lead to a valid configuration of @ref CLGEMMMatrixMultiplyKernel
*
- * @param[in] input0 Input tensor containing the Matrix A. Data types supported: F16/F32
- * @param[in] input1 Input tensor containing the Matrix B. Data type supported: same as @p input0
+ * @param[in] input0 Input tensor containing the Matrix A info. Data types supported: F16/F32
+ * @param[in] input1 Input tensor containing the Matrix B info. Data type supported: same as @p input0
+ * @param[in] input2 Input tensor containing the Vector C info. Can be nullptr. Data type supported: same as @p input0
* @param[in] output Output tensor to store the result of matrix multiplication. Data type supported: same as @p input0
* @param[in] alpha Weight of the matrix product
+ * @param[in] beta Weight of vector C. Default value is 0. Only beta = 1 is currently supported.
* @param[in] is_interleaved_transposed True if input0 and input1 have been reshaped respectively using @ref CLGEMMReshapeLHSMatrixKernel and @ref CLGEMMReshapeRHSMatrixKernel
* @param[in] reshape_info GEMM reshape info. If is_interleaved_transposed = true, this object must contain the information to understand how the matrix A and matrix B have been reshaped
* @param[in] gpu_target GPU Target
@@ -77,8 +83,8 @@ public:
*
* @return a status
*/
- static Status validate(const ITensorInfo *input0, const ITensorInfo *input1, const ITensorInfo *output, float alpha, bool is_interleaved_transposed, const GEMMReshapeInfo &reshape_info,
- GPUTarget gpu_target, bool fp_mixed_precision = false);
+ static Status validate(const ITensorInfo *input0, const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, float alpha, float beta,
+ bool is_interleaved_transposed, const GEMMReshapeInfo &reshape_info, GPUTarget gpu_target, bool fp_mixed_precision = false);
// Inherited methods overridden:
void run(const Window &window, cl::CommandQueue &queue) override;
@@ -86,10 +92,12 @@ public:
public:
const ICLTensor *_input0;
const ICLTensor *_input1;
+ const ICLTensor *_input2;
ICLTensor *_output;
bool _slide_matrix_b;
bool _reinterpret_input_as_3d;
bool _reinterpret_output_as_3d;
+ bool _has_vec_c;
};
} // namespace arm_compute
#endif /* __ARM_COMPUTE_CLGEMMMATRIXMULTIPLYKERNEL_H__ */
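For reference, the broadcast addition described in the kernel documentation above amounts to the following scalar computation. This is a plain C++ sketch of the math, not library code; row-major storage in flat vectors is an assumption of this illustration:

    #include <vector>

    // Scalar reference for the fused kernel: dst = alpha * (A * B) + beta * c,
    // where c (length N) is broadcast across the M rows of the product.
    // A is MxK, B is KxN, all stored row-major in flat vectors.
    std::vector<float> gemm_add_bias_ref(const std::vector<float> &a,
                                         const std::vector<float> &b,
                                         const std::vector<float> &c,
                                         int M, int N, int K,
                                         float alpha, float beta)
    {
        std::vector<float> dst(M * N, 0.f);
        for (int m = 0; m < M; ++m)
        {
            for (int n = 0; n < N; ++n)
            {
                float acc = 0.f;
                for (int k = 0; k < K; ++k)
                {
                    acc += a[m * K + k] * b[k * N + n];
                }
                // Broadcast addition: every row gets the same bias c[n],
                // scaled by beta (the kernel currently supports beta = 1 only).
                dst[m * N + n] = alpha * acc + beta * c[n];
            }
        }
        return dst;
    }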
diff --git a/arm_compute/runtime/CL/functions/CLFullyConnectedLayer.h b/arm_compute/runtime/CL/functions/CLFullyConnectedLayer.h
index d6d88cec55..e800dd7cbb 100644
--- a/arm_compute/runtime/CL/functions/CLFullyConnectedLayer.h
+++ b/arm_compute/runtime/CL/functions/CLFullyConnectedLayer.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018 ARM Limited.
+ * Copyright (c) 2017-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -136,7 +136,7 @@ private:
CLGEMM _mm_gemm;
CLGEMMLowpMatrixMultiplyCore _mm_gemmlowp;
CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPoint _gemmlowp_output_stage;
- CLGEMMMatrixAccumulateBiasesKernel _accumulate_biases_kernel;
+ CLGEMMMatrixAccumulateBiasesKernel _accumulate_biases_kernel; // TODO(COMPMID-1889): Use CLGEMM to add bias in CLFullyConnectedLayer
CLTensor _flatten_output;
CLTensor _gemmlowp_output;
CLTensor _converted_weights_output;
diff --git a/arm_compute/runtime/CL/functions/CLGEMMConvolutionLayer.h b/arm_compute/runtime/CL/functions/CLGEMMConvolutionLayer.h
index d7694a8328..b304576f33 100644
--- a/arm_compute/runtime/CL/functions/CLGEMMConvolutionLayer.h
+++ b/arm_compute/runtime/CL/functions/CLGEMMConvolutionLayer.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018 ARM Limited.
+ * Copyright (c) 2017-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -163,7 +163,7 @@ private:
* @param[in, out] output Output tensor. Data types supported: Same as @p input,
* except for input of QASYMM8 type where output should be of S32 type.
* @param[in] gemmlowp_output_stage GEMMLowp output stage info
- * @param[in] gemm_3d_depth (Optional) Depth of GEMM 3D (Defaults to 1)
+ * @param[in] gemm_3d_depth Depth of GEMM 3D
*/
void configure_mm(const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const GEMMLowpOutputStageInfo &gemmlowp_output_stage, int gemm_3d_depth = 1);
/** Static function to check if given info will lead to a valid configuration of @ref CLGEMMConvolutionLayer matrix multiply routines
@@ -175,13 +175,14 @@ private:
* @param[in] biases Biases tensor. Shared biases supported. Biases are 1D tensor with dimensions [OFM].
* Data type supported: Should match @p input data type, except for input of QASYMM8 type where biases should be of S32 type.
* @param[in] gemmlowp_output_stage GEMMLowp output stage info
- * @param[in] gemm_3d_depth (Optional) Depth of GEMM 3D (Defaults to 1)
- * @param[in] skip_im2col (Optional) Flag which specifies if im2col has to be skipped. i.e. 1x1 convolution with NHWC data layout. (Default to false)
+ * @param[in] gemm_3d_depth Depth of GEMM 3D
+ * @param[in] skip_im2col Flag which specifies if im2col has to be skipped, e.g. for a 1x1 convolution with NHWC data layout.
+ * @param[in] run_addition Flag which specifies if @ref CLGEMMMatrixMatrixMultiplyAddition is to be run.
*
* @return a status
*/
static Status validate_mm(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const GEMMLowpOutputStageInfo &gemmlowp_output_stage,
- int gemm_3d_depth = 1, bool skip_im2col = false);
+ int gemm_3d_depth, bool skip_im2col, bool run_addition);
private:
CLMemoryGroup _memory_group;
@@ -207,6 +208,7 @@ private:
bool _is_quantized;
bool _is_activationlayer_enabled;
bool _is_prepared;
+ bool _run_addition;
};
} // namespace arm_compute
#endif /* __ARM_COMPUTE_CLGEMMCONVOLUTIONLAYER_H__ */
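Since the defaults on gemm_3d_depth and skip_im2col were removed from validate_mm(), in-tree callers must now spell out all three trailing arguments. A hypothetical call site, purely for illustration (the real caller lives in CLGEMMConvolutionLayer.cpp, which this diff does not touch, and the argument values shown are assumptions):

    // Hypothetical caller inside CLGEMMConvolutionLayer::validate().
    // run_addition requests the separate bias-addition path when the bias
    // cannot be fused into the GEMM.
    ARM_COMPUTE_RETURN_ON_ERROR(validate_mm(input, weights, biases, output, gemmlowp_output_stage,
                                            /* gemm_3d_depth */ 1,
                                            /* skip_im2col   */ false,
                                            /* run_addition  */ true));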