diff options
author:    Gian Marco Iodice <gianmarco.iodice@arm.com> | 2019-06-14 16:11:10 +0100
committer: Georgios Pinitas <georgios.pinitas@arm.com> | 2019-06-20 16:02:39 +0000
commit:    e16c8906a2aedf00e910754a01fca8bc4189cfc7 (patch)
tree:      de9b88917bb00a76a9df68c9e92f05e38c5de817 /src/runtime/CL/functions/CLGEMMConvolutionLayer.cpp
parent:    0cbfda629dd8f684e625173341bab972f004222c (diff)
download:  ComputeLibrary-e16c8906a2aedf00e910754a01fca8bc4189cfc7.tar.gz
COMPMID-2053: Fuse bias addition with CLGEMMMatrixMultiplyReshapedKernel
Change-Id: I5bfd38c94a6fd18a1cba2104f7e1b04e7bef6ec2
Signed-off-by: Gian Marco Iodice <gianmarco.iodice@arm.com>
Reviewed-on: https://review.mlplatform.org/c/1359
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Diffstat (limited to 'src/runtime/CL/functions/CLGEMMConvolutionLayer.cpp')
-rw-r--r--  src/runtime/CL/functions/CLGEMMConvolutionLayer.cpp | 6 +-
1 file changed, 2 insertions(+), 4 deletions(-)
diff --git a/src/runtime/CL/functions/CLGEMMConvolutionLayer.cpp b/src/runtime/CL/functions/CLGEMMConvolutionLayer.cpp
index 4e518fcfd5..99f045a0bf 100644
--- a/src/runtime/CL/functions/CLGEMMConvolutionLayer.cpp
+++ b/src/runtime/CL/functions/CLGEMMConvolutionLayer.cpp
@@ -202,8 +202,7 @@ void CLGEMMConvolutionLayer::configure(const ICLTensor *input, const ICLTensor *
     _skip_col2im                = data_layout == DataLayout::NHWC;
     _append_bias                = (biases != nullptr) && (!_is_quantized);
     _is_activationlayer_enabled = act_info.enabled();
-    // In case of F16, fused bias will be used in GEMM
-    _run_addition = (_skip_im2col) && (_append_bias) && (data_type != DataType::F16);
+    _run_addition = (_skip_im2col) && (_append_bias);
 
     // Set the GPU target for im2col and col2im
     _im2col_kernel.set_target(CLScheduler::get().target());
@@ -388,8 +387,7 @@ Status CLGEMMConvolutionLayer::validate(const ITensorInfo *input, const ITensorI
     const bool skip_im2col               = (data_layout == DataLayout::NHWC && kernel_width == 1 && kernel_height == 1 && conv_info.stride().first == 1 && conv_info.stride().second == 1);
     const bool skip_col2im               = data_layout == DataLayout::NHWC;
     bool       is_activationlayer_enabled = act_info.enabled();
-    // In case of F16, fused bias will be used in GEMM
-    const bool run_addition = (skip_im2col) && (append_bias) && (data_type != DataType::F16);
+    const bool run_addition = (skip_im2col) && (append_bias);
 
     const UniformQuantizationInfo iq_info = input->quantization_info().uniform();
     const UniformQuantizationInfo wq_info = weights->quantization_info().uniform();