diff options
author | Vidhya Sudhan Loganathan <vidhyasudhan.loganathan@arm.com> | 2018-11-16 11:33:12 +0000 |
---|---|---|
committer | Georgios Pinitas <georgios.pinitas@arm.com> | 2018-11-16 17:37:40 +0000 |
commit | a25d16c86f0d870408bc8b941aa755093417b0f0 (patch) | |
tree | b62d145a4e5009d894262a7ffa66cdba8260bb03 /src/runtime/CL/functions/CLGEMM.cpp | |
parent | a7b54f44e2bf133179f24a34007bc93237dd2265 (diff) | |
download | ComputeLibrary-a25d16c86f0d870408bc8b941aa755093417b0f0.tar.gz |
COMPMID-1266 : Add support for FP16 in CLWinogradConvolutionLayer: 5x5 kernels
Introduced F32 accumulation for F16 winograd gemm and output transform
WinogradConvolution will be available for F16 only if the fast math flag is enabled
Change-Id: I215593c205236a0f9669218437bb40b184ec6a4f
Diffstat (limited to 'src/runtime/CL/functions/CLGEMM.cpp')
-rw-r--r-- | src/runtime/CL/functions/CLGEMM.cpp | 5 |
1 file changed, 3 insertions, 2 deletions
diff --git a/src/runtime/CL/functions/CLGEMM.cpp b/src/runtime/CL/functions/CLGEMM.cpp
index 6adbdc0cb6..baa0cf46dc 100644
--- a/src/runtime/CL/functions/CLGEMM.cpp
+++ b/src/runtime/CL/functions/CLGEMM.cpp
@@ -155,7 +155,8 @@ void CLGEMM::configure(const ICLTensor *a, const ICLTensor *b, const ICLTensor *
     // Configure and tune matrix multiply kernel
     _mm_kernel.configure(matrix_a, matrix_b, output, alpha, _is_interleaved_transposed,
                          GEMMReshapeInfo(m, n, k, mult_transpose1xW_width, mult_interleave4x4_height,
-                                         depth_output_gemm3d, reinterpret_input_as_3d));
+                                         depth_output_gemm3d, reinterpret_input_as_3d),
+                         gemm_info.fp_mixed_precision());
     CLScheduler::get().tune_kernel_static(_mm_kernel);

     if(_is_interleaved_transposed)
@@ -236,7 +237,7 @@ Status CLGEMM::validate(const ITensorInfo *a, const ITensorInfo *b, const ITenso
     }

     // Validate matrix multiply
-    ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMMatrixMultiplyKernel::validate(matrix_a_info, matrix_b_info, output, alpha, run_interleave_transpose, reshape_info, gpu_target));
+    ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMMatrixMultiplyKernel::validate(matrix_a_info, matrix_b_info, output, alpha, run_interleave_transpose, reshape_info, gpu_target, gemm_info.fp_mixed_precision()));

     if(beta != 0 && c != nullptr)
     {