From db9d46da3a8645d0c2cc71d035448999a36770ec Mon Sep 17 00:00:00 2001
From: Gian Marco Iodice
Date: Wed, 8 Aug 2018 12:29:38 +0100
Subject: COMPMID-1485 - Add support for NHWC when running NEGEMMConvolutionLayer with FP16/QASYMM8

When the GEMM3D check fails, we now fall back to the classic
implementation with im2col and col2im. In this manner the function can
work with QASYMM8 and FP16.

Change-Id: I359e9da3a63956f33b5acbc9bca4383b14af10e2
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/143372
Tested-by: Jenkins
Reviewed-by: Anthony Barbier
---
 src/runtime/CL/functions/CLGEMMConvolutionLayer.cpp | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/src/runtime/CL/functions/CLGEMMConvolutionLayer.cpp b/src/runtime/CL/functions/CLGEMMConvolutionLayer.cpp
index 26fd906dd1..1e639d9dff 100644
--- a/src/runtime/CL/functions/CLGEMMConvolutionLayer.cpp
+++ b/src/runtime/CL/functions/CLGEMMConvolutionLayer.cpp
@@ -214,8 +214,6 @@ void CLGEMMConvolutionLayer::configure(const ICLTensor *input, const ICLTensor *
     // Just append biases and do not transpose 1xW as it will be reshaped in CLGEMM
     _reshape_weights.configure(weights, biases_to_use, &_weights_reshaped);
 
-    weights = &_weights_reshaped;
-
     // Create tensor to store im2col reshaped inputs
     if(!_skip_im2col)
     {
@@ -258,7 +256,7 @@ void CLGEMMConvolutionLayer::configure(const ICLTensor *input, const ICLTensor *
     }
 
     // Configure and tune GEMM
-    configure_mm(gemm_input_to_use, weights, gemm_output_to_use, (data_layout == DataLayout::NHWC) ? conv_h : 1);
+    configure_mm(gemm_input_to_use, &_weights_reshaped, gemm_output_to_use, (data_layout == DataLayout::NHWC) ? conv_h : 1);
 
     if(!_skip_im2col)
    {
-- 
cgit v1.2.1
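
Below is a minimal sketch, not part of the patch above, of how the reworked path might be exercised from user code: an FP16 NHWC convolution configured through CLGEMMConvolutionLayer, the case where the GEMM3D check can fail and the function now falls back to im2col + GEMM + col2im. The tensor shapes, the main() wrapper, and the exact setup are illustrative assumptions based on the Compute Library public API of that period, not taken from this commit.

    #include "arm_compute/runtime/CL/CLScheduler.h"
    #include "arm_compute/runtime/CL/CLTensor.h"
    #include "arm_compute/runtime/CL/functions/CLGEMMConvolutionLayer.h"

    using namespace arm_compute;

    int main()
    {
        // Initialise the OpenCL scheduler/context.
        CLScheduler::get().default_init();

        // Illustrative shapes: 1x32x32x16 FP16 input, 8 filters of 3x3x16, no padding, stride 1.
        // With NHWC, TensorShape dimensions are ordered (C, W, H, N).
        TensorInfo src_info(TensorShape(16U, 32U, 32U, 1U), 1, DataType::F16);
        src_info.set_data_layout(DataLayout::NHWC);
        TensorInfo weights_info(TensorShape(16U, 3U, 3U, 8U), 1, DataType::F16);
        weights_info.set_data_layout(DataLayout::NHWC);
        TensorInfo biases_info(TensorShape(8U), 1, DataType::F16);
        TensorInfo dst_info(TensorShape(8U, 30U, 30U, 1U), 1, DataType::F16);
        dst_info.set_data_layout(DataLayout::NHWC);

        CLTensor src, weights, biases, dst;
        src.allocator()->init(src_info);
        weights.allocator()->init(weights_info);
        biases.allocator()->init(biases_info);
        dst.allocator()->init(dst_info);

        // With FP16 (or QASYMM8) the internal GEMM3D check may be rejected; after this
        // change configure() falls back to the im2col/col2im path instead of failing.
        CLGEMMConvolutionLayer conv;
        conv.configure(&src, &weights, &biases, &dst, PadStrideInfo(1, 1, 0, 0));

        src.allocator()->allocate();
        weights.allocator()->allocate();
        biases.allocator()->allocate();
        dst.allocator()->allocate();

        conv.run();
        CLScheduler::get().sync();
        return 0;
    }

For QASYMM8 the same call pattern would apply, with the tensor infos built as DataType::QASYMM8 and carrying the appropriate QuantizationInfo.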