From d02d5edfa15ba6c04a9986a8a362a945cb38ac31 Mon Sep 17 00:00:00 2001
From: Michele Di Giorgio
Date: Fri, 22 Jan 2021 09:47:04 +0000
Subject: Integrate improved CPU depthwise convolution kernels

* Replace assembly kernels for depthwise convolution with more optimized ones.
* Add int8 assembly kernels.
* Fix implicit padding on optimized kernels

Resolves: COMPMID-3867, COMPMID-4361

Change-Id: I0b0867e05f61be4f368f62190d55e14d0ab3ebf2
Signed-off-by: Michele Di Giorgio
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/5622
Tested-by: Arm Jenkins
Reviewed-by: Georgios Pinitas
---
 .../cl_gemmlowp_reshaped_rhs_only_fused_output_stage_fixedpoint.cpp | 2 --
 1 file changed, 2 deletions(-)

(limited to 'examples')

diff --git a/examples/gemm_tuner/cl_gemmlowp_reshaped_rhs_only_fused_output_stage_fixedpoint.cpp b/examples/gemm_tuner/cl_gemmlowp_reshaped_rhs_only_fused_output_stage_fixedpoint.cpp
index d8f8f1498a..ca7b7a5f04 100644
--- a/examples/gemm_tuner/cl_gemmlowp_reshaped_rhs_only_fused_output_stage_fixedpoint.cpp
+++ b/examples/gemm_tuner/cl_gemmlowp_reshaped_rhs_only_fused_output_stage_fixedpoint.cpp
@@ -236,7 +236,6 @@ public:
         gemmlowp_output_stage.output_data_type = dst.info()->data_type();
         gemmlowp_output_stage.gemmlowp_offset  = 0;
         {
-            const int idx_kernels = get_data_layout_dimension_index(lhs.info()->data_layout(), DataLayoutDimension::BATCHES);
             gemmlowp_output_stage.is_quantized_per_channel = false;
             // Num_filters is 1 unless quantized type is of per_channel type. Could be extended in the future to support per-channel quantization.
             const unsigned int num_filters = 1;
@@ -249,7 +248,6 @@ public:
             quantization::compute_quantized_multipliers_and_shifts(lhs.info(),
                                                                     rhs.info(),
                                                                     dst.info(),
-                                                                    idx_kernels,
                                                                     gemmlowp_output_stage.gemmlowp_multipliers.data(),
                                                                     gemmlowp_output_stage.gemmlowp_shifts.data());
             gemmlowp_output_stage.gemmlowp_multiplier = gemmlowp_output_stage.gemmlowp_multipliers[0];
--
cgit v1.2.1
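
Note on the values the hunk above feeds to the output stage: compute_quantized_multipliers_and_shifts fills gemmlowp_multipliers and gemmlowp_shifts with fixed-point requantization parameters, and the example then takes the first entry as the single per-tensor multiplier/shift pair. The sketch below shows one common way a real-valued requantization scale, typically (lhs_scale * rhs_scale) / dst_scale, is decomposed into an int32 multiplier and a shift; the helper name quantize_multiplier and its rounding details are assumptions for illustration, not the library's implementation.

#include <cmath>
#include <cstdint>
#include <utility>

// Hypothetical stand-alone helper (not part of the patch or of the
// Compute Library API): decompose a real-valued requantization scale
// into the int32 fixed-point multiplier and shift consumed by a
// gemmlowp-style output stage such as the one configured above.
std::pair<int32_t, int> quantize_multiplier(double scale)
{
    if(scale == 0.0)
    {
        return { 0, 0 };
    }
    int shift = 0;
    // Normalise the scale into [0.5, 1.0) and record the base-2 exponent.
    const double mantissa = std::frexp(scale, &shift);
    // Promote the mantissa to a Q0.31 fixed-point value.
    auto fixed = static_cast<int64_t>(std::llround(mantissa * (1LL << 31)));
    if(fixed == (1LL << 31))
    {
        // Rounding pushed the mantissa up to exactly 1.0: renormalise.
        fixed /= 2;
        ++shift;
    }
    // Result satisfies: scale ~= fixed / 2^31 * 2^shift
    return { static_cast<int32_t>(fixed), shift };
}

With the scale in this form, the kernel can apply requantization using only an integer multiply-high and an arithmetic shift, which is why fixed-point multiplier/shift pairs rather than floating-point scales are passed to the output stage.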