From 082630ba4e2de697b6d372dd35ffc1be6a42c346 Mon Sep 17 00:00:00 2001
From: Anitha Raj
Date: Tue, 22 Aug 2023 15:46:27 +0100
Subject: Update CpuGemmConv2d and CpuFlatten to use CpuReshape operator

- Following the CpuReshapeKernel optimizations, update CpuGemmConv2d and
  CpuFlatten to use the CpuReshape operator instead of CpuReshapeKernel
- Minor changes to a comment in NEReorgLayerKernel.h

Resolves COMPMID-6504

Signed-off-by: Anitha Raj
Change-Id: Ib6ee1fdc313d91249f9fe41c81e73324031c1ff4
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/10186
Reviewed-by: Jakub Sujak
Reviewed-by: Gunes Bayir
Comments-Addressed: Arm Jenkins
Tested-by: Arm Jenkins
Benchmark: Arm Jenkins
---
 src/cpu/operators/CpuGemmConv2d.cpp | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

(limited to 'src/cpu/operators/CpuGemmConv2d.cpp')

diff --git a/src/cpu/operators/CpuGemmConv2d.cpp b/src/cpu/operators/CpuGemmConv2d.cpp
index 7c0e58b94e..d11e4f0b24 100644
--- a/src/cpu/operators/CpuGemmConv2d.cpp
+++ b/src/cpu/operators/CpuGemmConv2d.cpp
@@ -35,11 +35,11 @@
 #include "src/core/helpers/MemoryHelpers.h"
 #include "src/cpu/kernels/CpuCol2ImKernel.h"
 #include "src/cpu/kernels/CpuIm2ColKernel.h"
-#include "src/cpu/kernels/CpuReshapeKernel.h"
 #include "src/cpu/kernels/CpuWeightsReshapeKernel.h"
 #include "src/cpu/operators/CpuGemm.h"
 #include "src/cpu/operators/CpuGemmLowpMatrixMultiplyCore.h"
 #include "src/cpu/operators/CpuGemmLowpOutputStage.h"
+#include "src/cpu/operators/CpuReshape.h"
 #include "src/cpu/utils/CpuAuxTensorHandler.h"
 
 #include
@@ -92,7 +92,7 @@ CpuGemmConv2d::SkipInfo CpuGemmConv2d::skip_im_col_info(const ITensorInfo *src,
 }
 
 CpuGemmConv2d::CpuGemmConv2d()
-    : _weights_reshape_kernel(nullptr), _im2col_kernel(), _mm_gemm(), _mm_gemmlowp(), _col2im_kernel(), _reshape_kernel(), _im2col_output(), _weights_reshaped(), _gemm_output(), _gemm_output_3d(),
+    : _weights_reshape_kernel(nullptr), _im2col_kernel(), _mm_gemm(), _mm_gemmlowp(), _col2im_kernel(), _reshape(), _im2col_output(), _weights_reshaped(), _gemm_output(), _gemm_output_3d(),
       _data_layout(DataLayout::NCHW), _skip_im2col(false), _skip_col2im(false), _is_quantized(false), _is_prepared(false), _aux_mem(AuxTensorIdx::Count)
 {
 }
@@ -379,8 +379,8 @@ void CpuGemmConv2d::configure(const ITensorInfo *src, const ITensorInfo *weights
     else
     {
         // Configure reshape layer
-        _reshape_kernel = std::make_unique<kernels::CpuReshapeKernel>();
-        _reshape_kernel->configure(gemm_output_to_use, dst);
+        _reshape = std::make_unique<CpuReshape>();
+        _reshape->configure(gemm_output_to_use, dst);
     }
 
     // Check if GEMM transforms weights
@@ -642,7 +642,7 @@ void CpuGemmConv2d::run(ITensorPack &tensors)
                 { TensorType::ACL_SRC, gemm_output_to_use },
                 { TensorType::ACL_DST, dst }
             };
-            NEScheduler::get().schedule_op(_reshape_kernel.get(), Window::DimY, _reshape_kernel->window(), pack);
+            _reshape->run(pack);
         }
     }
     else if(out_has_padding)
@@ -652,7 +652,7 @@ void CpuGemmConv2d::run(ITensorPack &tensors)
             { TensorType::ACL_SRC, gemm_output_to_use },
             { TensorType::ACL_DST, dst }
         };
-        NEScheduler::get().schedule_op(_reshape_kernel.get(), Window::DimY, _reshape_kernel->window(), pack);
+        _reshape->run(pack);
     }
 }
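For context, a minimal sketch (not part of the patch) of the configure/run pattern the change migrates to. The CpuReshape, ITensorPack and TensorType names are taken from the diff above; the helper functions, includes and raw pointers are illustrative assumptions, not library code.

```cpp
// Illustrative sketch only: mirrors the call-site change made by this patch.
#include "arm_compute/core/ITensor.h"
#include "arm_compute/core/ITensorInfo.h"
#include "arm_compute/core/ITensorPack.h"
#include "arm_compute/core/experimental/Types.h"
#include "src/cpu/operators/CpuReshape.h"

#include <memory>

using namespace arm_compute;

// Configure-time: build the CpuReshape operator, which owns the reshape kernel
// internally, instead of creating the kernel directly.
std::unique_ptr<cpu::CpuReshape> configure_reshape(const ITensorInfo *src, ITensorInfo *dst)
{
    auto reshape = std::make_unique<cpu::CpuReshape>();
    reshape->configure(src, dst);
    return reshape;
}

// Run-time: instead of NEScheduler::get().schedule_op(kernel, ...), the operator
// is handed an ITensorPack and run directly, as in the two run() hunks above.
void run_reshape(cpu::CpuReshape &reshape, ITensor *src, ITensor *dst)
{
    ITensorPack pack = { { TensorType::ACL_SRC, src }, { TensorType::ACL_DST, dst } };
    reshape.run(pack);
}
```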