From b4bb6a03f717a320b935809fde795b3d6ec5a69f Mon Sep 17 00:00:00 2001
From: Manuel Bottini
Date: Mon, 24 May 2021 16:01:32 +0100
Subject: Rename ported functions

Rename CpuPooling to CpuPool2d
Rename CpuPoolingKernel to CpuPool2dKernel
Rename CpuPoolingAssemblyWrapperKernel to CpuPool2dAssemblyWrapperKernel
Move CpuPool2dAssemblyWrapperKernel in internal subfolder
Rename CpuDepthwiseConvolutionNativeKernel to CpuDepthwiseConv2dNativeKernel
Rename CpuDepthwiseConvolutionAssemblyDispatch to CpuDepthwiseConv2dAssemblyDispatch
Rename CpuDepthwiseConvolution to CpuDepthwiseConv2d
Rename CpuDirectConvolutionKernel to CpuDirectConv2dKernel
Rename CpuDirectConvolutionOutputStageKernel to CpuDirectConv2dOutputStageKernel
Rename CpuDirectConvolution to CpuDirectConv2d
Rename ClPoolingKernel to ClPool2dKernel
Rename ClPooling to ClPool2d
Rename ClDirectConvolutionKernel to ClDirectConv2dKernel

Resolves: COMPMID-4405

Change-Id: I8e48f015e4e492a76a7512f5679cb3eb0cd028f6
Signed-off-by: Manuel Bottini
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/5708
Reviewed-by: Georgios Pinitas
Tested-by: Arm Jenkins
Comments-Addressed: Arm Jenkins
---
 .../NEON/functions/NEDepthwiseConvolutionLayer.cpp | 66 +++++++++++-----------
 1 file changed, 33 insertions(+), 33 deletions(-)

(limited to 'src/runtime/NEON/functions/NEDepthwiseConvolutionLayer.cpp')

diff --git a/src/runtime/NEON/functions/NEDepthwiseConvolutionLayer.cpp b/src/runtime/NEON/functions/NEDepthwiseConvolutionLayer.cpp
index da9610ef42..a561b88058 100644
--- a/src/runtime/NEON/functions/NEDepthwiseConvolutionLayer.cpp
+++ b/src/runtime/NEON/functions/NEDepthwiseConvolutionLayer.cpp
@@ -27,7 +27,7 @@
 #include "arm_compute/core/utils/misc/ShapeCalculator.h"
 #include "arm_compute/core/utils/quantization/AsymmHelpers.h"
 #include "arm_compute/runtime/NEON/NEScheduler.h"
-#include "src/runtime/cpu/operators/CpuDepthwiseConvolution.h"
+#include "src/runtime/cpu/operators/CpuDepthwiseConv2d.h"
 
 using namespace arm_compute::misc;
 using namespace arm_compute::misc::shape_calculator;
@@ -47,15 +47,15 @@ struct NEDepthwiseConvolutionLayer::NEDepthwiseConvolutionLayerOptimizedInternal
     const ITensor *biases
     {
         nullptr
-    };                                                                // SRC_2
-    Tensor                                        permuted_input{};   // INT_0
-    Tensor                                        permuted_weights{}; // INT_1
-    Tensor                                        permuted_output{};  // INT_2
-    Tensor                                        workspace{};        // INT_3
-    Tensor                                        packed_weights{};   // INT_4
-    std::shared_ptr<cpu::CpuDepthwiseConvolution> op{ nullptr };
-    bool                                          is_prepared{ false };
-    bool                                          permute{ false };
+    };                                                           // SRC_2
+    Tensor                                   permuted_input{};   // INT_0
+    Tensor                                   permuted_weights{}; // INT_1
+    Tensor                                   permuted_output{};  // INT_2
+    Tensor                                   workspace{};        // INT_3
+    Tensor                                   packed_weights{};   // INT_4
+    std::shared_ptr<cpu::CpuDepthwiseConv2d> op{ nullptr };
+    bool                                     is_prepared{ false };
+    bool                                     permute{ false };
 };
 
 NEDepthwiseConvolutionLayer::NEDepthwiseConvolutionLayerOptimizedInternal::NEDepthwiseConvolutionLayerOptimizedInternal(std::shared_ptr<IMemoryManager> memory_manager)
@@ -80,7 +80,7 @@ void NEDepthwiseConvolutionLayer::NEDepthwiseConvolutionLayerOptimizedInternal::
     _impl->dst     = output;
     _impl->permute = is_nhwc;
 
-    _impl->op = std::make_unique<cpu::CpuDepthwiseConvolution>();
+    _impl->op = std::make_unique<cpu::CpuDepthwiseConv2d>();
     ConvolutionInfo info{ conv_info, depth_multiplier, act_info, dilation };
     _impl->op->configure(_impl->src->info(), _impl->weights->info(), _impl->biases == nullptr ? nullptr : _impl->biases->info(), _impl->dst->info(), info);
 
@@ -97,7 +97,7 @@ void NEDepthwiseConvolutionLayer::NEDepthwiseConvolutionLayerOptimizedInternal::
     }
     info = ConvolutionInfo{ conv_info, depth_multiplier, act_info_to_use, dilation };
 
-    auto dwc_optimized_func = std::make_unique<cpu::CpuDepthwiseConvolutionAssemblyDispatch>();
+    auto dwc_optimized_func = std::make_unique<cpu::CpuDepthwiseConv2dAssemblyDispatch>();
 
     if(is_nhwc)
     {
@@ -154,7 +154,7 @@ Status NEDepthwiseConvolutionLayer::NEDepthwiseConvolutionLayerOptimizedInternal
                                                                                const Size2D &dilation)
 {
     ConvolutionInfo info{ conv_info, depth_multiplier, act_info, dilation };
-    return cpu::CpuDepthwiseConvolution::validate(input, weights, biases, output, info);
+    return cpu::CpuDepthwiseConv2d::validate(input, weights, biases, output, info);
 }
 
 void NEDepthwiseConvolutionLayer::NEDepthwiseConvolutionLayerOptimizedInternal::run()
@@ -197,17 +197,17 @@ void NEDepthwiseConvolutionLayer::NEDepthwiseConvolutionLayerOptimizedInternal::
 
 struct NEDepthwiseConvolutionLayer::NEDepthwiseConvolutionLayerGeneric::Impl
 {
-    Tensor                                        permuted_input{};
-    Tensor                                        permuted_weights{};
-    Tensor                                        permuted_output{};
-    bool                                          is_prepared{ false };
-    bool                                          is_nchw{ false };
-    bool                                          is_activationlayer_enabled{ false };
-    const ITensor                                *weights{ nullptr };
-    const ITensor                                *biases{ nullptr };
-    const ITensor                                *src{ nullptr };
-    ITensor                                      *dst{ nullptr };
-    std::shared_ptr<cpu::CpuDepthwiseConvolution> op{ nullptr };
+    Tensor                                   permuted_input{};
+    Tensor                                   permuted_weights{};
+    Tensor                                   permuted_output{};
+    bool                                     is_prepared{ false };
+    bool                                     is_nchw{ false };
+    bool                                     is_activationlayer_enabled{ false };
+    const ITensor                           *weights{ nullptr };
+    const ITensor                           *biases{ nullptr };
+    const ITensor                           *src{ nullptr };
+    ITensor                                 *dst{ nullptr };
+    std::shared_ptr<cpu::CpuDepthwiseConv2d> op{ nullptr };
 };
 
 NEDepthwiseConvolutionLayer::NEDepthwiseConvolutionLayerGeneric::NEDepthwiseConvolutionLayerGeneric()
@@ -223,7 +223,7 @@ void NEDepthwiseConvolutionLayer::NEDepthwiseConvolutionLayerGeneric::configure(
                                                            output->info(), conv_info, depth_multiplier, act_info, dilation));
 
     const ConvolutionInfo info{ conv_info, depth_multiplier, act_info, dilation };
-    _impl->op = std::make_unique<cpu::CpuDepthwiseConvolution>();
+    _impl->op = std::make_unique<cpu::CpuDepthwiseConv2d>();
     _impl->op->configure(input->info(), weights->info(), biases == nullptr ? nullptr : biases->info(), output->info(), info);
 
     _impl->src = input;
@@ -253,7 +253,7 @@ void NEDepthwiseConvolutionLayer::NEDepthwiseConvolutionLayerGeneric::configure(
         output_to_use = &_impl->permuted_output;
     }
 
-    auto depthwise_conv_kernel = std::make_unique<cpu::kernels::CpuDepthwiseConvolutionNativeKernel>();
+    auto depthwise_conv_kernel = std::make_unique<cpu::kernels::CpuDepthwiseConv2dNativeKernel>();
     depthwise_conv_kernel->configure(input_to_use->info(), weights_to_use->info(), biases == nullptr ? nullptr : biases->info(), output_to_use->info(), info);
     if(_impl->is_nchw)
     {
@@ -273,7 +273,7 @@ Status NEDepthwiseConvolutionLayer::NEDepthwiseConvolutionLayerGeneric::validate
                                                                  unsigned int depth_multiplier, const ActivationLayerInfo &act_info, const Size2D &dilation)
 {
     ConvolutionInfo info{ conv_info, depth_multiplier, act_info, dilation };
-    return cpu::CpuDepthwiseConvolution::validate(input, weights, biases, output, info);
+    return cpu::CpuDepthwiseConv2d::validate(input, weights, biases, output, info);
 }
 
 void NEDepthwiseConvolutionLayer::NEDepthwiseConvolutionLayerGeneric::run()
@@ -298,10 +298,10 @@ NEDepthwiseConvolutionLayer::NEDepthwiseConvolutionLayer(std::shared_ptr<IMemory
 #ifndef DOXYGEN_SKIP_THIS
 struct NEDepthwiseConvolutionLayer::Impl
 {
-    DepthwiseConvolutionFunction                  depth_conv_func{ DepthwiseConvolutionFunction::OPTIMIZED };
-    NEDepthwiseConvolutionLayerOptimizedInternal  func_optimized{ nullptr };
-    NEDepthwiseConvolutionLayerGeneric            func_generic{};
-    std::shared_ptr<cpu::CpuDepthwiseConvolution> op{ nullptr };
+    DepthwiseConvolutionFunction                 depth_conv_func{ DepthwiseConvolutionFunction::OPTIMIZED };
+    NEDepthwiseConvolutionLayerOptimizedInternal func_optimized{ nullptr };
+    NEDepthwiseConvolutionLayerGeneric           func_generic{};
+    std::shared_ptr<cpu::CpuDepthwiseConv2d>     op{ nullptr };
 };
 #endif // DOXYGEN_SKIP_THIS
 
@@ -309,7 +309,7 @@ void NEDepthwiseConvolutionLayer::configure(ITensor *input, const ITensor *weigh
                                             const ActivationLayerInfo &act_info, const Size2D &dilation)
 {
     const ConvolutionInfo info{ conv_info, depth_multiplier, act_info, dilation };
-    _impl->op = std::make_shared<cpu::CpuDepthwiseConvolution>();
+    _impl->op = std::make_shared<cpu::CpuDepthwiseConv2d>();
     _impl->depth_conv_func = _impl->op->get_depthwiseconvolution_function(input->info(), weights->info(), (biases != nullptr) ? biases->info() : nullptr, output->info(), info);
 
     switch(_impl->depth_conv_func)
@@ -329,7 +329,7 @@ Status NEDepthwiseConvolutionLayer::validate(const ITensorInfo *input, const ITe
                                              unsigned int depth_multiplier, const ActivationLayerInfo &act_info, const Size2D &dilation)
 {
     ConvolutionInfo info{ conv_info, depth_multiplier, act_info, dilation };
-    return cpu::CpuDepthwiseConvolution::validate(input, weights, biases, output, info);
+    return cpu::CpuDepthwiseConv2d::validate(input, weights, biases, output, info);
 }
 
 void NEDepthwiseConvolutionLayer::run()
-- 
cgit v1.2.1
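
Illustrative note (not part of the patch): the sketch below mirrors the validate() call that the runtime layer above forwards to the renamed operator, to show what a call site looks like once CpuDepthwiseConvolution becomes CpuDepthwiseConv2d. The tensor shapes, the NHWC layout, and the non-operator header paths are assumptions made for the example; the operator header is internal to the library, so this is only expected to build inside the Compute Library source tree.

// Hypothetical call-site sketch: validate a depthwise configuration directly against
// the renamed cpu::CpuDepthwiseConv2d operator (previously cpu::CpuDepthwiseConvolution).
#include "arm_compute/core/Error.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"
#include "src/runtime/cpu/operators/CpuDepthwiseConv2d.h"

#include <iostream>
#include <string>

using namespace arm_compute;

int main()
{
    // Assumed NHWC tensors: 1x32x32x16 input, 3x3 depthwise weights, per-channel bias.
    TensorInfo src(TensorShape(16U, 32U, 32U, 1U), 1, DataType::F32);
    TensorInfo weights(TensorShape(16U, 3U, 3U), 1, DataType::F32);
    TensorInfo biases(TensorShape(16U), 1, DataType::F32);
    TensorInfo dst(TensorShape(16U, 30U, 30U, 1U), 1, DataType::F32);
    src.set_data_layout(DataLayout::NHWC);
    weights.set_data_layout(DataLayout::NHWC);
    dst.set_data_layout(DataLayout::NHWC);

    // Same aggregate the runtime layer builds before delegating to the operator:
    // { PadStrideInfo, depth_multiplier, ActivationLayerInfo, dilation }.
    const ConvolutionInfo info{ PadStrideInfo(1, 1, 0, 0), 1, ActivationLayerInfo(), Size2D(1U, 1U) };

    // The call renamed by this patch: cpu::CpuDepthwiseConvolution::validate(...) -> cpu::CpuDepthwiseConv2d::validate(...)
    const Status status = cpu::CpuDepthwiseConv2d::validate(&src, &weights, &biases, &dst, info);
    std::cout << (status.error_code() == ErrorCode::OK ? std::string("valid") : status.error_description()) << std::endl;
    return 0;
}

Only the internal operator and kernel names change in this patch; the public NEDepthwiseConvolutionLayer interface is untouched.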