Diffstat (limited to 'src/runtime/NEON/functions/NEDepthwiseConvolutionLayer.cpp')
 src/runtime/NEON/functions/NEDepthwiseConvolutionLayer.cpp | 205 ++++++++------
 1 file changed, 122 insertions(+), 83 deletions(-)
diff --git a/src/runtime/NEON/functions/NEDepthwiseConvolutionLayer.cpp b/src/runtime/NEON/functions/NEDepthwiseConvolutionLayer.cpp
index a561b88058..6c085645db 100644
--- a/src/runtime/NEON/functions/NEDepthwiseConvolutionLayer.cpp
+++ b/src/runtime/NEON/functions/NEDepthwiseConvolutionLayer.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2021 Arm Limited.
+ * Copyright (c) 2017-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -27,7 +27,9 @@
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "arm_compute/core/utils/quantization/AsymmHelpers.h"
#include "arm_compute/runtime/NEON/NEScheduler.h"
-#include "src/runtime/cpu/operators/CpuDepthwiseConv2d.h"
+
+#include "src/common/utils/Log.h"
+#include "src/cpu/operators/CpuDepthwiseConv2d.h"
using namespace arm_compute::misc;
using namespace arm_compute::misc::shape_calculator;
@@ -38,38 +40,35 @@ NEDepthwiseConvolutionLayer::~NEDepthwiseConvolutionLayer() = default;
struct NEDepthwiseConvolutionLayer::NEDepthwiseConvolutionLayerOptimizedInternal::Impl
{
- ITensor *src{ nullptr }; // SRC_0
- ITensor *dst{ nullptr }; // DST_0
- const ITensor *weights
- {
- nullptr
- }; // SRC_1
- const ITensor *biases
- {
- nullptr
- }; // SRC_2
+ ITensor *src{nullptr}; // SRC_0
+ ITensor *dst{nullptr}; // DST_0
+ const ITensor *weights{nullptr}; // SRC_1
+ const ITensor *biases{nullptr}; // SRC_2
Tensor permuted_input{}; // INT_0
Tensor permuted_weights{}; // INT_1
Tensor permuted_output{}; // INT_2
Tensor workspace{}; // INT_3
Tensor packed_weights{}; // INT_4
- std::shared_ptr<cpu::CpuDepthwiseConv2d> op{ nullptr };
- bool is_prepared{ false };
- bool permute{ false };
+ std::shared_ptr<cpu::CpuDepthwiseConv2d> op{nullptr};
+ bool is_prepared{false};
+ bool permute{false};
};
-NEDepthwiseConvolutionLayer::NEDepthwiseConvolutionLayerOptimizedInternal::NEDepthwiseConvolutionLayerOptimizedInternal(std::shared_ptr<IMemoryManager> memory_manager)
+NEDepthwiseConvolutionLayer::NEDepthwiseConvolutionLayerOptimizedInternal::NEDepthwiseConvolutionLayerOptimizedInternal(
+ std::shared_ptr<IMemoryManager> memory_manager)
: _memory_group(memory_manager), _impl(std::make_unique<Impl>())
{
}
-void NEDepthwiseConvolutionLayer::NEDepthwiseConvolutionLayerOptimizedInternal::configure(ITensor *input,
- const ITensor *weights,
- const ITensor *biases,
- ITensor *output, const PadStrideInfo &conv_info,
- unsigned int depth_multiplier,
- const ActivationLayerInfo &act_info,
- const Size2D &dilation)
+void NEDepthwiseConvolutionLayer::NEDepthwiseConvolutionLayerOptimizedInternal::configure(
+ ITensor *input,
+ const ITensor *weights,
+ const ITensor *biases,
+ ITensor *output,
+ const PadStrideInfo &conv_info,
+ unsigned int depth_multiplier,
+ const ActivationLayerInfo &act_info,
+ const Size2D &dilation)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights, output);
@@ -81,9 +80,9 @@ void NEDepthwiseConvolutionLayer::NEDepthwiseConvolutionLayerOptimizedInternal::
_impl->permute = is_nhwc;
_impl->op = std::make_unique<cpu::CpuDepthwiseConv2d>();
- ConvolutionInfo info{ conv_info, depth_multiplier, act_info, dilation };
- _impl->op->configure(_impl->src->info(), _impl->weights->info(), _impl->biases == nullptr ? nullptr : _impl->biases->info(),
- _impl->dst->info(), info);
+ ConvolutionInfo info{conv_info, depth_multiplier, act_info, dilation};
+ _impl->op->configure(_impl->src->info(), _impl->weights->info(),
+ _impl->biases == nullptr ? nullptr : _impl->biases->info(), _impl->dst->info(), info);
// Configure pipeline
ActivationLayerInfo act_info_to_use = ActivationLayerInfo();
@@ -91,15 +90,15 @@ void NEDepthwiseConvolutionLayer::NEDepthwiseConvolutionLayerOptimizedInternal::
const bool is_relu6 = arm_compute::utils::info_helpers::is_relu6(act_info);
bool is_activationlayer_enabled = act_info.enabled() && !(is_relu || is_relu6);
- if(!is_activationlayer_enabled)
+ if (!is_activationlayer_enabled)
{
act_info_to_use = act_info;
}
- info = ConvolutionInfo{ conv_info, depth_multiplier, act_info_to_use, dilation };
+ info = ConvolutionInfo{conv_info, depth_multiplier, act_info_to_use, dilation};
auto dwc_optimized_func = std::make_unique<cpu::CpuDepthwiseConv2dAssemblyDispatch>();
- if(is_nhwc)
+ if (is_nhwc)
{
auto permute_input = std::make_unique<cpu::CpuPermute>();
auto permute_weights = std::make_unique<cpu::CpuPermute>();
@@ -121,7 +120,9 @@ void NEDepthwiseConvolutionLayer::NEDepthwiseConvolutionLayerOptimizedInternal::
_impl->permuted_output.info()->set_quantization_info(output->info()->quantization_info());
// Configure optimized depthwise
- dwc_optimized_func->configure(_impl->permuted_input.info(), _impl->permuted_weights.info(), biases == nullptr ? nullptr : biases->info(), _impl->permuted_output.info(), info);
+ dwc_optimized_func->configure(_impl->permuted_input.info(), _impl->permuted_weights.info(),
+ biases == nullptr ? nullptr : biases->info(), _impl->permuted_output.info(),
+ info);
// Configure the function to transform the convoluted output to ACL's native ordering format NCHW
_impl->permuted_output.info()->set_data_layout(DataLayout::NHWC);
@@ -132,28 +133,33 @@ void NEDepthwiseConvolutionLayer::NEDepthwiseConvolutionLayerOptimizedInternal::
}
else
{
- dwc_optimized_func->configure(_impl->src->info(), _impl->weights->info(), biases == nullptr ? nullptr : biases->info(), _impl->dst->info(), info);
+ dwc_optimized_func->configure(_impl->src->info(), _impl->weights->info(),
+ biases == nullptr ? nullptr : biases->info(), _impl->dst->info(), info);
}
// Allocate memory based on the internal memory requirements
experimental::MemoryRequirements mem_req = dwc_optimized_func->workspace();
- _impl->workspace.allocator()->init(TensorInfo(TensorShape{ mem_req[0].size }, 1, DataType::S8), mem_req[0].alignment);
- _impl->packed_weights.allocator()->init(TensorInfo(TensorShape{ mem_req[1].size }, 1, DataType::S8), mem_req[1].alignment);
-
+ _impl->workspace.allocator()->init(TensorInfo(TensorShape{mem_req[0].size + mem_req[0].alignment}, 1, DataType::S8),
+ mem_req[0].alignment);
+ _impl->packed_weights.allocator()->init(
+ TensorInfo(TensorShape{mem_req[1].size + mem_req[1].alignment}, 1, DataType::S8), mem_req[1].alignment);
+ _memory_group.manage(&_impl->workspace);
+ _memory_group.manage(&_impl->packed_weights);
_impl->workspace.allocator()->allocate();
_impl->packed_weights.allocator()->allocate();
}
-Status NEDepthwiseConvolutionLayer::NEDepthwiseConvolutionLayerOptimizedInternal::validate(const ITensorInfo *input,
- const ITensorInfo *weights,
- const ITensorInfo *biases,
- const ITensorInfo *output,
- const PadStrideInfo &conv_info,
- unsigned int depth_multiplier,
- const ActivationLayerInfo &act_info,
- const Size2D &dilation)
+Status
+NEDepthwiseConvolutionLayer::NEDepthwiseConvolutionLayerOptimizedInternal::validate(const ITensorInfo *input,
+ const ITensorInfo *weights,
+ const ITensorInfo *biases,
+ const ITensorInfo *output,
+ const PadStrideInfo &conv_info,
+ unsigned int depth_multiplier,
+ const ActivationLayerInfo &act_info,
+ const Size2D &dilation)
{
- ConvolutionInfo info{ conv_info, depth_multiplier, act_info, dilation };
+ ConvolutionInfo info{conv_info, depth_multiplier, act_info, dilation};
return cpu::CpuDepthwiseConv2d::validate(input, weights, biases, output, info);
}
@@ -178,15 +184,15 @@ void NEDepthwiseConvolutionLayer::NEDepthwiseConvolutionLayerOptimizedInternal::
void NEDepthwiseConvolutionLayer::NEDepthwiseConvolutionLayerOptimizedInternal::prepare()
{
- if(!_impl->is_prepared)
+ if (!_impl->is_prepared)
{
// Permute weights
- if(_impl->permute)
+ if (_impl->permute)
{
_impl->permuted_weights.allocator()->allocate();
}
- if(!_impl->permuted_weights.is_used())
+ if (!_impl->permuted_weights.is_used())
{
_impl->permuted_weights.allocator()->free();
}
@@ -200,14 +206,14 @@ struct NEDepthwiseConvolutionLayer::NEDepthwiseConvolutionLayerGeneric::Impl
Tensor permuted_input{};
Tensor permuted_weights{};
Tensor permuted_output{};
- bool is_prepared{ false };
- bool is_nchw{ false };
- bool is_activationlayer_enabled{ false };
- const ITensor *weights{ nullptr };
- const ITensor *biases{ nullptr };
- const ITensor *src{ nullptr };
- ITensor *dst{ nullptr };
- std::shared_ptr<cpu::CpuDepthwiseConv2d> op{ nullptr };
+ bool is_prepared{false};
+ bool is_nchw{false};
+ bool is_activationlayer_enabled{false};
+ const ITensor *weights{nullptr};
+ const ITensor *biases{nullptr};
+ const ITensor *src{nullptr};
+ ITensor *dst{nullptr};
+ std::shared_ptr<cpu::CpuDepthwiseConv2d> op{nullptr};
};
NEDepthwiseConvolutionLayer::NEDepthwiseConvolutionLayerGeneric::NEDepthwiseConvolutionLayerGeneric()
@@ -215,16 +221,21 @@ NEDepthwiseConvolutionLayer::NEDepthwiseConvolutionLayerGeneric::NEDepthwiseConv
{
}
-void NEDepthwiseConvolutionLayer::NEDepthwiseConvolutionLayerGeneric::configure(ITensor *input, const ITensor *weights, const ITensor *biases, ITensor *output, const PadStrideInfo &conv_info,
- unsigned int depth_multiplier, const ActivationLayerInfo &act_info, const Size2D &dilation)
+void NEDepthwiseConvolutionLayer::NEDepthwiseConvolutionLayerGeneric::configure(ITensor *input,
+ const ITensor *weights,
+ const ITensor *biases,
+ ITensor *output,
+ const PadStrideInfo &conv_info,
+ unsigned int depth_multiplier,
+ const ActivationLayerInfo &act_info,
+ const Size2D &dilation)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights, output);
- ARM_COMPUTE_ERROR_THROW_ON(NEDepthwiseConvolutionLayer::validate(input->info(), weights->info(), (biases == nullptr) ? nullptr : biases->info(),
- output->info(), conv_info, depth_multiplier, act_info, dilation));
- const ConvolutionInfo info{ conv_info, depth_multiplier, act_info, dilation };
+ const ConvolutionInfo info{conv_info, depth_multiplier, act_info, dilation};
_impl->op = std::make_unique<cpu::CpuDepthwiseConv2d>();
- _impl->op->configure(input->info(), weights->info(), biases == nullptr ? nullptr : biases->info(), output->info(), info);
+ _impl->op->configure(input->info(), weights->info(), biases == nullptr ? nullptr : biases->info(), output->info(),
+ info);
_impl->src = input;
_impl->dst = output;
@@ -236,7 +247,7 @@ void NEDepthwiseConvolutionLayer::NEDepthwiseConvolutionLayerGeneric::configure(
ITensor *input_to_use = input;
const ITensor *weights_to_use = weights;
ITensor *output_to_use = output;
- if(_impl->is_nchw)
+ if (_impl->is_nchw)
{
auto permute_input = std::make_unique<cpu::CpuPermute>();
auto permute_weights = std::make_unique<cpu::CpuPermute>();
@@ -249,14 +260,16 @@ void NEDepthwiseConvolutionLayer::NEDepthwiseConvolutionLayerGeneric::configure(
_impl->permuted_weights.info()->set_data_layout(DataLayout::NHWC);
weights_to_use = &_impl->permuted_weights;
- _impl->permuted_output.allocator()->init(output->info()->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(TensorShape()));
+ _impl->permuted_output.allocator()->init(
+ output->info()->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(TensorShape()));
output_to_use = &_impl->permuted_output;
}
auto depthwise_conv_kernel = std::make_unique<cpu::kernels::CpuDepthwiseConv2dNativeKernel>();
- depthwise_conv_kernel->configure(input_to_use->info(), weights_to_use->info(), biases == nullptr ? nullptr : biases->info(), output_to_use->info(), info);
+ depthwise_conv_kernel->configure(input_to_use->info(), weights_to_use->info(),
+ biases == nullptr ? nullptr : biases->info(), output_to_use->info(), info);
- if(_impl->is_nchw)
+ if (_impl->is_nchw)
{
auto permute_output = std::make_unique<cpu::CpuPermute>();
permute_output->configure(_impl->permuted_output.info(), output->info(), PermutationVector(1U, 2U, 0U));
@@ -268,11 +281,16 @@ void NEDepthwiseConvolutionLayer::NEDepthwiseConvolutionLayerGeneric::configure(
}
}
-Status NEDepthwiseConvolutionLayer::NEDepthwiseConvolutionLayerGeneric::validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output,
+Status NEDepthwiseConvolutionLayer::NEDepthwiseConvolutionLayerGeneric::validate(const ITensorInfo *input,
+ const ITensorInfo *weights,
+ const ITensorInfo *biases,
+ const ITensorInfo *output,
const PadStrideInfo &conv_info,
- unsigned int depth_multiplier, const ActivationLayerInfo &act_info, const Size2D &dilation)
+ unsigned int depth_multiplier,
+ const ActivationLayerInfo &act_info,
+ const Size2D &dilation)
{
- ConvolutionInfo info{ conv_info, depth_multiplier, act_info, dilation };
+ ConvolutionInfo info{conv_info, depth_multiplier, act_info, dilation};
return cpu::CpuDepthwiseConv2d::validate(input, weights, biases, output, info);
}
@@ -298,43 +316,64 @@ NEDepthwiseConvolutionLayer::NEDepthwiseConvolutionLayer(std::shared_ptr<IMemory
#ifndef DOXYGEN_SKIP_THIS
struct NEDepthwiseConvolutionLayer::NEDepthwiseConvolutionLayer::Impl
{
- DepthwiseConvolutionFunction depth_conv_func{ DepthwiseConvolutionFunction::OPTIMIZED };
- NEDepthwiseConvolutionLayerOptimizedInternal func_optimized{ nullptr };
+ DepthwiseConvolutionFunction depth_conv_func{DepthwiseConvolutionFunction::OPTIMIZED};
+ NEDepthwiseConvolutionLayerOptimizedInternal func_optimized{nullptr};
NEDepthwiseConvolutionLayerGeneric func_generic{};
- std::shared_ptr<cpu::CpuDepthwiseConv2d> op{ nullptr };
+ std::shared_ptr<cpu::CpuDepthwiseConv2d> op{nullptr};
};
#endif // DOXYGEN_SKIP_THIS
-void NEDepthwiseConvolutionLayer::configure(ITensor *input, const ITensor *weights, const ITensor *biases, ITensor *output, const PadStrideInfo &conv_info, unsigned int depth_multiplier,
- const ActivationLayerInfo &act_info, const Size2D &dilation)
+void NEDepthwiseConvolutionLayer::configure(ITensor *input,
+ const ITensor *weights,
+ const ITensor *biases,
+ ITensor *output,
+ const PadStrideInfo &conv_info,
+ unsigned int depth_multiplier,
+ const ActivationLayerInfo &act_info,
+ const Size2D &dilation)
{
- const ConvolutionInfo info{ conv_info, depth_multiplier, act_info, dilation };
+ ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights, output);
+
+ ARM_COMPUTE_LOG_PARAMS(input, weights, output, conv_info, depth_multiplier, biases, act_info, dilation);
+ ARM_COMPUTE_ERROR_THROW_ON(NEDepthwiseConvolutionLayer::validate(
+ input->info(), weights->info(), (biases == nullptr) ? nullptr : biases->info(), output->info(), conv_info,
+ depth_multiplier, act_info, dilation));
+
+ const ConvolutionInfo info{conv_info, depth_multiplier, act_info, dilation};
_impl->op = std::make_shared<cpu::CpuDepthwiseConv2d>();
- _impl->depth_conv_func = _impl->op->get_depthwiseconvolution_function(input->info(), weights->info(), (biases != nullptr) ? biases->info() : nullptr, output->info(),
- info);
- switch(_impl->depth_conv_func)
+ _impl->depth_conv_func = _impl->op->get_depthwiseconvolution_function(
+ input->info(), weights->info(), (biases != nullptr) ? biases->info() : nullptr, output->info(), info);
+ switch (_impl->depth_conv_func)
{
case DepthwiseConvolutionFunction::OPTIMIZED:
- _impl->func_optimized.configure(input, weights, biases, output, conv_info, depth_multiplier, act_info, dilation);
+ _impl->func_optimized.configure(input, weights, biases, output, conv_info, depth_multiplier, act_info,
+ dilation);
break;
case DepthwiseConvolutionFunction::GENERIC:
- _impl->func_generic.configure(input, weights, biases, output, conv_info, depth_multiplier, act_info, dilation);
+ _impl->func_generic.configure(input, weights, biases, output, conv_info, depth_multiplier, act_info,
+ dilation);
break;
default:
ARM_COMPUTE_ERROR("Unsupported DepthwiseConvolutionFunction");
}
}
-Status NEDepthwiseConvolutionLayer::validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info,
- unsigned int depth_multiplier, const ActivationLayerInfo &act_info, const Size2D &dilation)
+Status NEDepthwiseConvolutionLayer::validate(const ITensorInfo *input,
+ const ITensorInfo *weights,
+ const ITensorInfo *biases,
+ const ITensorInfo *output,
+ const PadStrideInfo &conv_info,
+ unsigned int depth_multiplier,
+ const ActivationLayerInfo &act_info,
+ const Size2D &dilation)
{
- ConvolutionInfo info{ conv_info, depth_multiplier, act_info, dilation };
+ ConvolutionInfo info{conv_info, depth_multiplier, act_info, dilation};
return cpu::CpuDepthwiseConv2d::validate(input, weights, biases, output, info);
}
void NEDepthwiseConvolutionLayer::run()
{
- switch(_impl->depth_conv_func)
+ switch (_impl->depth_conv_func)
{
case DepthwiseConvolutionFunction::OPTIMIZED:
_impl->func_optimized.run();
@@ -349,7 +388,7 @@ void NEDepthwiseConvolutionLayer::run()
void NEDepthwiseConvolutionLayer::prepare()
{
- switch(_impl->depth_conv_func)
+ switch (_impl->depth_conv_func)
{
case DepthwiseConvolutionFunction::OPTIMIZED:
_impl->func_optimized.prepare();
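
Below is a minimal usage sketch of the public NEDepthwiseConvolutionLayer interface whose configure()/validate() signatures are reformatted in this patch. It is not part of the diff: the tensor shapes, the F32 data type and the pad/stride/activation/dilation values are illustrative assumptions.

#include "arm_compute/runtime/NEON/functions/NEDepthwiseConvolutionLayer.h"
#include "arm_compute/runtime/Tensor.h"

using namespace arm_compute;

int main()
{
    // Default NCHW layout: 8x8 spatial, 16 channels, batch 1; 3x3 depthwise kernel.
    Tensor src, weights, biases, dst;
    src.allocator()->init(TensorInfo(TensorShape(8U, 8U, 16U, 1U), 1, DataType::F32));
    weights.allocator()->init(TensorInfo(TensorShape(3U, 3U, 16U), 1, DataType::F32));
    biases.allocator()->init(TensorInfo(TensorShape(16U), 1, DataType::F32));
    dst.allocator()->init(TensorInfo(TensorShape(8U, 8U, 16U, 1U), 1, DataType::F32));

    // Same argument order as the configure() overloads above:
    // (input, weights, biases, output, conv_info, depth_multiplier, act_info, dilation).
    NEDepthwiseConvolutionLayer dwc;
    dwc.configure(&src, &weights, &biases, &dst,
                  PadStrideInfo(1, 1, 1, 1), // stride 1, pad 1 -> same-size output
                  1,                         // depth multiplier
                  ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
                  Size2D(1U, 1U));           // no dilation

    // Allocate backing memory after configuration, then fill and run.
    src.allocator()->allocate();
    weights.allocator()->allocate();
    biases.allocator()->allocate();
    dst.allocator()->allocate();

    // ... fill src, weights and biases with data ...

    dwc.run(); // dispatches to the optimized or generic internal path selected at configure time
    return 0;
}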