diff options
author | Felix Thomasmathibalan <felixjohnny.thomasmathibalan@arm.com> | 2023-09-27 17:46:17 +0100 |
---|---|---|
committer | felixjohnny.thomasmathibalan <felixjohnny.thomasmathibalan@arm.com> | 2023-09-28 12:08:05 +0000 |
commit | afd38f0c617d6f89b2b4532c6c44f116617e2b6f (patch) | |
tree | 03bc7d5a762099989b16a656fa8d397b490ed70e /src/cpu/kernels/internal/CpuDepthwiseConv2dAssemblyWrapperKernel.cpp | |
parent | bdcb4c148ee2fdeaaddf4cf1e57bbb0de02bb894 (diff) | |
download | ComputeLibrary-afd38f0c617d6f89b2b4532c6c44f116617e2b6f.tar.gz |
Apply clang-format on repository
Code is formatted as per a revised clang format configuration
file (not part of this delivery). Version 14.0.6 is used.
Exclusion List:
- files with .cl extension
- files that are not strictly C/C++ (e.g. Android.bp, Sconscript ...)
And the following directories
- compute_kernel_writer/validation/
- tests/
- include/
- src/core/NEON/kernels/convolution/
- src/core/NEON/kernels/arm_gemm/
- src/core/NEON/kernels/arm_conv/
- data/
There will be a follow up for formatting of .cl files and the
files under tests/ and compute_kernel_writer/validation/.
Signed-off-by: Felix Thomasmathibalan <felixjohnny.thomasmathibalan@arm.com>
Change-Id: Ib7eb1fcf4e7537b9feaefcfc15098a804a3fde0a
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/10391
Benchmark: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Gunes Bayir <gunes.bayir@arm.com>
Diffstat (limited to 'src/cpu/kernels/internal/CpuDepthwiseConv2dAssemblyWrapperKernel.cpp')
-rw-r--r-- | src/cpu/kernels/internal/CpuDepthwiseConv2dAssemblyWrapperKernel.cpp | 165 |
1 file changed, 84 insertions, 81 deletions
diff --git a/src/cpu/kernels/internal/CpuDepthwiseConv2dAssemblyWrapperKernel.cpp b/src/cpu/kernels/internal/CpuDepthwiseConv2dAssemblyWrapperKernel.cpp index b503a8b734..32d9ca4eac 100644 --- a/src/cpu/kernels/internal/CpuDepthwiseConv2dAssemblyWrapperKernel.cpp +++ b/src/cpu/kernels/internal/CpuDepthwiseConv2dAssemblyWrapperKernel.cpp @@ -24,18 +24,17 @@ #include "src/cpu/kernels/internal/CpuDepthwiseConv2dAssemblyWrapperKernel.h" #include "arm_compute/core/Utils.h" -#include "arm_compute/core/Validate.h" #include "arm_compute/core/utils/misc/ShapeCalculator.h" #include "arm_compute/core/utils/quantization/AsymmHelpers.h" +#include "arm_compute/core/Validate.h" + #include "src/core/CPP/Validate.h" #include "src/core/helpers/AutoConfiguration.h" #include "src/core/helpers/WindowHelpers.h" -#include "src/core/utils/AssemblyUtils.h" - #include "src/core/NEON/kernels/assembly/depthwise.hpp" +#include "src/core/utils/AssemblyUtils.h" #include "depthwise_common.hpp" - #include <arm_neon.h> namespace arm_compute @@ -54,9 +53,13 @@ constexpr unsigned int idx_channels = 0; constexpr unsigned int idx_batches = 3; template <typename TSrc, typename TWeights, typename TDst> -void create_arm_dwc(const ITensorInfo *src, const ITensorInfo *weights, ITensorInfo *dst, - const ConvolutionInfo &info, const CPUInfo &cpu_info, - std::unique_ptr<arm_conv::depthwise::IDepthwiseCommon> &kernel, std::string &_name) +void create_arm_dwc(const ITensorInfo *src, + const ITensorInfo *weights, + ITensorInfo *dst, + const ConvolutionInfo &info, + const CPUInfo &cpu_info, + std::unique_ptr<arm_conv::depthwise::IDepthwiseCommon> &kernel, + std::string &_name) { unsigned int stride_cols{}; unsigned int stride_rows{}; @@ -79,13 +82,13 @@ void create_arm_dwc(const ITensorInfo *src, const ITensorInfo *weights, ITensorI const arm_gemm::Activation activation = assembly_utils::map_to_arm_gemm_activation(info.act_info); - arm_conv::depthwise::DepthwiseArgs args(&cpu_info, kernel_rows, kernel_cols, 
stride_rows, stride_cols, dilation_rows, dilation_cols, - n_batches, src_rows, src_cols, n_channels, dst_rows, dst_cols, info.depth_multiplier, - padding, activation, nullptr); + arm_conv::depthwise::DepthwiseArgs args(&cpu_info, kernel_rows, kernel_cols, stride_rows, stride_cols, + dilation_rows, dilation_cols, n_batches, src_rows, src_cols, n_channels, + dst_rows, dst_cols, info.depth_multiplier, padding, activation, nullptr); // Configure assembly pooling kernel auto dwc_kernel_asm = arm_conv::depthwise::depthwise<TSrc, TWeights, TDst>(args); - if(dwc_kernel_asm == nullptr) + if (dwc_kernel_asm == nullptr) { // Configuration not supported: Leave function unconfigured: return; @@ -96,11 +99,16 @@ void create_arm_dwc(const ITensorInfo *src, const ITensorInfo *weights, ITensorI } template <typename TSrc, typename TWeights, typename TDst> -void create_arm_dwc_quant(const ITensorInfo *src, const ITensorInfo *weights, ITensorInfo *dst, - const ConvolutionInfo &info, const CPUInfo &cpu_info, +void create_arm_dwc_quant(const ITensorInfo *src, + const ITensorInfo *weights, + ITensorInfo *dst, + const ConvolutionInfo &info, + const CPUInfo &cpu_info, std::unique_ptr<arm_conv::depthwise::IDepthwiseCommon> &kernel, - std::vector<int32_t> &multipliers, std::vector<int32_t> &right_shifts, std::vector<int32_t> &left_shifts, - std::string &_name) + std::vector<int32_t> &multipliers, + std::vector<int32_t> &right_shifts, + std::vector<int32_t> &left_shifts, + std::string &_name) { unsigned int stride_cols{}; unsigned int stride_rows{}; @@ -123,9 +131,9 @@ void create_arm_dwc_quant(const ITensorInfo *src, const ITensorInfo *weights, IT const arm_gemm::Activation activation = assembly_utils::map_to_arm_gemm_activation(info.act_info); - arm_conv::depthwise::DepthwiseArgs args(&cpu_info, kernel_rows, kernel_cols, stride_rows, stride_cols, dilation_rows, dilation_cols, - n_batches, src_rows, src_cols, n_channels, dst_rows, dst_cols, info.depth_multiplier, - padding, activation, 
nullptr); + arm_conv::depthwise::DepthwiseArgs args(&cpu_info, kernel_rows, kernel_cols, stride_rows, stride_cols, + dilation_rows, dilation_cols, n_batches, src_rows, src_cols, n_channels, + dst_rows, dst_cols, info.depth_multiplier, padding, activation, nullptr); const auto src_qinfo = src->quantization_info().uniform(); const auto weights_qinfo = weights->quantization_info(); @@ -135,64 +143,50 @@ void create_arm_dwc_quant(const ITensorInfo *src, const ITensorInfo *weights, IT multipliers.resize(num_filters); std::vector<int32_t> dst_shifts(num_filters); - quantization::compute_quantized_multipliers_and_shifts(src, - weights, - dst, - multipliers.data(), - dst_shifts.data()); + quantization::compute_quantized_multipliers_and_shifts(src, weights, dst, multipliers.data(), dst_shifts.data()); // Quantize activation bounds int32_t min_activation = std::numeric_limits<TSrc>::lowest(); int32_t max_activation = std::numeric_limits<TSrc>::max(); - if(info.act_info.enabled()) + if (info.act_info.enabled()) { - std::tie(min_activation, max_activation) = get_quantized_activation_min_max(info.act_info, src->data_type(), dst_qinfo); + std::tie(min_activation, max_activation) = + get_quantized_activation_min_max(info.act_info, src->data_type(), dst_qinfo); } // Set quantization parameters for assembly kernels arm_gemm::Requantize32 requant_args{}; - if(is_data_type_quantized_per_channel(weights->data_type())) + if (is_data_type_quantized_per_channel(weights->data_type())) { left_shifts.resize(num_filters); right_shifts.resize(num_filters); bool need_left_shift = false; // Select more optimized path if left shift is not needed - for(unsigned int i = 0; i < num_filters; ++i) + for (unsigned int i = 0; i < num_filters; ++i) { left_shifts[i] = std::max(-dst_shifts[i], static_cast<int32_t>(0)); right_shifts[i] = std::min(-dst_shifts[i], static_cast<int32_t>(0)); - if(dst_shifts[i] < 0 && !need_left_shift) + if (dst_shifts[i] < 0 && !need_left_shift) { need_left_shift = true; } } - 
requant_args = arm_gemm::Requantize32(nullptr, - 0, - src_qinfo.offset, - weights_qinfo.uniform().offset, - dst_qinfo.offset, - (need_left_shift) ? left_shifts.data() : nullptr, - right_shifts.data(), - multipliers.data(), - static_cast<TSrc>(min_activation), - static_cast<TSrc>(max_activation)); + requant_args = arm_gemm::Requantize32(nullptr, 0, src_qinfo.offset, weights_qinfo.uniform().offset, + dst_qinfo.offset, (need_left_shift) ? left_shifts.data() : nullptr, + right_shifts.data(), multipliers.data(), + static_cast<TSrc>(min_activation), static_cast<TSrc>(max_activation)); } else { - requant_args = arm_gemm::Requantize32(nullptr, - 0, - src_qinfo.offset, - weights_qinfo.uniform().offset, - dst_qinfo.offset, - -dst_shifts[0], - multipliers[0], - static_cast<TSrc>(min_activation), - static_cast<TSrc>(max_activation)); + requant_args = arm_gemm::Requantize32(nullptr, 0, src_qinfo.offset, weights_qinfo.uniform().offset, + dst_qinfo.offset, -dst_shifts[0], multipliers[0], + static_cast<TSrc>(min_activation), static_cast<TSrc>(max_activation)); } // Configure assembly pooling kernel with requantization - auto dwc_kernel_asm = arm_conv::depthwise::depthwise<TSrc, TWeights, TDst, arm_gemm::Requantize32>(args, requant_args); - if(dwc_kernel_asm == nullptr) + auto dwc_kernel_asm = + arm_conv::depthwise::depthwise<TSrc, TWeights, TDst, arm_gemm::Requantize32>(args, requant_args); + if (dwc_kernel_asm == nullptr) { // Configuration not supported: Leave function unconfigured: return; @@ -203,18 +197,18 @@ void create_arm_dwc_quant(const ITensorInfo *src, const ITensorInfo *weights, IT } // namespace CpuDepthwiseConv2dAssemblyWrapperKernel::CpuDepthwiseConv2dAssemblyWrapperKernel() - : _kernel_asm(nullptr), - _multipliers(), - _left_shifts(), - _right_shifts(), - _name() + : _kernel_asm(nullptr), _multipliers(), _left_shifts(), _right_shifts(), _name() { } CpuDepthwiseConv2dAssemblyWrapperKernel::~CpuDepthwiseConv2dAssemblyWrapperKernel() = default; -void 
CpuDepthwiseConv2dAssemblyWrapperKernel::configure(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *, ITensorInfo *dst, - const ConvolutionInfo &info, const CPUInfo &cpu_info) +void CpuDepthwiseConv2dAssemblyWrapperKernel::configure(const ITensorInfo *src, + const ITensorInfo *weights, + const ITensorInfo *, + ITensorInfo *dst, + const ConvolutionInfo &info, + const CPUInfo &cpu_info) { ARM_COMPUTE_UNUSED(cpu_info); ARM_COMPUTE_ERROR_ON_NULLPTR(src, weights, dst); @@ -225,24 +219,30 @@ void CpuDepthwiseConv2dAssemblyWrapperKernel::configure(const ITensorInfo *src, _name = "CpuDepthwiseConv2dAssemblyWrapperKernel"; std::string asm_kernel_name(""); #if defined(__aarch64__) - switch(src->data_type()) + switch (src->data_type()) { case DataType::QASYMM8: - if(is_data_type_quantized_per_channel(weights->data_type())) + if (is_data_type_quantized_per_channel(weights->data_type())) { - create_arm_dwc_quant<uint8_t, int8_t, uint8_t>(src, weights, dst, info, cpu_info, _kernel_asm, _multipliers, _right_shifts, _left_shifts, asm_kernel_name); + create_arm_dwc_quant<uint8_t, int8_t, uint8_t>(src, weights, dst, info, cpu_info, _kernel_asm, + _multipliers, _right_shifts, _left_shifts, + asm_kernel_name); } else { - create_arm_dwc_quant<uint8_t, uint8_t, uint8_t>(src, weights, dst, info, cpu_info, _kernel_asm, _multipliers, _right_shifts, _left_shifts, asm_kernel_name); + create_arm_dwc_quant<uint8_t, uint8_t, uint8_t>(src, weights, dst, info, cpu_info, _kernel_asm, + _multipliers, _right_shifts, _left_shifts, + asm_kernel_name); } break; case DataType::QASYMM8_SIGNED: - create_arm_dwc_quant<int8_t, int8_t, int8_t>(src, weights, dst, info, cpu_info, _kernel_asm, _multipliers, _right_shifts, _left_shifts, asm_kernel_name); + create_arm_dwc_quant<int8_t, int8_t, int8_t>(src, weights, dst, info, cpu_info, _kernel_asm, _multipliers, + _right_shifts, _left_shifts, asm_kernel_name); break; #if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) case DataType::F16: - 
create_arm_dwc<float16_t, float16_t, float16_t>(src, weights, dst, info, cpu_info, _kernel_asm, asm_kernel_name); + create_arm_dwc<float16_t, float16_t, float16_t>(src, weights, dst, info, cpu_info, _kernel_asm, + asm_kernel_name); break; #endif // defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) case DataType::F32: @@ -255,13 +255,17 @@ void CpuDepthwiseConv2dAssemblyWrapperKernel::configure(const ITensorInfo *src, Window win = calculate_max_window(*dst, Steps()); ICpuKernel::configure(win); - if(_kernel_asm != nullptr) + if (_kernel_asm != nullptr) { _name += "/" + asm_kernel_name; } } -Status CpuDepthwiseConv2dAssemblyWrapperKernel::validate(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *bias, const ITensorInfo *dst, const ConvolutionInfo &info) +Status CpuDepthwiseConv2dAssemblyWrapperKernel::validate(const ITensorInfo *src, + const ITensorInfo *weights, + const ITensorInfo *bias, + const ITensorInfo *dst, + const ConvolutionInfo &info) { ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, dst); @@ -269,10 +273,12 @@ Status CpuDepthwiseConv2dAssemblyWrapperKernel::validate(const ITensorInfo *src, ARM_COMPUTE_RETURN_ERROR_MSG("32-bit is not supported by assembly kernels"); #endif // !defined(__aarch64__) ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(src); - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::F16, DataType::F32); - ARM_COMPUTE_RETURN_ERROR_ON_MSG(src->data_layout() != DataLayout::NHWC, "Only NHWC is supported by assembly kernels"); + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED, + DataType::F16, DataType::F32); + ARM_COMPUTE_RETURN_ERROR_ON_MSG(src->data_layout() != DataLayout::NHWC, + "Only NHWC is supported by assembly kernels"); - if(is_data_type_quantized_per_channel(weights->data_type())) + if (is_data_type_quantized_per_channel(weights->data_type())) { 
ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(weights, 1, DataType::QSYMM8_PER_CHANNEL); ARM_COMPUTE_RETURN_ERROR_ON(weights->dimension(0) != weights->quantization_info().scale().size()); @@ -282,12 +288,12 @@ Status CpuDepthwiseConv2dAssemblyWrapperKernel::validate(const ITensorInfo *src, ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src, weights); } - if(bias != nullptr) + if (bias != nullptr) { ARM_COMPUTE_RETURN_ERROR_ON(bias->num_dimensions() > 1); ARM_COMPUTE_RETURN_ERROR_ON(bias->dimension(0) != weights->dimension(0)); - if(is_data_type_quantized(src->data_type())) + if (is_data_type_quantized(src->data_type())) { ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(bias, 1, DataType::S32); } @@ -297,7 +303,7 @@ Status CpuDepthwiseConv2dAssemblyWrapperKernel::validate(const ITensorInfo *src, } } - if(dst->total_size() > 0) + if (dst->total_size() > 0) { const TensorShape dst_shape = misc::shape_calculator::compute_depthwise_convolution_shape(*src, *weights, info); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(dst->tensor_shape(), dst_shape); @@ -305,17 +311,15 @@ Status CpuDepthwiseConv2dAssemblyWrapperKernel::validate(const ITensorInfo *src, } // Assembly kernels cannot work with padding greater than the kernel. 
- const auto &padding = info.pad_stride_info; - const auto &dilation = info.dilation; + const auto &padding = info.pad_stride_info; + const auto &dilation = info.dilation; const auto &wei_shape = weights->tensor_shape(); const auto dilated_wei_w = wei_shape[1] + (wei_shape[1] - 1) * (dilation.x() - 1); const auto dilated_wei_h = wei_shape[2] + (wei_shape[2] - 1) * (dilation.y() - 1); - ARM_COMPUTE_RETURN_ERROR_ON( - padding.pad_left() >= dilated_wei_w || padding.pad_right() >= dilated_wei_w || - padding.pad_top() >= dilated_wei_h || padding.pad_bottom() >= dilated_wei_h - ); + ARM_COMPUTE_RETURN_ERROR_ON(padding.pad_left() >= dilated_wei_w || padding.pad_right() >= dilated_wei_w || + padding.pad_top() >= dilated_wei_h || padding.pad_bottom() >= dilated_wei_h); return Status{}; } @@ -351,13 +355,12 @@ void CpuDepthwiseConv2dAssemblyWrapperKernel::run_op(ITensorPack &tensors, const const size_t ld_dst_row = ld_dst_col * (dst_shape[1] + dst_padding.top + dst_padding.bottom); const size_t ld_dst_batch = ld_dst_row * dst_shape[2]; - _kernel_asm->execute(src_ptr, ld_src_col, ld_src_row, ld_src_batch, - parameters_ptr, - dst_ptr, ld_dst_col, ld_dst_row, ld_dst_batch, - working_space, info.thread_id, info.num_threads); + _kernel_asm->execute(src_ptr, ld_src_col, ld_src_row, ld_src_batch, parameters_ptr, dst_ptr, ld_dst_col, ld_dst_row, + ld_dst_batch, working_space, info.thread_id, info.num_threads); } -void CpuDepthwiseConv2dAssemblyWrapperKernel::pack_parameters(void *parameters_ptr, void *bias_ptr, void *weights_ptr, size_t ld_weights_col, size_t ld_weight_row) +void CpuDepthwiseConv2dAssemblyWrapperKernel::pack_parameters( + void *parameters_ptr, void *bias_ptr, void *weights_ptr, size_t ld_weights_col, size_t ld_weight_row) { _kernel_asm->pack_parameters(parameters_ptr, bias_ptr, weights_ptr, ld_weights_col, ld_weight_row); } |