author    Georgios Pinitas <georgios.pinitas@arm.com>    2020-09-04 15:25:24 +0100
committer Michele Di Giorgio <michele.digiorgio@arm.com> 2020-09-08 13:58:36 +0000
commit    7d0adc602b3a3ff66184632fd388b25384a9bc99 (patch)
tree      cc314f8902eef0d037fd9353c3e01b2feca526ff
parent    e8f05da5fb919aa209e1bf0e5c70dd15fff84b7f (diff)
download  ComputeLibrary-7d0adc602b3a3ff66184632fd388b25384a9bc99.tar.gz
COMPMID-3151: Remove NEDepthwiseConvolutionLayer3x3Kernel
Prefer NEDepthwiseConvolutionLayerNativeKernel, as it works natively in NHWC and avoids the extra transformations to and from the NCHW domain.

Signed-off-by: Georgios Pinitas <georgios.pinitas@arm.com>
Change-Id: If5d8de11691b8ef7f4c3816941f87417d0c8646b
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/3930
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Michele Di Giorgio <michele.digiorgio@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
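For context, a minimal sketch of driving NEDepthwiseConvolutionLayer directly on NHWC tensors, the layout the native kernel handles without intermediate permutes. This is illustrative only: the shapes and sizes are hypothetical and F32 is assumed; with NHWC, TensorShape is ordered [C, W, H] and 3x3 depthwise weights are [IFM, 3, 3].

    #include "arm_compute/runtime/NEON/NEFunctions.h"
    #include "arm_compute/runtime/Tensor.h"

    using namespace arm_compute;

    int main()
    {
        Tensor src, weights, dst;

        // Hypothetical sizes; with NHWC the shape is ordered [C, W, H]
        TensorInfo src_info(TensorShape(16U, 32U, 32U), 1, DataType::F32);
        TensorInfo w_info(TensorShape(16U, 3U, 3U), 1, DataType::F32); // [IFM, 3, 3]
        TensorInfo dst_info(TensorShape(16U, 32U, 32U), 1, DataType::F32);
        src_info.set_data_layout(DataLayout::NHWC);
        w_info.set_data_layout(DataLayout::NHWC);
        dst_info.set_data_layout(DataLayout::NHWC);

        src.allocator()->init(src_info);
        weights.allocator()->init(w_info);
        dst.allocator()->init(dst_info);

        // Stride 1, pad 1 keeps the 32x32 spatial size under a 3x3 filter
        NEDepthwiseConvolutionLayer dwc;
        dwc.configure(&src, &weights, nullptr /* no bias */, &dst, PadStrideInfo(1, 1, 1, 1));

        src.allocator()->allocate();
        weights.allocator()->allocate();
        dst.allocator()->allocate();

        dwc.run(); // the first run() also triggers prepare(); see the .cpp changes below
        return 0;
    }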
-rw-r--r--  Android.bp                                                               1
-rw-r--r--  arm_compute/core/NEON/NEKernels.h                                        1
-rw-r--r--  arm_compute/core/NEON/kernels/NEDepthwiseConvolutionLayer3x3Kernel.h    96
-rw-r--r--  arm_compute/runtime/NEON/functions/NEDepthwiseConvolutionLayer.h        36
-rw-r--r--  docs/00_introduction.dox                                                12
-rw-r--r--  src/core/NEON/kernels/NEDepthwiseConvolutionLayer3x3Kernel.cpp         317
-rw-r--r--  src/runtime/NEON/functions/NEDepthwiseConvolutionLayer.cpp             230
7 files changed, 38 insertions(+), 655 deletions(-)
diff --git a/Android.bp b/Android.bp
index 5bf75500ea..490f58f6ee 100644
--- a/Android.bp
+++ b/Android.bp
@@ -261,7 +261,6 @@ cc_library_static {
"src/core/NEON/kernels/NEDepthConcatenateLayerKernel.cpp",
"src/core/NEON/kernels/NEDepthConvertLayerKernel.cpp",
"src/core/NEON/kernels/NEDepthToSpaceLayerKernel.cpp",
- "src/core/NEON/kernels/NEDepthwiseConvolutionLayer3x3Kernel.cpp",
"src/core/NEON/kernels/NEDepthwiseConvolutionLayerNativeKernel.cpp",
"src/core/NEON/kernels/NEDequantizationLayerKernel.cpp",
"src/core/NEON/kernels/NEDerivativeKernel.cpp",
diff --git a/arm_compute/core/NEON/NEKernels.h b/arm_compute/core/NEON/NEKernels.h
index f5d3aec27a..4d3e6633c9 100644
--- a/arm_compute/core/NEON/NEKernels.h
+++ b/arm_compute/core/NEON/NEKernels.h
@@ -54,7 +54,6 @@
#include "arm_compute/core/NEON/kernels/NEDepthConcatenateLayerKernel.h"
#include "arm_compute/core/NEON/kernels/NEDepthConvertLayerKernel.h"
#include "arm_compute/core/NEON/kernels/NEDepthToSpaceLayerKernel.h"
-#include "arm_compute/core/NEON/kernels/NEDepthwiseConvolutionLayer3x3Kernel.h"
#include "arm_compute/core/NEON/kernels/NEDepthwiseConvolutionLayerNativeKernel.h"
#include "arm_compute/core/NEON/kernels/NEDequantizationLayerKernel.h"
#include "arm_compute/core/NEON/kernels/NEDerivativeKernel.h"
diff --git a/arm_compute/core/NEON/kernels/NEDepthwiseConvolutionLayer3x3Kernel.h b/arm_compute/core/NEON/kernels/NEDepthwiseConvolutionLayer3x3Kernel.h
deleted file mode 100644
index 6712e9105a..0000000000
--- a/arm_compute/core/NEON/kernels/NEDepthwiseConvolutionLayer3x3Kernel.h
+++ /dev/null
@@ -1,96 +0,0 @@
-/*
- * Copyright (c) 2017-2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_NEDEPTHWISECONVOLUTIONKERNEL3x3_H
-#define ARM_COMPUTE_NEDEPTHWISECONVOLUTIONKERNEL3x3_H
-
-#include "arm_compute/core/NEON/INEKernel.h"
-
-namespace arm_compute
-{
-// Forward declarations
-class ITensor;
-
-/** Interface for the kernel to run a 3x3 depthwise convolution on a tensor. */
-class NEDepthwiseConvolutionLayer3x3Kernel : public INEKernel
-{
-public:
- const char *name() const override
- {
- return "NEDepthwiseConvolutionLayer3x3Kernel";
- }
- /** Default constructor */
- NEDepthwiseConvolutionLayer3x3Kernel();
- /** Prevent instances of this class from being copied (As this class contains pointers) */
- NEDepthwiseConvolutionLayer3x3Kernel(const NEDepthwiseConvolutionLayer3x3Kernel &) = delete;
- /** Prevent instances of this class from being copied (As this class contains pointers) */
- NEDepthwiseConvolutionLayer3x3Kernel &operator=(const NEDepthwiseConvolutionLayer3x3Kernel &) = delete;
- /** Default Move Constructor. */
- NEDepthwiseConvolutionLayer3x3Kernel(NEDepthwiseConvolutionLayer3x3Kernel &&) = default;
- /** Default move assignment operator */
- NEDepthwiseConvolutionLayer3x3Kernel &operator=(NEDepthwiseConvolutionLayer3x3Kernel &&) = default;
- /** Initialize the function's source, destination, conv and border_size.
- *
- * @note Supported data layouts: NCHW and NHWC
- *
- * @param[in] input Source tensor. DataType supported: QASYMM8/QASYMM8_SIGNED/F16/F32.
- * @param[in] weights Weights tensor. This is a 3D tensor with dimensions [3, 3, IFM] for NCHW or [IFM, 3, 3] if NHWC data layout. Data type supported: Same as @p input.
- * @param[out] output Destination tensor. Data type supported: Same as @p input.
- * @param[in] conv_info Padding and stride information to use for the convolution.
- * @param[in] depth_multiplier (Optional) Multiplier to apply to the input's depth in order to retrieve the output's depth. Defaults to 1.
- * @param[in] dilation (Optional) Dilation, in elements, across x and y. Defaults to (1, 1).
- *
- */
- void configure(const ITensor *input, const ITensor *weights, ITensor *output, const PadStrideInfo &conv_info, unsigned int depth_multiplier = 1, const Size2D &dilation = Size2D(1U, 1U));
- /** Static function to check if given info will lead to a valid configuration of @ref NEDepthwiseConvolutionLayer3x3Kernel
- *
- * @note Supported data layouts: NCHW and NHWC
- *
- * @param[in] input Source tensor info. DataType supported: QASYMM8/QASYMM8_SIGNED/F16/F32.
- * @param[in] weights Weights tensor info. This is a 3D tensor with dimensions [3, 3, IFM] for NCHW or [IFM, 3, 3] if NHWC data layout. Data type supported: Same as @p input.
- * @param[in] output Destination tensor info. Data type supported: Same as @p input.
- * @param[in] conv_info Padding and stride information to use for the convolution.
- * @param[in] depth_multiplier (Optional) Multiplier to apply to the input's depth in order to retrieve the output's depth. Defaults to 1.
- * @param[in] dilation (Optional) Dilation, in elements, across x and y. Defaults to (1, 1).
- *
- * @return a status
- */
- static Status validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *output, const PadStrideInfo &conv_info, unsigned int depth_multiplier = 1,
- const Size2D &dilation = Size2D(1U, 1U));
-
- // Inherited methods overridden:
- void run(const Window &window, const ThreadInfo &info) override;
- BorderSize border_size() const override;
-
-private:
- BorderSize _border_size;
- const ITensor *_input;
- ITensor *_output;
- const ITensor *_weights;
- PadStrideInfo _conv_info;
- unsigned int _num_elems_written_per_iteration;
- unsigned int _depth_multiplier;
- Size2D _dilation;
-};
-} // namespace arm_compute
-#endif /* ARM_COMPUTE_NEDEPTHWISECONVOLUTIONKERNEL3x3_H */
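For reference, a hedged sketch of how this now-removed kernel was typically driven (tensor setup and border filling via NEFillBorderKernel elided; NCHW F32 assumed): validate the tensor infos, configure, then hand the kernel to the scheduler, much as the function-level code removed later in this patch did.

    NEDepthwiseConvolutionLayer3x3Kernel kernel;
    Status s = NEDepthwiseConvolutionLayer3x3Kernel::validate(
        src.info(), weights.info(), dst.info(), PadStrideInfo(1, 1, 1, 1));
    if(s.error_code() == ErrorCode::OK)
    {
        kernel.configure(&src, &weights, &dst, PadStrideInfo(1, 1, 1, 1));
        NEScheduler::get().schedule(&kernel, Window::DimX);
    }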
diff --git a/arm_compute/runtime/NEON/functions/NEDepthwiseConvolutionLayer.h b/arm_compute/runtime/NEON/functions/NEDepthwiseConvolutionLayer.h
index 116ac16ce7..3b75bb11a7 100644
--- a/arm_compute/runtime/NEON/functions/NEDepthwiseConvolutionLayer.h
+++ b/arm_compute/runtime/NEON/functions/NEDepthwiseConvolutionLayer.h
@@ -24,7 +24,6 @@
#ifndef ARM_COMPUTE_NEDEPTHWISECONVOLUTION_H
#define ARM_COMPUTE_NEDEPTHWISECONVOLUTION_H
-#include "arm_compute/core/NEON/kernels/NEDepthwiseConvolutionLayer3x3Kernel.h"
#include "arm_compute/core/NEON/kernels/NEDepthwiseConvolutionLayerNativeKernel.h"
#include "arm_compute/core/NEON/kernels/NEDirectConvolutionLayerOutputStageKernel.h"
#include "arm_compute/core/NEON/kernels/NEFillBorderKernel.h"
@@ -171,41 +170,7 @@ private:
void prepare() override;
private:
- /** Configure the kernels/functions for the generic pipeline.
- *
- * @param[in, out] input Source tensor. Data type supported: QASYMM8/QASYMM8_SIGNED/F16/F32. (Written to only for border filling).
- * @param[in] weights Weights tensor. These are 3D tensors with shape [kernel_x, kernel_y, IFM]. Data type supported: Same as @p input.
- * @param[in] biases Biases tensor. A 1D tensor with shape [IFM]. Must be nullptr if not needed.
- * Data type supported: Same as @p input, S32 when input is QASYMM8/QASYMM8_SIGNED.
- * @param[out] output Destination tensor. Data type supported: same as @p input.
- * @param[in] conv_info Padding and stride information to use for the convolution.
- * @param[in] depth_multiplier Multiplier to apply to the input's depth in order to retrieve the output's depth. Defaults to 1.
- * @param[in] act_info Activation layer information in case of a fused activation.
- * @param[in] dilation (Optional) Dilation, in elements, across x and y. Defaults to (1, 1).
- *
- */
- void configure_generic(ITensor *input, const ITensor *weights, const ITensor *biases, ITensor *output, const PadStrideInfo &conv_info,
- unsigned int depth_multiplier, const ActivationLayerInfo &act_info, const Size2D &dilation = Size2D(1U, 1U));
- /** Configure the kernels/functions for the optimized pipeline.
- *
- * @param[in] input Source tensor. Data type supported: QASYMM8/QASYMM8_SIGNED/F16/F32. (Written to only for border filling).
- * @param[in] weights Weights tensor. These are 3D tensors with shape [kernel_x, kernel_y, IFM]. Data type supported: Same as @p input.
- * @param[in] biases Biases tensor. A 1D tensor with shape [IFM]. Must be nullptr if not needed.
- * Data type supported: Same as @p input, S32 when input is QASYMM8/QASYMM8_SIGNED.
- * @param[out] output Destination tensor. Data type supported: same as @p input.
- * @param[in] conv_info Padding and stride information to use for the convolution.
- * @param[in] depth_multiplier Multiplier to apply to the input's depth in order to retrieve the output's depth. Defaults to 1.
- * @param[in] act_info Activation layer information in case of a fused activation.
- */
- void configure_optimized(const ITensor *input, const ITensor *weights, const ITensor *biases, ITensor *output, const PadStrideInfo &conv_info,
- unsigned int depth_multiplier, const ActivationLayerInfo &act_info, const Size2D &dilation = Size2D(1U, 1U));
- /** Run generic kernel */
- void run_generic();
- /** Run optimized function */
- void run_optimized();
-
MemoryGroup _memory_group;
- NEDepthwiseConvolutionLayer3x3Kernel _dwc_kernel;
NEDepthwiseConvolutionAssemblyDispatch _dwc_optimized_func;
NEDirectConvolutionLayerOutputStageKernel _output_stage_kernel;
NEFillBorderKernel _border_handler;
@@ -220,7 +185,6 @@ private:
const ITensor *_original_weights;
bool _has_bias;
bool _is_quantized;
- bool _is_optimized;
bool _is_nchw;
bool _permute;
bool _is_activationlayer_enabled;
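With the generic pipeline removed, the public surface here reduces to configure()/validate()/run()/prepare(). A small sketch, assuming already-initialised tensors, of the validate-then-configure pattern that configure() now also enforces internally (see the .cpp diff below):

    Status s = NEDepthwiseConvolutionLayer::validate(
        src.info(), weights.info(), nullptr /* no bias */, dst.info(),
        PadStrideInfo(1, 1, 1, 1), 1 /* depth_multiplier */,
        ActivationLayerInfo(), Size2D(1U, 1U));
    if(s.error_code() == ErrorCode::OK)
    {
        NEDepthwiseConvolutionLayer dwc;
        dwc.configure(&src, &weights, nullptr, &dst, PadStrideInfo(1, 1, 1, 1));
        dwc.run();
    }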
diff --git a/docs/00_introduction.dox b/docs/00_introduction.dox
index 9db7e57cbe..0ba1d3ef0b 100644
--- a/docs/00_introduction.dox
+++ b/docs/00_introduction.dox
@@ -415,7 +415,7 @@ v20.02 Public major release
- @ref NEComparisonOperationKernel
- @ref NEConvolutionLayer
- @ref NEDepthwiseConvolutionLayer
- - @ref NEDepthwiseConvolutionLayer3x3Kernel
+ - NEDepthwiseConvolutionLayer3x3Kernel
- @ref NEDirectConvolutionLayerOutputStageKernel
- @ref NEElementwiseComparison
- @ref NEElementwiseMax
@@ -427,7 +427,7 @@ v20.02 Public major release
- @ref NEPoolingLayer
- @ref NEPReluLayer
- Added support for QSYMM8_PER_CHANNEL in:
- - @ref NEDepthwiseConvolutionLayer3x3Kernel
+ - NEDepthwiseConvolutionLayer3x3Kernel
- Added support for split sizes in:
- @ref CLSplit
- @ref NESplit
@@ -795,11 +795,11 @@ v18.11 Public major release
- @ref CLL2NormalizeLayer
- Added QASYMM8 support to the following kernels:
- @ref CLScaleKernel
- - @ref NEDepthwiseConvolutionLayer3x3Kernel
+ - NEDepthwiseConvolutionLayer3x3Kernel
- @ref CLPixelWiseMultiplicationKernel
- Added FP16 support to the following kernels:
- @ref CLDepthwiseConvolutionLayer3x3NHWCKernel
- - @ref NEDepthwiseConvolutionLayer3x3Kernel
+ - NEDepthwiseConvolutionLayer3x3Kernel
- @ref CLNormalizePlanarYUVLayerKernel
- @ref CLWinogradConvolutionLayer (5x5 kernel)
- More tests added to both validation and benchmarking suites.
@@ -950,7 +950,7 @@ v18.01 Public maintenance release
- Refactored NEON Winograd (NEWinogradLayerKernel)
- Added @ref NEDirectConvolutionLayerOutputStageKernel
- Added QASYMM8 support to the following NEON kernels:
- - @ref NEDepthwiseConvolutionLayer3x3Kernel
+ - NEDepthwiseConvolutionLayer3x3Kernel
- @ref NEFillBorderKernel
- @ref NEPoolingLayerKernel
- Added new examples:
@@ -994,7 +994,7 @@ v17.12 Public major release
- New NEON kernels / functions
- arm_compute::NEGEMMLowpAArch64A53Kernel / arm_compute::NEGEMMLowpAArch64Kernel / arm_compute::NEGEMMLowpAArch64V8P4Kernel / arm_compute::NEGEMMInterleavedBlockedKernel / arm_compute::NEGEMMLowpAssemblyMatrixMultiplyCore
- arm_compute::NEHGEMMAArch64FP16Kernel
- - @ref NEDepthwiseConvolutionLayer3x3Kernel / NEDepthwiseIm2ColKernel / NEGEMMMatrixVectorMultiplyKernel / NEDepthwiseVectorToTensorKernel / @ref NEDepthwiseConvolutionLayer
+ - NEDepthwiseConvolutionLayer3x3Kernel / NEDepthwiseIm2ColKernel / NEGEMMMatrixVectorMultiplyKernel / NEDepthwiseVectorToTensorKernel / @ref NEDepthwiseConvolutionLayer
- @ref NEGEMMLowpOffsetContributionKernel / @ref NEGEMMLowpMatrixAReductionKernel / @ref NEGEMMLowpMatrixBReductionKernel / @ref NEGEMMLowpMatrixMultiplyCore
- @ref NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel / @ref NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPoint
- NEWinogradLayer / NEWinogradLayerKernel
diff --git a/src/core/NEON/kernels/NEDepthwiseConvolutionLayer3x3Kernel.cpp b/src/core/NEON/kernels/NEDepthwiseConvolutionLayer3x3Kernel.cpp
deleted file mode 100644
index 134ebb0e41..0000000000
--- a/src/core/NEON/kernels/NEDepthwiseConvolutionLayer3x3Kernel.cpp
+++ /dev/null
@@ -1,317 +0,0 @@
-/*
- * Copyright (c) 2017-2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "arm_compute/core/NEON/kernels/NEDepthwiseConvolutionLayer3x3Kernel.h"
-#include "arm_compute/core/NEON/kernels/detail/NEDirectConvolutionDetail.h"
-
-#include "arm_compute/core/AccessWindowStatic.h"
-#include "arm_compute/core/CPP/Validate.h"
-#include "arm_compute/core/Coordinates.h"
-#include "arm_compute/core/Error.h"
-#include "arm_compute/core/Helpers.h"
-#include "arm_compute/core/ITensor.h"
-#include "arm_compute/core/NEON/INEKernel.h"
-#include "arm_compute/core/TensorInfo.h"
-#include "arm_compute/core/TensorShape.h"
-#include "arm_compute/core/Types.h"
-#include "arm_compute/core/Utils.h"
-#include "arm_compute/core/Validate.h"
-#include "arm_compute/core/Window.h"
-#include "arm_compute/core/utils/misc/ShapeCalculator.h"
-
-namespace arm_compute
-{
-namespace
-{
-template <typename T1, typename T2, unsigned int stridex>
-class convolver_3x3
-{
-public:
- static void convolve(const Window &window, unsigned int num_elems_written_per_iteration,
- const ITensor *input, const ITensor *weights, ITensor *output, const PadStrideInfo &conv_info, unsigned int depth_multiplier, const Size2D &dilation)
- {
- const int input_offset = -input->info()->quantization_info().uniform().offset;
- const int weights_offset = -weights->info()->quantization_info().uniform().offset;
-
- const int input_stride_x = input->info()->strides_in_bytes().x();
- const int input_stride_y = input->info()->strides_in_bytes().y();
- const int input_stride_z = input->info()->strides_in_bytes().z();
- const int input_stride_w = input->info()->strides_in_bytes()[3];
- const int output_stride_y = output->info()->strides_in_bytes().y();
- const int kernel_stride_y = weights->info()->strides_in_bytes().y();
- const int kernel_stride_z = weights->info()->strides_in_bytes().z();
- const int output_w = output->info()->dimension(0);
- const int output_h = output->info()->dimension(1);
- const int delta_input = detail::get_input_num_elems_processed(num_elems_written_per_iteration, stridex);
- const unsigned int conv_stride_y = std::get<1>(conv_info.stride());
- const unsigned int conv_pad_x = conv_info.pad_left();
- const unsigned int conv_pad_y = conv_info.pad_top();
-
- // setup output window for the iterator
- Window window_out = window;
- window_out.set(Window::DimX, Window::Dimension(0, output->info()->dimension(Window::DimX), output->info()->dimension(Window::DimX)));
- window_out.set(Window::DimY, Window::Dimension(0, output->info()->dimension(Window::DimY), output->info()->dimension(Window::DimY)));
-
- // setup input window for the iterator
- Window window_in = window;
- // Iteration of input is taken care of in execute_window_loop
- window_in.set(Window::DimX, Window::Dimension(0, 0, 0));
- window_in.set(Window::DimY, Window::Dimension(0, 0, 0));
- window_in.set(Window::DimZ, Window::Dimension(0, 0, 0));
-
- Window window_k = calculate_max_window(*weights->info(), Steps(1u));
-
- Iterator in(input, window_in);
- Iterator out(output, window_out);
- Iterator w(weights, window_k);
-
- const uint8_t *weights_ptr = w.ptr();
-
- execute_window_loop(window_out, [&](const Coordinates & id)
- {
- int ih = 0;
- int oh = 0;
-
- const uint8_t *input_ptr = in.ptr() - conv_pad_x * input_stride_x - conv_pad_y * input_stride_y + (id.z() / depth_multiplier) * input_stride_z + input_stride_w * id[3];
- const uint8_t *ptr_weights_base = weights_ptr + id.z() * kernel_stride_z;
-
- const auto ptr_weights_r0 = reinterpret_cast<const T1 *>(ptr_weights_base);
- const auto ptr_weights_r1 = reinterpret_cast<const T1 *>(ptr_weights_base + kernel_stride_y);
- const auto ptr_weights_r2 = reinterpret_cast<const T1 *>(ptr_weights_base + kernel_stride_y * 2);
- const auto vw_r0 = detail::load_matrix_row(ptr_weights_r0, weights_offset);
- const auto vw_r1 = detail::load_matrix_row(ptr_weights_r1, weights_offset);
- const auto vw_r2 = detail::load_matrix_row(ptr_weights_r2, weights_offset);
-
- for(ih = 0, oh = 0; oh < output_h; ++oh, ih += conv_stride_y)
- {
- auto in_top = reinterpret_cast<const T1 *>(input_ptr + (ih + 0) * input_stride_y);
- auto in_mid = reinterpret_cast<const T1 *>(input_ptr + (ih + dilation.y()) * input_stride_y);
- auto in_low = reinterpret_cast<const T1 *>(input_ptr + (ih + 2 * dilation.y()) * input_stride_y); // uint8/int8
- auto p_out = reinterpret_cast<T2 *>(out.ptr() + oh * output_stride_y); // int32
-
- for(int ow = 0; ow < output_w; ow += num_elems_written_per_iteration,
- in_top += delta_input, in_mid += delta_input, in_low += delta_input,
- p_out += num_elems_written_per_iteration)
- {
- if(dilation == Size2D(1U, 1U))
- {
- detail::convolve_3x3<false>(in_top, in_mid, in_low, p_out, vw_r0, vw_r1, vw_r2, stridex, input_offset);
- }
- else
- {
- auto vres = detail::convolve_3x3_dilation(in_top, in_mid, in_low, vw_r0, vw_r1, vw_r2, dilation.x(), stridex, input_offset);
- detail::store_results<stridex>(p_out, vres);
- }
- }
- }
- },
- out);
- }
-};
-
-template <typename T1, typename T2>
-inline void convolve_3x3(const Window &window, unsigned int num_elems_written_per_iteration,
- const ITensor *input, const ITensor *weights, ITensor *output,
- const PadStrideInfo &conv_info, unsigned int depth_multiplier, const Size2D &dilation)
-{
- const unsigned int conv_stride_x = std::get<0>(conv_info.stride());
- switch(conv_stride_x)
- {
- case 1:
- convolver_3x3<T1, T2, 1>::convolve(window, num_elems_written_per_iteration, input, weights, output, conv_info, depth_multiplier, dilation);
- break;
- case 2:
- convolver_3x3<T1, T2, 2>::convolve(window, num_elems_written_per_iteration, input, weights, output, conv_info, depth_multiplier, dilation);
- break;
- case 3:
- convolver_3x3<T1, T2, 3>::convolve(window, num_elems_written_per_iteration, input, weights, output, conv_info, depth_multiplier, dilation);
- break;
- default:
- ARM_COMPUTE_ERROR("Not implemented");
- }
-}
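As an aside, the switch above folds a runtime stride into a compile-time template parameter so that each specialisation can be compiled with fixed-stride vector code. A self-contained sketch of the same dispatch pattern, with illustrative names only:

    #include <cstdio>

    template <unsigned int stridex>
    struct Convolver
    {
        // A real specialisation would use stride-specific NEON loads/stores
        static void convolve() { std::printf("stride-x=%u specialisation\n", stridex); }
    };

    void dispatch(unsigned int stridex)
    {
        switch(stridex)
        {
            case 1: Convolver<1>::convolve(); break;
            case 2: Convolver<2>::convolve(); break;
            case 3: Convolver<3>::convolve(); break;
            default: break; // unsupported stride
        }
    }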
-
-Status validate_arguments(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *output, const PadStrideInfo &conv_info, unsigned int depth_multiplier, const Size2D &dilation)
-{
- ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(input);
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::F16, DataType::F32);
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, weights);
-
- const DataLayout data_layout = input->data_layout();
- const unsigned int width_idx = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
- const unsigned int height_idx = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
-
- ARM_COMPUTE_RETURN_ERROR_ON(weights->dimension(width_idx) != 3 || weights->dimension(height_idx) != 3);
- ARM_COMPUTE_RETURN_ERROR_ON(conv_info.stride().first < 1 || conv_info.stride().first > 3);
-
- if(output->total_size() != 0)
- {
- const TensorShape output_shape = misc::shape_calculator::compute_depthwise_convolution_shape(*input, *weights, conv_info, depth_multiplier, dilation);
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(output->tensor_shape(), output_shape);
-
- if(is_data_type_quantized_asymmetric(input->data_type()))
- {
- ARM_COMPUTE_RETURN_ERROR_ON(output->data_type() != DataType::S32);
- }
- else
- {
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
- }
- }
-
- return Status{};
-}
-
-std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, ITensorInfo *weights, ITensorInfo *output, const PadStrideInfo &conv_info, unsigned int depth_multiplier,
- const Size2D &dilation)
-{
- Window win;
- bool window_changed = false;
-
- // Get convolved dimensions
- const TensorShape output_shape = misc::shape_calculator::compute_depthwise_convolution_shape(*input, *weights, conv_info, depth_multiplier, dilation);
- const DataType output_dt = is_data_type_quantized_asymmetric(input->data_type()) ? DataType::S32 : input->data_type();
-
- // Output auto initialization if not yet initialized
- auto_init_if_empty(*output, input->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(output_shape).set_data_type(output_dt).set_quantization_info(output->quantization_info()));
-
- // Configure kernel window (generic)
- const unsigned int conv_stride_x = conv_info.stride().first;
- const unsigned int conv_stride_y = conv_info.stride().second;
- const unsigned int conv_pad_top = conv_info.pad_top();
- const unsigned int conv_pad_left = conv_info.pad_left();
-
- unsigned int num_elems_written_per_iteration = 16 >> conv_stride_x;
- unsigned int num_elems_read_per_iteration = 0;
-
- switch(input->data_type())
- {
- case DataType::QASYMM8:
- case DataType::QASYMM8_SIGNED:
- num_elems_read_per_iteration = 16 + 15 * (dilation.x() - 1);
- break;
-#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
- case DataType::F16:
- num_elems_written_per_iteration = 32 >> conv_stride_x;
- num_elems_read_per_iteration = 24 + 23 * (dilation.x() - 1);
- break;
-#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
- case DataType::F32:
- num_elems_read_per_iteration = 12 + 11 * (dilation.x() - 1);
- break;
- default:
- ARM_COMPUTE_ERROR("Data type not supported.");
- }
-
- // Configure kernel window
- win = calculate_max_window(*output, Steps(num_elems_written_per_iteration));
-
- AccessWindowRectangle input_access(input, -conv_pad_left, -conv_pad_top, num_elems_read_per_iteration, 3 + 2 * (dilation.y() - 1), conv_stride_x, conv_stride_y);
- AccessWindowStatic weights_access(weights, 0, 0, 3, 3);
- AccessWindowHorizontal output_access(output, 0, num_elems_written_per_iteration);
-
- window_changed = update_window_and_padding(win, input_access, weights_access, output_access);
- output_access.set_valid_region(win, ValidRegion(Coordinates(), output->tensor_shape()));
-
- Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
- return std::make_pair(err, win);
-}
-} // namespace
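The per-type read widths above grow linearly with x-dilation, since a dilated 3-tap filter spans a wider input row for the same number of outputs. A quick worked check of the F32 case, as a hypothetical helper:

    // reads(d) = 12 + 11 * (d - 1): dilation 1 -> 12, dilation 2 -> 23, dilation 3 -> 34
    unsigned int f32_reads_per_iteration(unsigned int dilation_x)
    {
        return 12 + 11 * (dilation_x - 1);
    }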
-
-NEDepthwiseConvolutionLayer3x3Kernel::NEDepthwiseConvolutionLayer3x3Kernel()
- : _border_size(0), _input(), _output(), _weights(), _conv_info(), _num_elems_written_per_iteration(0), _depth_multiplier(1), _dilation()
-{
-}
-
-BorderSize NEDepthwiseConvolutionLayer3x3Kernel::border_size() const
-{
- return _border_size;
-}
-
-void NEDepthwiseConvolutionLayer3x3Kernel::configure(const ITensor *input, const ITensor *weights, ITensor *output, const PadStrideInfo &conv_info, unsigned int depth_multiplier,
- const Size2D &dilation)
-{
- ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights, output);
- ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), weights->info(), output->info(), conv_info, depth_multiplier, dilation));
-
- _input = input;
- _output = output;
- _weights = weights;
- _conv_info = conv_info;
- _depth_multiplier = depth_multiplier;
- switch(input->info()->data_type())
- {
- case DataType::QASYMM8:
- case DataType::QASYMM8_SIGNED:
- case DataType::F32:
- _num_elems_written_per_iteration = 16 >> _conv_info.stride().first;
- break;
- case DataType::F16:
- _num_elems_written_per_iteration = 32 >> _conv_info.stride().first;
- break;
- default:
- ARM_COMPUTE_ERROR("Data type not supported.");
- }
- _border_size = BorderSize(_conv_info.pad_top(), _conv_info.pad_right(), _conv_info.pad_bottom(), _conv_info.pad_left());
- _dilation = dilation;
- auto win_config = validate_and_configure_window(_input->info(), _weights->info(), _output->info(), _conv_info, _depth_multiplier, dilation);
- ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
- INEKernel::configure(win_config.second);
-}
-
-Status NEDepthwiseConvolutionLayer3x3Kernel::validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *output, const PadStrideInfo &conv_info, unsigned int depth_multiplier,
- const Size2D &dilation)
-{
- ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, weights, output, conv_info, depth_multiplier, dilation));
- ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(input->clone().get(), weights->clone().get(), output->clone().get(), conv_info, depth_multiplier, dilation).first);
- return Status{};
-}
-
-void NEDepthwiseConvolutionLayer3x3Kernel::run(const Window &window, const ThreadInfo &info)
-{
- ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
- ARM_COMPUTE_UNUSED(info);
-
- switch(_input->info()->data_type())
- {
-#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
- case DataType::F16:
- convolve_3x3<float16_t, float16_t>(window, _num_elems_written_per_iteration, _input, _weights, _output, _conv_info, _depth_multiplier, _dilation);
- break;
-#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
- case DataType::F32:
- convolve_3x3<float, float>(window, _num_elems_written_per_iteration, _input, _weights, _output, _conv_info, _depth_multiplier, _dilation);
- break;
- case DataType::QASYMM8:
- convolve_3x3<uint8_t, int32_t>(window, _num_elems_written_per_iteration, _input, _weights, _output, _conv_info, _depth_multiplier, _dilation);
- break;
- case DataType::QASYMM8_SIGNED:
- convolve_3x3<int8_t, int32_t>(window, _num_elems_written_per_iteration, _input, _weights, _output, _conv_info, _depth_multiplier, _dilation);
- break;
- default:
- ARM_COMPUTE_ERROR("Not implemented");
- }
-}
-} // namespace arm_compute
diff --git a/src/runtime/NEON/functions/NEDepthwiseConvolutionLayer.cpp b/src/runtime/NEON/functions/NEDepthwiseConvolutionLayer.cpp
index cfdf2038b9..915a2830bf 100644
--- a/src/runtime/NEON/functions/NEDepthwiseConvolutionLayer.cpp
+++ b/src/runtime/NEON/functions/NEDepthwiseConvolutionLayer.cpp
@@ -58,24 +58,7 @@ Status validate_arguments_optimized(const ITensorInfo *input, const ITensorInfo
ARM_COMPUTE_RETURN_ERROR_ON(biases->dimension(0) != weights->dimension(channel_idx));
}
- const bool is_quantized = (!is_data_type_quantized_per_channel(weights->data_type())) && is_data_type_quantized_asymmetric(input->data_type());
-
- if(!NEDepthwiseConvolutionAssemblyDispatch::is_optimized_supported(input, weights, conv_info, depth_multiplier, dilation))
- {
- TensorInfo accumulator = TensorInfo(output->clone()->set_is_resizable(true).reset_padding().set_data_type(DataType::S32));
- ARM_COMPUTE_RETURN_ON_ERROR(NEDepthwiseConvolutionLayer3x3Kernel::validate(input, weights, is_quantized ? &accumulator : output, conv_info, depth_multiplier, dilation));
-
- if(is_quantized)
- {
- DirectConvolutionLayerOutputStageKernelInfo direct_conv_info;
- direct_conv_info.output_data_type = input->data_type();
- ARM_COMPUTE_RETURN_ON_ERROR(NEDirectConvolutionLayerOutputStageKernel::validate(&accumulator, biases, output, direct_conv_info));
- }
- }
- else
- {
- ARM_COMPUTE_RETURN_ON_ERROR(NEDepthwiseConvolutionAssemblyDispatch::validate(input, weights, biases, output, conv_info, depth_multiplier, act_info, dilation));
- }
+ ARM_COMPUTE_RETURN_ON_ERROR(NEDepthwiseConvolutionAssemblyDispatch::validate(input, weights, biases, output, conv_info, depth_multiplier, act_info, dilation));
//Validate Activation Layer
if(act_info.enabled())
@@ -87,117 +70,34 @@ Status validate_arguments_optimized(const ITensorInfo *input, const ITensorInfo
} // namespace
NEDepthwiseConvolutionLayer::NEDepthwiseConvolutionLayerOptimizedInternal::NEDepthwiseConvolutionLayerOptimizedInternal(std::shared_ptr<IMemoryManager> memory_manager)
- : _memory_group(memory_manager), _dwc_kernel(), _dwc_optimized_func(memory_manager), _output_stage_kernel(), _border_handler(), _permute_input(), _permute_weights(), _permute_output(),
- _activationlayer_function(), _accumulator(), _permuted_input(), _permuted_weights(), _permuted_output(), _original_weights(nullptr), _has_bias(false), _is_quantized(false), _is_optimized(false),
- _is_nchw(true), _permute(false), _is_activationlayer_enabled(false), _is_prepared(false)
+ : _memory_group(memory_manager), _dwc_optimized_func(memory_manager), _output_stage_kernel(), _border_handler(), _permute_input(), _permute_weights(), _permute_output(), _activationlayer_function(),
+ _accumulator(), _permuted_input(), _permuted_weights(), _permuted_output(), _original_weights(nullptr), _has_bias(false), _is_quantized(false), _is_nchw(true), _permute(false),
+ _is_activationlayer_enabled(false), _is_prepared(false)
{
}
-void NEDepthwiseConvolutionLayer::NEDepthwiseConvolutionLayerOptimizedInternal::configure_generic(ITensor *input,
- const ITensor *weights,
- const ITensor *biases,
- ITensor *output,
- const PadStrideInfo &conv_info,
- unsigned int depth_multiplier,
- const ActivationLayerInfo &act_info,
- const Size2D &dilation)
+void NEDepthwiseConvolutionLayer::NEDepthwiseConvolutionLayerOptimizedInternal::configure(ITensor *input,
+ const ITensor *weights,
+ const ITensor *biases,
+ ITensor *output, const PadStrideInfo &conv_info,
+ unsigned int depth_multiplier,
+ const ActivationLayerInfo &act_info,
+ const Size2D &dilation)
{
- ARM_COMPUTE_UNUSED(act_info);
-
- PixelValue zero_value(0.f);
-
- // Initialize the intermediate accumulator tensor in case of quantized input
- if(_is_quantized)
- {
- TensorShape accum_shape = output->info()->tensor_shape();
- DataLayout accum_layout = output->info()->data_layout();
- if(!_is_nchw)
- {
- permute(accum_shape, PermutationVector(1U, 2U, 0U));
- accum_layout = DataLayout::NCHW;
- }
-
- _memory_group.manage(&_accumulator);
- _accumulator.allocator()->init(TensorInfo(accum_shape, 1, DataType::S32, output->info()->quantization_info()));
- _accumulator.info()->set_data_layout(accum_layout);
- zero_value = PixelValue(static_cast<uint32_t>(input->info()->quantization_info().uniform().offset));
- }
-
- if(!_is_nchw)
- {
- _memory_group.manage(&_permuted_input);
- _memory_group.manage(&_permuted_output);
-
- // Configure the function to transform the input tensor from NHWC -> NCHW
- _permute_input.configure(input, &_permuted_input, PermutationVector(1U, 2U, 0U));
- _permuted_input.info()->set_data_layout(DataLayout::NCHW);
-
- // Configure the function to transform the weights tensor from HWI -> IHW
- _permute_weights.configure(weights, &_permuted_weights, PermutationVector(1U, 2U, 0U));
- _permuted_weights.info()->set_data_layout(DataLayout::NCHW);
- _permuted_output.info()->set_quantization_info(output->info()->quantization_info());
-
- // Configure depthwise
- _dwc_kernel.configure(&_permuted_input, &_permuted_weights, (_is_quantized) ? &_accumulator : &_permuted_output, conv_info, depth_multiplier, dilation);
-
- // Configure border handler
- _border_handler.configure(&_permuted_input, _dwc_kernel.border_size(), BorderMode::CONSTANT, zero_value);
-
- // Allocate tensors
- _permuted_input.allocator()->allocate();
- }
- else
- {
- // Configure depthwise convolution kernel
- _dwc_kernel.configure(input, weights, (_is_quantized) ? &_accumulator : output, conv_info, depth_multiplier, dilation);
-
- // Configure border handler
- _border_handler.configure(input, _dwc_kernel.border_size(), BorderMode::CONSTANT, zero_value);
- }
-
- // Configure biases accumulation
- if(_is_quantized)
- {
- const UniformQuantizationInfo iq_info = input->info()->quantization_info().uniform();
- const UniformQuantizationInfo wq_info = weights->info()->quantization_info().uniform();
- const UniformQuantizationInfo oq_info = (output->info()->total_size() == 0) ? iq_info : output->info()->quantization_info().uniform();
-
- float multiplier = (iq_info.scale * wq_info.scale) / oq_info.scale;
- int32_t output_multiplier;
- int32_t output_shift;
- quantization::calculate_quantized_multiplier(multiplier, &output_multiplier, &output_shift);
-
- DirectConvolutionLayerOutputStageKernelInfo direct_conv_info;
- direct_conv_info.result_fixedpoint_multiplier = output_multiplier;
- direct_conv_info.result_shift = output_shift;
- direct_conv_info.result_offset_after_shift = oq_info.offset;
- direct_conv_info.output_data_type = input->info()->data_type();
- _output_stage_kernel.configure(&_accumulator, biases, _is_nchw ? output : &_permuted_output, direct_conv_info);
- _accumulator.allocator()->allocate();
- }
- else if(_has_bias)
- {
- _output_stage_kernel.configure(_is_nchw ? output : &_permuted_output, biases);
- }
+ ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights, output);
+ // Perform validation step
+ ARM_COMPUTE_ERROR_THROW_ON(NEDepthwiseConvolutionLayerOptimizedInternal::validate(input->info(), weights->info(), (biases == nullptr) ? nullptr : biases->info(),
+ output->info(), conv_info, depth_multiplier, act_info, dilation));
- // Permute output
- if(!_is_nchw)
- {
- // Configure the function to transform the convoluted output to NHWC
- _permute_output.configure(&_permuted_output, output, PermutationVector(2U, 0U, 1U));
- _permuted_output.allocator()->allocate();
- }
-}
+ _original_weights = weights;
+ _is_quantized = is_data_type_quantized_asymmetric(input->info()->data_type());
+ _has_bias = biases != nullptr;
+ _is_nchw = input->info()->data_layout() == DataLayout::NCHW;
+ _permute = _is_nchw;
+ _is_prepared = false;
+ _is_activationlayer_enabled = act_info.enabled();
-void NEDepthwiseConvolutionLayer::NEDepthwiseConvolutionLayerOptimizedInternal::configure_optimized(const ITensor *input,
- const ITensor *weights,
- const ITensor *biases,
- ITensor *output,
- const PadStrideInfo &conv_info,
- unsigned int depth_multiplier,
- const ActivationLayerInfo &act_info,
- const Size2D &dilation)
-{
+ // Configure pipeline
ActivationLayerInfo act_info_to_use = ActivationLayerInfo();
const bool is_relu = arm_compute::utils::info_helpers::is_relu(act_info);
const bool is_relu6 = arm_compute::utils::info_helpers::is_relu6(act_info);
@@ -238,43 +138,6 @@ void NEDepthwiseConvolutionLayer::NEDepthwiseConvolutionLayerOptimizedInternal::
{
_dwc_optimized_func.configure(input, weights, biases, output, conv_info, depth_multiplier, act_info_to_use, dilation);
}
-}
-
-void NEDepthwiseConvolutionLayer::NEDepthwiseConvolutionLayerOptimizedInternal::configure(ITensor *input,
- const ITensor *weights,
- const ITensor *biases,
- ITensor *output, const PadStrideInfo &conv_info,
- unsigned int depth_multiplier,
- const ActivationLayerInfo &act_info,
- const Size2D &dilation)
-{
- ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights, output);
- // Perform validation step
- ARM_COMPUTE_ERROR_THROW_ON(NEDepthwiseConvolutionLayerOptimizedInternal::validate(input->info(), weights->info(), (biases == nullptr) ? nullptr : biases->info(),
- output->info(), conv_info, depth_multiplier, act_info, dilation));
-
- _original_weights = weights;
- _is_quantized = is_data_type_quantized_asymmetric(input->info()->data_type());
- _has_bias = biases != nullptr;
- _is_optimized = NEDepthwiseConvolutionAssemblyDispatch::is_optimized_supported(input->info(),
- weights->info(),
- conv_info,
- depth_multiplier,
- dilation);
- _is_nchw = input->info()->data_layout() == DataLayout::NCHW;
- _permute = _is_optimized == _is_nchw;
- _is_prepared = false;
- _is_activationlayer_enabled = act_info.enabled();
-
- // Configure appropriate pipeline
- if(_is_optimized)
- {
- configure_optimized(input, weights, biases, output, conv_info, depth_multiplier, act_info, dilation);
- }
- else
- {
- configure_generic(input, weights, biases, output, conv_info, depth_multiplier, act_info, dilation);
- }
// Configure activation
if(_is_activationlayer_enabled)
@@ -295,29 +158,18 @@ Status NEDepthwiseConvolutionLayer::NEDepthwiseConvolutionLayerOptimizedInternal
return validate_arguments_optimized(input, weights, biases, output, conv_info, depth_multiplier, act_info, dilation);
}
-void NEDepthwiseConvolutionLayer::NEDepthwiseConvolutionLayerOptimizedInternal::run_generic()
+void NEDepthwiseConvolutionLayer::NEDepthwiseConvolutionLayerOptimizedInternal::run()
{
- // Fill border
- NEScheduler::get().schedule(&_border_handler, Window::DimX);
-
- // Execute depthwise convolution
- NEScheduler::get().schedule(&_dwc_kernel, Window::DimX);
+ prepare();
- // Add biases
- if(_has_bias || _is_quantized)
- {
- NEScheduler::get().schedule(&_output_stage_kernel, Window::DimX);
- }
+ MemoryGroupResourceScope scope_mg(_memory_group);
- // Permute output
- if(!_is_nchw)
+ // Permute input
+ if(_permute)
{
- _permute_output.run();
+ _permute_input.run();
}
-}
-void NEDepthwiseConvolutionLayer::NEDepthwiseConvolutionLayerOptimizedInternal::run_optimized()
-{
// Run assembly function
_dwc_optimized_func.run();
@@ -326,21 +178,6 @@ void NEDepthwiseConvolutionLayer::NEDepthwiseConvolutionLayerOptimizedInternal::
{
_permute_output.run();
}
-}
-
-void NEDepthwiseConvolutionLayer::NEDepthwiseConvolutionLayerOptimizedInternal::run()
-{
- prepare();
-
- MemoryGroupResourceScope scope_mg(_memory_group);
-
- // Permute input
- if(_permute)
- {
- _permute_input.run();
- }
-
- _is_optimized ? run_optimized() : run_generic();
// Run activation
if(_is_activationlayer_enabled)
@@ -362,13 +199,10 @@ void NEDepthwiseConvolutionLayer::NEDepthwiseConvolutionLayerOptimizedInternal::
}
// Prepare optimized function
- if(_is_optimized)
+ _dwc_optimized_func.prepare();
+ if(!_permuted_weights.is_used())
{
- _dwc_optimized_func.prepare();
- if(!_permuted_weights.is_used())
- {
- _permuted_weights.allocator()->free();
- }
+ _permuted_weights.allocator()->free();
}
_is_prepared = true;