author     Manuel Bottini <manuel.bottini@arm.com>    2021-04-13 13:09:30 +0100
committer  Manuel Bottini <manuel.bottini@arm.com>    2021-04-14 14:21:50 +0000
commit     327225d3b2f716d5c62d801a7fafc7d377521f34 (patch)
tree       c19125b74a5ddf9a63e165cbffa7a85b01c7aff1 /src/runtime
parent     21c28957f9c6fe1a28ef934e711bb7474b8d65ee (diff)
Port NEDirectConvolutionLayer to new API
Partially resolves: COMPMID-4009

Change-Id: I19ffb61c5c4541134a5028677d2d81228740e454
Signed-off-by: Manuel Bottini <manuel.bottini@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/5419
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: SiCong Li <sicong.li@arm.com>
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
Reviewed-by: Michele Di Giorgio <michele.digiorgio@arm.com>
Diffstat (limited to 'src/runtime')
-rw-r--r--  src/runtime/NEON/functions/NEDirectConvolutionLayer.cpp | 109
-rw-r--r--  src/runtime/cpu/operators/CpuDirectConvolution.cpp      | 147
-rw-r--r--  src/runtime/cpu/operators/CpuDirectConvolution.h        | 121
3 files changed, 293 insertions, 84 deletions
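
The public NEDirectConvolutionLayer interface is unchanged by this port; only the implementation moves into the new cpu::CpuDirectConvolution operator, as the diff below shows. A minimal caller-side sketch for reference, assuming a 3x3 F32 configuration (shapes, strides and includes are illustrative, not part of this change):

#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/NEON/functions/NEDirectConvolutionLayer.h"
#include "arm_compute/runtime/Tensor.h"

using namespace arm_compute;

// Hypothetical caller-side example; the public API is the same before and after the port.
int main()
{
    Tensor src, weights, bias, dst;
    src.allocator()->init(TensorInfo(TensorShape(32U, 32U, 3U), 1, DataType::F32));
    weights.allocator()->init(TensorInfo(TensorShape(3U, 3U, 3U, 8U), 1, DataType::F32));
    bias.allocator()->init(TensorInfo(TensorShape(8U), 1, DataType::F32));
    dst.allocator()->init(TensorInfo(TensorShape(30U, 30U, 8U), 1, DataType::F32));

    // Configure before allocating so the function can set up its kernels
    NEDirectConvolutionLayer conv{};
    conv.configure(&src, &weights, &bias, &dst, PadStrideInfo(1, 1, 0, 0));

    src.allocator()->allocate();
    weights.allocator()->allocate();
    bias.allocator()->allocate();
    dst.allocator()->allocate();

    conv.run();
    return 0;
}
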
diff --git a/src/runtime/NEON/functions/NEDirectConvolutionLayer.cpp b/src/runtime/NEON/functions/NEDirectConvolutionLayer.cpp
index a953edc78f..73834381c6 100644
--- a/src/runtime/NEON/functions/NEDirectConvolutionLayer.cpp
+++ b/src/runtime/NEON/functions/NEDirectConvolutionLayer.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 Arm Limited.
+ * Copyright (c) 2017-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -27,107 +27,48 @@
#include "arm_compute/core/Utils.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/runtime/NEON/NEScheduler.h"
-#include "src/core/NEON/kernels/NEDirectConvolutionLayerKernel.h"
-#include "src/core/NEON/kernels/NEDirectConvolutionLayerOutputStageKernel.h"
-#include "src/core/NEON/kernels/NEFillBorderKernel.h"
+#include "src/runtime/cpu/operators/CpuDirectConvolution.h"
namespace arm_compute
{
-NEDirectConvolutionLayer::~NEDirectConvolutionLayer() = default;
+struct NEDirectConvolutionLayer::Impl
+{
+ ITensor *src{ nullptr };
+ const ITensor *weights{ nullptr };
+ const ITensor *bias{ nullptr };
+ ITensor *dst{ nullptr };
+ std::unique_ptr<cpu::CpuDirectConvolution> op{ nullptr };
+};
NEDirectConvolutionLayer::NEDirectConvolutionLayer(std::shared_ptr<IMemoryManager> memory_manager)
- : _memory_group(std::move(memory_manager)), _output_stage_kernel(), _conv_kernel(), _input_border_handler(), _activationlayer_function(), _accumulator(), _has_bias(false),
- _is_activationlayer_enabled(false), _dim_split(Window::DimZ), _is_padding_required()
+ : _memory_manager(std::move(memory_manager)), _impl(std::make_unique<Impl>())
{
}
+NEDirectConvolutionLayer::~NEDirectConvolutionLayer() = default;
void NEDirectConvolutionLayer::configure(ITensor *input, const ITensor *weights, const ITensor *bias, ITensor *output, const PadStrideInfo &conv_info, const ActivationLayerInfo &act_info)
{
- ARM_COMPUTE_ERROR_ON(input->info()->data_layout() == DataLayout::UNKNOWN);
- _output_stage_kernel = std::make_unique<NEDirectConvolutionLayerOutputStageKernel>();
- _conv_kernel = std::make_unique<NEDirectConvolutionLayerKernel>();
- _input_border_handler = std::make_unique<NEFillBorderKernel>();
-
- // Free accumulator
- if(_accumulator.buffer() != nullptr)
- {
- _accumulator.allocator()->free();
- }
-
- _dim_split = input->info()->data_layout() == DataLayout::NCHW ? Window::DimZ : Window::DimY;
-
- // Check if bias should be added in the convolution result
- _has_bias = (bias != nullptr);
-
- _conv_kernel->configure(input, weights, output, conv_info);
- if(_has_bias)
- {
- _output_stage_kernel->configure(output, bias);
- }
- _is_padding_required = !_conv_kernel->border_size().empty();
-
- if(_is_padding_required)
- {
- // Add zero padding XY
- _input_border_handler->configure(input, _conv_kernel->border_size(), BorderMode::CONSTANT, PixelValue(static_cast<float>(0.f)));
- }
-
- //Configure Activation Layer
- _is_activationlayer_enabled = act_info.enabled();
- if(_is_activationlayer_enabled)
- {
- _activationlayer_function.configure(output, nullptr, act_info);
- }
+ _impl->src = input;
+ _impl->weights = weights;
+ _impl->bias = bias;
+ _impl->dst = output;
+ _impl->op = std::make_unique<cpu::CpuDirectConvolution>(_memory_manager);
+ _impl->op->configure(input->info(), weights->info(), (bias != nullptr ? bias->info() : nullptr), output->info(), conv_info, act_info);
}
Status NEDirectConvolutionLayer::validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *bias, const ITensorInfo *output, const PadStrideInfo &conv_info,
const ActivationLayerInfo &act_info)
{
- ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, weights, output);
-
- // output might not be initialized since it can be an intermediate tensor of another layer
- DataType data_type = input->data_type();
- TensorInfo accumulator(output->clone()->set_is_resizable(true).reset_padding().set_data_type(data_type));
-
- // Validate Convolution kernel
- ARM_COMPUTE_RETURN_ON_ERROR(NEDirectConvolutionLayerKernel::validate(input, weights, &accumulator, conv_info));
-
- if(bias != nullptr)
- {
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(weights, bias);
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(bias->dimension(0) != weights->dimension(3),
- "Biases size and number of input feature maps should match");
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(bias->num_dimensions() > 1, "Biases should be one dimensional");
- }
-
- // Validate bias kernel
- ARM_COMPUTE_RETURN_ON_ERROR(NEDirectConvolutionLayerOutputStageKernel::validate(&accumulator, bias, output));
-
- if(act_info.enabled())
- {
- ARM_COMPUTE_RETURN_ON_ERROR(NEActivationLayer::validate(output, nullptr, act_info));
- }
-
- return Status{};
+ return cpu::CpuDirectConvolution::validate(input, weights, bias, output, conv_info, act_info);
}
void NEDirectConvolutionLayer::run()
{
- MemoryGroupResourceScope scope_mg(_memory_group);
-
- if(_is_padding_required)
- {
- NEScheduler::get().schedule(_input_border_handler.get(), Window::DimZ);
- }
- NEScheduler::get().schedule(_conv_kernel.get(), _dim_split);
- if(_has_bias)
- {
- NEScheduler::get().schedule(_output_stage_kernel.get(), Window::DimY);
- }
-
- if(_is_activationlayer_enabled)
- {
- _activationlayer_function.run();
- }
+ ITensorPack pack;
+ pack.add_tensor(TensorType::ACL_SRC_0, _impl->src);
+ pack.add_tensor(TensorType::ACL_SRC_1, _impl->weights);
+ pack.add_tensor(TensorType::ACL_SRC_2, _impl->bias);
+ pack.add_tensor(TensorType::ACL_DST, _impl->dst);
+ _impl->op->run(pack);
}
} // namespace arm_compute
diff --git a/src/runtime/cpu/operators/CpuDirectConvolution.cpp b/src/runtime/cpu/operators/CpuDirectConvolution.cpp
new file mode 100644
index 0000000000..33f79603e8
--- /dev/null
+++ b/src/runtime/cpu/operators/CpuDirectConvolution.cpp
@@ -0,0 +1,147 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "src/runtime/cpu/operators/CpuDirectConvolution.h"
+
+#include "arm_compute/core/PixelValue.h"
+#include "arm_compute/core/Utils.h"
+#include "arm_compute/core/Validate.h"
+#include "arm_compute/runtime/NEON/NEScheduler.h"
+
+namespace arm_compute
+{
+namespace cpu
+{
+CpuDirectConvolution::~CpuDirectConvolution() = default;
+
+CpuDirectConvolution::CpuDirectConvolution(std::shared_ptr<IMemoryManager> memory_manager)
+ : _memory_group(std::move(memory_manager)), _output_stage_kernel(), _conv_kernel(), _input_border_handler(), _activationlayer_function(), _accumulator(), _has_bias(false),
+ _is_activationlayer_enabled(false), _dim_split(Window::DimZ), _is_padding_required()
+{
+}
+
+void CpuDirectConvolution::configure(ITensorInfo *src, ITensorInfo *weights, const ITensorInfo *bias, ITensorInfo *dst, const PadStrideInfo &conv_info, const ActivationLayerInfo &act_info)
+{
+ ARM_COMPUTE_ERROR_ON(src->data_layout() == DataLayout::UNKNOWN);
+ _output_stage_kernel = std::make_unique<kernels::CpuDirectConvolutionOutputStageKernel>();
+ _conv_kernel = std::make_unique<kernels::CpuDirectConvolutionKernel>();
+ _input_border_handler = std::make_unique<NEFillBorderKernel>();
+
+ // Free accumulator
+ if(_accumulator.buffer() != nullptr)
+ {
+ _accumulator.allocator()->free();
+ }
+
+ _dim_split = src->data_layout() == DataLayout::NCHW ? Window::DimZ : Window::DimY;
+
+ // Check if bias should be added in the convolution result
+ _has_bias = (bias != nullptr);
+
+ _conv_kernel->configure(src, weights, dst, conv_info);
+ if(_has_bias)
+ {
+ _output_stage_kernel->configure(dst, bias);
+ }
+ _is_padding_required = !_conv_kernel->border_size().empty();
+
+ if(_is_padding_required)
+ {
+ // Add zero padding XY
+ _input_border_handler->configure(src, _conv_kernel->border_size(), BorderMode::CONSTANT, PixelValue(static_cast<float>(0.f)));
+ }
+
+ //Configure Activation Layer
+ _is_activationlayer_enabled = act_info.enabled();
+ if(_is_activationlayer_enabled)
+ {
+ _activationlayer_function = std::make_unique<CpuActivation>();
+ _activationlayer_function->configure(dst, dst, act_info);
+ }
+}
+
+Status CpuDirectConvolution::validate(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *bias, const ITensorInfo *dst, const PadStrideInfo &conv_info,
+ const ActivationLayerInfo &act_info)
+{
+ ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, weights, dst);
+
+ // output might not be initialized since it can be an intermediate tensor of another layer
+ DataType data_type = src->data_type();
+ TensorInfo accumulator(dst->clone()->set_is_resizable(true).reset_padding().set_data_type(data_type));
+
+ // Validate Convolution kernel
+ ARM_COMPUTE_RETURN_ON_ERROR(kernels::CpuDirectConvolutionKernel::validate(src, weights, &accumulator, conv_info));
+
+ if(bias != nullptr)
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(weights, bias);
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(bias->dimension(0) != weights->dimension(3),
+ "Biases size and number of input feature maps should match");
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(bias->num_dimensions() > 1, "Biases should be one dimensional");
+ }
+
+ // Validate bias kernel
+ ARM_COMPUTE_RETURN_ON_ERROR(kernels::CpuDirectConvolutionOutputStageKernel::validate(&accumulator, bias, dst));
+
+ if(act_info.enabled())
+ {
+ ARM_COMPUTE_RETURN_ON_ERROR(CpuActivation::validate(dst, nullptr, act_info));
+ }
+
+ return Status{};
+}
+
+void CpuDirectConvolution::run(ITensorPack &tensors)
+{
+ MemoryGroupResourceScope scope_mg(_memory_group);
+
+ auto src = tensors.get_tensor(TensorType::ACL_SRC_0);
+ auto bias = tensors.get_const_tensor(TensorType::ACL_SRC_2);
+ auto dst = tensors.get_tensor(TensorType::ACL_DST);
+
+ if(_is_padding_required)
+ {
+ ITensorPack pack;
+ pack.add_tensor(TensorType::ACL_SRC_DST, src);
+ NEScheduler::get().schedule_op(_input_border_handler.get(), Window::DimZ, _input_border_handler->window(), pack);
+ }
+ NEScheduler::get().schedule_op(_conv_kernel.get(), _dim_split, _conv_kernel->window(), tensors);
+ if(_has_bias)
+ {
+ ITensorPack pack;
+ pack.add_tensor(TensorType::ACL_SRC_0, dst);
+ pack.add_tensor(TensorType::ACL_SRC_1, bias);
+ pack.add_tensor(TensorType::ACL_DST, dst);
+ NEScheduler::get().schedule_op(_output_stage_kernel.get(), Window::DimY, _output_stage_kernel->window(), pack);
+ }
+
+ if(_is_activationlayer_enabled)
+ {
+ ITensorPack pack;
+ pack.add_tensor(TensorType::ACL_SRC, dst);
+ pack.add_tensor(TensorType::ACL_DST, dst);
+ _activationlayer_function->run(pack);
+ }
+}
+} // namespace cpu
+} // namespace arm_compute
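
Because validate() above works purely on ITensorInfo metadata, it can serve as a pre-flight check before any memory is allocated. A minimal sketch, assuming a 3x3 F32 configuration (shapes and includes are illustrative only; the operator header is library-internal):

#include "arm_compute/core/Error.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
#include "src/runtime/cpu/operators/CpuDirectConvolution.h"

using namespace arm_compute;

// Hypothetical pre-flight check on metadata only; shapes are illustrative.
bool direct_conv_supported()
{
    const TensorInfo src(TensorShape(32U, 32U, 3U), 1, DataType::F32);
    const TensorInfo weights(TensorShape(3U, 3U, 3U, 8U), 1, DataType::F32);
    const TensorInfo bias(TensorShape(8U), 1, DataType::F32);
    const TensorInfo dst(TensorShape(30U, 30U, 8U), 1, DataType::F32);

    const Status status = cpu::CpuDirectConvolution::validate(&src, &weights, &bias, &dst, PadStrideInfo(1, 1, 0, 0));
    return status.error_code() == ErrorCode::OK;
}
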
diff --git a/src/runtime/cpu/operators/CpuDirectConvolution.h b/src/runtime/cpu/operators/CpuDirectConvolution.h
new file mode 100644
index 0000000000..0635e087fd
--- /dev/null
+++ b/src/runtime/cpu/operators/CpuDirectConvolution.h
@@ -0,0 +1,121 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_CPU_DIRECTCONVOLUTION_H
+#define ARM_COMPUTE_CPU_DIRECTCONVOLUTION_H
+
+#include "arm_compute/core/ITensorInfo.h"
+#include "arm_compute/core/Types.h"
+#include "arm_compute/core/experimental/Types.h"
+#include "arm_compute/runtime/IMemoryManager.h"
+#include "arm_compute/runtime/MemoryGroup.h"
+#include "arm_compute/runtime/NEON/functions/NEActivationLayer.h"
+#include "arm_compute/runtime/Tensor.h"
+#include "src/core/NEON/kernels/NEFillBorderKernel.h"
+#include "src/core/cpu/ICpuKernel.h"
+#include "src/core/cpu/kernels/CpuDirectConvolutionKernel.h"
+#include "src/core/cpu/kernels/CpuDirectConvolutionOutputStageKernel.h"
+#include "src/runtime/cpu/ICpuOperator.h"
+#include "src/runtime/cpu/operators/CpuActivation.h"
+
+#include <memory>
+
+namespace arm_compute
+{
+namespace cpu
+{
+/** Function to run the direct convolution.
+ *
+ * This function calls the following kernels:
+ *
+ * -# @ref NEFillBorderKernel for the input
+ * -# @ref kernels::CpuDirectConvolutionOutputStageKernel
+ * -# @ref kernels::CpuDirectConvolutionKernel
+ */
+class CpuDirectConvolution : public ICpuOperator
+{
+public:
+ /** Constructor */
+ CpuDirectConvolution(std::shared_ptr<IMemoryManager> memory_manager = nullptr);
+ /** Destructor */
+ ~CpuDirectConvolution();
+ /** Set the input, weights, biases and output tensors.
+ *
+ * @note: DirectConvolution only works in the following configurations:
+ * 1x1 convolution with stride_x = 1/2/3, stride_y = 1/2/3 data type = F16/F32
+ * 3x3 convolution with stride_x = 1/2/3, stride_y = 1/2/3 data type = F16/F32
+ * 5x5 convolution with stride_x = 1/2/3, stride_y = 1/2/3 data type = F32
+ *
+ * @param[in, out] src Input tensor info. Data types supported: F16/F32.
+ * @param[in] weights Set of kernels to convolve the input volume.
+ * Supported sizes: 1x1, 3x3 and 5x5.
+ * The 3rd dimension must be the same as the input's volume 3rd dimension.
+ * Data type supported: Same as @p src.
+ * @param[in] bias Set of biases. Can be nullptr. Data type supported: Same as @p src.
+ * @param[out] dst Output tensor info.
+ * The 3rd dimension must be equal to the 4th dimension of the @p weights tensor. Data types supported: Same as @p src.
+ * @param[in] conv_info Contains padding and stride information described in @ref PadStrideInfo.
+ * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
+ */
+ void configure(ITensorInfo *src, ITensorInfo *weights, const ITensorInfo *bias, ITensorInfo *dst, const PadStrideInfo &conv_info, const ActivationLayerInfo &act_info = ActivationLayerInfo());
+ /** Static function to check if given info will lead to a valid configuration of @ref CpuDirectConvolution
+ *
+ * @note: DirectConvolution only works in the following configurations:
+ * 1x1 convolution with stride_x = 1/2/3, stride_y = 1/2/3 data type = F16/F32
+ * 3x3 convolution with stride_x = 1/2/3, stride_y = 1/2/3 data type = F16/F32
+ * 5x5 convolution with stride_x = 1/2/3, stride_y = 1/2/3 data type = F32
+ *
+ * @param[in] src Input tensor info. Data types supported: F16/F32.
+ * @param[in] weights Set of kernels to convolve the input volume.
+ * Supported sizes: 1x1, 3x3 and 5x5.
+ * The 3rd dimension must be the same as the input's volume 3rd dimension.
+ * Data type supported: Same as @p src.
+ * @param[in] bias Set of biases. Can be nullptr. Data type supported: Same as @p src.
+ * @param[in] dst Output tensor info.
+ * The 3rd dimension must be equal to the 4th dimension of the @p weights tensor. Data types supported: Same as @p src.
+ * @param[in] conv_info Contains padding and stride information described in @ref PadStrideInfo.
+ * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
+ *
+ * @return a status
+ */
+ static Status validate(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *bias, const ITensorInfo *dst, const PadStrideInfo &conv_info,
+ const ActivationLayerInfo &act_info = ActivationLayerInfo());
+
+ // Inherited methods overridden:
+ void run(ITensorPack &tensors) override;
+
+private:
+ MemoryGroup _memory_group;
+ std::unique_ptr<kernels::CpuDirectConvolutionOutputStageKernel> _output_stage_kernel;
+ std::unique_ptr<kernels::CpuDirectConvolutionKernel> _conv_kernel;
+ std::unique_ptr<NEFillBorderKernel> _input_border_handler;
+ std::unique_ptr<CpuActivation> _activationlayer_function;
+ Tensor _accumulator;
+ bool _has_bias{ false };
+ bool _is_activationlayer_enabled{ false };
+ unsigned int _dim_split{ 0 };
+ bool _is_padding_required{ false };
+};
+} // namespace cpu
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_CPU_DIRECTCONVOLUTION_H */
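
As the interface above shows, the operator keeps no tensor pointers of its own: configure() consumes ITensorInfo metadata and run() receives the concrete tensors through an ITensorPack, which is exactly how NEDirectConvolutionLayer drives it in this patch. A minimal internal-usage sketch under those assumptions (shapes, the 3x3/F32 case and the include paths are illustrative only):

#include "arm_compute/core/ITensorPack.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/Tensor.h"
#include "src/runtime/cpu/operators/CpuDirectConvolution.h"

using namespace arm_compute;

// Hypothetical internal usage; mirrors what NEDirectConvolutionLayer::configure/run do above.
void cpu_direct_conv_sketch()
{
    Tensor src, weights, bias, dst;
    src.allocator()->init(TensorInfo(TensorShape(32U, 32U, 3U), 1, DataType::F32));
    weights.allocator()->init(TensorInfo(TensorShape(3U, 3U, 3U, 8U), 1, DataType::F32));
    bias.allocator()->init(TensorInfo(TensorShape(8U), 1, DataType::F32));
    dst.allocator()->init(TensorInfo(TensorShape(30U, 30U, 8U), 1, DataType::F32));

    // Configure on metadata only, then allocate the backing memory
    cpu::CpuDirectConvolution conv{};
    conv.configure(src.info(), weights.info(), bias.info(), dst.info(), PadStrideInfo(1, 1, 0, 0));

    src.allocator()->allocate();
    weights.allocator()->allocate();
    bias.allocator()->allocate();
    dst.allocator()->allocate();

    // Concrete tensors are handed over at run time through a tensor pack
    ITensorPack pack;
    pack.add_tensor(TensorType::ACL_SRC_0, &src);
    pack.add_tensor(TensorType::ACL_SRC_1, &weights);
    pack.add_tensor(TensorType::ACL_SRC_2, &bias);
    pack.add_tensor(TensorType::ACL_DST, &dst);
    conv.run(pack);
}
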