From e1314665fcfd2a32d6117a8fc16f67a83db3bb05 Mon Sep 17 00:00:00 2001
From: Michele Di Giorgio
Date: Mon, 1 Feb 2021 17:09:32 +0000
Subject: Make CL Pooling kernels and functions state-less

Resolves COMPMID-4000

Change-Id: I64878f93c033b4928fdefbb964c37c67fdecfaab
Signed-off-by: Michele Di Giorgio
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/4971
Comments-Addressed: Arm Jenkins
Tested-by: Arm Jenkins
Reviewed-by: Manuel Bottini
Reviewed-by: Georgios Pinitas
---
 src/runtime/CL/functions/CLPoolingLayer.cpp |  83 +++++++++--------------
 src/runtime/CL/tuners/BifrostTuner.cpp      |  12 ++--
 src/runtime/gpu/cl/operators/ClPooling.cpp  | 101 ++++++++++++++++++++++++++++
 src/runtime/gpu/cl/operators/ClPooling.h    |  75 +++++++++++++++++++++
 4 files changed, 216 insertions(+), 55 deletions(-)
 create mode 100644 src/runtime/gpu/cl/operators/ClPooling.cpp
 create mode 100644 src/runtime/gpu/cl/operators/ClPooling.h

(limited to 'src/runtime')

diff --git a/src/runtime/CL/functions/CLPoolingLayer.cpp b/src/runtime/CL/functions/CLPoolingLayer.cpp
index f3a2dbdd51..fbaec1d2d9 100644
--- a/src/runtime/CL/functions/CLPoolingLayer.cpp
+++ b/src/runtime/CL/functions/CLPoolingLayer.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2020 Arm Limited.
+ * Copyright (c) 2017-2021 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -23,13 +23,27 @@
  */
 #include "arm_compute/runtime/CL/functions/CLPoolingLayer.h"
 
+#include "arm_compute/core/CL/CLKernelLibrary.h"
 #include "arm_compute/core/CL/ICLTensor.h"
-#include "arm_compute/runtime/CL/CLScheduler.h"
-#include "src/core/CL/kernels/CLFillBorderKernel.h"
-#include "src/core/CL/kernels/CLPoolingLayerKernel.h"
+#include "src/core/CL/ICLKernel.h"
+#include "src/runtime/gpu/cl/operators/ClPooling.h"
 
 namespace arm_compute
 {
+struct CLPoolingLayer::Impl
+{
+    const ICLTensor *src{ nullptr };
+    ICLTensor       *dst{ nullptr };
+    ICLTensor       *indices{ nullptr };
+    std::unique_ptr<opencl::ClPooling> op{ nullptr };
+};
+
+CLPoolingLayer::CLPoolingLayer()
+    : _impl(std::make_unique<Impl>())
+{
+}
+CLPoolingLayer::~CLPoolingLayer() = default;
+
 void CLPoolingLayer::configure(ICLTensor *input, ICLTensor *output, const PoolingLayerInfo &pool_info, ICLTensor *indices)
 {
     configure(CLKernelLibrary::get().get_compile_context(), input, output, pool_info, indices);
@@ -37,56 +51,25 @@ void CLPoolingLayer::configure(ICLTensor *input, ICLTensor *output, const Poolin
 
 void CLPoolingLayer::configure(const CLCompileContext &compile_context, ICLTensor *input, ICLTensor *output, const PoolingLayerInfo &pool_info, ICLTensor *indices)
 {
-    ARM_COMPUTE_ERROR_ON_NULLPTR(input);
-    // Configure pooling kernel
-    auto k = std::make_unique<CLPoolingLayerKernel>();
-    k->set_target(CLScheduler::get().target());
-    k->configure(compile_context, input, output, pool_info, indices);
-    _kernel = std::move(k);
-
-    const DataType data_type = input->info()->data_type();
+    _impl->src     = input;
+    _impl->dst     = output;
+    _impl->indices = indices;
 
-    // Configure border depending on operation required (quantize border in case of asymmetric data_type)
-    BorderMode border_mode{};
-    PixelValue pixel_value(0.f);
-    if(is_data_type_quantized_asymmetric(data_type) && !pool_info.exclude_padding)
-    {
-        pixel_value = PixelValue(0, data_type, input->info()->quantization_info());
-    }
-
-    // Data layout
-    const auto data_layout = pool_info.data_layout == DataLayout::UNKNOWN ? input->info()->data_layout() : pool_info.data_layout;
-
-    switch(data_layout)
-    {
-        case DataLayout::NCHW:
-            border_mode = (PoolingType::MAX == pool_info.pool_type) ? BorderMode::REPLICATE : BorderMode::CONSTANT;
-            break;
-        case DataLayout::NHWC:
-            border_mode = BorderMode::CONSTANT;
-            if(PoolingType::MAX == pool_info.pool_type)
-            {
-                if(is_data_type_quantized(data_type))
-                {
-                    std::tie(pixel_value, std::ignore) = get_min_max(data_type);
-                }
-                else
-                {
-                    pixel_value = PixelValue(std::numeric_limits<float>::lowest());
-                }
-            }
-            break;
-        default:
-            ARM_COMPUTE_ERROR("Data layout not supported");
-    }
-    _border_handler->configure(compile_context, input, _kernel->border_size(), border_mode, pixel_value);
-
-    // Tune kernels
-    CLScheduler::get().tune_kernel_static(*_kernel);
+    _impl->op = std::make_unique<opencl::ClPooling>();
+    _impl->op->configure(compile_context, input->info(), output->info(), pool_info, (indices) ? indices->info() : nullptr);
 }
 
 Status CLPoolingLayer::validate(const ITensorInfo *input, const ITensorInfo *output, const PoolingLayerInfo &pool_info, const ITensorInfo *indices)
 {
-    return CLPoolingLayerKernel::validate(input, output, pool_info, indices);
+    return opencl::ClPooling::validate(input, output, pool_info, indices);
+}
+
+void CLPoolingLayer::run()
+{
+    ITensorPack pack;
+    pack.add_tensor(TensorType::ACL_SRC, _impl->src);
+    pack.add_tensor(TensorType::ACL_DST_0, _impl->dst);
+    pack.add_tensor(TensorType::ACL_DST_1, _impl->indices);
+    _impl->op->run(pack);
 }
 } // namespace arm_compute
diff --git a/src/runtime/CL/tuners/BifrostTuner.cpp b/src/runtime/CL/tuners/BifrostTuner.cpp
index 8badd57b9e..7a06de6d1c 100644
--- a/src/runtime/CL/tuners/BifrostTuner.cpp
+++ b/src/runtime/CL/tuners/BifrostTuner.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2020 Arm Limited.
+ * Copyright (c) 2018-2021 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -27,6 +27,8 @@
 #include "src/core/CL/CLKernels.h"
 #include "support/Cast.h"
 
+#include "src/core/gpu/cl/kernels/ClPoolingKernel.h"
+
 namespace arm_compute
 {
 namespace tuners
 {
@@ -208,7 +210,7 @@ void tune_gemm_kernel(CLGEMMMatrixMultiplyKernel &k)
     k.set_lws_hint(lws_hint);
 }
 
-void tune_pooling_kernel(CLPoolingLayerKernel &k)
+void tune_pooling_kernel(opencl::kernels::ClPoolingKernel &k)
 {
     cl::NDRange lws_hint       = k.lws_hint();
     const GPUTarget gpu_target = k.get_target();
@@ -217,7 +219,7 @@
     // On Bifrost, this works for up to 35x35xC filters, for which the pooling_layer_3_optimized
     // kernel is launched with gws=(9, 33, C). In any case, the hint will be ignored if it is
    // invalid (e.g. exceeds the maximum workgroup size that the kernel can be launched with).
-    if(k._input->info()->data_layout() == DataLayout::NCHW)
+    if(k._pool_info.data_layout == DataLayout::NCHW)
     {
         if(gpu_target_is_in(gpu_target,
                             GPUTarget::G71, GPUTarget::G72, GPUTarget::G76,
@@ -279,9 +281,9 @@ void BifrostTuner::tune_kernel_static(ICLKernel &kernel)
     {
         tune_gemm_kernel(*utils::cast::polymorphic_downcast<CLGEMMMatrixMultiplyKernel *>(&kernel));
     }
-    else if(dynamic_cast<CLPoolingLayerKernel *>(&kernel) != nullptr)
+    else if(dynamic_cast<opencl::kernels::ClPoolingKernel *>(&kernel) != nullptr)
     {
-        tune_pooling_kernel(*utils::cast::polymorphic_downcast<CLPoolingLayerKernel *>(&kernel));
+        tune_pooling_kernel(*utils::cast::polymorphic_downcast<opencl::kernels::ClPoolingKernel *>(&kernel));
     }
     else if(dynamic_cast<CLScaleKernel *>(&kernel) != nullptr)
     {
diff --git a/src/runtime/gpu/cl/operators/ClPooling.cpp b/src/runtime/gpu/cl/operators/ClPooling.cpp
new file mode 100644
index 0000000000..8610eb9842
--- /dev/null
+++ b/src/runtime/gpu/cl/operators/ClPooling.cpp
@@ -0,0 +1,101 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "src/runtime/gpu/cl/operators/ClPooling.h"
+
+#include "arm_compute/runtime/CL/CLScheduler.h"
+
+#include "src/core/CL/kernels/CLFillBorderKernel.h"
+#include "src/core/gpu/cl/ClCompileContext.h"
+#include "src/core/gpu/cl/kernels/ClPoolingKernel.h"
+
+namespace arm_compute
+{
+namespace opencl
+{
+void ClPooling::configure(const ClCompileContext &compile_context, ITensorInfo *src, ITensorInfo *dst, const PoolingLayerInfo &info, ITensorInfo *indices)
+{
+    ARM_COMPUTE_ERROR_ON_NULLPTR(src);
+    // Configure pooling kernel
+    auto k = std::make_unique<kernels::ClPoolingKernel>();
+    k->set_target(CLScheduler::get().target());
+    k->configure(compile_context, src, dst, info, indices);
+    _pooling = std::move(k);
+
+    const DataType data_type = src->data_type();
+
+    // Configure border depending on operation required (quantize border in case of asymmetric data_type)
+    BorderMode border_mode{};
+    PixelValue pixel_value(0.f);
+    if(is_data_type_quantized_asymmetric(data_type) && !info.exclude_padding)
+    {
+        pixel_value = PixelValue(0, data_type, src->quantization_info());
+    }
+
+    // Data layout
+    const auto data_layout = info.data_layout == DataLayout::UNKNOWN ? src->data_layout() : info.data_layout;
+
+    switch(data_layout)
+    {
+        case DataLayout::NCHW:
+            border_mode = (PoolingType::MAX == info.pool_type) ? BorderMode::REPLICATE : BorderMode::CONSTANT;
+            break;
+        case DataLayout::NHWC:
+            border_mode = BorderMode::CONSTANT;
+            if(PoolingType::MAX == info.pool_type)
+            {
+                if(is_data_type_quantized(data_type))
+                {
+                    std::tie(pixel_value, std::ignore) = get_min_max(data_type);
+                }
+                else
+                {
+                    pixel_value = PixelValue(std::numeric_limits<float>::lowest());
+                }
+            }
+            break;
+        default:
+            ARM_COMPUTE_ERROR("Data layout not supported");
+    }
+    auto b = std::make_unique<CLFillBorderKernel>();
+    b->configure(compile_context, src, _pooling->border_size(), border_mode, pixel_value);
+    _border_handler = std::move(b);
+
+    // Tune kernels
+    CLScheduler::get().tune_kernel_static(*_pooling);
+}
+
+Status ClPooling::validate(const ITensorInfo *src, const ITensorInfo *dst, const PoolingLayerInfo &info, const ITensorInfo *indices)
+{
+    return kernels::ClPoolingKernel::validate(src, dst, info, indices);
+}
+
+void ClPooling::run(ITensorPack &tensors)
+{
+    ARM_COMPUTE_ERROR_ON_MSG(tensors.empty(), "No inputs provided");
+
+    CLScheduler::get().enqueue_op(*_border_handler.get(), tensors, false);
+    CLScheduler::get().enqueue_op(*_pooling.get(), tensors, false);
+}
+} // namespace opencl
+} // namespace arm_compute
diff --git a/src/runtime/gpu/cl/operators/ClPooling.h b/src/runtime/gpu/cl/operators/ClPooling.h
new file mode 100644
index 0000000000..99de6d0dcf
--- /dev/null
+++ b/src/runtime/gpu/cl/operators/ClPooling.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_CL_POOLING_H
+#define ARM_COMPUTE_CL_POOLING_H
+
+#include "src/core/gpu/cl/ClCompileContext.h"
+#include "src/runtime/gpu/cl/IClOperator.h"
+
+#include <memory>
+
+namespace arm_compute
+{
+namespace opencl
+{
+/** Basic function to simulate a pooling layer with the specified pooling operation. This function calls the following OpenCL kernels:
+ *
+ * -# @ref CLFillBorderKernel (executed if padding size is different from zero)
+ * -# @ref opencl::ClPooling
+ */
+class ClPooling : public IClOperator
+{
+public:
+    /** Constructor */
+    ClPooling() = default;
+    /** Configure operator for a given list of arguments
+     *
+     * @param[in]  compile_context The compile context to be used.
+     * @param[in]  src             Source tensor info. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32.
+     * @param[out] dst             Destination tensor info. Data type supported: same as @p src
+     * @param[in]  info            Pooling layer parameters.
+     * @param[out] indices         (optional) The indices info of the maximal values. Data type supported: U32.
+     */
+    void configure(const ClCompileContext &compile_context, ITensorInfo *src, ITensorInfo *dst, const PoolingLayerInfo &info, ITensorInfo *indices = nullptr);
+    /** Static function to check if given info will lead to a valid configuration of @ref ClPooling
+     *
+     * @param[in]  src     Source tensor info. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32.
+     * @param[out] dst     Destination tensor info. Data type supported: same as @p src
+     * @param[in]  info    Pooling layer parameters.
+     * @param[out] indices (optional) The indices info of the maximal values. Data type supported: U32.
+     *
+     * @return a status
+     */
+    static Status validate(const ITensorInfo *src, const ITensorInfo *dst, const PoolingLayerInfo &info, const ITensorInfo *indices = nullptr);
+
+    // Inherited method overridden
+    void run(ITensorPack &tensors) override;
+
+private:
+    std::unique_ptr<ICLKernel> _pooling{ nullptr };
+    std::unique_ptr<ICLKernel> _border_handler{ nullptr };
+};
+} // namespace opencl
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_CL_POOLING_H */
-- 
cgit v1.2.1
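
Note (not part of the patch above): the sketch below illustrates how a caller drives the refactored function after this change. It is an illustrative example only, assuming the public Compute Library API of this release; the tensor shapes, pooling parameters and main() scaffolding are hypothetical and not taken from the commit. As the diff shows, CLPoolingLayer now keeps just the tensor pointers plus the state-less opencl::ClPooling operator, and its run() packs ACL_SRC/ACL_DST_0/ACL_DST_1 into an ITensorPack before forwarding to ClPooling::run().

// Illustrative usage sketch (hypothetical shapes and pooling parameters); not part of the commit.
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "arm_compute/runtime/CL/CLTensor.h"
#include "arm_compute/runtime/CL/functions/CLPoolingLayer.h"

using namespace arm_compute;

int main()
{
    // Initialise the OpenCL scheduler (context, queue, default tuner)
    CLScheduler::get().default_init();

    // 16x16x3 F32 input in NCHW; 2x2 max pooling with stride 2 gives an 8x8x3 output
    CLTensor src, dst;
    src.allocator()->init(TensorInfo(TensorShape(16U, 16U, 3U), 1, DataType::F32));
    dst.allocator()->init(TensorInfo(TensorShape(8U, 8U, 3U), 1, DataType::F32));

    const PoolingLayerInfo pool_info(PoolingType::MAX, Size2D(2, 2), DataLayout::NCHW, PadStrideInfo(2, 2, 0, 0));

    // The function only stores the tensor pointers and the state-less ClPooling operator
    CLPoolingLayer pool;
    pool.configure(&src, &dst, pool_info);

    src.allocator()->allocate();
    dst.allocator()->allocate();

    // run() builds an ITensorPack (ACL_SRC, ACL_DST_0, ACL_DST_1) and calls ClPooling::run()
    pool.run();
    CLScheduler::get().sync();
    return 0;
}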