Diffstat (limited to 'src/runtime')
-rw-r--r--  src/runtime/CL/CLScheduler.cpp                          |  10
-rw-r--r--  src/runtime/CL/functions/CLDirectConvolutionLayer.cpp   |  82
-rw-r--r--  src/runtime/CL/tuners/BifrostTuner.cpp                  | 305
-rw-r--r--  src/runtime/CL/tuners/MidgardTuner.cpp                  |  82
-rw-r--r--  src/runtime/gpu/cl/operators/ClDirectConvolution.cpp    | 102
-rw-r--r--  src/runtime/gpu/cl/operators/ClDirectConvolution.h      |  92
6 files changed, 230 insertions, 443 deletions
diff --git a/src/runtime/CL/CLScheduler.cpp b/src/runtime/CL/CLScheduler.cpp
index ef5cb03b32..f228cf6513 100644
--- a/src/runtime/CL/CLScheduler.cpp
+++ b/src/runtime/CL/CLScheduler.cpp
@@ -26,7 +26,6 @@
 #include "arm_compute/core/CL/CLKernelLibrary.h"
 #include "arm_compute/runtime/CL/CLHelpers.h"
 #include "arm_compute/runtime/CL/CLTuner.h"
-#include "arm_compute/runtime/CL/tuners/Tuners.h"
 #include "src/core/CL/ICLKernel.h"
 
 namespace arm_compute
@@ -97,7 +96,7 @@ bool CLScheduler::is_initialised() const
 std::once_flag CLScheduler::_initialize_symbols;
 
 CLScheduler::CLScheduler()
-    : _context(), _queue(), _target(GPUTarget::MIDGARD), _is_initialised(false), _cl_tuner(nullptr), _cl_default_static_tuner(nullptr), _gemm_heuristics(nullptr)
+    : _context(), _queue(), _target(GPUTarget::MIDGARD), _is_initialised(false), _cl_tuner(nullptr), _gemm_heuristics(nullptr)
 {
 }
 
@@ -116,8 +115,7 @@ void CLScheduler::default_init_with_context(cl::Device &device, cl::Context &ctx
         cl::CommandQueue queue = cl::CommandQueue(ctx, device);
         CLKernelLibrary::get().init(cl_kernels_folder, ctx, device);
         init(ctx, queue, device, cl_tuner, gemm_h);
-        _cl_default_static_tuner = tuners::TunerFactory::create_tuner(_target);
-        _cl_tuner                = (cl_tuner == nullptr) ? _cl_default_static_tuner.get() : cl_tuner;
+        _cl_tuner = cl_tuner;
     }
 }
 
@@ -133,12 +131,10 @@ void CLScheduler::default_init(ICLTuner *cl_tuner, CLGEMMHeuristicsHandle *gemm_
         cl::CommandQueue queue = cl::CommandQueue(ctx, dev);
         CLKernelLibrary::get().init("./cl_kernels/", ctx, dev);
         init(ctx, queue, dev, cl_tuner, gemm_h);
-        // Create a default static tuner and set if none was provided
-        _cl_default_static_tuner = tuners::TunerFactory::create_tuner(_target);
     }
 
     // Set CL tuner
-    _cl_tuner = (cl_tuner == nullptr) ? _cl_default_static_tuner.get() : cl_tuner;
+    _cl_tuner = cl_tuner;
 }
 
 void CLScheduler::set_context(cl::Context context)
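
With the static tuner factory gone, a null cl_tuner no longer falls back to a Bifrost/Midgard heuristic tuner: it now simply means "no static tuning". A minimal sketch of how a caller can opt back into tuning by passing a tuner explicitly; the dynamic CLTuner is shown as one option, not as a behavioural replacement for the removed heuristics:

    #include "arm_compute/runtime/CL/CLScheduler.h"
    #include "arm_compute/runtime/CL/CLTuner.h"

    int main()
    {
        // Dynamic tuner: measures candidate local work sizes at run time
        arm_compute::CLTuner tuner;
        // Pass the tuner explicitly; nullptr (the default) now disables tuning
        arm_compute::CLScheduler::get().default_init(&tuner);
        return 0;
    }
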
diff --git a/src/runtime/CL/functions/CLDirectConvolutionLayer.cpp b/src/runtime/CL/functions/CLDirectConvolutionLayer.cpp
index 49e97693e4..d60d11aa5f 100644
--- a/src/runtime/CL/functions/CLDirectConvolutionLayer.cpp
+++ b/src/runtime/CL/functions/CLDirectConvolutionLayer.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2020 Arm Limited.
+ * Copyright (c) 2017-2021 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -28,18 +28,27 @@
 #include "arm_compute/core/Utils.h"
 #include "arm_compute/core/Validate.h"
 #include "arm_compute/runtime/CL/CLScheduler.h"
-#include "src/core/CL/kernels/CLDirectConvolutionLayerKernel.h"
-#include "src/core/CL/kernels/CLFillBorderKernel.h"
+#include "src/runtime/gpu/cl/operators/ClActivation.h"
+#include "src/runtime/gpu/cl/operators/ClDirectConvolution.h"
 
-using namespace arm_compute;
+namespace arm_compute
+{
+struct CLDirectConvolutionLayer::Impl
+{
+    const ICLTensor                              *src{ nullptr };
+    const ICLTensor                              *weights{ nullptr };
+    const ICLTensor                              *biases{ nullptr };
+    ICLTensor                                    *dst{ nullptr };
+    std::unique_ptr<opencl::ClDirectConvolution>  op{ nullptr };
+};
 
 CLDirectConvolutionLayer::CLDirectConvolutionLayer()
-    : _direct_conv_kernel(std::make_unique<CLDirectConvolutionLayerKernel>()), _input_border_handler(std::make_unique<CLFillBorderKernel>()), _activationlayer_function(),
-      _is_activationlayer_enabled(false)
+    : _impl(std::make_unique<Impl>())
 {
 }
-
-CLDirectConvolutionLayer::~CLDirectConvolutionLayer() = default;
+CLDirectConvolutionLayer::CLDirectConvolutionLayer(CLDirectConvolutionLayer &&)            = default;
+CLDirectConvolutionLayer &CLDirectConvolutionLayer::operator=(CLDirectConvolutionLayer &&) = default;
+CLDirectConvolutionLayer::~CLDirectConvolutionLayer()                                      = default;
 
 void CLDirectConvolutionLayer::configure(ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info, const ActivationLayerInfo &act_info)
 {
@@ -47,57 +56,32 @@ void CLDirectConvolutionLayer::configure(ICLTensor *input, const ICLTensor *weig
 }
 
 void CLDirectConvolutionLayer::configure(const CLCompileContext &compile_context, ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output,
-                                         const PadStrideInfo &conv_info,
-                                         const ActivationLayerInfo &act_info)
+                                         const PadStrideInfo &conv_info, const ActivationLayerInfo &act_info)
 {
-    // Set GPU target
-    _direct_conv_kernel->set_target(CLScheduler::get().target());
-
-    // Configure direct convolution
-    _direct_conv_kernel->configure(compile_context, input, weights, biases, output, conv_info);
+    ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
 
-    // Configure border handler
-    PixelValue &&zero_value(0.f);
-    if(is_data_type_quantized_asymmetric(input->info()->data_type()))
-    {
-        zero_value = PixelValue(0, input->info()->data_type(), input->info()->quantization_info());
-    }
-    _input_border_handler->configure(compile_context, input, _direct_conv_kernel->border_size(), BorderMode::CONSTANT, zero_value);
+    _impl->src     = input;
+    _impl->weights = weights;
+    _impl->biases  = biases;
+    _impl->dst     = output;
 
-    // Tune kernels
-    CLScheduler::get().tune_kernel_static(*_direct_conv_kernel);
-
-    _is_activationlayer_enabled = act_info.enabled();
-
-    //Configure Activation Layer
-    if(_is_activationlayer_enabled)
-    {
-        _activationlayer_function.configure(compile_context, output, nullptr, act_info);
-    }
+    _impl->op = std::make_unique<opencl::ClDirectConvolution>();
+    _impl->op->configure(compile_context, _impl->src->info(), _impl->weights->info(), _impl->biases->info(), _impl->dst->info(), conv_info, act_info);
 }
 
 Status CLDirectConvolutionLayer::validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info,
                                           const ActivationLayerInfo &act_info)
 {
-    ARM_COMPUTE_RETURN_ON_ERROR(CLDirectConvolutionLayerKernel::validate(input, weights, biases, output, conv_info, CLScheduler::get().target()));
-    if(act_info.enabled())
-    {
-        ARM_COMPUTE_RETURN_ON_ERROR(CLActivationLayer::validate(output, nullptr, act_info));
-    }
-    return Status{};
+    return opencl::ClDirectConvolution::validate(input, weights, biases, output, conv_info, act_info);
 }
 
 void CLDirectConvolutionLayer::run()
 {
-    // Run border handler
-    CLScheduler::get().enqueue(*_input_border_handler, false);
-
-    // Run direct convolution
-    CLScheduler::get().enqueue(*_direct_conv_kernel);
-
-    //Run Activation Layer
-    if(_is_activationlayer_enabled)
-    {
-        _activationlayer_function.run();
-    }
+    ITensorPack pack;
+    pack.add_tensor(TensorType::ACL_SRC, _impl->src);
+    pack.add_tensor(TensorType::ACL_SRC_1, _impl->weights);
+    pack.add_tensor(TensorType::ACL_SRC_2, _impl->biases);
+    pack.add_tensor(TensorType::ACL_DST, _impl->dst);
+    _impl->op->run(pack);
 }
+}
\ No newline at end of file
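
The public CLDirectConvolutionLayer API is unchanged by this port; only the internals move behind the Impl struct and the new operator. A minimal usage sketch, assuming NCHW F32 tensors, a 3x3 stride-1 unpadded convolution and a fused ReLU (all illustrative choices):

    #include "arm_compute/core/TensorInfo.h"
    #include "arm_compute/core/Types.h"
    #include "arm_compute/runtime/CL/CLScheduler.h"
    #include "arm_compute/runtime/CL/CLTensor.h"
    #include "arm_compute/runtime/CL/functions/CLDirectConvolutionLayer.h"

    using namespace arm_compute;

    int main()
    {
        CLScheduler::get().default_init();

        // 32x32x3 input, 3x3x3x8 weights -> 30x30x8 output (no padding)
        CLTensor src, weights, biases, dst;
        src.allocator()->init(TensorInfo(TensorShape(32U, 32U, 3U), 1, DataType::F32));
        weights.allocator()->init(TensorInfo(TensorShape(3U, 3U, 3U, 8U), 1, DataType::F32));
        biases.allocator()->init(TensorInfo(TensorShape(8U), 1, DataType::F32));
        dst.allocator()->init(TensorInfo(TensorShape(30U, 30U, 8U), 1, DataType::F32));

        CLDirectConvolutionLayer conv;
        conv.configure(&src, &weights, &biases, &dst, PadStrideInfo(1, 1, 0, 0),
                       ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));

        // Allocate backing memory after configuration
        for(auto *t : { &src, &weights, &biases, &dst })
        {
            t->allocator()->allocate();
        }

        conv.run(); // builds the ITensorPack internally and drives the operator
        CLScheduler::get().sync();
        return 0;
    }
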
diff --git a/src/runtime/CL/tuners/BifrostTuner.cpp b/src/runtime/CL/tuners/BifrostTuner.cpp
deleted file mode 100644
index fe95829cca..0000000000
--- a/src/runtime/CL/tuners/BifrostTuner.cpp
+++ /dev/null
@@ -1,305 +0,0 @@
-/*
- * Copyright (c) 2018-2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "arm_compute/runtime/CL/tuners/BifrostTuner.h"
-
-#include "arm_compute/core/CL/CLHelpers.h"
-#include "src/core/CL/CLKernels.h"
-#include "support/Cast.h"
-
-#include "src/core/gpu/cl/kernels/ClPoolingKernel.h"
-#include "src/core/gpu/cl/kernels/ClScaleKernel.h"
-
-namespace arm_compute
-{
-namespace tuners
-{
-namespace
-{
-/** Tunes a @ref CLDirectConvolutionLayerKernel for a bifrost target
- *
- * @param[in] k Kernels to tune
- */
-void tune_direct_convolution_kernel(CLDirectConvolutionLayerKernel &k)
-{
-    cl::NDRange lws_hint = k.lws_hint();
-
-    const GPUTarget    gpu_target    = k.get_target();
-    const DataType     dt            = k._input->info()->data_type();
-    const TensorShape  weights_shape = k._weights->info()->tensor_shape();
-    const TensorShape  inputs_shape  = k._input->info()->tensor_shape();
-    const size_t       kernel_size   = weights_shape.x();
-    const unsigned int stride_x      = k._conv_stride_x;
-    const unsigned int stride_y      = k._conv_stride_y;
-
-    if(gpu_target_is_in(gpu_target, GPUTarget::G71, GPUTarget::G72) && (kernel_size <= 5) && (stride_x == 1) && (stride_y == 1) && (dt == DataType::F32))
-    {
-        // Through extensive experimentation with over 30 representative tensor
-        // shapes, we found a small number of local work size configurations
-        // that result in nearly optimal execution times. Selecting the right
-        // lws for a given shape, however, required a complex decision tree,
-        // until we constructed a simple feature as described below.
-        //
-        // We started from the number of multiply-accumulate operations for a
-        // convolution layer, which is equal to the product of the input
-        // dimensions 0..2 and the weights dimensions 0..2. Unfortunately,
-        // this resulted in ties between distinct shapes that required distinct
-        // lws configurations. Replacing the width of the input with the kernel
-        // size, however, resulted in nearly optimal predictions. We use underscores
-        // in variable names to indicate when they are intentionally misleading.
-        const size_t product_of_weights_dimensions = weights_shape[0] * weights_shape[1] * weights_shape[2];
-        const size_t product_of_input_dimensions_  = inputs_shape[0] * inputs_shape[1] * inputs_shape[2];
-        const float  mega_ops_                     = 1e-6 * product_of_weights_dimensions * product_of_input_dimensions_;
-
-        switch(kernel_size)
-        {
-            case 1:
-            {
-                if(mega_ops_ < 1.f)
-                {
-                    lws_hint = cl::NDRange(1, 1, 8);
-                }
-                else if(mega_ops_ < 7.f)
-                {
-                    lws_hint = cl::NDRange(1, 1, 4);
-                }
-                else
-                {
-                    lws_hint = cl::NDRange(1, 1, 2);
-                }
-                break;
-            }
-            case 3:
-            {
-                if(mega_ops_ < 1.f)
-                {
-                    lws_hint = cl::NDRange(1, 1, 8);
-                }
-                else if(mega_ops_ < 13.f)
-                {
-                    lws_hint = cl::NDRange(2, 1, 4);
-                }
-                else if(mega_ops_ < 50.f)
-                {
-                    lws_hint = cl::NDRange(3, 1, 4);
-                }
-                else
-                {
-                    lws_hint = cl::NDRange(2, 1, 6);
-                }
-                break;
-            }
-            case 5:
-            {
-                if(mega_ops_ < 2.f || mega_ops_ > 80.f)
-                {
-                    lws_hint = cl::NDRange(2, 1, 4);
-                }
-                else
-                {
-                    lws_hint = cl::NDRange(2, 1, 8);
-                }
-                break;
-            }
-            default:
-                break;
-        }
-        k.set_lws_hint(lws_hint);
-    }
-}
-
-void tune_col2im_kernel(CLCol2ImKernel &k)
-{
-    cl::NDRange     lws_hint   = k.lws_hint();
-    const GPUTarget gpu_target = k.get_target();
-
-    // Configure the local work size for Bifrost with a value obtained
-    // via exhaustive autotuning over 30 representative tensor shapes.
-    if(gpu_target_is_in(gpu_target,
-                        GPUTarget::G71, GPUTarget::G72, GPUTarget::G76,
-                        GPUTarget::G51, GPUTarget::G51BIG, GPUTarget::G51LIT,
-                        GPUTarget::G52, GPUTarget::G52LIT))
-    {
-        if((k._convolved_dims.width == 7) || (k._convolved_dims.width == 14))
-        {
-            lws_hint = cl::NDRange(1, 7, 1);
-        }
-        else
-        {
-            lws_hint = cl::NDRange(1, 8, 1);
-        }
-    }
-
-    k.set_lws_hint(lws_hint);
-}
-
-void tune_im2col_kernel(CLIm2ColKernel &k)
-{
-    cl::NDRange     lws_hint   = k.lws_hint();
-    const GPUTarget gpu_target = k.get_target();
-
-    // Local work size optimized for the 11x11 AlexNet convolution on Bifrost.
-    if(gpu_target_is_in(gpu_target,
-                        GPUTarget::G71, GPUTarget::G72, GPUTarget::G76,
-                        GPUTarget::G51, GPUTarget::G51BIG, GPUTarget::G51LIT,
-                        GPUTarget::G52, GPUTarget::G52LIT)
-       && k._kernel_dims.width == 11)
-    {
-        const bool is_square_kernel = (k._kernel_dims.width == k._kernel_dims.height);
-        if(!is_square_kernel && k._kernel_dims.width > 1 && !k._conv_info.has_padding())
-        {
-            lws_hint = cl::NDRange(1, 1, 1);
-        }
-    }
-    k.set_lws_hint(lws_hint);
-}
-
-void tune_gemm_kernel(CLGEMMMatrixMultiplyKernel &k)
-{
-    cl::NDRange     lws_hint   = k.lws_hint();
-    const GPUTarget gpu_target = k.get_target();
-
-    // Configure LWS hint
-    switch(gpu_target)
-    {
-        case GPUTarget::G71:
-        case GPUTarget::G72:
-        case GPUTarget::G51:
-        case GPUTarget::G51BIG:
-        case GPUTarget::G51LIT:
-        case GPUTarget::G52:
-        case GPUTarget::G52LIT:
-        case GPUTarget::G76:
-            if(k._input1->info()->dimension(1) == 24)
-            {
-                // LWS optimized for the 11x11 AlexNet convolution on Bifrost.
-                lws_hint = cl::NDRange(2, 2);
-            }
-            else if(k._output->info()->dimension(1) == 196)
-            {
-                lws_hint = cl::NDRange(1, 7);
-            }
-            else
-            {
-                lws_hint = cl::NDRange(8, 8);
-            }
-            break;
-        default:
-            lws_hint = cl::NullRange;
-    }
-
-    k.set_lws_hint(lws_hint);
-}
-
-void tune_pooling_kernel(opencl::kernels::ClPoolingKernel &k)
-{
-    cl::NDRange     lws_hint   = k.lws_hint();
-    const GPUTarget gpu_target = k.get_target();
-
-    // Configure the local work size (hint) from the first two dimensions of the global work size.
-    // On Bifrost, this works for up to 35x35xC filters, for which the pooling_layer_3_optimized
-    // kernel is launched with gws=(9, 33, C). In any case, the hint will be ignored if it is
-    // invalid (e.g. exceeds the maximum workgroup size that the kernel can be launched with).
-    if(k._pool_info.data_layout == DataLayout::NCHW)
-    {
-        if(gpu_target_is_in(gpu_target,
-                            GPUTarget::G71, GPUTarget::G72, GPUTarget::G76,
-                            GPUTarget::G51, GPUTarget::G51BIG, GPUTarget::G51LIT,
-                            GPUTarget::G52, GPUTarget::G52LIT))
-        {
-            cl::NDRange gws = ICLKernel::gws_from_window(k.window());
-            lws_hint        = cl::NDRange(gws[0], gws[1], 1);
-        }
-    }
-
-    k.set_lws_hint(lws_hint);
-}
-
-void tune_scale_kernel(opencl::kernels::ClScaleKernel &k)
-{
-    cl::NDRange               lws_hint      = k.lws_hint();
-    const GPUTarget           gpu_target    = k.get_target();
-    const DataType            dt            = k.get_data_type();
-    const InterpolationPolicy interpolation = k.get_interpolation_policy();
-
-    // Configure the local work size for Bifrost, interpolation (bilinear) and datatype F32.
-    // The value are obtained via exhaustive autotuning.
-    if(gpu_target_is_in(gpu_target, GPUTarget::G71, GPUTarget::G72) && (dt == DataType::F32) && (interpolation == InterpolationPolicy::BILINEAR))
-    {
-        const auto dim_0 = k.get_output_x_dim();
-        if(dim_0 == 480)
-        {
-            lws_hint = cl::NDRange(2, 1);
-        }
-        else if(dim_0 == 3120)
-        {
-            lws_hint = cl::NDRange(2, 8);
-        }
-        else if(dim_0 == 4160)
-        {
-            lws_hint = cl::NDRange(4, 8);
-        }
-        k.set_lws_hint(lws_hint);
-    }
-}
-} // namespace
-
-void BifrostTuner::tune_kernel_static(ICLKernel &kernel)
-{
-    if(dynamic_cast<CLDirectConvolutionLayerKernel *>(&kernel) != nullptr)
-    {
-        tune_direct_convolution_kernel(*utils::cast::polymorphic_downcast<CLDirectConvolutionLayerKernel *>(&kernel));
-    }
-    else if(dynamic_cast<CLCol2ImKernel *>(&kernel) != nullptr)
-    {
-        tune_col2im_kernel(*utils::cast::polymorphic_downcast<CLCol2ImKernel *>(&kernel));
-    }
-    else if(dynamic_cast<CLIm2ColKernel *>(&kernel) != nullptr)
-    {
-        tune_im2col_kernel(*utils::cast::polymorphic_downcast<CLIm2ColKernel *>(&kernel));
-    }
-    else if(dynamic_cast<CLGEMMMatrixMultiplyKernel *>(&kernel) != nullptr)
-    {
-        tune_gemm_kernel(*utils::cast::polymorphic_downcast<CLGEMMMatrixMultiplyKernel *>(&kernel));
-    }
-    else if(dynamic_cast<opencl::kernels::ClPoolingKernel *>(&kernel) != nullptr)
-    {
-        tune_pooling_kernel(*utils::cast::polymorphic_downcast<opencl::kernels::ClPoolingKernel *>(&kernel));
-    }
-    else if(dynamic_cast<opencl::kernels::ClScaleKernel *>(&kernel) != nullptr)
-    {
-        tune_scale_kernel(*utils::cast::polymorphic_downcast<opencl::kernels::ClScaleKernel *>(&kernel));
-    }
-}
-
-void BifrostTuner::tune_kernel_dynamic(ICLKernel &kernel)
-{
-    ARM_COMPUTE_UNUSED(kernel);
-}
-
-void BifrostTuner::tune_kernel_dynamic(ICLKernel &kernel, ITensorPack &tensors)
-{
-    ARM_COMPUTE_UNUSED(kernel, tensors);
-}
-} // namespace tuners
-} // namespace arm_compute
\ No newline at end of file
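
For reference, the removed direct-convolution heuristic is easy to evaluate by hand. A self-contained sketch under assumed shapes (3x3x64 weights on a 56x56x64 F32 input, the G71/G72 stride-1 case), reproducing the kernel_size == 3 thresholds above:

    #include <cstddef>
    #include <cstdio>

    int main()
    {
        // Assumed shapes for illustration: weights [3, 3, 64], input [56, 56, 64]
        const std::size_t product_of_weights_dimensions = 3 * 3 * 64;   // 576
        const std::size_t product_of_input_dimensions_  = 56 * 56 * 64; // 200704
        const float mega_ops_ = 1e-6f * product_of_weights_dimensions * product_of_input_dimensions_;

        // Thresholds copied from the removed kernel_size == 3 branch
        if(mega_ops_ < 1.f)
            std::printf("lws_hint = (1, 1, 8)\n");
        else if(mega_ops_ < 13.f)
            std::printf("lws_hint = (2, 1, 4)\n");
        else if(mega_ops_ < 50.f)
            std::printf("lws_hint = (3, 1, 4)\n");
        else
            std::printf("lws_hint = (2, 1, 6)\n"); // mega_ops_ is ~115.6 here, so this branch
        return 0;
    }
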
diff --git a/src/runtime/CL/tuners/MidgardTuner.cpp b/src/runtime/CL/tuners/MidgardTuner.cpp
deleted file mode 100644
index 72734f2207..0000000000
--- a/src/runtime/CL/tuners/MidgardTuner.cpp
+++ /dev/null
@@ -1,82 +0,0 @@
-/*
- * Copyright (c) 2018-2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "arm_compute/runtime/CL/tuners/MidgardTuner.h"
-
-#include "arm_compute/core/CL/CLHelpers.h"
-#include "src/core/CL/CLKernels.h"
-#include "support/Cast.h"
-
-namespace arm_compute
-{
-namespace tuners
-{
-namespace
-{
-void tune_gemm_kernel(CLGEMMMatrixMultiplyKernel &k)
-{
-    cl::NDRange     lws_hint   = k.lws_hint();
-    const GPUTarget gpu_target = k.get_target();
-
-    switch(gpu_target)
-    {
-        case GPUTarget::MIDGARD:
-        case GPUTarget::T600:
-        case GPUTarget::T700:
-        case GPUTarget::T800:
-            if(k._output->info()->dimension(1) == 196)
-            {
-                lws_hint = cl::NDRange(1, 7);
-            }
-            else
-            {
-                lws_hint = cl::NDRange(8, 8);
-            }
-            break;
-        default:
-            lws_hint = cl::NullRange;
-    }
-
-    k.set_lws_hint(lws_hint);
-}
-} // namespace
-
-void MidgardTuner::tune_kernel_static(ICLKernel &kernel)
-{
-    if(dynamic_cast<CLGEMMMatrixMultiplyKernel *>(&kernel) != nullptr)
-    {
-        tune_gemm_kernel(*utils::cast::polymorphic_downcast<CLGEMMMatrixMultiplyKernel *>(&kernel));
-    }
-}
-
-void MidgardTuner::tune_kernel_dynamic(ICLKernel &kernel)
-{
-    ARM_COMPUTE_UNUSED(kernel);
-}
-
-void MidgardTuner::tune_kernel_dynamic(ICLKernel &kernel, ITensorPack &tensors)
-{
-    ARM_COMPUTE_UNUSED(kernel, tensors);
-}
-} // namespace tuners
-} // namespace arm_compute
diff --git a/src/runtime/gpu/cl/operators/ClDirectConvolution.cpp b/src/runtime/gpu/cl/operators/ClDirectConvolution.cpp
new file mode 100644
index 0000000000..3382a6c3c5
--- /dev/null
+++ b/src/runtime/gpu/cl/operators/ClDirectConvolution.cpp
@@ -0,0 +1,102 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "src/runtime/gpu/cl/operators/ClDirectConvolution.h"
+
+#include "arm_compute/runtime/CL/CLScheduler.h"
+#include "src/core/CL/kernels/CLFillBorderKernel.h"
+#include "src/core/gpu/cl/ClCompileContext.h"
+#include "src/core/gpu/cl/kernels/ClActivationKernel.h"
+#include "src/core/gpu/cl/kernels/ClDirectConvolutionKernel.h"
+
+namespace arm_compute
+{
+namespace opencl
+{
+namespace
+{
+ITensorPack select_activation_src_dst(ITensorPack &tensors)
+{
+    ITensorPack pack;
+    pack.add_tensor(TensorType::ACL_SRC, tensors.get_tensor(TensorType::ACL_DST));
+    pack.add_tensor(TensorType::ACL_DST, tensors.get_tensor(TensorType::ACL_DST));
+    return pack;
+}
+} // namespace
+
+void ClDirectConvolution::configure(const CLCompileContext &compile_context, ITensorInfo *src, ITensorInfo *weights, ITensorInfo *biases, ITensorInfo *dst,
+                                    const PadStrideInfo &conv_info, const ActivationLayerInfo &act_info)
+{
+    // Configure direct convolution kernel
+    auto k = std::make_unique<kernels::ClDirectConvolutionKernel>();
+    k->set_target(CLScheduler::get().target());
+    k->configure(compile_context, src, weights, biases, dst, conv_info);
+    _direct_conv_kernel = std::move(k);
+
+    // Configure border handler
+    PixelValue zero_value(0.f);
+    if(is_data_type_quantized_asymmetric(src->data_type()))
+    {
+        zero_value = PixelValue(0, src->data_type(), src->quantization_info());
+    }
+    auto b = std::make_unique<CLFillBorderKernel>();
+    b->configure(compile_context, src, _direct_conv_kernel->border_size(), BorderMode::CONSTANT, zero_value);
+    _src_border_handler = std::move(b);
+
+    if(act_info.enabled())
+    {
+        auto a = std::make_unique<kernels::ClActivationKernel>();
+        a->configure(compile_context, dst, dst, act_info);
+        _activation_kernel = std::move(a);
+    }
+
+    // Tune kernels
+    CLScheduler::get().tune_kernel_static(*_direct_conv_kernel);
+}
+
+Status ClDirectConvolution::validate(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *dst,
+                                     const PadStrideInfo &conv_info, const ActivationLayerInfo &act_info)
+{
+    ARM_COMPUTE_RETURN_ON_ERROR(kernels::ClDirectConvolutionKernel::validate(src, weights, biases, dst, conv_info, CLScheduler::get().target()));
+    if(act_info.enabled())
+    {
+        ARM_COMPUTE_RETURN_ON_ERROR(kernels::ClActivationKernel::validate(dst, dst, act_info));
+    }
+    return Status{};
+}
+
+void ClDirectConvolution::run(ITensorPack &tensors)
+{
+    // Run border handler
+    CLScheduler::get().enqueue_op(*_src_border_handler.get(), tensors, false);
+    // Run direct convolution
+    CLScheduler::get().enqueue_op(*_direct_conv_kernel.get(), tensors, false);
+    // Run activation kernel
+    if(_activation_kernel)
+    {
+        auto act_pack = select_activation_src_dst(tensors);
+        CLScheduler::get().enqueue_op(*_activation_kernel.get(), act_pack, false);
+    }
+}
+} // namespace opencl
+} // namespace arm_compute
diff --git a/src/runtime/gpu/cl/operators/ClDirectConvolution.h b/src/runtime/gpu/cl/operators/ClDirectConvolution.h
new file mode 100644
index 0000000000..e7ad927b0b
--- /dev/null
+++ b/src/runtime/gpu/cl/operators/ClDirectConvolution.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_CL_DIRECT_CONVOLUTION_H
+#define ARM_COMPUTE_CL_DIRECT_CONVOLUTION_H
+
+#include "src/core/gpu/cl/ClCompileContext.h"
+#include "src/core/gpu/cl/IClKernel.h"
+#include "src/runtime/gpu/cl/IClOperator.h"
+
+#include <memory>
+
+namespace arm_compute
+{
+namespace opencl
+{
+/** Basic function to execute a direct convolution layer. This function calls the following OpenCL kernels:
+ *
+ * -# @ref CLFillBorderKernel (executed if padding size is different from zero)
+ * -# @ref opencl::kernels::ClDirectConvolutionKernel
+ */
+class ClDirectConvolution : public IClOperator
+{
+public:
+    /** Constructor */
+    ClDirectConvolution() = default;
+    /** Set the src and dst tensors.
+     *
+     * @param[in]  compile_context The compile context to be used.
+     * @param[in]  src             Source tensor. 3 lower dimensions represent a single src [width, height, IFM],
+     *                             while every optional dimension from 4 and above represent a batch of srcs.
+     *                             Data types supported: QASYMM8_SIGNED/QASYMM8/F16/F32.
+     * @param[in]  weights         Weights tensor. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM]. Data type supported: Same as @p src.
+     * @param[in]  biases          Biases tensor. Shared biases supported. Biases are 1D tensor with dimensions [OFM].
+     *                             Data type supported: Should match @p src data type, except for src of QASYMM8 and QASYMM8_SIGNED type where biases should be of S32 type.
+     * @param[out] dst             Destination tensor. 3 lower dimensions represent a single dst [width, height, OFM], while the rest represent batch of dsts.
+     *                             Data types supported: Same as @p src.
+     * @param[in]  conv_info       Contains padding and stride information described in @ref PadStrideInfo.
+     * @param[in]  act_info        (Optional) Activation layer information in case of a fused activation.
+     *
+     */
+    void configure(const CLCompileContext &compile_context, ITensorInfo *src, ITensorInfo *weights, ITensorInfo *biases, ITensorInfo *dst, const PadStrideInfo &conv_info,
+                   const ActivationLayerInfo &act_info = ActivationLayerInfo());
+    /** Static function to check if given info will lead to a valid configuration of @ref ClDirectConvolution
+     *
+     * @param[in] src       Source tensor. 3 lower dimensions represent a single src [width, height, IFM],
+     *                      while every optional dimension from 4 and above represent a batch of srcs.
+     *                      Data types supported: QASYMM8_SIGNED/QASYMM8/F16/F32.
+     * @param[in] weights   Weights tensor. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM]. Data type supported: Same as @p src.
+     * @param[in] biases    Biases tensor. Shared biases supported. Biases are 1D tensor with dimensions [OFM].
+     *                      Data type supported: Should match @p src data type, except for src of QASYMM8 and QASYMM8_SIGNED type where biases should be of S32 type.
+     * @param[in] dst       Destination tensor. 3 lower dimensions represent a single dst [width, height, OFM], while the rest represent batch of dsts.
+     *                      Data types supported: Same as @p src.
+     * @param[in] conv_info Contains padding and stride information described in @ref PadStrideInfo.
+     * @param[in] act_info  (Optional) Activation layer information in case of a fused activation.
+     *
+     * @return a status
+     */
+    static Status validate(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *dst, const PadStrideInfo &conv_info,
+                           const ActivationLayerInfo &act_info = ActivationLayerInfo());
+
+    // Inherited method overridden
+    void run(ITensorPack &tensors) override;
+
+private:
+    std::unique_ptr<IClKernel> _direct_conv_kernel{ nullptr };
+    std::unique_ptr<IClKernel> _src_border_handler{ nullptr };
+    std::unique_ptr<IClKernel> _activation_kernel{ nullptr };
+};
+} // namespace opencl
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_CL_DIRECT_CONVOLUTION_H */
\ No newline at end of file
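
At the backend level the new operator is stateless with respect to tensor memory: configure() and validate() see only ITensorInfo, and the actual tensors are bound at run time through an ITensorPack, exactly as CLDirectConvolutionLayer::run() does above. A hedged sketch of driving ClDirectConvolution directly (internal, in-tree API; the tensor setup from the earlier example is assumed):

    #include "arm_compute/core/CL/CLKernelLibrary.h"
    #include "arm_compute/runtime/CL/CLTensor.h"
    #include "src/runtime/gpu/cl/operators/ClDirectConvolution.h"

    using namespace arm_compute;

    void run_direct_conv(CLTensor &src, CLTensor &weights, CLTensor &biases, CLTensor &dst,
                         const PadStrideInfo &conv_info, const ActivationLayerInfo &act_info)
    {
        // Configure once on tensor metadata (ITensorInfo), not on tensor memory
        opencl::ClDirectConvolution op;
        op.configure(CLKernelLibrary::get().get_compile_context(),
                     src.info(), weights.info(), biases.info(), dst.info(),
                     conv_info, act_info);

        // Bind the actual tensors at run time through a pack
        ITensorPack pack;
        pack.add_tensor(TensorType::ACL_SRC, &src);
        pack.add_tensor(TensorType::ACL_SRC_1, &weights);
        pack.add_tensor(TensorType::ACL_SRC_2, &biases);
        pack.add_tensor(TensorType::ACL_DST, &dst);
        op.run(pack); // enqueues border handler, convolution and, if fused, activation
    }
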