-rw-r--r--  Android.bp                                                      |   5
-rw-r--r--  arm_compute/runtime/CL/CLScheduler.h                            |  13
-rw-r--r--  arm_compute/runtime/CL/functions/CLDirectConvolutionLayer.h    |  25
-rw-r--r--  arm_compute/runtime/CL/tuners/BifrostTuner.h                    |  44
-rw-r--r--  arm_compute/runtime/CL/tuners/MidgardTuner.h                    |  44
-rw-r--r--  arm_compute/runtime/CL/tuners/Tuners.h                          |  56
-rw-r--r--  docs/00_introduction.dox                                        |   4
-rw-r--r--  src/core/CL/CLKernels.h                                         |   1
-rw-r--r--  src/core/CL/kernels/CLDirectConvolutionLayerKernel.h           | 126
-rw-r--r--  src/core/gpu/cl/kernels/ClDirectConvolutionKernel.cpp (renamed from src/core/CL/kernels/CLDirectConvolutionLayerKernel.cpp) | 250
-rw-r--r--  src/core/gpu/cl/kernels/ClDirectConvolutionKernel.h             |  97
-rw-r--r--  src/core/gpu/cl/kernels/ClScaleKernel.cpp                       |  23
-rw-r--r--  src/core/gpu/cl/kernels/ClScaleKernel.h                         |  23
-rw-r--r--  src/runtime/CL/CLScheduler.cpp                                  |  10
-rw-r--r--  src/runtime/CL/functions/CLDirectConvolutionLayer.cpp          |  82
-rw-r--r--  src/runtime/CL/tuners/BifrostTuner.cpp                          | 305
-rw-r--r--  src/runtime/CL/tuners/MidgardTuner.cpp                          |  82
-rw-r--r--  src/runtime/gpu/cl/operators/ClDirectConvolution.cpp            | 102
-rw-r--r--  src/runtime/gpu/cl/operators/ClDirectConvolution.h              |  92
-rw-r--r--  tests/validation/CL/UNIT/Tuner.cpp                              |  78
20 files changed, 485 insertions(+), 977 deletions(-)
diff --git a/Android.bp b/Android.bp
index 8175dc69a2..8560de3e89 100644
--- a/Android.bp
+++ b/Android.bp
@@ -99,7 +99,6 @@ cc_library_static {
"src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NHWCKernel.cpp",
"src/core/CL/kernels/CLDepthwiseConvolutionLayerNativeKernel.cpp",
"src/core/CL/kernels/CLDepthwiseConvolutionLayerReshapeWeightsKernel.cpp",
- "src/core/CL/kernels/CLDirectConvolutionLayerKernel.cpp",
"src/core/CL/kernels/CLFFTDigitReverseKernel.cpp",
"src/core/CL/kernels/CLFFTRadixStageKernel.cpp",
"src/core/CL/kernels/CLFFTScaleKernel.cpp",
@@ -375,6 +374,7 @@ cc_library_static {
"src/core/gpu/cl/kernels/ClCropKernel.cpp",
"src/core/gpu/cl/kernels/ClDepthConcatenateKernel.cpp",
"src/core/gpu/cl/kernels/ClDequantizationKernel.cpp",
+ "src/core/gpu/cl/kernels/ClDirectConvolutionKernel.cpp",
"src/core/gpu/cl/kernels/ClElementwiseKernel.cpp",
"src/core/gpu/cl/kernels/ClElementwiseUnaryKernel.cpp",
"src/core/gpu/cl/kernels/ClFillKernel.cpp",
@@ -523,9 +523,7 @@ cc_library_static {
"src/runtime/CL/mlgo/MLGOHeuristics.cpp",
"src/runtime/CL/mlgo/MLGOParser.cpp",
"src/runtime/CL/mlgo/Utils.cpp",
- "src/runtime/CL/tuners/BifrostTuner.cpp",
"src/runtime/CL/tuners/CLTuningParametersList.cpp",
- "src/runtime/CL/tuners/MidgardTuner.cpp",
"src/runtime/CPP/CPPScheduler.cpp",
"src/runtime/CPP/ICPPSimpleFunction.cpp",
"src/runtime/CPP/SingleThreadScheduler.cpp",
@@ -677,6 +675,7 @@ cc_library_static {
"src/runtime/gpu/cl/operators/ClCopy.cpp",
"src/runtime/gpu/cl/operators/ClCrop.cpp",
"src/runtime/gpu/cl/operators/ClDequantization.cpp",
+ "src/runtime/gpu/cl/operators/ClDirectConvolution.cpp",
"src/runtime/gpu/cl/operators/ClElementwiseOperations.cpp",
"src/runtime/gpu/cl/operators/ClElementwiseUnary.cpp",
"src/runtime/gpu/cl/operators/ClFill.cpp",
diff --git a/arm_compute/runtime/CL/CLScheduler.h b/arm_compute/runtime/CL/CLScheduler.h
index d3a91da751..41a074089e 100644
--- a/arm_compute/runtime/CL/CLScheduler.h
+++ b/arm_compute/runtime/CL/CLScheduler.h
@@ -165,13 +165,12 @@ private:
/** Flag to ensure symbols initialisation is happening before Scheduler creation */
static std::once_flag _initialize_symbols;
- cl::Context _context;
- cl::CommandQueue _queue;
- GPUTarget _target;
- bool _is_initialised;
- ICLTuner *_cl_tuner;
- std::unique_ptr<ICLTuner> _cl_default_static_tuner;
- CLGEMMHeuristicsHandle *_gemm_heuristics;
+ cl::Context _context;
+ cl::CommandQueue _queue;
+ GPUTarget _target;
+ bool _is_initialised;
+ ICLTuner *_cl_tuner;
+ CLGEMMHeuristicsHandle *_gemm_heuristics;
};
} // namespace arm_compute
#endif /* ARM_COMPUTE_CLSCHEDULER_H */
diff --git a/arm_compute/runtime/CL/functions/CLDirectConvolutionLayer.h b/arm_compute/runtime/CL/functions/CLDirectConvolutionLayer.h
index 0afc9d3f38..6e9e2161b9 100644
--- a/arm_compute/runtime/CL/functions/CLDirectConvolutionLayer.h
+++ b/arm_compute/runtime/CL/functions/CLDirectConvolutionLayer.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 Arm Limited.
+ * Copyright (c) 2017-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -33,8 +33,6 @@
namespace arm_compute
{
class CLCompileContext;
-class CLDirectConvolutionLayerKernel;
-class CLFillBorderKernel;
class ICLTensor;
class ITensorInfo;
@@ -43,14 +41,18 @@ class ITensorInfo;
class CLDirectConvolutionLayer : public IFunction
{
public:
- /** Default constructor */
+ /** Constructor */
CLDirectConvolutionLayer();
- /** Prevent instances of this class from being copied */
+ /** Destructor */
+ ~CLDirectConvolutionLayer();
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
CLDirectConvolutionLayer(const CLDirectConvolutionLayer &) = delete;
- /** Prevent instances of this class from being copied */
+ /** Default move constructor */
+ CLDirectConvolutionLayer(CLDirectConvolutionLayer &&);
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
CLDirectConvolutionLayer &operator=(const CLDirectConvolutionLayer &) = delete;
- /** Default destructor */
- ~CLDirectConvolutionLayer();
+ /** Default move assignment operator */
+ CLDirectConvolutionLayer &operator=(CLDirectConvolutionLayer &&);
/** Set the input and output tensors.
*
* @param[in] input Source tensor. 3 lower dimensions represent a single input [width, height, IFM],
@@ -103,11 +105,8 @@ public:
void run() override;
private:
- std::unique_ptr<CLDirectConvolutionLayerKernel> _direct_conv_kernel;
- std::unique_ptr<CLFillBorderKernel> _input_border_handler;
- CLActivationLayer _activationlayer_function;
-
- bool _is_activationlayer_enabled;
+ struct Impl;
+ std::unique_ptr<Impl> _impl;
};
}
#endif /* ARM_COMPUTE_CLDIRECTCONVOLUTIONLAYER_H */
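
The function above now follows the PImpl idiom used by the other ported CL functions: all state moves behind an opaque Impl struct so the public header no longer pulls in kernel types. Below is a minimal sketch of the pattern, assuming the ClDirectConvolution operator introduced later in this patch; the authoritative layout lives in CLDirectConvolutionLayer.cpp and may differ.

    // Sketch only (inside namespace arm_compute); member names are assumptions.
    struct CLDirectConvolutionLayer::Impl
    {
        const ICLTensor                             *src{ nullptr };
        const ICLTensor                             *weights{ nullptr };
        const ICLTensor                             *biases{ nullptr };
        ICLTensor                                   *dst{ nullptr };
        std::unique_ptr<opencl::ClDirectConvolution> op{ nullptr };
    };

    void CLDirectConvolutionLayer::run()
    {
        // Repack the cached tensors on every call; the operator itself is stateless.
        ITensorPack pack;
        pack.add_const_tensor(TensorType::ACL_SRC_0, _impl->src);
        pack.add_const_tensor(TensorType::ACL_SRC_1, _impl->weights);
        pack.add_const_tensor(TensorType::ACL_SRC_2, _impl->biases);
        pack.add_tensor(TensorType::ACL_DST, _impl->dst);
        _impl->op->run(pack);
    }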
diff --git a/arm_compute/runtime/CL/tuners/BifrostTuner.h b/arm_compute/runtime/CL/tuners/BifrostTuner.h
deleted file mode 100644
index 237693fb88..0000000000
--- a/arm_compute/runtime/CL/tuners/BifrostTuner.h
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * Copyright (c) 2018-2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_TUNERS_BIFROST_TUNER_H
-#define ARM_COMPUTE_TUNERS_BIFROST_TUNER_H
-
-#include "arm_compute/runtime/CL/ICLTuner.h"
-
-namespace arm_compute
-{
-namespace tuners
-{
-/** Bifrost based OpenCL tuner implementation */
-class BifrostTuner final : public ICLTuner
-{
-public:
- // Inherited overriden methods
- void tune_kernel_static(ICLKernel &kernel) override;
- void tune_kernel_dynamic(ICLKernel &kernel) override;
- void tune_kernel_dynamic(ICLKernel &kernel, ITensorPack &tensors) override;
-};
-} // namespace tuners
-} // namespace arm_compute
-#endif /*ARM_COMPUTE_TUNERS_BIFROST_TUNER_H */
diff --git a/arm_compute/runtime/CL/tuners/MidgardTuner.h b/arm_compute/runtime/CL/tuners/MidgardTuner.h
deleted file mode 100644
index 86d46044c2..0000000000
--- a/arm_compute/runtime/CL/tuners/MidgardTuner.h
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * Copyright (c) 2018-2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_TUNERS_MIDGARD_TUNER_H
-#define ARM_COMPUTE_TUNERS_MIDGARD_TUNER_H
-
-#include "arm_compute/runtime/CL/ICLTuner.h"
-
-namespace arm_compute
-{
-namespace tuners
-{
-/** Midgard based OpenCL tuner implementation */
-class MidgardTuner final : public ICLTuner
-{
-public:
- // Inherited overriden methods
- void tune_kernel_static(ICLKernel &kernel) override;
- void tune_kernel_dynamic(ICLKernel &kernel) override;
- void tune_kernel_dynamic(ICLKernel &kernel, ITensorPack &tensors) override;
-};
-} // namespace tuners
-} // namespace arm_compute
-#endif /*ARM_COMPUTE_TUNERS_MIDGARD_TUNER_H */
diff --git a/arm_compute/runtime/CL/tuners/Tuners.h b/arm_compute/runtime/CL/tuners/Tuners.h
deleted file mode 100644
index 3ba9e0071d..0000000000
--- a/arm_compute/runtime/CL/tuners/Tuners.h
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- * Copyright (c) 2018-2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_TUNERS_H
-#define ARM_COMPUTE_TUNERS_H
-
-#include "arm_compute/runtime/CL/tuners/BifrostTuner.h"
-#include "arm_compute/runtime/CL/tuners/MidgardTuner.h"
-
-#include <memory>
-
-namespace arm_compute
-{
-namespace tuners
-{
-/** Tuner factory class */
-class TunerFactory final
-{
-public:
- static std::unique_ptr<ICLTuner> create_tuner(GPUTarget target)
- {
- GPUTarget arch = get_arch_from_target(target);
- switch(arch)
- {
- case GPUTarget::BIFROST:
- return std::make_unique<BifrostTuner>();
- case GPUTarget::MIDGARD:
- return std::make_unique<MidgardTuner>();
- default:
- return nullptr;
- }
- }
-};
-} // namespace tuners
-} // namespace arm_compute
-#endif /*ARM_COMPUTE_TUNERS_H */
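
The deleted TunerFactory was the only place the library auto-selected a static tuner. A client that still wants that behaviour can carry an equivalent helper itself; this sketch mirrors the removed code and assumes the deleted BifrostTuner/MidgardTuner sources are vendored alongside it, since this patch removes them from the library too.

    // Sketch: compiles only against vendored copies of the removed tuners.
    std::unique_ptr<arm_compute::ICLTuner> create_static_tuner(arm_compute::GPUTarget target)
    {
        using namespace arm_compute;
        switch(get_arch_from_target(target))
        {
            case GPUTarget::BIFROST:
                return std::make_unique<tuners::BifrostTuner>(); // vendored copy
            case GPUTarget::MIDGARD:
                return std::make_unique<tuners::MidgardTuner>(); // vendored copy
            default:
                return nullptr;
        }
    }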
diff --git a/docs/00_introduction.dox b/docs/00_introduction.dox
index 67702c2185..913f76cf5b 100644
--- a/docs/00_introduction.dox
+++ b/docs/00_introduction.dox
@@ -188,7 +188,7 @@ v21.02 Public major release
- @ref cpu::kernels::CpuLogits1DMaxKernel
- @ref cpu::kernels::CpuElementwiseUnaryKernel
- Remove padding from OpenCL kernels:
- - @ref CLDirectConvolutionLayerKernel
+ - CLDirectConvolutionLayerKernel
- @ref CLArgMinMaxLayerKernel
- @ref CLPadLayerKernel
- @ref CLROIAlignLayerKernel
@@ -1307,7 +1307,7 @@ v17.09 Public major release
- New OpenCL kernels / functions:
- @ref CLDepthwiseConvolutionLayer3x3NCHWKernel @ref CLDepthwiseConvolutionLayer3x3NHWCKernel CLDepthwiseIm2ColKernel CLDepthwiseVectorToTensorKernel CLDepthwiseWeightsReshapeKernel / CLDepthwiseConvolutionLayer3x3 @ref CLDepthwiseConvolutionLayer CLDepthwiseSeparableConvolutionLayer
- CLDequantizationLayerKernel / CLDequantizationLayer
- - @ref CLDirectConvolutionLayerKernel / @ref CLDirectConvolutionLayer
+ - CLDirectConvolutionLayerKernel / @ref CLDirectConvolutionLayer
- CLFlattenLayer
- CLFloorKernel / @ref CLFloor
- CLGEMMTranspose1xW
diff --git a/src/core/CL/CLKernels.h b/src/core/CL/CLKernels.h
index 0ec573c1a6..eaac415bc4 100644
--- a/src/core/CL/CLKernels.h
+++ b/src/core/CL/CLKernels.h
@@ -42,7 +42,6 @@
#include "src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NHWCKernel.h"
#include "src/core/CL/kernels/CLDepthwiseConvolutionLayerNativeKernel.h"
#include "src/core/CL/kernels/CLDepthwiseConvolutionLayerReshapeWeightsKernel.h"
-#include "src/core/CL/kernels/CLDirectConvolutionLayerKernel.h"
#include "src/core/CL/kernels/CLFFTDigitReverseKernel.h"
#include "src/core/CL/kernels/CLFFTRadixStageKernel.h"
#include "src/core/CL/kernels/CLFFTScaleKernel.h"
diff --git a/src/core/CL/kernels/CLDirectConvolutionLayerKernel.h b/src/core/CL/kernels/CLDirectConvolutionLayerKernel.h
deleted file mode 100644
index 0257d0c2dd..0000000000
--- a/src/core/CL/kernels/CLDirectConvolutionLayerKernel.h
+++ /dev/null
@@ -1,126 +0,0 @@
-/*
- * Copyright (c) 2017-2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_CLDIRECTCONVOLUTIONLAYERKERNEL_H
-#define ARM_COMPUTE_CLDIRECTCONVOLUTIONLAYERKERNEL_H
-
-#include "arm_compute/core/Types.h"
-#include "src/core/CL/ICLKernel.h"
-
-namespace arm_compute
-{
-class ICLTensor;
-
-/** Interface for the direct convolution kernel.
- */
-class CLDirectConvolutionLayerKernel : public ICLKernel
-{
-public:
- /** Default constructor */
- CLDirectConvolutionLayerKernel();
- /** Prevent instances of this class from being copied (As this class contains pointers) */
- CLDirectConvolutionLayerKernel(const CLDirectConvolutionLayerKernel &) = delete;
- /** Prevent instances of this class from being copied (As this class contains pointers) */
- CLDirectConvolutionLayerKernel &operator=(const CLDirectConvolutionLayerKernel &) = delete;
- /** Allow instances of this class to be moved */
- CLDirectConvolutionLayerKernel(CLDirectConvolutionLayerKernel &&) = default;
- /** Allow instances of this class to be moved */
- CLDirectConvolutionLayerKernel &operator=(CLDirectConvolutionLayerKernel &&) = default;
- /** Default destructor */
- ~CLDirectConvolutionLayerKernel() = default;
- /** Set the input, weights, biases and output tensors.
- *
- * @note: DirectConvolution only works in the following configurations:
- * 1x1 convolution with stride_x = 1/2/3, stride_y = 1/2/3
- * 3x3 convolution with stride_x = 1/2, stride_y = 1/2
- * 5x5 convolution with stride_x = 1/2, stride_y = 1/2
- * 9x9 convolution with stride_x = 1/2, stride_y = 1/2
- *
- * @param[in] input The input tensor to convolve. 3 lower dimensions represent a single input [width, height, IFM],
- * while every optional dimension from 4 and above represent a batch of inputs. Data types supported: QASYMM8_SIGNED/QASYMM8/F16/F32.
- * @param[in] weights Weights tensor. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM].
- * The 3rd dimension must be the same as the input's volume 3rd dimension.
- * Data type supported:Same as @p input.
- * @param[in] biases Biases tensor. Biases are 1D tensor with dimension [OFM].
- * Data type supported: Should match @p input data type, except for input of QASYMM8 and QASYMM8_SIGNED type where biases should be of S32 type
- * @param[out] output Output tensor.
- * The 3rd dimensions must be equal to the 4th dimension of the @p kernels tensor. Data types supported: Same as @p input.
- * @param[in] conv_info Contains padding and stride information described in @ref PadStrideInfo.
- */
- void configure(const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info);
- /** Set the input, weights, biases and output tensors.
- *
- * @note: DirectConvolution only works in the following configurations:
- * 1x1 convolution with stride_x = 1/2/3, stride_y = 1/2/3
- * 3x3 convolution with stride_x = 1/2, stride_y = 1/2
- * 5x5 convolution with stride_x = 1/2, stride_y = 1/2
- * 9x9 convolution with stride_x = 1/2, stride_y = 1/2
- *
- * @param[in] compile_context The compile context to be used.
- * @param[in] input The input tensor to convolve. 3 lower dimensions represent a single input [width, height, IFM],
- * while every optional dimension from 4 and above represent a batch of inputs. Data types supported: QASYMM8_SIGNED/QASYMM8/F16/F32.
- * @param[in] weights Weights tensor. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM].
- * The 3rd dimension must be the same as the input's volume 3rd dimension.
- * Data type supported:Same as @p input.
- * @param[in] biases Biases tensor. Biases are 1D tensor with dimension [OFM].
- * Data type supported: Should match @p input data type, except for input of QASYMM8 and QASYMM8_SIGNED type where biases should be of S32 type
- * @param[out] output Output tensor.
- * The 3rd dimensions must be equal to the 4th dimension of the @p kernels tensor. Data types supported: Same as @p input.
- * @param[in] conv_info Contains padding and stride information described in @ref PadStrideInfo.
- */
- void configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info);
- /** Static function to check if given info will lead to a valid configuration of @ref CLDirectConvolutionLayerKernel
- *
- * @param[in] input The input tensor to convolve. 3 lower dimensions represent a single input [width, height, IFM],
- * while every optional dimension from 4 and above represent a batch of inputs. Data types supported: QASYMM8_SIGNED/QASYMM8/F16/F32.
- * @param[in] weights Weights tensor. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM].
- * The 3rd dimension must be the same as the input's volume 3rd dimension.
- * Data type supported:Same as @p input.
- * @param[in] biases Biases tensor. Biases are 1D tensor with dimension [OFM].
- * Data type supported: Should match @p input data type, except for input of QASYMM8 and QASYMM8_SIGNED type where biases should be of S32 type.
- * @param[in] output Output tensor.
- * The 3rd dimensions must be equal to the 4th dimension of the @p kernels tensor. Data types supported: Same as @p input.
- * @param[in] conv_info Contains padding and stride information described in @ref PadStrideInfo.
- * @param[in] target Target GPU architecture.
- *
- * @return a status
- */
- static Status validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info, const GPUTarget target);
-
- // Inherited methods overridden:
- void run(const Window &window, cl::CommandQueue &queue) override;
- BorderSize border_size() const override;
-
-public:
- const ICLTensor *_input;
- const ICLTensor *_biases;
- const ICLTensor *_weights;
- ICLTensor *_output;
- DataLayout _data_layout;
- BorderSize _border_size;
- int _conv_stride_x;
- int _conv_stride_y;
- PadStrideInfo _conv_info;
-};
-} // namespace arm_compute
-#endif /*ARM_COMPUTE_CLDIRECTCONVOLUTIONLAYERKERNEL_H */
diff --git a/src/core/CL/kernels/CLDirectConvolutionLayerKernel.cpp b/src/core/gpu/cl/kernels/ClDirectConvolutionKernel.cpp
index 2fc3c60f67..f071dbc468 100644
--- a/src/core/CL/kernels/CLDirectConvolutionLayerKernel.cpp
+++ b/src/core/gpu/cl/kernels/ClDirectConvolutionKernel.cpp
@@ -21,7 +21,7 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#include "src/core/CL/kernels/CLDirectConvolutionLayerKernel.h"
+#include "src/core/gpu/cl/kernels/ClDirectConvolutionKernel.h"
#include "arm_compute/core/CL/CLHelpers.h"
#include "arm_compute/core/CL/CLKernelLibrary.h"
@@ -36,26 +36,31 @@
#include "src/core/CL/CLValidate.h"
#include "src/core/helpers/AutoConfiguration.h"
#include "src/core/helpers/WindowHelpers.h"
+#include "support/Cast.h"
#include "support/StringSupport.h"
namespace arm_compute
{
+namespace opencl
+{
+namespace kernels
+{
namespace
{
-Status validate_arguments(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info)
+Status validate_arguments(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *dst, const PadStrideInfo &conv_info)
{
- ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(input);
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8_SIGNED, DataType::QASYMM8, DataType::F16, DataType::F32);
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, weights);
+ ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(src);
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src, 1, DataType::QASYMM8_SIGNED, DataType::QASYMM8, DataType::F16, DataType::F32);
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src, weights);
- const DataLayout data_layout = input->data_layout();
+ const DataLayout data_layout = src->data_layout();
const int width_idx = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
const int height_idx = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
const int channel_idx = get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL);
ARM_COMPUTE_RETURN_ERROR_ON_MSG(weights->dimension(width_idx) != weights->dimension(height_idx), "Weights should have same width and height");
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(weights->dimension(channel_idx) != input->dimension(channel_idx),
- "Weights feature map dimension should match the respective input's one");
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(weights->dimension(channel_idx) != src->dimension(channel_idx),
+ "Weights feature map dimension should match the respective src's one");
ARM_COMPUTE_RETURN_ERROR_ON_MSG(weights->num_dimensions() > 4, "Weights can be at most 4 dimensional");
ARM_COMPUTE_RETURN_ERROR_ON_MSG((weights->dimension(width_idx) == 1) && std::get<0>(conv_info.stride()) > 3, "Strides larger than 3 not supported for 1x1 convolution.");
ARM_COMPUTE_RETURN_ERROR_ON_MSG((weights->dimension(width_idx) == 3 || weights->dimension(width_idx) == 5 || weights->dimension(width_idx) == 9)
@@ -64,7 +69,7 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *weights,
if(data_layout == DataLayout::NCHW)
{
- if(is_data_type_quantized(input->data_type()))
+ if(is_data_type_quantized(src->data_type()))
{
ARM_COMPUTE_RETURN_ERROR_ON_MSG(weights->dimension(width_idx) != 1 && weights->dimension(width_idx) != 3 && weights->dimension(width_idx) != 5 && weights->dimension(width_idx) != 9,
"Kernel sizes other than 1x1, 3x3, 5x5 or 9x9 are not supported with quantized data types");
@@ -78,7 +83,7 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *weights,
if(biases != nullptr)
{
- if(is_data_type_quantized_asymmetric(input->data_type()))
+ if(is_data_type_quantized_asymmetric(src->data_type()))
{
ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(biases, 1, DataType::S32);
}
@@ -87,25 +92,25 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *weights,
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(weights, biases);
}
ARM_COMPUTE_RETURN_ERROR_ON_MSG(biases->dimension(0) != weights->dimension(3),
- "Biases size and number of input feature maps should match");
+ "Biases size and number of src feature maps should match");
ARM_COMPUTE_RETURN_ERROR_ON_MSG(biases->num_dimensions() > 1,
"Biases should be one dimensional");
}
- // Checks performed when output is configured
- if(output->total_size() != 0)
+ // Checks performed when dst is configured
+ if(dst->total_size() != 0)
{
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(output->tensor_shape(),
- misc::shape_calculator::compute_deep_convolution_shape(*input, *weights, conv_info));
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(dst->tensor_shape(),
+ misc::shape_calculator::compute_deep_convolution_shape(*src, *weights, conv_info));
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src, dst);
}
- const auto data_type = input->data_type();
+ const auto data_type = src->data_type();
if(is_data_type_quantized(data_type))
{
- const UniformQuantizationInfo iqinfo = input->quantization_info().uniform();
+ const UniformQuantizationInfo iqinfo = src->quantization_info().uniform();
const UniformQuantizationInfo wqinfo = weights->quantization_info().uniform();
- const UniformQuantizationInfo oqinfo = output->quantization_info().uniform();
+ const UniformQuantizationInfo oqinfo = dst->quantization_info().uniform();
float multiplier = iqinfo.scale * wqinfo.scale / oqinfo.scale;
int output_multiplier = 0;
@@ -130,10 +135,10 @@ inline bool can_run_optimized_kernel_for_bifrost_nchw(GPUTarget gpu_target, unsi
inline void setup_num_elems_nchw(unsigned int &num_elems_read_per_iteration_x, unsigned int &num_elems_read_per_iteration_y,
unsigned int &num_elems_written_per_iteration_x, unsigned int &num_elems_written_per_iteration_y,
- unsigned int kernel_size, const PadStrideInfo &conv_info, const GPUTarget target, ITensorInfo *input)
+ unsigned int kernel_size, const PadStrideInfo &conv_info, const GPUTarget target, ITensorInfo *src)
{
- const DataType data_type = input->data_type();
- const DataLayout data_layout = input->data_layout();
+ const DataType data_type = src->data_type();
+ const DataLayout data_layout = src->data_layout();
unsigned int conv_stride_x = std::get<0>(conv_info.stride());
unsigned int conv_stride_y = std::get<1>(conv_info.stride());
@@ -191,7 +196,7 @@ inline void setup_num_elems_nchw(unsigned int &num_elems_read_per_iteration_x, u
num_elems_read_per_iteration_x = 16;
break;
case 3:
- switch(input->element_size())
+ switch(src->element_size())
{
case 1:
num_elems_read_per_iteration_x = 28;
@@ -255,26 +260,26 @@ inline void setup_num_elems_nchw(unsigned int &num_elems_read_per_iteration_x, u
}
}
-std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, ITensorInfo *weights, ITensorInfo *output, const PadStrideInfo &conv_info, const GPUTarget target)
+std::pair<Status, Window> validate_and_configure_window(ITensorInfo *src, ITensorInfo *weights, ITensorInfo *dst, const PadStrideInfo &conv_info, const GPUTarget target)
{
- const DataLayout data_layout = input->data_layout();
+ const DataLayout data_layout = src->data_layout();
- // Get output shape
- TensorShape output_shape = misc::shape_calculator::compute_deep_convolution_shape(*input, *weights, conv_info);
+ // Get dst shape
+ TensorShape output_shape = misc::shape_calculator::compute_deep_convolution_shape(*src, *weights, conv_info);
// Output auto initialization if not yet initialized
- auto_init_if_empty(*output, output_shape,
+ auto_init_if_empty(*dst, output_shape,
1,
- input->data_type(),
- input->quantization_info());
+ src->data_type(),
+ src->quantization_info());
if(data_layout == DataLayout::NHWC)
{
- const unsigned int vec_size = std::min(static_cast<unsigned int>(output->tensor_shape()[0]), 4u);
+ const unsigned int vec_size = std::min(static_cast<unsigned int>(dst->tensor_shape()[0]), 4u);
// Create window and update padding
- Window win = calculate_max_window(*output, Steps(vec_size, 1U));
- output->set_valid_region(ValidRegion(Coordinates(), output->tensor_shape()));
+ Window win = calculate_max_window(*dst, Steps(vec_size, 1U));
+ dst->set_valid_region(ValidRegion(Coordinates(), dst->tensor_shape()));
Status err = Status{};
return std::make_pair(err, win);
}
@@ -295,17 +300,17 @@ std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, ITen
setup_num_elems_nchw(num_elems_read_per_iteration_x, num_elems_read_per_iteration_y,
num_elems_written_per_iteration_x, num_elems_written_per_iteration_y,
- kernel_size, conv_info, target, input);
+ kernel_size, conv_info, target, src);
// Create window and update padding
bool window_changed = false;
- Window win = calculate_max_window(*output, Steps(num_elems_written_per_iteration_x, num_elems_written_per_iteration_y));
+ Window win = calculate_max_window(*dst, Steps(num_elems_written_per_iteration_x, num_elems_written_per_iteration_y));
- AccessWindowRectangle input_access(input, -conv_pad_left, -conv_pad_top, num_elems_read_per_iteration_x, num_elems_read_per_iteration_y, conv_stride_x, conv_stride_y);
+ AccessWindowRectangle input_access(src, -conv_pad_left, -conv_pad_top, num_elems_read_per_iteration_x, num_elems_read_per_iteration_y, conv_stride_x, conv_stride_y);
AccessWindowStatic weights_access(weights, 0, 0, kernel_size, kernel_size);
- AccessWindowRectangle output_access(output, 0, 0, num_elems_written_per_iteration_x, num_elems_written_per_iteration_y);
+ AccessWindowRectangle output_access(dst, 0, 0, num_elems_written_per_iteration_x, num_elems_written_per_iteration_y);
window_changed = update_window_and_padding(win, input_access, weights_access, output_access);
- output_access.set_valid_region(win, ValidRegion(Coordinates(), output->tensor_shape()));
+ output_access.set_valid_region(win, ValidRegion(Coordinates(), dst->tensor_shape()));
Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
return std::make_pair(err, win);
}
@@ -316,52 +321,39 @@ std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, ITen
}
} // namespace
-CLDirectConvolutionLayerKernel::CLDirectConvolutionLayerKernel()
- : _input(nullptr), _biases(nullptr), _weights(nullptr), _output(nullptr), _data_layout(DataLayout::UNKNOWN), _border_size(0), _conv_stride_x(0), _conv_stride_y(0), _conv_info()
-{
-}
-
-BorderSize CLDirectConvolutionLayerKernel::border_size() const
+BorderSize ClDirectConvolutionKernel::border_size() const
{
return _border_size;
}
-void CLDirectConvolutionLayerKernel::configure(const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info)
+void ClDirectConvolutionKernel::configure(const CLCompileContext &compile_context, ITensorInfo *src, ITensorInfo *weights, ITensorInfo *biases, ITensorInfo *dst,
+ const PadStrideInfo &conv_info)
{
- configure(CLKernelLibrary::get().get_compile_context(), input, weights, biases, output, conv_info);
-}
-
-void CLDirectConvolutionLayerKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output,
- const PadStrideInfo &conv_info)
-{
- ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights, output);
+ ARM_COMPUTE_ERROR_ON_NULLPTR(src, weights, dst);
// Perform validation
- ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(),
- weights->info(),
- (biases != nullptr) ? biases->info() : nullptr,
- output->info(),
+ ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(src,
+ weights,
+ (biases != nullptr) ? biases : nullptr,
+ dst,
conv_info));
- _conv_stride_x = std::get<0>(conv_info.stride());
- _conv_stride_y = std::get<1>(conv_info.stride());
- _data_layout = input->info()->data_layout();
- _input = input;
- _weights = weights;
- _output = output;
- _biases = biases;
- _conv_info = conv_info;
+ const int conv_stride_x = std::get<0>(conv_info.stride());
+ const int conv_stride_y = std::get<1>(conv_info.stride());
+
+ _data_layout = src->data_layout();
+ _conv_info = conv_info;
const unsigned int width_idx = get_data_layout_dimension_index(_data_layout, DataLayoutDimension::WIDTH);
const unsigned int height_idx = get_data_layout_dimension_index(_data_layout, DataLayoutDimension::HEIGHT);
const unsigned int channel_idx = get_data_layout_dimension_index(_data_layout, DataLayoutDimension::CHANNEL);
- const unsigned int kernel_size = weights->info()->dimension(width_idx);
- const DataType data_type = input->info()->data_type();
+ const unsigned int kernel_size = weights->dimension(width_idx);
+ const DataType data_type = src->data_type();
const GPUTarget gpu_target = get_target();
// Configure kernel window
- auto win_config = validate_and_configure_window(input->info(), weights->info(), output->info(), conv_info, gpu_target);
+ auto win_config = validate_and_configure_window(src, weights, dst, conv_info, gpu_target);
ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
ICLKernel::configure_internal(win_config.second);
@@ -376,30 +368,30 @@ void CLDirectConvolutionLayerKernel::configure(const CLCompileContext &compile_c
const unsigned int n0 = win_config.second.x().step();
const unsigned int m0 = win_config.second.y().step();
- const unsigned int k0 = adjust_vec_size(16u, _input->info()->dimension(channel_idx));
- const unsigned int partial_store_n0 = _output->info()->dimension(channel_idx) % n0;
- const unsigned int partial_store_m0 = (_output->info()->dimension(width_idx) * _output->info()->dimension(height_idx)) % m0;
+ const unsigned int k0 = adjust_vec_size(16u, src->dimension(channel_idx));
+ const unsigned int partial_store_n0 = dst->dimension(channel_idx) % n0;
+ const unsigned int partial_store_m0 = (dst->dimension(width_idx) * dst->dimension(height_idx)) % m0;
const unsigned int pad_left = conv_info.pad_left();
const unsigned int pad_top = conv_info.pad_top();
- if(_biases != nullptr)
+ if(biases != nullptr)
{
build_options.add_option(std::string("-DHAS_BIAS"));
- build_options.add_option(std::string("-DBIA_DATA_TYPE=" + get_cl_type_from_data_type(_biases->info()->data_type())));
+ build_options.add_option(std::string("-DBIA_DATA_TYPE=" + get_cl_type_from_data_type(biases->data_type())));
}
- build_options.add_option("-DSRC_WIDTH=" + support::cpp11::to_string(_input->info()->dimension(width_idx)));
- build_options.add_option("-DSRC_HEIGHT=" + support::cpp11::to_string(_input->info()->dimension(height_idx)));
- build_options.add_option("-DSRC_CHANNELS=" + support::cpp11::to_string(_input->info()->dimension(channel_idx)));
- build_options.add_option("-DSRC_DATA_TYPE=" + get_cl_type_from_data_type(_input->info()->data_type()));
- build_options.add_option("-DDST_WIDTH=" + support::cpp11::to_string(_output->info()->dimension(width_idx)));
- build_options.add_option("-DDST_HEIGHT=" + support::cpp11::to_string(_output->info()->dimension(height_idx)));
- build_options.add_option("-DDST_CHANNELS=" + support::cpp11::to_string(_output->info()->dimension(channel_idx)));
- build_options.add_option("-DDST_DATA_TYPE=" + get_cl_type_from_data_type(_output->info()->data_type()));
- build_options.add_option("-DWEI_WIDTH=" + support::cpp11::to_string(_weights->info()->dimension(width_idx)));
- build_options.add_option("-DWEI_HEIGHT=" + support::cpp11::to_string(_weights->info()->dimension(height_idx)));
- build_options.add_option("-DWEI_DATA_TYPE=" + get_cl_type_from_data_type(_weights->info()->data_type()));
- build_options.add_option("-DSTRIDE_X=" + support::cpp11::to_string(_conv_stride_x));
- build_options.add_option("-DSTRIDE_Y=" + support::cpp11::to_string(_conv_stride_y));
+ build_options.add_option("-DSRC_WIDTH=" + support::cpp11::to_string(src->dimension(width_idx)));
+ build_options.add_option("-DSRC_HEIGHT=" + support::cpp11::to_string(src->dimension(height_idx)));
+ build_options.add_option("-DSRC_CHANNELS=" + support::cpp11::to_string(src->dimension(channel_idx)));
+ build_options.add_option("-DSRC_DATA_TYPE=" + get_cl_type_from_data_type(src->data_type()));
+ build_options.add_option("-DDST_WIDTH=" + support::cpp11::to_string(dst->dimension(width_idx)));
+ build_options.add_option("-DDST_HEIGHT=" + support::cpp11::to_string(dst->dimension(height_idx)));
+ build_options.add_option("-DDST_CHANNELS=" + support::cpp11::to_string(dst->dimension(channel_idx)));
+ build_options.add_option("-DDST_DATA_TYPE=" + get_cl_type_from_data_type(dst->data_type()));
+ build_options.add_option("-DWEI_WIDTH=" + support::cpp11::to_string(weights->dimension(width_idx)));
+ build_options.add_option("-DWEI_HEIGHT=" + support::cpp11::to_string(weights->dimension(height_idx)));
+ build_options.add_option("-DWEI_DATA_TYPE=" + get_cl_type_from_data_type(weights->data_type()));
+ build_options.add_option("-DSTRIDE_X=" + support::cpp11::to_string(conv_stride_x));
+ build_options.add_option("-DSTRIDE_Y=" + support::cpp11::to_string(conv_stride_y));
build_options.add_option("-DPAD_LEFT=" + support::cpp11::to_string(pad_left));
build_options.add_option("-DPAD_TOP=" + support::cpp11::to_string(pad_top));
build_options.add_option("-DN0=" + support::cpp11::to_string(n0));
@@ -410,11 +402,11 @@ void CLDirectConvolutionLayerKernel::configure(const CLCompileContext &compile_c
if(is_data_type_quantized(data_type))
{
- const UniformQuantizationInfo iqinfo = _input->info()->quantization_info().uniform();
- const UniformQuantizationInfo wqinfo = _weights->info()->quantization_info().uniform();
- const UniformQuantizationInfo oqinfo = _output->info()->quantization_info().uniform();
+ const UniformQuantizationInfo iqinfo = src->quantization_info().uniform();
+ const UniformQuantizationInfo wqinfo = weights->quantization_info().uniform();
+ const UniformQuantizationInfo oqinfo = dst->quantization_info().uniform();
- PixelValue zero_value = PixelValue(0, input->info()->data_type(), input->info()->quantization_info());
+ PixelValue zero_value = PixelValue(0, src->data_type(), src->quantization_info());
int zero_value_s32;
zero_value.get(zero_value_s32);
@@ -441,17 +433,17 @@ void CLDirectConvolutionLayerKernel::configure(const CLCompileContext &compile_c
}
else
{
- _border_size = BorderSize(_input->info()->padding());
+ _border_size = BorderSize(src->padding());
kernel_name << "direct_convolution" << kernel_size << "x" << kernel_size;
- build_options.add_option_if(_biases != nullptr, std::string("-DHAS_BIAS"));
+ build_options.add_option_if(biases != nullptr, std::string("-DHAS_BIAS"));
- const bool run_optimized_for_bifrost = can_run_optimized_kernel_for_bifrost_nchw(gpu_target, _conv_stride_x, _conv_stride_y, kernel_size, data_type, _data_layout);
+ const bool run_optimized_for_bifrost = can_run_optimized_kernel_for_bifrost_nchw(gpu_target, conv_stride_x, conv_stride_y, kernel_size, data_type, _data_layout);
if(run_optimized_for_bifrost)
{
- build_options.add_option(std::string("-DWEIGHTS_DEPTH=" + support::cpp11::to_string(_weights->info()->dimension(channel_idx))));
+ build_options.add_option(std::string("-DWEIGHTS_DEPTH=" + support::cpp11::to_string(weights->dimension(channel_idx))));
kernel_name << "_f32_bifrost";
}
@@ -459,15 +451,15 @@ void CLDirectConvolutionLayerKernel::configure(const CLCompileContext &compile_c
{
build_options.add_option(std::string("-DDATA_TYPE=" + get_cl_type_from_data_type(data_type)));
build_options.add_option(std::string("-DDATA_SIZE=" + get_data_size_from_data_type(data_type)));
- build_options.add_option(std::string("-DWEIGHTS_DEPTH=" + support::cpp11::to_string(_weights->info()->dimension(channel_idx))));
- build_options.add_option(std::string("-DSTRIDE_X=" + support::cpp11::to_string(_conv_stride_x)));
+ build_options.add_option(std::string("-DWEIGHTS_DEPTH=" + support::cpp11::to_string(weights->dimension(channel_idx))));
+ build_options.add_option(std::string("-DSTRIDE_X=" + support::cpp11::to_string(conv_stride_x)));
build_options.add_option(std::string("-DDATA_TYPE_PROMOTED=" + get_cl_type_from_data_type(data_type)));
if(is_data_type_quantized(data_type))
{
- const UniformQuantizationInfo iqinfo = _input->info()->quantization_info().uniform();
- const UniformQuantizationInfo wqinfo = _weights->info()->quantization_info().uniform();
- const UniformQuantizationInfo oqinfo = _output->info()->quantization_info().uniform();
+ const UniformQuantizationInfo iqinfo = src->quantization_info().uniform();
+ const UniformQuantizationInfo wqinfo = weights->quantization_info().uniform();
+ const UniformQuantizationInfo oqinfo = dst->quantization_info().uniform();
float multiplier = iqinfo.scale * wqinfo.scale / oqinfo.scale;
int output_multiplier = 0;
@@ -502,27 +494,27 @@ void CLDirectConvolutionLayerKernel::configure(const CLCompileContext &compile_c
_config_id += "_";
_config_id += support::cpp11::to_string(border_size().bottom);
_config_id += "_";
- _config_id += support::cpp11::to_string(_conv_stride_x);
+ _config_id += support::cpp11::to_string(conv_stride_x);
_config_id += "_";
- _config_id += support::cpp11::to_string(_conv_stride_y);
+ _config_id += support::cpp11::to_string(conv_stride_y);
_config_id += "_";
- _config_id += support::cpp11::to_string(output->info()->dimension(width_idx));
+ _config_id += support::cpp11::to_string(dst->dimension(width_idx));
_config_id += "_";
- _config_id += support::cpp11::to_string(output->info()->dimension(height_idx));
+ _config_id += support::cpp11::to_string(dst->dimension(height_idx));
_config_id += "_";
_config_id += lower_string(string_from_data_layout(_data_layout));
}
-Status CLDirectConvolutionLayerKernel::validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info,
- const GPUTarget target)
+Status ClDirectConvolutionKernel::validate(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *dst, const PadStrideInfo &conv_info,
+ const GPUTarget target)
{
- ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, weights, biases, output, conv_info));
- ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(input->clone().get(), weights->clone().get(), output->clone().get(), conv_info, target).first);
+ ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(src, weights, biases, dst, conv_info));
+ ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(src->clone().get(), weights->clone().get(), dst->clone().get(), conv_info, target).first);
return Status{};
}
-void CLDirectConvolutionLayerKernel::run(const Window &window, cl::CommandQueue &queue)
+void ClDirectConvolutionKernel::run_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue)
{
ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(IKernel::window(), window);
@@ -530,20 +522,25 @@ void CLDirectConvolutionLayerKernel::run(const Window &window, cl::CommandQueue
// Get initial windows
Window slice = window.first_slice_window_3D();
+ const auto src = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC_0));
+ const auto weights = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC_1));
+ const auto biases = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC_2));
+ auto dst = utils::cast::polymorphic_downcast<ICLTensor *>(tensors.get_tensor(TensorType::ACL_DST));
+
if(_data_layout == DataLayout::NHWC)
{
- slice.set(Window::DimY, Window::Dimension(0, _output->info()->dimension(1) * _output->info()->dimension(2), 1));
- slice.set(Window::DimZ, Window::Dimension(0, _output->info()->dimension(3), 1));
+ slice.set(Window::DimY, Window::Dimension(0, dst->info()->dimension(1) * dst->info()->dimension(2), 1));
+ slice.set(Window::DimZ, Window::Dimension(0, dst->info()->dimension(3), 1));
unsigned int idx = 0;
- add_3D_tensor_argument(idx, _input, slice);
- add_3D_tensor_argument(idx, _output, slice);
- add_3D_tensor_argument(idx, _weights, slice);
- if(_biases != nullptr)
+ add_3D_tensor_argument(idx, src, slice);
+ add_3D_tensor_argument(idx, dst, slice);
+ add_3D_tensor_argument(idx, weights, slice);
+ if(biases != nullptr)
{
- add_1D_tensor_argument(idx, _biases, slice);
+ add_1D_tensor_argument(idx, biases, slice);
}
- _kernel.setArg(idx++, static_cast<unsigned int>(_weights->info()->strides_in_bytes()[3]));
+ _kernel.setArg(idx++, static_cast<unsigned int>(weights->info()->strides_in_bytes()[3]));
enqueue(queue, *this, slice, lws_hint());
}
else
@@ -556,30 +553,35 @@ void CLDirectConvolutionLayerKernel::run(const Window &window, cl::CommandQueue
const int width_idx = get_data_layout_dimension_index(_data_layout, DataLayoutDimension::WIDTH);
const int height_idx = get_data_layout_dimension_index(_data_layout, DataLayoutDimension::HEIGHT);
- win_in.set_dimension_step(width_idx, window[width_idx].step() * _conv_stride_x);
- win_in.set_dimension_step(height_idx, window[height_idx].step() * _conv_stride_y);
+ const int conv_stride_x = std::get<0>(_conv_info.stride());
+ const int conv_stride_y = std::get<1>(_conv_info.stride());
+
+ win_in.set_dimension_step(width_idx, window[width_idx].step() * conv_stride_x);
+ win_in.set_dimension_step(height_idx, window[height_idx].step() * conv_stride_y);
Window slice_in = win_in.first_slice_window_3D();
unsigned int idx1 = 2 * num_arguments_per_3D_tensor();
- add_3D_tensor_argument(idx1, _weights, slice);
+ add_3D_tensor_argument(idx1, weights, slice);
- if(_biases != nullptr)
+ if(biases != nullptr)
{
Window slice_biases;
- slice_biases.use_tensor_dimensions(_biases->info()->tensor_shape());
- add_1D_tensor_argument(idx1, _biases, slice_biases);
+ slice_biases.use_tensor_dimensions(biases->info()->tensor_shape());
+ add_1D_tensor_argument(idx1, biases, slice_biases);
}
- _kernel.setArg(idx1++, static_cast<unsigned int>(_weights->info()->strides_in_bytes()[3]));
+ _kernel.setArg(idx1++, static_cast<unsigned int>(weights->info()->strides_in_bytes()[3]));
do
{
unsigned int idx = 0;
- add_3D_tensor_argument(idx, _input, slice_in);
- add_3D_tensor_argument(idx, _output, slice);
+ add_3D_tensor_argument(idx, src, slice_in);
+ add_3D_tensor_argument(idx, dst, slice);
enqueue(queue, *this, slice, lws_hint());
}
while(window.slide_window_slice_3D(slice) && win_in.slide_window_slice_3D(slice_in));
}
}
+} // namespace kernels
+} // namespace opencl
} // namespace arm_compute
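
Because the ported kernel no longer caches tensor pointers, callers supply them per invocation through an ITensorPack, using the same slot IDs that run_op() reads back above. A hedged sketch of driving a configured kernel k directly; normally the ClDirectConvolution operator added later in this patch builds the pack, and enqueue_op is the existing tensor-pack entry point on CLScheduler.

    // Sketch, assuming using namespace arm_compute and CL tensors src/weights/biases/dst.
    ITensorPack pack;
    pack.add_const_tensor(TensorType::ACL_SRC_0, src);     // input
    pack.add_const_tensor(TensorType::ACL_SRC_1, weights); // weights
    pack.add_const_tensor(TensorType::ACL_SRC_2, biases);  // optional, may be nullptr
    pack.add_tensor(TensorType::ACL_DST, dst);             // output
    CLScheduler::get().enqueue_op(k, pack, true /* flush */);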
diff --git a/src/core/gpu/cl/kernels/ClDirectConvolutionKernel.h b/src/core/gpu/cl/kernels/ClDirectConvolutionKernel.h
new file mode 100644
index 0000000000..ff2f5619db
--- /dev/null
+++ b/src/core/gpu/cl/kernels/ClDirectConvolutionKernel.h
@@ -0,0 +1,97 @@
+/*
+ * Copyright (c) 2017-2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_CL_DIRECT_CONVOLUTION_KERNEL_H
+#define ARM_COMPUTE_CL_DIRECT_CONVOLUTION_KERNEL_H
+
+#include "src/core/common/Macros.h"
+#include "src/core/gpu/cl/ClCompileContext.h"
+#include "src/core/gpu/cl/IClKernel.h"
+
+namespace arm_compute
+{
+namespace opencl
+{
+namespace kernels
+{
+/** Interface for the direct convolution kernel.
+ */
+class ClDirectConvolutionKernel : public ICLKernel
+{
+public:
+ ClDirectConvolutionKernel() = default;
+ ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(ClDirectConvolutionKernel);
+ /** Set the src, weights, biases and dst tensors info.
+ *
+ * @note: Due to set_valid_region(), src/weights/biases cannot be const. This should be changed once set_valid_region() is removed.
+ *
+ * @note: DirectConvolution only works in the following configurations:
+ * 1x1 convolution with stride_x = 1/2/3, stride_y = 1/2/3
+ * 3x3 convolution with stride_x = 1/2, stride_y = 1/2
+ * 5x5 convolution with stride_x = 1/2, stride_y = 1/2
+ * 9x9 convolution with stride_x = 1/2, stride_y = 1/2
+ *
+ * @param[in] compile_context The compile context to be used.
+ * @param[in] src The src tensor info to convolve. 3 lower dimensions represent a single src [width, height, IFM],
+ * while every optional dimension from 4 and above represent a batch of inputs. Data types supported: QASYMM8_SIGNED/QASYMM8/F16/F32.
+ * @param[in] weights Weights tensor info. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM].
+ * The 3rd dimension must be the same as the src's volume 3rd dimension.
+ * Data type supported: Same as @p src.
+ * @param[in] biases Biases tensor info. Biases are 1D tensor with dimension [OFM].
+ * Data type supported: Should match @p src data type, except for src of QASYMM8 and QASYMM8_SIGNED type where biases should be of S32 type
+ * @param[out] dst Output tensor info.
+ * The 3rd dimensions must be equal to the 4th dimension of the @p kernels tensor. Data types supported: Same as @p src.
+ * @param[in] conv_info Contains padding and stride information described in @ref PadStrideInfo.
+ */
+ void configure(const CLCompileContext &compile_context, ITensorInfo *src, ITensorInfo *weights, ITensorInfo *biases, ITensorInfo *dst, const PadStrideInfo &conv_info);
+ /** Static function to check if given info will lead to a valid configuration of @ref ClDirectConvolutionKernel
+ *
+ * @param[in] src The src tensor info to convolve. 3 lower dimensions represent a single src [width, height, IFM],
+ * while every optional dimension from 4 and above represent a batch of inputs. Data types supported: QASYMM8_SIGNED/QASYMM8/F16/F32.
+ * @param[in] weights Weights tensor info. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM].
+ * The 3rd dimension must be the same as the src's volume 3rd dimension.
+ * Data type supported: Same as @p src.
+ * @param[in] biases Biases tensor info. Biases are 1D tensor with dimension [OFM].
+ * Data type supported: Should match @p src data type, except for src of QASYMM8 and QASYMM8_SIGNED type where biases should be of S32 type.
+ * @param[in] dst Output tensor info.
+ * The 3rd dimensions must be equal to the 4th dimension of the @p kernels tensor. Data types supported: Same as @p src.
+ * @param[in] conv_info Contains padding and stride information described in @ref PadStrideInfo.
+ * @param[in] target Target GPU architecture.
+ *
+ * @return a status
+ */
+ static Status validate(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *dst, const PadStrideInfo &conv_info, const GPUTarget target);
+
+ // Inherited methods overridden:
+ void run_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue) override;
+ BorderSize border_size() const override;
+
+public:
+ DataLayout _data_layout{};
+ BorderSize _border_size{};
+ PadStrideInfo _conv_info{};
+};
+} // namespace kernels
+} // namespace opencl
+} // namespace arm_compute
+#endif /*ARM_COMPUTE_CL_DIRECT_CONVOLUTION_KERNEL_H */
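
A short usage sketch for the new info-based interface; the *_info names are illustrative placeholders for the caller's ITensorInfo objects, and set_target()/get_target() are the existing ICLKernel hooks that feed the GPU-target-specific paths.

    // Sketch, assuming using namespace arm_compute: validate first, then
    // configure on tensor infos; the kernel keeps no tensor state.
    auto kernel = std::make_unique<opencl::kernels::ClDirectConvolutionKernel>();
    kernel->set_target(CLScheduler::get().target());
    ARM_COMPUTE_ERROR_THROW_ON(opencl::kernels::ClDirectConvolutionKernel::validate(
        src_info, weights_info, biases_info, dst_info, conv_info, kernel->get_target()));
    kernel->configure(CLKernelLibrary::get().get_compile_context(),
                      src_info, weights_info, biases_info, dst_info, conv_info);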
diff --git a/src/core/gpu/cl/kernels/ClScaleKernel.cpp b/src/core/gpu/cl/kernels/ClScaleKernel.cpp
index 0882f29135..7fb5d2a5d3 100644
--- a/src/core/gpu/cl/kernels/ClScaleKernel.cpp
+++ b/src/core/gpu/cl/kernels/ClScaleKernel.cpp
@@ -146,28 +146,26 @@ void ClScaleKernel::configure(const CLCompileContext &compile_context, ITensorIn
auto padding_info = get_padding_info({ src, dst });
// Info required for the static tuning
- _info = info;
- _data_type = src->data_type();
- _data_layout = _info.data_layout == DataLayout::UNKNOWN ? src->data_layout() : _info.data_layout;
+ _data_layout = info.data_layout == DataLayout::UNKNOWN ? src->data_layout() : info.data_layout;
float wr = 0.f;
float hr = 0.f;
- std::tie(wr, hr) = calculate_scale_factors(src, dst, _data_layout, _info.align_corners);
- const bool call_quantized_kernel = is_data_type_quantized_asymmetric(src->data_type()) && _info.interpolation_policy == InterpolationPolicy::BILINEAR;
+ std::tie(wr, hr) = calculate_scale_factors(src, dst, _data_layout, info.align_corners);
+ const bool call_quantized_kernel = is_data_type_quantized_asymmetric(src->data_type()) && info.interpolation_policy == InterpolationPolicy::BILINEAR;
// Compute actual border size
BorderSize border = border_size();
const bool is_nhwc = _data_layout == DataLayout::NHWC;
// Area interpolation behaves as Nearest Neighbour in case of up-sampling
- auto interpolation_policy_to_use = _info.interpolation_policy;
- if(_info.interpolation_policy == InterpolationPolicy::AREA && wr <= 1.f && hr <= 1.f)
+ auto interpolation_policy_to_use = info.interpolation_policy;
+ if(info.interpolation_policy == InterpolationPolicy::AREA && wr <= 1.f && hr <= 1.f)
{
interpolation_policy_to_use = InterpolationPolicy::NEAREST_NEIGHBOR;
}
// Configure kernel window
- auto win_config = validate_and_configure_window(src, dst, _info, border);
+ auto win_config = validate_and_configure_window(src, dst, info, border);
ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
ICLKernel::configure_internal(win_config.second);
@@ -178,7 +176,7 @@ void ClScaleKernel::configure(const CLCompileContext &compile_context, ITensorIn
build_opts.add_option("-DBORDER_SIZE=" + support::cpp11::to_string(border.right));
build_opts.add_option_if(info.border_mode == BorderMode::REPLICATE, "-DBORDER_MODE_REPLICATE");
build_opts.add_option_if(is_nhwc, "-DDEPTH_OUT=" + support::cpp11::to_string(dst->dimension(2)));
- build_opts.add_option_if_else(_info.sampling_policy == SamplingPolicy::CENTER, "-DSAMPLING_POLICY_CENTER", "-DSAMPLING_POLICY_TOP_LEFT");
+ build_opts.add_option_if_else(info.sampling_policy == SamplingPolicy::CENTER, "-DSAMPLING_POLICY_CENTER", "-DSAMPLING_POLICY_TOP_LEFT");
build_opts.add_option_if(info.align_corners, "-DALIGN_CORNERS");
if(call_quantized_kernel)
{
@@ -209,13 +207,10 @@ void ClScaleKernel::configure(const CLCompileContext &compile_context, ITensorIn
_kernel.setArg<float>(idx++, wr);
_kernel.setArg<float>(idx++, hr);
- // Set to enable static tuning
- _output_x_dim = dst->dimension(0);
-
// Set config_id for enabling LWS tuning
_config_id = "scale_";
- _config_id += (_info.border_mode == BorderMode::REPLICATE ? "Bord_rep" : "");
- _config_id += (_info.sampling_policy == SamplingPolicy::CENTER ? "center" : "topleft");
+ _config_id += (info.border_mode == BorderMode::REPLICATE ? "Bord_rep" : "");
+ _config_id += (info.sampling_policy == SamplingPolicy::CENTER ? "center" : "topleft");
_config_id += (is_nhwc ? "nhwc" : "nchw");
_config_id += "_";
_config_id += support::cpp11::to_string(dst->dimension(0));
diff --git a/src/core/gpu/cl/kernels/ClScaleKernel.h b/src/core/gpu/cl/kernels/ClScaleKernel.h
index b6eea0620b..10a1105f08 100644
--- a/src/core/gpu/cl/kernels/ClScaleKernel.h
+++ b/src/core/gpu/cl/kernels/ClScaleKernel.h
@@ -70,29 +70,8 @@ public:
BorderSize border_size() const override;
void run_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue) override;
- // Getter for interpolation policy
- InterpolationPolicy get_interpolation_policy() const
- {
- return _info.interpolation_policy;
- }
-
- // Getter for data type
- DataType get_data_type() const
- {
- return _data_type;
- }
-
- // Getter for output x dimension
- unsigned int get_output_x_dim() const
- {
- return _output_x_dim;
- }
-
private:
- ScaleKernelInfo _info = ScaleKernelInfo(InterpolationPolicy::NEAREST_NEIGHBOR, BorderMode::UNDEFINED);
- DataType _data_type = DataType::UNKNOWN;
- DataLayout _data_layout = DataLayout::UNKNOWN;
- unsigned int _output_x_dim = 0;
+ DataLayout _data_layout = DataLayout::UNKNOWN;
};
} // namespace kernels
} // namespace opencl
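
Editor's note: the pair of hunks above removes state (_info, _data_type, _output_x_dim) that ClScaleKernel cached solely so the static tuners could read it back through getters; configure() now consumes info as a local and only _data_layout survives. A minimal standalone sketch of that pattern, with hypothetical stand-in types (not ACL code):

#include <iostream>
#include <string>

// Hypothetical stand-in for ScaleKernelInfo.
struct KernelInfo
{
    bool center_sampling = true;
    int  output_width    = 480;
};

class Kernel
{
public:
    void configure(const KernelInfo &info)
    {
        // 'info' is consumed here and not cached: nothing is retained
        // for an external tuner to query after configuration.
        _config_id = std::string(info.center_sampling ? "center_" : "topleft_")
                     + std::to_string(info.output_width);
    }
    const std::string &config_id() const { return _config_id; }

private:
    std::string _config_id{}; // the only state kept past configure()
};

int main()
{
    Kernel k;
    k.configure(KernelInfo{});
    std::cout << k.config_id() << '\n'; // prints "center_480"
}
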
diff --git a/src/runtime/CL/CLScheduler.cpp b/src/runtime/CL/CLScheduler.cpp
index ef5cb03b32..f228cf6513 100644
--- a/src/runtime/CL/CLScheduler.cpp
+++ b/src/runtime/CL/CLScheduler.cpp
@@ -26,7 +26,6 @@
#include "arm_compute/core/CL/CLKernelLibrary.h"
#include "arm_compute/runtime/CL/CLHelpers.h"
#include "arm_compute/runtime/CL/CLTuner.h"
-#include "arm_compute/runtime/CL/tuners/Tuners.h"
#include "src/core/CL/ICLKernel.h"
namespace arm_compute
@@ -97,7 +96,7 @@ bool CLScheduler::is_initialised() const
std::once_flag CLScheduler::_initialize_symbols;
CLScheduler::CLScheduler()
- : _context(), _queue(), _target(GPUTarget::MIDGARD), _is_initialised(false), _cl_tuner(nullptr), _cl_default_static_tuner(nullptr), _gemm_heuristics(nullptr)
+ : _context(), _queue(), _target(GPUTarget::MIDGARD), _is_initialised(false), _cl_tuner(nullptr), _gemm_heuristics(nullptr)
{
}
@@ -116,8 +115,7 @@ void CLScheduler::default_init_with_context(cl::Device &device, cl::Context &ctx
cl::CommandQueue queue = cl::CommandQueue(ctx, device);
CLKernelLibrary::get().init(cl_kernels_folder, ctx, device);
init(ctx, queue, device, cl_tuner, gemm_h);
- _cl_default_static_tuner = tuners::TunerFactory::create_tuner(_target);
- _cl_tuner = (cl_tuner == nullptr) ? _cl_default_static_tuner.get() : cl_tuner;
+ _cl_tuner = cl_tuner;
}
}
@@ -133,12 +131,10 @@ void CLScheduler::default_init(ICLTuner *cl_tuner, CLGEMMHeuristicsHandle *gemm_
cl::CommandQueue queue = cl::CommandQueue(ctx, dev);
CLKernelLibrary::get().init("./cl_kernels/", ctx, dev);
init(ctx, queue, dev, cl_tuner, gemm_h);
- // Create a default static tuner and set if none was provided
- _cl_default_static_tuner = tuners::TunerFactory::create_tuner(_target);
}
// Set CL tuner
- _cl_tuner = (cl_tuner == nullptr) ? _cl_default_static_tuner.get() : cl_tuner;
+ _cl_tuner = cl_tuner;
}
void CLScheduler::set_context(cl::Context context)
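
Editor's note: with _cl_default_static_tuner gone, default_init() no longer falls back to a target-specific static tuner, so tuning becomes strictly opt-in. A hedged usage sketch; the CLTuner(true) constructor argument (tune new kernels) is an assumption about this version's interface:

#include "arm_compute/runtime/CL/CLScheduler.h"
#include "arm_compute/runtime/CL/CLTuner.h"

// Callers that want tuned LWS values now pass an ICLTuner explicitly;
// a nullptr tuner (the default) simply disables tuning.
void init_scheduler(bool enable_tuning)
{
    static arm_compute::CLTuner tuner(true); // must outlive the scheduler's use of it
    arm_compute::CLScheduler::get().default_init(enable_tuning ? &tuner : nullptr);
}
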
diff --git a/src/runtime/CL/functions/CLDirectConvolutionLayer.cpp b/src/runtime/CL/functions/CLDirectConvolutionLayer.cpp
index 49e97693e4..d60d11aa5f 100644
--- a/src/runtime/CL/functions/CLDirectConvolutionLayer.cpp
+++ b/src/runtime/CL/functions/CLDirectConvolutionLayer.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 Arm Limited.
+ * Copyright (c) 2017-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -28,18 +28,27 @@
#include "arm_compute/core/Utils.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
-#include "src/core/CL/kernels/CLDirectConvolutionLayerKernel.h"
-#include "src/core/CL/kernels/CLFillBorderKernel.h"
+#include "src/runtime/gpu/cl/operators/ClActivation.h"
+#include "src/runtime/gpu/cl/operators/ClDirectConvolution.h"
-using namespace arm_compute;
+namespace arm_compute
+{
+struct CLDirectConvolutionLayer::Impl
+{
+ const ICLTensor *src{ nullptr };
+ const ICLTensor *weights{ nullptr };
+ const ICLTensor *biases{ nullptr };
+ ICLTensor *dst{ nullptr };
+ std::unique_ptr<opencl::ClDirectConvolution> op{ nullptr };
+};
CLDirectConvolutionLayer::CLDirectConvolutionLayer()
- : _direct_conv_kernel(std::make_unique<CLDirectConvolutionLayerKernel>()), _input_border_handler(std::make_unique<CLFillBorderKernel>()), _activationlayer_function(),
- _is_activationlayer_enabled(false)
+ : _impl(std::make_unique<Impl>())
{
}
-
-CLDirectConvolutionLayer::~CLDirectConvolutionLayer() = default;
+CLDirectConvolutionLayer::CLDirectConvolutionLayer(CLDirectConvolutionLayer &&) = default;
+CLDirectConvolutionLayer &CLDirectConvolutionLayer::operator=(CLDirectConvolutionLayer &&) = default;
+CLDirectConvolutionLayer::~CLDirectConvolutionLayer() = default;
void CLDirectConvolutionLayer::configure(ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info, const ActivationLayerInfo &act_info)
{
@@ -47,57 +56,32 @@ void CLDirectConvolutionLayer::configure(ICLTensor *input, const ICLTensor *weig
}
void CLDirectConvolutionLayer::configure(const CLCompileContext &compile_context, ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output,
- const PadStrideInfo &conv_info,
- const ActivationLayerInfo &act_info)
+ const PadStrideInfo &conv_info, const ActivationLayerInfo &act_info)
{
- // Set GPU target
- _direct_conv_kernel->set_target(CLScheduler::get().target());
-
- // Configure direct convolution
- _direct_conv_kernel->configure(compile_context, input, weights, biases, output, conv_info);
+ ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
- // Configure border handler
- PixelValue &&zero_value(0.f);
- if(is_data_type_quantized_asymmetric(input->info()->data_type()))
- {
- zero_value = PixelValue(0, input->info()->data_type(), input->info()->quantization_info());
- }
- _input_border_handler->configure(compile_context, input, _direct_conv_kernel->border_size(), BorderMode::CONSTANT, zero_value);
+ _impl->src = input;
+ _impl->weights = weights;
+ _impl->biases = biases;
+ _impl->dst = output;
- // Tune kernels
- CLScheduler::get().tune_kernel_static(*_direct_conv_kernel);
-
- _is_activationlayer_enabled = act_info.enabled();
-
- //Configure Activation Layer
- if(_is_activationlayer_enabled)
- {
- _activationlayer_function.configure(compile_context, output, nullptr, act_info);
- }
+ _impl->op = std::make_unique<opencl::ClDirectConvolution>();
+ _impl->op->configure(compile_context, _impl->src->info(), _impl->weights->info(), (_impl->biases != nullptr) ? _impl->biases->info() : nullptr, _impl->dst->info(), conv_info, act_info);
}
Status CLDirectConvolutionLayer::validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info,
const ActivationLayerInfo &act_info)
{
- ARM_COMPUTE_RETURN_ON_ERROR(CLDirectConvolutionLayerKernel::validate(input, weights, biases, output, conv_info, CLScheduler::get().target()));
- if(act_info.enabled())
- {
- ARM_COMPUTE_RETURN_ON_ERROR(CLActivationLayer::validate(output, nullptr, act_info));
- }
- return Status{};
+ return opencl::ClDirectConvolution::validate(input, weights, biases, output, conv_info, act_info);
}
void CLDirectConvolutionLayer::run()
{
- // Run border handler
- CLScheduler::get().enqueue(*_input_border_handler, false);
-
- // Run direct convolution
- CLScheduler::get().enqueue(*_direct_conv_kernel);
-
- //Run Activation Layer
- if(_is_activationlayer_enabled)
- {
- _activationlayer_function.run();
- }
+ ITensorPack pack;
+ pack.add_tensor(TensorType::ACL_SRC, _impl->src);
+ pack.add_tensor(TensorType::ACL_SRC_1, _impl->weights);
+ pack.add_tensor(TensorType::ACL_SRC_2, _impl->biases);
+ pack.add_tensor(TensorType::ACL_DST, _impl->dst);
+ _impl->op->run(pack);
}
+} // namespace arm_compute
\ No newline at end of file
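
Editor's note: from the caller's point of view the ported function behaves as before; only the internals changed to the operator/pack model. A hedged usage sketch with illustrative shapes (includes assumed from this source tree):

#include "arm_compute/runtime/CL/CLScheduler.h"
#include "arm_compute/runtime/CL/CLTensor.h"
#include "arm_compute/runtime/CL/functions/CLDirectConvolutionLayer.h"

using namespace arm_compute;

void run_direct_conv()
{
    CLScheduler::get().default_init();

    CLTensor src, weights, biases, dst;
    src.allocator()->init(TensorInfo(TensorShape(13U, 13U, 16U), 1, DataType::F32));
    weights.allocator()->init(TensorInfo(TensorShape(3U, 3U, 16U, 8U), 1, DataType::F32));
    biases.allocator()->init(TensorInfo(TensorShape(8U), 1, DataType::F32));
    dst.allocator()->init(TensorInfo(TensorShape(13U, 13U, 8U), 1, DataType::F32));

    CLDirectConvolutionLayer conv;
    // configure() now only captures the tensor pointers and forwards their
    // ITensorInfo objects to the opencl::ClDirectConvolution operator.
    conv.configure(&src, &weights, &biases, &dst, PadStrideInfo(1, 1, 1, 1));

    src.allocator()->allocate();
    weights.allocator()->allocate();
    biases.allocator()->allocate();
    dst.allocator()->allocate();

    conv.run(); // packs the tensors and dispatches the operator
}
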
diff --git a/src/runtime/CL/tuners/BifrostTuner.cpp b/src/runtime/CL/tuners/BifrostTuner.cpp
deleted file mode 100644
index fe95829cca..0000000000
--- a/src/runtime/CL/tuners/BifrostTuner.cpp
+++ /dev/null
@@ -1,305 +0,0 @@
-/*
- * Copyright (c) 2018-2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "arm_compute/runtime/CL/tuners/BifrostTuner.h"
-
-#include "arm_compute/core/CL/CLHelpers.h"
-#include "src/core/CL/CLKernels.h"
-#include "support/Cast.h"
-
-#include "src/core/gpu/cl/kernels/ClPoolingKernel.h"
-#include "src/core/gpu/cl/kernels/ClScaleKernel.h"
-
-namespace arm_compute
-{
-namespace tuners
-{
-namespace
-{
-/** Tunes a @ref CLDirectConvolutionLayerKernel for a Bifrost target
- *
- * @param[in] k Kernel to tune
- */
-void tune_direct_convolution_kernel(CLDirectConvolutionLayerKernel &k)
-{
- cl::NDRange lws_hint = k.lws_hint();
-
- const GPUTarget gpu_target = k.get_target();
- const DataType dt = k._input->info()->data_type();
- const TensorShape weights_shape = k._weights->info()->tensor_shape();
- const TensorShape inputs_shape = k._input->info()->tensor_shape();
- const size_t kernel_size = weights_shape.x();
- const unsigned int stride_x = k._conv_stride_x;
- const unsigned int stride_y = k._conv_stride_y;
-
- if(gpu_target_is_in(gpu_target, GPUTarget::G71, GPUTarget::G72) && (kernel_size <= 5) && (stride_x == 1) && (stride_y == 1) && (dt == DataType::F32))
- {
- // Through extensive experimentation with over 30 representative tensor
- // shapes, we found a small number of local work size configurations
- // that result in nearly optimal execution times. Selecting the right
- // lws for a given shape, however, required a complex decision tree,
- // until we constructed a simple feature as described below.
- //
- // We started from the number of multiply-accumulate operations for a
- // convolution layer, which is equal to the product of the input
- // dimensions 0..2 and the weights dimensions 0..2. Unfortunately,
- // this resulted in ties between distinct shapes that required distinct
- // lws configurations. Replacing the width of the input with the kernel
- // size, however, resulted in nearly optimal predictions. We use underscores
- // in variable names to indicate when they are intentionally misleading.
- const size_t product_of_weights_dimensions = weights_shape[0] * weights_shape[1] * weights_shape[2];
- const size_t product_of_input_dimensions_ = inputs_shape[0] * inputs_shape[1] * inputs_shape[2];
- const float mega_ops_ = 1e-6 * product_of_weights_dimensions * product_of_input_dimensions_;
-
- switch(kernel_size)
- {
- case 1:
- {
- if(mega_ops_ < 1.f)
- {
- lws_hint = cl::NDRange(1, 1, 8);
- }
- else if(mega_ops_ < 7.f)
- {
- lws_hint = cl::NDRange(1, 1, 4);
- }
- else
- {
- lws_hint = cl::NDRange(1, 1, 2);
- }
- break;
- }
- case 3:
- {
- if(mega_ops_ < 1.f)
- {
- lws_hint = cl::NDRange(1, 1, 8);
- }
- else if(mega_ops_ < 13.f)
- {
- lws_hint = cl::NDRange(2, 1, 4);
- }
- else if(mega_ops_ < 50.f)
- {
- lws_hint = cl::NDRange(3, 1, 4);
- }
- else
- {
- lws_hint = cl::NDRange(2, 1, 6);
- }
- break;
- }
- case 5:
- {
- if(mega_ops_ < 2.f || mega_ops_ > 80.f)
- {
- lws_hint = cl::NDRange(2, 1, 4);
- }
- else
- {
- lws_hint = cl::NDRange(2, 1, 8);
- }
- break;
- }
- default:
- break;
- }
- k.set_lws_hint(lws_hint);
- }
-}
-
-void tune_col2im_kernel(CLCol2ImKernel &k)
-{
- cl::NDRange lws_hint = k.lws_hint();
- const GPUTarget gpu_target = k.get_target();
-
- // Configure the local work size for Bifrost with a value obtained
- // via exhaustive autotuning over 30 representative tensor shapes.
- if(gpu_target_is_in(gpu_target,
- GPUTarget::G71, GPUTarget::G72, GPUTarget::G76,
- GPUTarget::G51, GPUTarget::G51BIG, GPUTarget::G51LIT,
- GPUTarget::G52, GPUTarget::G52LIT))
- {
- if((k._convolved_dims.width == 7) || (k._convolved_dims.width == 14))
- {
- lws_hint = cl::NDRange(1, 7, 1);
- }
- else
- {
- lws_hint = cl::NDRange(1, 8, 1);
- }
- }
-
- k.set_lws_hint(lws_hint);
-}
-
-void tune_im2col_kernel(CLIm2ColKernel &k)
-{
- cl::NDRange lws_hint = k.lws_hint();
- const GPUTarget gpu_target = k.get_target();
-
- // Local work size optimized for the 11x11 AlexNet convolution on Bifrost.
- if(gpu_target_is_in(gpu_target,
- GPUTarget::G71, GPUTarget::G72, GPUTarget::G76,
- GPUTarget::G51, GPUTarget::G51BIG, GPUTarget::G51LIT,
- GPUTarget::G52, GPUTarget::G52LIT)
- && k._kernel_dims.width == 11)
- {
- const bool is_square_kernel = (k._kernel_dims.width == k._kernel_dims.height);
- if(!is_square_kernel && k._kernel_dims.width > 1 && !k._conv_info.has_padding())
- {
- lws_hint = cl::NDRange(1, 1, 1);
- }
- }
- k.set_lws_hint(lws_hint);
-}
-
-void tune_gemm_kernel(CLGEMMMatrixMultiplyKernel &k)
-{
- cl::NDRange lws_hint = k.lws_hint();
- const GPUTarget gpu_target = k.get_target();
-
- // Configure LWS hint
- switch(gpu_target)
- {
- case GPUTarget::G71:
- case GPUTarget::G72:
- case GPUTarget::G51:
- case GPUTarget::G51BIG:
- case GPUTarget::G51LIT:
- case GPUTarget::G52:
- case GPUTarget::G52LIT:
- case GPUTarget::G76:
- if(k._input1->info()->dimension(1) == 24)
- {
- // LWS optimized for the 11x11 AlexNet convolution on Bifrost.
- lws_hint = cl::NDRange(2, 2);
- }
- else if(k._output->info()->dimension(1) == 196)
- {
- lws_hint = cl::NDRange(1, 7);
- }
- else
- {
- lws_hint = cl::NDRange(8, 8);
- }
- break;
- default:
- lws_hint = cl::NullRange;
- }
-
- k.set_lws_hint(lws_hint);
-}
-
-void tune_pooling_kernel(opencl::kernels::ClPoolingKernel &k)
-{
- cl::NDRange lws_hint = k.lws_hint();
- const GPUTarget gpu_target = k.get_target();
-
- // Configure the local work size (hint) from the first two dimensions of the global work size.
- // On Bifrost, this works for up to 35x35xC filters, for which the pooling_layer_3_optimized
- // kernel is launched with gws=(9, 33, C). In any case, the hint will be ignored if it is
- // invalid (e.g. exceeds the maximum workgroup size that the kernel can be launched with).
- if(k._pool_info.data_layout == DataLayout::NCHW)
- {
- if(gpu_target_is_in(gpu_target,
- GPUTarget::G71, GPUTarget::G72, GPUTarget::G76,
- GPUTarget::G51, GPUTarget::G51BIG, GPUTarget::G51LIT,
- GPUTarget::G52, GPUTarget::G52LIT))
- {
- cl::NDRange gws = ICLKernel::gws_from_window(k.window());
- lws_hint = cl::NDRange(gws[0], gws[1], 1);
- }
- }
-
- k.set_lws_hint(lws_hint);
-}
-
-void tune_scale_kernel(opencl::kernels::ClScaleKernel &k)
-{
- cl::NDRange lws_hint = k.lws_hint();
- const GPUTarget gpu_target = k.get_target();
- const DataType dt = k.get_data_type();
- const InterpolationPolicy interpolation = k.get_interpolation_policy();
-
- // Configure the local work size for Bifrost with bilinear interpolation and data type F32.
- // The values were obtained via exhaustive autotuning.
- if(gpu_target_is_in(gpu_target, GPUTarget::G71, GPUTarget::G72) && (dt == DataType::F32) && (interpolation == InterpolationPolicy::BILINEAR))
- {
- const auto dim_0 = k.get_output_x_dim();
- if(dim_0 == 480)
- {
- lws_hint = cl::NDRange(2, 1);
- }
- else if(dim_0 == 3120)
- {
- lws_hint = cl::NDRange(2, 8);
- }
- else if(dim_0 == 4160)
- {
- lws_hint = cl::NDRange(4, 8);
- }
- k.set_lws_hint(lws_hint);
- }
-}
-} // namespace
-
-void BifrostTuner::tune_kernel_static(ICLKernel &kernel)
-{
- if(dynamic_cast<CLDirectConvolutionLayerKernel *>(&kernel) != nullptr)
- {
- tune_direct_convolution_kernel(*utils::cast::polymorphic_downcast<CLDirectConvolutionLayerKernel *>(&kernel));
- }
- else if(dynamic_cast<CLCol2ImKernel *>(&kernel) != nullptr)
- {
- tune_col2im_kernel(*utils::cast::polymorphic_downcast<CLCol2ImKernel *>(&kernel));
- }
- else if(dynamic_cast<CLIm2ColKernel *>(&kernel) != nullptr)
- {
- tune_im2col_kernel(*utils::cast::polymorphic_downcast<CLIm2ColKernel *>(&kernel));
- }
- else if(dynamic_cast<CLGEMMMatrixMultiplyKernel *>(&kernel) != nullptr)
- {
- tune_gemm_kernel(*utils::cast::polymorphic_downcast<CLGEMMMatrixMultiplyKernel *>(&kernel));
- }
- else if(dynamic_cast<opencl::kernels::ClPoolingKernel *>(&kernel) != nullptr)
- {
- tune_pooling_kernel(*utils::cast::polymorphic_downcast<opencl::kernels::ClPoolingKernel *>(&kernel));
- }
- else if(dynamic_cast<opencl::kernels::ClScaleKernel *>(&kernel) != nullptr)
- {
- tune_scale_kernel(*utils::cast::polymorphic_downcast<opencl::kernels::ClScaleKernel *>(&kernel));
- }
-}
-
-void BifrostTuner::tune_kernel_dynamic(ICLKernel &kernel)
-{
- ARM_COMPUTE_UNUSED(kernel);
-}
-
-void BifrostTuner::tune_kernel_dynamic(ICLKernel &kernel, ITensorPack &tensors)
-{
- ARM_COMPUTE_UNUSED(kernel, tensors);
-}
-} // namespace tuners
-} // namespace arm_compute
\ No newline at end of file
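
Editor's note: for reference, the heart of the deleted direct-convolution heuristic fits in a few lines: a single "mega-ops" feature (product of the first three input and weights dimensions, scaled by 1e-6) selects among a handful of LWS configurations per kernel size. A standalone re-statement of the deleted decision table:

#include <array>
#include <cstddef>
#include <iostream>

struct Lws
{
    std::size_t x, y, z;
};

// Mirrors the deleted tune_direct_convolution_kernel() logic.
Lws bifrost_direct_conv_lws(const std::array<std::size_t, 3> &input_dims,
                            const std::array<std::size_t, 3> &weights_dims)
{
    const double mega_ops = 1e-6
                            * static_cast<double>(weights_dims[0] * weights_dims[1] * weights_dims[2])
                            * static_cast<double>(input_dims[0] * input_dims[1] * input_dims[2]);
    switch(weights_dims[0]) // kernel size
    {
        case 1:
            return mega_ops < 1.0 ? Lws{ 1, 1, 8 } : mega_ops < 7.0 ? Lws{ 1, 1, 4 } : Lws{ 1, 1, 2 };
        case 3:
            if(mega_ops < 1.0)  return { 1, 1, 8 };
            if(mega_ops < 13.0) return { 2, 1, 4 };
            if(mega_ops < 50.0) return { 3, 1, 4 };
            return { 2, 1, 6 };
        case 5:
            return (mega_ops < 2.0 || mega_ops > 80.0) ? Lws{ 2, 1, 4 } : Lws{ 2, 1, 8 };
        default:
            return { 0, 0, 0 }; // no hint for other kernel sizes
    }
}

int main()
{
    const Lws lws = bifrost_direct_conv_lws({ 13, 13, 16 }, { 3, 3, 16 });
    std::cout << lws.x << 'x' << lws.y << 'x' << lws.z << '\n'; // ~0.39 mega-ops -> 1x1x8
}
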
diff --git a/src/runtime/CL/tuners/MidgardTuner.cpp b/src/runtime/CL/tuners/MidgardTuner.cpp
deleted file mode 100644
index 72734f2207..0000000000
--- a/src/runtime/CL/tuners/MidgardTuner.cpp
+++ /dev/null
@@ -1,82 +0,0 @@
-/*
- * Copyright (c) 2018-2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "arm_compute/runtime/CL/tuners/MidgardTuner.h"
-
-#include "arm_compute/core/CL/CLHelpers.h"
-#include "src/core/CL/CLKernels.h"
-#include "support/Cast.h"
-
-namespace arm_compute
-{
-namespace tuners
-{
-namespace
-{
-void tune_gemm_kernel(CLGEMMMatrixMultiplyKernel &k)
-{
- cl::NDRange lws_hint = k.lws_hint();
- const GPUTarget gpu_target = k.get_target();
-
- switch(gpu_target)
- {
- case GPUTarget::MIDGARD:
- case GPUTarget::T600:
- case GPUTarget::T700:
- case GPUTarget::T800:
- if(k._output->info()->dimension(1) == 196)
- {
- lws_hint = cl::NDRange(1, 7);
- }
- else
- {
- lws_hint = cl::NDRange(8, 8);
- }
- break;
- default:
- lws_hint = cl::NullRange;
- }
-
- k.set_lws_hint(lws_hint);
-}
-} // namespace
-
-void MidgardTuner::tune_kernel_static(ICLKernel &kernel)
-{
- if(dynamic_cast<CLGEMMMatrixMultiplyKernel *>(&kernel) != nullptr)
- {
- tune_gemm_kernel(*utils::cast::polymorphic_downcast<CLGEMMMatrixMultiplyKernel *>(&kernel));
- }
-}
-
-void MidgardTuner::tune_kernel_dynamic(ICLKernel &kernel)
-{
- ARM_COMPUTE_UNUSED(kernel);
-}
-
-void MidgardTuner::tune_kernel_dynamic(ICLKernel &kernel, ITensorPack &tensors)
-{
- ARM_COMPUTE_UNUSED(kernel, tensors);
-}
-} // namespace tuners
-} // namespace arm_compute
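
Editor's note: both deleted tuners shared the same dispatch idiom: probe the concrete kernel type with dynamic_cast, then forward to a type-specific tuning routine. A minimal sketch of the idiom with illustrative types (not ACL code):

#include <iostream>

struct IKernel { virtual ~IKernel() = default; };
struct GemmKernel : IKernel {};
struct ScaleKernel : IKernel {};

void tune_gemm(GemmKernel &)   { std::cout << "tuned gemm\n"; }
void tune_scale(ScaleKernel &) { std::cout << "tuned scale\n"; }

void tune_kernel_static(IKernel &kernel)
{
    // Probe each supported concrete type; unknown kernels get no hint.
    if(auto *g = dynamic_cast<GemmKernel *>(&kernel))
    {
        tune_gemm(*g);
    }
    else if(auto *s = dynamic_cast<ScaleKernel *>(&kernel))
    {
        tune_scale(*s);
    }
}

int main()
{
    GemmKernel g;
    ScaleKernel s;
    tune_kernel_static(g); // prints "tuned gemm"
    tune_kernel_static(s); // prints "tuned scale"
}
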
diff --git a/src/runtime/gpu/cl/operators/ClDirectConvolution.cpp b/src/runtime/gpu/cl/operators/ClDirectConvolution.cpp
new file mode 100644
index 0000000000..3382a6c3c5
--- /dev/null
+++ b/src/runtime/gpu/cl/operators/ClDirectConvolution.cpp
@@ -0,0 +1,102 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "src/runtime/gpu/cl/operators/ClDirectConvolution.h"
+
+#include "arm_compute/runtime/CL/CLScheduler.h"
+#include "src/core/CL/kernels/CLFillBorderKernel.h"
+#include "src/core/gpu/cl/ClCompileContext.h"
+#include "src/core/gpu/cl/kernels/ClActivationKernel.h"
+#include "src/core/gpu/cl/kernels/ClDirectConvolutionKernel.h"
+
+namespace arm_compute
+{
+namespace opencl
+{
+namespace
+{
+ITensorPack select_activation_src_dst(ITensorPack &tensors)
+{
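+ // In-place fused activation: alias the convolution output (ACL_DST)
+ // as both the activation's source and destination.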
+ ITensorPack pack;
+ pack.add_tensor(TensorType::ACL_SRC, tensors.get_tensor(TensorType::ACL_DST));
+ pack.add_tensor(TensorType::ACL_DST, tensors.get_tensor(TensorType::ACL_DST));
+ return pack;
+}
+} // namespace
+
+void ClDirectConvolution::configure(const CLCompileContext &compile_context, ITensorInfo *src, ITensorInfo *weights, ITensorInfo *biases, ITensorInfo *dst,
+ const PadStrideInfo &conv_info, const ActivationLayerInfo &act_info)
+{
+ // Configure direct convolution kernel
+ auto k = std::make_unique<kernels::ClDirectConvolutionKernel>();
+ k->set_target(CLScheduler::get().target());
+ k->configure(compile_context, src, weights, biases, dst, conv_info);
+ _direct_conv_kernel = std::move(k);
+
+ // Configure border handler
+ PixelValue zero_value(0.f);
+ if(is_data_type_quantized_asymmetric(src->data_type()))
+ {
+ zero_value = PixelValue(0, src->data_type(), src->quantization_info());
+ }
+ auto b = std::make_unique<CLFillBorderKernel>();
+ b->configure(compile_context, src, _direct_conv_kernel->border_size(), BorderMode::CONSTANT, zero_value);
+ _src_border_handler = std::move(b);
+
+ if(act_info.enabled())
+ {
+ auto a = std::make_unique<kernels::ClActivationKernel>();
+ a->configure(compile_context, dst, dst, act_info);
+ _activation_kernel = std::move(a);
+ }
+
+ // Tune kernels
+ CLScheduler::get().tune_kernel_static(*_direct_conv_kernel);
+}
+
+Status ClDirectConvolution::validate(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *dst,
+ const PadStrideInfo &conv_info, const ActivationLayerInfo &act_info)
+{
+ ARM_COMPUTE_RETURN_ON_ERROR(kernels::ClDirectConvolutionKernel::validate(src, weights, biases, dst, conv_info, CLScheduler::get().target()));
+ if(act_info.enabled())
+ {
+ ARM_COMPUTE_RETURN_ON_ERROR(kernels::ClActivationKernel::validate(dst, dst, act_info));
+ }
+ return Status{};
+}
+
+void ClDirectConvolution::run(ITensorPack &tensors)
+{
+ // Run border handler
+ CLScheduler::get().enqueue_op(*_src_border_handler.get(), tensors, false);
+ // Run direct convolution
+ CLScheduler::get().enqueue_op(*_direct_conv_kernel.get(), tensors, false);
+ // Run activation kernel
+ if(_activation_kernel)
+ {
+ auto act_pack = select_activation_src_dst(tensors);
+ CLScheduler::get().enqueue_op(*_activation_kernel.get(), act_pack, false);
+ }
+}
+} // namespace opencl
+} // namespace arm_compute
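
Editor's note: a hedged sketch of driving the operator directly, mirroring the pack that CLDirectConvolutionLayer::run() assembles above; the tensors are assumed to be CLTensor objects already configured and allocated against the infos passed to configure(), and includes are assumptions about this source tree:

#include "arm_compute/core/ITensorPack.h"
#include "arm_compute/runtime/CL/CLTensor.h"
#include "src/runtime/gpu/cl/operators/ClDirectConvolution.h"

using namespace arm_compute;

void run_direct_conv_op(opencl::ClDirectConvolution &op,
                        CLTensor &src, CLTensor &weights, CLTensor &biases, CLTensor &dst)
{
    ITensorPack pack;
    pack.add_tensor(TensorType::ACL_SRC,   &src);     // input feature map
    pack.add_tensor(TensorType::ACL_SRC_1, &weights); // 4D weights
    pack.add_tensor(TensorType::ACL_SRC_2, &biases);  // 1D biases
    pack.add_tensor(TensorType::ACL_DST,   &dst);     // output feature map
    op.run(pack); // border fill, then convolution, then any fused activation
}
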
diff --git a/src/runtime/gpu/cl/operators/ClDirectConvolution.h b/src/runtime/gpu/cl/operators/ClDirectConvolution.h
new file mode 100644
index 0000000000..e7ad927b0b
--- /dev/null
+++ b/src/runtime/gpu/cl/operators/ClDirectConvolution.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_CL_DIRECT_CONVOLUTION_H
+#define ARM_COMPUTE_CL_DIRECT_CONVOLUTION_H
+
+#include "src/core/gpu/cl/ClCompileContext.h"
+#include "src/core/gpu/cl/IClKernel.h"
+#include "src/runtime/gpu/cl/IClOperator.h"
+
+#include <memory>
+
+namespace arm_compute
+{
+namespace opencl
+{
+/** Basic function to execute a direct convolution layer. This function calls the following OpenCL kernels:
+ *
+ * -# @ref CLFillBorderKernel (executed if padding size is different from zero)
+ * -# @ref opencl::kernels::ClDirectConvolutionKernel
+ * -# @ref opencl::kernels::ClActivationKernel (executed only if a fused activation is requested)
+ */
+class ClDirectConvolution : public IClOperator
+{
+public:
+ /** Constructor */
+ ClDirectConvolution() = default;
+ /** Set the src and dst tensors.
+ *
+ * @param[in] compile_context The compile context to be used.
+ * @param[in] src Source tensor. 3 lower dimensions represent a single src [width, height, IFM],
+ * while every optional dimension from 4 and above represent a batch of srcs.
+ * Data types supported: QASYMM8_SIGNED/QASYMM8/F16/F32.
+ * @param[in] weights Weights tensor. Weights are a 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM]. Data type supported: Same as @p src.
+ * @param[in] biases Biases tensor. Shared biases are supported. Biases are a 1D tensor with dimensions [OFM].
+ * Data type supported: Should match @p src data type, except for src of QASYMM8 and QASYMM8_SIGNED type, where biases should be of S32 type.
+ * @param[out] dst Destination tensor. 3 lower dimensions represent a single dst [width, height, OFM], while the rest represent batch of dsts.
+ * Data types supported: Same as @p src.
+ * @param[in] conv_info Contains padding and stride information described in @ref PadStrideInfo.
+ * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
+ *
+ */
+ void configure(const CLCompileContext &compile_context, ITensorInfo *src, ITensorInfo *weights, ITensorInfo *biases, ITensorInfo *dst, const PadStrideInfo &conv_info,
+ const ActivationLayerInfo &act_info = ActivationLayerInfo());
+ /** Static function to check if given info will lead to a valid configuration of @ref ClDirectConvolution
+ *
+ * @param[in] src Source tensor. 3 lower dimensions represent a single src [width, height, IFM],
+ * while every optional dimension from 4 and above represent a batch of srcs.
+ * Data types supported: QASYMM8_SIGNED/QASYMM8/F16/F32.
+ * @param[in] weights Weights tensor. Weights are a 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM]. Data type supported: Same as @p src.
+ * @param[in] biases Biases tensor. Shared biases are supported. Biases are a 1D tensor with dimensions [OFM].
+ * Data type supported: Should match @p src data type, except for src of QASYMM8 and QASYMM8_SIGNED type, where biases should be of S32 type.
+ * @param[in] dst Destination tensor. 3 lower dimensions represent a single dst [width, height, OFM], while the rest represent batch of dsts.
+ * Data types supported: Same as @p src.
+ * @param[in] conv_info Contains padding and stride information described in @ref PadStrideInfo.
+ * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
+ *
+ * @return a status
+ */
+ static Status validate(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *dst, const PadStrideInfo &conv_info,
+ const ActivationLayerInfo &act_info = ActivationLayerInfo());
+
+ // Inherited method overridden
+ void run(ITensorPack &tensors) override;
+
+private:
+ std::unique_ptr<IClKernel> _direct_conv_kernel{ nullptr };
+ std::unique_ptr<IClKernel> _src_border_handler{ nullptr };
+ std::unique_ptr<IClKernel> _activation_kernel{ nullptr };
+};
+} // namespace opencl
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_CL_DIRECT_CONVOLUTION_H */
\ No newline at end of file
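
Editor's note: a hedged sketch of the validate-before-configure flow the static validate() enables, so callers can reject an unsupported configuration before paying for kernel compilation; includes are assumptions about this source tree:

#include <iostream>

#include "arm_compute/core/Error.h"
#include "arm_compute/core/Types.h"
#include "src/runtime/gpu/cl/operators/ClDirectConvolution.h"

using namespace arm_compute;

// Returns true if the direct-convolution configuration is supported,
// logging the failure reason otherwise.
bool direct_conv_supported(const ITensorInfo *src, const ITensorInfo *weights,
                           const ITensorInfo *biases, const ITensorInfo *dst,
                           const PadStrideInfo &conv_info)
{
    const Status st = opencl::ClDirectConvolution::validate(src, weights, biases, dst, conv_info);
    if(st.error_code() != ErrorCode::OK)
    {
        std::cerr << st.error_description() << std::endl;
        return false;
    }
    return true;
}
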
diff --git a/tests/validation/CL/UNIT/Tuner.cpp b/tests/validation/CL/UNIT/Tuner.cpp
deleted file mode 100644
index cf2513bf2c..0000000000
--- a/tests/validation/CL/UNIT/Tuner.cpp
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
- * Copyright (c) 2018-2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "arm_compute/runtime/CL/CLScheduler.h"
-#include "arm_compute/runtime/CL/CLTensor.h"
-#include "arm_compute/runtime/CL/tuners/BifrostTuner.h"
-#include "src/core/CL/kernels/CLDirectConvolutionLayerKernel.h"
-#include "tests/Utils.h"
-#include "tests/framework/Asserts.h"
-#include "tests/framework/Macros.h"
-
-namespace arm_compute
-{
-namespace test
-{
-namespace validation
-{
-TEST_SUITE(CL)
-TEST_SUITE(UNIT)
-TEST_SUITE(Tuner)
-
-/** Validates static tuning of Bifrost tuner */
-TEST_CASE(BifrostTunerSimple, framework::DatasetMode::ALL)
-{
- // Create tuner
- tuners::BifrostTuner tuner;
-
- // Create tensors
- auto src = create_tensor<CLTensor>(TensorShape(13U, 13U, 16U), DataType::F32);
- auto weights = create_tensor<CLTensor>(TensorShape(3U, 3U, 16U, 3U), DataType::F32);
- auto bias = create_tensor<CLTensor>(TensorShape(3U), DataType::F32);
- auto dst = create_tensor<CLTensor>(TensorShape(13U, 13U, 3U), DataType::F32);
-
- // Create kernel
- cl::NDRange fake_lws(2000);
- CLDirectConvolutionLayerKernel conv;
- conv.set_target(GPUTarget::G72);
-
- // Configure
- conv.configure(&src, &weights, &bias, &dst, PadStrideInfo(1, 1, 1, 1));
-
- // Hard-wire lws to kernel and validate lws
- conv.set_lws_hint(fake_lws);
- ARM_COMPUTE_EXPECT(conv.lws_hint()[0] == 2000, framework::LogLevel::ERRORS);
-
- // Tune kernel and validate
- tuner.tune_kernel_static(conv);
- ARM_COMPUTE_EXPECT(conv.lws_hint()[0] != 2000, framework::LogLevel::ERRORS);
-
- // Clear tuner
- CLScheduler::get().default_init();
-}
-TEST_SUITE_END()
-TEST_SUITE_END()
-TEST_SUITE_END()
-} // namespace validation
-} // namespace test
-} // namespace arm_compute