Diffstat (limited to 'src/core/gpu/cl/kernels')
-rw-r--r--  src/core/gpu/cl/kernels/ClActivationKernel.cpp | 255
-rw-r--r--  src/core/gpu/cl/kernels/ClActivationKernel.h | 74
-rw-r--r--  src/core/gpu/cl/kernels/ClBatchConcatenateKernel.cpp | 153
-rw-r--r--  src/core/gpu/cl/kernels/ClBatchConcatenateKernel.h | 77
-rw-r--r--  src/core/gpu/cl/kernels/ClCastKernel.cpp | 163
-rw-r--r--  src/core/gpu/cl/kernels/ClCastKernel.h | 79
-rw-r--r--  src/core/gpu/cl/kernels/ClConvertFullyConnectedWeightsKernel.cpp | 119
-rw-r--r--  src/core/gpu/cl/kernels/ClConvertFullyConnectedWeightsKernel.h | 76
-rw-r--r--  src/core/gpu/cl/kernels/ClCopyKernel.cpp | 170
-rw-r--r--  src/core/gpu/cl/kernels/ClCopyKernel.h | 71
-rw-r--r--  src/core/gpu/cl/kernels/ClCropKernel.cpp | 136
-rw-r--r--  src/core/gpu/cl/kernels/ClCropKernel.h | 98
-rw-r--r--  src/core/gpu/cl/kernels/ClDepthConcatenateKernel.cpp | 138
-rw-r--r--  src/core/gpu/cl/kernels/ClDepthConcatenateKernel.h | 77
-rw-r--r--  src/core/gpu/cl/kernels/ClDequantizeKernel.cpp | 153
-rw-r--r--  src/core/gpu/cl/kernels/ClDequantizeKernel.h | 65
-rw-r--r--  src/core/gpu/cl/kernels/ClDirectConv2dKernel.cpp | 667
-rw-r--r--  src/core/gpu/cl/kernels/ClDirectConv2dKernel.h | 89
-rw-r--r--  src/core/gpu/cl/kernels/ClElementwiseKernel.cpp | 536
-rw-r--r--  src/core/gpu/cl/kernels/ClElementwiseKernel.h | 219
-rw-r--r--  src/core/gpu/cl/kernels/ClElementwiseUnaryKernel.cpp | 163
-rw-r--r--  src/core/gpu/cl/kernels/ClElementwiseUnaryKernel.h | 67
-rw-r--r--  src/core/gpu/cl/kernels/ClFillKernel.cpp | 122
-rw-r--r--  src/core/gpu/cl/kernels/ClFillKernel.h | 77
-rw-r--r--  src/core/gpu/cl/kernels/ClFloorKernel.cpp | 119
-rw-r--r--  src/core/gpu/cl/kernels/ClFloorKernel.h | 66
-rw-r--r--  src/core/gpu/cl/kernels/ClGemmMatrixMultiplyKernel.cpp | 533
-rw-r--r--  src/core/gpu/cl/kernels/ClGemmMatrixMultiplyKernel.h | 88
-rw-r--r--  src/core/gpu/cl/kernels/ClGemmMatrixMultiplyNativeKernel.cpp | 411
-rw-r--r--  src/core/gpu/cl/kernels/ClGemmMatrixMultiplyNativeKernel.h | 88
-rw-r--r--  src/core/gpu/cl/kernels/ClGemmMatrixMultiplyReshapedKernel.cpp | 416
-rw-r--r--  src/core/gpu/cl/kernels/ClGemmMatrixMultiplyReshapedKernel.h | 113
-rw-r--r--  src/core/gpu/cl/kernels/ClGemmMatrixMultiplyReshapedOnlyRhsKernel.cpp | 438
-rw-r--r--  src/core/gpu/cl/kernels/ClGemmMatrixMultiplyReshapedOnlyRhsKernel.h | 104
-rw-r--r--  src/core/gpu/cl/kernels/ClGemmReshapeLhsMatrixKernel.cpp | 219
-rw-r--r--  src/core/gpu/cl/kernels/ClGemmReshapeLhsMatrixKernel.h | 78
-rw-r--r--  src/core/gpu/cl/kernels/ClGemmReshapeRhsMatrixKernel.cpp | 170
-rw-r--r--  src/core/gpu/cl/kernels/ClGemmReshapeRhsMatrixKernel.h | 84
-rw-r--r--  src/core/gpu/cl/kernels/ClHeightConcatenateKernel.cpp | 131
-rw-r--r--  src/core/gpu/cl/kernels/ClHeightConcatenateKernel.h | 74
-rw-r--r--  src/core/gpu/cl/kernels/ClMulKernel.cpp | 403
-rw-r--r--  src/core/gpu/cl/kernels/ClMulKernel.h | 116
-rw-r--r--  src/core/gpu/cl/kernels/ClPermuteKernel.cpp | 147
-rw-r--r--  src/core/gpu/cl/kernels/ClPermuteKernel.h | 78
-rw-r--r--  src/core/gpu/cl/kernels/ClPool2dKernel.cpp | 509
-rw-r--r--  src/core/gpu/cl/kernels/ClPool2dKernel.h | 76
-rw-r--r--  src/core/gpu/cl/kernels/ClQuantizeKernel.cpp | 175
-rw-r--r--  src/core/gpu/cl/kernels/ClQuantizeKernel.h | 70
-rw-r--r--  src/core/gpu/cl/kernels/ClReshapeKernel.cpp | 129
-rw-r--r--  src/core/gpu/cl/kernels/ClReshapeKernel.h | 66
-rw-r--r--  src/core/gpu/cl/kernels/ClScaleKernel.cpp | 266
-rw-r--r--  src/core/gpu/cl/kernels/ClScaleKernel.h | 79
-rw-r--r--  src/core/gpu/cl/kernels/ClSoftmaxKernel.cpp | 355
-rw-r--r--  src/core/gpu/cl/kernels/ClSoftmaxKernel.h | 126
-rw-r--r--  src/core/gpu/cl/kernels/ClTransposeKernel.cpp | 119
-rw-r--r--  src/core/gpu/cl/kernels/ClTransposeKernel.h | 65
-rw-r--r--  src/core/gpu/cl/kernels/ClWidthConcatenate2TensorsKernel.cpp | 154
-rw-r--r--  src/core/gpu/cl/kernels/ClWidthConcatenate2TensorsKernel.h | 70
-rw-r--r--  src/core/gpu/cl/kernels/ClWidthConcatenate4TensorsKernel.cpp | 184
-rw-r--r--  src/core/gpu/cl/kernels/ClWidthConcatenate4TensorsKernel.h | 75
-rw-r--r--  src/core/gpu/cl/kernels/ClWidthConcatenateKernel.cpp | 126
-rw-r--r--  src/core/gpu/cl/kernels/ClWidthConcatenateKernel.h | 71
-rw-r--r--  src/core/gpu/cl/kernels/ClWinogradFilterTransformKernel.cpp | 151
-rw-r--r--  src/core/gpu/cl/kernels/ClWinogradFilterTransformKernel.h | 78
-rw-r--r--  src/core/gpu/cl/kernels/ClWinogradInputTransformKernel.cpp | 278
-rw-r--r--  src/core/gpu/cl/kernels/ClWinogradInputTransformKernel.h | 88
-rw-r--r--  src/core/gpu/cl/kernels/ClWinogradOutputTransformKernel.cpp | 263
-rw-r--r--  src/core/gpu/cl/kernels/ClWinogradOutputTransformKernel.h | 87
-rw-r--r--  src/core/gpu/cl/kernels/gemm/ClGemmHelpers.cpp | 116
-rw-r--r--  src/core/gpu/cl/kernels/gemm/ClGemmHelpers.h | 95
-rw-r--r--  src/core/gpu/cl/kernels/gemm/IClGemmKernelConfig.h | 123
-rw-r--r--  src/core/gpu/cl/kernels/gemm/native/ClGemmDefaultConfigNativeBifrost.cpp | 246
-rw-r--r--  src/core/gpu/cl/kernels/gemm/native/ClGemmDefaultConfigNativeBifrost.h | 62
-rw-r--r--  src/core/gpu/cl/kernels/gemm/native/ClGemmDefaultConfigNativeMidgard.cpp | 73
-rw-r--r--  src/core/gpu/cl/kernels/gemm/native/ClGemmDefaultConfigNativeMidgard.h | 57
-rw-r--r--  src/core/gpu/cl/kernels/gemm/native/ClGemmDefaultConfigNativeValhall.cpp | 168
-rw-r--r--  src/core/gpu/cl/kernels/gemm/native/ClGemmDefaultConfigNativeValhall.h | 59
-rw-r--r--  src/core/gpu/cl/kernels/gemm/native/ClGemmNativeKernelConfig.h | 71
-rw-r--r--  src/core/gpu/cl/kernels/gemm/reshaped/ClGemmDefaultConfigReshapedBifrost.cpp | 356
-rw-r--r--  src/core/gpu/cl/kernels/gemm/reshaped/ClGemmDefaultConfigReshapedBifrost.h | 64
-rw-r--r--  src/core/gpu/cl/kernels/gemm/reshaped/ClGemmDefaultConfigReshapedValhall.cpp | 538
-rw-r--r--  src/core/gpu/cl/kernels/gemm/reshaped/ClGemmDefaultConfigReshapedValhall.h | 61
-rw-r--r--  src/core/gpu/cl/kernels/gemm/reshaped/ClGemmReshapedKernelConfig.h | 69
-rw-r--r--  src/core/gpu/cl/kernels/gemm/reshaped_only_rhs/ClGemmDefaultConfigReshapedRhsOnlyBifrost.cpp | 518
-rw-r--r--  src/core/gpu/cl/kernels/gemm/reshaped_only_rhs/ClGemmDefaultConfigReshapedRhsOnlyBifrost.h | 67
-rw-r--r--  src/core/gpu/cl/kernels/gemm/reshaped_only_rhs/ClGemmDefaultConfigReshapedRhsOnlyValhall.cpp | 570
-rw-r--r--  src/core/gpu/cl/kernels/gemm/reshaped_only_rhs/ClGemmDefaultConfigReshapedRhsOnlyValhall.h | 61
-rw-r--r--  src/core/gpu/cl/kernels/gemm/reshaped_only_rhs/ClGemmDefaultReshapedRhsOnlyBifrost.cpp | 518
-rw-r--r--  src/core/gpu/cl/kernels/gemm/reshaped_only_rhs/ClGemmDefaultReshapedRhsOnlyValhall.cpp | 570
-rw-r--r--  src/core/gpu/cl/kernels/gemm/reshaped_only_rhs/ClGemmReshapedOnlyRhsKernelConfig.h | 69
90 files changed, 0 insertions, 15911 deletions
diff --git a/src/core/gpu/cl/kernels/ClActivationKernel.cpp b/src/core/gpu/cl/kernels/ClActivationKernel.cpp
deleted file mode 100644
index 17a8c6498d..0000000000
--- a/src/core/gpu/cl/kernels/ClActivationKernel.cpp
+++ /dev/null
@@ -1,255 +0,0 @@
-/*
- * Copyright (c) 2016-2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "src/core/gpu/cl/kernels/ClActivationKernel.h"
-
-#include "arm_compute/core/CL/CLHelpers.h"
-#include "arm_compute/core/CL/ICLTensor.h"
-#include "arm_compute/core/TensorInfo.h"
-#include "arm_compute/core/Utils.h"
-#include "src/core/CL/CLValidate.h"
-#include "src/core/helpers/AutoConfiguration.h"
-#include "src/core/helpers/WindowHelpers.h"
-#include "support/Cast.h"
-
-#include "support/StringSupport.h"
-
-#include <set>
-
-namespace arm_compute
-{
-namespace opencl
-{
-namespace kernels
-{
-namespace
-{
-Status validate_arguments(const ITensorInfo *src, const ITensorInfo *dst, const ActivationLayerInfo &act_info)
-{
- ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(src);
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::QSYMM16, DataType::F16, DataType::F32);
-
- static std::set<ActivationLayerInfo::ActivationFunction> quantized_supported_activations =
- {
- ActivationLayerInfo::ActivationFunction::RELU,
- ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU,
- ActivationLayerInfo::ActivationFunction::BOUNDED_RELU,
- ActivationLayerInfo::ActivationFunction::LOGISTIC,
- ActivationLayerInfo::ActivationFunction::TANH,
- ActivationLayerInfo::ActivationFunction::HARD_SWISH,
- ActivationLayerInfo::ActivationFunction::LEAKY_RELU,
- };
- const DataType data_type = src->data_type();
- const QuantizationInfo &oq_info = (dst != nullptr) ? dst->quantization_info() : src->quantization_info();
- const ActivationLayerInfo::ActivationFunction f_act = act_info.activation();
-
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(is_data_type_quantized(data_type) && (quantized_supported_activations.count(f_act) == 0),
- "For Quantized data type only hard swish, leaky relu, tanh, logistic, relu and lower/upper bounded relu are supported");
-
- ARM_COMPUTE_RETURN_ERROR_ON(data_type == DataType::QASYMM8 && (f_act == ActivationLayerInfo::ActivationFunction::TANH) && (oq_info != QuantizationInfo(1.f / 128.f, 128)));
- ARM_COMPUTE_RETURN_ERROR_ON(data_type == DataType::QASYMM8 && (f_act == ActivationLayerInfo::ActivationFunction::LOGISTIC) && (oq_info != QuantizationInfo(1.f / 256.f, 0)));
-
- ARM_COMPUTE_RETURN_ERROR_ON(is_data_type_quantized_symmetric(data_type) && (f_act == ActivationLayerInfo::ActivationFunction::TANH) && (oq_info != QuantizationInfo(1.f / 32768.f, 0)));
- ARM_COMPUTE_RETURN_ERROR_ON(is_data_type_quantized_symmetric(data_type) && (f_act == ActivationLayerInfo::ActivationFunction::LOGISTIC) && (oq_info != QuantizationInfo(1.f / 32768.f, 0)));
-
- ARM_COMPUTE_RETURN_ERROR_ON(data_type == DataType::QASYMM8_SIGNED && (f_act == ActivationLayerInfo::ActivationFunction::TANH) && (oq_info != QuantizationInfo(1.f / 128.f, 0)));
- ARM_COMPUTE_RETURN_ERROR_ON(data_type == DataType::QASYMM8_SIGNED && (f_act == ActivationLayerInfo::ActivationFunction::LOGISTIC) && (oq_info != QuantizationInfo(1.f / 256.f, -128)));
-
- // Checks performed when destination is configured
- if((dst != nullptr) && (dst->total_size() != 0))
- {
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(src, dst);
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src, dst);
- }
-
- return Status{};
-}
-} // namespace
-
-ClActivationKernel::ClActivationKernel()
- : _run_in_place(false)
-{
-}
-
-void ClActivationKernel::configure(const ClCompileContext &compile_context, ITensorInfo *src, ITensorInfo *dst, ActivationLayerInfo act_info)
-{
- ARM_COMPUTE_ERROR_ON_NULLPTR(src);
-
- auto padding_info = get_padding_info({ src, dst });
-
- _run_in_place = (dst == nullptr) || (dst == src);
-
- if(dst != nullptr)
- {
- // Destination auto initialization if not yet initialized
- auto_init_if_empty(*dst, *src->clone());
- }
-
- ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(src, (dst != nullptr) ? dst : nullptr, act_info));
-
- const unsigned int num_elems_processed_per_iteration = adjust_vec_size(16 / src->element_size(), src->dimension(0));
-
- const DataType dt = src->data_type();
- float a_const = act_info.a();
- float b_const = act_info.b();
-
- const ActivationLayerInfo::ActivationFunction f_act = act_info.activation();
- const bool is_quantized = is_data_type_quantized(dt);
- const bool perform_activation_in_float =
- (f_act == ActivationLayerInfo::ActivationFunction::LOGISTIC)
- || (f_act == ActivationLayerInfo::ActivationFunction::TANH)
- || (f_act == ActivationLayerInfo::ActivationFunction::HARD_SWISH)
- || (f_act == ActivationLayerInfo::ActivationFunction::LEAKY_RELU);
-
- // Set build options
- CLBuildOptions build_opts;
- build_opts.add_option_if(perform_activation_in_float, "-DFLOAT_DOMAIN");
- build_opts.add_option_if(_run_in_place, "-DIN_PLACE");
- build_opts.add_option("-DACT=" + lower_string(string_from_activation_func(f_act)));
- build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(dt));
- build_opts.add_option("-DVEC_SIZE=" + support::cpp11::to_string(num_elems_processed_per_iteration));
- build_opts.add_option("-DVEC_SIZE_LEFTOVER=" + support::cpp11::to_string(src->dimension(0) % num_elems_processed_per_iteration));
-
- std::string kernel_name = std::string("activation_layer");
-
- // Set quantization info build options
- if(is_quantized)
- {
- const UniformQuantizationInfo iq_info = src->quantization_info().uniform();
-
- if(!perform_activation_in_float)
- {
- int a_const_int = 0;
- int b_const_int = 0;
-
- // Create quantized version of constants a, b if needed
- switch(dt)
- {
- case DataType::QASYMM8:
- {
- a_const_int = quantize_qasymm8(a_const, iq_info);
- b_const_int = quantize_qasymm8(b_const, iq_info);
- }
- break;
- case DataType::QASYMM8_SIGNED:
- {
- a_const_int = quantize_qasymm8_signed(a_const, iq_info);
- b_const_int = quantize_qasymm8_signed(b_const, iq_info);
- }
- break;
- case DataType::QSYMM16:
- {
- a_const_int = quantize_qsymm16(a_const, iq_info);
- b_const_int = quantize_qsymm16(b_const, iq_info);
- }
- break;
- default:
- break;
- }
- build_opts.add_option(("-DA_VAL=" + support::cpp11::to_string(a_const_int)));
- build_opts.add_option(("-DB_VAL=" + support::cpp11::to_string(b_const_int)));
- }
- else
- {
- build_opts.add_option(("-DA_VAL=" + float_to_string_with_full_precision(a_const)));
- build_opts.add_option(("-DB_VAL=" + float_to_string_with_full_precision(b_const)));
- }
-
- // Quantized value of 0 corresponds to the offset o1
- build_opts.add_option(("-DCONST_0=" + (is_data_type_quantized_asymmetric(dt) ? support::cpp11::to_string(iq_info.offset) : "0")));
- build_opts.add_option(("-DS1_VAL=" + float_to_string_with_full_precision(iq_info.scale)));
- build_opts.add_option_if(is_data_type_quantized_asymmetric(dt), "-DO1_VAL=" + support::cpp11::to_string(iq_info.offset));
-
- // Set correct kernel name
- kernel_name += perform_activation_in_float ? std::string("_quant_f32") : std::string("_quant");
-
- // Set scale and offset of the source and destination if they have different quantization info
- if(dst != nullptr)
- {
- const UniformQuantizationInfo oq_info = dst->quantization_info().uniform();
-
- if(iq_info != oq_info)
- {
- build_opts.add_option(("-DS2_VAL=" + float_to_string_with_full_precision(oq_info.scale)));
- build_opts.add_option_if(is_data_type_quantized_asymmetric(dt), "-DO2_VAL=" + support::cpp11::to_string(oq_info.offset));
- }
- }
- }
- else
- {
- // Set A, B constants in build options for float types
- build_opts.add_option(("-DA_VAL=" + float_to_string_with_full_precision(a_const)));
- build_opts.add_option(("-DB_VAL=" + float_to_string_with_full_precision(b_const)));
- }
-
- // Create kernel
- _kernel = create_kernel(compile_context, kernel_name, build_opts.options());
-
- // Configure kernel window
- Window win = calculate_max_window(*src, Steps(num_elems_processed_per_iteration));
- ICLKernel::configure_internal(win);
-
- // Set config_id for enabling LWS tuning
- _config_id = "activation_layer_";
- _config_id += lower_string(string_from_data_type(dt));
- _config_id += "_";
- _config_id += support::cpp11::to_string(src->dimension(0));
- _config_id += "_";
- _config_id += support::cpp11::to_string(src->dimension(1));
-
- ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
-}
-
-Status ClActivationKernel::validate(const ITensorInfo *src, const ITensorInfo *dst, const ActivationLayerInfo &act_info)
-{
- ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(src, dst, act_info));
- return Status{};
-}
-
-void ClActivationKernel::run_op(ITensorPack &tensors, const Window &window, ::cl::CommandQueue &queue)
-{
- ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
- ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window);
-
- const auto src = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC));
- auto dst = utils::cast::polymorphic_downcast<ICLTensor *>(tensors.get_tensor(TensorType::ACL_DST));
- ARM_COMPUTE_ERROR_ON(_run_in_place && src != dst);
-
- Window collapsed = window.collapse_if_possible(ICLKernel::window(), Window::DimZ);
- Window slice = collapsed.first_slice_window_3D();
-
- do
- {
- unsigned int idx = 0;
- add_3D_tensor_argument(idx, src, slice);
- if(!_run_in_place)
- {
- add_3D_tensor_argument(idx, dst, slice);
- }
- enqueue(queue, *this, slice, lws_hint());
- }
- while(collapsed.slide_window_slice_3D(slice));
-}
-} // namespace kernels
-} // namespace opencl
-} // namespace arm_compute
diff --git a/src/core/gpu/cl/kernels/ClActivationKernel.h b/src/core/gpu/cl/kernels/ClActivationKernel.h
deleted file mode 100644
index 68c309e9e7..0000000000
--- a/src/core/gpu/cl/kernels/ClActivationKernel.h
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
- * Copyright (c) 2016-2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_CL_ACTIVATION_KERNEL_H
-#define ARM_COMPUTE_CL_ACTIVATION_KERNEL_H
-
-#include "src/core/common/Macros.h"
-#include "src/core/gpu/cl/ClCompileContext.h"
-#include "src/core/gpu/cl/IClKernel.h"
-
-namespace arm_compute
-{
-namespace opencl
-{
-namespace kernels
-{
-/** Interface for the activation kernel. */
-class ClActivationKernel : public IClKernel
-{
-public:
- ClActivationKernel();
- ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(ClActivationKernel);
- /** Configure kernel for a given list of arguments
- *
- * @note If the output tensor is a nullptr, the activation function will be performed in-place
- *
- * @param[in] compile_context The compile context to be used.
- * @param[in, out] src Source tensor info. In case of @p dst tensor = nullptr, this tensor will store the result
- * of the activation function. Data types supported: QASYMM8/QASYMM8_SIGNED/QSYMM16/F16/F32.
- * @param[out] dst Destination tensor info. Data type supported: same as @p src
- * @param[in] act_info Activation layer information.
- */
- void configure(const ClCompileContext &compile_context, ITensorInfo *src, ITensorInfo *dst, ActivationLayerInfo act_info);
- /** Static function to check if given info will lead to a valid configuration of @ref ClActivationKernel
- *
- * @param[in] src Source tensor info. In case of @p dst tensor info = nullptr, this tensor will store the result
- * of the activation function. Data types supported: QASYMM8/QASYMM8_SIGNED/QSYMM16/F16/F32.
- * @param[in] dst Destination tensor info. Data type supported: same as @p src
- * @param[in] act_info Activation layer information.
- *
- * @return a status
- */
- static Status validate(const ITensorInfo *src, const ITensorInfo *dst, const ActivationLayerInfo &act_info);
-
- // Inherited methods overridden:
- void run_op(ITensorPack &tensors, const Window &window, ::cl::CommandQueue &queue) override;
-
-private:
- bool _run_in_place;
-};
-} // namespace kernels
-} // namespace opencl
-} // namespace arm_compute
-#endif /* ARM_COMPUTE_CL_ACTIVATION_KERNEL_H */
diff --git a/src/core/gpu/cl/kernels/ClBatchConcatenateKernel.cpp b/src/core/gpu/cl/kernels/ClBatchConcatenateKernel.cpp
deleted file mode 100644
index 26f5113822..0000000000
--- a/src/core/gpu/cl/kernels/ClBatchConcatenateKernel.cpp
+++ /dev/null
@@ -1,153 +0,0 @@
-/*
- * Copyright (c) 2019-2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "src/core/gpu/cl/kernels/ClBatchConcatenateKernel.h"
-
-#include "arm_compute/core/CL/CLHelpers.h"
-#include "arm_compute/core/CL/CLKernelLibrary.h"
-#include "arm_compute/core/CL/ICLTensor.h"
-#include "arm_compute/core/Helpers.h"
-#include "arm_compute/core/Utils.h"
-#include "src/core/CL/CLValidate.h"
-#include "src/core/helpers/WindowHelpers.h"
-#include "support/Cast.h"
-
-#include "support/StringSupport.h"
-
-namespace arm_compute
-{
-namespace opencl
-{
-namespace kernels
-{
-namespace
-{
-Status validate_arguments(const ITensorInfo *src, unsigned int batch_offset, const ITensorInfo *dst)
-{
- ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, dst);
- ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(src);
- ARM_COMPUTE_RETURN_ERROR_ON(src->data_type() == DataType::UNKNOWN);
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src, dst);
-
- ARM_COMPUTE_RETURN_ERROR_ON(src->dimension(Window::DimX) != dst->dimension(Window::DimX));
- ARM_COMPUTE_RETURN_ERROR_ON(src->dimension(Window::DimY) != dst->dimension(Window::DimY));
- ARM_COMPUTE_RETURN_ERROR_ON(src->dimension(Window::DimZ) != dst->dimension(Window::DimZ));
- ARM_COMPUTE_RETURN_ERROR_ON(src->dimension(3) + batch_offset > dst->dimension(3));
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(4, src, dst);
-
- return Status{};
-}
-} // namespace
-
-ClBatchConcatenateKernel::ClBatchConcatenateKernel()
- : _batch_offset(0)
-{
-}
-
-void ClBatchConcatenateKernel::configure(const CLCompileContext &compile_context, ITensorInfo *src, unsigned int batch_offset, ITensorInfo *dst)
-{
- ARM_COMPUTE_ERROR_ON_NULLPTR(src, dst);
- ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(src, batch_offset, dst));
-
- auto padding_info = get_padding_info({ src, dst });
-
- _batch_offset = batch_offset;
-
- const unsigned int num_elems_processed_per_iteration = adjust_vec_size(16 / src->element_size(), src->dimension(0));
-
- // Add build options
- CLBuildOptions build_opts;
- build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(src->data_type()));
- build_opts.add_option("-DVEC_SIZE=" + support::cpp11::to_string(num_elems_processed_per_iteration));
- build_opts.add_option("-DVEC_SIZE_LEFTOVER=" + support::cpp11::to_string(src->dimension(0) % num_elems_processed_per_iteration));
- if(is_data_type_quantized_asymmetric(src->data_type()) && src->quantization_info() != dst->quantization_info())
- {
- const UniformQuantizationInfo iq_info = src->quantization_info().uniform();
- const UniformQuantizationInfo oq_info = dst->quantization_info().uniform();
-
- build_opts.add_option("-DOFFSET_IN1=" + float_to_string_with_full_precision(iq_info.offset));
- build_opts.add_option("-DOFFSET_OUT=" + float_to_string_with_full_precision(oq_info.offset));
- build_opts.add_option("-DSCALE_IN1=" + float_to_string_with_full_precision(iq_info.scale));
- build_opts.add_option("-DSCALE_OUT=" + float_to_string_with_full_precision(oq_info.scale));
- }
-
- // Create kernel
- _kernel = create_kernel(compile_context, "concatenate", build_opts.options());
-
- // Configure kernel window
- auto win = calculate_max_window(*dst, Steps(num_elems_processed_per_iteration));
- win.set(3, Window::Dimension(0, src->tensor_shape()[3], 1));
- ICLKernel::configure_internal(win);
-
- // Set config_id for enabling LWS tuning
- _config_id = "concatenate_";
- _config_id += support::cpp11::to_string(3);
- _config_id += "_";
- _config_id += support::cpp11::to_string(batch_offset);
- _config_id += "_";
- _config_id += support::cpp11::to_string(src->dimension(0));
- _config_id += "_";
- _config_id += support::cpp11::to_string(src->dimension(1));
- _config_id += "_";
- _config_id += support::cpp11::to_string(src->dimension(2));
- _config_id += "_";
- _config_id += support::cpp11::to_string(src->dimension(3));
-
- ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
-}
-
-Status ClBatchConcatenateKernel::validate(const arm_compute::ITensorInfo *src,
- unsigned int batch_offset,
- const arm_compute::ITensorInfo *dst)
-{
- ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(src, batch_offset, dst));
- return Status{};
-}
-
-void ClBatchConcatenateKernel::run_op(ITensorPack &tensors, const Window &window, ::cl::CommandQueue &queue)
-{
- ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
- ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window);
-
- const auto src = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC));
- auto dst = utils::cast::polymorphic_downcast<ICLTensor *>(tensors.get_tensor(TensorType::ACL_DST));
-
- Window slice = window.first_slice_window_3D();
-
- const int offset_to_first_elements_in_bytes = _batch_offset * dst->info()->strides_in_bytes()[3];
-
- unsigned int idx = 2 * num_arguments_per_3D_tensor(); // Skip the src and dst parameters
- _kernel.setArg<cl_int>(idx, offset_to_first_elements_in_bytes);
-
- do
- {
- unsigned int idx = 0;
- add_3D_tensor_argument(idx, src, slice);
- add_3D_tensor_argument(idx, dst, slice);
- enqueue(queue, *this, slice, lws_hint());
- }
- while(window.slide_window_slice_3D(slice));
-}
-} // namespace kernels
-} // namespace opencl
-} // namespace arm_compute
diff --git a/src/core/gpu/cl/kernels/ClBatchConcatenateKernel.h b/src/core/gpu/cl/kernels/ClBatchConcatenateKernel.h
deleted file mode 100644
index d9fa905e8e..0000000000
--- a/src/core/gpu/cl/kernels/ClBatchConcatenateKernel.h
+++ /dev/null
@@ -1,77 +0,0 @@
-/*
- * Copyright (c) 2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_CL_BATCH_CONCATENATE_KERNEL_H
-#define ARM_COMPUTE_CL_BATCH_CONCATENATE_KERNEL_H
-
-#include "src/core/common/Macros.h"
-#include "src/core/gpu/cl/ClCompileContext.h"
-#include "src/core/gpu/cl/IClKernel.h"
-
-namespace arm_compute
-{
-namespace opencl
-{
-namespace kernels
-{
-/** Interface for the batch concatenate kernel.
- * The src tensor will be concatenated into the destination tensor.
- */
-class ClBatchConcatenateKernel : public IClKernel
-{
-public:
- /** Default constructor */
- ClBatchConcatenateKernel();
- ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(ClBatchConcatenateKernel);
- /** Initialise the kernel's source and destination
- *
- * @param[in] compile_context The compile context to be used.
- * @param[in] src Source tensor info. Data types supported: All.
- * @param[in] batch_offset The offset on axis # 3.
- * @param[in,out] dst Destination tensor info. Data types supported: Same as @p src.
- *
- * @note: The dst tensor's lowest two dimensions can't be smaller than the src tensor's.
- * @note: The gaps between the two lowest dimensions of src and dst need to be divisible by 2.
- *
- */
- void configure(const CLCompileContext &compile_context, ITensorInfo *src, unsigned int batch_offset, ITensorInfo *dst);
- /** Static function to check if given info will lead to a valid configuration of @ref ClBatchConcatenateKernel
- *
- * @param[in] src Input tensor info. Data types supported: All.
- * @param[in] batch_offset The offset on axis # 3.
- * @param[in] dst Destination tensor info. Data types supported: Same as @p src.
- *
- * @return a status
- */
- static Status validate(const ITensorInfo *src, unsigned int batch_offset, const ITensorInfo *dst);
-
- // Inherited methods overridden:
- void run_op(ITensorPack &tensors, const Window &window, ::cl::CommandQueue &queue) override;
-
-private:
- unsigned int _batch_offset;
-};
-} // namespace kernels
-} // namespace opencl
-} // namespace arm_compute
-#endif /* ARM_COMPUTE_CL_BATCH_CONCATENATE_KERNEL_H */
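The batch concatenate kernel writes a single src tensor into dst at a given offset along axis 3, so a full concatenation is built by configuring one kernel per input. A hedged sketch for two inputs; tensor names are placeholders and each kernel would be run with its own ITensorPack as in the activation example:

// Concatenate src0 and src1 along the batch axis into dst (dst's batch count
// must equal src0's batch count plus src1's).
auto &ctx = CLKernelLibrary::get().get_compile_context();

opencl::kernels::ClBatchConcatenateKernel k0, k1;
k0.configure(ctx, src0.info(), /* batch_offset */ 0U, dst.info());
k1.configure(ctx, src1.info(), /* batch_offset */ src0.info()->dimension(3), dst.info());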
diff --git a/src/core/gpu/cl/kernels/ClCastKernel.cpp b/src/core/gpu/cl/kernels/ClCastKernel.cpp
deleted file mode 100644
index 7a1d5c2824..0000000000
--- a/src/core/gpu/cl/kernels/ClCastKernel.cpp
+++ /dev/null
@@ -1,163 +0,0 @@
-/*
- * Copyright (c) 2016-2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "src/core/gpu/cl/kernels/ClCastKernel.h"
-
-#include "arm_compute/core/CL/CLHelpers.h"
-#include "arm_compute/core/CL/CLKernelLibrary.h"
-#include "arm_compute/core/CL/ICLTensor.h"
-#include "arm_compute/core/CL/OpenCL.h"
-#include "arm_compute/core/TensorInfo.h"
-#include "arm_compute/core/Utils.h"
-#include "arm_compute/core/Validate.h"
-#include "src/core/CL/CLValidate.h"
-#include "src/core/helpers/AutoConfiguration.h"
-#include "src/core/helpers/WindowHelpers.h"
-
-#include "support/Cast.h"
-#include "support/StringSupport.h"
-
-namespace arm_compute
-{
-namespace opencl
-{
-namespace kernels
-{
-namespace
-{
-Status validate_arguments(const ITensorInfo *src, const ITensorInfo *dst, ConvertPolicy policy)
-{
- ARM_COMPUTE_UNUSED(policy);
- ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(src);
- ARM_COMPUTE_RETURN_ERROR_ON(src == dst);
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src,
- 1,
- DataType::U8, DataType::S8, DataType::QSYMM8_PER_CHANNEL, DataType::S16,
- DataType::U16, DataType::U32, DataType::S32, DataType::F16,
- DataType::F32);
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(dst,
- 1,
- DataType::U8, DataType::S8, DataType::QASYMM8, DataType::S16,
- DataType::U16, DataType::U32, DataType::S32, DataType::F16,
- DataType::F32);
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(src->data_type() == dst->data_type(), "src and dst data types must be different");
-
- // Validate in case of configured dst
- if(dst->total_size() > 0)
- {
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(src, dst);
- }
-
- return Status{};
-}
-} // namespace
-
-void ClCastKernel::configure(const CLCompileContext &compile_context, const ITensorInfo *src, ITensorInfo *dst, ConvertPolicy policy)
-{
- ARM_COMPUTE_ERROR_ON_NULLPTR(src, dst);
-
- // Auto initialize dst shape if not initialized (We can only auto-configure the shape, datatype must be given)
- set_shape_if_empty(*dst, src->tensor_shape());
-
- ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(src, dst, policy));
-
- auto padding_info = get_padding_info({ src, dst });
-
- // Get data sizes
- const size_t src_size = data_size_from_type(src->data_type());
- const size_t dst_size = data_size_from_type(dst->data_type());
-
- // Get number of elements to process per iterations
- const unsigned int num_elems_processed_per_iteration = adjust_vec_size(16 / src->element_size(), src->dimension(0));
-
- // Set build options
- CLBuildOptions build_opts;
- build_opts.add_option("-DVEC_SIZE=" + support::cpp11::to_string(num_elems_processed_per_iteration));
- build_opts.add_option("-DVEC_SIZE_LEFTOVER=" + support::cpp11::to_string(src->dimension(0) % num_elems_processed_per_iteration));
- build_opts.add_option("-DDATA_TYPE_IN=" + get_cl_type_from_data_type(src->data_type()));
- build_opts.add_option("-DDATA_TYPE_OUT=" + get_cl_type_from_data_type(dst->data_type()));
- // Conversions from float always SATURATE as out-of-bounds conversion from float->integer is implementation defined
- build_opts.add_option_if(is_data_type_float(src->data_type()) || policy == ConvertPolicy::SATURATE, "-DSATURATE");
- build_opts.add_option_if(is_data_type_float(src->data_type()) || is_data_type_float(dst->data_type()), "-DIS_DATA_TYPE_FLOAT");
- build_opts.add_option_if(is_data_type_quantized(src->data_type()), "-DIS_DATA_TYPE_QUANTIZED");
-
- // Create kernel
- const std::string kernel_name = (src_size >= dst_size) ? "cast_down" : "cast_up";
- _kernel = create_kernel(compile_context, kernel_name, build_opts.options());
-
- // Configure kernel
- Window win = calculate_max_window(*src, Steps(num_elems_processed_per_iteration));
- ICLKernel::configure_internal(win);
-
- // Collapse window
- const Window &full_window = window();
- Window collapsed_window = full_window.collapse_if_possible(full_window, Window::DimZ);
- ICLKernel::configure_internal(collapsed_window);
-
- ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
-
- // Set config_id for enabling LWS tuning
- _config_id = kernel_name;
- _config_id += "_";
- _config_id += lower_string(string_from_data_type(src->data_type()));
- _config_id += "_";
- _config_id += support::cpp11::to_string(src->dimension(0));
- _config_id += "_";
- _config_id += support::cpp11::to_string(src->dimension(1));
- _config_id += "_";
- _config_id += support::cpp11::to_string(dst->dimension(0));
- _config_id += "_";
- _config_id += support::cpp11::to_string(dst->dimension(1));
-}
-
-Status ClCastKernel::validate(const ITensorInfo *src, const ITensorInfo *dst, ConvertPolicy policy)
-{
- ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(src, dst, policy));
- return Status{};
-}
-
-void ClCastKernel::run_op(ITensorPack &tensors, const Window &window, ::cl::CommandQueue &queue)
-{
- ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
- ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window);
-
- const auto src = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC));
- auto dst = utils::cast::polymorphic_downcast<ICLTensor *>(tensors.get_tensor(TensorType::ACL_DST));
-
- ARM_COMPUTE_ERROR_ON_NULLPTR(src, dst);
-
- Window collapsed = window.collapse_if_possible(ICLKernel::window(), Window::DimZ);
- Window slice = collapsed.first_slice_window_3D();
-
- do
- {
- unsigned int idx = 0;
- add_3D_tensor_argument(idx, src, slice);
- add_3D_tensor_argument(idx, dst, slice);
- enqueue(queue, *this, slice, lws_hint());
- }
- while(collapsed.slide_window_slice_3D(slice));
-}
-} // namespace kernels
-} // namespace opencl
-} // namespace arm_compute
diff --git a/src/core/gpu/cl/kernels/ClCastKernel.h b/src/core/gpu/cl/kernels/ClCastKernel.h
deleted file mode 100644
index 451aa9c1ab..0000000000
--- a/src/core/gpu/cl/kernels/ClCastKernel.h
+++ /dev/null
@@ -1,79 +0,0 @@
-/*
- * Copyright (c) 2016-2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_CL_CAST_KERNEL_H
-#define ARM_COMPUTE_CL_CAST_KERNEL_H
-
-#include "src/core/common/Macros.h"
-#include "src/core/gpu/cl/ClCompileContext.h"
-#include "src/core/gpu/cl/IClKernel.h"
-
-namespace arm_compute
-{
-namespace opencl
-{
-namespace kernels
-{
-/** Casts a given tensor to a new type
- *
- * @note When casting between quantized types the scale and zeroPoint are ignored
- */
-class ClCastKernel : public IClKernel
-{
-public:
- ClCastKernel() = default;
- ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(ClCastKernel);
- /** Set the src and dst of the kernel.
- *
- * Valid conversions src -> dst :
- *
- * - QSYMM8_PER_CHANNEL -> QASYMM8 (ATTENTION: it is the user's responsibility to keep track of the quantization info in the TensorInfo meta-data)
- * - U8 -> S8, U16, S16, U32, S32, F16, F32
- * - U16 -> U8, S8, S16, U32, S32, F16, F32
- * - S16 -> U8, S8, U16, U32, S32, F16, F32
- * - U32 -> U8, S8, U16, S16, S32, F16, F32
- * - S32 -> U8, S8, U16, S16, U32, F16, F32
- * - F16 -> U8, S8, U16, S16, U32, F32
- * - F32 -> U8, S8, U16, S16, U32, F16
- *
- * @param[in] compile_context The compile context to be used.
- * @param[in] src The source tensor to convert. Data types supported: U8/S8/QSYMM8_PER_CHANNEL/U16/S16/U32/S32/F16/F32.
- * @param[out] dst The destination tensor. Data types supported: U8/S8/QASYMM8/U16/S16/U32/S32/F16/F32.
- * @param[in] policy Conversion policy
- */
- void configure(const CLCompileContext &compile_context, const ITensorInfo *src, ITensorInfo *dst, ConvertPolicy policy);
- /** Static function to check if given info will lead to a valid configuration
- *
- * Similar to @ref ClCastKernel::configure()
- *
- * @return a status
- */
- static Status validate(const ITensorInfo *src, const ITensorInfo *dst, ConvertPolicy policy);
-
- // Inherited methods overridden:
- void run_op(ITensorPack &tensors, const Window &window, ::cl::CommandQueue &queue) override;
-};
-} // namespace kernels
-} // namespace opencl
-} // namespace arm_compute
-#endif /* ARM_COMPUTE_CL_CAST_KERNEL_H */
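Note from the implementation above that the OpenCL program is chosen by element size ("cast_down" when the source type is at least as wide as the destination, "cast_up" otherwise) and that conversions from float always saturate. A brief usage sketch, assuming already-initialised F32 source and U8 destination tensors (names are illustrative):

opencl::kernels::ClCastKernel cast;
ARM_COMPUTE_ERROR_THROW_ON(
    opencl::kernels::ClCastKernel::validate(src_f32.info(), dst_u8.info(), ConvertPolicy::SATURATE));
// F32 (4 bytes) -> U8 (1 byte), so the "cast_down" variant is compiled under the hood.
cast.configure(CLKernelLibrary::get().get_compile_context(), src_f32.info(), dst_u8.info(), ConvertPolicy::SATURATE);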
diff --git a/src/core/gpu/cl/kernels/ClConvertFullyConnectedWeightsKernel.cpp b/src/core/gpu/cl/kernels/ClConvertFullyConnectedWeightsKernel.cpp
deleted file mode 100644
index 49f2f68a76..0000000000
--- a/src/core/gpu/cl/kernels/ClConvertFullyConnectedWeightsKernel.cpp
+++ /dev/null
@@ -1,119 +0,0 @@
-/*
- * Copyright (c) 2018-2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "src/core/gpu/cl/kernels/ClConvertFullyConnectedWeightsKernel.h"
-
-#include "arm_compute/core/CL/CLHelpers.h"
-#include "arm_compute/core/CL/CLKernelLibrary.h"
-#include "arm_compute/core/CL/ICLTensor.h"
-#include "arm_compute/core/Helpers.h"
-#include "arm_compute/core/Utils.h"
-#include "src/core/CL/CLValidate.h"
-#include "src/core/helpers/AutoConfiguration.h"
-#include "src/core/helpers/WindowHelpers.h"
-#include "support/Cast.h"
-#include "support/StringSupport.h"
-
-namespace arm_compute
-{
-namespace opencl
-{
-namespace kernels
-{
-void ClConvertFullyConnectedWeightsKernel::configure(const CLCompileContext &compile_context, const ITensorInfo *src, ITensorInfo *dst, const TensorShape &original_src_shape,
- DataLayout data_layout)
-{
- ARM_COMPUTE_ERROR_ON_NULLPTR(src, dst);
-
- // Output tensor auto initialisation if not yet initialized
- auto_init_if_empty(*dst, *src->clone());
-
- auto padding_info = get_padding_info({ src, dst });
-
- ARM_COMPUTE_ERROR_THROW_ON(ClConvertFullyConnectedWeightsKernel::validate(src, dst, original_src_shape, data_layout));
-
- const DataLayout src_data_layout = (data_layout == DataLayout::NCHW) ? DataLayout::NHWC : DataLayout::NCHW;
-
- const int width_idx = get_data_layout_dimension_index(src_data_layout, DataLayoutDimension::WIDTH);
- const int height_idx = get_data_layout_dimension_index(src_data_layout, DataLayoutDimension::HEIGHT);
- const int channel_idx = get_data_layout_dimension_index(src_data_layout, DataLayoutDimension::CHANNEL);
-
- const unsigned int num_elems_per_src_plane = original_src_shape[width_idx] * original_src_shape[height_idx];
- const unsigned int num_channels = original_src_shape[channel_idx];
-
- const unsigned int factor_1 = (data_layout == DataLayout::NCHW) ? num_elems_per_src_plane : num_channels;
- const unsigned int factor_2 = (data_layout == DataLayout::NCHW) ? num_channels : num_elems_per_src_plane;
-
- // Set build options
- CLBuildOptions build_opts;
- build_opts.add_option("-DDATA_TYPE=" + get_cl_unsigned_type_from_element_size(src->element_size()));
- build_opts.add_option("-DFACTOR_1=" + support::cpp11::to_string(factor_1));
- build_opts.add_option("-DFACTOR_2=" + support::cpp11::to_string(factor_2));
-
- // Create kernel
- _kernel = create_kernel(compile_context, "convert_fc_weights", build_opts.options());
-
- // Configure kernel window
- Window win = calculate_max_window(*src, Steps());
- ICLKernel::configure_internal(win);
-
- ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
-}
-
-Status ClConvertFullyConnectedWeightsKernel::validate(const ITensorInfo *src, const ITensorInfo *dst, const TensorShape &original_src_shape,
- DataLayout data_layout)
-{
- ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, dst);
- ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(src);
- ARM_COMPUTE_RETURN_ERROR_ON(src->data_type() == DataType::UNKNOWN);
- ARM_COMPUTE_RETURN_ERROR_ON(src->num_dimensions() != 2);
- ARM_COMPUTE_RETURN_ERROR_ON(src->dimension(1) != original_src_shape.total_size_lower(3));
- ARM_COMPUTE_RETURN_ERROR_ON(data_layout == DataLayout::UNKNOWN);
-
- // Checks performed when dst is configured
- if(dst->total_size() != 0)
- {
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src, dst);
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(src, dst);
- }
-
- return Status{};
-}
-
-void ClConvertFullyConnectedWeightsKernel::run_op(ITensorPack &tensors, const Window &window, ::cl::CommandQueue &queue)
-{
- ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
- ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window);
-
- const auto src = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC));
- auto dst = utils::cast::polymorphic_downcast<ICLTensor *>(tensors.get_tensor(TensorType::ACL_DST));
- ARM_COMPUTE_ERROR_ON_NULLPTR(src, dst);
-
- unsigned int idx = 0;
- add_2D_tensor_argument(idx, src, window);
- add_2D_tensor_argument(idx, dst, window);
- enqueue(queue, *this, window, lws_hint());
-}
-} // namespace kernels
-} // namespace opencl
-} // namespace arm_compute
diff --git a/src/core/gpu/cl/kernels/ClConvertFullyConnectedWeightsKernel.h b/src/core/gpu/cl/kernels/ClConvertFullyConnectedWeightsKernel.h
deleted file mode 100644
index 11ab4d2a0d..0000000000
--- a/src/core/gpu/cl/kernels/ClConvertFullyConnectedWeightsKernel.h
+++ /dev/null
@@ -1,76 +0,0 @@
-/*
- * Copyright (c) 2018-2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_CLCONVERTFULLYCONNECTEDWEIGHTSKERNEL_H
-#define ARM_COMPUTE_CLCONVERTFULLYCONNECTEDWEIGHTSKERNEL_H
-
-#include "src/core/common/Macros.h"
-#include "src/core/gpu/cl/ClCompileContext.h"
-#include "src/core/gpu/cl/IClKernel.h"
-
-namespace arm_compute
-{
-class ICLTensor;
-
-/** Interface to convert the 2D Fully Connected weights from NCHW to NHWC or vice versa.
- *
- * @note This function can be applied to the 2D weights used by a Fully Connected layer if:
- * - It follows a Convolution layer
- * - The data layout used by the network does not match the one the model has been trained in.
- *
- * @note This function assumes the weights are already reshaped (transposed)
- */
-namespace opencl
-{
-namespace kernels
-{
-class ClConvertFullyConnectedWeightsKernel : public IClKernel
-{
-public:
- ClConvertFullyConnectedWeightsKernel() = default;
- ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(ClConvertFullyConnectedWeightsKernel);
- /** Set the src and dst tensor.
- *
- * @param[in] compile_context The compile context to be used.
- * @param[in] src Source weights tensor info to convert. Must be 2 dimensional. Data types supported: All.
- * @param[out] dst The converted weights tensor info. Shape and Data Type: Same as @p src.
- * @param[in] original_src_shape Shape of the original src tensor (the one entering fully connected layer).
- * @param[in] data_layout The data layout the weights have been trained in.
- */
- void configure(const CLCompileContext &compile_context, const ITensorInfo *src, ITensorInfo *dst, const TensorShape &original_src_shape, DataLayout data_layout);
- /** Static function to check if given info will lead to a valid configuration of @ref ClConvertFullyConnectedWeightsKernel
- *
- * @param[in] src Source weights tensor info to convert. Must be 2 dimensional. Data types supported: All.
- * @param[in] dst The converted weights tensor info. Shape and Data Type: Same as @p src.
- * @param[in] original_src_shape Shape of the original src tensor (the one entering fully connected layer).
- * @param[in] data_layout The data layout the weights have been trained in.
- */
- static Status validate(const ITensorInfo *src, const ITensorInfo *dst, const TensorShape &original_src_shape, DataLayout data_layout);
-
- // Inherited methods overridden:
- void run_op(ITensorPack &tensors, const Window &window, ::cl::CommandQueue &queue) override;
-};
-} // namespace kernels
-} // namespace opencl
-} // namespace arm_compute
-#endif /*ARM_COMPUTE_CLCONVERTFULLYCONNECTEDWEIGHTSKERNEL_H */
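As documented above, original_src_shape is the shape of the tensor that feeds the fully connected layer before flattening; the kernel derives its two reordering factors from that shape's W*H and C extents. A hedged configuration sketch, where the 7x7x512 shape and the tensor names are arbitrary examples:

// 2D FC weights were trained in NCHW but the network now runs in NHWC.
const TensorShape original_src_shape(7U, 7U, 512U); // W, H, C of the tensor entering the FC layer

opencl::kernels::ClConvertFullyConnectedWeightsKernel convert;
ARM_COMPUTE_ERROR_THROW_ON(opencl::kernels::ClConvertFullyConnectedWeightsKernel::validate(
    weights.info(), converted_weights.info(), original_src_shape, DataLayout::NCHW));
convert.configure(CLKernelLibrary::get().get_compile_context(),
                  weights.info(), converted_weights.info(), original_src_shape, DataLayout::NCHW);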
diff --git a/src/core/gpu/cl/kernels/ClCopyKernel.cpp b/src/core/gpu/cl/kernels/ClCopyKernel.cpp
deleted file mode 100644
index d6c87f8fad..0000000000
--- a/src/core/gpu/cl/kernels/ClCopyKernel.cpp
+++ /dev/null
@@ -1,170 +0,0 @@
-/*
- * Copyright (c) 2018-2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "src/core/gpu/cl/kernels/ClCopyKernel.h"
-
-#include "arm_compute/core/CL/CLHelpers.h"
-#include "arm_compute/core/CL/CLKernelLibrary.h"
-#include "arm_compute/core/CL/ICLTensor.h"
-#include "arm_compute/core/Helpers.h"
-#include "arm_compute/core/TensorInfo.h"
-#include "arm_compute/core/Utils.h"
-#include "arm_compute/core/Validate.h"
-#include "src/core/CL/CLValidate.h"
-#include "src/core/helpers/AutoConfiguration.h"
-#include "src/core/helpers/WindowHelpers.h"
-#include "support/Cast.h"
-#include "support/StringSupport.h"
-
-namespace arm_compute
-{
-namespace opencl
-{
-namespace kernels
-{
-namespace
-{
-Status validate_arguments(const ITensorInfo *src, const ITensorInfo *dst, Window *dst_window = nullptr)
-{
- ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, dst);
-
- // Validate dst if initialized
- if(dst->total_size() != 0)
- {
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src, dst);
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_QUANTIZATION_INFO(src, dst);
- if(dst_window == nullptr)
- {
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(src->tensor_shape(), dst->tensor_shape());
- }
- else
- {
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(src->tensor_shape(), dst_window->shape());
- }
- }
-
- return Status{};
-}
-
-} // namespace
-
-void ClCopyKernel::configure(const CLCompileContext &compile_context, const ITensorInfo *src, ITensorInfo *dst, Window *dst_window)
-{
- ARM_COMPUTE_ERROR_ON_NULLPTR(src, dst);
- ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(src, dst, dst_window));
-
- auto padding_info = get_padding_info({ src, dst });
-
- // Create kernel
- CLBuildOptions build_opts;
- build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(src->data_type()));
-
- // Output auto initialization if not yet initialized
- auto_init_if_empty(*dst, *src);
-
- // Configure window
- const unsigned int vec_size_x = adjust_vec_size(16 / src->element_size(), src->dimension(0));
-
- const Window win_config = calculate_max_window(*src, Steps(vec_size_x));
-
- if(dst_window != nullptr)
- {
- _has_dst_window = true;
- _dst_window = Window(*dst_window);
- const int width_x = dst_window->num_iterations(0);
- const int vec_size_x_leftover = width_x % vec_size_x;
- const bool multi_access_x = width_x >= static_cast<int32_t>(vec_size_x);
-
- if(multi_access_x)
- {
- _dst_window.set(Window::DimX, Window::Dimension(dst_window->x().start(), ceil_to_multiple(dst_window->x().end(), vec_size_x), vec_size_x));
- }
-
- build_opts.add_option("-DVEC_SIZE_LEFTOVER=" + support::cpp11::to_string(vec_size_x_leftover));
- }
- else
- {
- const int width_x = src->tensor_shape().x();
- const int vec_size_x_leftover = width_x % vec_size_x;
-
- build_opts.add_option("-DVEC_SIZE_LEFTOVER=" + support::cpp11::to_string(vec_size_x_leftover));
- }
-
- build_opts.add_option("-DVEC_SIZE=" + support::cpp11::to_string(vec_size_x));
-
- // Build kernel
- _kernel = create_kernel(compile_context, "copy_tensor", build_opts.options());
-
- // Validate and set the window
- ICLKernel::configure_internal(win_config);
-
- ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
-}
-
-Status ClCopyKernel::validate(const arm_compute::ITensorInfo *src, const arm_compute::ITensorInfo *dst, Window *dst_window)
-{
- ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(src, dst, dst_window));
-
- return Status{};
-}
-
-void ClCopyKernel::run_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue)
-{
- ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
- ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window);
-
- const auto src = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC));
- auto dst = utils::cast::polymorphic_downcast<ICLTensor *>(tensors.get_tensor(TensorType::ACL_DST));
-
- Window slice;
-
- if(_has_dst_window)
- {
- slice = window.first_slice_window_3D();
- Window out_slice = _dst_window.first_slice_window_3D();
- do
- {
- unsigned int idx = 0;
- add_3D_tensor_argument(idx, src, slice);
- add_3D_tensor_argument(idx, dst, out_slice);
- enqueue(queue, *this, slice, lws_hint());
- }
- while(window.slide_window_slice_3D(slice) && _dst_window.slide_window_slice_3D(out_slice));
- }
- else
- {
- Window collapsed = window.collapse_if_possible(ICLKernel::window(), Window::DimZ);
- slice = collapsed.first_slice_window_3D();
- do
- {
- unsigned int idx = 0;
- add_3D_tensor_argument(idx, src, slice);
- add_3D_tensor_argument(idx, dst, slice);
- enqueue(queue, *this, slice, lws_hint());
- }
- while(collapsed.slide_window_slice_3D(slice));
- }
-}
-} // namespace kernels
-} // namespace opencl
-} // namespace arm_compute
diff --git a/src/core/gpu/cl/kernels/ClCopyKernel.h b/src/core/gpu/cl/kernels/ClCopyKernel.h
deleted file mode 100644
index b1b9672bcb..0000000000
--- a/src/core/gpu/cl/kernels/ClCopyKernel.h
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * Copyright (c) 2018-2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_CL_COPY_KERNEL_H
-#define ARM_COMPUTE_CL_COPY_KERNEL_H
-
-#include "src/core/common/Macros.h"
-#include "src/core/gpu/cl/ClCompileContext.h"
-#include "src/core/gpu/cl/IClKernel.h"
-
-namespace arm_compute
-{
-namespace opencl
-{
-namespace kernels
-{
-/** OpenCL kernel to perform a copy between two tensors */
-class ClCopyKernel : public IClKernel
-{
-public:
- ClCopyKernel() = default;
- ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(ClCopyKernel);
- /** Initialize the kernel's src, dst.
- *
- * @param[in] compile_context The compile context to be used.
- * @param[in] src Source tensor info. Data types supported: All.
- * @param[out] dst Destination tensor info. Data types supported: same as @p src.
- * @param[in] dst_window (Optional) Window to be used when copying into only part of the destination tensor. Default is nullptr.
- */
- void configure(const CLCompileContext &compile_context, const ITensorInfo *src, ITensorInfo *dst, Window *dst_window = nullptr);
- /** Static function to check if given info will lead to a valid configuration of @ref ClCopyKernel
- *
- * @param[in] src Source tensor info. Data types supported: All.
- * @param[in] dst Destination tensor info. Data types supported: same as @p src.
- * @param[in] dst_window (Optional) Window to be used when copying into only part of the destination tensor. Default is nullptr.
- *
- * @return a status
- */
- static Status validate(const ITensorInfo *src, const ITensorInfo *dst, Window *dst_window = nullptr);
-
- // Inherited methods overridden:
- void run_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue) override;
-
-private:
- Window _dst_window{};
- bool _has_dst_window{};
-};
-} // namespace kernels
-} // namespace opencl
-} // namespace arm_compute
-#endif /*ARM_COMPUTE_CL_COPY_KERNEL_H */
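
For orientation, a rough usage fragment for the interface deleted above. The tensor shapes are invented, and dispatching through CLScheduler::enqueue_op() is just one plausible way to run the kernel; src_tensor and dst_tensor are assumed to be already allocated CLTensor objects:

    // Illustrative sketch only.
    arm_compute::TensorInfo src_info(arm_compute::TensorShape(32U, 16U), 1, arm_compute::DataType::F32);
    arm_compute::TensorInfo dst_info(arm_compute::TensorShape(32U, 16U), 1, arm_compute::DataType::F32);

    ARM_COMPUTE_ERROR_THROW_ON(arm_compute::opencl::kernels::ClCopyKernel::validate(&src_info, &dst_info));

    arm_compute::opencl::kernels::ClCopyKernel copy_kernel;
    copy_kernel.configure(arm_compute::CLKernelLibrary::get().get_compile_context(), &src_info, &dst_info);

    // At run time the CL tensors travel in an ITensorPack keyed by ACL_SRC / ACL_DST, as run_op() expects.
    arm_compute::ITensorPack pack;
    pack.add_tensor(arm_compute::TensorType::ACL_SRC, &src_tensor);
    pack.add_tensor(arm_compute::TensorType::ACL_DST, &dst_tensor);
    arm_compute::CLScheduler::get().enqueue_op(copy_kernel, pack);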
diff --git a/src/core/gpu/cl/kernels/ClCropKernel.cpp b/src/core/gpu/cl/kernels/ClCropKernel.cpp
deleted file mode 100644
index 1d322eefa1..0000000000
--- a/src/core/gpu/cl/kernels/ClCropKernel.cpp
+++ /dev/null
@@ -1,136 +0,0 @@
-/*
- * Copyright (c) 2019-2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "src/core/gpu/cl/kernels/ClCropKernel.h"
-
-#include "arm_compute/core/CL/CLHelpers.h"
-#include "arm_compute/core/CL/CLKernelLibrary.h"
-#include "arm_compute/core/CL/ICLTensor.h"
-#include "arm_compute/core/TensorInfo.h"
-#include "arm_compute/core/Utils.h"
-#include "src/core/CPP/Validate.h"
-#include "src/core/helpers/WindowHelpers.h"
-#include "support/Cast.h"
-#include "support/StringSupport.h"
-
-#include <map>
-
-namespace arm_compute
-{
-namespace opencl
-{
-namespace kernels
-{
-void ClCropKernel::configure(const ITensorInfo *src, ITensorInfo *dst, Coordinates2D start, Coordinates2D end, uint32_t batch_index, float extrapolation_value, Window *dst_window)
-{
- configure(CLKernelLibrary::get().get_compile_context(), src, dst, start, end, batch_index, extrapolation_value, dst_window);
-}
-
-void ClCropKernel::configure(const CLCompileContext &compile_context, const ITensorInfo *src, ITensorInfo *dst, Coordinates2D start, Coordinates2D end, uint32_t batch_index,
- float extrapolation_value, Window *dst_window)
-{
- ARM_COMPUTE_ERROR_ON_NULLPTR(src, dst);
- ARM_COMPUTE_ERROR_THROW_ON(validate(src, dst, start, end, batch_index, extrapolation_value, dst_window));
-
- _start = start;
- _batch_index = batch_index;
- _extrapolation_value = extrapolation_value;
-
- const int vec_size_x = 4;
- // Create and update the window (if needed)
- Window win = calculate_max_window(*dst);
-
- if(dst_window != nullptr)
- {
- ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(win, *dst_window);
- win = *dst_window;
- }
-
- const int dst_width_x = win.num_iterations(0);
- const bool multi_access_x = dst_width_x >= vec_size_x;
- const bool remainder_x = dst_width_x % vec_size_x > 0;
-
- if(multi_access_x)
- {
- win.set(Window::DimX,
- Window::Dimension(win.x().start(), ceil_to_multiple(win.x().end(), vec_size_x), vec_size_x));
- }
- ICLKernel::configure_internal(win);
-
- // Create kernel
- CLBuildOptions build_opts;
- build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(src->data_type()));
- build_opts.add_option_if(multi_access_x, "-DVEC_SIZE=" + support::cpp11::to_string(vec_size_x));
- build_opts.add_option_if(multi_access_x && remainder_x, "-DLAST_ACCESSED_X=" + support::cpp11::to_string(std::max<int>(dst_width_x - vec_size_x, 0)));
- build_opts.add_option_if(start.x > end.x, "-DWIDTH_FLIPPED=");
- build_opts.add_option_if(start.y > end.y, "-DHEIGHT_FLIPPED=");
- _kernel = create_kernel(compile_context, "crop_tensor", build_opts.options());
-}
-
-Status ClCropKernel::validate(const ITensorInfo *src, const ITensorInfo *dst, Coordinates2D start, Coordinates2D end, uint32_t batch_index, float extrapolation_value, Window *dst_window)
-{
- ARM_COMPUTE_UNUSED(extrapolation_value, dst_window);
- ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(src);
- ARM_COMPUTE_RETURN_ERROR_ON(src->data_type() == DataType::UNKNOWN);
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_LAYOUT_NOT_IN(src, DataLayout::NHWC);
- ARM_COMPUTE_RETURN_ERROR_ON(src->tensor_shape().num_dimensions() > 4);
- ARM_COMPUTE_RETURN_ERROR_ON(start.x < 0 || start.y < 0 || end.x < 0 || end.y < 0);
- ARM_COMPUTE_RETURN_ERROR_ON(start.x >= static_cast<int32_t>(src->dimension(1)) || start.y >= static_cast<int32_t>(src->dimension(2))
- || end.x >= static_cast<int32_t>(src->dimension(1)) || end.y >= static_cast<int32_t>(src->dimension(2)));
- ARM_COMPUTE_RETURN_ERROR_ON(batch_index >= src->dimension(3));
- if(dst_window != nullptr)
- {
- ARM_COMPUTE_RETURN_ERROR_ON(dst_window->x().step() != 1);
- }
- if(dst->total_size() > 0)
- {
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_NOT_IN(dst, DataType::F32);
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_LAYOUT(src, dst);
- ARM_COMPUTE_RETURN_ERROR_ON(dst->num_dimensions() > 3);
- }
- return Status{};
-}
-
-void ClCropKernel::run_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue)
-{
- ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
- ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window);
-
- const auto src = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC));
- auto dst = utils::cast::polymorphic_downcast<ICLTensor *>(tensors.get_tensor(TensorType::ACL_DST));
-
- Window in_slice = Window();
- in_slice.use_tensor_dimensions(src->info()->tensor_shape());
- in_slice.set(Window::DimX, Window::Dimension(in_slice.x().start(), ceil_to_multiple(in_slice.x().end(), window.x().step()), window.x().step()));
- in_slice.set(3, Window::Dimension(_batch_index, _batch_index + 1, 1));
-
- unsigned int idx = 0;
- add_3D_tensor_argument(idx, src, in_slice);
- add_3D_tensor_argument(idx, dst, window);
- add_argument(idx, _start.x);
- add_argument(idx, _start.y);
- enqueue(queue, *this, window, lws_hint());
-}
-} // namespace kernels
-} // namespace opencl
-} // namespace arm_compute
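
The fixed vector width of 4 and the LAST_ACCESSED_X option above clamp the final vector load when the crop width is not a multiple of 4. A small standalone sketch of that arithmetic with an invented width of 10:

    #include <algorithm>
    #include <cstdio>

    int main()
    {
        const int vec_size_x  = 4;  // fixed in ClCropKernel::configure()
        const int dst_width_x = 10; // example crop width

        const bool multi_access_x = dst_width_x >= vec_size_x;             // true
        const bool remainder_x    = dst_width_x % vec_size_x > 0;          // true (10 % 4 == 2)
        const int  last_accessed  = std::max(dst_width_x - vec_size_x, 0); // -DLAST_ACCESSED_X=6

        std::printf("multi_access=%d remainder=%d LAST_ACCESSED_X=%d\n", multi_access_x, remainder_x, last_accessed);
        return 0;
    }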
diff --git a/src/core/gpu/cl/kernels/ClCropKernel.h b/src/core/gpu/cl/kernels/ClCropKernel.h
deleted file mode 100644
index ec0f8e58da..0000000000
--- a/src/core/gpu/cl/kernels/ClCropKernel.h
+++ /dev/null
@@ -1,98 +0,0 @@
-/*
- * Copyright (c) 2019-2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_CLCROPKERNEL_H
-#define ARM_COMPUTE_CLCROPKERNEL_H
-
-#include "src/core/common/Macros.h"
-#include "src/core/gpu/cl/ClCompileContext.h"
-#include "src/core/gpu/cl/IClKernel.h"
-
-namespace arm_compute
-{
-namespace opencl
-{
-namespace kernels
-{
-/** OpenCL kernel to crop a region of a source tensor into a destination tensor */
-class ClCropKernel : public IClKernel
-{
-public:
- ClCropKernel() = default;
- ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(ClCropKernel);
- /** Configure kernel
- *
- * @note Supported tensor rank: up to 4
- *
- * @param[in] src Source tensor info. Data type supported: All. Data layouts supported: NHWC.
- * @param[out] dst Destination tensor info. Data type supported: F32
- * @param[in] start Coordinates of where to start cropping the image.
- * @param[in] end Coordinates of where to end cropping the image.
- * @param[in] batch_index Fourth dimension index of the 3D image to crop in @p src.
- * @param[in] extrapolation_value Value to be used for values outside of the image. Default is 0.
- * @param[in] dst_window (Optional) Output window to be used when the cropped image is copied into part of a larger tensor. Default is nullptr.
- */
- void configure(const ITensorInfo *src, ITensorInfo *dst, Coordinates2D start, Coordinates2D end, uint32_t batch_index, float extrapolation_value = 0, Window *dst_window = nullptr);
- /** Configure kernel
- *
- * @note Supported tensor rank: up to 4
- *
- * @param[in] compile_context The compile context to be used.
- * @param[in] src Source tensor info. Data type supported: All. Data layouts supported: NHWC.
- * @param[out] dst Destination tensor info. Data type supported: F32
- * @param[in] start Coordinates of where to start cropping the image.
- * @param[in] end Coordinates of where to end cropping the image.
- * @param[in] batch_index Fourth dimension index of the 3D image to crop in @p src.
- * @param[in] extrapolation_value Value to be used for values outside of the image. Default is 0.
- * @param[in] dst_window (Optional) Output window to be used when the cropped image is copied into part of a larger tensor. Default is nullptr.
- */
- void configure(const CLCompileContext &compile_context, const ITensorInfo *src, ITensorInfo *dst, Coordinates2D start, Coordinates2D end, uint32_t batch_index, float extrapolation_value = 0,
- Window *dst_window = nullptr);
-
- /** Static function to check if given info will lead to a valid configuration of @ref ClCropKernel
- *
- * @note Supported tensor rank: up to 4
- *
- * @param[in] src Source tensor info. Data type supported: All. Data layouts supported: NHWC.
- * @param[in] dst Destination tensor info. Data type supported: F32
- * @param[in] start Coordinates of where to start cropping the image.
- * @param[in] end Coordinates of where to end cropping the image.
- * @param[in] batch_index Fourth dimension index of the 3D image to crop in @p src.
- * @param[in] extrapolation_value Value to be used for values outside of the image. Default is 0.
- * @param[in] dst_window (Optional) Output window to be used when the cropped image is copied into part of a larger tensor. Default is nullptr.
- */
- static Status validate(const ITensorInfo *src, const ITensorInfo *dst, Coordinates2D start, Coordinates2D end, uint32_t batch_index, float extrapolation_value = 0,
- Window *dst_window = nullptr);
-
- // Inherited methods overridden:
- void run_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue) override;
-
-private:
- Coordinates2D _start{};
- uint32_t _batch_index{};
- float _extrapolation_value{};
-};
-} // namespace kernels
-} // namespace opencl
-} // namespace arm_compute
-#endif /*ARM_COMPUTE_CLCROPKERNEL_H */
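
A hedged configuration sketch for the interface above; the image size and crop box are invented. As the implementation shows, a start coordinate larger than the matching end coordinate is not rejected but selects the flipped-read path (WIDTH_FLIPPED / HEIGHT_FLIPPED):

    // Illustrative sketch only: crop a 50x30 region from an NHWC F32 image, batch index 0.
    arm_compute::TensorInfo src_info(arm_compute::TensorShape(3U, 224U, 224U, 4U), 1, arm_compute::DataType::F32);
    src_info.set_data_layout(arm_compute::DataLayout::NHWC);
    arm_compute::TensorInfo dst_info(arm_compute::TensorShape(3U, 50U, 30U), 1, arm_compute::DataType::F32);
    dst_info.set_data_layout(arm_compute::DataLayout::NHWC);

    const arm_compute::Coordinates2D start{ 10, 20 };
    const arm_compute::Coordinates2D end{ 59, 49 };

    ARM_COMPUTE_ERROR_THROW_ON(arm_compute::opencl::kernels::ClCropKernel::validate(&src_info, &dst_info, start, end, 0U));

    arm_compute::opencl::kernels::ClCropKernel crop_kernel;
    crop_kernel.configure(&src_info, &dst_info, start, end, 0U /* batch_index */, 0.0f /* extrapolation_value */);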
diff --git a/src/core/gpu/cl/kernels/ClDepthConcatenateKernel.cpp b/src/core/gpu/cl/kernels/ClDepthConcatenateKernel.cpp
deleted file mode 100644
index 4039570da4..0000000000
--- a/src/core/gpu/cl/kernels/ClDepthConcatenateKernel.cpp
+++ /dev/null
@@ -1,138 +0,0 @@
-/*
- * Copyright (c) 2017-2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "src/core/gpu/cl/kernels/ClDepthConcatenateKernel.h"
-
-#include "arm_compute/core/CL/CLHelpers.h"
-#include "arm_compute/core/CL/CLKernelLibrary.h"
-#include "arm_compute/core/CL/ICLTensor.h"
-#include "arm_compute/core/Helpers.h"
-#include "arm_compute/core/Utils.h"
-#include "src/core/CL/CLValidate.h"
-#include "src/core/helpers/WindowHelpers.h"
-#include "support/Cast.h"
-
-#include "support/StringSupport.h"
-
-namespace arm_compute
-{
-namespace opencl
-{
-namespace kernels
-{
-namespace
-{
-Status validate_arguments(const ITensorInfo *src, unsigned int depth_offset, const ITensorInfo *dst)
-{
- ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, dst);
- ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(src);
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::F16, DataType::F32);
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src, dst);
-
- ARM_COMPUTE_RETURN_ERROR_ON(src->dimension(Window::DimX) != dst->dimension(Window::DimX));
- ARM_COMPUTE_RETURN_ERROR_ON(src->dimension(Window::DimY) != dst->dimension(Window::DimY));
- ARM_COMPUTE_RETURN_ERROR_ON(src->dimension(2) + depth_offset > dst->dimension(2));
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(3, src, dst);
-
- return Status{};
-}
-} // namespace
-
-ClDepthConcatenateKernel::ClDepthConcatenateKernel()
- : _depth_offset(0)
-{
-}
-
-void ClDepthConcatenateKernel::configure(const CLCompileContext &compile_context, ITensorInfo *src, unsigned int depth_offset, ITensorInfo *dst)
-{
- ARM_COMPUTE_ERROR_ON_NULLPTR(src, dst);
- ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(src, depth_offset, dst));
-
- auto padding_info = get_padding_info({ src, dst });
-
- _depth_offset = depth_offset;
-
- const unsigned int num_elems_processed_per_iteration = adjust_vec_size(16 / src->element_size(), src->dimension(0));
-
- // Add build options
- CLBuildOptions build_opts;
- build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(src->data_type()));
- build_opts.add_option("-DVEC_SIZE=" + support::cpp11::to_string(num_elems_processed_per_iteration));
- build_opts.add_option("-DVEC_SIZE_LEFTOVER=" + support::cpp11::to_string(src->dimension(0) % num_elems_processed_per_iteration));
- if(is_data_type_quantized_asymmetric(src->data_type()) && src->quantization_info() != dst->quantization_info())
- {
- const UniformQuantizationInfo iq_info = src->quantization_info().uniform();
- const UniformQuantizationInfo oq_info = dst->quantization_info().uniform();
-
- build_opts.add_option("-DOFFSET_IN1=" + float_to_string_with_full_precision(iq_info.offset));
- build_opts.add_option("-DOFFSET_OUT=" + float_to_string_with_full_precision(oq_info.offset));
- build_opts.add_option("-DSCALE_IN1=" + float_to_string_with_full_precision(iq_info.scale));
- build_opts.add_option("-DSCALE_OUT=" + float_to_string_with_full_precision(oq_info.scale));
- }
-
- // Create kernel
- _kernel = create_kernel(compile_context, "concatenate", build_opts.options());
-
- // Configure kernel window
- auto win = calculate_max_window(*dst, Steps(num_elems_processed_per_iteration));
- win.set(Window::DimZ, Window::Dimension(0, src->tensor_shape().z(), 1));
- ICLKernel::configure_internal(win);
-
- ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
-}
-
-Status ClDepthConcatenateKernel::validate(const arm_compute::ITensorInfo *src,
- unsigned int depth_offset,
- const arm_compute::ITensorInfo *dst)
-{
- ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(src, depth_offset, dst));
- return Status{};
-}
-
-void ClDepthConcatenateKernel::run_op(ITensorPack &tensors, const Window &window, ::cl::CommandQueue &queue)
-{
- ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
- ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window);
-
- const auto src = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC));
- auto dst = utils::cast::polymorphic_downcast<ICLTensor *>(tensors.get_tensor(TensorType::ACL_DST));
-
- Window slice = window.first_slice_window_3D();
-
- const int offset_to_first_elements_in_bytes = _depth_offset * dst->info()->strides_in_bytes()[2];
-
- unsigned int idx = 2 * num_arguments_per_3D_tensor(); // Skip the src and dst parameters
- _kernel.setArg<cl_int>(idx, offset_to_first_elements_in_bytes);
-
- do
- {
- unsigned int idx = 0;
- add_3D_tensor_argument(idx, src, slice);
- add_3D_tensor_argument(idx, dst, slice);
- enqueue(queue, *this, slice, lws_hint());
- }
- while(window.slide_window_slice_3D(slice));
-}
-} // namespace kernels
-} // namespace opencl
-} // namespace arm_compute
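
Besides the two tensor arguments, the only extra scalar the kernel receives is the byte offset derived from _depth_offset and the destination's Z stride. A standalone sketch of that computation with invented strides:

    #include <cstdio>

    int main()
    {
        // Example destination plane: F32, width 16, height 8 -> Z stride of 16 * 8 * 4 bytes.
        const unsigned int dst_stride_z_bytes = 16 * 8 * 4;
        const unsigned int depth_offset       = 3; // this input starts at plane 3 of the concatenated dst

        const int offset_to_first_elements_in_bytes = depth_offset * dst_stride_z_bytes; // 1536
        std::printf("offset_to_first_elements_in_bytes=%d\n", offset_to_first_elements_in_bytes);
        return 0;
    }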
diff --git a/src/core/gpu/cl/kernels/ClDepthConcatenateKernel.h b/src/core/gpu/cl/kernels/ClDepthConcatenateKernel.h
deleted file mode 100644
index 103ef00695..0000000000
--- a/src/core/gpu/cl/kernels/ClDepthConcatenateKernel.h
+++ /dev/null
@@ -1,77 +0,0 @@
-/*
- * Copyright (c) 2017-2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_CL_DEPTH_CONCATENATE_KERNEL_H
-#define ARM_COMPUTE_CL_DEPTH_CONCATENATE_KERNEL_H
-
-#include "src/core/common/Macros.h"
-#include "src/core/gpu/cl/ClCompileContext.h"
-#include "src/core/gpu/cl/IClKernel.h"
-
-namespace arm_compute
-{
-namespace opencl
-{
-namespace kernels
-{
-/** Interface for the depth concatenate kernel.
- * The src tensor will be concatenated into the dst tensor.
- */
-class ClDepthConcatenateKernel : public IClKernel
-{
-public:
- /** Default constructor */
- ClDepthConcatenateKernel();
- ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(ClDepthConcatenateKernel);
- /** Initialise the kernel's source and destination
- *
- * @param[in] compile_context The compile context to be used.
- * @param[in] src Source tensor info. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32.
- * @param[in] depth_offset The offset on the Z axis.
- * @param[in,out] dst Destination tensor info. Data types supported: Same as @p src.
- *
- * @note The dst tensor's two lowest dimensions can't be smaller than the corresponding src dimensions.
- * @note The difference between each of the two lowest dimensions of src and dst needs to be divisible by 2.
- *
- */
- void configure(const CLCompileContext &compile_context, ITensorInfo *src, unsigned int depth_offset, ITensorInfo *dst);
- /** Static function to check if given info will lead to a valid configuration of @ref ClDepthConcatenateKernel
- *
- * @param[in] src Source tensor info. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32
- * @param[in] depth_offset The offset on the Z axis.
- * @param[in] dst Destination tensor info. Data types supported: Same as @p src.
- *
- * @return a status
- */
- static Status validate(const ITensorInfo *src, unsigned int depth_offset, const ITensorInfo *dst);
-
- // Inherited methods overridden:
- void run_op(ITensorPack &tensors, const Window &window, ::cl::CommandQueue &queue) override;
-
-private:
- unsigned int _depth_offset;
-};
-} // namespace kernels
-} // namespace opencl
-} // namespace arm_compute
-#endif /* ARM_COMPUTE_CL_DEPTH_CONCATENATE_KERNEL_H */
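
Each kernel instance concatenates a single src into dst, so callers concatenating several inputs along depth typically configure one kernel per input with a running offset. A hedged sketch of that pattern (src_infos and dst_info are placeholders):

    // Illustrative sketch only.
    unsigned int depth_offset = 0;
    std::vector<std::unique_ptr<arm_compute::opencl::kernels::ClDepthConcatenateKernel>> concat_kernels;

    for(arm_compute::ITensorInfo *src_info : src_infos)
    {
        ARM_COMPUTE_ERROR_THROW_ON(
            arm_compute::opencl::kernels::ClDepthConcatenateKernel::validate(src_info, depth_offset, &dst_info));

        auto k = std::make_unique<arm_compute::opencl::kernels::ClDepthConcatenateKernel>();
        k->configure(arm_compute::CLKernelLibrary::get().get_compile_context(), src_info, depth_offset, &dst_info);
        concat_kernels.emplace_back(std::move(k));

        depth_offset += src_info->dimension(2); // the next input starts after this input's depth
    }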
diff --git a/src/core/gpu/cl/kernels/ClDequantizeKernel.cpp b/src/core/gpu/cl/kernels/ClDequantizeKernel.cpp
deleted file mode 100644
index f2758b759f..0000000000
--- a/src/core/gpu/cl/kernels/ClDequantizeKernel.cpp
+++ /dev/null
@@ -1,153 +0,0 @@
-/*
- * Copyright (c) 2017-2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "src/core/gpu/cl/kernels/ClDequantizeKernel.h"
-
-#include "arm_compute/core/CL/CLHelpers.h"
-#include "arm_compute/core/CL/CLKernelLibrary.h"
-#include "arm_compute/core/CL/ICLTensor.h"
-#include "arm_compute/core/TensorInfo.h"
-#include "arm_compute/core/Utils.h"
-#include "arm_compute/core/Validate.h"
-
-#include "src/core/CL/CLValidate.h"
-#include "src/core/helpers/AutoConfiguration.h"
-#include "src/core/helpers/WindowHelpers.h"
-
-#include "support/Cast.h"
-#include "support/StringSupport.h"
-
-namespace arm_compute
-{
-namespace opencl
-{
-namespace kernels
-{
-namespace
-{
-Status validate_arguments(const ITensorInfo *src, const ITensorInfo *dst)
-{
- ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, dst);
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::QSYMM8_PER_CHANNEL, DataType::QSYMM8, DataType::QSYMM16);
-
- if(dst->tensor_shape().total_size() > 0)
- {
- ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(dst);
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(dst, 1, DataType::F16, DataType::F32);
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(src, dst);
- }
-
- return Status{};
-}
-} // namespace
-
-void ClDequantizeKernel::configure(const CLCompileContext &compile_context, ITensorInfo *src, ITensorInfo *dst)
-{
- ARM_COMPUTE_ERROR_ON_NULLPTR(src, dst);
-
- // Output tensor auto initialization if not yet initialized
- auto_init_if_empty(*dst, src->tensor_shape(), 1, DataType::F32);
-
- auto padding_info = get_padding_info({ src, dst });
-
- ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(src, dst));
-
- const int vec_size_x = 16 / dst->element_size();
- const int output_width_x = dst->tensor_shape().x();
- const bool multi_access_x = (output_width_x / vec_size_x > 0);
-
- const bool is_quantized_per_channel = is_data_type_quantized_per_channel(src->data_type());
- std::string kernel_name = "dequantization_layer";
-
- // Create kernel
- CLBuildOptions build_opts;
- if(!is_quantized_per_channel)
- {
- const UniformQuantizationInfo qinfo = src->quantization_info().uniform();
- const int qoffset = is_data_type_quantized_asymmetric(src->data_type()) ? qinfo.offset : 0;
- build_opts.add_option("-DSCALE=" + float_to_string_with_full_precision(qinfo.scale));
- build_opts.add_option("-DOFFSET=" + support::cpp11::to_string(qoffset));
- }
- else
- {
- kernel_name += "_per_channel";
- kernel_name += src->data_layout() == DataLayout::NCHW ? "_nchw" : "_nhwc";
- }
-
- build_opts.add_option("-DVEC_SIZE=" + support::cpp11::to_string(vec_size_x));
- build_opts.add_option("-DDATA_TYPE_SRC=" + get_cl_type_from_data_type(src->data_type()));
- build_opts.add_option("-DDATA_TYPE_DST=" + get_cl_type_from_data_type(dst->data_type()));
- build_opts.add_option_if(multi_access_x, "-DLAST_ACCESSED_X=" + support::cpp11::to_string(std::max<int>(output_width_x - vec_size_x, 0)));
-
- // Create kernel name
- _kernel = create_kernel(compile_context, kernel_name, build_opts.options());
-
- // Configure kernel window
- Window win = calculate_max_window(*dst);
- if(multi_access_x)
- {
- win.set(Window::DimX, Window::Dimension(win.x().start(), ceil_to_multiple(win.x().end(), vec_size_x), vec_size_x));
- }
- ICLKernel::configure_internal(win);
-
- ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
-}
-
-Status ClDequantizeKernel::validate(const ITensorInfo *src, const ITensorInfo *dst)
-{
- ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(src, dst));
- return Status{};
-}
-
-void ClDequantizeKernel::run_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue)
-{
- ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
- ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window);
-
- auto src = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC));
- auto dst = utils::cast::polymorphic_downcast<ICLTensor *>(tensors.get_tensor(TensorType::ACL_DST));
-
- const bool is_quantized_per_channel = is_data_type_quantized_per_channel(src->info()->data_type());
-
- // Collapse window
- Window new_window = is_quantized_per_channel ? window.collapse_if_possible(ICLKernel::window(), 4) : window.collapse_if_possible(ICLKernel::window(), 3);
- Window slice = new_window.first_slice_window_3D();
-
- if(is_quantized_per_channel)
- {
- unsigned int idx = num_arguments_per_3D_tensor() * 2; // Skip the input and output parameters
- _kernel.setArg(idx++, src->quantization().scale->cl_buffer());
- }
-
- do
- {
- unsigned int idx = 0;
- add_3D_tensor_argument(idx, src, slice);
- add_3D_tensor_argument(idx, dst, slice);
- enqueue(queue, *this, slice, lws_hint());
- }
- while(new_window.slide_window_slice_3D(slice));
-}
-} // namespace kernels
-} // namespace opencl
-} // namespace arm_compute
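
For the non-per-channel path, the SCALE and OFFSET options implement the usual affine dequantisation out = scale * (in - offset). A minimal host-side sketch of that formula with an invented QASYMM8 quantisation (scale 0.1, offset 128):

    #include <cstdint>
    #include <cstdio>

    // Same formula the OpenCL kernel applies element-wise (illustrative only).
    static float dequantize_qasymm8(uint8_t q, float scale, int32_t offset)
    {
        return scale * static_cast<float>(static_cast<int32_t>(q) - offset);
    }

    int main()
    {
        const float   scale  = 0.1f;
        const int32_t offset = 128;

        std::printf("%f %f %f\n",
                    dequantize_qasymm8(0, scale, offset),    // -12.8
                    dequantize_qasymm8(128, scale, offset),  //   0.0
                    dequantize_qasymm8(255, scale, offset)); //  12.7
        return 0;
    }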
diff --git a/src/core/gpu/cl/kernels/ClDequantizeKernel.h b/src/core/gpu/cl/kernels/ClDequantizeKernel.h
deleted file mode 100644
index 33e0164cc9..0000000000
--- a/src/core/gpu/cl/kernels/ClDequantizeKernel.h
+++ /dev/null
@@ -1,65 +0,0 @@
-/*
- * Copyright (c) 2017-2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_CL_DEQUANTIZE_KERNEL_H
-#define ARM_COMPUTE_CL_DEQUANTIZE_KERNEL_H
-
-#include "src/core/common/Macros.h"
-#include "src/core/gpu/cl/ClCompileContext.h"
-#include "src/core/gpu/cl/IClKernel.h"
-
-namespace arm_compute
-{
-namespace opencl
-{
-namespace kernels
-{
-/** Interface for the dequantization layer kernel. */
-class ClDequantizeKernel : public IClKernel
-{
-public:
- /** Default constructor */
- ClDequantizeKernel() = default;
- ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(ClDequantizeKernel);
- /** Initialise the kernel's input and output
- *
- * @param[in] compile_context The compile context to be used.
- * @param[in] src Source tensor info. Data types supported: QASYMM8/QASYMM8_SIGNED/QSYMM8_PER_CHANNEL/QSYMM8/QSYMM16.
- * @param[out] dst Destination tensor info. Data types supported: F16/F32.
- */
- void configure(const CLCompileContext &compile_context, ITensorInfo *src, ITensorInfo *dst);
- /** Static function to check if given info will lead to a valid configuration
- *
- * Similar to @ref ClDequantizeKernel::configure()
- *
- * @return a status
- */
- static Status validate(const ITensorInfo *src, const ITensorInfo *dst);
-
- // Inherited methods overridden:
- void run_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue) override;
-};
-} // namespace kernels
-} // namespace opencl
-} // namespace arm_compute
-#endif /* ARM_COMPUTE_CL_DEQUANTIZE_KERNEL_H */
diff --git a/src/core/gpu/cl/kernels/ClDirectConv2dKernel.cpp b/src/core/gpu/cl/kernels/ClDirectConv2dKernel.cpp
deleted file mode 100644
index 94c4044bff..0000000000
--- a/src/core/gpu/cl/kernels/ClDirectConv2dKernel.cpp
+++ /dev/null
@@ -1,667 +0,0 @@
-/*
- * Copyright (c) 2017-2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "src/core/gpu/cl/kernels/ClDirectConv2dKernel.h"
-
-#include "arm_compute/core/CL/CLHelpers.h"
-#include "arm_compute/core/CL/CLKernelLibrary.h"
-#include "arm_compute/core/CL/ICLTensor.h"
-#include "arm_compute/core/Helpers.h"
-#include "arm_compute/core/ITensor.h"
-#include "arm_compute/core/PixelValue.h"
-#include "arm_compute/core/Utils.h"
-#include "arm_compute/core/utils/misc/ShapeCalculator.h"
-#include "arm_compute/core/utils/quantization/AsymmHelpers.h"
-#include "src/core/AccessWindowStatic.h"
-#include "src/core/CL/CLUtils.h"
-#include "src/core/CL/CLValidate.h"
-#include "src/core/gpu/cl/kernels/gemm/ClGemmHelpers.h"
-#include "src/core/helpers/AutoConfiguration.h"
-#include "src/core/helpers/WindowHelpers.h"
-#include "support/Cast.h"
-#include "support/StringSupport.h"
-namespace arm_compute
-{
-namespace opencl
-{
-namespace kernels
-{
-namespace
-{
-Status validate_arguments(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *dst,
- const PadStrideInfo &conv_info, const ActivationLayerInfo &act_info)
-{
- ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(src);
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src, 1, DataType::QASYMM8_SIGNED, DataType::QASYMM8, DataType::F16, DataType::F32);
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src, weights);
-
- const DataLayout data_layout = src->data_layout();
- const int width_idx = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
- const int height_idx = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
- const int channel_idx = get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL);
-
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(weights->dimension(width_idx) != weights->dimension(height_idx), "Weights should have same width and height");
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(weights->dimension(channel_idx) != src->dimension(channel_idx),
- "Weights feature map dimension should match the respective src's one");
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(weights->num_dimensions() > 4, "Weights can be at most 4 dimensional");
- ARM_COMPUTE_RETURN_ERROR_ON_MSG((weights->dimension(width_idx) == 1) && std::get<0>(conv_info.stride()) > 3, "Strides larger than 3 not supported for 1x1 convolution.");
- ARM_COMPUTE_RETURN_ERROR_ON_MSG((weights->dimension(width_idx) == 3 || weights->dimension(width_idx) == 5 || weights->dimension(width_idx) == 9)
- && std::get<0>(conv_info.stride()) > 2,
- "Strides larger than 2 not supported for 3x3, 5x5, 9x9 convolution.");
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(data_layout != DataLayout::NHWC && !is_data_type_float(src->data_type()) && act_info.enabled(),
- "Activation supported only for floating point and NHWC.");
-
- if(data_layout == DataLayout::NCHW)
- {
- if(is_data_type_quantized(src->data_type()))
- {
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(weights->dimension(width_idx) != 1 && weights->dimension(width_idx) != 3 && weights->dimension(width_idx) != 5 && weights->dimension(width_idx) != 9,
- "Kernel sizes other than 1x1, 3x3, 5x5 or 9x9 are not supported with quantized data types");
- }
- else
- {
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(weights->dimension(width_idx) != 1 && weights->dimension(width_idx) != 3 && weights->dimension(width_idx) != 5,
- "Kernel sizes other than 1x1, 3x3 or 5x5 are not supported with float data types");
- }
- }
-
- if(biases != nullptr)
- {
- if(is_data_type_quantized_asymmetric(src->data_type()))
- {
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(biases, 1, DataType::S32);
- }
- else
- {
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(weights, biases);
- }
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(biases->dimension(0) != weights->dimension(3),
- "Biases size and number of src feature maps should match");
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(biases->num_dimensions() > 1,
- "Biases should be one dimensional");
- }
-
- // Checks performed when dst is configured
- if(dst->total_size() != 0)
- {
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(dst->tensor_shape(),
- misc::shape_calculator::compute_deep_convolution_shape(*src, *weights, conv_info));
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src, dst);
- }
-
- const auto data_type = src->data_type();
- if(is_data_type_quantized(data_type))
- {
- const UniformQuantizationInfo iqinfo = src->quantization_info().uniform();
- const UniformQuantizationInfo wqinfo = weights->quantization_info().uniform();
- const UniformQuantizationInfo oqinfo = dst->quantization_info().uniform();
-
- float multiplier = iqinfo.scale * wqinfo.scale / oqinfo.scale;
- int output_multiplier = 0;
- int output_shift = 0;
- ARM_COMPUTE_RETURN_ON_ERROR(quantization::calculate_quantized_multiplier(multiplier, &output_multiplier, &output_shift));
- }
- return Status{};
-}
-
-inline bool can_run_optimized_kernel_for_bifrost_nchw(GPUTarget gpu_target, unsigned int conv_stride_x, unsigned int conv_stride_y, unsigned int kernel_size,
- DataType data_type, DataLayout data_layout)
-{
- return gpu_target_is_in(gpu_target,
- GPUTarget::G71, GPUTarget::G72, GPUTarget::G76,
- GPUTarget::G51, GPUTarget::G51BIG, GPUTarget::G51LIT,
- GPUTarget::G52, GPUTarget::G52LIT)
- && (kernel_size <= 5)
- && (conv_stride_x == 1) && (conv_stride_y == 1)
- && (data_type == DataType::F32)
- && (data_layout == DataLayout::NCHW);
-}
-
-inline void setup_num_elems_nchw(unsigned int &num_elems_read_per_iteration_x, unsigned int &num_elems_read_per_iteration_y,
- unsigned int &num_elems_written_per_iteration_x, unsigned int &num_elems_written_per_iteration_y,
- unsigned int kernel_size, const PadStrideInfo &conv_info, const GPUTarget target, ITensorInfo *src)
-{
- const DataType data_type = src->data_type();
- const DataLayout data_layout = src->data_layout();
- unsigned int conv_stride_x = std::get<0>(conv_info.stride());
- unsigned int conv_stride_y = std::get<1>(conv_info.stride());
-
- const bool run_optimized_bifrost = can_run_optimized_kernel_for_bifrost_nchw(target, conv_stride_x, conv_stride_y, kernel_size, data_type, data_layout);
-
- if(run_optimized_bifrost)
- {
- // Configure kernel window
- switch(kernel_size)
- {
- case 1:
- {
- num_elems_read_per_iteration_x = 4;
- num_elems_read_per_iteration_y = 4;
- num_elems_written_per_iteration_x = 4;
- num_elems_written_per_iteration_y = 4;
- break;
- }
- case 3:
- {
- num_elems_read_per_iteration_x = 6;
- num_elems_read_per_iteration_y = 5;
- num_elems_written_per_iteration_x = 4;
- num_elems_written_per_iteration_y = 3;
- break;
- }
- case 5:
- {
- num_elems_read_per_iteration_x = 8;
- num_elems_read_per_iteration_y = 6;
- num_elems_written_per_iteration_x = 4;
- num_elems_written_per_iteration_y = 2;
- break;
- }
- default:
- {
- ARM_COMPUTE_ERROR("Kernel size not optimized for Bifrost");
- }
- }
- }
- else
- {
- num_elems_read_per_iteration_y = kernel_size;
- num_elems_written_per_iteration_x = 8;
- num_elems_written_per_iteration_y = 1;
- switch(kernel_size)
- {
- case 1:
- switch(conv_stride_x)
- {
- case 1:
- num_elems_read_per_iteration_x = 8;
- break;
- case 2:
- num_elems_read_per_iteration_x = 16;
- break;
- case 3:
- switch(src->element_size())
- {
- case 1:
- num_elems_read_per_iteration_x = 28;
- break;
- case 2:
- num_elems_read_per_iteration_x = 24;
- break;
- case 4:
- num_elems_read_per_iteration_x = 22;
- break;
- default:
- ARM_COMPUTE_ERROR("Invalid data size");
- }
- break;
- default:
- ARM_COMPUTE_ERROR("Invalid convolution stride X");
- }
- break;
- case 3:
- switch(conv_stride_x)
- {
- case 1:
- num_elems_read_per_iteration_x = 10;
- break;
- case 2:
- num_elems_read_per_iteration_x = 17;
- break;
- default:
- ARM_COMPUTE_ERROR("Invalid convolution stride X");
- }
- break;
- case 5:
- switch(conv_stride_x)
- {
- case 1:
- num_elems_read_per_iteration_x = 12;
- break;
- case 2:
- num_elems_read_per_iteration_x = 20;
- break;
- default:
- ARM_COMPUTE_ERROR("Invalid convolution stride X");
- }
- break;
- case 9:
- switch(conv_stride_x)
- {
- case 1:
- num_elems_read_per_iteration_x = 16;
- break;
- case 2:
- num_elems_read_per_iteration_x = 24;
- break;
- default:
- ARM_COMPUTE_ERROR("Invalid convolution stride X");
- }
- break;
- default:
- ARM_COMPUTE_ERROR("Invalid direct convolution size");
- }
- }
-}
-
-std::pair<Status, Window> validate_and_configure_window(ITensorInfo *src, ITensorInfo *weights, ITensorInfo *dst, const PadStrideInfo &conv_info, const GPUTarget target)
-{
- const DataLayout data_layout = src->data_layout();
-
- // Get dst shape
- TensorShape output_shape = misc::shape_calculator::compute_deep_convolution_shape(*src, *weights, conv_info);
-
- // Output auto initialization if not yet initialized
- auto_init_if_empty(*dst, output_shape,
- 1,
- src->data_type(),
- src->quantization_info());
-
- if(data_layout == DataLayout::NHWC)
- {
- const unsigned int vec_size = std::min(static_cast<unsigned int>(dst->tensor_shape()[0]), 4u);
- unsigned int num_rows = 1U;
- if(dst->tensor_shape()[0] > 16)
- {
- num_rows = src->data_type() == DataType::F32 ? 2U : 4U;
- }
-
- // Create window and update padding
- Window win = calculate_max_window(output_shape, Steps(vec_size, num_rows));
- return std::make_pair(Status{}, win);
- }
- else if(data_layout == DataLayout::NCHW)
- {
- const int width_idx = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
- const unsigned int kernel_size = weights->dimension(width_idx);
-
- unsigned int num_elems_read_per_iteration_x = 0;
- unsigned int num_elems_read_per_iteration_y = 0;
- unsigned int num_elems_written_per_iteration_x = 0;
- unsigned int num_elems_written_per_iteration_y = 0;
-
- unsigned int conv_pad_left = conv_info.pad_left();
- unsigned int conv_pad_top = conv_info.pad_top();
- unsigned int conv_stride_x = std::get<0>(conv_info.stride());
- unsigned int conv_stride_y = std::get<1>(conv_info.stride());
-
- setup_num_elems_nchw(num_elems_read_per_iteration_x, num_elems_read_per_iteration_y,
- num_elems_written_per_iteration_x, num_elems_written_per_iteration_y,
- kernel_size, conv_info, target, src);
-
- // Create window and update padding
- bool window_changed = false;
- Window win = calculate_max_window(*dst, Steps(num_elems_written_per_iteration_x, num_elems_written_per_iteration_y));
-
- AccessWindowRectangle input_access(src, -conv_pad_left, -conv_pad_top, num_elems_read_per_iteration_x, num_elems_read_per_iteration_y, conv_stride_x, conv_stride_y);
- AccessWindowStatic weights_access(weights, 0, 0, kernel_size, kernel_size);
- AccessWindowRectangle output_access(dst, 0, 0, num_elems_written_per_iteration_x, num_elems_written_per_iteration_y);
- window_changed = update_window_and_padding(win, input_access, weights_access, output_access);
- output_access.set_valid_region(win, ValidRegion(Coordinates(), dst->tensor_shape()));
- Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
- return std::make_pair(err, win);
- }
- else
- {
- ARM_COMPUTE_ERROR("Not supported");
- }
-}
-
-bool export_to_cl_image_support(ITensorInfo *tensor, GPUTarget gpu_target, DataLayout data_layout)
-{
- if(tensor->tensor_shape()[0] % 4 || (data_layout != DataLayout::NHWC))
- {
- return false;
- }
-
- // If not floating point
- if(!is_data_type_float(tensor->data_type()))
- {
- return false;
- }
-
- if(gpu_target == GPUTarget::G71 || get_arch_from_target(gpu_target) == GPUTarget::MIDGARD)
- {
- return false;
- }
-
- // Check if the cl_khr_image2d_from_buffer extension is supported on the target platform
- if(!image2d_from_buffer_supported(CLKernelLibrary::get().get_device()))
- {
- return false;
- }
-
- // Check cl image pitch alignment
- if(get_cl_image_pitch_alignment(CLKernelLibrary::get().get_device()) == 0)
- {
- return false;
- }
-
- const size_t image_w = tensor->tensor_shape()[0] / 4;
- const size_t image_h = tensor->tensor_shape()[1] * tensor->tensor_shape()[2] * tensor->tensor_shape()[3];
- const size_t max_image_w = CLKernelLibrary::get().get_device().getInfo<CL_DEVICE_IMAGE2D_MAX_WIDTH>();
- const size_t max_image_h = CLKernelLibrary::get().get_device().getInfo<CL_DEVICE_IMAGE2D_MAX_HEIGHT>();
-
- if(image_w > max_image_w || image_h > max_image_h)
- {
- return false;
- }
-
- return true;
-}
-
-} // namespace
-
-BorderSize ClDirectConv2dKernel::border_size() const
-{
- return _border_size;
-}
-
-void ClDirectConv2dKernel::configure(const CLCompileContext &compile_context, ITensorInfo *src, ITensorInfo *weights, ITensorInfo *biases, ITensorInfo *dst,
- const PadStrideInfo &conv_info, const ActivationLayerInfo &act_info)
-{
- ARM_COMPUTE_ERROR_ON_NULLPTR(src, weights, dst);
-
- // Perform validation
- ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(src, weights, biases, dst, conv_info, act_info));
-
- const int conv_stride_x = std::get<0>(conv_info.stride());
- const int conv_stride_y = std::get<1>(conv_info.stride());
-
- _data_layout = src->data_layout();
- _conv_info = conv_info;
-
- const unsigned int width_idx = get_data_layout_dimension_index(_data_layout, DataLayoutDimension::WIDTH);
- const unsigned int height_idx = get_data_layout_dimension_index(_data_layout, DataLayoutDimension::HEIGHT);
- const unsigned int channel_idx = get_data_layout_dimension_index(_data_layout, DataLayoutDimension::CHANNEL);
- const unsigned int kernel_size = weights->dimension(width_idx);
- const DataType data_type = src->data_type();
-
- const GPUTarget gpu_target = get_target();
-
- // Configure kernel window
- auto win_config = validate_and_configure_window(src, weights, dst, conv_info, gpu_target);
- ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
- ICLKernel::configure_internal(win_config.second);
-
- std::stringstream kernel_name;
- CLBuildOptions build_options;
-
- if(_data_layout == DataLayout::NHWC)
- {
- _border_size = BorderSize();
-
- kernel_name << "direct_convolution_nhwc";
-
- const unsigned int n0 = win_config.second.x().step();
- const unsigned int m0 = win_config.second.y().step();
- const unsigned int k0 = adjust_vec_size(is_data_type_quantized(data_type) ? 16u : 8u, src->dimension(channel_idx));
- const unsigned int partial_store_n0 = dst->dimension(channel_idx) % n0;
- const unsigned int pad_left = conv_info.pad_left();
- const unsigned int pad_top = conv_info.pad_top();
- const bool export_to_cl_image = export_to_cl_image_support(weights, gpu_target, _data_layout);
-
- // Update the padding for the weights tensor if we can export to cl_image
- if(export_to_cl_image)
- {
- gemm::update_padding_for_cl_image(weights);
- }
-
- if(biases != nullptr)
- {
- build_options.add_option(std::string("-DHAS_BIAS"));
- build_options.add_option(std::string("-DBIA_DATA_TYPE=" + get_cl_type_from_data_type(biases->data_type())));
- }
-
- build_options.add_option("-cl-fast-relaxed-math");
- build_options.add_option("-DSRC_TENSOR_TYPE=BUFFER");
- build_options.add_option("-DSRC_WIDTH=" + support::cpp11::to_string(src->dimension(width_idx)));
- build_options.add_option("-DSRC_HEIGHT=" + support::cpp11::to_string(src->dimension(height_idx)));
- build_options.add_option("-DSRC_CHANNELS=" + support::cpp11::to_string(src->dimension(channel_idx)));
- build_options.add_option("-DSRC_DATA_TYPE=" + get_cl_type_from_data_type(src->data_type()));
- build_options.add_option("-DDST_TENSOR_TYPE=BUFFER");
- build_options.add_option("-DDST_WIDTH=" + support::cpp11::to_string(dst->dimension(width_idx)));
- build_options.add_option("-DDST_HEIGHT=" + support::cpp11::to_string(dst->dimension(height_idx)));
- build_options.add_option("-DDST_CHANNELS=" + support::cpp11::to_string(dst->dimension(channel_idx)));
- build_options.add_option("-DDST_DATA_TYPE=" + get_cl_type_from_data_type(dst->data_type()));
- build_options.add_option_if_else(export_to_cl_image, "-DWEI_TENSOR_TYPE=IMAGE", "-DWEI_TENSOR_TYPE=BUFFER");
- build_options.add_option("-DWEI_WIDTH=" + support::cpp11::to_string(weights->dimension(width_idx)));
- build_options.add_option("-DWEI_HEIGHT=" + support::cpp11::to_string(weights->dimension(height_idx)));
- build_options.add_option("-DWEI_DATA_TYPE=" + get_cl_type_from_data_type(weights->data_type()));
- build_options.add_option("-DSTRIDE_X=" + support::cpp11::to_string(conv_stride_x));
- build_options.add_option("-DSTRIDE_Y=" + support::cpp11::to_string(conv_stride_y));
- build_options.add_option("-DPAD_LEFT=" + support::cpp11::to_string(pad_left));
- build_options.add_option("-DPAD_TOP=" + support::cpp11::to_string(pad_top));
- build_options.add_option("-DN0=" + support::cpp11::to_string(n0));
- build_options.add_option("-DM0=" + support::cpp11::to_string(m0));
- build_options.add_option("-DK0=" + support::cpp11::to_string(k0));
- build_options.add_option("-DPARTIAL_N0=" + support::cpp11::to_string(partial_store_n0));
- build_options.add_option("-DACTIVATION_TYPE=" + lower_string(string_from_activation_func(act_info.activation())));
-
- if(is_data_type_quantized(data_type))
- {
- const UniformQuantizationInfo iqinfo = src->quantization_info().uniform();
- const UniformQuantizationInfo wqinfo = weights->quantization_info().uniform();
- const UniformQuantizationInfo oqinfo = dst->quantization_info().uniform();
-
- PixelValue zero_value = PixelValue(0, src->data_type(), src->quantization_info());
- int zero_value_s32;
- zero_value.get(zero_value_s32);
-
- float multiplier = iqinfo.scale * wqinfo.scale / oqinfo.scale;
- int output_multiplier = 0;
- int output_shift = 0;
- quantization::calculate_quantized_multiplier(multiplier, &output_multiplier, &output_shift);
- build_options.add_option("-DIS_QUANTIZED");
- build_options.add_option("-DDST_MULTIPLIER=" + support::cpp11::to_string(output_multiplier));
- build_options.add_option("-DDST_SHIFT=" + support::cpp11::to_string(output_shift));
- build_options.add_option("-DSRC_OFFSET=" + support::cpp11::to_string(-iqinfo.offset));
- build_options.add_option("-DWEI_OFFSET=" + support::cpp11::to_string(-wqinfo.offset));
- build_options.add_option("-DDST_OFFSET=" + support::cpp11::to_string(oqinfo.offset));
- build_options.add_option("-DZERO_VALUE=" + support::cpp11::to_string(zero_value_s32));
- build_options.add_option("-DACC_DATA_TYPE=" + get_cl_type_from_data_type(DataType::S32));
- }
- else
- {
- build_options.add_option("-DACC_DATA_TYPE=" + get_cl_type_from_data_type(data_type));
- build_options.add_option("-DZERO_VALUE=" + support::cpp11::to_string(0));
- build_options.add_option("-DSRC_OFFSET=" + support::cpp11::to_string(0));
- build_options.add_option("-DWEI_OFFSET=" + support::cpp11::to_string(0));
- build_options.add_option("-DDST_OFFSET=" + support::cpp11::to_string(0));
- build_options.add_option_if(act_info.enabled(), "-DA_VAL=" + float_to_string_with_full_precision(act_info.a()));
- build_options.add_option_if(act_info.enabled(), "-DB_VAL=" + float_to_string_with_full_precision(act_info.b()));
- }
- }
- else
- {
- _border_size = BorderSize(src->padding());
-
- kernel_name << "direct_convolution" << kernel_size << "x" << kernel_size;
-
- build_options.add_option_if(biases != nullptr, std::string("-DHAS_BIAS"));
-
- const bool run_optimized_for_bifrost = can_run_optimized_kernel_for_bifrost_nchw(gpu_target, conv_stride_x, conv_stride_y, kernel_size, data_type, _data_layout);
-
- if(run_optimized_for_bifrost)
- {
- build_options.add_option(std::string("-DWEIGHTS_DEPTH=" + support::cpp11::to_string(weights->dimension(channel_idx))));
-
- kernel_name << "_f32_bifrost";
- }
- else
- {
- build_options.add_option(std::string("-DDATA_TYPE=" + get_cl_type_from_data_type(data_type)));
- build_options.add_option(std::string("-DDATA_SIZE=" + get_data_size_from_data_type(data_type)));
- build_options.add_option(std::string("-DWEIGHTS_DEPTH=" + support::cpp11::to_string(weights->dimension(channel_idx))));
- build_options.add_option(std::string("-DSTRIDE_X=" + support::cpp11::to_string(conv_stride_x)));
- build_options.add_option(std::string("-DDATA_TYPE_PROMOTED=" + get_cl_type_from_data_type(data_type)));
-
- if(is_data_type_quantized(data_type))
- {
- const UniformQuantizationInfo iqinfo = src->quantization_info().uniform();
- const UniformQuantizationInfo wqinfo = weights->quantization_info().uniform();
- const UniformQuantizationInfo oqinfo = dst->quantization_info().uniform();
-
- float multiplier = iqinfo.scale * wqinfo.scale / oqinfo.scale;
- int output_multiplier = 0;
- int output_shift = 0;
- quantization::calculate_quantized_multiplier(multiplier, &output_multiplier, &output_shift);
- build_options.add_option("-DOUTPUT_MULTIPLIER=" + support::cpp11::to_string(output_multiplier));
- build_options.add_option("-DOUTPUT_SHIFT=" + support::cpp11::to_string(output_shift));
- build_options.add_option("-DKERNEL_SIZE=" + support::cpp11::to_string(kernel_size));
- build_options.add_option("-DINPUT_OFFSET=" + support::cpp11::to_string(-iqinfo.offset));
- build_options.add_option("-DWEIGHTS_OFFSET=" + support::cpp11::to_string(-wqinfo.offset));
- build_options.add_option("-DOUTPUT_OFFSET=" + support::cpp11::to_string(oqinfo.offset));
-
- kernel_name.str("direct_convolution_quantized");
- }
- }
- }
-
- _kernel = create_kernel(compile_context, kernel_name.str(), build_options.options());
-
- // Set config_id for enabling LWS tuning
- _config_id = kernel_name.str();
- _config_id += "_";
- _config_id += lower_string(string_from_data_type(data_type));
- _config_id += "_";
- _config_id += support::cpp11::to_string(kernel_size);
- _config_id += "_";
- _config_id += support::cpp11::to_string(border_size().left);
- _config_id += "_";
- _config_id += support::cpp11::to_string(border_size().top);
- _config_id += "_";
- _config_id += support::cpp11::to_string(border_size().right);
- _config_id += "_";
- _config_id += support::cpp11::to_string(border_size().bottom);
- _config_id += "_";
- _config_id += support::cpp11::to_string(conv_stride_x);
- _config_id += "_";
- _config_id += support::cpp11::to_string(conv_stride_y);
- _config_id += "_";
- _config_id += support::cpp11::to_string(dst->dimension(width_idx));
- _config_id += "_";
- _config_id += support::cpp11::to_string(dst->dimension(height_idx));
- _config_id += "_";
- _config_id += lower_string(string_from_data_layout(_data_layout));
-}
-
-Status ClDirectConv2dKernel::validate(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *dst,
- const PadStrideInfo &conv_info, const ActivationLayerInfo &act_info, const GPUTarget target)
-{
- ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(src, weights, biases, dst, conv_info, act_info));
- ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(src->clone().get(), weights->clone().get(), dst->clone().get(), conv_info, target).first);
-
- return Status{};
-}
-
-void ClDirectConv2dKernel::run_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue)
-{
- ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
- ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(IKernel::window(), window);
-
- // Get initial windows
- Window slice = window.first_slice_window_3D();
-
- const auto src = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC_0));
- const auto weights = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC_1));
- const auto biases = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC_2));
- auto dst = utils::cast::polymorphic_downcast<ICLTensor *>(tensors.get_tensor(TensorType::ACL_DST));
-
- if(_data_layout == DataLayout::NHWC)
- {
- cl::Image2D weights_cl_image;
-
- const size_t dim_y_collapsed = ceil_to_multiple(dst->info()->dimension(1) * dst->info()->dimension(2), slice.y().step());
- const bool export_to_cl_image = export_to_cl_image_support(weights->info(), get_target(), _data_layout);
-
- slice.set(Window::DimY, Window::Dimension(0, dim_y_collapsed, slice.y().step()));
- slice.set(Window::DimZ, Window::Dimension(0, dst->info()->dimension(3), 1));
-
- if(export_to_cl_image)
- {
- const size_t image_w = weights->info()->dimension(0) / 4;
- const size_t image_h = weights->info()->dimension(1) * weights->info()->dimension(2) * weights->info()->dimension(3);
- const TensorShape shape2d(image_w, image_h);
- const size_t image_row_pitch = weights->info()->strides_in_bytes()[1];
-
- // Export cl_buffer to cl_image
- weights_cl_image = create_image2d_from_buffer(CLKernelLibrary::get().context(), weights->cl_buffer(), shape2d, weights->info()->data_type(), image_row_pitch);
- }
-
- unsigned int idx = 0;
- add_4D_tensor_argument(idx, src, slice);
- add_4D_tensor_argument(idx, dst, slice);
- if(export_to_cl_image)
- {
- _kernel.setArg(idx++, weights_cl_image);
- }
- add_4D_tensor_argument(idx, weights, slice);
- if(biases != nullptr)
- {
- add_1D_tensor_argument(idx, biases, slice);
- }
- enqueue(queue, *this, slice, lws_hint());
- }
- else
- {
- Window win_in = window;
-
- win_in.adjust(Window::DimX, -_conv_info.pad_left(), true);
- win_in.adjust(Window::DimY, -_conv_info.pad_top(), true);
-
- const int width_idx = get_data_layout_dimension_index(_data_layout, DataLayoutDimension::WIDTH);
- const int height_idx = get_data_layout_dimension_index(_data_layout, DataLayoutDimension::HEIGHT);
-
- const int conv_stride_x = std::get<0>(_conv_info.stride());
- const int conv_stride_y = std::get<1>(_conv_info.stride());
-
- win_in.set_dimension_step(width_idx, window[width_idx].step() * conv_stride_x);
- win_in.set_dimension_step(height_idx, window[height_idx].step() * conv_stride_y);
-
- Window slice_in = win_in.first_slice_window_3D();
- unsigned int idx1 = 2 * num_arguments_per_3D_tensor();
- add_3D_tensor_argument(idx1, weights, slice);
-
- if(biases != nullptr)
- {
- Window slice_biases;
- slice_biases.use_tensor_dimensions(biases->info()->tensor_shape());
- add_1D_tensor_argument(idx1, biases, slice_biases);
- }
-
- _kernel.setArg(idx1++, static_cast<unsigned int>(weights->info()->strides_in_bytes()[3]));
-
- do
- {
- unsigned int idx = 0;
- add_3D_tensor_argument(idx, src, slice_in);
- add_3D_tensor_argument(idx, dst, slice);
- enqueue(queue, *this, slice, lws_hint());
- }
- while(window.slide_window_slice_3D(slice) && win_in.slide_window_slice_3D(slice_in));
- }
-}
-} // namespace kernels
-} // namespace opencl
-} // namespace arm_compute
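Note on the quantized branch above: OUTPUT_MULTIPLIER and OUTPUT_SHIFT come from decomposing the real requantization scale iqinfo.scale * wqinfo.scale / oqinfo.scale into a fixed-point multiplier and a shift. The standalone sketch below illustrates that decomposition under a common Q0.31 convention; it is illustrative only and is not the library's quantization::calculate_quantized_multiplier(), whose rounding and sign conventions may differ.

    #include <cmath>
    #include <cstdint>

    // Decompose 'multiplier' into a Q0.31 fixed-point multiplier and a shift such that
    // multiplier ~= quantized_multiplier * 2^-31 * 2^-shift (illustrative convention).
    void decompose_multiplier(double multiplier, int32_t *quantized_multiplier, int *shift)
    {
        int exponent = 0;
        const double q = std::frexp(multiplier, &exponent); // multiplier = q * 2^exponent, q in [0.5, 1)
        int64_t q_fixed = static_cast<int64_t>(std::llround(q * (1ll << 31)));
        if(q_fixed == (1ll << 31)) // rounding can push q up to exactly 1.0
        {
            q_fixed /= 2;
            ++exponent;
        }
        *quantized_multiplier = static_cast<int32_t>(q_fixed);
        *shift                = -exponent; // positive value => right shift at requantization time
    }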
diff --git a/src/core/gpu/cl/kernels/ClDirectConv2dKernel.h b/src/core/gpu/cl/kernels/ClDirectConv2dKernel.h
deleted file mode 100644
index e76666fd36..0000000000
--- a/src/core/gpu/cl/kernels/ClDirectConv2dKernel.h
+++ /dev/null
@@ -1,89 +0,0 @@
-/*
- * Copyright (c) 2017-2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_CL_DIRECT_CONV2D_KERNEL_H
-#define ARM_COMPUTE_CL_DIRECT_CONV2D_KERNEL_H
-
-#include "src/core/common/Macros.h"
-#include "src/core/gpu/cl/ClCompileContext.h"
-#include "src/core/gpu/cl/IClKernel.h"
-
-namespace arm_compute
-{
-namespace opencl
-{
-namespace kernels
-{
-/** Interface for the direct convolution kernel. */
-class ClDirectConv2dKernel : public IClKernel
-{
-public:
- ClDirectConv2dKernel() = default;
- ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(ClDirectConv2dKernel);
- /** Set the src, weights, biases and dst tensors info.
- *
- * @note: Due to set_valid_region(), src/weights/biases cannot be const. This needs to change once set_valid_region() is removed.
- *
- * @note: DirectConvolution only works in the following configurations:
- * 1x1 convolution with stride_x = 1/2/3, stride_y = 1/2/3
- * 3x3 convolution with stride_x = 1/2, stride_y = 1/2
- * 5x5 convolution with stride_x = 1/2, stride_y = 1/2
- * 9x9 convolution with stride_x = 1/2, stride_y = 1/2
- *
- * @param[in] compile_context The compile context to be used.
- * @param[in] src The src tensor info to convolve. 3 lower dimensions represent a single src [width, height, IFM],
- * while every optional dimension from 4 and above represent a batch of inputs. Data types supported: QASYMM8_SIGNED/QASYMM8/F16/F32.
- * @param[in] weights Weights tensor info. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM].
- * The 3rd dimension must be the same as the src's volume 3rd dimension.
- * Data type supported: Same as @p src.
- * @param[in] biases Biases tensor info. Biases are 1D tensor with dimension [OFM].
- * Data type supported: Should match @p src data type, except for src of QASYMM8 and QASYMM8_SIGNED type where biases should be of S32 type
- * @param[out] dst Output tensor info.
- * Its 3rd dimension must be equal to the 4th dimension of the @p weights tensor. Data types supported: Same as @p src.
- * @param[in] conv_info Contains padding and stride information described in @ref PadStrideInfo.
- * @param[in] act_info Contains activation information described in @ref ActivationLayerInfo.
- */
- void configure(const CLCompileContext &compile_context, ITensorInfo *src, ITensorInfo *weights, ITensorInfo *biases, ITensorInfo *dst,
- const PadStrideInfo &conv_info, const ActivationLayerInfo &act_info);
- /** Static function to check if given info will lead to a valid configuration
- *
- * Similar to ClDirectConv2dKernel::configure()
- *
- * @return a status
- */
- static Status validate(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *dst,
- const PadStrideInfo &conv_info, const ActivationLayerInfo &act_info, const GPUTarget target);
-
- // Inherited methods overridden:
- void run_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue) override;
- BorderSize border_size() const override;
-
-public:
- DataLayout _data_layout{};
- BorderSize _border_size{};
- PadStrideInfo _conv_info{};
-};
-} // namespace kernels
-} // namespace opencl
-} // namespace arm_compute
-#endif /*ARM_COMPUTE_CL_DIRECT_CONV2D_KERNEL_H */
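For reference, a minimal usage sketch of the interface above as it existed before this patch (the header is removed by this change). The shapes, GPU target and initialisation call are placeholder assumptions; the validate()/configure() signatures are the ones declared above.

    #include "arm_compute/core/CL/CLKernelLibrary.h"
    #include "arm_compute/core/TensorInfo.h"
    #include "arm_compute/core/Types.h"
    #include "arm_compute/runtime/CL/CLScheduler.h"
    #include "src/core/gpu/cl/kernels/ClDirectConv2dKernel.h"

    using namespace arm_compute;

    void configure_direct_conv2d_example()
    {
        CLScheduler::get().default_init(); // one way to set up the CL context and kernel library

        // Hypothetical NCHW shapes: src [W, H, IFM, N], weights [kernel_x, kernel_y, IFM, OFM]
        TensorInfo src(TensorShape(64U, 64U, 32U, 1U), 1, DataType::F32);
        TensorInfo weights(TensorShape(3U, 3U, 32U, 16U), 1, DataType::F32);
        TensorInfo biases(TensorShape(16U), 1, DataType::F32);
        TensorInfo dst(TensorShape(62U, 62U, 16U, 1U), 1, DataType::F32);
        const PadStrideInfo conv_info(1, 1, 0, 0); // stride 1, no padding

        opencl::kernels::ClDirectConv2dKernel direct_conv;
        const Status status = opencl::kernels::ClDirectConv2dKernel::validate(
            &src, &weights, &biases, &dst, conv_info, ActivationLayerInfo(), GPUTarget::BIFROST);
        if(status.error_code() == ErrorCode::OK)
        {
            direct_conv.configure(CLKernelLibrary::get().get_compile_context(),
                                  &src, &weights, &biases, &dst, conv_info, ActivationLayerInfo());
            // At run time the tensors are bound through an ITensorPack
            // (ACL_SRC_0/ACL_SRC_1/ACL_SRC_2/ACL_DST) and executed via run_op().
        }
    }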
diff --git a/src/core/gpu/cl/kernels/ClElementwiseKernel.cpp b/src/core/gpu/cl/kernels/ClElementwiseKernel.cpp
deleted file mode 100644
index 335ee9c392..0000000000
--- a/src/core/gpu/cl/kernels/ClElementwiseKernel.cpp
+++ /dev/null
@@ -1,536 +0,0 @@
-/*
- * Copyright (c) 2018-2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "src/core/gpu/cl/kernels/ClElementwiseKernel.h"
-
-#include "arm_compute/core/CL/CLHelpers.h"
-#include "arm_compute/core/CL/ICLTensor.h"
-#include "src/common/utils/Validate.h"
-#include "src/core/CL/CLValidate.h"
-#include "src/core/helpers/AutoConfiguration.h"
-#include "src/core/helpers/WindowHelpers.h"
-#include "support/Cast.h"
-#include "support/StringSupport.h"
-#include <map>
-
-namespace arm_compute
-{
-namespace opencl
-{
-namespace kernels
-{
-namespace
-{
-constexpr unsigned int vector_size_byte_opencl = 16;
-
-std::map<ArithmeticOperation, std::string> supported_arithmetic_ops =
-{
- { ArithmeticOperation::ADD, "ADD" },
- { ArithmeticOperation::SUB, "SUB" },
- { ArithmeticOperation::DIV, "DIV" },
- { ArithmeticOperation::SQUARED_DIFF, "SQUARED_DIFF" },
- { ArithmeticOperation::MIN, "MIN" },
- { ArithmeticOperation::MAX, "MAX" },
- { ArithmeticOperation::POWER, "POWER" },
- { ArithmeticOperation::PRELU, "PRELU" },
-};
-
-std::map<ArithmeticOperation, std::string> supported_sat_arithmetic_ops =
-{
- { ArithmeticOperation::ADD, "ADD" },
- { ArithmeticOperation::SUB, "SUB" },
-};
-
-std::string generate_id_for_tuning_common(const std::string &kernel_name, const ITensorInfo &src1, const ITensorInfo &dst)
-{
- std::string config_id;
- // Set config_id for enabling LWS tuning
- config_id = kernel_name;
- config_id += "_";
- config_id += lower_string(string_from_data_type(src1.data_type()));
- config_id += "_";
- config_id += support::cpp11::to_string(dst.dimension(0));
- config_id += "_";
- config_id += support::cpp11::to_string(dst.dimension(1));
- return config_id;
-}
-
-Status validate_arguments_with_float_only_supported_rules(const ITensorInfo &src1, const ITensorInfo &src2, const ITensorInfo &dst)
-{
- ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(&src1, &src2, &dst);
- ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(&src1);
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&src1, 1, DataType::F16, DataType::F32);
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(&src1, &src2);
-
- const TensorShape out_shape = TensorShape::broadcast_shape(src1.tensor_shape(), src2.tensor_shape());
-
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(out_shape.total_size() == 0, "Inputs are not broadcast compatible");
-
- // Validate in case of configured dst
- if(dst.total_size() > 0)
- {
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&dst, 1, DataType::F16, DataType::F32);
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(&src1, &dst);
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(detail::have_different_dimensions(out_shape, dst.tensor_shape(), 0),
- "Wrong shape for dst");
- }
-
- return Status{};
-}
-
-Status validate_arguments_divide_operation(const ITensorInfo* src1, const ITensorInfo* src2, const ITensorInfo* dst)
-{
- ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src1, src2, dst);
- ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(src1);
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src1, 1, DataType::F16, DataType::F32, DataType::S32);
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src1, src2);
-
- const TensorShape out_shape = TensorShape::broadcast_shape(src1->tensor_shape(), src2->tensor_shape());
-
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(out_shape.total_size() == 0, "Inputs are not broadcast compatible");
-
- // Validate in case of configured dst
- if(dst->total_size() > 0)
- {
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(dst, 1, DataType::F16, DataType::F32, DataType::S32);
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src1, dst);
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(detail::have_different_dimensions(out_shape, dst->tensor_shape(), 0),
- "Wrong shape for dst");
- }
-
- return Status{};
-}
-
-Status validate_arguments_with_arithmetic_rules(const ITensorInfo &src1, const ITensorInfo &src2, const ITensorInfo &dst)
-{
- ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(&src1);
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&src1, 1, DataType::U8, DataType::QASYMM8, DataType::QASYMM8_SIGNED,
- DataType::S16, DataType::QSYMM16, DataType::F16,
- DataType::S32, DataType::F32);
- ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(&src2);
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&src2, 1, DataType::U8, DataType::QASYMM8, DataType::QASYMM8_SIGNED,
- DataType::S16, DataType::QSYMM16, DataType::F16,
- DataType::S32, DataType::F32);
-
- const bool is_quantized = is_data_type_quantized(src1.data_type()) || is_data_type_quantized(src2.data_type());
- if(is_quantized)
- {
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(&src1, &src2);
-
- if(is_data_type_quantized_symmetric(src1.data_type()))
- {
- const int32_t in1_offset = src1.quantization_info().uniform().offset;
- const int32_t in2_offset = src2.quantization_info().uniform().offset;
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(in1_offset != 0, "For quantized symmetric, offset must be zero");
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(in2_offset != 0, "For quantized symmetric, offset must be zero");
- }
- }
-
- const TensorShape out_shape = TensorShape::broadcast_shape(src1.tensor_shape(), src2.tensor_shape());
-
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(out_shape.total_size() == 0, "Inputs are not broadcast compatible");
-
- // Validate in case of configured dst
- if(dst.total_size() > 0)
- {
- ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(&dst);
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&dst, 1, DataType::U8, DataType::QASYMM8, DataType::QASYMM8_SIGNED,
- DataType::S16, DataType::QSYMM16, DataType::F16,
- DataType::S32, DataType::F32);
- ARM_COMPUTE_RETURN_ERROR_ON_MSG((dst.data_type() == DataType::U8) && ((src1.data_type() != DataType::U8) || (src2.data_type() != DataType::U8)),
- "dst can only be U8 if both inputs are U8");
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(detail::have_different_dimensions(out_shape, dst.tensor_shape(), 0),
- "Wrong shape for dst");
-
- if(is_quantized)
- {
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(&src1, &dst);
-
- if(is_data_type_quantized_symmetric(dst.data_type()))
- {
- const int32_t offset = dst.quantization_info().uniform().offset;
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(offset != 0, "For quantized symmetric, offset must be zero");
- }
- }
- }
- return Status{};
-}
-
-CLBuildOptions generate_build_options_with_arithmetic_rules(const ITensorInfo &src1, const ITensorInfo &src2, const ITensorInfo &dst, const std::string &operation_string)
-{
- CLBuildOptions build_opts;
-
- const unsigned int num_elems_processed_per_iteration = adjust_vec_size(vector_size_byte_opencl / dst.element_size(), dst.dimension(0));
-
- build_opts.add_option("-DDATA_TYPE_IN1=" + get_cl_type_from_data_type(src1.data_type()));
- build_opts.add_option("-DDATA_TYPE_IN2=" + get_cl_type_from_data_type(src2.data_type()));
- build_opts.add_option("-DDATA_TYPE_OUT=" + get_cl_type_from_data_type(dst.data_type()));
- build_opts.add_option("-DVEC_SIZE_IN1=" + support::cpp11::to_string(src1.dimension(0) == 1 ? 1 : num_elems_processed_per_iteration));
- build_opts.add_option("-DVEC_SIZE_IN2=" + support::cpp11::to_string(src2.dimension(0) == 1 ? 1 : num_elems_processed_per_iteration));
- build_opts.add_option("-DVEC_SIZE_OUT=" + support::cpp11::to_string(num_elems_processed_per_iteration));
- build_opts.add_option("-DVEC_SIZE_LEFTOVER=" + support::cpp11::to_string(dst.dimension(0) % num_elems_processed_per_iteration));
- build_opts.add_option("-DOP=" + operation_string);
- if(is_data_type_quantized(src1.data_type()))
- {
- const UniformQuantizationInfo iq1info = src1.quantization_info().uniform();
- const UniformQuantizationInfo iq2info = src2.quantization_info().uniform();
- const UniformQuantizationInfo oqinfo = dst.quantization_info().uniform();
-
- build_opts.add_option("-DOFFSET_IN1=" + support::cpp11::to_string(iq1info.offset));
- build_opts.add_option("-DOFFSET_IN2=" + support::cpp11::to_string(iq2info.offset));
- build_opts.add_option("-DOFFSET_OUT=" + support::cpp11::to_string(oqinfo.offset));
- build_opts.add_option("-DSCALE_IN1=" + float_to_string_with_full_precision(iq1info.scale));
- build_opts.add_option("-DSCALE_IN2=" + float_to_string_with_full_precision(iq2info.scale));
- build_opts.add_option("-DSCALE_OUT=" + float_to_string_with_full_precision(oqinfo.scale));
- }
- build_opts.add_option_if(src1.data_type() == DataType::S32, "-DS32");
-
- return build_opts;
-}
-
-std::pair<Status, Window> configure_window_arithmetic_common(ITensorInfo &dst)
-{
- const unsigned int num_elems_processed_per_iteration = adjust_vec_size(vector_size_byte_opencl / dst.element_size(), dst.dimension(0));
- Window win = calculate_max_window(dst, Steps(num_elems_processed_per_iteration));
- return std::make_pair(Status{}, win);
-}
-
-std::pair<Status, Window> validate_and_configure_window_for_arithmetic_operators(ITensorInfo &src1, ITensorInfo &src2, ITensorInfo &dst)
-{
- const std::pair<TensorShape, ValidRegion> broadcast_pair = ITensorInfo::broadcast_shape_and_valid_region(src1, src2);
- const TensorShape &out_shape = broadcast_pair.first;
-
- set_shape_if_empty(dst, out_shape);
-
- if(src1.data_type() == DataType::S16 || src2.data_type() == DataType::S16)
- {
- set_format_if_unknown(dst, Format::S16);
- }
- else if(src1.data_type() == DataType::F16 || src2.data_type() == DataType::F16)
- {
- set_format_if_unknown(dst, Format::F16);
- }
- else if(src1.data_type() == DataType::F32 || src2.data_type() == DataType::F32)
- {
- set_format_if_unknown(dst, Format::F32);
- }
- else if(src1.data_type() == DataType::QASYMM8 || src2.data_type() == DataType::QASYMM8)
- {
- set_data_type_if_unknown(dst, DataType::QASYMM8);
- }
- else if(src1.data_type() == DataType::QASYMM8_SIGNED || src2.data_type() == DataType::QASYMM8_SIGNED)
- {
- set_data_type_if_unknown(dst, DataType::QASYMM8_SIGNED);
- }
- else if(src1.data_type() == DataType::QSYMM16 || src2.data_type() == DataType::QSYMM16)
- {
- set_data_type_if_unknown(dst, DataType::QSYMM16);
- }
-
- return configure_window_arithmetic_common(dst);
-}
-
-std::pair<Status, Window> validate_and_configure_window_for_logical_binary_operators(ITensorInfo &src1, ITensorInfo &src2, ITensorInfo &dst)
-{
- const std::pair<TensorShape, ValidRegion> broadcast_pair = ITensorInfo::broadcast_shape_and_valid_region(src1, src2);
- const TensorShape &out_shape = broadcast_pair.first;
-
- set_shape_if_empty(dst, out_shape);
- set_data_type_if_unknown(dst, DataType::U8);
-
- // The arithmetic utility functions can be shared
- return configure_window_arithmetic_common(dst);
-}
-
-std::pair<Status, Window> validate_and_configure_window_for_division(ITensorInfo &src1, ITensorInfo &src2, ITensorInfo &dst)
-{
- const std::pair<TensorShape, ValidRegion> broadcast_pair = ITensorInfo::broadcast_shape_and_valid_region(src1, src2);
- const TensorShape &out_shape = broadcast_pair.first;
- auto_init_if_empty(dst, out_shape, 1, src1.data_type());
- return configure_window_arithmetic_common(dst);
-}
-} // namespace
-
-void ClElementwiseKernel::configure_common(ITensorInfo *src1, ITensorInfo *src2, ITensorInfo *dst)
-{
- configure_common(CLKernelLibrary::get().get_compile_context(), src1, src2, dst);
-}
-
-void ClElementwiseKernel::configure_common(const ClCompileContext &compile_context, ITensorInfo *src1, ITensorInfo *src2, ITensorInfo *dst)
-{
- // Configure kernel window
- auto win_config = validate_and_configure_window(*src1, *src2, *dst);
- ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
-
- _src1 = src1;
- _src2 = src2;
- _dst = dst;
-
- std::string kernel_name = "elementwise_operation_" + name();
- if(is_data_type_quantized(src1->data_type()))
- {
- kernel_name += "_quantized";
- }
-
- // Set kernel build options
- CLBuildOptions build_opts = generate_build_options(*src1, *src2, *dst);
- if(_act_info.enabled())
- {
- build_opts.add_option("-DACTIVATION_TYPE=" + lower_string(string_from_activation_func(_act_info.activation())));
- build_opts.add_option("-DA_VAL=" + float_to_string_with_full_precision(_act_info.a()));
- build_opts.add_option("-DB_VAL=" + float_to_string_with_full_precision(_act_info.b()));
- }
-
- // Create kernel
- _kernel = create_kernel(compile_context, kernel_name, build_opts.options());
-
- ICLKernel::configure_internal(win_config.second);
-
- _config_id = generate_id_for_tuning(kernel_name, *src1, *dst);
-}
-
-void ClElementwiseKernel::run_op(ITensorPack &tensors, const Window &window, ::cl::CommandQueue &queue)
-{
- ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
- ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window);
-
- const auto src_0 = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC_0));
- const auto src_1 = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC_1));
- auto dst = utils::cast::polymorphic_downcast<ICLTensor *>(tensors.get_tensor(TensorType::ACL_DST));
-
- const TensorShape &in_shape1 = src_0->info()->tensor_shape();
- const TensorShape &in_shape2 = src_1->info()->tensor_shape();
- const TensorShape &out_shape = dst->info()->tensor_shape();
-
- bool can_collapse = true;
- const bool is_vector = in_shape1.num_dimensions() == 1 || in_shape2.num_dimensions() == 1;
- if(std::min(in_shape1.total_size(), in_shape2.total_size()) > 1 && !is_vector)
- {
- can_collapse = (std::min(in_shape1.num_dimensions(), in_shape2.num_dimensions()) > Window::DimZ);
- for(size_t d = Window::DimZ; can_collapse && (d < out_shape.num_dimensions()); d++)
- {
- can_collapse = (in_shape1[d] == in_shape2[d]);
- }
- }
-
- bool has_collapsed = false;
- Window collapsed = can_collapse ? window.collapse_if_possible(ICLKernel::window(), Window::DimZ, &has_collapsed) : window;
-
- const TensorShape &in_shape1_collapsed = has_collapsed ? in_shape1.collapsed_from(Window::DimZ) : in_shape1;
- const TensorShape &in_shape2_collapsed = has_collapsed ? in_shape2.collapsed_from(Window::DimZ) : in_shape2;
-
- Window slice = collapsed.first_slice_window_3D();
- Window slice_src1 = slice.broadcast_if_dimension_le_one(in_shape1_collapsed);
- Window slice_src2 = slice.broadcast_if_dimension_le_one(in_shape2_collapsed);
- do
- {
- unsigned int idx = 0;
- add_3D_tensor_argument(idx, src_0, slice_src1);
- add_3D_tensor_argument(idx, src_1, slice_src2);
- add_3D_tensor_argument(idx, dst, slice);
-
- enqueue(queue, *this, slice, lws_hint());
- ARM_COMPUTE_UNUSED(collapsed.slide_window_slice_3D(slice_src1));
- ARM_COMPUTE_UNUSED(collapsed.slide_window_slice_3D(slice_src2));
- }
- while(collapsed.slide_window_slice_3D(slice));
-}
-
-/** Logical binary */
-
-void ClLogicalBinaryKernel::configure(const ClCompileContext &compile_context, LogicalOperation op, ITensorInfo *src1, ITensorInfo *src2, ITensorInfo *dst)
-{
- ARM_COMPUTE_ERROR_ON_NULLPTR(src1, src2, dst);
- ARM_COMPUTE_ERROR_THROW_ON(ClLogicalBinaryKernel::validate(op, src1, src2, dst));
- _op = op;
- configure_common(compile_context, src1, src2, dst);
-}
-
-Status ClLogicalBinaryKernel::validate(LogicalOperation op, const ITensorInfo *src1, const ITensorInfo *src2, const ITensorInfo *dst)
-{
- ARM_COMPUTE_UNUSED(op);
- ARM_COMPUTE_ASSERT(op != LogicalOperation::Unknown && op != LogicalOperation::Not);
- ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src1, src2, dst);
-
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src1, 1, DataType::U8);
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src1, src2);
-
- ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments_with_arithmetic_rules(*src1, *src2, *dst));
- ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window_for_logical_binary_operators(*src1->clone(), *src2->clone(), *dst->clone()).first);
-
- return Status{};
-}
-
-std::string ClLogicalBinaryKernel::name()
-{
- switch(_op)
- {
- case LogicalOperation::And:
- return "AND";
- case LogicalOperation::Or:
- return "OR";
- case LogicalOperation::Not:
- /* fall through */
- default:
- ARM_COMPUTE_ASSERT(false);
- }
- return "";
-}
-
-std::pair<Status, Window> ClLogicalBinaryKernel::validate_and_configure_window(ITensorInfo &src1, ITensorInfo &src2, ITensorInfo &dst)
-{
- return validate_and_configure_window_for_logical_binary_operators(src1, src2, dst);
-}
-
-CLBuildOptions ClLogicalBinaryKernel::generate_build_options(const ITensorInfo &src1, const ITensorInfo &src2, const ITensorInfo &dst)
-{
- // The arithmetic utility functions can be shared
- return generate_build_options_with_arithmetic_rules(src1, src2, dst, name());
-}
-
-std::string ClLogicalBinaryKernel::generate_id_for_tuning(const std::string &kernel_name, const ITensorInfo &src1, const ITensorInfo &dst)
-{
- return generate_id_for_tuning_common(kernel_name, src1, dst);
-}
-
-/** Arithmetic operations with saturation */
-void ClSaturatedArithmeticKernel::configure(const ClCompileContext &compile_context, ArithmeticOperation op, ITensorInfo *input1, ITensorInfo *input2, ITensorInfo *output,
- const ConvertPolicy &policy,
- const ActivationLayerInfo &act_info)
-{
- ARM_COMPUTE_ERROR_ON_NULLPTR(input1, input2, output);
- ARM_COMPUTE_ERROR_THROW_ON(ClSaturatedArithmeticKernel::validate(op, input1, input2, output, policy, act_info));
- auto padding_info = get_padding_info({ input1, input2, output });
-
- _policy = policy;
- _op = op;
- _act_info = act_info;
- configure_common(compile_context, input1, input2, output);
- ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
-}
-
-Status ClSaturatedArithmeticKernel::validate(ArithmeticOperation op, const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ConvertPolicy &policy,
- const ActivationLayerInfo &act_info)
-{
- ARM_COMPUTE_UNUSED(op, policy);
- ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input1, input2, output);
- ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments_with_arithmetic_rules(*input1, *input2, *output));
- ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window_for_arithmetic_operators(*input1->clone(), *input2->clone(), *output->clone()).first);
- ARM_COMPUTE_RETURN_ERROR_ON(act_info.enabled() && !is_data_type_float(output->data_type()));
-
- return Status{};
-}
-
-std::pair<Status, Window> ClSaturatedArithmeticKernel::validate_and_configure_window(ITensorInfo &input1, ITensorInfo &input2, ITensorInfo &output)
-{
- return validate_and_configure_window_for_arithmetic_operators(input1, input2, output);
-}
-
-CLBuildOptions ClSaturatedArithmeticKernel::generate_build_options(const ITensorInfo &input1, const ITensorInfo &input2, const ITensorInfo &output)
-{
- const bool has_float_out = is_data_type_float(output.data_type());
- auto build_options = generate_build_options_with_arithmetic_rules(input1, input2, output, name());
- build_options.add_option((_policy == ConvertPolicy::WRAP || has_float_out) ? "-DWRAP" : "-DSATURATE");
- return build_options;
-}
-
-std::string ClSaturatedArithmeticKernel::generate_id_for_tuning(const std::string &kernel_name, const ITensorInfo &input1, const ITensorInfo &output)
-{
- auto config_id = generate_id_for_tuning_common(kernel_name, input1, output);
- config_id += (_policy == ConvertPolicy::WRAP) ? "_wrap_" : "_saturate_";
- config_id += lower_string(string_from_data_layout(input1.data_layout()));
- return config_id;
-}
-
-std::string ClSaturatedArithmeticKernel::name()
-{
- return supported_sat_arithmetic_ops[_op];
-}
-
-/** Arithmetic operations */
-void ClArithmeticKernel::configure(const ClCompileContext &compile_context, ArithmeticOperation op, ITensorInfo *src1, ITensorInfo *src2, ITensorInfo *dst,
- const ActivationLayerInfo &act_info)
-{
- ARM_COMPUTE_ERROR_ON_NULLPTR(src1, src2, dst);
- ARM_COMPUTE_ERROR_THROW_ON(ClArithmeticKernel::validate(op, src1, src2, dst, act_info));
- auto padding_info = get_padding_info({ src1, src2, dst });
-
- _op = op;
- _act_info = act_info;
- configure_common(compile_context, src1, src2, dst);
- ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
-}
-
-Status ClArithmeticKernel::validate(ArithmeticOperation op, const ITensorInfo *src1, const ITensorInfo *src2, const ITensorInfo *dst, const ActivationLayerInfo &act_info)
-{
- ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src1, src2, dst);
- if(op == ArithmeticOperation::DIV)
- {
- // Partial integer support S32/F32/F16
- ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments_divide_operation(src1, src2, dst));
- ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window_for_division(*src1->clone(), *src2->clone(), *dst->clone()).first);
- }
- else if(op == ArithmeticOperation::POWER)
- {
- // The power operator doesn't support integer arithmetic
- ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments_with_float_only_supported_rules(*src1, *src2, *dst));
- ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window_for_division(*src1->clone(), *src2->clone(), *dst->clone()).first);
- }
- else
- {
- ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments_with_arithmetic_rules(*src1, *src2, *dst));
- ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window_for_arithmetic_operators(*src1->clone(), *src2->clone(), *dst->clone()).first);
- }
- ARM_COMPUTE_RETURN_ERROR_ON(act_info.enabled() && !is_data_type_float(dst->data_type()));
-
- return Status{};
-}
-std::pair<Status, Window> ClArithmeticKernel::validate_and_configure_window(ITensorInfo &src1, ITensorInfo &src2, ITensorInfo &dst)
-{
- if(_op == ArithmeticOperation::DIV || _op == ArithmeticOperation::POWER)
- {
- // Division and Power operators don't support integer arithmetic
- return validate_and_configure_window_for_division(src1, src2, dst);
- }
- else
- {
- return validate_and_configure_window_for_arithmetic_operators(src1, src2, dst);
- }
-}
-
-CLBuildOptions ClArithmeticKernel::generate_build_options(const ITensorInfo &src1, const ITensorInfo &src2, const ITensorInfo &dst)
-{
- return generate_build_options_with_arithmetic_rules(src1, src2, dst, name());
-}
-std::string ClArithmeticKernel::generate_id_for_tuning(const std::string &kernel_name, const ITensorInfo &src1, const ITensorInfo &dst)
-{
- return generate_id_for_tuning_common(kernel_name, src1, dst);
-}
-
-std::string ClArithmeticKernel::name()
-{
- return supported_arithmetic_ops[_op];
-}
-} // namespace kernels
-} // namespace opencl
-} // namespace arm_compute
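The build options generated above derive the OpenCL vector width from the 16-byte vector_size_byte_opencl constant and emit VEC_SIZE_LEFTOVER for the tail elements. The standalone snippet below illustrates that arithmetic; vec_size_for() is a simplified stand-in for adjust_vec_size(), which may apply additional rules.

    #include <algorithm>
    #include <cstddef>
    #include <cstdio>

    // Simplified stand-in for adjust_vec_size(): never use a vector wider than the
    // innermost dimension of the destination tensor.
    static std::size_t vec_size_for(std::size_t element_size, std::size_t dim0)
    {
        const std::size_t vector_size_byte_opencl = 16; // same constant as in the kernel above
        const std::size_t requested               = vector_size_byte_opencl / element_size;
        return std::max<std::size_t>(1, std::min(requested, dim0));
    }

    int main()
    {
        const std::size_t dim0 = 17;                    // innermost dimension of the destination
        const std::size_t vec  = vec_size_for(4, dim0); // F32: 16 / 4 = 4 elements per vector
        std::printf("VEC_SIZE_OUT=%zu VEC_SIZE_LEFTOVER=%zu\n", vec, dim0 % vec); // prints 4 and 1
    }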
diff --git a/src/core/gpu/cl/kernels/ClElementwiseKernel.h b/src/core/gpu/cl/kernels/ClElementwiseKernel.h
deleted file mode 100644
index 4ed8ae73ab..0000000000
--- a/src/core/gpu/cl/kernels/ClElementwiseKernel.h
+++ /dev/null
@@ -1,219 +0,0 @@
-/*
- * Copyright (c) 2018-2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_CL_ELEMENTWISE_KERNEL_H
-#define ARM_COMPUTE_CL_ELEMENTWISE_KERNEL_H
-
-#include "src/core/KernelTypes.h"
-#include "src/core/common/Macros.h"
-#include "src/core/gpu/cl/ClCompileContext.h"
-#include "src/core/gpu/cl/IClKernel.h"
-
-namespace arm_compute
-{
-namespace opencl
-{
-namespace kernels
-{
-/** Interface for an element-wise operation kernel
- *
- * Element-wise operation is computed by:
- * @f[ dst(x,y) = OP(src1(x,y), src2(x,y))@f]
- *
- */
-class ClElementwiseKernel : public IClKernel
-{
-public:
- /** Default constructor */
- ClElementwiseKernel() = default;
- ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(ClElementwiseKernel);
-
- // Inherited methods overridden:
- void run_op(ITensorPack &tensors, const Window &window, ::cl::CommandQueue &queue) override;
-
-protected:
- /** The name of the operation */
- virtual std::string name() = 0;
-
- /** Configure kernel for a given list of arguments
- *
- * @param[in] src1 First source tensor info. Data types supported: U8/S8/QASYMM8/QASYMM8_SIGNED/U16/S16/F16/U32/S32/F32.
- * @param[in] src2 Second source tensor info. Data types supported: same as @p src1.
- * @param[in] dst Destination tensor info. Data types supported: same as @p src1.
- *
- * @return a pair of Status and Window
- */
- virtual std::pair<Status, Window> validate_and_configure_window(ITensorInfo &src1, ITensorInfo &src2, ITensorInfo &dst) = 0;
-
- /** Generate the build options for the specific kernel
- *
- * @return a CLBuildOptions struct
- */
- virtual CLBuildOptions generate_build_options(const ITensorInfo &src1, const ITensorInfo &src2, const ITensorInfo &dst) = 0;
-
- /** Generate the identifier for tuning
- *
- * @return a string
- */
- virtual std::string generate_id_for_tuning(const std::string &kernel_name, const ITensorInfo &src1, const ITensorInfo &dst) = 0;
-
- /** Common configure function for element-wise operators with no additional options (e.g., Div, Min, Max, SquaredDiff)
- *
- */
- void configure_common(ITensorInfo *src1, ITensorInfo *src2, ITensorInfo *dst);
- /** Common configure function for element-wise operators with no additional options (e.g., Div, Min, Max, SquaredDiff)
- *
- */
- void configure_common(const ClCompileContext &compile_context, ITensorInfo *src1, ITensorInfo *src2, ITensorInfo *dst);
-
- ActivationLayerInfo _act_info{};
-
-private:
- const ITensorInfo *_src1{ nullptr }; /**< Source tensor info 1 */
- const ITensorInfo *_src2{ nullptr }; /**< Source tensor info 2 */
- ITensorInfo *_dst{ nullptr }; /**< Destination tensor info */
-};
-
-class ClLogicalBinaryKernel : public ClElementwiseKernel
-{
-public:
- /** Default constructor */
- ClLogicalBinaryKernel() = default;
- ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(ClLogicalBinaryKernel);
- /** Function to configure kernel
- *
- * @param[in] compile_context The compile context to be used.
- * @param[in] op Logical binary operation to be executed.
- * @param[in] src1 First source tensor info. Data types supported: U8.
- * @param[in] src2 Second source tensor info. Data types supported: same as @p src1.
- * @param[in] dst Destination tensor info. Data types supported: same as @p src1.
- */
- void configure(const ClCompileContext &compile_context, LogicalOperation op, ITensorInfo *src1, ITensorInfo *src2, ITensorInfo *dst);
- /** Static function to check if the given configuration is valid for this kernel
- *
- * @param[in] op Logical binary operation to be executed.
- * @param[in] src1 First source tensor info. Data types supported: U8.
- * @param[in] src2 Second source tensor info. Data types supported: same as @p src1.
- * @param[in] dst Destination tensor info. Data types supported: same as @p src1.
- */
- static Status validate(LogicalOperation op, const ITensorInfo *src1, const ITensorInfo *src2, const ITensorInfo *dst);
-
-private:
- // Inherited methods overridden:
- std::string name() override;
- std::pair<Status, Window> validate_and_configure_window(ITensorInfo &src1, ITensorInfo &src2, ITensorInfo &dst) override;
- CLBuildOptions generate_build_options(const ITensorInfo &src1, const ITensorInfo &src2, const ITensorInfo &dst) override;
- std::string generate_id_for_tuning(const std::string &kernel_name, const ITensorInfo &src1, const ITensorInfo &dst) override;
-
- LogicalOperation _op{ LogicalOperation::Unknown };
-};
-
-/** Addition operation */
-class ClSaturatedArithmeticKernel : public ClElementwiseKernel
-{
-public:
- ClSaturatedArithmeticKernel() = default;
- ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(ClSaturatedArithmeticKernel);
- /** Function to configure kernel
- *
- * @param[in] compile_context The compile context to be used.
- * @param[in] op Arithmetic operation to be executed.
- * @param[in] input1 First tensor input info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/S32/F32.
- * @param[in] input2 Second tensor input info. Data types supported: Same as @p input1.
- * @param[in] output Output tensor info. Data types supported: Same as @p input1.
- * @param[in] policy Policy to use to handle overflow.
- * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
- */
- void configure(const ClCompileContext &compile_context, ArithmeticOperation op, ITensorInfo *input1, ITensorInfo *input2, ITensorInfo *output, const ConvertPolicy &policy,
- const ActivationLayerInfo &act_info = ActivationLayerInfo());
-
- /** Static function to check if given info will lead to a valid configuration of @ref ClSaturatedArithmeticKernel
- *
- * @param[in] op Arithmetic operation to be executed.
- * @param[in] input1 First tensor input info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/S32/F32.
- * @param[in] input2 Second tensor input info. Data types supported: Same as @p input1.
- * @param[in] output Output tensor info. Data types supported: Same as @p input1.
- * @param[in] policy Policy to use to handle overflow.
- * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
- *
- * @return a Status
- */
- static Status validate(ArithmeticOperation op, const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ConvertPolicy &policy,
- const ActivationLayerInfo &act_info = ActivationLayerInfo());
-
-protected:
- // Inherited methods overridden:
- std::string name() override;
- std::pair<Status, Window> validate_and_configure_window(ITensorInfo &input1, ITensorInfo &input2, ITensorInfo &output) override;
- CLBuildOptions generate_build_options(const ITensorInfo &input1, const ITensorInfo &input2, const ITensorInfo &output) override;
- std::string generate_id_for_tuning(const std::string &kernel_name, const ITensorInfo &input1, const ITensorInfo &output) override;
-
-private:
- ConvertPolicy _policy{};
- ArithmeticOperation _op{};
-};
-
-class ClArithmeticKernel : public ClElementwiseKernel
-{
-public:
- ClArithmeticKernel() = default;
- ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(ClArithmeticKernel);
-
- /** Function to configure kernel
- *
- * @param[in] compile_context The compile context to be used.
- * @param[in] op Arithmetic operation to be executed.
- * @param[in] src1 First source tensor info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/S32/F32.
- * @param[in] src2 Second source tensor info. Data types supported: same as @p src1.
- * @param[in] dst Destination tensor info. Data types supported: same as @p src1.
- * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
- */
- void configure(const ClCompileContext &compile_context, ArithmeticOperation op, ITensorInfo *src1, ITensorInfo *src2, ITensorInfo *dst,
- const ActivationLayerInfo &act_info = ActivationLayerInfo());
-
- /** Static function to check if given info will lead to a valid configuration of @ref ClArithmeticKernel
- *
- * @param[in] op Arithmetic operation to be executed.
- * @param[in] src1 First source tensor info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/S32/F32.
- * @param[in] src2 Second source tensor info. Data types supported: same as @p src1.
- * @param[in] dst Destination tensor info. Data types supported: same as @p src1.
- * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
- *
- * @return a Status
- */
- static Status validate(ArithmeticOperation op, const ITensorInfo *src1, const ITensorInfo *src2, const ITensorInfo *dst, const ActivationLayerInfo &act_info = ActivationLayerInfo());
-
-protected:
- // Inherited methods overridden:
- std::string name() override;
- std::pair<Status, Window> validate_and_configure_window(ITensorInfo &src1, ITensorInfo &src2, ITensorInfo &dst) override;
- CLBuildOptions generate_build_options(const ITensorInfo &src1, const ITensorInfo &src2, const ITensorInfo &dst) override;
- std::string generate_id_for_tuning(const std::string &kernel_name, const ITensorInfo &src1, const ITensorInfo &dst) override;
-
-private:
- ArithmeticOperation _op{};
-};
-} // namespace kernels
-} // namespace opencl
-} // namespace arm_compute
-#endif /* ARM_COMPUTE_CL_ELEMENTWISE_KERNEL_H */
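A minimal usage sketch of the interfaces above as they existed before this patch (the header is removed by this change): configuring a saturating S16 addition where the second input broadcasts along dimension 0. Shapes and data types are placeholder assumptions.

    #include "arm_compute/core/CL/CLKernelLibrary.h"
    #include "arm_compute/core/TensorInfo.h"
    #include "src/core/gpu/cl/kernels/ClElementwiseKernel.h"

    using namespace arm_compute;

    void configure_saturated_add_example()
    {
        TensorInfo in1(TensorShape(27U, 13U, 2U), 1, DataType::S16);
        TensorInfo in2(TensorShape(1U, 13U, 2U), 1, DataType::S16); // broadcast on dimension 0
        TensorInfo out(TensorShape(27U, 13U, 2U), 1, DataType::S16);

        opencl::kernels::ClSaturatedArithmeticKernel add;
        const Status status = opencl::kernels::ClSaturatedArithmeticKernel::validate(
            ArithmeticOperation::ADD, &in1, &in2, &out, ConvertPolicy::SATURATE);
        if(status.error_code() == ErrorCode::OK)
        {
            add.configure(CLKernelLibrary::get().get_compile_context(), ArithmeticOperation::ADD,
                          &in1, &in2, &out, ConvertPolicy::SATURATE);
        }
        // Execution then supplies in1/in2/out through an ITensorPack keyed with
        // ACL_SRC_0, ACL_SRC_1 and ACL_DST, which run_op() in the .cpp file consumes.
    }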
diff --git a/src/core/gpu/cl/kernels/ClElementwiseUnaryKernel.cpp b/src/core/gpu/cl/kernels/ClElementwiseUnaryKernel.cpp
deleted file mode 100644
index 5cbb3f2e38..0000000000
--- a/src/core/gpu/cl/kernels/ClElementwiseUnaryKernel.cpp
+++ /dev/null
@@ -1,163 +0,0 @@
-/*
- * Copyright (c) 2018-2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "src/core/gpu/cl/kernels/ClElementwiseUnaryKernel.h"
-
-#include "arm_compute/core/CL/CLHelpers.h"
-#include "arm_compute/core/CL/ICLTensor.h"
-#include "src/core/CL/CLValidate.h"
-#include "src/core/helpers/WindowHelpers.h"
-#include "support/Cast.h"
-#include "support/StringSupport.h"
-
-namespace arm_compute
-{
-namespace opencl
-{
-namespace kernels
-{
-namespace
-{
-Status validate_arguments(const ITensorInfo &src, const ITensorInfo &dst, const ElementWiseUnary op)
-{
- ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(&src);
- if(op == ElementWiseUnary::LOGICAL_NOT)
- {
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&src, 1, DataType::U8);
- }
- else if(op == ElementWiseUnary::NEG)
- {
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&src, 1, DataType::F16, DataType::F32, DataType::S32);
- }
- else
- {
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&src, 1, DataType::F16, DataType::F32);
- }
-
- // Validate in case of configured dst
- if(dst.total_size() > 0)
- {
- ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(&dst);
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(&src, &dst);
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(&src, &dst);
- }
-
- return Status{};
-}
-} // namespace
-
-void ClElementWiseUnaryKernel::configure(const CLCompileContext &compile_context, const ITensorInfo *src, ITensorInfo *dst, const ElementWiseUnary &op)
-{
- ARM_COMPUTE_ERROR_ON_NULLPTR(src, dst);
-
- auto padding_info = get_padding_info({ src, dst });
-
- ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(*src, *dst, op));
-
- const std::string kernel_name = "elementwise_unary";
- const int vec_size_x = 16 / dst->element_size();
- const int dst_width_x = dst->tensor_shape().x();
- const bool multi_access_x = (dst_width_x / vec_size_x > 0);
-
- // Set kernel build options
- CLBuildOptions build_opts;
- build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(src->data_type()));
- build_opts.add_option_if(multi_access_x, "-DVEC_SIZE=" + support::cpp11::to_string(vec_size_x));
- build_opts.add_option_if(multi_access_x, "-DLAST_ACCESSED_X=" + support::cpp11::to_string(std::max<int>(dst_width_x - vec_size_x, 0)));
- switch(op)
- {
- case ElementWiseUnary::RSQRT:
- build_opts.add_option("-DOPERATION=rsqrt_op");
- break;
- case ElementWiseUnary::EXP:
- build_opts.add_option("-DOPERATION=exp_op");
- break;
- case ElementWiseUnary::NEG:
- build_opts.add_option("-DOPERATION=neg_op");
- break;
- case ElementWiseUnary::SIN:
- build_opts.add_option("-DOPERATION=sin_op");
- break;
- case ElementWiseUnary::ABS:
- build_opts.add_option("-DOPERATION=fabs_op");
- break;
- case ElementWiseUnary::LOG:
- build_opts.add_option("-DOPERATION=natural_log_op");
- break;
- case ElementWiseUnary::ROUND:
- build_opts.add_option("-DOPERATION=round_op");
- break;
- case ElementWiseUnary::LOGICAL_NOT:
- build_opts.add_option("-DOPERATION=logical_not_op");
- break;
- default:
- ARM_COMPUTE_ERROR("Not implemented");
- }
-
- // Create kernel
- _kernel = create_kernel(compile_context, kernel_name, build_opts.options());
-
- // Configure kernel window
- Window win = calculate_max_window(*dst);
- if(multi_access_x)
- {
- win.set(Window::DimX,
- Window::Dimension(win.x().start(), ceil_to_multiple(win.x().end(), vec_size_x), vec_size_x));
- }
- ICLKernel::configure_internal(win);
-
- ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
-}
-
-Status ClElementWiseUnaryKernel::validate(const ITensorInfo *src, const ITensorInfo *dst, const ElementWiseUnary &op)
-{
- ARM_COMPUTE_UNUSED(op);
- ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, dst);
- ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(*src, *dst, op));
-
- return Status{};
-}
-
-void ClElementWiseUnaryKernel::run_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue)
-{
- ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
- ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window);
-
- Window collapsed = window.collapse_if_possible(ICLKernel::window(), Window::DimZ);
- Window slice = collapsed.first_slice_window_3D();
-
- const auto src = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC));
- auto dst = utils::cast::polymorphic_downcast<ICLTensor *>(tensors.get_tensor(TensorType::ACL_DST));
-
- do
- {
- unsigned int idx = 0;
- add_3D_tensor_argument(idx, src, slice);
- add_3D_tensor_argument(idx, dst, slice);
- enqueue(queue, *this, slice, lws_hint());
- }
- while(collapsed.slide_window_slice_3D(slice));
-}
-} // namespace kernels
-} // namespace opencl
-} // namespace arm_compute
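The multi_access_x path above rounds the window up to a multiple of the vector size and passes LAST_ACCESSED_X so that the final vector load is shifted back in bounds. A small standalone illustration with made-up values:

    #include <algorithm>
    #include <cstdio>

    int main()
    {
        const int dst_width_x = 10; // innermost dimension of the destination
        const int vec_size_x  = 4;  // e.g. 16 bytes / 4-byte elements

        const int last_accessed_x = std::max(dst_width_x - vec_size_x, 0); // 6
        // The window iterates x = 0, 4, 8; the work-item at x = 8 clamps its access to
        // start at LAST_ACCESSED_X = 6, covering elements 6..9 instead of reading past
        // the end of the tensor.
        std::printf("LAST_ACCESSED_X=%d\n", last_accessed_x);
    }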
diff --git a/src/core/gpu/cl/kernels/ClElementwiseUnaryKernel.h b/src/core/gpu/cl/kernels/ClElementwiseUnaryKernel.h
deleted file mode 100644
index 7e5edef3ee..0000000000
--- a/src/core/gpu/cl/kernels/ClElementwiseUnaryKernel.h
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- * Copyright (c) 2018-2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_CL_ELEMENTWISE_UNARY_KERNEL_H
-#define ARM_COMPUTE_CL_ELEMENTWISE_UNARY_KERNEL_H
-
-#include "src/core/common/Macros.h"
-#include "src/core/gpu/cl/ClCompileContext.h"
-#include "src/core/gpu/cl/IClKernel.h"
-
-namespace arm_compute
-{
-namespace opencl
-{
-namespace kernels
-{
-/** Interface for the elementwise unary operator */
-class ClElementWiseUnaryKernel : public IClKernel
-{
-public:
- ClElementWiseUnaryKernel() = default;
- ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(ClElementWiseUnaryKernel);
- /** Initialise the kernel's src and dst.
- *
- * @param[in] compile_context The compile context to be used.
- * @param[in] src First source tensor info. Data types supported: F16/F32.
- * @param[out] dst Destination tensor info. Data types supported: same as @p src.
- * @param[in] op Element wise unary operation to perform.
- */
- void configure(const CLCompileContext &compile_context, const ITensorInfo *src, ITensorInfo *dst, const ElementWiseUnary &op);
- /** Static function to check if given info will lead to a valid configuration of @ref ClElementWiseUnaryKernel
- *
- * @param[in] src First source tensor info. Data types supported: F16/F32.
- * @param[in] dst Destination tensor info. Data types supported: same as @p src.
- * @param[in] op Element wise unary operation to perform.
- *
- * @return a status
- */
- static Status validate(const ITensorInfo *src, const ITensorInfo *dst, const ElementWiseUnary &op);
-
- // Inherited methods overridden:
- void run_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue) override;
-};
-} // namespace kernels
-} // namespace opencl
-} // namespace arm_compute
-#endif /* ARM_COMPUTE_CL_ELEMENTWISE_UNARY_KERNEL_H */
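A minimal usage sketch of the interface above as it existed before this patch (the header is removed by this change), configuring an RSQRT over an F32 tensor. The shape is a placeholder assumption.

    #include "arm_compute/core/CL/CLKernelLibrary.h"
    #include "arm_compute/core/TensorInfo.h"
    #include "src/core/gpu/cl/kernels/ClElementwiseUnaryKernel.h"

    using namespace arm_compute;

    void configure_rsqrt_example()
    {
        TensorInfo src(TensorShape(32U, 16U), 1, DataType::F32);
        TensorInfo dst(TensorShape(32U, 16U), 1, DataType::F32);

        opencl::kernels::ClElementWiseUnaryKernel rsqrt;
        const Status status = opencl::kernels::ClElementWiseUnaryKernel::validate(&src, &dst, ElementWiseUnary::RSQRT);
        if(status.error_code() == ErrorCode::OK)
        {
            rsqrt.configure(CLKernelLibrary::get().get_compile_context(), &src, &dst, ElementWiseUnary::RSQRT);
        }
        // The tensors are then passed at run time through an ITensorPack keyed with
        // ACL_SRC and ACL_DST, matching run_op() in the .cpp file.
    }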
diff --git a/src/core/gpu/cl/kernels/ClFillKernel.cpp b/src/core/gpu/cl/kernels/ClFillKernel.cpp
deleted file mode 100644
index b194ee549b..0000000000
--- a/src/core/gpu/cl/kernels/ClFillKernel.cpp
+++ /dev/null
@@ -1,122 +0,0 @@
-/*
- * Copyright (c) 2018-2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "src/core/gpu/cl/kernels/ClFillKernel.h"
-
-#include "arm_compute/core/CL/CLHelpers.h"
-#include "arm_compute/core/CL/CLKernelLibrary.h"
-#include "arm_compute/core/CL/ICLTensor.h"
-#include "arm_compute/core/Helpers.h"
-#include "arm_compute/core/TensorInfo.h"
-#include "arm_compute/core/Utils.h"
-#include "arm_compute/core/Validate.h"
-#include "src/core/CL/CLValidate.h"
-#include "src/core/helpers/AutoConfiguration.h"
-#include "src/core/helpers/WindowHelpers.h"
-#include "support/Cast.h"
-#include "support/StringSupport.h"
-
-namespace arm_compute
-{
-namespace opencl
-{
-namespace kernels
-{
-void ClFillKernel::configure(ITensorInfo *tensor,
- const PixelValue &constant_value,
- Window *window)
-{
- configure(CLKernelLibrary::get().get_compile_context(), tensor, constant_value, window);
-}
-
-void ClFillKernel::configure(const CLCompileContext &compile_context, ITensorInfo *tensor,
- const PixelValue &constant_value,
- Window *window)
-{
- ARM_COMPUTE_ERROR_ON_NULLPTR(tensor);
- ARM_COMPUTE_ERROR_THROW_ON(validate(tensor, constant_value, window));
-
- const DataType data_type = tensor->data_type();
- const int vec_size_x = 16 / tensor->element_size();
-
- // Create and update the window (if needed)
- _full_window = calculate_max_window(*tensor);
- Window win = _full_window;
- if(window != nullptr)
- {
- ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(win, *window);
- win = *window;
- }
-
- const int output_width_x = win.num_iterations(0);
- const bool multi_access_x = output_width_x >= vec_size_x;
- const bool remainder_x = output_width_x % vec_size_x > 0;
-
- if(multi_access_x)
- {
- win.set(Window::DimX, Window::Dimension(win.x().start(), ceil_to_multiple(win.x().end(), vec_size_x), vec_size_x));
- }
- ICLKernel::configure_internal(win);
-
- // Create kernel
- CLBuildOptions build_opts;
- build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(data_type));
- build_opts.add_option("-DCONSTANT_VALUE=" + string_from_pixel_value(constant_value, data_type));
- build_opts.add_option_if(multi_access_x, "-DVEC_SIZE=" + support::cpp11::to_string(vec_size_x));
- build_opts.add_option_if(multi_access_x && remainder_x, "-DLAST_ACCESSED_X=" + support::cpp11::to_string(std::max<int>(output_width_x - vec_size_x, 0)));
- _kernel = create_kernel(compile_context, "memset", build_opts.options());
-}
-
-Status ClFillKernel::validate(const ITensorInfo *tensor, const PixelValue &constant_value, Window *window)
-{
- ARM_COMPUTE_UNUSED(tensor);
- ARM_COMPUTE_UNUSED(constant_value);
- if(window != nullptr)
- {
- ARM_COMPUTE_RETURN_ERROR_ON(window->x().step() != 1);
- }
- return Status{};
-}
-
-void ClFillKernel::run_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue)
-{
- ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
- ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window);
-
- const auto tensor = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC));
-
- // Collapse all the batches on the third dimension
- Window collapsed = window.collapse_if_possible(_full_window, Window::DimZ);
- Window slice = collapsed.first_slice_window_3D();
-
- do
- {
- unsigned int idx = 0;
- add_3D_tensor_argument(idx, tensor, slice);
- enqueue(queue, *this, slice, lws_hint());
- }
- while(collapsed.slide_window_slice_3D(slice));
-}
-} // namespace kernels
-} // namespace opencl
-} // namespace arm_compute
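Note on the build options above: the fill value itself is baked into the program as the CONSTANT_VALUE macro. The snippet below is an illustration only, showing how a float value could be rendered into those options; the library uses string_from_pixel_value() so every supported data type is covered.

    #include <cstdio>
    #include <string>

    int main()
    {
        const float constant_value = 0.5f;
        const int   vec_size_x     = 16 / static_cast<int>(sizeof(float)); // 4 elements per vector

        const std::string opts = std::string("-DDATA_TYPE=float") +
                                 " -DCONSTANT_VALUE=" + std::to_string(constant_value) +
                                 " -DVEC_SIZE=" + std::to_string(vec_size_x);
        std::printf("%s\n", opts.c_str());
    }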
diff --git a/src/core/gpu/cl/kernels/ClFillKernel.h b/src/core/gpu/cl/kernels/ClFillKernel.h
deleted file mode 100644
index b439eac0de..0000000000
--- a/src/core/gpu/cl/kernels/ClFillKernel.h
+++ /dev/null
@@ -1,77 +0,0 @@
-/*
- * Copyright (c) 2018-2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_CL_FILL_KERNEL_H
-#define ARM_COMPUTE_CL_FILL_KERNEL_H
-
-#include "src/core/common/Macros.h"
-#include "src/core/gpu/cl/ClCompileContext.h"
-#include "src/core/gpu/cl/IClKernel.h"
-
-namespace arm_compute
-{
-namespace opencl
-{
-namespace kernels
-{
-/** Interface for filling the planes of a tensor */
-class ClFillKernel : public IClKernel
-{
-public:
- ClFillKernel() = default;
- ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(ClFillKernel);
- /** Initialise the kernel's tensor and filling value
- *
- * @param[in,out] tensor Input tensor info. Supported data types: All.
- * @param[in] constant_value The value used to fill the planes of the tensor
- * @param[in] window Window to be used in case setting only part of a tensor. Default is nullptr.
- */
- void configure(ITensorInfo *tensor, const PixelValue &constant_value, Window *window = nullptr);
- /** Initialise the kernel's tensor and filling value
- *
- * @param[in] compile_context The compile context to be used.
- * @param[in,out] tensor Input tensor info. Supported data types: All.
- * @param[in] constant_value The value used to fill the planes of the tensor
- * @param[in] window Window to be used in case setting only part of a tensor. Default is nullptr.
- */
- void configure(const CLCompileContext &compile_context, ITensorInfo *tensor, const PixelValue &constant_value, Window *window = nullptr);
- /** Static function to check if given info will lead to a valid configuration of @ref ClFillKernel
- *
- * @param[in] tensor Source tensor info. Data types supported: All.
- * @param[in] constant_value The value used to fill the planes of the tensor
- * @param[in] window Window to be used in case setting only part of a tensor. Default is nullptr.
- *
- * @return a status
- */
- static Status validate(const ITensorInfo *tensor, const PixelValue &constant_value, Window *window = nullptr);
-
- // Inherited methods overridden:
- void run_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue) override;
-
-private:
- Window _full_window{};
-};
-} // namespace kernels
-} // namespace opencl
-} // namespace arm_compute
-#endif /* ARM_COMPUTE_CL_FILL_KERNEL_H */
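
Reviewer note: a minimal usage sketch of the interface deleted above. It assumes the pre-refactor header path src/core/gpu/cl/kernels/ClFillKernel.h is still on the include path and that CLScheduler's default context/queue is acceptable; the function name fill_example and the tensor shape are illustrative only.

    #include "arm_compute/runtime/CL/CLScheduler.h"
    #include "arm_compute/runtime/CL/CLTensor.h"
    #include "src/core/gpu/cl/kernels/ClFillKernel.h"

    using namespace arm_compute;

    void fill_example()
    {
        CLScheduler::get().default_init(); // assumption: the default CL context/queue is sufficient

        CLTensor tensor;
        tensor.allocator()->init(TensorInfo(TensorShape(32U, 16U), 1, DataType::F32));
        tensor.allocator()->allocate();

        opencl::kernels::ClFillKernel fill;
        fill.configure(tensor.info(), PixelValue()); // zero-fill the whole tensor, no sub-window

        ITensorPack pack;
        pack.add_tensor(TensorType::ACL_SRC, &tensor);
        fill.run_op(pack, fill.window(), CLScheduler::get().queue());
    }
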
diff --git a/src/core/gpu/cl/kernels/ClFloorKernel.cpp b/src/core/gpu/cl/kernels/ClFloorKernel.cpp
deleted file mode 100644
index 7296d40eaf..0000000000
--- a/src/core/gpu/cl/kernels/ClFloorKernel.cpp
+++ /dev/null
@@ -1,119 +0,0 @@
-/*
- * Copyright (c) 2017-2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "src/core/gpu/cl/kernels/ClFloorKernel.h"
-
-#include "arm_compute/core/CL/CLHelpers.h"
-#include "arm_compute/core/CL/CLKernelLibrary.h"
-#include "arm_compute/core/CL/ICLTensor.h"
-#include "arm_compute/core/Helpers.h"
-#include "arm_compute/core/TensorInfo.h"
-#include "arm_compute/core/Utils.h"
-#include "arm_compute/core/Validate.h"
-#include "src/core/CL/CLValidate.h"
-#include "src/core/helpers/AutoConfiguration.h"
-#include "src/core/helpers/WindowHelpers.h"
-#include "support/Cast.h"
-#include "support/StringSupport.h"
-
-namespace arm_compute
-{
-namespace opencl
-{
-namespace kernels
-{
-namespace
-{
-Status validate_arguments(const ITensorInfo *src, const ITensorInfo *dst)
-{
- ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, dst);
- ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(src);
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src, 1, DataType::F16, DataType::F32);
-
- // Validate in case of configured output
- if(dst->total_size() > 0)
- {
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src, dst);
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(src, dst);
- }
-
- return Status{};
-}
-} // namespace
-
-void ClFloorKernel::configure(const ClCompileContext &compile_context, const ITensorInfo *src, ITensorInfo *dst)
-{
- ARM_COMPUTE_ERROR_ON_NULLPTR(src, dst);
-
- // Auto initialize output
- auto_init_if_empty(*dst, src->tensor_shape(), 1, src->data_type());
-
- // Validate
- ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(src, dst));
- auto padding_info = get_padding_info({ src, dst });
-
- const unsigned int vec_size_x = adjust_vec_size(max_cl_vector_width / src->element_size(), src->dimension(0));
- const int vec_size_x_leftovers = src->dimension(0) % vec_size_x;
- CLBuildOptions build_opts;
- build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(src->data_type()));
- build_opts.add_option("-DVEC_SIZE=" + support::cpp11::to_string(vec_size_x));
- build_opts.add_option("-DVEC_SIZE_LEFTOVER=" + support::cpp11::to_string(vec_size_x_leftovers));
-
- // Create kernel
- _kernel = create_kernel(compile_context, "floor_layer", build_opts.options());
-
- // Configure kernel window
- Window win = calculate_max_window(*src, Steps(vec_size_x));
- IClKernel::configure_internal(win);
- ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
-}
-
-Status ClFloorKernel::validate(const ITensorInfo *src, const ITensorInfo *dst)
-{
- ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(src, dst));
- return Status{};
-}
-
-void ClFloorKernel::run_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue)
-{
- ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
- ARM_COMPUTE_ERROR_ON_MISMATCHING_WINDOWS(IClKernel::window(), window);
-
- const auto src = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC));
- auto dst = utils::cast::polymorphic_downcast<ICLTensor *>(tensors.get_tensor(TensorType::ACL_DST));
-
- Window collapsed = window.collapse_if_possible(IClKernel::window(), Window::DimZ);
- Window slice = collapsed.first_slice_window_3D();
-
- do
- {
- unsigned int idx = 0;
- add_3D_tensor_argument(idx, src, slice);
- add_3D_tensor_argument(idx, dst, slice);
- enqueue(queue, *this, slice, lws_hint());
- }
- while(collapsed.slide_window_slice_3D(slice));
-}
-} // namespace kernels
-} // namespace opencl
-} // namespace arm_compute
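
Reviewer note: standalone illustration (hypothetical sizes) of the leftover handling in the removed ClFloorKernel::configure above. Unlike the fill kernel, no window padding is requested; the ragged edge is handled inside the OpenCL kernel via VEC_SIZE_LEFTOVER. The min() below is a simplified stand-in for arm_compute's adjust_vec_size().

    #include <algorithm>
    #include <cassert>

    int main()
    {
        const unsigned int element_size        = 4U;  // hypothetical: F32
        const unsigned int max_cl_vector_width = 16U; // bytes, as used in the deleted code
        const unsigned int dim_x               = 7U;  // src->dimension(0)

        // Simplified stand-in for adjust_vec_size(): never read past the row length.
        const unsigned int vec_size_x = std::min(max_cl_vector_width / element_size, dim_x); // 4
        const unsigned int leftover_x = dim_x % vec_size_x;                                  // 3 -> -DVEC_SIZE_LEFTOVER=3

        assert(vec_size_x == 4U && leftover_x == 3U);
        return 0;
    }
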
diff --git a/src/core/gpu/cl/kernels/ClFloorKernel.h b/src/core/gpu/cl/kernels/ClFloorKernel.h
deleted file mode 100644
index 646dfb30d8..0000000000
--- a/src/core/gpu/cl/kernels/ClFloorKernel.h
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- * Copyright (c) 2017-2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_CL_FLOOR_KERNEL_H
-#define ARM_COMPUTE_CL_FLOOR_KERNEL_H
-
-#include "src/core/common/Macros.h"
-#include "src/core/gpu/cl/ClCompileContext.h"
-#include "src/core/gpu/cl/IClKernel.h"
-
-namespace arm_compute
-{
-namespace opencl
-{
-namespace kernels
-{
-/** OpenCL kernel to perform a floor operation */
-class ClFloorKernel : public IClKernel
-{
-public:
- ClFloorKernel() = default;
- ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(ClFloorKernel);
- /** Configure kernel for a given list of arguments
- *
- * @param[in] compile_context The compile context to be used.
- * @param[in] src Source tensor info. Data type supported: F16/F32.
- * @param[out] dst Destination tensor info. Same as @p src
- */
- void configure(const ClCompileContext &compile_context, const ITensorInfo *src, ITensorInfo *dst);
-
- /** Static function to check if given info will lead to a valid configuration of @ref ClFloorKernel
- *
- * @param[in] src Source tensor info. Data type supported: F16/F32.
- * @param[in] dst Destination tensor info. Same as @p src
- *
- * @return a status
- */
- static Status validate(const ITensorInfo *src, const ITensorInfo *dst);
-
- // Inherited methods overridden:
- void run_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue) override;
-};
-} // namespace kernels
-} // namespace opencl
-} // namespace arm_compute
-#endif /* ARM_COMPUTE_CL_FLOOR_KERNEL_H */
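
Reviewer note: a minimal usage sketch of the ClFloorKernel interface deleted above, mirroring the fill example earlier but with separate source/destination tensors and a validate-before-configure check. It assumes the pre-refactor headers are still reachable and that a CL context/queue has been initialised; names and shapes are illustrative.

    #include "arm_compute/core/CL/CLKernelLibrary.h"
    #include "arm_compute/runtime/CL/CLScheduler.h"
    #include "arm_compute/runtime/CL/CLTensor.h"
    #include "src/core/gpu/cl/kernels/ClFloorKernel.h"

    using namespace arm_compute;

    void floor_example()
    {
        CLScheduler::get().default_init();

        CLTensor src, dst;
        src.allocator()->init(TensorInfo(TensorShape(8U, 4U), 1, DataType::F32));
        dst.allocator()->init(TensorInfo(TensorShape(8U, 4U), 1, DataType::F32));
        src.allocator()->allocate();
        dst.allocator()->allocate();

        opencl::kernels::ClFloorKernel floor_kernel;
        const Status status = opencl::kernels::ClFloorKernel::validate(src.info(), dst.info());
        if(status.error_code() == ErrorCode::OK)
        {
            floor_kernel.configure(CLKernelLibrary::get().get_compile_context(), src.info(), dst.info());

            ITensorPack pack;
            pack.add_const_tensor(TensorType::ACL_SRC, &src);
            pack.add_tensor(TensorType::ACL_DST, &dst);
            floor_kernel.run_op(pack, floor_kernel.window(), CLScheduler::get().queue());
        }
    }
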
diff --git a/src/core/gpu/cl/kernels/ClGemmMatrixMultiplyKernel.cpp b/src/core/gpu/cl/kernels/ClGemmMatrixMultiplyKernel.cpp
deleted file mode 100644
index 817a105b14..0000000000
--- a/src/core/gpu/cl/kernels/ClGemmMatrixMultiplyKernel.cpp
+++ /dev/null
@@ -1,533 +0,0 @@
-/*
- * Copyright (c) 2017-2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "src/core/gpu/cl/kernels/ClGemmMatrixMultiplyKernel.h"
-
-#include "arm_compute/core/CL/CLHelpers.h"
-#include "arm_compute/core/CL/CLKernelLibrary.h"
-#include "arm_compute/core/CL/ICLTensor.h"
-#include "arm_compute/core/CL/OpenCL.h"
-#include "arm_compute/core/Helpers.h"
-#include "arm_compute/core/TensorInfo.h"
-#include "arm_compute/core/Utils.h"
-#include "arm_compute/core/utils/misc/ShapeCalculator.h"
-#include "src/core/AccessWindowStatic.h"
-#include "src/core/CL/CLValidate.h"
-#include "src/core/helpers/AutoConfiguration.h"
-#include "src/core/helpers/WindowHelpers.h"
-#include "src/core/utils/helpers/float_ops.h"
-#include "support/Cast.h"
-#include "support/StringSupport.h"
-
-namespace arm_compute
-{
-namespace opencl
-{
-namespace kernels
-{
-namespace
-{
-using ElementsProcessed = Steps;
-
-inline Status validate_arguments(const ITensorInfo *src0, const ITensorInfo *src1, const ITensorInfo *src2, const ITensorInfo *dst, float beta,
- bool is_interleaved_transposed, const GEMMReshapeInfo &reshape_info, bool fp_mixed_precision)
-{
- ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src0, src1, dst);
- ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(src0);
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src0, 1, DataType::F16, DataType::F32);
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src0, src1);
- ARM_COMPUTE_RETURN_ERROR_ON_MSG((fp_mixed_precision && (src0->data_type() != DataType::F16)), "Mixed precision floating point is supported only for F16 data");
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(src0->num_dimensions() > 4, "The number of dimensions for the matrix A must be <= 4");
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(src1->num_dimensions() > 3, "The number of dimensions for the matrix B must be <= 3");
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(is_interleaved_transposed && reshape_info.reinterpret_input_as_3d(), "The input tensor cannot be reinterpreted as 3D if is_interleaved_transposed is true");
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(src1->num_dimensions() > 2 && reshape_info.reinterpret_input_as_3d(), "The src1 tensor cannot have more than 2 dimensions if src0 has to be reinterpreted as 3D");
- ARM_COMPUTE_RETURN_ERROR_ON_MSG((reshape_info.reinterpret_input_as_3d() || reshape_info.depth_output_gemm3d() != 0) && (src2 != nullptr)
- && (!reshape_info.broadcast_bias()),
- "Bias addition only supported with broadcast mode in case the input or dst has to be reinterpreted as 3D");
-
- if(!is_interleaved_transposed)
- {
- ARM_COMPUTE_RETURN_ERROR_ON(src0->dimension(0) != src1->dimension(1));
-
- if(src2 != nullptr && !(helpers::float_ops::is_zero(beta)))
- {
- const unsigned int m = reshape_info.reinterpret_input_as_3d() ? src0->dimension(1) * src0->dimension(2) : src0->dimension(1);
- const unsigned int n = src1->dimension(0);
- const unsigned int src2_dim0 = src2->dimension(0);
- const unsigned int src2_dim1 = src2->dimension(1);
-
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src2, src1);
- if(reshape_info.broadcast_bias())
- {
- ARM_COMPUTE_RETURN_ERROR_ON_MSG((src2_dim1 != 1 || src2_dim0 != n), "Incorrect dimension of bias matrix which is to be broadcasted");
- }
- else
- {
- ARM_COMPUTE_RETURN_ERROR_ON_MSG((src2_dim0 != n || src2_dim1 != m), "Incorrect dimension of bias matrix");
- }
- }
- }
- else
- {
- GEMMRHSMatrixInfo rhs_info;
- GEMMLHSMatrixInfo lhs_info;
- const auto m = static_cast<unsigned int>(reshape_info.m());
- const auto n = static_cast<unsigned int>(reshape_info.n());
- const int k = reshape_info.k();
- const int mult_transpose1xW_width = reshape_info.mult_transpose1xW_width();
- const int mult_interleave4x4_height = reshape_info.mult_interleave4x4_height();
- rhs_info.n0 = max_cl_vector_width / src1->element_size();
- rhs_info.k0 = 1;
- rhs_info.h0 = mult_transpose1xW_width;
- rhs_info.interleave = false;
- rhs_info.transpose = false;
- lhs_info.m0 = 4;
- lhs_info.k0 = 4;
- lhs_info.v0 = mult_interleave4x4_height;
- lhs_info.interleave = true;
- lhs_info.transpose = true;
-
- TensorShape tensor_shape0{ src0->tensor_shape() };
- tensor_shape0.set(0, k);
- tensor_shape0.set(1, m);
-
- TensorShape tensor_shape1{ src1->tensor_shape() };
- tensor_shape1.set(0, n);
- tensor_shape1.set(1, k);
-
- const TensorInfo tensor_info0 = src0->clone()->set_tensor_shape(tensor_shape0);
- const TensorInfo tensor_info1 = src1->clone()->set_tensor_shape(tensor_shape1);
-
- const TensorInfo tensor_info_reshaped0 = src0->clone()->set_tensor_shape(misc::shape_calculator::compute_lhs_reshaped_shape(tensor_info0, lhs_info));
- const TensorInfo tensor_info_reshaped1 = src1->clone()->set_tensor_shape(misc::shape_calculator::compute_rhs_reshaped_shape(tensor_info1, rhs_info));
-
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(src0, &tensor_info_reshaped0);
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(src1, &tensor_info_reshaped1);
-
- if(src2 != nullptr && !(helpers::float_ops::is_zero(beta)))
- {
- const unsigned int src2_dim0 = src2->dimension(0);
- const unsigned int src2_dim1 = src2->dimension(1);
-
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src2, src1);
- if(reshape_info.broadcast_bias())
- {
- ARM_COMPUTE_RETURN_ERROR_ON_MSG((src2_dim1 != 1 || src2_dim0 != n), "Incorrect dimension of bias matrix which is to be broadcasted");
- }
- else
- {
- ARM_COMPUTE_RETURN_ERROR_ON_MSG((src2_dim0 != n || src2_dim1 != m), "Incorrect dimension of bias matrix");
- }
- }
- }
-
- if(dst->total_size() != 0)
- {
- const TensorInfo tensor_info_dst = dst->clone()->set_tensor_shape(misc::shape_calculator::compute_mm_shape(*src0, *src1, is_interleaved_transposed, reshape_info));
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(dst, &tensor_info_dst);
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src0, dst);
- }
-
- return Status{};
-}
-
-inline std::pair<Status, Window> validate_and_configure_window(ITensorInfo *src0, ITensorInfo *src1, ITensorInfo *src2, ITensorInfo *dst,
- float beta, bool is_interleaved_transposed, const GEMMReshapeInfo &reshape_info, GPUTarget gpu_target,
- ElementsProcessed &num_elements_processed)
-{
- ARM_COMPUTE_UNUSED(beta);
- bool window_changed = false;
- Window win{};
- Window win_out{};
-
- const DataType data_type = src0->data_type();
- unsigned int &num_elems_processed_per_iteration_x = num_elements_processed[0];
- unsigned int &num_elems_processed_per_iteration_y = num_elements_processed[1];
- bool reinterpret_input_as_3d = reshape_info.reinterpret_input_as_3d();
- bool reinterpret_output_as_3d = (reshape_info.depth_output_gemm3d() != 0);
-
- // In case both input and dst have to be reinterpreted as 3D tensors,
- // force reinterpret_input_as_3d and reinterpret_output_as_3d to be false.
- if(reinterpret_input_as_3d == reinterpret_output_as_3d)
- {
- reinterpret_input_as_3d = false;
- reinterpret_output_as_3d = false;
- }
-
- // dst tensor auto initialization if not yet initialized
- auto_init_if_empty(*dst, src0->clone()->set_tensor_shape(misc::shape_calculator::compute_mm_shape(*src0, *src1, is_interleaved_transposed, reshape_info)));
-
- TensorInfo tmp_info(*dst);
-
- if(reinterpret_output_as_3d)
- {
- // Since the dst tensor has to be reinterpreted as 3D and the execute window is based on a 2D GEMM,
- // the window needs to be constructed on the 2D collapsed version of the tensor
- TensorShape tmp_shape(dst->tensor_shape());
- tmp_shape.collapse(2U, 1U);
- tmp_info.set_tensor_shape(tmp_shape);
- }
-
- if(is_interleaved_transposed)
- {
- // reinterpret_input_as_3d is not supported if is_interleaved_transposed is set
- ARM_COMPUTE_ERROR_ON(reshape_info.reinterpret_input_as_3d());
-
- // Configure kernel window
- num_elems_processed_per_iteration_x = max_cl_vector_width / data_size_from_type(data_type);
- num_elems_processed_per_iteration_y = 4;
-
- win = calculate_max_window(tmp_info, Steps(num_elems_processed_per_iteration_x, num_elems_processed_per_iteration_y));
- if(src2 != nullptr)
- {
- const int bias_processed_per_iteration_x = num_elems_processed_per_iteration_x;
-
- const int bias_processed_per_iteration_y = reshape_info.broadcast_bias() ? 1 : num_elems_processed_per_iteration_y;
-
- AccessWindowStatic src2_access(src2, 0, 0,
- ceil_to_multiple(src2->dimension(0), bias_processed_per_iteration_x),
- ceil_to_multiple(src2->dimension(1), bias_processed_per_iteration_y));
-
- window_changed = update_window_and_padding(win, src2_access); // window used by the execute_window_loop
- }
- }
- else // The input tensors have not been reshaped
- {
- // Special case for 1xN, 2xN, 3xN and 4xN src0 tensor. num_elems_processed_per_iteration_x is set up for the default case.
- num_elems_processed_per_iteration_x = max_cl_vector_width / data_size_from_type(data_type);
- num_elems_processed_per_iteration_y = std::min(static_cast<int>(dst->dimension(1)), 4);
-
- // Create kernels according to the architecture, data type and input size.
- GPUTarget arch_target = get_arch_from_target(gpu_target);
- if(arch_target == GPUTarget::BIFROST && data_type == DataType::F32)
- {
- num_elems_processed_per_iteration_x = (src1->dimension(0) <= 1000 && src0->num_dimensions() == 1) ? 2 : 4;
- }
-
- // Configure window
- win = calculate_max_window(tmp_info, Steps(num_elems_processed_per_iteration_x, num_elems_processed_per_iteration_y));
- win_out = calculate_max_window(*dst, Steps(num_elems_processed_per_iteration_x, num_elems_processed_per_iteration_y));
- AccessWindowStatic src0_access(src0, 0, 0, src0->dimension(0), src0->dimension(1));
- AccessWindowStatic src1_access(src1, 0, 0, ceil_to_multiple(src1->dimension(0), num_elems_processed_per_iteration_x), src1->dimension(1));
- AccessWindowStatic dst_access(dst, 0, 0,
- dst->dimension(0),
- dst->dimension(1));
-
- if(src2 != nullptr)
- {
- const int bias_processed_per_iteration_x = num_elems_processed_per_iteration_x;
-
- AccessWindowStatic src2_access(src2, 0, 0,
- ceil_to_multiple(src2->dimension(0), bias_processed_per_iteration_x),
- src2->dimension(1));
-
- window_changed = update_window_and_padding(win, src0_access, src1_access, src2_access) || // window used by the execute_window_loop
- update_window_and_padding(win_out, dst_access); // window used to update the padding requirements of dst tensor
- }
- else
- {
- window_changed = update_window_and_padding(win, src0_access, src1_access) || // window used by the execute_window_loop
- update_window_and_padding(win_out, dst_access); // window used to update the padding requirements of dst tensor
- }
- }
-
- // Collapse along the Z direction
- // This collapse needs to be here in order to tune the Z dimension of LWS
- Window collapsed = win;
- const unsigned int dimension_to_collapse = std::min(static_cast<unsigned int>(dst->num_dimensions()), 2u);
- collapsed = win.collapse(win, dimension_to_collapse);
-
- Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
- return std::make_pair(err, collapsed);
-}
-} // namespace
-
-void ClGemmMatrixMultiplyKernel::configure(const CLCompileContext &compile_context, ITensorInfo *src0, ITensorInfo *src1, ITensorInfo *src2, ITensorInfo *dst, float alpha,
- float beta,
- bool is_interleaved_transposed, const GEMMReshapeInfo &reshape_info, bool fp_mixed_precision, const ActivationLayerInfo &activation_info)
-{
- ARM_COMPUTE_ERROR_ON_NULLPTR(src0, src1, dst);
-
- // Perform validate step
- ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(src0, src1, src2, dst, beta,
- is_interleaved_transposed, reshape_info, fp_mixed_precision));
-
- auto padding_info = is_interleaved_transposed ? get_padding_info({ src0, src1, dst }) : get_padding_info({ src0, dst });
-
- _reinterpret_input_as_3d = reshape_info.reinterpret_input_as_3d();
- _reinterpret_output_as_3d = (reshape_info.depth_output_gemm3d() != 0);
- _add_bias = src2 != nullptr;
-
- // In case both input and dst have to be reinterpreted as 3D tensors,
- // force reinterpret_input_as_3d and reinterpret_output_as_3d to be false.
- if(_reinterpret_input_as_3d == _reinterpret_output_as_3d)
- {
- _reinterpret_input_as_3d = false;
- _reinterpret_output_as_3d = false;
- }
-
- // Check if we need to slide the matrix B
- const unsigned int num_dimensions_src0 = _reinterpret_input_as_3d ? src0->num_dimensions() - 1 : src0->num_dimensions();
-
- _slide_matrix_b = (src1->num_dimensions() >= num_dimensions_src0);
-
- const DataType data_type = src0->data_type();
-
- // Get target architecture
- GPUTarget gpu_target = get_target();
-
- ElementsProcessed num_elements_processed{};
-
- // Configure kernel window
- auto win_config = validate_and_configure_window(src0, src1, src2, dst, beta, is_interleaved_transposed, reshape_info,
- gpu_target, num_elements_processed);
- ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
- ICLKernel::configure_internal(win_config.second);
-
- // If _reinterpret_input_as_3d = _reinterpret_output_as_3d = true, both will be turned off (false)
- // in which case we will dispatch a batched-GEMM to reduce the complexity of the address calculation within the OpenCL kernel.
- // This means that the actual m used by the kernel is given by dst->dimension(1)
- const unsigned int internal_m = _reinterpret_output_as_3d ? dst->dimension(1) * dst->dimension(2) : dst->dimension(1);
- const unsigned int n = dst->dimension(0);
-
- const unsigned int h_gemm_3d = _reinterpret_output_as_3d ? dst->dimension(1) : src0->dimension(1);
- const unsigned int d_gemm_3d = _reinterpret_output_as_3d ? dst->dimension(2) : src0->dimension(2);
-
- const unsigned int m0 = num_elements_processed.y();
- const unsigned int n0 = num_elements_processed.x();
-
- // Calculate partial (store instead of load) M0 and partial N0 for the partial blocks at the end of a row/column if any. This is to avoid padding.
- const unsigned int partial_store_m0 = internal_m % m0;
- const unsigned int partial_store_n0 = n % n0;
-
- // Create build options
- CLBuildOptions build_opts;
-
- build_opts.add_option_if(!(helpers::float_ops::is_one(alpha)), "-DALPHA=" + float_to_string_with_full_precision(alpha));
- build_opts.add_option_if(src2 != nullptr, "-DBETA=" + float_to_string_with_full_precision(beta));
- build_opts.add_option_if(helpers::float_ops::is_one(beta), "-DUNIT_BETA");
- build_opts.add_option_if(reshape_info.broadcast_bias(), "-DBROADCAST_BIAS");
- build_opts.add_option_if(_reinterpret_input_as_3d, "-DREINTERPRET_INPUT_AS_3D");
- build_opts.add_option_if(_reinterpret_output_as_3d, "-DREINTERPRET_OUTPUT_AS_3D");
- build_opts.add_option_if(_reinterpret_input_as_3d || _reinterpret_output_as_3d, "-DHEIGHT_GEMM3D=" + support::cpp11::to_string(h_gemm_3d));
- build_opts.add_option_if(_reinterpret_input_as_3d || _reinterpret_output_as_3d, "-DDEPTH_GEMM3D=" + support::cpp11::to_string(d_gemm_3d));
- build_opts.add_option_if(!_slide_matrix_b, "-DMATRIX_B_DEPTH=" + support::cpp11::to_string(src1->dimension(2)));
- build_opts.add_option_if(activation_info.enabled(), "-DACTIVATION_TYPE=" + lower_string(string_from_activation_func(activation_info.activation())));
- build_opts.add_option_if(activation_info.enabled(), "-DA_VAL=" + float_to_string_with_full_precision(activation_info.a()));
- build_opts.add_option_if(activation_info.enabled(), "-DB_VAL=" + float_to_string_with_full_precision(activation_info.b()));
- build_opts.add_option("-DIN1_DIM_X=" + support::cpp11::to_string(src1->dimension(0)));
-
- const bool is_bifrost = get_arch_from_target(gpu_target) == GPUTarget::BIFROST;
-
- std::string kernel_name;
- if(is_interleaved_transposed)
- {
- const int mult_transpose1xW_width = reshape_info.mult_transpose1xW_width();
- const int mult_interleave4x4_height = reshape_info.mult_interleave4x4_height();
-
- build_opts.add_option("-DM=" + support::cpp11::to_string(internal_m));
- build_opts.add_option("-DN=" + support::cpp11::to_string(n));
- build_opts.add_option("-DK=" + support::cpp11::to_string(src1->dimension(0) / (n0 * mult_transpose1xW_width)));
- build_opts.add_option("-DH0=" + support::cpp11::to_string(mult_transpose1xW_width));
- build_opts.add_option("-DV0=" + support::cpp11::to_string(mult_interleave4x4_height));
- build_opts.add_option("-DPARTIAL_STORE_M0=" + support::cpp11::to_string(partial_store_m0));
- build_opts.add_option("-DPARTIAL_STORE_N0=" + support::cpp11::to_string(partial_store_n0));
-
- if(is_data_type_float(data_type) && is_bifrost)
- {
- kernel_name = "gemm_mm_interleaved_transposed_" + lower_string(string_from_data_type(data_type)) + "_bifrost";
- }
- else
- {
- kernel_name = "gemm_mm_interleaved_transposed_" + lower_string(string_from_data_type(data_type));
- if(fp_mixed_precision && data_type == DataType::F16)
- {
- // currently wider accumulator is only supported for fp16 kernels.
- kernel_name += "_acc32";
- }
- }
- }
- else // The input tensors have not been reshaped
- {
- build_opts.add_option("-DN=" + support::cpp11::to_string(n));
- build_opts.add_option("-DK=" + support::cpp11::to_string(src0->dimension(0)));
- build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(data_type));
- build_opts.add_option("-DM0=" + support::cpp11::to_string(m0));
- build_opts.add_option("-DN0=" + support::cpp11::to_string(n0));
- build_opts.add_option("-DPARTIAL_STORE_M0=" + support::cpp11::to_string(partial_store_m0));
- build_opts.add_option("-DPARTIAL_STORE_N0=" + support::cpp11::to_string(partial_store_n0));
-
- // Create kernels according to the architecture, data type and input size.
- if(is_data_type_float(data_type) && is_bifrost)
- {
- kernel_name = "gemm_mm_floating_point";
-
- if(src0->num_dimensions() != 1)
- {
- kernel_name += "_" + lower_string(string_from_data_type(data_type)) + "_bifrost";
- if(fp_mixed_precision && data_type == DataType::F16)
- {
- // currently wider accumulator is only supported for fp16 kernels.
- kernel_name += "_acc32";
- }
- }
- else if(src1->dimension(0) <= 1000 && data_type == DataType::F32)
- {
- // The first kernel is optimized for the case of 1000 or less dst elements (e.g. FC8 of AlexNet and VGG-16, and
- // FC1 of Inception v3). The second kernel is optimized for the case of greater than 1000 dst elements (e.g.
- // FC6 and FC7 of AlexNet and VGG-16).
- kernel_name += "_" + lower_string(string_from_data_type(data_type)) + "_bifrost_1000";
- }
-
- // The work-group size equal to the Bifrost quad size has been proved to be optimal for these kernels
- // via exhaustive autotuning over a range of representative layer configurations.
- set_lws_hint(cl::NDRange(4));
- }
- else // (MIDGARD and F32) or (F16)
- {
- kernel_name = "gemm_mm_floating_point";
- }
- }
- // Create kernel
- _kernel = create_kernel(compile_context, kernel_name, build_opts.options());
-
- // Set config_id for enabling LWS tuning
- _config_id = "gemm_";
- _config_id += (is_interleaved_transposed ? "reshaped_" : "");
- _config_id += (_add_bias ? "add_bias_" : "");
- _config_id += (reshape_info.broadcast_bias() ? "broadcast_bias_" : "");
- _config_id += (fp_mixed_precision ? "fp_mixed_" : "");
- _config_id += (_reinterpret_input_as_3d ? "3di_" : "");
- _config_id += (_reinterpret_output_as_3d ? "3do_" : "");
- _config_id += lower_string(string_from_data_type(src0->data_type()));
- _config_id += "_";
- _config_id += support::cpp11::to_string(dst->dimension(1));
- _config_id += "_";
- _config_id += support::cpp11::to_string(dst->dimension(0));
- _config_id += "_";
- _config_id += support::cpp11::to_string(dst->dimension(2));
- _config_id += "_";
- _config_id += support::cpp11::to_string(dst->dimension(3));
- _config_id += "_";
- _config_id += (is_interleaved_transposed ? support::cpp11::to_string(src1->dimension(0)) : support::cpp11::to_string(src1->dimension(1)));
-
- ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
-}
-
-Status ClGemmMatrixMultiplyKernel::validate(const ITensorInfo *src0, const ITensorInfo *src1, const ITensorInfo *src2, const ITensorInfo *dst, float alpha, float beta,
- bool is_interleaved_transposed, const GEMMReshapeInfo &reshape_info, GPUTarget gpu_target, bool fp_mixed_precision, const ActivationLayerInfo &activation_info)
-{
- // Note: num_elements_processed will be set in validate_and_configure_window()
- ElementsProcessed num_elements_processed{};
- ARM_COMPUTE_UNUSED(alpha);
- ARM_COMPUTE_UNUSED(activation_info);
- ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(src0, src1, src2, dst, beta, is_interleaved_transposed, reshape_info, fp_mixed_precision));
- ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(src0->clone().get(),
- src1->clone().get(),
- (src2 != nullptr) ? src2->clone().get() : nullptr,
- dst->clone().get(),
- beta,
- is_interleaved_transposed,
- reshape_info,
- gpu_target,
- num_elements_processed)
- .first);
-
- return Status{};
-}
-
-void ClGemmMatrixMultiplyKernel::run_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue)
-{
- ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
- ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window);
-
- const auto src0 = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC_0));
- const auto src1 = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC_1));
- const auto src2 = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC_2));
- auto dst = utils::cast::polymorphic_downcast<ICLTensor *>(tensors.get_tensor(TensorType::ACL_DST));
-
- ARM_COMPUTE_ERROR_ON_NULLPTR(src0, src1, dst);
- ARM_COMPUTE_ERROR_ON(_add_bias && src2 == nullptr);
-
- if(src1->info()->num_dimensions() < 3)
- {
- // The stride_z for matrix B must be zero if we do not slice
- ARM_COMPUTE_ERROR_ON(src1->info()->strides_in_bytes()[3] != 0);
- }
-
- Window slice = window.first_slice_window_3D();
- Window slice_matrix_b = slice;
-
- slice_matrix_b.set(Window::DimX, Window::Dimension(0, 1, 1));
- slice_matrix_b.set(Window::DimY, Window::Dimension(0, 1, 1));
-
- const unsigned int num_arguments_bias = _add_bias ? num_arguments_per_2D_tensor() + 1 : 0;
-
- if(_reinterpret_input_as_3d)
- {
- // Pass bottom paddings to the kernel if the input has to be reinterpreted as 3D tensor
- const unsigned int idx0 = 3 * num_arguments_per_2D_tensor() + 3 + num_arguments_bias;
- const unsigned int total_cross_plane_pad = src0->info()->padding().top + src0->info()->padding().bottom;
- _kernel.setArg<cl_uint>(idx0, static_cast<unsigned int>(total_cross_plane_pad));
- }
-
- if(_reinterpret_output_as_3d)
- {
- // Pass bottom paddings to the kernel if the dst has to be reinterpreted as 3D tensor
- const unsigned int idx0 = 3 * num_arguments_per_2D_tensor() + 3 + (_reinterpret_input_as_3d ? 1 : 0) + num_arguments_bias;
- const unsigned int total_cross_plane_pad = dst->info()->padding().top + dst->info()->padding().bottom;
- _kernel.setArg<cl_uint>(idx0, static_cast<unsigned int>(total_cross_plane_pad));
- }
-
- do
- {
- Window slice_b = slice;
- // Don't slice matrix B along the z dimension if matrix B has just 2 dimensions and matrix A more than 2
- // This scenario can happen when the matrix multiplication is used to perform a convolution operation
- if(!_slide_matrix_b)
- {
- slice_b = slice_matrix_b;
- }
-
- unsigned int idx = 0;
- add_2D_tensor_argument(idx, src0, slice);
- add_2D_tensor_argument(idx, src1, slice_b);
- if(_add_bias)
- {
- add_2D_tensor_argument(idx, src2, slice);
- }
- add_2D_tensor_argument(idx, dst, slice);
- _kernel.setArg<cl_uint>(idx++, static_cast<unsigned int>(src0->info()->strides_in_bytes()[2]));
- _kernel.setArg<cl_uint>(idx++, static_cast<unsigned int>(src1->info()->strides_in_bytes()[2]));
- if(_add_bias)
- {
- _kernel.setArg<cl_uint>(idx++, static_cast<unsigned int>(src2->info()->strides_in_bytes()[2]));
- }
- _kernel.setArg<cl_uint>(idx++, static_cast<unsigned int>(dst->info()->strides_in_bytes()[2]));
- enqueue(queue, *this, slice, lws_hint());
- }
- while(window.slide_window_slice_3D(slice));
-}
-} // namespace kernels
-} // namespace opencl
-} // namespace arm_compute
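
Reviewer note: the M/N edge-tile bookkeeping used by the removed ClGemmMatrixMultiplyKernel::configure can be reproduced with plain integers. Sizes below are hypothetical; the point is that PARTIAL_STORE_M0/N0 let the kernel store partial tiles at the edges instead of requiring padded tensors.

    #include <cassert>

    int main()
    {
        // Hypothetical GEMM output of M x N = 7 x 10, processed in m0 x n0 = 4 x 4 tiles.
        const unsigned int m0 = 4U; // rows per work-item (num_elements_processed.y())
        const unsigned int n0 = 4U; // cols per work-item (num_elements_processed.x())

        const unsigned int internal_m = 7U;  // dst->dimension(1) when dst is not reinterpreted as 3D
        const unsigned int n          = 10U; // dst->dimension(0)

        const unsigned int partial_store_m0 = internal_m % m0; // 3: height of the bottom edge tiles
        const unsigned int partial_store_n0 = n % n0;          // 2: width of the right edge tiles

        assert(partial_store_m0 == 3U && partial_store_n0 == 2U);
        return 0;
    }
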
diff --git a/src/core/gpu/cl/kernels/ClGemmMatrixMultiplyKernel.h b/src/core/gpu/cl/kernels/ClGemmMatrixMultiplyKernel.h
deleted file mode 100644
index c1601335ee..0000000000
--- a/src/core/gpu/cl/kernels/ClGemmMatrixMultiplyKernel.h
+++ /dev/null
@@ -1,88 +0,0 @@
-/*
- * Copyright (c) 2017-2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_CL_GEMM_MATRIXMULTIPLY_KERNEL_H
-#define ARM_COMPUTE_CL_GEMM_MATRIXMULTIPLY_KERNEL_H
-
-#include "src/core/common/Macros.h"
-#include "src/core/gpu/cl/ClCompileContext.h"
-#include "src/core/gpu/cl/IClKernel.h"
-
-namespace arm_compute
-{
-namespace opencl
-{
-namespace kernels
-{
-/** OpenCL kernel to multiply two input matrices "A" and "B" and add a matrix "C" if provided. All elements of the output matrix will be multiplied by alpha. In case matrix C is passed, it will be added to the previous result.
- * For the matrix C, the broadcast addition is supported if the flag "broadcast_bias" is set in the GEMMReshapeInfo object
- *
- * @note If the input tensors @p src0 and @p src1 have been reshaped respectively with @ref ClGemmReshapeLhsMatrixKernel and @ref ClGemmReshapeRhsMatrixKernel,
- * the flag @p is_interleaved_transposed must be set to true
- *
- * @attention @p src1 tensor must have at least 2 dimensions (matrix)
- */
-class ClGemmMatrixMultiplyKernel : public IClKernel
-{
-public:
- ClGemmMatrixMultiplyKernel() = default;
- ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(ClGemmMatrixMultiplyKernel);
- /** Initialise the kernel's input, output and alpha
- *
- * @param[in] compile_context The compile context to be used.
- * @param[in] src0 Input tensor containing the Matrix A. Data types supported: F16/F32
- * @param[in] src1 Input tensor containing the Matrix B. Data type supported: same as @p src0
- * @param[in] src2 Input tensor containing the Matrix C (bias). Can be nullptr. Data type supported: same as @p src0
- * @param[out] dst Output tensor to store the result of matrix multiplication. Data type supported: same as @p src0
- * @param[in] alpha Weight of the matrix product
- * @param[in] beta (Optional) Weight of matrix C. Default value is 0. Only beta = 1 is currently supported.
- * @param[in] is_interleaved_transposed (Optional) True if input0 and input1 have been reshaped respectively using @ref ClGemmReshapeLhsMatrixKernel and @ref ClGemmReshapeRhsMatrixKernel
- * @param[in] reshape_info (Optional) GEMM reshape info. If is_interleaved_transposed = true, this object must contain the information to understand how the matrix A and matrix B have been reshaped
- * @param[in] fp_mixed_precision (Optional) Use wider accumulators (32 bit instead of 16 for FP16) to improve accuracy
- * @param[in] activation_info (Optional) Activation to apply after the matrix multiplication
- *
- */
- void configure(const ClCompileContext &compile_context, ITensorInfo *src0, ITensorInfo *src1, ITensorInfo *src2, ITensorInfo *dst, float alpha, float beta = 0.f,
- bool is_interleaved_transposed = true, const GEMMReshapeInfo &reshape_info = GEMMReshapeInfo(), bool fp_mixed_precision = false, const ActivationLayerInfo &activation_info = ActivationLayerInfo());
- /** Static function to check if given info will lead to a valid configuration
- *
- * Similar to @ref ClGemmMatrixMultiplyKernel::configure()
- *
- * @return a status
- */
- static Status validate(const ITensorInfo *src0, const ITensorInfo *src1, const ITensorInfo *src2, const ITensorInfo *dst, float alpha, float beta,
- bool is_interleaved_transposed, const GEMMReshapeInfo &reshape_info, GPUTarget gpu_target, bool fp_mixed_precision = false, const ActivationLayerInfo &activation_info = ActivationLayerInfo());
-
- // Inherited methods overridden:
- void run_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue) override;
-
-public:
- bool _slide_matrix_b{ true };
- bool _reinterpret_input_as_3d{ false };
- bool _reinterpret_output_as_3d{ false };
- bool _add_bias{ false };
-};
-} // namespace kernels
-} // namespace opencl
-} // namespace arm_compute
-#endif /* ARM_COMPUTE_CL_GEMM_MATRIXMULTIPLY_KERNEL_H */
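
Reviewer note: the bias ("matrix C") shape rule documented above and enforced by the removed validate_arguments can be restated as a small standalone check; the helper name and sizes below are hypothetical.

    #include <cassert>

    // With broadcast_bias the bias must be a 1 x N row vector; otherwise it must
    // match the full M x N output of the matrix multiplication.
    bool bias_shape_ok(unsigned int bias_w, unsigned int bias_h,
                       unsigned int n, unsigned int m, bool broadcast_bias)
    {
        return broadcast_bias ? (bias_h == 1U && bias_w == n)
                              : (bias_w == n && bias_h == m);
    }

    int main()
    {
        assert(bias_shape_ok(10U, 1U, 10U, 7U, true));   // broadcast: 1 x N
        assert(bias_shape_ok(10U, 7U, 10U, 7U, false));  // full:      M x N
        assert(!bias_shape_ok(10U, 7U, 10U, 7U, true));  // a 2D bias cannot be broadcast
        return 0;
    }
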
diff --git a/src/core/gpu/cl/kernels/ClGemmMatrixMultiplyNativeKernel.cpp b/src/core/gpu/cl/kernels/ClGemmMatrixMultiplyNativeKernel.cpp
deleted file mode 100644
index 97d64c433c..0000000000
--- a/src/core/gpu/cl/kernels/ClGemmMatrixMultiplyNativeKernel.cpp
+++ /dev/null
@@ -1,411 +0,0 @@
-/*
- * Copyright (c) 2019-2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "src/core/gpu/cl/kernels/ClGemmMatrixMultiplyNativeKernel.h"
-
-#include "arm_compute/core/CL/CLHelpers.h"
-#include "arm_compute/core/CL/CLKernelLibrary.h"
-#include "arm_compute/core/CL/ICLTensor.h"
-#include "arm_compute/core/CL/OpenCL.h"
-#include "arm_compute/core/Helpers.h"
-#include "arm_compute/core/TensorInfo.h"
-#include "arm_compute/core/Utils.h"
-#include "arm_compute/core/Validate.h"
-#include "arm_compute/core/utils/misc/ShapeCalculator.h"
-#include "src/core/AccessWindowStatic.h"
-#include "src/core/helpers/AutoConfiguration.h"
-#include "src/core/helpers/WindowHelpers.h"
-#include "src/core/utils/helpers/float_ops.h"
-#include "support/Cast.h"
-#include "support/StringSupport.h"
-
-namespace arm_compute
-{
-namespace opencl
-{
-namespace kernels
-{
-namespace
-{
-using ElementsProcessed = Steps;
-
-Status validate_arguments(const ITensorInfo *src0, const ITensorInfo *src1, const ITensorInfo *src2, const ITensorInfo *dst, float alpha, float beta, const GEMMLHSMatrixInfo &lhs_info,
- const GEMMRHSMatrixInfo &rhs_info,
- const GEMMKernelInfo &gemm_info)
-{
- ARM_COMPUTE_UNUSED(alpha);
- ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src0, src1, dst);
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src0, 1, DataType::F32);
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src0, src1);
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(src0->num_dimensions() > 4, "The number of dimensions for the LHS matrix must be <= 4");
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(src1->num_dimensions() > 3, "The number of dimensions for the RHS matrix must be <= 3");
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(((rhs_info.k0 & (rhs_info.k0 - 1)) && rhs_info.k0 != 3), "Only 2,3,4,8,16 are supported for k0");
- ARM_COMPUTE_RETURN_ERROR_ON(rhs_info.k0 > 16);
- ARM_COMPUTE_RETURN_ERROR_ON(lhs_info.m0 < 1 || lhs_info.m0 > 8);
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(((rhs_info.n0 & (rhs_info.n0 - 1)) && rhs_info.n0 != 3), "Only 2,3,4,8,16 are supported for n0");
- ARM_COMPUTE_RETURN_ERROR_ON_MSG((gemm_info.reinterpret_input_as_3d || gemm_info.depth_output_gemm3d != 0) && (src2 != nullptr)
- && (!gemm_info.broadcast_bias),
- "Bias addition only supported with broadcast mode in case the input or dst has to be reinterpreted as 3D");
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(gemm_info.fp_mixed_precision, "Mixed precision not supported");
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(rhs_info.export_to_cl_image, "Export to CLImage not supported for GEMM native");
-
- const unsigned int m = gemm_info.m;
- const unsigned int n = gemm_info.n;
- const unsigned int k = gemm_info.k;
-
- ARM_COMPUTE_UNUSED(m);
- ARM_COMPUTE_UNUSED(n);
- ARM_COMPUTE_UNUSED(k);
-
- ARM_COMPUTE_RETURN_ERROR_ON(src0->dimension(0) != k);
- ARM_COMPUTE_RETURN_ERROR_ON(src1->dimension(0) != n);
- ARM_COMPUTE_RETURN_ERROR_ON(src1->dimension(1) != k);
- if(gemm_info.reinterpret_input_as_3d)
- {
- ARM_COMPUTE_RETURN_ERROR_ON(src0->dimension(1) * src0->dimension(2) != m);
- }
- else
- {
- ARM_COMPUTE_RETURN_ERROR_ON(src0->dimension(1) != m);
- }
-
- if(src2 != nullptr && !(helpers::float_ops::is_zero(beta)))
- {
- const unsigned int src2_dim0 = src2->dimension(0);
- const unsigned int src2_dim1 = src2->dimension(1);
-
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src2, src1);
- if(gemm_info.broadcast_bias)
- {
- ARM_COMPUTE_RETURN_ERROR_ON_MSG((src2_dim1 != 1 || src2_dim0 != n), "Incorrect dimension of bias matrix which is to be broadcasted");
- }
- else
- {
- ARM_COMPUTE_RETURN_ERROR_ON_MSG((src2_dim0 != n || src2_dim1 != m), "Incorrect dimension of bias matrix");
- }
- }
-
- if(dst->total_size() != 0)
- {
- const TensorInfo tensor_info_dst = dst->clone()->set_tensor_shape(misc::shape_calculator::compute_mm_shape(*src0, *src1, gemm_info));
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(dst, &tensor_info_dst);
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src0, dst);
- }
-
- return Status{};
-}
-
-std::pair<Status, Window> validate_and_configure_window(ITensorInfo *src0, ITensorInfo *src1, ITensorInfo *src2, ITensorInfo *dst, const GEMMLHSMatrixInfo &lhs_info,
- const GEMMRHSMatrixInfo &rhs_info,
- const GEMMKernelInfo &gemm_info, ElementsProcessed &num_elements_processed)
-{
- unsigned int &num_elems_processed_per_iteration_x = num_elements_processed[0];
- unsigned int &num_elems_processed_per_iteration_y = num_elements_processed[1];
- bool reinterpret_input_as_3d = gemm_info.reinterpret_input_as_3d;
- bool reinterpret_output_as_3d = gemm_info.depth_output_gemm3d != 0;
-
- Window win{};
- Window win_out{};
- bool window_changed = false;
-
- // In case both input and dst have to be reinterpreted as 3D tensors,
- // only reinterpret_output_as_3d needs to be reset here, since reinterpret_input_as_3d is not used again in this function.
- if(reinterpret_input_as_3d == reinterpret_output_as_3d)
- {
- reinterpret_output_as_3d = false;
- }
-
- // dst tensor auto initialization if not yet initialized
- auto_init_if_empty(*dst, src0->clone()->set_tensor_shape(misc::shape_calculator::compute_mm_shape(*src0, *src1, gemm_info)));
-
- TensorInfo tmp_info(*dst);
-
- if(reinterpret_output_as_3d)
- {
- // Since the dst tensor has to be reinterpreted as 3D and the execute window is based on a 2D GEMM,
- // the window needs to be constructed on the 2D collapsed version of the tensor
- TensorShape tmp_shape(dst->tensor_shape());
- tmp_shape.collapse(2U, 1U);
- tmp_info.set_tensor_shape(tmp_shape);
- }
-
- // Configure kernel window
- num_elems_processed_per_iteration_x = rhs_info.n0;
- num_elems_processed_per_iteration_y = lhs_info.m0;
-
- win = calculate_max_window(tmp_info, Steps(num_elems_processed_per_iteration_x, num_elems_processed_per_iteration_y));
- win_out = calculate_max_window(*dst, Steps(num_elems_processed_per_iteration_x, num_elems_processed_per_iteration_y));
-
- AccessWindowStatic src0_access(src0, 0, 0,
- src0->dimension(0),
- src0->dimension(1));
- AccessWindowStatic src1_access(src1, 0, 0,
- ceil_to_multiple(src1->dimension(0), num_elems_processed_per_iteration_x),
- src1->dimension(1));
- AccessWindowStatic dst_access(dst, 0, 0,
- dst->dimension(0),
- dst->dimension(1));
-
- if(src2 != nullptr)
- {
- const int bias_processed_per_iteration_x = num_elems_processed_per_iteration_x;
-
- AccessWindowStatic src2_access(src2, 0, 0,
- ceil_to_multiple(src2->dimension(0), bias_processed_per_iteration_x),
- src2->dimension(1));
-
- window_changed = update_window_and_padding(win, src0_access, src1_access, src2_access) || // window used by the execute_window_loop
- update_window_and_padding(win_out, dst_access); // window used to update the padding requirements of dst tensor
- }
- else
- {
- window_changed = update_window_and_padding(win, src0_access, src1_access) || // window used by the execute_window_loop
- update_window_and_padding(win_out, dst_access); // window used to update the padding requirements of dst tensor
- }
-
- // Collapse along the Z direction
- // This collapse needs to be here in order to tune the Z dimension of LWS
- Window collapsed = win;
- const unsigned int dimension_to_collapse = std::min(static_cast<unsigned int>(dst->num_dimensions()), 2u);
- collapsed = win.collapse(win, dimension_to_collapse);
-
- Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
- return std::make_pair(err, collapsed);
-}
-} // namespace
-
-void ClGemmMatrixMultiplyNativeKernel::configure(const CLCompileContext &compile_context, ITensorInfo *src0, ITensorInfo *src1, ITensorInfo *src2, ITensorInfo *dst, float alpha,
- float beta,
- const GEMMLHSMatrixInfo &lhs_info,
- const GEMMRHSMatrixInfo &rhs_info, const GEMMKernelInfo &gemm_info)
-{
- ARM_COMPUTE_ERROR_ON_NULLPTR(src0, src1, dst);
-
- ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(src0, src1, src2, dst, alpha, beta, lhs_info, rhs_info, gemm_info));
-
- auto padding_info = get_padding_info({ src0, dst });
- _reinterpret_input_as_3d = gemm_info.reinterpret_input_as_3d;
- _reinterpret_output_as_3d = gemm_info.depth_output_gemm3d != 0;
- _use_dummy_work_items = preferred_dummy_work_items_support(CLKernelLibrary::get().get_device());
- _add_bias = src2 != nullptr;
-
- // In case both input and dst have to be reinterpreted as 3D tensors,
- // force reinterpret_input_as_3d and reinterpret_output_as_3d to be false.
- if(_reinterpret_input_as_3d == _reinterpret_output_as_3d)
- {
- _reinterpret_input_as_3d = false;
- _reinterpret_output_as_3d = false;
- }
-
- // Check if we need to slide the matrix B
- const unsigned int num_dimensions_src0 = src0->num_dimensions();
- _slide_matrix_b = (src1->num_dimensions() >= num_dimensions_src0);
-
- ElementsProcessed num_elements_processed{};
-
- // Configure kernel window
- auto win_config = validate_and_configure_window(src0, src1, src2 != nullptr ? src2 : nullptr, dst, lhs_info, rhs_info, gemm_info, num_elements_processed);
- ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
- IClKernel::configure_internal(win_config.second);
-
- // If _reinterpret_input_as_3d = _reinterpret_output_as_3d = true,
- // we will dispatch a batched-GEMM to reduce the complexity of the address calculation within the OpenCL kernel.
- // This means that the actual m used by the kernel is given by dst->dimension(1) and not by gemm_info.m
- const unsigned int internal_m = _reinterpret_output_as_3d ? gemm_info.m : dst->dimension(1);
-
- const unsigned int h_gemm_3d = _reinterpret_output_as_3d ? dst->dimension(1) : src0->dimension(1);
- const unsigned int d_gemm_3d = _reinterpret_output_as_3d ? dst->dimension(2) : src0->dimension(2);
-
- // Calculate partial (store instead of load) M0 and partial N0 for the partial blocks at the end of a row/column if any. This is to avoid padding.
- const unsigned int partial_store_m0 = internal_m % lhs_info.m0;
- const unsigned int partial_store_n0 = gemm_info.n % rhs_info.n0;
-
- // Shrink M0 to be always <= M (internal_m) to prevent out-of-bounds reads.
- // NOTE: This might have implications on heuristics and performance
- const unsigned int internal_m0 = std::min(internal_m, lhs_info.m0);
-
- // Create build options
- CLBuildOptions build_opts;
- build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(src0->data_type()));
- build_opts.add_option_if(!(helpers::float_ops::is_one(alpha)), "-DALPHA=" + float_to_string_with_full_precision(alpha));
- build_opts.add_option_if(src2 != nullptr, "-DBETA=" + float_to_string_with_full_precision(beta));
- build_opts.add_option_if(helpers::float_ops::is_one(beta), "-DUNIT_BETA");
- build_opts.add_option_if(gemm_info.broadcast_bias, "-DBROADCAST_BIAS");
- build_opts.add_option_if(_reinterpret_input_as_3d, "-DREINTERPRET_INPUT_AS_3D");
- build_opts.add_option_if(_reinterpret_output_as_3d, "-DREINTERPRET_OUTPUT_AS_3D");
- build_opts.add_option_if(_reinterpret_input_as_3d || _reinterpret_output_as_3d, "-DHEIGHT_GEMM3D=" + support::cpp11::to_string(h_gemm_3d));
- build_opts.add_option_if(_reinterpret_input_as_3d || _reinterpret_output_as_3d, "-DDEPTH_GEMM3D=" + support::cpp11::to_string(d_gemm_3d));
- build_opts.add_option_if(!_slide_matrix_b, "-DMATRIX_B_DEPTH=" + support::cpp11::to_string(src1->dimension(2)));
- build_opts.add_option_if(_use_dummy_work_items, "-DDUMMY_WORK_ITEMS");
- build_opts.add_option("-DM=" + support::cpp11::to_string(internal_m));
- build_opts.add_option("-DN=" + support::cpp11::to_string(gemm_info.n));
- build_opts.add_option("-DK=" + support::cpp11::to_string(gemm_info.k));
- build_opts.add_option("-DM0=" + support::cpp11::to_string(internal_m0));
- build_opts.add_option("-DN0=" + support::cpp11::to_string(rhs_info.n0));
- build_opts.add_option("-DK0=" + support::cpp11::to_string(rhs_info.k0));
- build_opts.add_option("-DPARTIAL_STORE_M0=" + support::cpp11::to_string(partial_store_m0));
- build_opts.add_option("-DPARTIAL_STORE_N0=" + support::cpp11::to_string(partial_store_n0));
- build_opts.add_option_if(gemm_info.activation_info.enabled(), "-DACTIVATION_TYPE=" + lower_string(string_from_activation_func(gemm_info.activation_info.activation())));
- build_opts.add_option_if(gemm_info.activation_info.enabled(), "-DA_VAL=" + float_to_string_with_full_precision(gemm_info.activation_info.a()));
- build_opts.add_option_if(gemm_info.activation_info.enabled(), "-DB_VAL=" + float_to_string_with_full_precision(gemm_info.activation_info.b()));
-
- std::string kernel_name("gemm_mm_native");
-
- // Create kernel
- _kernel = create_kernel(compile_context, kernel_name, build_opts.options());
-
- // Set config_id for enabling LWS tuning
- _config_id = kernel_name;
- _config_id += "_";
- _config_id += (_add_bias ? "add_bias_" : "");
- _config_id += (gemm_info.broadcast_bias ? "broadcast_bias_" : "");
- _config_id += (_reinterpret_input_as_3d ? "3di_" : "");
- _config_id += (_reinterpret_output_as_3d ? "3do_" : "");
- _config_id += (gemm_info.activation_info.enabled() ? "fused_activation_" : "");
- _config_id += lower_string(string_from_data_type(src0->data_type()));
- _config_id += "_";
- _config_id += support::cpp11::to_string(dst->dimension(1));
- _config_id += "_";
- _config_id += support::cpp11::to_string(dst->dimension(0));
- _config_id += "_";
- _config_id += support::cpp11::to_string(gemm_info.k);
- _config_id += "_";
- _config_id += support::cpp11::to_string(dst->dimension(2));
- _config_id += "_";
- _config_id += support::cpp11::to_string(lhs_info.m0);
- _config_id += "_";
- _config_id += support::cpp11::to_string(rhs_info.n0);
- _config_id += "_";
- _config_id += support::cpp11::to_string(rhs_info.k0);
-
- ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
-}
-
-Status ClGemmMatrixMultiplyNativeKernel::validate(const ITensorInfo *src0, const ITensorInfo *src1, const ITensorInfo *src2, const ITensorInfo *dst, float alpha, float beta,
- const GEMMLHSMatrixInfo &lhs_info,
- const GEMMRHSMatrixInfo &rhs_info, const GEMMKernelInfo &gemm_info)
-{
- ElementsProcessed num_elements_processed{};
- ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(src0, src1, src2, dst, alpha, beta, lhs_info, rhs_info, gemm_info));
- ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(src0->clone().get(),
- src1->clone().get(),
- src2 != nullptr ? src2->clone().get() : nullptr,
- dst->clone().get(),
- lhs_info,
- rhs_info,
- gemm_info,
- num_elements_processed)
- .first);
-
- return Status{};
-}
-
-void ClGemmMatrixMultiplyNativeKernel::run_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue)
-{
- ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
- ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window);
-
- const auto src0 = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC_0));
- const auto src1 = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC_1));
- const auto src2 = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC_2));
- auto dst = utils::cast::polymorphic_downcast<ICLTensor *>(tensors.get_tensor(TensorType::ACL_DST));
-
- ARM_COMPUTE_ERROR_ON_NULLPTR(src0, src1, dst);
- ARM_COMPUTE_ERROR_ON(_add_bias && src2 == nullptr);
-
- if(src1->info()->num_dimensions() < 3)
- {
- // The stride_z for matrix B must be zero if we do not slice
- ARM_COMPUTE_ERROR_ON(src1->info()->strides_in_bytes()[3] != 0);
- }
-
- Window slice = window.first_slice_window_3D();
- Window slice_matrix_b = slice;
-
- slice_matrix_b.set(Window::DimX, Window::Dimension(0, 1, 1));
- slice_matrix_b.set(Window::DimY, Window::Dimension(0, 1, 1));
-
- if(_reinterpret_input_as_3d)
- {
- // Pass bottom paddings to the kernel if the input has to be reinterpreted as 3D tensor
- unsigned int idx0;
- if(_add_bias)
- {
- idx0 = 4 * num_arguments_per_2D_tensor() + 4;
- }
- else
- {
- idx0 = 3 * num_arguments_per_2D_tensor() + 3;
- }
- const unsigned int total_cross_plane_pad = src0->info()->padding().top + src0->info()->padding().bottom;
- _kernel.setArg<cl_uint>(idx0, static_cast<unsigned int>(total_cross_plane_pad));
- }
-
- if(_reinterpret_output_as_3d)
- {
- // Pass bottom paddings to the kernel if the dst has to be reinterpreted as 3D tensor
- unsigned int idx0;
- if(_add_bias)
- {
- idx0 = 4 * num_arguments_per_2D_tensor() + 4 + (_reinterpret_input_as_3d ? 1 : 0);
- }
- else
- {
- idx0 = 3 * num_arguments_per_2D_tensor() + 3 + (_reinterpret_input_as_3d ? 1 : 0);
- }
- const unsigned int total_cross_plane_pad = dst->info()->padding().top + dst->info()->padding().bottom;
- _kernel.setArg<cl_uint>(idx0, static_cast<unsigned int>(total_cross_plane_pad));
- }
-
- do
- {
- Window slice_b = slice;
- // Don't slice matrix B along the z dimension if matrix B has just 2 dimensions and matrix A more than 2
- // This scenario can happen when the matrix multiplication is used to perform a convolution operation
- if(!_slide_matrix_b)
- {
- slice_b = slice_matrix_b;
- }
-
- unsigned int idx = 0;
- add_2D_tensor_argument(idx, src0, slice);
- add_2D_tensor_argument(idx, src1, slice_b);
- if(_add_bias)
- {
- add_2D_tensor_argument(idx, src2, slice);
- }
- add_2D_tensor_argument(idx, dst, slice);
- _kernel.setArg<cl_uint>(idx++, static_cast<unsigned int>(src0->info()->strides_in_bytes()[2]));
- _kernel.setArg<cl_uint>(idx++, static_cast<unsigned int>(src1->info()->strides_in_bytes()[2]));
- if(_add_bias)
- {
- _kernel.setArg<cl_uint>(idx++, static_cast<unsigned int>(src2->info()->strides_in_bytes()[2]));
- }
- _kernel.setArg<cl_uint>(idx++, static_cast<unsigned int>(dst->info()->strides_in_bytes()[2]));
- enqueue(queue, *this, slice, lws_hint(), _use_dummy_work_items);
- }
- while(window.slide_window_slice_3D(slice));
-}
-} // namespace kernels
-} // namespace opencl
-} // namespace arm_compute
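Note: the run_op removed above binds its OpenCL arguments in a fixed order (LHS, RHS, optional bias, dst, the per-matrix stride_z values, then the optional cross-plane pads). The sketch below is a hypothetical caller-side driver, not library code: the function and tensor names are illustrative, and it assumes the kernel has already been configured.

// Hypothetical driver sketch (illustrative only): dispatch the native GEMM kernel
// through an ITensorPack, using the tensor slot IDs read by run_op above.
#include "arm_compute/core/ITensorPack.h"
#include "arm_compute/runtime/CL/CLTensor.h"
#include "src/core/gpu/cl/kernels/ClGemmMatrixMultiplyNativeKernel.h"

using namespace arm_compute;

void run_native_gemm(opencl::kernels::ClGemmMatrixMultiplyNativeKernel &kernel,
                     CLTensor &lhs, CLTensor &rhs, CLTensor &bias, CLTensor &dst,
                     cl::CommandQueue &queue)
{
    ITensorPack pack;
    pack.add_const_tensor(TensorType::ACL_SRC_0, &lhs);  // LHS matrix
    pack.add_const_tensor(TensorType::ACL_SRC_1, &rhs);  // RHS matrix
    pack.add_const_tensor(TensorType::ACL_SRC_2, &bias); // bias, only read if the kernel was configured with one
    pack.add_tensor(TensorType::ACL_DST, &dst);          // destination

    // Execute over the window computed at configure() time
    kernel.run_op(pack, kernel.window(), queue);
}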
diff --git a/src/core/gpu/cl/kernels/ClGemmMatrixMultiplyNativeKernel.h b/src/core/gpu/cl/kernels/ClGemmMatrixMultiplyNativeKernel.h
deleted file mode 100644
index 4770b18b8e..0000000000
--- a/src/core/gpu/cl/kernels/ClGemmMatrixMultiplyNativeKernel.h
+++ /dev/null
@@ -1,88 +0,0 @@
-/*
- * Copyright (c) 2019-2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_CL_GEMM_MATRIXMULTIPLY_NATIVE_KERNEL_H
-#define ARM_COMPUTE_CL_GEMM_MATRIXMULTIPLY_NATIVE_KERNEL_H
-
-#include "arm_compute/core/KernelDescriptors.h"
-#include "src/core/common/Macros.h"
-#include "src/core/gpu/cl/ClCompileContext.h"
-#include "src/core/gpu/cl/IClKernel.h"
-
-namespace arm_compute
-{
-namespace opencl
-{
-namespace kernels
-{
-/** OpenCL kernel to multiply matrices when neither of the input matrices have been reshaped */
-class ClGemmMatrixMultiplyNativeKernel : public IClKernel
-{
-public:
- ClGemmMatrixMultiplyNativeKernel() = default;
- ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(ClGemmMatrixMultiplyNativeKernel);
- /** Initialise the kernel's input and dst.
- *
- * @param[in] compile_context The compile context to be used.
- * @param[in]  src0            Input tensor for the LHS matrix. Data type supported: F32. The number of dimensions for the LHS matrix must be less than or equal to 4.
- * @param[in]  src1            Input tensor for the RHS matrix. Data type supported: same as @p src0. The number of dimensions for the RHS matrix must be less than or equal to 3.
- * @param[in] src2 Input tensor containing the bias matrix. Data type supported: same as @p src0.
- * @param[out] dst             Destination tensor info. Data type supported: same as @p src0
- * @param[in] alpha Weight of the matrix product
- * @param[in] beta Weight of the matrix bias
- * @param[in] lhs_info LHS matrix information used to retrieve the number of rows and accumulations to be processed by each thread. Only the following values are supported:
- * lhs_info.m0: 1,2,3,4,5,6,7,8
- * lhs_info.k0: 2,3,4,8,16
- * @param[in] rhs_info RHS matrix information used to retrieve the number of columns and accumulations to be processed by each thread. Only the following values are supported:
- * rhs_info.n0: 2,3,4,8,16
- *                             rhs_info.k0: same as lhs_info.k0
- * @param[in] gemm_info GEMM information used to retrieve the original dimensions of the input matrices
- */
- void configure(const ClCompileContext &compile_context, ITensorInfo *src0, ITensorInfo *src1, ITensorInfo *src2, ITensorInfo *dst, float alpha, float beta,
- const GEMMLHSMatrixInfo &lhs_info,
- const GEMMRHSMatrixInfo &rhs_info,
- const GEMMKernelInfo &gemm_info);
- /** Static function to check if given info will lead to a valid configuration
- *
- * Similar to @ref ClGemmMatrixMultiplyNativeKernel::configure()
- *
- * @return a status
- */
- static Status validate(const ITensorInfo *src0, const ITensorInfo *src1, const ITensorInfo *src2, const ITensorInfo *dst, float alpha, float beta, const GEMMLHSMatrixInfo &lhs_info,
- const GEMMRHSMatrixInfo &rhs_info,
- const GEMMKernelInfo &gemm_info);
-
- // Inherited methods overridden:
- void run_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue) override;
-
-private:
- bool _slide_matrix_b{ true };
- bool _reinterpret_input_as_3d{ false };
- bool _reinterpret_output_as_3d{ false };
- bool _use_dummy_work_items{ false };
- bool _add_bias{ false };
-};
-} // namespace kernels
-} // namespace opencl
-} // namespace arm_compute
-#endif /*ARM_COMPUTE_CL_GEMM_MATRIXMULTIPLY_NATIVE_KERNEL_H*/
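For reference, a hypothetical configure/validate sketch for the class above. The shapes, block sizes and the compile-context parameter are illustrative assumptions, not values taken from the library.

// Hypothetical sketch: validate and configure ClGemmMatrixMultiplyNativeKernel.
// All shapes and block sizes below are illustrative.
#include "arm_compute/core/KernelDescriptors.h"
#include "arm_compute/core/TensorInfo.h"
#include "src/core/gpu/cl/kernels/ClGemmMatrixMultiplyNativeKernel.h"

using namespace arm_compute;
using namespace arm_compute::opencl::kernels;

void configure_native_gemm(const ClCompileContext &compile_context)
{
    // C[64x32] = A[64x128] * B[128x32] in F32; shapes are given as (dim0, dim1)
    TensorInfo src0(TensorShape(128U, 64U), 1, DataType::F32); // LHS: K x M
    TensorInfo src1(TensorShape(32U, 128U), 1, DataType::F32); // RHS: N x K
    TensorInfo dst(TensorShape(32U, 64U), 1, DataType::F32);   // dst: N x M

    GEMMLHSMatrixInfo lhs_info;
    lhs_info.m0 = 4; // rows of the dst block processed per work-item
    lhs_info.k0 = 4; // accumulations per iteration

    GEMMRHSMatrixInfo rhs_info;
    rhs_info.n0 = 4; // columns of the dst block processed per work-item
    rhs_info.k0 = 4; // must match lhs_info.k0

    GEMMKernelInfo gemm_info;
    gemm_info.m = 64;
    gemm_info.n = 32;
    gemm_info.k = 128;

    if(bool(ClGemmMatrixMultiplyNativeKernel::validate(&src0, &src1, nullptr, &dst, 1.f, 0.f, lhs_info, rhs_info, gemm_info)))
    {
        ClGemmMatrixMultiplyNativeKernel kernel;
        kernel.configure(compile_context, &src0, &src1, nullptr, &dst, 1.f, 0.f, lhs_info, rhs_info, gemm_info);
    }
}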
diff --git a/src/core/gpu/cl/kernels/ClGemmMatrixMultiplyReshapedKernel.cpp b/src/core/gpu/cl/kernels/ClGemmMatrixMultiplyReshapedKernel.cpp
deleted file mode 100644
index 27409b66ac..0000000000
--- a/src/core/gpu/cl/kernels/ClGemmMatrixMultiplyReshapedKernel.cpp
+++ /dev/null
@@ -1,416 +0,0 @@
-/*
- * Copyright (c) 2018-2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "src/core/gpu/cl/kernels/ClGemmMatrixMultiplyReshapedKernel.h"
-
-#include "arm_compute/core/CL/CLHelpers.h"
-#include "arm_compute/core/CL/CLKernelLibrary.h"
-#include "arm_compute/core/CL/ICLTensor.h"
-#include "arm_compute/core/CL/OpenCL.h"
-#include "arm_compute/core/Helpers.h"
-#include "arm_compute/core/TensorInfo.h"
-#include "arm_compute/core/Utils.h"
-#include "arm_compute/core/Validate.h"
-#include "arm_compute/core/utils/misc/ShapeCalculator.h"
-#include "src/core/AccessWindowStatic.h"
-#include "src/core/CL/CLUtils.h"
-#include "src/core/CL/CLValidate.h"
-#include "src/core/gpu/cl/kernels/gemm/ClGemmHelpers.h"
-#include "src/core/helpers/AutoConfiguration.h"
-#include "src/core/helpers/WindowHelpers.h"
-#include "src/core/utils/helpers/float_ops.h"
-#include "support/Cast.h"
-#include "support/StringSupport.h"
-
-#include <cstddef>
-#include <cstdint>
-#include <tuple>
-
-namespace arm_compute
-{
-namespace opencl
-{
-namespace kernels
-{
-namespace
-{
-using ElementsProcessed = Steps;
-
-Status validate_arguments(const ITensorInfo *src0, const ITensorInfo *src1, const ITensorInfo *src2, const ITensorInfo *dst, float alpha, float beta, const GEMMLHSMatrixInfo &lhs_info,
- const GEMMRHSMatrixInfo &rhs_info,
- const GEMMKernelInfo &gemm_info)
-{
- ARM_COMPUTE_UNUSED(alpha);
- ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src0, src1, dst);
- ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(src0);
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src0, 1, DataType::F16, DataType::F32);
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src0, src1);
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(src0->num_dimensions() > 4, "The number of dimensions for the LHS matrix must be <= 4");
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(src1->num_dimensions() > 3, "The number of dimensions for the RHS matrix must be <= 3");
- ARM_COMPUTE_RETURN_ERROR_ON(lhs_info.k0 != rhs_info.k0);
- ARM_COMPUTE_RETURN_ERROR_ON(lhs_info.transpose == rhs_info.transpose);
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(((lhs_info.k0 & (lhs_info.k0 - 1)) && lhs_info.k0 != 3), "Only 2,3,4,8,16 are supported for k0");
- ARM_COMPUTE_RETURN_ERROR_ON(lhs_info.k0 > 16);
- ARM_COMPUTE_RETURN_ERROR_ON(lhs_info.m0 < 2 || lhs_info.m0 > 8);
- ARM_COMPUTE_RETURN_ERROR_ON_MSG((lhs_info.transpose) && ((lhs_info.m0 & (lhs_info.m0 - 1)) && lhs_info.m0 != 3), "Only 2,3,4,8,16 are supported for m0");
- ARM_COMPUTE_RETURN_ERROR_ON_MSG((rhs_info.transpose) && ((rhs_info.n0 & (rhs_info.n0 - 1)) && rhs_info.n0 != 3), "Only 2,3,4,8,16 are supported for n0");
- ARM_COMPUTE_RETURN_ERROR_ON_MSG((gemm_info.reinterpret_input_as_3d || gemm_info.depth_output_gemm3d != 0) && (src2 != nullptr)
- && (!gemm_info.broadcast_bias),
- "Bias addition only supported with broadcast mode in case the input or dst has to be reinterpreted as 3D");
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(gemm_info.fp_mixed_precision && (src0->data_type() == DataType::F32), "Mixed precision only supported for F16 data type");
- ARM_COMPUTE_RETURN_ON_ERROR(gemm::validate_image2d_support_on_rhs(*src1, rhs_info));
-
- const unsigned int m = gemm_info.m;
- const unsigned int n = gemm_info.n;
- const unsigned int k = gemm_info.k;
-
- TensorShape tensor_shape0{ src0->tensor_shape() };
- tensor_shape0.set(0, k);
- tensor_shape0.set(1, m);
-
- TensorShape tensor_shape1{ src1->tensor_shape() };
- tensor_shape1.set(0, n);
- tensor_shape1.set(1, k);
-
- if(src2 != nullptr && !(helpers::float_ops::is_zero(beta)))
- {
- const unsigned int src2_dim0 = src2->dimension(0);
- const unsigned int src2_dim1 = src2->dimension(1);
-
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src2, src1);
- if(gemm_info.broadcast_bias)
- {
- ARM_COMPUTE_RETURN_ERROR_ON_MSG((src2_dim1 != 1 || src2_dim0 != n), "Incorrect dimension of bias matrix which is to be broadcasted");
- }
- else
- {
- ARM_COMPUTE_RETURN_ERROR_ON_MSG((src2_dim0 != n || src2_dim1 != m), "Incorrect dimension of bias matrix");
- }
- }
-
- const TensorInfo tensor_info0 = src0->clone()->set_tensor_shape(tensor_shape0);
- const TensorInfo tensor_info1 = src1->clone()->set_tensor_shape(tensor_shape1);
-
- const TensorInfo tensor_info_reshaped0 = src0->clone()->set_tensor_shape(misc::shape_calculator::compute_lhs_reshaped_shape(tensor_info0, lhs_info));
- const TensorInfo tensor_info_reshaped1 = src1->clone()->set_tensor_shape(misc::shape_calculator::compute_rhs_reshaped_shape(tensor_info1, rhs_info));
-
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(src0, &tensor_info_reshaped0);
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(src1, &tensor_info_reshaped1);
-
- if(dst->total_size() != 0)
- {
- const TensorInfo tensor_info_dst = dst->clone()->set_tensor_shape(misc::shape_calculator::compute_mm_shape(*src0, *src1, gemm_info));
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(dst, &tensor_info_dst);
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src0, dst);
- }
-
- return Status{};
-}
-
-std::pair<Status, Window> validate_and_configure_window(ITensorInfo *src0, ITensorInfo *src1, ITensorInfo *src2, ITensorInfo *dst, const GEMMLHSMatrixInfo &lhs_info,
- const GEMMRHSMatrixInfo &rhs_info,
- const GEMMKernelInfo &gemm_info, ElementsProcessed &num_elements_processed)
-{
- unsigned int &num_elems_processed_per_iteration_x = num_elements_processed[0];
- unsigned int &num_elems_processed_per_iteration_y = num_elements_processed[1];
- bool reinterpret_output_as_3d = gemm_info.depth_output_gemm3d != 0;
-
- Window win{};
- Window win_out{};
- bool window_changed = false;
-
- // dst tensor auto initialization if not yet initialized
- auto_init_if_empty(*dst, src0->clone()->set_tensor_shape(misc::shape_calculator::compute_mm_shape(*src0, *src1, gemm_info)));
-
- TensorInfo tmp_info(*dst);
-
- if(reinterpret_output_as_3d)
- {
- // Since the dst tensor has to be reinterpreted as 3D and the execute window is based on a 2D GEMM,
- // the window needs to be constructed on the 2D collapsed version of the tensor
- TensorShape tmp_shape(dst->tensor_shape());
- tmp_shape.collapse(2U, 1U);
- tmp_info.set_tensor_shape(tmp_shape);
- }
-
- // Configure kernel window
- num_elems_processed_per_iteration_x = rhs_info.n0;
- num_elems_processed_per_iteration_y = lhs_info.m0;
-
- win = calculate_max_window(tmp_info, Steps(num_elems_processed_per_iteration_x, num_elems_processed_per_iteration_y));
- win_out = calculate_max_window(*dst, Steps(num_elems_processed_per_iteration_x, num_elems_processed_per_iteration_y));
-
- if(src2 != nullptr)
- {
- const int bias_processed_per_iteration_x = num_elems_processed_per_iteration_x;
-
- const int bias_processed_per_iteration_y = gemm_info.broadcast_bias ? 1 : num_elems_processed_per_iteration_y;
-
- AccessWindowStatic src2_access(src2, 0, 0,
- ceil_to_multiple(src2->dimension(0), bias_processed_per_iteration_x),
- ceil_to_multiple(src2->dimension(1), bias_processed_per_iteration_y));
-
- window_changed = update_window_and_padding(win, src2_access); // window used by the execute_window_loop
- }
-
- // Collapse along the Z direction
- // This collapse needs to be here in order to tune the Z dimension of LWS
- Window collapsed = win;
- const unsigned int dimension_to_collapse = std::min(static_cast<unsigned int>(dst->num_dimensions()), 2u);
- collapsed = win.collapse(win, dimension_to_collapse);
-
- Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
- return std::make_pair(err, collapsed);
-}
-} // namespace
-
-void ClGemmMatrixMultiplyReshapedKernel::configure(const CLCompileContext &compile_context,
- ITensorInfo *src0, ITensorInfo *src1, ITensorInfo *src2, ITensorInfo *dst, float alpha, float beta,
- const GEMMLHSMatrixInfo &lhs_info, const GEMMRHSMatrixInfo &rhs_info, const GEMMKernelInfo &gemm_info)
-{
- ARM_COMPUTE_ERROR_ON_NULLPTR(src0, src1, dst);
-
- ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(src0, src1, src2, dst, alpha, beta, lhs_info, rhs_info, gemm_info));
-
- auto padding_info = get_padding_info({ src0, dst });
- _reinterpret_output_as_3d = gemm_info.depth_output_gemm3d != 0;
- _use_dummy_work_items = preferred_dummy_work_items_support(CLKernelLibrary::get().get_device());
- _add_bias = src2 != nullptr;
- _export_to_cl_image = rhs_info.export_to_cl_image;
- _k = gemm_info.k;
-
- // Check if we need to slide the matrix B
- const unsigned int num_dimensions_src0 = src0->num_dimensions();
- _slide_matrix_b = (src1->num_dimensions() >= num_dimensions_src0);
-
- ElementsProcessed num_elements_processed{};
-
- // Configure kernel window
- auto win_config = validate_and_configure_window(src0, src1, src2, dst, lhs_info, rhs_info, gemm_info, num_elements_processed);
- ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
- ICLKernel::configure_internal(win_config.second);
-
- const bool enable_mixed_precision = gemm_info.fp_mixed_precision;
- const DataType data_type = src0->data_type();
-
- // Calculate partial (store instead of load) M0 and partial N0 for the partial blocks at the end of a row/column if any. This is to avoid padding.
- const unsigned int internal_m = _reinterpret_output_as_3d ? gemm_info.m : dst->dimension(1);
-
- const unsigned int partial_store_m0 = internal_m % lhs_info.m0;
- const unsigned int partial_store_n0 = gemm_info.n % rhs_info.n0;
-
- // Create build options
- CLBuildOptions build_opts;
- build_opts.add_option_if(!(helpers::float_ops::is_one(alpha)), "-DALPHA=" + float_to_string_with_full_precision(alpha));
- build_opts.add_option_if(src2 != nullptr, "-DBETA=" + float_to_string_with_full_precision(beta));
- build_opts.add_option_if(helpers::float_ops::is_one(beta), "-DUNIT_BETA");
- build_opts.add_option_if(_reinterpret_output_as_3d, "-DREINTERPRET_OUTPUT_AS_3D");
- build_opts.add_option_if(_reinterpret_output_as_3d, "-DHEIGHT_GEMM3D=" + support::cpp11::to_string(dst->dimension(1)));
- build_opts.add_option_if(_reinterpret_output_as_3d, "-DDEPTH_GEMM3D=" + support::cpp11::to_string(dst->dimension(2)));
- build_opts.add_option_if(gemm_info.broadcast_bias, "-DBROADCAST_BIAS");
- build_opts.add_option_if(!_slide_matrix_b, "-DMATRIX_B_DEPTH=" + support::cpp11::to_string(src1->dimension(2)));
- build_opts.add_option_if(lhs_info.interleave, "-DLHS_INTERLEAVE");
- build_opts.add_option_if(rhs_info.interleave, "-DRHS_INTERLEAVE");
- build_opts.add_option_if(lhs_info.transpose, "-DLHS_TRANSPOSE");
- build_opts.add_option_if(_use_dummy_work_items, "-DDUMMY_WORK_ITEMS");
- build_opts.add_option_if(gemm_info.activation_info.enabled(), "-DACTIVATION_TYPE=" + lower_string(string_from_activation_func(gemm_info.activation_info.activation())));
- build_opts.add_option_if(gemm_info.activation_info.enabled(), "-DA_VAL=" + float_to_string_with_full_precision(gemm_info.activation_info.a()));
- build_opts.add_option_if(gemm_info.activation_info.enabled(), "-DB_VAL=" + float_to_string_with_full_precision(gemm_info.activation_info.b()));
- build_opts.add_option_if(enable_mixed_precision, "-DMIXED_PRECISION");
- build_opts.add_option_if(rhs_info.export_to_cl_image, "-DOPENCL_IMAGE_SUPPORT");
- build_opts.add_option("-DRHS_HEIGHT=" + support::cpp11::to_string(src1->dimension(1)));
- build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(data_type));
- build_opts.add_option("-DDATA_TYPE_ACCUMULATOR=" + (enable_mixed_precision ? get_cl_type_from_data_type(DataType::F32) : get_cl_type_from_data_type(data_type)));
- build_opts.add_option("-DM=" + support::cpp11::to_string(gemm_info.m));
- build_opts.add_option("-DN=" + support::cpp11::to_string(gemm_info.n));
- build_opts.add_option("-DK=" + support::cpp11::to_string(gemm_info.k));
- build_opts.add_option("-DM0=" + support::cpp11::to_string(lhs_info.m0));
- build_opts.add_option("-DN0=" + support::cpp11::to_string(rhs_info.n0));
- build_opts.add_option("-DK0=" + support::cpp11::to_string(lhs_info.k0));
- build_opts.add_option("-DV0=" + support::cpp11::to_string(lhs_info.v0));
- build_opts.add_option("-DH0=" + support::cpp11::to_string(rhs_info.h0));
- build_opts.add_option("-DPARTIAL_STORE_M0=" + support::cpp11::to_string(partial_store_m0));
- build_opts.add_option("-DPARTIAL_STORE_N0=" + support::cpp11::to_string(partial_store_n0));
-
- std::string kernel_name("gemm_mm_reshaped_");
- kernel_name += lhs_info.transpose ? "lhs_t_" : "lhs_nt_";
- kernel_name += rhs_info.transpose ? "rhs_t" : "rhs_nt";
- kernel_name += rhs_info.export_to_cl_image ? "_texture" : "";
-
- // Create kernel
- _kernel = create_kernel(compile_context, kernel_name, build_opts.options());
-
- // Set config_id for enabling LWS tuning
- _config_id = kernel_name;
- _config_id += "_";
- _config_id += (_add_bias ? "add_bias_" : "");
- _config_id += (gemm_info.broadcast_bias ? "broadcast_bias_" : "");
- _config_id += (_reinterpret_output_as_3d ? "3do_" : "");
- _config_id += (gemm_info.activation_info.enabled() ? "fused_activation_" : "");
- _config_id += lower_string(string_from_data_type(src0->data_type()));
- _config_id += "_";
- _config_id += (enable_mixed_precision ? "mixed_precision_" : "");
- _config_id += support::cpp11::to_string(dst->dimension(1));
- _config_id += "_";
- _config_id += support::cpp11::to_string(dst->dimension(0));
- _config_id += "_";
- _config_id += support::cpp11::to_string(gemm_info.k);
- _config_id += "_";
- _config_id += support::cpp11::to_string(dst->dimension(2));
- _config_id += "_";
- _config_id += support::cpp11::to_string(lhs_info.m0);
- _config_id += "_";
- _config_id += support::cpp11::to_string(rhs_info.n0);
- _config_id += "_";
- _config_id += support::cpp11::to_string(lhs_info.k0);
- _config_id += "_";
- _config_id += support::cpp11::to_string(lhs_info.v0);
- _config_id += "_";
- _config_id += support::cpp11::to_string(rhs_info.h0);
- _config_id += "_";
- _config_id += support::cpp11::to_string(lhs_info.interleave);
- _config_id += "_";
- _config_id += support::cpp11::to_string(rhs_info.interleave);
-
- ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
-}
-
-Status ClGemmMatrixMultiplyReshapedKernel::validate(const ITensorInfo *src0, const ITensorInfo *src1, const ITensorInfo *src2, const ITensorInfo *dst, float alpha, float beta,
- const GEMMLHSMatrixInfo &lhs_info,
- const GEMMRHSMatrixInfo &rhs_info, const GEMMKernelInfo &gemm_info)
-{
- ElementsProcessed num_elements_processed{};
- ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(src0, src1, src2, dst, alpha, beta, lhs_info, rhs_info, gemm_info));
- ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(src0->clone().get(),
- src1->clone().get(),
- src2 != nullptr ? src2->clone().get() : nullptr,
- dst->clone().get(),
- lhs_info,
- rhs_info,
- gemm_info,
- num_elements_processed)
- .first);
-
- return Status{};
-}
-
-void ClGemmMatrixMultiplyReshapedKernel::run_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue)
-{
- ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
- ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window);
-
- const auto src0 = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC_0));
- const auto src1 = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC_1));
- const auto src2 = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC_2));
- auto dst = utils::cast::polymorphic_downcast<ICLTensor *>(tensors.get_tensor(TensorType::ACL_DST));
-
- ARM_COMPUTE_ERROR_ON_NULLPTR(src0, src1, dst);
- ARM_COMPUTE_ERROR_ON(_add_bias && src2 == nullptr);
-
- if(src1->info()->num_dimensions() < 3)
- {
- // The stride_z for matrix B must be zero if we do not slice
- ARM_COMPUTE_ERROR_ON(src1->info()->strides_in_bytes()[3] != 0);
- }
-
- Window slice = window.first_slice_window_3D();
- Window slice_matrix_b = slice;
-
- slice_matrix_b.set(Window::DimX, Window::Dimension(0, 1, 1));
- slice_matrix_b.set(Window::DimY, Window::Dimension(0, 1, 1));
-
- const unsigned int total_cross_plane_pad = dst->info()->padding().top + dst->info()->padding().bottom;
-
- cl::Image2D src1_image2d;
-
- if(_export_to_cl_image)
- {
- const TensorShape shape2d(src1->info()->dimension(0) / 4, src1->info()->dimension(1) * src1->info()->dimension(2));
- const size_t image_row_pitch = src1->info()->strides_in_bytes()[1];
-
- src1_image2d = create_image2d_from_buffer(CLKernelLibrary::get().context(), src1->cl_buffer(), shape2d, src1->info()->data_type(), image_row_pitch);
- }
-
- do
- {
- Window slice_b = slice;
- // Don't slice matrix B along the z dimension if matrix B has just 2 dimensions and matrix A more than 2
- // This scenario can happen when the matrix multiplication is used to perform a convolution operation
- if(!_slide_matrix_b)
- {
- slice_b = slice_matrix_b;
- }
-
- unsigned int idx = 0;
-
- // LHS buffer
- add_2D_tensor_argument(idx, src0, slice);
-
- // RHS buffer or RHS OpenCL image (_export_to_cl_image == true)
- if(_export_to_cl_image)
- {
- _kernel.setArg(idx++, src1_image2d);
- }
- else
- {
- add_2D_tensor_argument(idx, src1, slice_b);
- }
-
- // Bias buffer (_add_bias == true)
- add_2D_tensor_argument_if(_add_bias, idx, src2, slice);
-
- // dst buffer
- add_2D_tensor_argument(idx, dst, slice);
-
- // K dimension (not used if _export_to_cl_image == true)
- _kernel.setArg<cl_uint>(idx++, static_cast<unsigned int>(_k));
-
- // LHS stride_z
- _kernel.setArg<cl_uint>(idx++, static_cast<unsigned int>(src0->info()->strides_in_bytes()[2]));
-
- // RHS stride_z (not used if _export_to_cl_image == true)
- _kernel.setArg<cl_uint>(idx++, static_cast<unsigned int>(src1->info()->strides_in_bytes()[2]));
-
- // Bias stride_z (if _add_bias == true)
- if(_add_bias)
- {
- _kernel.setArg<cl_uint>(idx++, static_cast<unsigned int>(src2->info()->strides_in_bytes()[2]));
- }
-
- // dst stride_z
- _kernel.setArg<cl_uint>(idx++, static_cast<unsigned int>(dst->info()->strides_in_bytes()[2]));
-
-        // Cross-plane padding (if _reinterpret_output_as_3d == true)
- if(_reinterpret_output_as_3d)
- {
- _kernel.setArg<cl_uint>(idx++, static_cast<unsigned int>(total_cross_plane_pad));
- }
-
- // Dispatch kernel
- enqueue(queue, *this, slice, lws_hint(), _use_dummy_work_items);
- }
- while(window.slide_window_slice_3D(slice));
-}
-} // namespace kernels
-} // namespace opencl
-} // namespace arm_compute
\ No newline at end of file
diff --git a/src/core/gpu/cl/kernels/ClGemmMatrixMultiplyReshapedKernel.h b/src/core/gpu/cl/kernels/ClGemmMatrixMultiplyReshapedKernel.h
deleted file mode 100644
index ab648f15ae..0000000000
--- a/src/core/gpu/cl/kernels/ClGemmMatrixMultiplyReshapedKernel.h
+++ /dev/null
@@ -1,113 +0,0 @@
-/*
- * Copyright (c) 2018-2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_CL_GEMM_MATRIXMULTIPLY_RESHAPED_KERNEL_H
-#define ARM_COMPUTE_CL_GEMM_MATRIXMULTIPLY_RESHAPED_KERNEL_H
-
-#include "src/core/common/Macros.h"
-#include "src/core/gpu/cl/ClCompileContext.h"
-#include "src/core/gpu/cl/IClKernel.h"
-
-#include "arm_compute/core/KernelDescriptors.h"
-
-namespace arm_compute
-{
-namespace opencl
-{
-namespace kernels
-{
-/** OpenCL kernel to multiply matrices when both the input matrices LHS (src0) and RHS (src1) have been reshaped
- *
- * @note The input matrices @p src0 and @p src1 must be reshaped through:
- * - @ref ClGemmReshapeLhsMatrixKernel
- * - @ref ClGemmReshapeRhsMatrixKernel
- */
-class ClGemmMatrixMultiplyReshapedKernel : public IClKernel
-{
-public:
- ClGemmMatrixMultiplyReshapedKernel() = default;
- ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(ClGemmMatrixMultiplyReshapedKernel);
- /** Initialise the kernel's input and output.
- *
- * @note The F16 computation also supports mixed precision through the gemm_info.fp_mixed_precision flag.
- * Mixed precision combines different floating precisions during the computation, in particular, F32 for the accumulations and F16 for the
- * multiplications. i.e. float c = (half)a * (half)b
- *
- * @note If rhs_info.export_to_cl_image = true, this OpenCL kernel will fetch the RHS data using the OpenCL read_image built-in function.
- * Reading from the OpenCL image object can increase the performance. However, since the OpenCL image object is created importing the OpenCL buffer,
- * the following conditions are required:
- * -# rhs_info.n0 can only be 4, 8 and 16
- * -# rhs_info.k0 can only be 4, 8 and 16
- * -# Data type can only be F32
- * -# The platform should support the OpenCL cl_khr_image2d_from_buffer extension
- * -# The stride Y for the src1 should satisfy the OpenCL pitch alignment requirement
- * -# src1 width should be less than or equal to (CL_DEVICE_IMAGE2D_MAX_WIDTH * 4)
- * -# src1 (height * depth) should be less than or equal to CL_DEVICE_IMAGE2D_MAX_HEIGHT
- *
- * @param[in] compile_context The compile context to be used.
- * @param[in]  src0            Input tensor containing the LHS reshaped matrix. Data type supported: F16/F32 (only F32 if rhs_info.export_to_cl_image = true). The number of dimensions for the LHS matrix must be less than or equal to 4
- * @param[in]  src1            Input tensor containing the RHS reshaped matrix. Data type supported: same as @p src0. The number of dimensions for the RHS matrix must be less than or equal to 3
- * @param[in] src2 Input tensor containing the bias matrix. Data type supported: same as @p src0.
- * @param[out] dst dst tensor to store the result of matrix multiplication. Data type supported: same as @p src0
- * @param[in] alpha Weight of the matrix product
- * @param[in] beta Weight of the matrix bias
- * @param[in] lhs_info LHS matrix information used for reshaping the src0 tensor. Only the following values are supported:
- * lhs_info.m0: 2,3,4,5,6,7,8
- * lhs_info.k0: 2,3,4,8,16
- * lhs_info.transpose: false
- * @param[in] rhs_info RHS matrix information used for reshaping the src1 tensor. Only the following values are supported:
- * rhs_info.n0: 2,3,4,8,16 (only 4, 8 and 16 if rhs_info.export_to_cl_image = true)
- * rhs_info.k0: 2,3,4,8,16 (only 4, 8 and 16 if rhs_info.export_to_cl_image = true)
- * rhs_info.transpose: true
- * @param[in] gemm_info GEMM information used to retrieve the original dimensions of the input matrices
- *
- * @note lhs_info.k0 must be equal to rhs_info.k0
- */
- void configure(const ClCompileContext &compile_context,
- ITensorInfo *src0, ITensorInfo *src1, ITensorInfo *src2, ITensorInfo *dst, float alpha, float beta,
- const GEMMLHSMatrixInfo &lhs_info, const GEMMRHSMatrixInfo &rhs_info, const GEMMKernelInfo &gemm_info);
- /** Static function to check if given info will lead to a valid configuration
- *
- * Similar to @ref ClGemmMatrixMultiplyReshapedKernel::configure()
- *
- * @return a status
- */
- static Status validate(const ITensorInfo *src0, const ITensorInfo *src1, const ITensorInfo *src2, const ITensorInfo *dst, float alpha, float beta, const GEMMLHSMatrixInfo &lhs_info,
- const GEMMRHSMatrixInfo &rhs_info,
- const GEMMKernelInfo &gemm_info);
-
- // Inherited methods overridden:
- void run_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue) override;
-
-private:
- bool _slide_matrix_b{ true };
- bool _reinterpret_output_as_3d{ false };
- bool _use_dummy_work_items{ false };
- bool _add_bias{ false };
- bool _export_to_cl_image{ false };
- unsigned int _k{ 1 };
-};
-} // namespace kernels
-} // namespace opencl
-} // namespace arm_compute
-#endif /* ARM_COMPUTE_CL_GEMM_MATRIXMULTIPLY_RESHAPED_KERNEL_H */
\ No newline at end of file
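A hypothetical validation sketch for the reshaped kernel above. The unreshaped shapes and block parameters are illustrative; the reshaped LHS/RHS infos are derived with the same shape-calculator helpers the kernel's own validate() uses.

// Hypothetical sketch: derive the reshaped LHS/RHS tensor infos and validate the kernel.
// Shapes and block sizes are illustrative assumptions.
#include "arm_compute/core/KernelDescriptors.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "src/core/gpu/cl/kernels/ClGemmMatrixMultiplyReshapedKernel.h"

using namespace arm_compute;
using namespace arm_compute::misc::shape_calculator;
using namespace arm_compute::opencl::kernels;

Status validate_reshaped_gemm()
{
    const unsigned int m = 64, n = 32, k = 128;

    const TensorInfo lhs(TensorShape(k, m), 1, DataType::F32); // unreshaped LHS: K x M
    const TensorInfo rhs(TensorShape(n, k), 1, DataType::F32); // unreshaped RHS: N x K
    const TensorInfo dst(TensorShape(n, m), 1, DataType::F32);

    GEMMLHSMatrixInfo lhs_info;
    lhs_info.m0        = 4;
    lhs_info.k0        = 4;
    lhs_info.v0        = 2;     // LHS interleave factor
    lhs_info.transpose = false; // the kernel requires lhs_info.transpose != rhs_info.transpose

    GEMMRHSMatrixInfo rhs_info;
    rhs_info.n0        = 4;
    rhs_info.k0        = 4;     // must match lhs_info.k0
    rhs_info.h0        = 2;     // RHS interleave factor
    rhs_info.transpose = true;

    GEMMKernelInfo gemm_info;
    gemm_info.m = m;
    gemm_info.n = n;
    gemm_info.k = k;

    // The kernel consumes the reshaped tensors produced by ClGemmReshapeLhsMatrixKernel / ClGemmReshapeRhsMatrixKernel
    const TensorInfo lhs_reshaped = lhs.clone()->set_tensor_shape(compute_lhs_reshaped_shape(lhs, lhs_info));
    const TensorInfo rhs_reshaped = rhs.clone()->set_tensor_shape(compute_rhs_reshaped_shape(rhs, rhs_info));

    return ClGemmMatrixMultiplyReshapedKernel::validate(&lhs_reshaped, &rhs_reshaped, nullptr, &dst, 1.f, 0.f, lhs_info, rhs_info, gemm_info);
}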
diff --git a/src/core/gpu/cl/kernels/ClGemmMatrixMultiplyReshapedOnlyRhsKernel.cpp b/src/core/gpu/cl/kernels/ClGemmMatrixMultiplyReshapedOnlyRhsKernel.cpp
deleted file mode 100644
index 4eea2c6f76..0000000000
--- a/src/core/gpu/cl/kernels/ClGemmMatrixMultiplyReshapedOnlyRhsKernel.cpp
+++ /dev/null
@@ -1,438 +0,0 @@
-/*
- * Copyright (c) 2019-2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "src/core/gpu/cl/kernels/ClGemmMatrixMultiplyReshapedOnlyRhsKernel.h"
-
-#include "arm_compute/core/CL/ICLTensor.h"
-#include "arm_compute/core/Helpers.h"
-#include "arm_compute/core/TensorInfo.h"
-#include "arm_compute/core/Utils.h"
-#include "arm_compute/core/utils/misc/ShapeCalculator.h"
-#include "src/core/AccessWindowStatic.h"
-#include "src/core/CL/CLUtils.h"
-#include "src/core/CL/CLValidate.h"
-#include "src/core/gpu/cl/kernels/gemm/ClGemmHelpers.h"
-#include "src/core/helpers/AutoConfiguration.h"
-#include "src/core/helpers/WindowHelpers.h"
-#include "src/core/utils/helpers/float_ops.h"
-#include "support/Cast.h"
-#include "support/StringSupport.h"
-
-namespace arm_compute
-{
-namespace opencl
-{
-namespace kernels
-{
-namespace
-{
-using ElementsProcessed = Steps;
-
-Status validate_arguments(const ITensorInfo *src0, const ITensorInfo *src1, const ITensorInfo *src2, const ITensorInfo *dst, float alpha, float beta,
- const GEMMLHSMatrixInfo &lhs_info, const GEMMRHSMatrixInfo &rhs_info, const GEMMKernelInfo &gemm_info)
-{
- ARM_COMPUTE_UNUSED(alpha);
- ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src0, src1, dst);
- ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(src0);
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src0, 1, DataType::F16, DataType::F32);
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src0, src1);
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(src0->num_dimensions() > 4, "The number of dimensions for the LHS matrix must be <= 4");
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(src1->num_dimensions() > 3, "The number of dimensions for the RHS matrix must be <= 3");
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(lhs_info.m0 < 1 || lhs_info.m0 > 8, "Only 1,2,3,4,5,6,7,8 are supported for m0");
- ARM_COMPUTE_RETURN_ERROR_ON(rhs_info.k0 > 16 || rhs_info.k0 < 2);
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(((rhs_info.k0 & (rhs_info.k0 - 1)) && rhs_info.k0 != 3), "Only 2,3,4,8,16 are supported for k0");
- ARM_COMPUTE_RETURN_ERROR_ON(rhs_info.n0 > 16 || rhs_info.n0 < 2);
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(((rhs_info.n0 & (rhs_info.n0 - 1)) && rhs_info.n0 != 3), "Only 2,3,4,8,16 are supported for n0");
- ARM_COMPUTE_RETURN_ERROR_ON_MSG((gemm_info.reinterpret_input_as_3d || gemm_info.depth_output_gemm3d != 0) && (src2 != nullptr)
- && (!gemm_info.broadcast_bias),
- "Bias addition only supported with broadcast mode in case the input or dst has to be reinterpreted as 3D");
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(gemm_info.fp_mixed_precision, "Mixed precision not supported");
- ARM_COMPUTE_RETURN_ON_ERROR(gemm::validate_image2d_support_on_rhs(*src1, rhs_info));
-
- const unsigned int m = gemm_info.m;
- const unsigned int n = gemm_info.n;
- const unsigned int k = gemm_info.k;
-
- TensorShape tensor_shape1{ src1->tensor_shape() };
- tensor_shape1.set(0, n);
- tensor_shape1.set(1, k);
-
- if(src2 != nullptr && !(helpers::float_ops::is_zero(beta)))
- {
- const unsigned int src2_dim0 = src2->dimension(0);
- const unsigned int src2_dim1 = src2->dimension(1);
-
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src2, src0);
- if(gemm_info.broadcast_bias)
- {
- ARM_COMPUTE_RETURN_ERROR_ON_MSG((src2_dim1 != 1 || src2_dim0 != n), "Incorrect dimension of bias matrix which is to be broadcasted");
- }
- else
- {
- ARM_COMPUTE_RETURN_ERROR_ON_MSG((src2_dim0 != n || src2_dim1 != m), "Incorrect dimension of bias matrix");
- }
- }
-
- const TensorInfo tensor_info1 = src1->clone()->set_tensor_shape(tensor_shape1);
-
- const TensorInfo tensor_info_reshaped1 = src1->clone()->set_tensor_shape(misc::shape_calculator::compute_rhs_reshaped_shape(tensor_info1, rhs_info));
-
- ARM_COMPUTE_RETURN_ERROR_ON(src0->dimension(0) != k);
- if(gemm_info.reinterpret_input_as_3d)
- {
- ARM_COMPUTE_RETURN_ERROR_ON(src0->dimension(1) * src0->dimension(2) != m);
- }
- else
- {
- ARM_COMPUTE_RETURN_ERROR_ON(src0->dimension(1) != m);
- }
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(src1, &tensor_info_reshaped1);
-
- if(dst->total_size() != 0)
- {
- const TensorInfo tensor_info_dst = dst->clone()->set_tensor_shape(misc::shape_calculator::compute_mm_shape(*src0, *src1, gemm_info));
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(dst, &tensor_info_dst);
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src0, dst);
- }
-
- return Status{};
-}
-
-std::pair<Status, Window> validate_and_configure_window(ITensorInfo *src0, ITensorInfo *src1, ITensorInfo *src2, ITensorInfo *dst, const GEMMLHSMatrixInfo &lhs_info,
- const GEMMRHSMatrixInfo &rhs_info, const GEMMKernelInfo &gemm_info, ElementsProcessed &num_elements_processed)
-{
- unsigned int &num_elems_processed_per_iteration_x = num_elements_processed[0];
- unsigned int &num_elems_processed_per_iteration_y = num_elements_processed[1];
- bool reinterpret_input_as_3d = gemm_info.reinterpret_input_as_3d;
- bool reinterpret_output_as_3d = gemm_info.depth_output_gemm3d != 0;
-
- Window win{};
- Window win_out{};
- bool window_changed = false;
-
- // In case both input and dst have to be reinterpreted as 3D tensors,
- // force reinterpret_input_as_3d and reinterpret_output_as_3d to be false.
- // This approach should only be used when the input/dst tensors have pad on the y direction
- if((reinterpret_input_as_3d == reinterpret_output_as_3d) && gemm_info.has_pad_y)
- {
- reinterpret_output_as_3d = false;
- }
-
- // dst tensor auto initialization if not yet initialized
- auto_init_if_empty(*dst, src0->clone()->set_tensor_shape(misc::shape_calculator::compute_mm_shape(*src0, *src1, gemm_info)));
-
- TensorInfo tmp_info(*dst);
-
- if(reinterpret_output_as_3d)
- {
- // Since the dst tensor has to be reinterpreted as 3D and the execute window is based on a 2D GEMM,
- // the window needs to be constructed on the 2D collapsed version of the tensor
- TensorShape tmp_shape(dst->tensor_shape());
- tmp_shape.collapse(2U, 1U);
- tmp_info.set_tensor_shape(tmp_shape);
- }
-
- // Configure kernel window
- num_elems_processed_per_iteration_x = rhs_info.n0;
- num_elems_processed_per_iteration_y = lhs_info.m0;
-
- win = calculate_max_window(tmp_info, Steps(num_elems_processed_per_iteration_x, num_elems_processed_per_iteration_y));
- win_out = calculate_max_window(*dst, Steps(num_elems_processed_per_iteration_x, num_elems_processed_per_iteration_y));
-
- if(src2 != nullptr)
- {
- const int bias_processed_per_iteration_x = num_elems_processed_per_iteration_x;
-
- AccessWindowStatic src2_access(src2, 0, 0,
- ceil_to_multiple(src2->dimension(0), bias_processed_per_iteration_x),
- src2->dimension(1));
-
- window_changed = update_window_and_padding(win, src2_access);
- }
-
- // Collapse along the Z direction
- // This collapse needs to be here in order to tune the Z dimension of LWS
- Window collapsed = win;
- const unsigned int dimension_to_collapse = std::min(static_cast<unsigned int>(dst->num_dimensions()), 2u);
- collapsed = win.collapse(win, dimension_to_collapse);
-
- Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
- return std::make_pair(err, collapsed);
-}
-} // namespace
-
-void ClGemmMatrixMultiplyReshapedOnlyRhsKernel::configure(const CLCompileContext &compile_context,
- ITensorInfo *src0, ITensorInfo *src1, ITensorInfo *src2, ITensorInfo *dst, float alpha, float beta,
- const GEMMLHSMatrixInfo &lhs_info, const GEMMRHSMatrixInfo &rhs_info, const GEMMKernelInfo &gemm_info)
-{
- ARM_COMPUTE_ERROR_ON_NULLPTR(src0, src1, dst);
-
- ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(src0, src1, src2, dst, alpha, beta, lhs_info, rhs_info, gemm_info));
-
- _reinterpret_input_as_3d = gemm_info.reinterpret_input_as_3d;
- _reinterpret_output_as_3d = gemm_info.depth_output_gemm3d != 0;
- _use_dummy_work_items = preferred_dummy_work_items_support(CLKernelLibrary::get().get_device());
- _add_bias = src2 != nullptr;
- _export_to_cl_image = rhs_info.export_to_cl_image;
- _has_pad_y = gemm_info.has_pad_y;
-
- auto padding_info = get_padding_info({ src0, src1, dst });
-
- // In case both input and dst have to be reinterpreted as 3D tensors,
- // force reinterpret_input_as_3d and reinterpret_output_as_3d to be false.
- if((_reinterpret_input_as_3d == _reinterpret_output_as_3d) && _has_pad_y)
- {
- _reinterpret_input_as_3d = false;
- _reinterpret_output_as_3d = false;
- }
-
- // Check if we need to slide the matrix B
- const unsigned int num_dimensions_src0 = src0->num_dimensions();
- _slide_matrix_b = (src1->num_dimensions() >= num_dimensions_src0);
-
- ElementsProcessed num_elements_processed{};
-
- // Configure kernel window
- auto win_config = validate_and_configure_window(src0, src1, src2, dst, lhs_info, rhs_info, gemm_info, num_elements_processed);
- ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
- ICLKernel::configure_internal(win_config.second);
-
- // If _reinterpret_input_as_3d = reinterpret_output_as_3d = true,
- // we will dispatch a batched-GEMM to reduce the complexity of the address calculation within the OpenCL kernel.
- // This means that the actual m used by the kernel is given by dst->dimension(1) and not by gemm_info.m
- const unsigned int internal_m = _reinterpret_output_as_3d ? gemm_info.m : dst->dimension(1);
-
- // These variables are used only if gemm_info.has_pad_y == true
- const unsigned int h_gemm_3d = _reinterpret_output_as_3d ? dst->dimension(1) : src0->dimension(1);
- const unsigned int d_gemm_3d = _reinterpret_output_as_3d ? dst->dimension(2) : src0->dimension(2);
-
- // Shrink M0 to be always <= M (internal_m) to prevent out-of-bounds reads.
- // NOTE: This might have implications on heuristics and performance
- const unsigned int internal_m0 = std::min(internal_m, lhs_info.m0);
-
- // Calculate partial (store instead of load) M0 and partial N0 for the partial blocks at the end of a row/column if any. This is to avoid padding.
- const unsigned int partial_store_m0 = internal_m % internal_m0;
- const unsigned int partial_store_n0 = gemm_info.n % rhs_info.n0;
-
- // Create build options
- CLBuildOptions build_opts;
- build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(src0->data_type()));
- build_opts.add_option_if(!(helpers::float_ops::is_one(alpha)), "-DALPHA=" + float_to_string_with_full_precision(alpha));
- build_opts.add_option_if(src2 != nullptr, "-DBETA=" + float_to_string_with_full_precision(beta));
- build_opts.add_option_if(helpers::float_ops::is_one(beta), "-DUNIT_BETA");
- build_opts.add_option_if(gemm_info.broadcast_bias, "-DBROADCAST_BIAS");
- build_opts.add_option_if(!_slide_matrix_b, "-DMATRIX_B_DEPTH=" + support::cpp11::to_string(src1->dimension(2)));
- build_opts.add_option_if(rhs_info.interleave, "-DRHS_INTERLEAVE");
- build_opts.add_option_if(_use_dummy_work_items, "-DDUMMY_WORK_ITEMS");
- build_opts.add_option_if(rhs_info.export_to_cl_image, "-DOPENCL_IMAGE_SUPPORT");
- build_opts.add_option("-DRHS_HEIGHT=" + support::cpp11::to_string(src1->dimension(1)));
- build_opts.add_option("-DM=" + support::cpp11::to_string(internal_m));
- build_opts.add_option("-DN=" + support::cpp11::to_string(gemm_info.n));
- build_opts.add_option("-DK=" + support::cpp11::to_string(gemm_info.k));
- build_opts.add_option("-DM0=" + support::cpp11::to_string(internal_m0));
- build_opts.add_option("-DN0=" + support::cpp11::to_string(rhs_info.n0));
- build_opts.add_option("-DK0=" + support::cpp11::to_string(rhs_info.k0));
- build_opts.add_option("-DH0=" + support::cpp11::to_string(rhs_info.h0));
- build_opts.add_option("-DPARTIAL_STORE_M0=" + support::cpp11::to_string(partial_store_m0));
- build_opts.add_option("-DPARTIAL_STORE_N0=" + support::cpp11::to_string(partial_store_n0));
- build_opts.add_option_if(gemm_info.activation_info.enabled(), "-DACTIVATION_TYPE=" + lower_string(string_from_activation_func(gemm_info.activation_info.activation())));
- build_opts.add_option_if(gemm_info.activation_info.enabled(), "-DA_VAL=" + float_to_string_with_full_precision(gemm_info.activation_info.a()));
- build_opts.add_option_if(gemm_info.activation_info.enabled(), "-DB_VAL=" + float_to_string_with_full_precision(gemm_info.activation_info.b()));
- if(_has_pad_y)
- {
- build_opts.add_option_if(_reinterpret_input_as_3d, "-DREINTERPRET_INPUT_AS_3D");
- build_opts.add_option_if(_reinterpret_output_as_3d, "-DREINTERPRET_OUTPUT_AS_3D");
- build_opts.add_option_if(_reinterpret_input_as_3d || _reinterpret_output_as_3d, "-DHEIGHT_GEMM3D=" + support::cpp11::to_string(h_gemm_3d));
- build_opts.add_option_if(_reinterpret_input_as_3d || _reinterpret_output_as_3d, "-DDEPTH_GEMM3D=" + support::cpp11::to_string(d_gemm_3d));
- }
-
- std::string kernel_name("gemm_mm_reshaped_only_rhs_");
- kernel_name += rhs_info.transpose ? "t" : "nt";
- kernel_name += rhs_info.export_to_cl_image ? "_texture" : "";
-
- // Create kernel
- _kernel = create_kernel(compile_context, kernel_name, build_opts.options());
-
- // Set config_id for enabling LWS tuning
- _config_id = kernel_name;
- _config_id += "_";
- _config_id += (_has_pad_y ? "" : "no_pad_y_");
- _config_id += (_add_bias ? "add_bias_" : "");
- _config_id += (gemm_info.broadcast_bias ? "broadcast_bias_" : "");
- _config_id += (_reinterpret_input_as_3d ? "3di_" : "");
- _config_id += (_reinterpret_output_as_3d ? "3do_" : "");
- _config_id += (gemm_info.activation_info.enabled() ? "fused_activation_" : "");
- _config_id += lower_string(string_from_data_type(src0->data_type()));
- _config_id += "_";
- _config_id += support::cpp11::to_string(dst->dimension(1));
- _config_id += "_";
- _config_id += support::cpp11::to_string(dst->dimension(0));
- _config_id += "_";
- _config_id += support::cpp11::to_string(gemm_info.k);
- _config_id += "_";
- _config_id += support::cpp11::to_string(dst->dimension(2));
- _config_id += "_";
- _config_id += support::cpp11::to_string(lhs_info.m0);
- _config_id += "_";
- _config_id += support::cpp11::to_string(rhs_info.n0);
- _config_id += "_";
- _config_id += support::cpp11::to_string(rhs_info.k0);
- _config_id += "_";
- _config_id += support::cpp11::to_string(rhs_info.h0);
- _config_id += "_";
- _config_id += support::cpp11::to_string(rhs_info.interleave);
-
- ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
-}
-
-Status ClGemmMatrixMultiplyReshapedOnlyRhsKernel::validate(const ITensorInfo *src0, const ITensorInfo *src1, const ITensorInfo *src2, const ITensorInfo *dst, float alpha, float beta,
- const GEMMLHSMatrixInfo &lhs_info,
- const GEMMRHSMatrixInfo &rhs_info, const GEMMKernelInfo &gemm_info)
-{
- ElementsProcessed num_elements_processed{};
- ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(src0, src1, src2, dst, alpha, beta, lhs_info, rhs_info, gemm_info));
- ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(src0->clone().get(),
- src1->clone().get(),
- src2 != nullptr ? src2->clone().get() : nullptr,
- dst->clone().get(),
- lhs_info,
- rhs_info,
- gemm_info,
- num_elements_processed)
- .first);
-
- return Status{};
-}
-
-void ClGemmMatrixMultiplyReshapedOnlyRhsKernel::run_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue)
-{
- ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
- ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window);
-
- const auto src0 = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC_0));
- const auto src1 = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC_1));
- const auto src2 = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC_2));
- auto dst = utils::cast::polymorphic_downcast<ICLTensor *>(tensors.get_tensor(TensorType::ACL_DST));
-
- ARM_COMPUTE_ERROR_ON_NULLPTR(src0, src1, dst);
- ARM_COMPUTE_ERROR_ON(_add_bias && src2 == nullptr);
-
- if(src1->info()->num_dimensions() < 3)
- {
- // The stride_z for matrix B must be zero if we do not slice
- ARM_COMPUTE_ERROR_ON(src1->info()->strides_in_bytes()[3] != 0);
- }
-
- const size_t lhs_idx_batch_size = _reinterpret_input_as_3d && !_has_pad_y ? 3u : 2u;
- const size_t rhs_idx_batch_size = 2u;
- const size_t bia_idx_batch_size = 2u;
- const size_t out_idx_batch_size = _reinterpret_output_as_3d && !_has_pad_y ? 3u : 2u;
-
- Window slice = window.first_slice_window_3D();
- Window slice_matrix_b = slice;
-
- slice_matrix_b.set(Window::DimX, Window::Dimension(0, 1, 1));
- slice_matrix_b.set(Window::DimY, Window::Dimension(0, 1, 1));
-
- // Get cross plane pads
- const unsigned int total_cross_plane_pad_lhs = src0->info()->padding().top + src0->info()->padding().bottom;
- const unsigned int total_cross_plane_pad_out = dst->info()->padding().top + dst->info()->padding().bottom;
-
- // The execution should fail if we try to run with has_pad_y = false but we have padding in either the LHS or DST tensor
- ARM_COMPUTE_ERROR_ON(!_has_pad_y && ((total_cross_plane_pad_lhs != 0) || (total_cross_plane_pad_out != 0)));
-
- cl::Image2D src1_image2d;
-
- if(_export_to_cl_image)
- {
- const TensorShape shape2d(src1->info()->dimension(0) / 4, src1->info()->dimension(1) * src1->info()->dimension(2));
- const size_t image_row_pitch = src1->info()->strides_in_bytes()[1];
-
- src1_image2d = create_image2d_from_buffer(CLKernelLibrary::get().context(), src1->cl_buffer(), shape2d, src1->info()->data_type(), image_row_pitch);
- }
-
- do
- {
- Window slice_b = slice;
- // Don't slice matrix B along the z dimension if matrix B has just 2 dimensions and matrix A more than 2
- // This scenario can happen when the matrix multiplication is used to perform a convolution operation
- if(!_slide_matrix_b)
- {
- slice_b = slice_matrix_b;
- }
-
- unsigned int idx = 0;
-
- // LHS buffer
- add_2D_tensor_argument(idx, src0, slice);
-
- // RHS buffer or RHS OpenCL image (_export_to_cl_image == true)
- if(_export_to_cl_image)
- {
- _kernel.setArg(idx++, src1_image2d);
- }
- else
- {
- add_2D_tensor_argument(idx, src1, slice_b);
- }
-
- // Bias buffer (_add_bias == true)
- add_2D_tensor_argument_if(_add_bias, idx, src2, slice);
-
- // dst buffer
- add_2D_tensor_argument(idx, dst, slice);
-
- // LHS stride_z
- _kernel.setArg<cl_uint>(idx++, static_cast<unsigned int>(src0->info()->strides_in_bytes()[lhs_idx_batch_size]));
-
- // RHS stride_z (not used if _export_to_cl_image == true)
- _kernel.setArg<cl_uint>(idx++, static_cast<unsigned int>(src1->info()->strides_in_bytes()[rhs_idx_batch_size]));
-
- // Bias stride_z (if _add_bias == true)
- if(_add_bias)
- {
- _kernel.setArg<cl_uint>(idx++, static_cast<unsigned int>(src2->info()->strides_in_bytes()[bia_idx_batch_size]));
- }
-
- // dst stride_z
- _kernel.setArg<cl_uint>(idx++, static_cast<unsigned int>(dst->info()->strides_in_bytes()[out_idx_batch_size]));
-
-        // Cross-plane padding (if _reinterpret_input_as_3d == true)
- if(_reinterpret_input_as_3d && _has_pad_y)
- {
- _kernel.setArg<cl_uint>(idx++, static_cast<unsigned int>(total_cross_plane_pad_lhs));
- }
-
-        // Cross-plane padding (if _reinterpret_output_as_3d == true)
- if(_reinterpret_output_as_3d && _has_pad_y)
- {
- _kernel.setArg<cl_uint>(idx++, static_cast<unsigned int>(total_cross_plane_pad_out));
- }
-
- enqueue(queue, *this, slice, lws_hint(), _use_dummy_work_items);
- }
- while(window.slide_window_slice_3D(slice));
-}
-} // namespace kernels
-} // namespace opencl
-} // namespace arm_compute
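The run_op removed above imports the RHS buffer as an OpenCL image when _export_to_cl_image is set. The sketch below is a hypothetical caller-side pre-check of that path, approximating the conditions listed in the kernel documentation; the function name is an assumption, and the authoritative library-side check is gemm::validate_image2d_support_on_rhs().

// Hypothetical pre-check sketch (not the library's own helper): estimate whether
// rhs_info.export_to_cl_image is usable for a given reshaped RHS tensor.
#include "arm_compute/core/CL/CLHelpers.h"
#include "arm_compute/core/CL/CLKernelLibrary.h"
#include "arm_compute/core/ITensorInfo.h"
#include "arm_compute/core/KernelDescriptors.h"

using namespace arm_compute;

bool rhs_export_to_cl_image_feasible(const ITensorInfo &rhs_reshaped, const GEMMRHSMatrixInfo &rhs_info)
{
    const cl::Device &device = CLKernelLibrary::get().get_device();

    const bool blocking_ok = (rhs_info.n0 == 4 || rhs_info.n0 == 8 || rhs_info.n0 == 16)
                          && (rhs_info.k0 == 4 || rhs_info.k0 == 8 || rhs_info.k0 == 16);
    const bool type_ok      = rhs_reshaped.data_type() == DataType::F32;
    const bool extension_ok = image2d_from_buffer_supported(device) && get_cl_image_pitch_alignment(device) != 0;

    // One RGBA texel packs four F32 values, hence the /4 on the image width
    const size_t image_w = rhs_reshaped.dimension(0) / 4;
    const size_t image_h = rhs_reshaped.dimension(1) * rhs_reshaped.dimension(2);
    const bool   size_ok = image_w <= device.getInfo<CL_DEVICE_IMAGE2D_MAX_WIDTH>()
                        && image_h <= device.getInfo<CL_DEVICE_IMAGE2D_MAX_HEIGHT>();

    return blocking_ok && type_ok && extension_ok && size_ok;
}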
diff --git a/src/core/gpu/cl/kernels/ClGemmMatrixMultiplyReshapedOnlyRhsKernel.h b/src/core/gpu/cl/kernels/ClGemmMatrixMultiplyReshapedOnlyRhsKernel.h
deleted file mode 100644
index ff6c391e15..0000000000
--- a/src/core/gpu/cl/kernels/ClGemmMatrixMultiplyReshapedOnlyRhsKernel.h
+++ /dev/null
@@ -1,104 +0,0 @@
-/*
- * Copyright (c) 2019-2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_CL_GEMM_MATRIXMULTIPLY_RESHAPED_ONLY_RHS_KERNEL_H
-#define ARM_COMPUTE_CL_GEMM_MATRIXMULTIPLY_RESHAPED_ONLY_RHS_KERNEL_H
-
-#include "src/core/common/Macros.h"
-#include "src/core/gpu/cl/ClCompileContext.h"
-#include "src/core/gpu/cl/IClKernel.h"
-
-#include "arm_compute/core/KernelDescriptors.h"
-
-namespace arm_compute
-{
-namespace opencl
-{
-namespace kernels
-{
-/** OpenCL kernel to multiply matrices when only the input matrix RHS (src1) has been reshaped
- *
- * @note The input matrix src1 must be reshaped through @ref ClGemmReshapeRhsMatrixKernel
- */
-class ClGemmMatrixMultiplyReshapedOnlyRhsKernel : public IClKernel
-{
-public:
- ClGemmMatrixMultiplyReshapedOnlyRhsKernel() = default;
- ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(ClGemmMatrixMultiplyReshapedOnlyRhsKernel);
- /** Initialise the kernel's input and output.
- *
- * @note If rhs_info.export_to_cl_image = true, this OpenCL kernel will fetch the RHS data using the OpenCL read_image built-in function.
- * Reading from the OpenCL image object can increase the performance. However, since the OpenCL image object is created importing the OpenCL buffer,
- * the following conditions are required:
- * -# rhs_info.n0 can only be 4, 8 and 16
- * -# rhs_info.k0 can only be 4, 8 and 16
- * -# Data type can only be F32
- * -# The platform should support the OpenCL cl_khr_image2d_from_buffer extension
- * -# The stride Y for the src1 should satisfy the OpenCL pitch alignment requirement
- * -# src1 width should be less than or equal to (CL_DEVICE_IMAGE2D_MAX_WIDTH * 4)
- * -# src1 (height * depth) should be less than or equal to CL_DEVICE_IMAGE2D_MAX_HEIGHT
- *
- * @param[in] compile_context The compile context to be used.
- * @param[in] src0 Input tensor containing the LHS matrix. Data type supported: F16/F32 (only F32 if rhs_info.export_to_cl_image = true).
- * The number of dimensions for the LHS matrix must be less than or equal to 4.
- * @param[in] src1 Input tensor containing the RHS reshaped matrix. Data type supported: same as @p src0. The number of dimensions for the RHS matrix must be less than or equal to 3.
- * @param[in] src2 Input tensor containing the bias matrix. Data type supported: same as @p src0.
- * @param[out] dst Output tensor to store the result of matrix multiplication. Data type supported: same as @p src0
- * @param[in] alpha Weight of the matrix product
- * @param[in] beta Weight of the matrix bias
- * @param[in] lhs_info LHS matrix information used to retrieve the number of rows to be processed by each thread. Only the following values are supported:
- * lhs_info.m0: 1,2,3,4,5,6,7,8
- * @param[in] rhs_info RHS matrix information used for reshaping the src1 tensor. Only the following values are supported:
- * rhs_info.k0: 2,3,4,8,16
- * rhs_info.n0: 2,3,4,8,16
- * rhs_info.transpose: true,false
- * @param[in] gemm_info GEMM information used to retrieve the original dimensions of the input matrices
- */
- void configure(const ClCompileContext &compile_context,
- ITensorInfo *src0, ITensorInfo *src1, ITensorInfo *src2, ITensorInfo *dst, float alpha, float beta,
- const GEMMLHSMatrixInfo &lhs_info, const GEMMRHSMatrixInfo &rhs_info, const GEMMKernelInfo &gemm_info);
- /** Static function to check if given info will lead to a valid configuration
- *
- * Similar to @ref ClGemmMatrixMultiplyReshapedOnlyRhsKernel::configure()
- *
- * @return a status
- */
- static Status validate(const ITensorInfo *src0, const ITensorInfo *src1, const ITensorInfo *src2, const ITensorInfo *dst, float alpha, float beta,
- const GEMMLHSMatrixInfo &lhs_info, const GEMMRHSMatrixInfo &rhs_info, const GEMMKernelInfo &gemm_info);
-
- // Inherited methods overridden:
- void run_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue) override;
-
-private:
- bool _slide_matrix_b{ true };
- bool _reinterpret_input_as_3d{ false };
- bool _reinterpret_output_as_3d{ false };
- bool _use_dummy_work_items{ false };
- bool _add_bias{ false };
- bool _export_to_cl_image{ false };
- bool _has_pad_y{ false };
-};
-} // namespace kernels
-} // namespace opencl
-} // namespace arm_compute
-#endif /* ARM_COMPUTE_CL_GEMM_MATRIXMULTIPLY_RESHAPED_ONLY_RHS_KERNEL_H */
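
The export_to_cl_image preconditions documented for configure() above boil down to a handful of host-side checks. The sketch below is illustrative only: the helper name is hypothetical, the device-side conditions (extension support, pitch alignment, image size limits) still have to be queried at run time, and the authoritative check in the library is gemm::validate_image2d_support_on_rhs.

#include "arm_compute/core/ITensorInfo.h"
#include "arm_compute/core/Types.h"

// Hypothetical helper: restates the static part of the documented conditions for
// fetching the reshaped RHS through an OpenCL image object (cl_khr_image2d_from_buffer).
inline bool rhs_cl_image_block_sizes_ok(const arm_compute::ITensorInfo      &rhs_reshaped,
                                        const arm_compute::GEMMRHSMatrixInfo &rhs_info)
{
    const bool n0_ok   = (rhs_info.n0 == 4) || (rhs_info.n0 == 8) || (rhs_info.n0 == 16);
    const bool k0_ok   = (rhs_info.k0 == 4) || (rhs_info.k0 == 8) || (rhs_info.k0 == 16);
    const bool type_ok = rhs_reshaped.data_type() == arm_compute::DataType::F32;
    // cl_khr_image2d_from_buffer support, the pitch alignment of src1 and the
    // CL_DEVICE_IMAGE2D_MAX_WIDTH/HEIGHT limits are device properties, not checked here.
    return n0_ok && k0_ok && type_ok;
}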
diff --git a/src/core/gpu/cl/kernels/ClGemmReshapeLhsMatrixKernel.cpp b/src/core/gpu/cl/kernels/ClGemmReshapeLhsMatrixKernel.cpp
deleted file mode 100644
index 98161edfff..0000000000
--- a/src/core/gpu/cl/kernels/ClGemmReshapeLhsMatrixKernel.cpp
+++ /dev/null
@@ -1,219 +0,0 @@
-/*
- * Copyright (c) 2018-2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "src/core/gpu/cl/kernels/ClGemmReshapeLhsMatrixKernel.h"
-
-#include "arm_compute/core/CL/CLHelpers.h"
-#include "arm_compute/core/CL/CLKernelLibrary.h"
-#include "arm_compute/core/CL/ICLTensor.h"
-#include "arm_compute/core/CL/OpenCL.h"
-#include "arm_compute/core/Helpers.h"
-#include "arm_compute/core/TensorInfo.h"
-#include "arm_compute/core/Utils.h"
-#include "arm_compute/core/utils/misc/ShapeCalculator.h"
-#include "src/core/AccessWindowStatic.h"
-#include "src/core/CL/CLValidate.h"
-#include "src/core/helpers/AutoConfiguration.h"
-#include "src/core/helpers/WindowHelpers.h"
-#include "support/Cast.h"
-#include "support/StringSupport.h"
-
-namespace arm_compute
-{
-namespace opencl
-{
-namespace kernels
-{
-namespace
-{
-Status validate_arguments(const ITensorInfo *src, const ITensorInfo *dst, const GEMMLHSMatrixInfo &lhs_info, bool reinterpret_input_as_3d)
-{
- ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, dst);
- ARM_COMPUTE_RETURN_ERROR_ON(lhs_info.m0 == 0);
- ARM_COMPUTE_RETURN_ERROR_ON(lhs_info.k0 == 0);
- ARM_COMPUTE_RETURN_ERROR_ON(lhs_info.v0 == 0);
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(((lhs_info.k0 & (lhs_info.k0 - 1)) && lhs_info.k0 != 3), "Only 2,3,4,8,16 are supported for k0");
- ARM_COMPUTE_RETURN_ERROR_ON(lhs_info.k0 > 16);
- ARM_COMPUTE_RETURN_ERROR_ON(lhs_info.m0 < 2 || lhs_info.m0 > 8);
-
- ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(src);
- ARM_COMPUTE_RETURN_ERROR_ON(src->data_type() == DataType::UNKNOWN);
-
- if(dst->total_size() != 0)
- {
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(dst->tensor_shape(),
- misc::shape_calculator::compute_lhs_reshaped_shape(*src, lhs_info, reinterpret_input_as_3d));
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src, dst);
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_QUANTIZATION_INFO(src, dst);
- }
-
- return Status{};
-}
-
-std::pair<Status, Window> validate_and_configure_window(ITensorInfo *src, ITensorInfo *dst, const GEMMLHSMatrixInfo &lhs_info, bool reinterpret_input_as_3d)
-{
- const unsigned int num_elems_processed_per_iteration_x = lhs_info.k0;
- const unsigned int num_elems_processed_per_iteration_y = lhs_info.m0;
- bool window_changed = false;
-
- TensorInfo tmp_info(*src);
-
- if(reinterpret_input_as_3d)
- {
- // Since the src tensor has to be reinterpreted as 3D and the execute window is based on a 2D interleave,
- // the window needs to be constructed on the 2D collapsed version of the tensor
- TensorShape tmp_shape(src->tensor_shape());
- tmp_shape.collapse(2U, 1U);
- tmp_info.set_tensor_shape(tmp_shape);
- }
-
- // dst auto initialization if not yet initialized
- auto_init_if_empty(*dst, src->clone()->set_tensor_shape(misc::shape_calculator::compute_lhs_reshaped_shape(*src, lhs_info, reinterpret_input_as_3d)));
-
- // Configure window
- Window win = calculate_max_window(tmp_info, Steps(num_elems_processed_per_iteration_x, num_elems_processed_per_iteration_y));
- Window win_in = calculate_max_window(*src, Steps(num_elems_processed_per_iteration_x, num_elems_processed_per_iteration_y));
-
- AccessWindowStatic src_access(src, 0, 0,
- src->dimension(0),
- src->dimension(1));
- AccessWindowStatic dst_access(dst, 0, 0, dst->dimension(0), dst->dimension(1));
-
- window_changed = update_window_and_padding(win_in, src_access) || // window used by the execute_window_loop
- update_window_and_padding(win, dst_access); // window used to update the padding requirements of dst tensor
-
- // Collapse along the Z direction
- // This collapse needs to be here in order to tune the Z dimension of LWS
- Window collapsed = win.collapse(win, Window::DimZ);
-
- Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
- return std::make_pair(err, collapsed);
-}
-} // namespace
-
-void ClGemmReshapeLhsMatrixKernel::configure(const CLCompileContext &compile_context, ITensorInfo *src, ITensorInfo *dst, const GEMMLHSMatrixInfo &lhs_info, bool reinterpret_input_as_3d)
-{
- ARM_COMPUTE_ERROR_ON_NULLPTR(src, dst);
-
- // Perform validate step
- ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(src, dst, lhs_info, reinterpret_input_as_3d));
-
- auto padding_info = get_padding_info({ src });
-
- _reinterpret_input_as_3d = reinterpret_input_as_3d;
-
- const unsigned int src_w = src->dimension(0);
- const unsigned int src_h = _reinterpret_input_as_3d ? src->dimension(1) * src->dimension(2) : src->dimension(1);
- const unsigned int partial_load_m0 = src_h % lhs_info.m0;
- const unsigned int partial_load_k0 = src_w % lhs_info.k0;
-
- // Create build options
- CLBuildOptions build_opts;
- build_opts.add_option("-DM0=" + support::cpp11::to_string(lhs_info.m0));
- build_opts.add_option("-DK0=" + support::cpp11::to_string(lhs_info.k0));
- build_opts.add_option("-DV0=" + support::cpp11::to_string(lhs_info.v0));
- build_opts.add_option("-DSRC_WIDTH=" + support::cpp11::to_string(src_w));
- build_opts.add_option("-DSRC_HEIGHT=" + support::cpp11::to_string(src_h));
- build_opts.add_option_if(lhs_info.interleave, "-DINTERLEAVE");
- build_opts.add_option_if(_reinterpret_input_as_3d, "-DREINTERPRET_INPUT_AS_3D");
- build_opts.add_option_if(_reinterpret_input_as_3d, "-DHEIGHT_GEMM3D=" + support::cpp11::to_string(src->dimension(1)));
- build_opts.add_option_if(_reinterpret_input_as_3d, "-DDEPTH_GEMM3D=" + support::cpp11::to_string(src->dimension(2)));
- build_opts.add_option("-DDATA_TYPE=" + get_cl_unsigned_type_from_element_size(src->element_size()));
- build_opts.add_option("-DPARTIAL_LOAD_M0=" + support::cpp11::to_string(partial_load_m0));
- build_opts.add_option("-DPARTIAL_LOAD_K0=" + support::cpp11::to_string(partial_load_k0));
-
- std::string kernel_name("gemm_reshape_lhs_matrix_");
- kernel_name += lhs_info.transpose ? "t" : "nt";
-
- // Create kernel
- _kernel = create_kernel(compile_context, kernel_name, build_opts.options());
-
- // Configure kernel window
- auto win_config = validate_and_configure_window(src, dst, lhs_info, reinterpret_input_as_3d);
- ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
- ICLKernel::configure_internal(win_config.second);
-
- // Set config_id for enabling LWS tuning
- _config_id = "gemm_reshape_lhs_matrix_";
- _config_id += (_reinterpret_input_as_3d ? "3d_" : "");
- _config_id += lower_string(string_from_data_type(src->data_type()));
- _config_id += "_";
- _config_id += support::cpp11::to_string(dst->dimension(0));
- _config_id += "_";
- _config_id += support::cpp11::to_string(dst->dimension(1));
- _config_id += "_";
- _config_id += support::cpp11::to_string(dst->dimension(2));
- _config_id += "_";
- _config_id += support::cpp11::to_string(lhs_info.m0);
- _config_id += "_";
- _config_id += support::cpp11::to_string(lhs_info.k0);
- _config_id += "_";
- _config_id += support::cpp11::to_string(lhs_info.v0);
- _config_id += "_";
- _config_id += support::cpp11::to_string(lhs_info.interleave);
- _config_id += "_";
- _config_id += support::cpp11::to_string(lhs_info.transpose);
-
- ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
-}
-
-Status ClGemmReshapeLhsMatrixKernel::validate(const ITensorInfo *src, const ITensorInfo *dst, const GEMMLHSMatrixInfo &lhs_info, bool reinterpret_input_as_3d)
-{
- ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(src, dst, lhs_info, reinterpret_input_as_3d));
- ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(src->clone().get(), dst->clone().get(), lhs_info, reinterpret_input_as_3d).first);
-
- return Status{};
-}
-
-void ClGemmReshapeLhsMatrixKernel::run_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue)
-{
- ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
- ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window);
-
- const auto src = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC));
- auto dst = utils::cast::polymorphic_downcast<ICLTensor *>(tensors.get_tensor(TensorType::ACL_DST));
-
- ARM_COMPUTE_ERROR_ON_NULLPTR(src, dst);
-
- Window slice = window.first_slice_window_3D();
-
- if(_reinterpret_input_as_3d)
- {
- // Pass bottom paddings to the kernel if the src has to be reinterpreted as 3D tensor
- const unsigned int idx0 = 2 * num_arguments_per_3D_tensor();
- const unsigned int total_cross_plane_pad = src->info()->padding().top + src->info()->padding().bottom;
- _kernel.setArg<cl_uint>(idx0, static_cast<unsigned int>(total_cross_plane_pad));
- }
-
- do
- {
- unsigned int idx = 0;
- add_3D_tensor_argument(idx, src, slice);
- add_3D_tensor_argument(idx, dst, slice);
- enqueue(queue, *this, slice, lws_hint());
- }
- while(window.slide_window_slice_3D(slice));
-}
-} // namespace kernels
-} // namespace opencl
-} // namespace arm_compute
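
The PARTIAL_LOAD_M0 / PARTIAL_LOAD_K0 build options computed in configure() above are just the remainders of the (possibly 3D-collapsed) LHS height and width with respect to the block sizes, i.e. the size of the incomplete blocks on the bottom and right edges. A standalone illustration with made-up sizes:

#include <cstdio>

int main()
{
    // Illustrative sizes only: a 70 x 23 LHS (width x height) reshaped with m0 = 4, k0 = 8.
    const unsigned int src_w = 70, src_h = 23;
    const unsigned int m0 = 4, k0 = 8;

    const unsigned int partial_load_m0 = src_h % m0; // 3 rows left over in the bottom-edge blocks
    const unsigned int partial_load_k0 = src_w % k0; // 6 columns left over in the right-edge blocks

    std::printf("PARTIAL_LOAD_M0=%u PARTIAL_LOAD_K0=%u\n", partial_load_m0, partial_load_k0);
    return 0;
}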
diff --git a/src/core/gpu/cl/kernels/ClGemmReshapeLhsMatrixKernel.h b/src/core/gpu/cl/kernels/ClGemmReshapeLhsMatrixKernel.h
deleted file mode 100644
index b830ba02b4..0000000000
--- a/src/core/gpu/cl/kernels/ClGemmReshapeLhsMatrixKernel.h
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
- * Copyright (c) 2018-2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_CL_GEMM_RESHAPE_LHS_MATRIX_KERNEL_H
-#define ARM_COMPUTE_CL_GEMM_RESHAPE_LHS_MATRIX_KERNEL_H
-
-#include "src/core/common/Macros.h"
-#include "src/core/gpu/cl/ClCompileContext.h"
-#include "src/core/gpu/cl/IClKernel.h"
-
-namespace arm_compute
-{
-namespace opencl
-{
-namespace kernels
-{
-/** OpenCL kernel to reshape the LHS matrix when performing the matrix multiplication.
- * In particular, this function splits the src matrix into blocks of size M0xK0 (defined through GEMMLHSMatrixInfo) and
- * stores each one in the dst matrix, unrolling the values
- */
-class ClGemmReshapeLhsMatrixKernel : public ICLKernel
-{
-public:
- ClGemmReshapeLhsMatrixKernel() = default;
- ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(ClGemmReshapeLhsMatrixKernel);
- /** Initialise the kernel's input and output.
- *
- * @param[in] compile_context The compile context to be used.
- * @param[in] src Input tensor. Data types supported: All
- * @param[out] dst Output tensor. Data type supported: same as @p src
- * @param[in] lhs_info LHS matrix information to be used for reshaping. This object contains all the necessary
- * information to reshape the src tensor. Only the following values are supported:
- * lhs_info.m0: 2,3,4,5,6,7,8
- * lhs_info.k0: 2,3,4,8,16
- * lhs_info.v0: greater than 0
- * lhs_info.transpose: true, false
- * lhs_info.interleave: true, false
- * @param[in] reinterpret_src_as_3d (Optional) True if the src has to be reinterpreted as 3D tensor
- */
- void configure(const ClCompileContext &compile_context, ITensorInfo *src, ITensorInfo *dst, const GEMMLHSMatrixInfo &lhs_info, bool reinterpret_src_as_3d = false);
- /** Static function to check if given info will lead to a valid configuration
- *
- * Similar to @ref ClGemmReshapeLhsMatrixKernel::configure()
- *
- * @return a status
- */
- static Status validate(const ITensorInfo *src, const ITensorInfo *dst, const GEMMLHSMatrixInfo &lhs_info, bool reinterpret_src_as_3d);
-
- // Inherited methods overridden:
- void run_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue) override;
-
-private:
- bool _reinterpret_input_as_3d{ false };
-};
-} // namespace kernels
-} // namespace opencl
-} // namespace arm_compute
-#endif /* ARM_COMPUTE_CL_GEMM_RESHAPE_LHS_MATRIX_KERNEL_H */ \ No newline at end of file
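
A minimal usage sketch of the validator, assuming an F32 LHS and an empty destination info (so the output shape checks are skipped and the shape is deduced at configure time). The shapes and block sizes are illustrative, not a recommendation:

#include "arm_compute/core/Error.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"
#include "src/core/gpu/cl/kernels/ClGemmReshapeLhsMatrixKernel.h"

using namespace arm_compute;

void lhs_reshape_validate_sketch()
{
    const TensorInfo src(TensorShape(64U, 32U), 1, DataType::F32); // K = 64, M = 32
    TensorInfo       dst{};                                        // empty: shape deduced later

    GEMMLHSMatrixInfo lhs_info;
    lhs_info.m0         = 4; // 2..8
    lhs_info.k0         = 8; // 2,3,4,8,16
    lhs_info.v0         = 2; // > 0
    lhs_info.transpose  = false;
    lhs_info.interleave = true;

    const Status status = opencl::kernels::ClGemmReshapeLhsMatrixKernel::validate(&src, &dst, lhs_info, false);
    ARM_COMPUTE_ERROR_THROW_ON(status);
}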
diff --git a/src/core/gpu/cl/kernels/ClGemmReshapeRhsMatrixKernel.cpp b/src/core/gpu/cl/kernels/ClGemmReshapeRhsMatrixKernel.cpp
deleted file mode 100644
index e1ef7c61aa..0000000000
--- a/src/core/gpu/cl/kernels/ClGemmReshapeRhsMatrixKernel.cpp
+++ /dev/null
@@ -1,170 +0,0 @@
-/*
- * Copyright (c) 2018-2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "src/core/gpu/cl/kernels/ClGemmReshapeRhsMatrixKernel.h"
-
-#include "arm_compute/core/CL/CLHelpers.h"
-#include "arm_compute/core/CL/CLKernelLibrary.h"
-#include "arm_compute/core/CL/ICLTensor.h"
-#include "arm_compute/core/CL/OpenCL.h"
-#include "arm_compute/core/Helpers.h"
-#include "arm_compute/core/TensorInfo.h"
-#include "arm_compute/core/Utils.h"
-#include "arm_compute/core/utils/misc/ShapeCalculator.h"
-#include "src/core/AccessWindowStatic.h"
-#include "src/core/CL/CLValidate.h"
-#include "src/core/gpu/cl/kernels/gemm/ClGemmHelpers.h"
-#include "src/core/helpers/AutoConfiguration.h"
-#include "src/core/helpers/WindowHelpers.h"
-#include "support/Cast.h"
-#include "support/StringSupport.h"
-
-namespace arm_compute
-{
-namespace opencl
-{
-namespace kernels
-{
-namespace
-{
-Status validate_arguments(const ITensorInfo *src, const ITensorInfo *dst, const GEMMRHSMatrixInfo &rhs_info)
-{
- ARM_COMPUTE_RETURN_ERROR_ON(rhs_info.n0 == 0);
- ARM_COMPUTE_RETURN_ERROR_ON(rhs_info.k0 == 0);
- ARM_COMPUTE_RETURN_ERROR_ON(rhs_info.h0 == 0);
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(((rhs_info.n0 & (rhs_info.n0 - 1)) && rhs_info.n0 != 3), "Only 2,3,4,8,16 are supported for n0");
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(((rhs_info.k0 & (rhs_info.k0 - 1)) && (rhs_info.k0 != 1) && (rhs_info.k0 != 3)), "Only 1,2,3,4,8,16 are supported for k0");
- ARM_COMPUTE_RETURN_ERROR_ON(rhs_info.n0 > 16);
- ARM_COMPUTE_RETURN_ERROR_ON(rhs_info.k0 > 16);
- ARM_COMPUTE_RETURN_ERROR_ON((rhs_info.k0 == 1) && (rhs_info.transpose));
-
- ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(src);
- ARM_COMPUTE_RETURN_ERROR_ON(src->data_type() == DataType::UNKNOWN);
-
- if(rhs_info.export_to_cl_image)
- {
- const TensorInfo tensor_reshaped_info(misc::shape_calculator::compute_rhs_reshaped_shape(*src, rhs_info), 1, src->data_type());
- ARM_COMPUTE_RETURN_ON_ERROR(gemm::validate_image2d_support_on_rhs(tensor_reshaped_info, rhs_info));
- }
-
- if(dst->total_size() != 0)
- {
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(dst->tensor_shape(), misc::shape_calculator::compute_rhs_reshaped_shape(*src, rhs_info));
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src, dst);
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_QUANTIZATION_INFO(src, dst);
- }
-
- return Status{};
-}
-
-std::pair<Status, Window> validate_and_configure_window(ITensorInfo *src, ITensorInfo *dst, const GEMMRHSMatrixInfo &rhs_info)
-{
- const unsigned int num_elems_processed_per_iteration_x = rhs_info.n0;
- const unsigned int num_elems_processed_per_iteration_y = rhs_info.k0;
- bool window_changed = false;
-
- // dst auto initialization if not yet initialized
- auto_init_if_empty(*dst, src->clone()->set_tensor_shape(misc::shape_calculator::compute_rhs_reshaped_shape(*src, rhs_info)));
-
- // Configure window
- Window win = calculate_max_window(*src, Steps(num_elems_processed_per_iteration_x, num_elems_processed_per_iteration_y));
-
- AccessWindowRectangle src_access(src, 0, 0, num_elems_processed_per_iteration_x, num_elems_processed_per_iteration_y);
-
- window_changed = update_window_and_padding(win, src_access);
-
- if(rhs_info.export_to_cl_image)
- {
- gemm::update_padding_for_cl_image(dst);
- }
-
- // Collapse along the Z direction
- // This collapse needs to be here in order to tune the Z dimension of LWS
- Window collapsed = win.collapse(win, Window::DimZ);
-
- Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
- return std::make_pair(err, collapsed);
-}
-} // namespace
-
-void ClGemmReshapeRhsMatrixKernel::configure(const CLCompileContext &compile_context, ITensorInfo *src, ITensorInfo *dst, const GEMMRHSMatrixInfo &rhs_info)
-{
- ARM_COMPUTE_ERROR_ON_NULLPTR(src, dst);
-
- // Perform validate step
- ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(src, dst, rhs_info));
-
- // Create build options
- CLBuildOptions build_opts;
- build_opts.add_option("-DN0=" + support::cpp11::to_string(rhs_info.n0));
- build_opts.add_option("-DK0=" + support::cpp11::to_string(rhs_info.k0));
- build_opts.add_option("-DH0=" + support::cpp11::to_string(rhs_info.h0));
- build_opts.add_option_if(rhs_info.transpose, "-DTRANSPOSE");
- build_opts.add_option_if(rhs_info.interleave, "-DINTERLEAVE");
- build_opts.add_option("-DSRC_HEIGHT=" + support::cpp11::to_string(src->dimension(1)));
- build_opts.add_option("-DDATA_TYPE=" + get_cl_unsigned_type_from_element_size(src->element_size()));
-
- std::string kernel_name("gemm_reshape_rhs_matrix_");
- kernel_name += rhs_info.transpose ? "t" : "nt";
-
- // Create kernel
- _kernel = create_kernel(compile_context, kernel_name, build_opts.options());
-
- // Configure kernel window
- auto win_config = validate_and_configure_window(src, dst, rhs_info);
- ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
- ICLKernel::configure_internal(win_config.second);
-}
-
-Status ClGemmReshapeRhsMatrixKernel::validate(const ITensorInfo *src, const ITensorInfo *dst, const GEMMRHSMatrixInfo &rhs_info)
-{
- ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(src, dst, rhs_info));
- ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(src->clone().get(), dst->clone().get(), rhs_info).first);
-
- return Status{};
-}
-
-void ClGemmReshapeRhsMatrixKernel::run_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue)
-{
- ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
- ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window);
-
- const auto src = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC));
- auto dst = utils::cast::polymorphic_downcast<ICLTensor *>(tensors.get_tensor(TensorType::ACL_DST));
-
- ARM_COMPUTE_ERROR_ON_NULLPTR(src, dst);
-
- Window slice = window.first_slice_window_3D();
-
- do
- {
- unsigned int idx = 0;
- add_3D_tensor_argument(idx, src, slice);
- add_3D_tensor_argument(idx, dst, slice);
- enqueue(queue, *this, slice, lws_hint());
- }
- while(window.slide_window_slice_3D(slice));
-}
-} // namespace kernels
-} // namespace opencl
-} // namespace arm_compute \ No newline at end of file
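
Both reshape validators above use the same bit trick for the block sizes: n0 & (n0 - 1) is zero exactly when n0 is a power of two, and 3 is whitelisted separately. Note that the raw check also lets 1 through, which matters for k0 (documented as legal only when the RHS is not transposed). A standalone restatement:

#include <cstdio>

// Mirrors the n0/k0 check in validate_arguments(): rejected when the value is 0,
// greater than 16, or neither a power of two nor 3.
static bool block_size_passes_validator(unsigned int v)
{
    if(v == 0 || v > 16)
    {
        return false;
    }
    const bool power_of_two = (v & (v - 1)) == 0;
    return power_of_two || (v == 3);
}

int main()
{
    for(unsigned int v = 1; v <= 17; ++v)
    {
        std::printf("%2u -> %s\n", v, block_size_passes_validator(v) ? "ok" : "rejected");
    }
    return 0;
}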
diff --git a/src/core/gpu/cl/kernels/ClGemmReshapeRhsMatrixKernel.h b/src/core/gpu/cl/kernels/ClGemmReshapeRhsMatrixKernel.h
deleted file mode 100644
index e877d87408..0000000000
--- a/src/core/gpu/cl/kernels/ClGemmReshapeRhsMatrixKernel.h
+++ /dev/null
@@ -1,84 +0,0 @@
-/*
- * Copyright (c) 2018-2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_CL_GEMM_RESHAPE_RHS_MATRIX_KERNEL_H
-#define ARM_COMPUTE_CL_GEMM_RESHAPE_RHS_MATRIX_KERNEL_H
-
-#include "src/core/common/Macros.h"
-#include "src/core/gpu/cl/ClCompileContext.h"
-#include "src/core/gpu/cl/IClKernel.h"
-
-namespace arm_compute
-{
-namespace opencl
-{
-namespace kernels
-{
-/** OpenCL kernel to reshape the RHS matrix when performing the matrix multiplication.
- * In particular, this kernel splits the src matrix into blocks of size K0xN0 and stores each one in
- * the dst matrix, unrolling the values */
-class ClGemmReshapeRhsMatrixKernel : public ICLKernel
-{
-public:
- ClGemmReshapeRhsMatrixKernel() = default;
- ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(ClGemmReshapeRhsMatrixKernel);
- /** Initialise the kernel's input and output.
- *
- * @note If rhs_info.export_to_cl_image = true, this OpenCL kernel will guarantee the OpenCL pitch alignment for the output tensor,
- * required to create an OpenCL image object from a buffer in @ref ClGemmMatrixMultiplyReshapedKernel and in @ref ClGemmMatrixMultiplyReshapedOnlyRhsKernel.
- * Since the OpenCL image object is created by importing the OpenCL buffer, the following conditions are required:
- * -# rhs_info.n0 can only be 4, 8 and 16
- * -# rhs_info.k0 can only be 4, 8 and 16
- * -# Data type can only be F32, F16
- * -# The platform should support the OpenCL cl_khr_image2d_from_buffer extension
- * -# output width should be less than or equal to (CL_DEVICE_IMAGE2D_MAX_WIDTH * 4)
- * -# output (height * depth) should be less than or equal to CL_DEVICE_IMAGE2D_MAX_HEIGHT
- * -# The output tensor should be only consumed by @ref ClGemmMatrixMultiplyReshapedKernel or @ref ClGemmMatrixMultiplyReshapedOnlyRhsKernel
- *
- * @param[in] compile_context The compile context to be used.
- * @param[in] src Input tensor. Data types supported: All
- * @param[out] dst Output tensor. Data type supported: same as @p src
- * @param[in] rhs_info RHS matrix information to be used for reshaping. This object contains all the necessary
- * information to reshape the src tensor. Only the following values are supported:
- * rhs_info.n0: 2,3,4,8,16 (only 4, 8 and 16 if rhs_info.export_to_cl_image == true)
- * rhs_info.k0: 1,2,3,4,8,16 (k0 = 1 only if rhs_info.transpose = false), (only 4, 8 and 16 if rhs_info.export_to_cl_image == true)
- * rhs_info.h0: greater than 0
- * rhs_info.transpose: true, false
- * rhs_info.interleave: true, false
- */
- void configure(const ClCompileContext &compile_context, ITensorInfo *src, ITensorInfo *dst, const GEMMRHSMatrixInfo &rhs_info);
- /** Static function to check if given info will lead to a valid configuration
- *
- * Similar to @ref ClGemmReshapeRhsMatrixKernel::configure()
- *
- * @return a status
- */
- static Status validate(const ITensorInfo *src, const ITensorInfo *dst, const GEMMRHSMatrixInfo &rhs_info);
-
- // Inherited methods overridden:
- void run_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue) override;
-};
-} // namespace kernels
-} // namespace opencl
-} // namespace arm_compute
-#endif /* ARM_COMPUTE_CL_GEMM_RESHAPE_RHS_MATRIX_KERNEL_H */ \ No newline at end of file
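
Because the consumer GEMM kernels expect src1 already in the reshaped layout, it can be convenient to derive the reshaped shape up front with the same shape calculator the validator uses internally. A minimal sketch with illustrative sizes (the block values are assumptions, not tuned choices):

#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"

using namespace arm_compute;

TensorInfo reshaped_rhs_info_sketch()
{
    const TensorInfo rhs(TensorShape(128U, 64U), 1, DataType::F32); // N = 128, K = 64

    GEMMRHSMatrixInfo rhs_info;
    rhs_info.n0                 = 4;    // 2,3,4,8,16
    rhs_info.k0                 = 4;    // 1,2,3,4,8,16 (1 only without transpose)
    rhs_info.h0                 = 4;    // > 0
    rhs_info.transpose          = true;
    rhs_info.interleave         = true;
    rhs_info.export_to_cl_image = false;

    const TensorShape reshaped = misc::shape_calculator::compute_rhs_reshaped_shape(rhs, rhs_info);
    return TensorInfo(reshaped, 1, rhs.data_type());
}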
diff --git a/src/core/gpu/cl/kernels/ClHeightConcatenateKernel.cpp b/src/core/gpu/cl/kernels/ClHeightConcatenateKernel.cpp
deleted file mode 100644
index 4436e98fe3..0000000000
--- a/src/core/gpu/cl/kernels/ClHeightConcatenateKernel.cpp
+++ /dev/null
@@ -1,131 +0,0 @@
-/*
- * Copyright (c) 2019-2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "src/core/gpu/cl/kernels/ClHeightConcatenateKernel.h"
-
-#include "arm_compute/core/CL/CLHelpers.h"
-#include "arm_compute/core/CL/CLKernelLibrary.h"
-#include "arm_compute/core/CL/ICLTensor.h"
-#include "arm_compute/core/Helpers.h"
-#include "arm_compute/core/Utils.h"
-#include "arm_compute/core/utils/misc/ShapeCalculator.h"
-#include "src/core/CL/CLValidate.h"
-#include "src/core/helpers/WindowHelpers.h"
-#include "support/Cast.h"
-
-#include "support/StringSupport.h"
-
-namespace arm_compute
-{
-namespace opencl
-{
-namespace kernels
-{
-namespace
-{
-Status validate_arguments(const ITensorInfo *src, unsigned int height_offset, const ITensorInfo *dst)
-{
- ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, dst);
- ARM_COMPUTE_RETURN_ERROR_ON(src->data_type() == DataType::UNKNOWN);
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src, dst);
- ARM_COMPUTE_RETURN_ERROR_ON(src->dimension(Window::DimY) + height_offset > dst->dimension(Window::DimY));
-
- ARM_COMPUTE_RETURN_ERROR_ON(src->dimension(0) != dst->dimension(0));
- for(size_t i = 2; i < Coordinates::num_max_dimensions; ++i)
- {
- ARM_COMPUTE_RETURN_ERROR_ON(src->dimension(i) != dst->dimension(i));
- }
- ARM_COMPUTE_RETURN_ERROR_ON(src->num_dimensions() > 4);
-
- return Status{};
-}
-} // namespace
-
-ClHeightConcatenateKernel::ClHeightConcatenateKernel()
- : _height_offset(0)
-{
-}
-
-Status ClHeightConcatenateKernel::validate(const ITensorInfo *src, unsigned int height_offset, const ITensorInfo *dst)
-{
- ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(src, height_offset, dst));
- return Status{};
-}
-
-void ClHeightConcatenateKernel::configure(const CLCompileContext &compile_context, ITensorInfo *src, unsigned int height_offset, ITensorInfo *dst)
-{
- ARM_COMPUTE_ERROR_ON_NULLPTR(src, dst);
- ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(src, height_offset, dst));
-
- auto padding_info = get_padding_info({ src, dst });
-
- _height_offset = height_offset;
-
- // Add build options
- const unsigned int num_elems_processed_per_iteration = adjust_vec_size(4, src->dimension(0));
-
- CLBuildOptions build_opts;
- build_opts.add_option("-DDATA_TYPE=" + get_cl_unsigned_type_from_element_size(src->element_size()));
- build_opts.add_option("-DVEC_SIZE=" + support::cpp11::to_string(num_elems_processed_per_iteration));
- build_opts.add_option("-DHEIGHT_OFFSET=" + support::cpp11::to_string(_height_offset));
- build_opts.add_option("-DDEPTH=" + support::cpp11::to_string(src->dimension(2)));
- build_opts.add_option("-DVEC_SIZE_LEFTOVER=" + support::cpp11::to_string(src->dimension(0) % num_elems_processed_per_iteration));
-
- if(is_data_type_quantized_asymmetric(src->data_type()) && src->quantization_info() != dst->quantization_info())
- {
- const UniformQuantizationInfo iq_info = src->quantization_info().uniform();
- const UniformQuantizationInfo oq_info = dst->quantization_info().uniform();
-
- build_opts.add_option("-DOFFSET_IN1=" + float_to_string_with_full_precision(iq_info.offset));
- build_opts.add_option("-DOFFSET_OUT=" + float_to_string_with_full_precision(oq_info.offset));
- build_opts.add_option("-DSCALE_IN1=" + float_to_string_with_full_precision(iq_info.scale));
- build_opts.add_option("-DSCALE_OUT=" + float_to_string_with_full_precision(oq_info.scale));
- }
-
- // Create kernel
- _kernel = create_kernel(compile_context, "concatenate_height", build_opts.options());
- // Configure kernel window
-
- // The window needs to be based on src as we copy the full height of src
- Window win = calculate_max_window(*src, Steps(num_elems_processed_per_iteration));
- ICLKernel::configure_internal(win.collapse(win, Window::DimZ));
-
- ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
-}
-
-void ClHeightConcatenateKernel::run_op(ITensorPack &tensors, const Window &window, ::cl::CommandQueue &queue)
-{
- ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
- ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window);
-
- const auto src = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC));
- auto dst = utils::cast::polymorphic_downcast<ICLTensor *>(tensors.get_tensor(TensorType::ACL_DST));
-
- unsigned int idx = 0;
- add_4D_tensor_argument(idx, src, window);
- add_4D_tensor_argument(idx, dst, window);
- enqueue(queue, *this, window, lws_hint());
-}
-} // namespace kernels
-} // namespace opencl
-} // namespace arm_compute
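
The VEC_SIZE / VEC_SIZE_LEFTOVER pair set up in configure() above splits the innermost dimension into full vector accesses plus one partial access. A standalone illustration, assuming adjust_vec_size() settles on 4 for a sufficiently wide row (the width is made up):

#include <cstdio>

int main()
{
    const unsigned int width    = 30; // illustrative innermost dimension
    const unsigned int vec_size = 4;  // assumed result of adjust_vec_size(4, width)

    const unsigned int full_vectors = width / vec_size; // 7 full 4-element accesses
    const unsigned int leftover     = width % vec_size; // VEC_SIZE_LEFTOVER = 2

    std::printf("full_vectors=%u VEC_SIZE_LEFTOVER=%u\n", full_vectors, leftover);
    return 0;
}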
diff --git a/src/core/gpu/cl/kernels/ClHeightConcatenateKernel.h b/src/core/gpu/cl/kernels/ClHeightConcatenateKernel.h
deleted file mode 100644
index 9a4380a5b7..0000000000
--- a/src/core/gpu/cl/kernels/ClHeightConcatenateKernel.h
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
- * Copyright (c) 2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_CL_HEIGHT_CONCATENATE_LAYER_KERNEL_H
-#define ARM_COMPUTE_CL_HEIGHT_CONCATENATE_LAYER_KERNEL_H
-
-#include "src/core/common/Macros.h"
-#include "src/core/gpu/cl/ClCompileContext.h"
-#include "src/core/gpu/cl/IClKernel.h"
-
-namespace arm_compute
-{
-namespace opencl
-{
-namespace kernels
-{
-/** Interface for the height concatenate kernel.
- * The source tensor will be concatenated into the destination tensor.
- */
-class ClHeightConcatenateKernel : public IClKernel
-{
-public:
- /** Default constructor */
- ClHeightConcatenateKernel();
- ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(ClHeightConcatenateKernel);
- /** Initialise the kernel's source and destination
- *
- * @param[in] compile_context The compile context to be used.
- * @param[in] src Source tensor info. Data types supported: All.
- * @param[in] height_offset The starting offset on the Y axis for the dst tensor.
- * @param[out] dst Destination tensor info. Data types supported: same as @p src.
- *
- */
- void configure(const CLCompileContext &compile_context, ITensorInfo *src, unsigned int height_offset, ITensorInfo *dst);
- /** Static function to check if given info will lead to a valid configuration of @ref ClHeightConcatenateKernel
- *
- * @param[in] src Source tensor info. Data types supported: All.
- * @param[in] height_offset The starting offset on the Y axis for the dst tensor.
- * @param[in] dst Destination tensor info. Data types supported: same as @p src.
- *
- * @return a status
- */
- static Status validate(const ITensorInfo *src, unsigned int height_offset, const ITensorInfo *dst);
-
- // Inherited methods overridden:
- void run_op(ITensorPack &tensors, const Window &window, ::cl::CommandQueue &queue) override;
-
-private:
- unsigned int _height_offset;
-};
-} // namespace kernels
-} // namespace opencl
-} // namespace arm_compute
-#endif /* ARM_COMPUTE_CL_HEIGHT_CONCATENATE_LAYER_KERNEL_H */
diff --git a/src/core/gpu/cl/kernels/ClMulKernel.cpp b/src/core/gpu/cl/kernels/ClMulKernel.cpp
deleted file mode 100644
index b8081bbacf..0000000000
--- a/src/core/gpu/cl/kernels/ClMulKernel.cpp
+++ /dev/null
@@ -1,403 +0,0 @@
-/*
- * Copyright (c) 2016-2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "src/core/gpu/cl/kernels/ClMulKernel.h"
-
-#include "arm_compute/core/CL/CLHelpers.h"
-#include "arm_compute/core/CL/CLKernelLibrary.h"
-#include "arm_compute/core/CL/ICLTensor.h"
-#include "arm_compute/core/CL/OpenCL.h"
-#include "arm_compute/core/TensorInfo.h"
-#include "src/core/CL/CLValidate.h"
-#include "src/core/helpers/AutoConfiguration.h"
-#include "src/core/helpers/WindowHelpers.h"
-#include "support/Cast.h"
-#include "support/StringSupport.h"
-
-namespace arm_compute
-{
-namespace opencl
-{
-namespace kernels
-{
-namespace
-{
-Status validate_arguments(const ITensorInfo *src1, const ITensorInfo *src2, const ITensorInfo *dst, float scale,
- ConvertPolicy overflow_policy, RoundingPolicy rounding_policy, const ActivationLayerInfo &act_info)
-{
- ARM_COMPUTE_UNUSED(overflow_policy);
- ARM_COMPUTE_UNUSED(rounding_policy);
-
- ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src1, src2, dst);
- ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(src1);
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src1,
- 1,
- DataType::U8, DataType::QASYMM8, DataType::QASYMM8_SIGNED,
- DataType::S16, DataType::QSYMM16, DataType::F16, DataType::S32,
- DataType::F32);
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src2,
- 1,
- DataType::U8, DataType::QASYMM8, DataType::QASYMM8_SIGNED,
- DataType::S16, DataType::QSYMM16, DataType::F16, DataType::S32,
- DataType::F32);
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(scale < 0, "Scale cannot be negative.");
- ARM_COMPUTE_RETURN_ERROR_ON(act_info.enabled() && !is_data_type_float(dst->data_type()));
-
- const TensorShape &out_shape = TensorShape::broadcast_shape(src1->tensor_shape(), src2->tensor_shape());
-
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(out_shape.total_size() == 0, "Inputs are not broadcast compatible");
-
- // Validate in case of configured dst
- if(dst->total_size() > 0)
- {
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(dst,
- 1,
- DataType::U8, DataType::QASYMM8, DataType::QASYMM8_SIGNED,
- DataType::S16, DataType::QSYMM16, DataType::F16,
- DataType::S32, DataType::F32);
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(dst->data_type() == DataType::U8 && (src1->data_type() != DataType::U8 || src2->data_type() != DataType::U8),
- "Dst can only be U8 if both src are U8");
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(dst->data_type() == DataType::QASYMM8 && (src1->data_type() != DataType::QASYMM8 || src2->data_type() != DataType::QASYMM8),
- "Dst can only be QASYMM8 if both src are QASYMM8");
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(dst->data_type() == DataType::QASYMM8_SIGNED && (src1->data_type() != DataType::QASYMM8_SIGNED || src2->data_type() != DataType::QASYMM8_SIGNED),
- "Dst can only be QASYMM8_SIGNED if both src are QASYMM8_SIGNED");
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(dst->data_type() == DataType::QSYMM16 && (src1->data_type() != DataType::QSYMM16 || src2->data_type() != DataType::QSYMM16),
- "Dst can only be QSYMM16 if both src are QSYMM16");
- ARM_COMPUTE_RETURN_ERROR_ON_MSG((src1->data_type() == DataType::S32 || src2->data_type() == DataType::S32) && (dst->data_type() != DataType::S32),
- "Dst must be S32 if source tensors are S32");
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(detail::have_different_dimensions(out_shape, dst->tensor_shape(), 0), "Wrong shape for dst");
- }
-
- return Status{};
-}
-} // namespace
-
-void ClMulKernel::configure(const CLCompileContext &compile_context, ITensorInfo *src1, ITensorInfo *src2, ITensorInfo *dst, float scale,
- ConvertPolicy overflow_policy, RoundingPolicy rounding_policy, const ActivationLayerInfo &act_info)
-{
- ARM_COMPUTE_ERROR_ON_NULLPTR(src1, src2, dst);
- ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(src1, src2, dst,
- scale, overflow_policy, rounding_policy, act_info));
-
- auto padding_info = get_padding_info({ src1, src2, dst });
-
- const TensorShape &out_shape = TensorShape::broadcast_shape(src1->tensor_shape(), src2->tensor_shape());
- auto_init_if_empty(*dst, src1->clone()->set_tensor_shape(out_shape));
-
- int scale_int = -1;
- // Extract sign, exponent and mantissa
- int exponent = 0;
- float normalized_mantissa = std::frexp(scale, &exponent);
- // Use int scaling if factor is equal to 1/2^n for 0 <= n <= 15
- // frexp returns 0.5 as the mantissa, which means that the exponent will be in the range -14 <= e <= 1
- // Moreover, it will be non-positive for n >= 1 as we deal with 1/2^n
- if((normalized_mantissa == 0.5f) && (-14 <= exponent) && (exponent <= 1))
- {
- // Store the positive exponent. We know that we compute 1/2^n
- // Additionally we need to subtract 1 to compensate for frexp using a mantissa of 0.5
- scale_int = std::abs(exponent - 1);
- }
-
- std::string acc_type;
- // Check if it has float src and dst
- if(is_data_type_float(src1->data_type()) || is_data_type_float(src2->data_type()))
- {
- scale_int = -1;
- acc_type = (src1->data_type() == DataType::F32 || src2->data_type() == DataType::F32) ? "float" : "half";
- }
- else
- {
- if(src1->element_size() == 4 || src2->element_size() == 4)
- {
- // Use 64-bit accumulator for 32-bit input
- acc_type = "long";
- }
- else if(src1->element_size() == 2 || src2->element_size() == 2)
- {
- // Use 32-bit accumulator for 16-bit input
- acc_type = "int";
- }
- else
- {
- // Use 16-bit accumulator for 8-bit input
- acc_type = "ushort";
- }
- }
-
- const bool is_quantized = is_data_type_quantized(src1->data_type());
- const unsigned int vec_size = adjust_vec_size(16 / dst->element_size(), dst->dimension(0));
- const unsigned int vec_size_leftover = dst->dimension(0) % vec_size;
-
- // Set kernel build options
- std::string kernel_name = "pixelwise_mul";
- CLBuildOptions build_opts;
- build_opts.add_option("-DDATA_TYPE_IN1=" + get_cl_type_from_data_type(src1->data_type()));
- build_opts.add_option("-DDATA_TYPE_IN2=" + get_cl_type_from_data_type(src2->data_type()));
- build_opts.add_option("-DDATA_TYPE_OUT=" + get_cl_type_from_data_type(dst->data_type()));
- build_opts.add_option("-DVEC_SIZE_IN1=" + ((dst->dimension(0) != 1 && src1->dimension(0) == 1) ? "1" : support::cpp11::to_string(vec_size)));
- build_opts.add_option("-DVEC_SIZE_IN2=" + ((dst->dimension(0) != 1 && src2->dimension(0) == 1) ? "1" : support::cpp11::to_string(vec_size)));
- build_opts.add_option("-DVEC_SIZE_OUT=" + support::cpp11::to_string(vec_size));
- build_opts.add_option("-DVEC_SIZE_LEFTOVER=" + support::cpp11::to_string(vec_size_leftover));
- if(is_quantized && (dst->data_type() != DataType::S32))
- {
- const UniformQuantizationInfo iq1_info = src1->quantization_info().uniform();
- const UniformQuantizationInfo iq2_info = src2->quantization_info().uniform();
- const UniformQuantizationInfo oq_info = dst->quantization_info().uniform();
-
- build_opts.add_option_if(is_data_type_quantized_asymmetric(src1->data_type()),
- "-DOFFSET_IN1=" + support::cpp11::to_string(iq1_info.offset));
- build_opts.add_option_if(is_data_type_quantized_asymmetric(src2->data_type()),
- "-DOFFSET_IN2=" + support::cpp11::to_string(iq2_info.offset));
- build_opts.add_option_if(is_data_type_quantized_asymmetric(dst->data_type()),
- "-DOFFSET_OUT=" + support::cpp11::to_string(oq_info.offset));
- build_opts.add_option("-DSCALE_IN1=" + float_to_string_with_full_precision(iq1_info.scale));
- build_opts.add_option("-DSCALE_IN2=" + float_to_string_with_full_precision(iq2_info.scale));
- build_opts.add_option("-DSCALE_OUT=" + float_to_string_with_full_precision(oq_info.scale));
- kernel_name += "_quantized";
- }
- else
- {
- kernel_name += (scale_int >= 0) ? "_int" : "_float";
- build_opts.add_option_if_else(overflow_policy == ConvertPolicy::WRAP || is_data_type_float(dst->data_type()), "-DWRAP", "-DSATURATE");
- build_opts.add_option_if_else(rounding_policy == RoundingPolicy::TO_ZERO, "-DROUND=_rtz", "-DROUND=_rte");
- build_opts.add_option("-DACC_DATA_TYPE=" + acc_type);
- if(act_info.enabled())
- {
- build_opts.add_option("-DACTIVATION_TYPE=" + lower_string(string_from_activation_func(act_info.activation())));
- build_opts.add_option("-DA_VAL=" + float_to_string_with_full_precision(act_info.a()));
- build_opts.add_option("-DB_VAL=" + float_to_string_with_full_precision(act_info.b()));
- }
- }
-
- // Create kernel
- _kernel = create_kernel(compile_context, kernel_name, build_opts.options());
-
- // Set scale argument
- unsigned int idx = 3 * num_arguments_per_3D_tensor(); // Skip the src and dst parameters
-
- if(scale_int >= 0 && !is_quantized)
- {
- _kernel.setArg(idx++, scale_int);
- }
- else
- {
- _kernel.setArg(idx++, scale);
- }
-
- Window win = calculate_max_window(*dst, Steps(vec_size));
- ICLKernel::configure_internal(win);
-
- ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
-
- // Set config_id for enabling LWS tuning
- _config_id = kernel_name;
- _config_id += "_";
- _config_id += lower_string(string_from_data_type(dst->data_type()));
- _config_id += "_";
- _config_id += support::cpp11::to_string(src1->dimension(0));
- _config_id += "_";
- _config_id += support::cpp11::to_string(src1->dimension(1));
- _config_id += "_";
- _config_id += support::cpp11::to_string(src1->dimension(2));
- _config_id += "_";
- _config_id += support::cpp11::to_string(src2->dimension(0));
- _config_id += "_";
- _config_id += support::cpp11::to_string(src2->dimension(1));
- _config_id += "_";
- _config_id += support::cpp11::to_string(src2->dimension(2));
- _config_id += "_";
- _config_id += support::cpp11::to_string(dst->dimension(0));
- _config_id += "_";
- _config_id += support::cpp11::to_string(dst->dimension(1));
- _config_id += "_";
- _config_id += support::cpp11::to_string(dst->dimension(2));
-}
-
-Status ClMulKernel::validate(const ITensorInfo *src1, const ITensorInfo *src2, const ITensorInfo *dst, float scale,
- ConvertPolicy overflow_policy, RoundingPolicy rounding_policy, const ActivationLayerInfo &act_info)
-{
- ARM_COMPUTE_ERROR_ON_NULLPTR(src1, src2, dst);
- ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(src1, src2, dst, scale, overflow_policy, rounding_policy, act_info));
-
- return Status{};
-}
-
-void ClMulKernel::run_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue)
-{
- ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
- ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window);
-
- const auto src_0 = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC_0));
- const auto src_1 = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC_1));
- auto dst = utils::cast::polymorphic_downcast<ICLTensor *>(tensors.get_tensor(TensorType::ACL_DST));
-
- const TensorShape &in_shape1 = src_0->info()->tensor_shape();
- const TensorShape &in_shape2 = src_1->info()->tensor_shape();
- const TensorShape &out_shape = dst->info()->tensor_shape();
-
- bool can_collapse = true;
- if(std::min(in_shape1.total_size(), in_shape2.total_size()) > 1)
- {
- can_collapse = (std::min(in_shape1.num_dimensions(), in_shape2.num_dimensions()) > Window::DimZ);
- for(size_t d = Window::DimZ; can_collapse && (d < out_shape.num_dimensions()); ++d)
- {
- can_collapse = (in_shape1[d] == in_shape2[d]);
- }
- }
-
- bool has_collapsed = false;
- Window collapsed = can_collapse ? window.collapse_if_possible(ICLKernel::window(), Window::DimZ, &has_collapsed) : window;
-
- const TensorShape &in_shape1_collapsed = has_collapsed ? in_shape1.collapsed_from(Window::DimZ) : in_shape1;
- const TensorShape &in_shape2_collapsed = has_collapsed ? in_shape2.collapsed_from(Window::DimZ) : in_shape2;
-
- Window slice = collapsed.first_slice_window_3D();
- Window slice_input1 = slice.broadcast_if_dimension_le_one(in_shape1_collapsed);
- Window slice_input2 = slice.broadcast_if_dimension_le_one(in_shape2_collapsed);
-
- do
- {
- unsigned int idx = 0;
- add_3D_tensor_argument(idx, src_0, slice_input1);
- add_3D_tensor_argument(idx, src_1, slice_input2);
- add_3D_tensor_argument(idx, dst, slice);
- enqueue(queue, *this, slice, lws_hint());
-
- ARM_COMPUTE_UNUSED(collapsed.slide_window_slice_3D(slice_input1));
- ARM_COMPUTE_UNUSED(collapsed.slide_window_slice_3D(slice_input2));
- }
- while(collapsed.slide_window_slice_3D(slice));
-}
-
-namespace
-{
-constexpr unsigned int vec_size_complex = 1;
-
-Status validate_arguments_complex(const ITensorInfo *src1, const ITensorInfo *src2, const ITensorInfo *dst, const ActivationLayerInfo &act_info)
-{
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src1, 2, DataType::F16, DataType::F32);
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src2, 2, DataType::F16, DataType::F32);
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src1, src2);
-
- const TensorShape &out_shape = TensorShape::broadcast_shape(src1->tensor_shape(), src2->tensor_shape());
-
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(out_shape.total_size() == 0, "Inputs are not broadcast compatible");
- ARM_COMPUTE_RETURN_ERROR_ON(act_info.enabled() && !is_data_type_float(dst->data_type()));
-
- // Validate in case of configured dst
- if(dst->total_size() > 0)
- {
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(dst, 2, DataType::F16, DataType::F32);
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src1, dst);
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(detail::have_different_dimensions(out_shape, dst->tensor_shape(), 0), "Wrong shape for dst");
- }
-
- return Status{};
-}
-} // namespace
-
-void ClComplexMulKernel::configure(const CLCompileContext &compile_context, ITensorInfo *src1, ITensorInfo *src2, ITensorInfo *dst, const ActivationLayerInfo &act_info)
-{
- ARM_COMPUTE_ERROR_ON_NULLPTR(src1, src2, dst);
- ARM_COMPUTE_ERROR_THROW_ON(validate_arguments_complex(src1, src2, dst, act_info));
-
- auto padding_info = get_padding_info({ src1, src2, dst });
-
- const TensorShape &out_shape = TensorShape::broadcast_shape(src1->tensor_shape(), src2->tensor_shape());
- auto_init_if_empty(*dst, src1->clone()->set_tensor_shape(out_shape));
-
- CLBuildOptions build_opts;
- build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(dst->data_type()));
- if(act_info.enabled())
- {
- build_opts.add_option("-DACTIVATION_TYPE=" + lower_string(string_from_activation_func(act_info.activation())));
- build_opts.add_option("-DA_VAL=" + float_to_string_with_full_precision(act_info.a()));
- build_opts.add_option("-DB_VAL=" + float_to_string_with_full_precision(act_info.b()));
- }
-
- // Create kernel
- _kernel = create_kernel(compile_context, "pixelwise_mul_complex", build_opts.options());
-
- Window win = calculate_max_window(*dst, Steps(vec_size_complex));
- ICLKernel::configure_internal(win);
-
- ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
-}
-
-Status ClComplexMulKernel::validate(const ITensorInfo *src1, const ITensorInfo *src2, const ITensorInfo *dst, const ActivationLayerInfo &act_info)
-{
- ARM_COMPUTE_ERROR_ON_NULLPTR(src1, src2, dst);
- ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments_complex(src1, src2, dst, act_info));
-
- return Status{};
-}
-
-void ClComplexMulKernel::run_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue)
-{
- ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
- ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window);
-
- const auto src_0 = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC_0));
- const auto src_1 = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC_1));
- auto dst = utils::cast::polymorphic_downcast<ICLTensor *>(tensors.get_tensor(TensorType::ACL_DST));
-
- const TensorShape &in_shape1 = src_0->info()->tensor_shape();
- const TensorShape &in_shape2 = src_1->info()->tensor_shape();
- const TensorShape &out_shape = dst->info()->tensor_shape();
-
- bool can_collapse = true;
- if(std::min(in_shape1.total_size(), in_shape2.total_size()) > 1)
- {
- can_collapse = (std::min(in_shape1.num_dimensions(), in_shape2.num_dimensions()) > Window::DimZ);
- for(size_t d = Window::DimZ; can_collapse && (d < out_shape.num_dimensions()); ++d)
- {
- can_collapse = (in_shape1[d] == in_shape2[d]);
- }
- }
-
- bool has_collapsed = false;
- Window collapsed = can_collapse ? window.collapse_if_possible(ICLKernel::window(), Window::DimZ, &has_collapsed) : window;
-
- const TensorShape &in_shape1_collapsed = has_collapsed ? in_shape1.collapsed_from(Window::DimZ) : in_shape1;
- const TensorShape &in_shape2_collapsed = has_collapsed ? in_shape2.collapsed_from(Window::DimZ) : in_shape2;
-
- Window slice = collapsed.first_slice_window_3D();
- Window slice_input1 = slice.broadcast_if_dimension_le_one(in_shape1_collapsed);
- Window slice_input2 = slice.broadcast_if_dimension_le_one(in_shape2_collapsed);
-
- do
- {
- unsigned int idx = 0;
- add_3D_tensor_argument(idx, src_0, slice_input1);
- add_3D_tensor_argument(idx, src_1, slice_input2);
- add_3D_tensor_argument(idx, dst, slice);
- enqueue(queue, *this, slice, lws_hint());
-
- ARM_COMPUTE_UNUSED(collapsed.slide_window_slice_3D(slice_input1));
- ARM_COMPUTE_UNUSED(collapsed.slide_window_slice_3D(slice_input2));
- }
- while(collapsed.slide_window_slice_3D(slice));
-}
-} // namespace kernels
-} // namespace opencl
-} // namespace arm_compute
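
The integer-scale detection in configure() above relies on frexp() normalising every scale of the form 1/2^n to a mantissa of exactly 0.5; a standalone check of the arithmetic, with an illustrative scale of 1/8:

#include <cmath>
#include <cstdio>
#include <cstdlib>

int main()
{
    const float scale = 0.125f; // 1/2^3

    int         exponent            = 0;
    const float normalized_mantissa = std::frexp(scale, &exponent); // 0.5 * 2^-2

    int scale_int = -1;
    if((normalized_mantissa == 0.5f) && (-14 <= exponent) && (exponent <= 1))
    {
        // Subtract 1 to compensate for the mantissa of 0.5: |(-2) - 1| = 3,
        // so the kernel can shift right by 3 instead of multiplying by a float scale.
        scale_int = std::abs(exponent - 1);
    }

    std::printf("scale=%f -> scale_int=%d\n", scale, scale_int);
    return 0;
}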
diff --git a/src/core/gpu/cl/kernels/ClMulKernel.h b/src/core/gpu/cl/kernels/ClMulKernel.h
deleted file mode 100644
index 44162f3db3..0000000000
--- a/src/core/gpu/cl/kernels/ClMulKernel.h
+++ /dev/null
@@ -1,116 +0,0 @@
-/*
- * Copyright (c) 2016-2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_CL_MUL_KERNEL_H
-#define ARM_COMPUTE_CL_MUL_KERNEL_H
-
-#include "src/core/common/Macros.h"
-#include "src/core/gpu/cl/ClCompileContext.h"
-#include "src/core/gpu/cl/IClKernel.h"
-
-namespace arm_compute
-{
-namespace opencl
-{
-namespace kernels
-{
-/** Interface for the pixelwise multiplication kernel. */
-class ClMulKernel : public IClKernel
-{
-public:
- /** Default constructor */
- ClMulKernel() = default;
- ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(ClMulKernel);
- /** Initialise the kernel's src and dst.
- *
- * Valid configurations (Input1,Input2) -> Output :
- *
- * - (U8,U8) -> U8
- * - (U8,U8) -> S16
- * - (U8,S16) -> S16
- * - (S16,U8) -> S16
- * - (S16,S16) -> S16
- * - (S32,S32) -> S32
- * - (F16,F16) -> F16
- * - (F32,F32) -> F32
- * - (QASYMM8,QASYMM8) -> QASYMM8
- * - (QASYMM8_SIGNED,QASYMM8_SIGNED) -> QASYMM8_SIGNED
- * - (QSYMM16,QSYMM16) -> QSYMM16
- * - (QSYMM16,QSYMM16) -> S32
- *
- * @param[in] compile_context The compile context to be used.
- * @param[in] src1 An src tensor info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/F32/S32
- * @param[in] src2 An src tensor info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/F32/S32
- * @param[out] dst The dst tensor info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/F32/S32
- * @param[in] scale Scale to apply after multiplication.
- * Scale must be positive and its value must be either 1/255 or 1/2^n where n is between 0 and 15.
- * @param[in] overflow_policy Overflow policy. Supported overflow policies: Wrap, Saturate
- * @param[in] rounding_policy Rounding policy. Supported rounding modes: to zero, to nearest even.
- * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
- */
- void configure(const CLCompileContext &compile_context, ITensorInfo *src1, ITensorInfo *src2, ITensorInfo *dst, float scale,
- ConvertPolicy overflow_policy, RoundingPolicy rounding_policy, const ActivationLayerInfo &act_info = ActivationLayerInfo());
- /** Static function to check if given info will lead to a valid configuration
- *
- * Similar to @ref ClMulKernel::configure()
- *
- * @return a status
- */
- static Status validate(const ITensorInfo *src1, const ITensorInfo *src2, const ITensorInfo *dst, float scale,
- ConvertPolicy overflow_policy, RoundingPolicy rounding_policy, const ActivationLayerInfo &act_info = ActivationLayerInfo());
-
- // Inherited methods overridden:
- void run_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue) override;
-};
-
-/** Interface for the complex pixelwise multiplication kernel. */
-class ClComplexMulKernel : public ICLKernel
-{
-public:
- /** Default constructor */
- ClComplexMulKernel() = default;
- ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(ClComplexMulKernel);
- /** Initialise the kernel's src and dst.
- *
- * @param[in] compile_context The compile context to be used.
- * @param[in] src1 An src tensor info. Data types supported: F32. Number of channels supported: 2.
- * @param[in] src2 An src tensor info. Data types supported: same as @p src1. Number of channels supported: same as @p src1.
- * @param[out] dst The dst tensor info. Data types supported: same as @p src1. Number of channels supported: same as @p src1.
- * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
- */
- void configure(const CLCompileContext &compile_context, ITensorInfo *src1, ITensorInfo *src2, ITensorInfo *dst, const ActivationLayerInfo &act_info = ActivationLayerInfo());
- /** Static function to check if given info will lead to a valid configuration
- *
- * Similar to @ref ClComplexMulKernel::configure()
- *
- * @return a status
- */
- static Status validate(const ITensorInfo *src1, const ITensorInfo *src2, const ITensorInfo *dst, const ActivationLayerInfo &act_info = ActivationLayerInfo());
-
- // Inherited methods overridden:
- void run_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue) override;
-};
-} // namespace kernels
-} // namespace opencl
-} // namespace arm_compute
-#endif /* ARM_COMPUTE_CL_MUL_KERNEL_H */
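ClComplexMulKernel above works on 2-channel F32 tensors whose channels hold the real and imaginary parts of a complex value, so the per-element operation is the usual complex product (a + bi)(c + di) = (ac - bd) + (ad + bc)i. A reference sketch of that semantics, illustrative only and independent of the OpenCL kernel:

#include <complex>
#include <cstddef>
#include <vector>

// Element-wise complex product over two equally sized buffers of (real, imag) pairs.
std::vector<std::complex<float>> complex_mul_reference(const std::vector<std::complex<float>> &x,
                                                       const std::vector<std::complex<float>> &y)
{
    std::vector<std::complex<float>> out(x.size());
    for(std::size_t i = 0; i < x.size(); ++i)
    {
        out[i] = x[i] * y[i]; // (ac - bd) + (ad + bc)i
    }
    return out;
}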
diff --git a/src/core/gpu/cl/kernels/ClPermuteKernel.cpp b/src/core/gpu/cl/kernels/ClPermuteKernel.cpp
deleted file mode 100644
index ffc13060a8..0000000000
--- a/src/core/gpu/cl/kernels/ClPermuteKernel.cpp
+++ /dev/null
@@ -1,147 +0,0 @@
-/*
- * Copyright (c) 2018-2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "src/core/gpu/cl/kernels/ClPermuteKernel.h"
-
-#include "arm_compute/core/CL/CLHelpers.h"
-#include "arm_compute/core/CL/CLKernelLibrary.h"
-#include "arm_compute/core/CL/ICLTensor.h"
-#include "arm_compute/core/Helpers.h"
-#include "arm_compute/core/TensorInfo.h"
-#include "arm_compute/core/Utils.h"
-#include "arm_compute/core/Validate.h"
-#include "arm_compute/core/utils/misc/ShapeCalculator.h"
-#include "src/core/CL/CLValidate.h"
-#include "src/core/helpers/AutoConfiguration.h"
-#include "src/core/helpers/WindowHelpers.h"
-#include "support/Cast.h"
-#include "support/StringSupport.h"
-
-namespace arm_compute
-{
-namespace opencl
-{
-namespace kernels
-{
-namespace
-{
-TensorShape get_dst_shape(const ITensorInfo *src, const PermutationVector &perm)
-{
- TensorShape dst_shape = src->tensor_shape();
- permute(dst_shape, perm);
- return dst_shape;
-}
-
-Status validate_arguments(const ITensorInfo *src, const ITensorInfo *dst, const PermutationVector &perm)
-{
- ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, dst);
- ARM_COMPUTE_RETURN_ERROR_ON(src->data_type() == DataType::UNKNOWN);
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(src->num_dimensions() < 1 || src->num_dimensions() > 4,
- "Permutation up to 4-D src tensor is supported");
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(perm.num_dimensions() < 1 || perm.num_dimensions() > 4,
- "Permutation vector size should be less than or equal to 4");
- for(const auto &p : perm)
- {
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(p >= perm.num_dimensions(), "Permutation vector has invalid values");
- }
-
- // Validate configured dst
- if(dst->total_size() != 0)
- {
- const TensorShape dst_shape = misc::shape_calculator::compute_permutation_output_shape(*src, perm);
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(dst->tensor_shape(), dst_shape);
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_QUANTIZATION_INFO(src, dst);
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src, dst);
- }
- return Status{};
-}
-} // namespace
-
-void ClPermuteKernel::configure(const CLCompileContext &compile_context, const ITensorInfo *src, ITensorInfo *dst, const PermutationVector &perm)
-{
- ARM_COMPUTE_ERROR_ON_NULLPTR(src, dst);
- auto padding_info = get_padding_info({ src, dst });
- const TensorShape dst_shape = get_dst_shape(src, perm);
- // Output auto initialization if not yet initialized
- auto_init_if_empty(*dst, src->clone()->set_tensor_shape(dst_shape));
-
- ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(src, dst, perm));
-
- _perm = perm;
-
- // Create kernel
- CLBuildOptions build_opts;
- build_opts.add_option("-DDATA_TYPE=" + get_cl_unsigned_type_from_element_size(data_size_from_type(src->data_type())));
- build_opts.add_option("-DDEPTH_IN=" + support::cpp11::to_string(src->dimension(2)));
- // New positions of width(W), height(H), channel(C) and batch(D) based on permutation vector
- build_opts.add_option("-DP1=" + support::cpp11::to_string((_perm.num_dimensions() >= 1) ? perm[0] : 0));
- build_opts.add_option("-DP2=" + support::cpp11::to_string((_perm.num_dimensions() >= 2) ? perm[1] : 1));
- build_opts.add_option("-DP3=" + support::cpp11::to_string((_perm.num_dimensions() >= 3) ? perm[2] : 2));
- build_opts.add_option("-DP4=" + support::cpp11::to_string((_perm.num_dimensions() >= 4) ? perm[3] : 3));
-
- _kernel = create_kernel(compile_context, "permute", build_opts.options());
-
- // Configure kernel window
- Window win = calculate_max_window(*src, Steps());
-
- ICLKernel::configure_internal(win);
- ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
-}
-
-Status ClPermuteKernel::validate(const ITensorInfo *src, const ITensorInfo *dst, const PermutationVector &perm)
-{
- ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, dst);
- ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(src, dst, perm));
-
- return Status{};
-}
-
-void ClPermuteKernel::run_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue)
-{
- ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
- ARM_COMPUTE_ERROR_ON_MISMATCHING_WINDOWS(ICLKernel::window(), window);
-
- const auto src = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC));
- auto dst = utils::cast::polymorphic_downcast<ICLTensor *>(tensors.get_tensor(TensorType::ACL_DST));
-
- Window slice_in = window.first_slice_window_4D().collapse(ICLKernel::window(), 2, 4);
-
- // Setup dst slice
- Window slice_out(slice_in);
- slice_out.set(Window::DimX, Window::Dimension(0, 0, 0));
- slice_out.set(Window::DimY, Window::Dimension(0, 0, 0));
- slice_out.set(Window::DimZ, Window::Dimension(0, 0, 0));
- slice_out.set(3, Window::Dimension(0, 0, 0));
-
- do
- {
- unsigned int idx = 0;
- add_4D_tensor_argument(idx, src, slice_in);
- add_4D_tensor_argument(idx, dst, slice_out);
- enqueue(queue, *this, slice_in, lws_hint());
- }
- while(window.slide_window_slice_4D(slice_in) && window.slide_window_slice_4D(slice_out));
-}
-} // namespace kernels
-} // namespace opencl
-} // namespace arm_compute
\ No newline at end of file
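The P1..P4 build options above fall back to the identity for any rank the permutation vector does not cover, and the destination shape is the source shape with its dimensions reordered. A small sketch of that shape computation, assuming the convention dst[i] = src[perm[i]] and using a hypothetical helper rather than the library's permute():

#include <array>
#include <cstddef>

// Permute up to 4 dimensions; dimensions beyond 'perm_rank' keep their original position,
// mirroring the identity defaults used for the P1..P4 kernel options.
std::array<std::size_t, 4> permute_shape(const std::array<std::size_t, 4> &src,
                                         const std::array<std::size_t, 4> &perm,
                                         std::size_t perm_rank)
{
    std::array<std::size_t, 4> dst = src;
    for(std::size_t i = 0; i < perm_rank; ++i)
    {
        dst[i] = src[perm[i]];
    }
    return dst;
}

// Example: a shape stored as (W, H, C, N) permuted with (2, 0, 1) becomes (C, W, H, N).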
diff --git a/src/core/gpu/cl/kernels/ClPermuteKernel.h b/src/core/gpu/cl/kernels/ClPermuteKernel.h
deleted file mode 100644
index b844214595..0000000000
--- a/src/core/gpu/cl/kernels/ClPermuteKernel.h
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
- * Copyright (c) 2018-2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_CL_PERMUTE_KERNEL_H
-#define ARM_COMPUTE_CL_PERMUTE_KERNEL_H
-
-#include "src/core/common/Macros.h"
-#include "src/core/gpu/cl/ClCompileContext.h"
-#include "src/core/gpu/cl/IClKernel.h"
-
-namespace arm_compute
-{
-namespace opencl
-{
-namespace kernels
-{
-/** OpenCL kernel to perform tensor permutation.
- *
- * Permutes a tensor according to a given permutation vector
- */
-class ClPermuteKernel : public IClKernel
-{
-public:
- /** Default constructor */
- ClPermuteKernel() = default;
- ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(ClPermuteKernel);
- /** Set the src and dst of the kernel.
- *
- * @note Arbitrary permutation vectors are supported with rank not greater than 4
- *
- * @param[in] compile_context The compile context to be used.
- * @param[in] src The src tensor info. Data types supported: All.
- * @param[in] dst The dst tensor info. Data types supported: Same as @p src
- * @param[in] perm Permutation vector
- */
- void configure(const CLCompileContext &compile_context, const ITensorInfo *src, ITensorInfo *dst, const PermutationVector &perm);
- /** Static function to check if given info will lead to a valid configuration of @ref ClPermuteKernel
- *
- * @note Arbitrary permutation vectors are supported with rank not greater than 4
- *
- * @param[in] src The src tensor info. Data types supported: All.
- * @param[in] dst The dst tensor info. Data types supported: same as @p src.
- * @param[in] perm Permutation vector
- *
- * @return a status
- */
- static Status validate(const ITensorInfo *src, const ITensorInfo *dst, const PermutationVector &perm);
-
- // Inherited methods overridden:
- void run_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue) override;
-
-private:
- PermutationVector _perm{};
-};
-} // namespace kernels
-} // namespace opencl
-} // namespace arm_compute
-#endif /*ARM_COMPUTE_CL_PERMUTE_KERNEL_H */
diff --git a/src/core/gpu/cl/kernels/ClPool2dKernel.cpp b/src/core/gpu/cl/kernels/ClPool2dKernel.cpp
deleted file mode 100644
index 0e15bffd14..0000000000
--- a/src/core/gpu/cl/kernels/ClPool2dKernel.cpp
+++ /dev/null
@@ -1,509 +0,0 @@
-/*
- * Copyright (c) 2017-2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "src/core/gpu/cl/kernels/ClPool2dKernel.h"
-
-#include "arm_compute/core/CL/CLHelpers.h"
-#include "arm_compute/core/CL/CLKernelLibrary.h"
-#include "arm_compute/core/CL/ICLTensor.h"
-#include "arm_compute/core/TensorInfo.h"
-#include "arm_compute/core/Utils.h"
-#include "arm_compute/core/Validate.h"
-#include "arm_compute/core/utils/misc/ShapeCalculator.h"
-#include "src/core/CL/CLValidate.h"
-#include "src/core/helpers/AutoConfiguration.h"
-#include "src/core/helpers/WindowHelpers.h"
-#include "support/Cast.h"
-#include "support/StringSupport.h"
-
-namespace arm_compute
-{
-namespace opencl
-{
-namespace kernels
-{
-using namespace arm_compute::misc::shape_calculator;
-
-namespace
-{
-// Internal window config info
-using ClPoolingConfig = std::pair<unsigned int, BorderSize>; //num_elems_processed_per_iteration, border_size
-
-void auto_init(const ITensorInfo *src, ITensorInfo *dst, ITensorInfo *indices, PoolingLayerInfo pool_info)
-{
- TensorShape out_shape = compute_pool_shape(*src, pool_info);
- auto_init_if_empty(*dst, src->clone()->set_tensor_shape(out_shape));
- if(indices)
- {
- auto_init_if_empty(*indices, src->clone()->set_tensor_shape(out_shape).set_data_type(DataType::U32));
- }
-}
-
-Status validate_arguments(const ITensorInfo *src, const ITensorInfo *dst, const PoolingLayerInfo &pool_info, const ITensorInfo *indices)
-{
- ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, dst);
- ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(src);
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::F16, DataType::F32);
- ARM_COMPUTE_RETURN_ERROR_ON_MSG((is_data_type_quantized_asymmetric(src->data_type()) && pool_info.pool_type == PoolingType::L2),
- "Unsupported combination of parameters!");
-
- const auto data_layout = pool_info.data_layout == DataLayout::UNKNOWN ? src->data_layout() : pool_info.data_layout;
- const int idx_width = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
- const int idx_height = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
- const bool is_global_pooling = pool_info.is_global_pooling;
- unsigned int pool_size_x = is_global_pooling ? src->dimension(idx_width) : pool_info.pool_size.width;
- unsigned int pool_size_y = is_global_pooling ? src->dimension(idx_height) : pool_info.pool_size.height;
- int output_width = 0;
- int output_height = 0;
- std::tie(output_width, output_height) = scaled_dimensions_signed(src->tensor_shape()[idx_width], src->tensor_shape()[idx_height],
- pool_size_x, pool_size_y, pool_info.pad_stride_info);
- ARM_COMPUTE_RETURN_ERROR_ON_MSG((output_width < 1 || output_height < 1), "Calculated output dimension size is invalid");
-
- // Check indices
- if(indices)
- {
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src, 1, DataType::F16, DataType::F32);
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(pool_info.pool_type != PoolingType::MAX, "Pooling indices only supported for MAX pooling method");
- ARM_COMPUTE_RETURN_ERROR_ON_MSG((pool_info.pool_size != Size2D(2, 2)), "Pooling indices only supported for pool size 2x2");
-
- if(indices->total_size() != 0)
- {
- TensorInfo idx_info(TensorInfo(compute_pool_shape(*src, pool_info), 1, DataType::U32));
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(indices, &idx_info);
- }
- }
-
- // Checks performed when dst is configured
- if(dst->total_size() != 0)
- {
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src, dst);
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_LAYOUT(src, dst);
- TensorInfo out_info(TensorInfo(compute_pool_shape(*src, pool_info), 1, dst->data_type()));
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(dst, &out_info);
- }
-
- return Status{};
-}
-
-std::tuple<Status, Window, ClPoolingConfig> validate_and_configure_window(ITensorInfo *src, ITensorInfo *dst, const PoolingLayerInfo &pool_info, ITensorInfo *indices = nullptr)
-{
- ARM_COMPUTE_ERROR_ON_NULLPTR(src, dst);
-
- // Get data layout
- const DataLayout data_layout = pool_info.data_layout == DataLayout::UNKNOWN ? src->data_layout() : pool_info.data_layout;
- const int idx_width = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
- const int idx_height = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
-
- int pool_stride_x = 0;
- int pool_stride_y = 0;
- unsigned int pooled_w = 0;
- unsigned int pooled_h = 0;
- int pool_size_x = pool_info.is_global_pooling ? src->dimension(idx_width) : pool_info.pool_size.width;
- int pool_size_y = pool_info.is_global_pooling ? src->dimension(idx_height) : pool_info.pool_size.height;
- const PadStrideInfo pad_stride_info = pool_info.pad_stride_info;
- std::tie(pool_stride_x, pool_stride_y) = pad_stride_info.stride();
- const int pool_pad_right = pad_stride_info.pad_right();
- const int pool_pad_top = pad_stride_info.pad_top();
- const int pool_pad_left = pad_stride_info.pad_left();
- const int pool_pad_bottom = pad_stride_info.pad_bottom();
- BorderSize border_size = BorderSize();
-
- auto_init(src, dst, indices, pool_info);
- pooled_w = dst->tensor_shape()[idx_width];
- pooled_h = dst->tensor_shape()[idx_height];
-
- const DataType data_type = src->data_type();
-
- const int src_width = src->dimension(idx_width);
- const int src_height = src->dimension(idx_height);
-
- unsigned int num_elems_processed_per_iteration = 0;
- bool window_changed = false;
- Window win{};
- switch(data_layout)
- {
- case DataLayout::NCHW:
- {
- // Initialize border size
- border_size = BorderSize(pool_pad_top, pool_pad_right, pool_pad_bottom, pool_pad_left);
- // Change the number of elements processed per iteration
- // for 3x3 pooling with a stride less than or equal to 3
- const bool can_optimize = (pool_size_x == 3) && (pool_size_y == 3) && (pool_stride_x <= 3) && !is_data_type_quantized(data_type);
- num_elems_processed_per_iteration = can_optimize ? 4 : 1;
- const unsigned int num_elems_read_per_iteration = (num_elems_processed_per_iteration - 1) * pool_stride_x + pool_size_x;
-
- // Number of iterations in X dimension
- const int num_iterations_x = (pooled_w + num_elems_processed_per_iteration - 1) / num_elems_processed_per_iteration;
-
- // Upper limit for the number of right/bottom border elements that are accessed
- const int upper_bound_w = ((num_iterations_x - 1) * num_elems_processed_per_iteration * pool_stride_x - pool_pad_left + num_elems_read_per_iteration) - src_width;
- const int upper_bound_h = ((pooled_h - 1) * pool_stride_y - pool_pad_top + pool_size_y) - src_height;
-
- border_size.right = std::max(upper_bound_w, pool_pad_right);
- border_size.bottom = std::max(upper_bound_h, pool_pad_bottom);
-
- win = calculate_max_window(*dst, Steps(num_elems_processed_per_iteration));
-
- AccessWindowRectangle src_access(src, -pool_pad_left, -pool_pad_top, num_elems_read_per_iteration, pool_size_y,
- pool_stride_x, pool_stride_y);
- AccessWindowHorizontal dst_access(dst, 0, num_elems_processed_per_iteration);
-
- // Update indices window
- if(indices)
- {
- AccessWindowHorizontal indices_access(indices, 0, num_elems_processed_per_iteration);
- window_changed = update_window_and_padding(win, src_access, dst_access, indices_access);
- indices_access.set_valid_region(win, ValidRegion(Coordinates(), indices->tensor_shape()));
- }
- else
- {
- window_changed = update_window_and_padding(win, src_access, dst_access);
- }
-
- dst_access.set_valid_region(win, ValidRegion(Coordinates(), dst->tensor_shape()));
- break;
- }
- case DataLayout::NHWC:
- {
- const size_t vec_size = dst->data_type() == DataType::F32 ? 2 : 4;
-
- // Initialize border size
- border_size = BorderSize();
- num_elems_processed_per_iteration = adjust_vec_size(vec_size, dst->dimension(0));
- win = calculate_max_window(*dst, Steps(num_elems_processed_per_iteration));
- break;
- }
- default:
- ARM_COMPUTE_ERROR("Not implemented");
- }
-
- Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
- return std::make_tuple(err, win, ClPoolingConfig(num_elems_processed_per_iteration, border_size));
-}
-} // namespace
-
-ClPool2dKernel::ClPool2dKernel()
- : _pool_info(), _data_layout(DataLayout::UNKNOWN), _border_size(0), _num_elems_processed_per_iteration(1)
-{
-}
-
-BorderSize ClPool2dKernel::border_size() const
-{
- return _border_size;
-}
-
-void ClPool2dKernel::configure(const ClCompileContext &compile_context, ITensorInfo *src, ITensorInfo *dst, const PoolingLayerInfo &pool_info, ITensorInfo *indices)
-{
- ARM_COMPUTE_ERROR_ON_NULLPTR(src, dst);
-
- auto padding_info = get_padding_info({ src, dst, indices });
-
- // Set instance variables
- _pool_info = pool_info;
- _data_layout = pool_info.data_layout == DataLayout::UNKNOWN ? src->data_layout() : pool_info.data_layout;
- int pool_stride_x = 0;
- int pool_stride_y = 0;
- const PoolingType pool_type = pool_info.pool_type;
- const int idx_width = get_data_layout_dimension_index(_data_layout, DataLayoutDimension::WIDTH);
- const int idx_height = get_data_layout_dimension_index(_data_layout, DataLayoutDimension::HEIGHT);
- const int idx_channel = get_data_layout_dimension_index(_data_layout, DataLayoutDimension::CHANNEL);
- const int idx_batch_size = get_data_layout_dimension_index(_data_layout, DataLayoutDimension::BATCHES);
- const int pool_size_x = pool_info.is_global_pooling ? src->dimension(idx_width) : pool_info.pool_size.width;
- const int pool_size_y = pool_info.is_global_pooling ? src->dimension(idx_height) : pool_info.pool_size.height;
- const PadStrideInfo pad_stride_info = pool_info.pad_stride_info;
- const bool exclude_padding = pool_info.exclude_padding;
- std::tie(pool_stride_x, pool_stride_y) = pad_stride_info.stride();
- const int pool_pad_top = pad_stride_info.pad_top();
- const int pool_pad_left = pad_stride_info.pad_left();
-
- // Set build options
- CLBuildOptions build_opts;
- const DataType data_type = src->data_type();
-
- // Configure kernel window
- auto win_config = validate_and_configure_window(src, dst, pool_info, indices);
-
- ARM_COMPUTE_ERROR_THROW_ON(std::get<0>(win_config));
- ICLKernel::configure_internal(std::get<1>(win_config));
-
- ClPoolingConfig pooling_config = std::get<2>(win_config);
- _num_elems_processed_per_iteration = pooling_config.first;
- _border_size = pooling_config.second;
-
- build_opts.add_option("-DVEC_SIZE=" + support::cpp11::to_string(_num_elems_processed_per_iteration));
-
- // Tensor paddings are used to calculate the indices for MAX pooling
- if(pool_info.pool_size == Size2D(2, 2) && pool_type == PoolingType::MAX && indices && is_data_type_float(data_type))
- {
- build_opts.add_option("-DPAD_TENSOR_LEFT=" + support::cpp11::to_string(src->padding().left));
- build_opts.add_option("-DPAD_TENSOR_RIGHT=" + support::cpp11::to_string(src->padding().right));
- build_opts.add_option("-DPAD_TENSOR_TOP=" + support::cpp11::to_string(src->padding().top));
- build_opts.add_option("-DPAD_TENSOR_BOTTOM=" + support::cpp11::to_string(src->padding().bottom));
- build_opts.add_option("-DTENSOR_CHANNEL=" + support::cpp11::to_string(src->dimension(idx_channel)));
- build_opts.add_option("-DTENSOR_WIDTH=" + support::cpp11::to_string(src->dimension(idx_width)));
- build_opts.add_option("-DTENSOR_HEIGHT=" + support::cpp11::to_string(src->dimension(idx_height)));
- }
-
- if(is_data_type_quantized_asymmetric(data_type) && src->quantization_info() != dst->quantization_info())
- {
- const UniformQuantizationInfo iq_info = src->quantization_info().uniform();
- const UniformQuantizationInfo oq_info = dst->quantization_info().uniform();
-
- build_opts.add_option("-DOFFSET_IN1=" + float_to_string_with_full_precision(iq_info.offset));
- build_opts.add_option("-DOFFSET_OUT=" + float_to_string_with_full_precision(oq_info.offset));
- build_opts.add_option("-DSCALE_IN1=" + float_to_string_with_full_precision(iq_info.scale));
- build_opts.add_option("-DSCALE_OUT=" + float_to_string_with_full_precision(oq_info.scale));
- }
-
- // Check dst dimensions
- auto_init(src, dst, indices, pool_info);
-
- ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(src, dst, pool_info, indices));
-
- build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(data_type));
- build_opts.add_option("-DPOOL_" + string_from_pooling_type(pool_type));
- build_opts.add_option("-DSTRIDE_X=" + support::cpp11::to_string(pool_stride_x));
- build_opts.add_option("-DSTRIDE_Y=" + support::cpp11::to_string(pool_stride_y));
- build_opts.add_option("-DPAD_X=" + support::cpp11::to_string(pool_pad_left));
- build_opts.add_option("-DPAD_Y=" + support::cpp11::to_string(pool_pad_top));
- build_opts.add_option("-DPOOL_SIZE_X=" + support::cpp11::to_string(pool_size_x));
- build_opts.add_option("-DPOOL_SIZE_Y=" + support::cpp11::to_string(pool_size_y));
-
- // Set the initial value for the pooling operation accordingly with the data type
- if(pool_type == PoolingType::MAX)
- {
- if(is_data_type_quantized(data_type))
- {
- PixelValue type_min{};
- std::tie(type_min, std::ignore) = get_min_max(data_type);
- build_opts.add_option("-DINITIAL_VALUE=" + support::cpp11::to_string(type_min.get<int32_t>()));
- }
- else
- {
- build_opts.add_option("-DINITIAL_VALUE=" + float_to_string_with_full_precision(std::numeric_limits<float>::lowest()));
- }
- }
- else
- {
- // Pool AVG and Pool L2 initial value
- build_opts.add_option("-DINITIAL_VALUE=0");
- }
-
- build_opts.add_option("-DMAX_WIDTH=" + support::cpp11::to_string(src->dimension(idx_width) + (exclude_padding ? 0 : pool_pad_left)));
- build_opts.add_option("-DMAX_HEIGHT=" + support::cpp11::to_string(src->dimension(idx_height) + (exclude_padding ? 0 : pool_pad_top)));
-
- // Create kernel
- switch(_data_layout)
- {
- case DataLayout::NCHW:
- {
- const auto use_fp_mixed_precision = (data_type == DataType::F16) && pool_info.fp_mixed_precision;
- const auto use_wider_accumulator = use_fp_mixed_precision && (pool_type != PoolingType::MAX);
- const auto acc_data_type = get_cl_type_from_data_type(use_wider_accumulator ? DataType::F32 : data_type);
- build_opts.add_option("-DACC_DATA_TYPE=" + acc_data_type);
- build_opts.add_option_if(use_wider_accumulator, "-DFP_MIXED_PRECISION");
-
- if(pool_type != PoolingType::MAX)
- {
- build_opts.add_option_if(exclude_padding, "-DEXCLUDE_PADDING");
- }
-
- if((pool_size_x == 3) && (pool_size_y == 3) && !is_data_type_quantized_asymmetric(data_type))
- {
- // Check if we have a 3x3 pool with stride_x less than or equal to 3. In these cases, run an optimized OpenCL kernel where
- // each thread computes 4 dst elements
- const bool is_pool3x3_stride_le3 = (pool_size_x == 3) && (pool_size_y == 3) && (pool_stride_x <= 3);
-
- std::string kernel_name = ((is_pool3x3_stride_le3) ? "pooling_layer_optimized_" : "pooling_layer_")
- + support::cpp11::to_string(pool_size_x);
- _kernel = create_kernel(compile_context, kernel_name, build_opts.options());
- }
- else if(pool_info.pool_size == Size2D(2, 2) && pool_type == PoolingType::MAX && indices && is_data_type_float(data_type))
- {
- // For max pooling with pool2x2, store indices which will be used in max unpooling
- if(data_type == DataType::F32)
- {
- std::string kernel_name = "pooling_layer_2_nchw_indices_fp32";
- _kernel = create_kernel(compile_context, kernel_name, build_opts.options());
- }
- else if(data_type == DataType::F16)
- {
- std::string kernel_name = "pooling_layer_2_nchw_indices_fp16";
- _kernel = create_kernel(compile_context, kernel_name, build_opts.options());
- }
- }
- else // Run general case
- {
- std::string kernel_name = is_data_type_quantized_asymmetric(data_type) ? "pooling_layer_MxN_quantized_nchw" : "pooling_layer_MxN_nchw";
- _kernel = create_kernel(compile_context, kernel_name, build_opts.options());
- }
- break;
- }
- case DataLayout::NHWC:
- {
- // Floating point mixed precision is supported on F16 only
- const auto use_fp_mixed_precision = (data_type == DataType::F16) && pool_info.fp_mixed_precision && pool_type != PoolingType::MAX;
-
- // Wider accumulation is required to avoid accuracy loss
- // Case 1: Floating point mixed precision (fp16 src data and fp32 accumulation)
- // Case 2: Quantized (int8/uint8 src data and int32 accumulation)
- DataType acc_data_type = data_type;
-
- if(use_fp_mixed_precision)
- {
- acc_data_type = DataType::F32;
- }
- else if(is_data_type_quantized(data_type) && pool_type != PoolingType::MAX)
- {
- acc_data_type = DataType::S32;
- }
-
- build_opts.add_option("-DACC_DATA_TYPE=" + get_cl_type_from_data_type(acc_data_type));
- build_opts.add_option_if(use_fp_mixed_precision, "-DFP_MIXED_PRECISION");
- build_opts.add_option_if(exclude_padding, "-DEXCLUDE_PADDING");
- build_opts.add_option("-DSRC_WIDTH=" + support::cpp11::to_string(src->dimension(idx_width)));
- build_opts.add_option("-DSRC_HEIGHT=" + support::cpp11::to_string(src->dimension(idx_height)));
- build_opts.add_option("-DDST_HEIGHT=" + support::cpp11::to_string(dst->dimension(idx_height)));
- build_opts.add_option("-DDST_CHANNELS=" + support::cpp11::to_string(dst->dimension(idx_channel)));
- build_opts.add_option("-DDST_BATCH_SIZE=" + support::cpp11::to_string(dst->dimension(idx_batch_size)));
- build_opts.add_option("-DVEC_SIZE_LEFTOVER=" + support::cpp11::to_string(src->dimension(0) % _num_elems_processed_per_iteration));
- if(pool_info.pool_size == Size2D(2, 2) && is_data_type_float(data_type))
- {
- build_opts.add_option_if(indices != nullptr && pool_type == PoolingType::MAX, "-DEXTRACT_MAX_INDEX");
-
- std::string kernel_name = "pooling_layer_2x2_nhwc";
- _kernel = create_kernel(compile_context, kernel_name, build_opts.options());
- }
- else
- {
- std::string kernel_name = is_data_type_quantized_asymmetric(data_type) ? "pooling_layer_MxN_quantized_nhwc" : "pooling_layer_MxN_nhwc";
- _kernel = create_kernel(compile_context, kernel_name, build_opts.options());
- }
- break;
- }
- default:
- ARM_COMPUTE_ERROR("Not implemented");
- }
-
- // Set config_id for enabling LWS tuning
- _config_id = "pooling_layer_";
- _config_id += lower_string(string_from_data_type(data_type));
- _config_id += "_";
- _config_id += lower_string(string_from_data_layout(_data_layout));
- _config_id += "_";
- _config_id += support::cpp11::to_string(dst->dimension(idx_width));
- _config_id += "_";
- _config_id += support::cpp11::to_string(dst->dimension(idx_height));
- _config_id += "_";
- _config_id += support::cpp11::to_string(dst->dimension(idx_channel));
- _config_id += "_";
- _config_id += lower_string(string_from_data_layout(src->data_layout()));
-
- ARM_COMPUTE_ERROR_ON(src->data_layout() == DataLayout::NHWC && has_padding_changed(padding_info));
-}
-
-Status ClPool2dKernel::validate(const ITensorInfo *src, const ITensorInfo *dst, const PoolingLayerInfo &pool_info, const ITensorInfo *indices)
-{
- ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(src, dst, pool_info, indices));
- ARM_COMPUTE_RETURN_ON_ERROR(std::get<0>(validate_and_configure_window(src->clone().get(), dst->clone().get(), pool_info)));
-
- return Status{};
-}
-
-void ClPool2dKernel::run_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue)
-{
- ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
- ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window);
-
- unsigned int pool_stride_x = 0;
- unsigned int pool_stride_y = 0;
- std::tie(pool_stride_x, pool_stride_y) = _pool_info.pad_stride_info.stride();
-
- const auto src = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC));
- auto dst = utils::cast::polymorphic_downcast<ICLTensor *>(tensors.get_tensor(TensorType::ACL_DST_0));
- auto indices = utils::cast::polymorphic_downcast<ICLTensor *>(tensors.get_tensor(TensorType::ACL_DST_1));
-
- // Collapse window
- Window window_collapsed = window.collapse_if_possible(ICLKernel::window(), Window::DimZ);
-
- switch(_data_layout)
- {
- case DataLayout::NCHW:
- {
- Window slice = window_collapsed.first_slice_window_3D();
- do
- {
- // Upsample src by pool size
- Window in_slice(slice);
- in_slice.set(Window::DimX, Window::Dimension(in_slice.x().start() - _pool_info.pad_stride_info.pad_left(),
- (in_slice.x().end() - _pool_info.pad_stride_info.pad_left()) * pool_stride_x,
- pool_stride_x * _num_elems_processed_per_iteration));
- in_slice.set(Window::DimY, Window::Dimension(in_slice.y().start() - _pool_info.pad_stride_info.pad_top(),
- (in_slice.y().end() - _pool_info.pad_stride_info.pad_top()) * pool_stride_y,
- pool_stride_y));
-
- // Set srcs
- unsigned int idx = 0;
- add_3D_tensor_argument(idx, src, in_slice);
- add_3D_tensor_argument(idx, dst, slice);
- if(indices && is_data_type_float(src->info()->data_type()) && (_pool_info.pool_size == Size2D(2, 2)))
- {
- add_3D_tensor_argument(idx, indices, slice);
- }
- enqueue(queue, *this, slice, lws_hint());
- }
- while(window_collapsed.slide_window_slice_3D(slice));
- break;
- }
- case DataLayout::NHWC:
- {
- const size_t batch_size = dst->info()->tensor_shape().total_size_upper(3);
-
- Window slice = window_collapsed.first_slice_window_4D();
- Window in_slice = window_collapsed.first_slice_window_4D();
- in_slice.set(Window::DimX, Window::Dimension(0, src->info()->dimension(0), _num_elems_processed_per_iteration));
- in_slice.set(Window::DimY, Window::Dimension(0, src->info()->dimension(1), pool_stride_x));
- in_slice.set(Window::DimZ, Window::Dimension(0, src->info()->dimension(2), pool_stride_y));
- in_slice.set(3, Window::Dimension(0, batch_size, 1));
- do
- {
- // Set srcs
- unsigned int idx = 0;
- add_4D_tensor_argument(idx, src, in_slice);
- add_4D_tensor_argument(idx, dst, slice);
- if(indices && is_data_type_float(src->info()->data_type()) && (_pool_info.pool_type == PoolingType::MAX) && (_pool_info.pool_size == Size2D(2, 2)))
- {
- add_4D_tensor_argument(idx, indices, slice);
- }
- enqueue(queue, *this, slice, lws_hint());
- }
- while(window.slide_window_slice_4D(slice) && window.slide_window_slice_4D(in_slice));
- break;
- }
- default:
- ARM_COMPUTE_ERROR("Not implemented");
- }
-}
-} // namespace kernels
-} // namespace opencl
-} // namespace arm_compute
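validate_arguments() above rejects configurations whose computed output size is smaller than 1. Assuming the common FLOOR rounding of PadStrideInfo, the per-dimension arithmetic is roughly the following; this is an illustrative sketch, not the library's scaled_dimensions_signed():

#include <cstdio>

// Output extent of one pooled dimension with FLOOR rounding.
int pooled_dim_floor(int in, int pad_before, int pad_after, int pool_size, int stride)
{
    return (in + pad_before + pad_after - pool_size) / stride + 1;
}

int main()
{
    // A 7-wide input, 3-wide pool, stride 2 and no padding gives 3 output elements.
    std::printf("%d\n", pooled_dim_floor(7, 0, 0, 3, 2)); // prints 3
    return 0;
}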
diff --git a/src/core/gpu/cl/kernels/ClPool2dKernel.h b/src/core/gpu/cl/kernels/ClPool2dKernel.h
deleted file mode 100644
index 8ecb8eb7b7..0000000000
--- a/src/core/gpu/cl/kernels/ClPool2dKernel.h
+++ /dev/null
@@ -1,76 +0,0 @@
-/*
- * Copyright (c) 2017-2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_CL_POOL2D_KERNEL_H
-#define ARM_COMPUTE_CL_POOL2D_KERNEL_H
-
-#include "src/core/common/Macros.h"
-#include "src/core/gpu/cl/ClCompileContext.h"
-#include "src/core/gpu/cl/IClKernel.h"
-
-namespace arm_compute
-{
-namespace opencl
-{
-namespace kernels
-{
-/** Interface for the pooling layer kernel */
-class ClPool2dKernel : public IClKernel
-{
-public:
- /** Default constructor */
- ClPool2dKernel();
- ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(ClPool2dKernel);
-
- /** Configure kernel for a given list of arguments
- *
- *
- * @param[in] compile_context The compile context to be used.
- * @param[in] src Source tensor info. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32.
- * @param[out] dst Destination tensor info. Data types supported: same as @p src.
- * @param[in] pool_info Contains pooling operation information described in @ref PoolingLayerInfo.
- * @param[out] indices (optional) The indices of the maximal values. Data type supported: U32.
- */
- void configure(const ClCompileContext &compile_context, ITensorInfo *src, ITensorInfo *dst, const PoolingLayerInfo &pool_info, ITensorInfo *indices = nullptr);
- /** Static function to check if given info will lead to a valid configuration
- *
- * Similar to ClPool2dKernel::configure()
- *
- * @return a status
- */
- static Status validate(const ITensorInfo *src, const ITensorInfo *dst, const PoolingLayerInfo &pool_info, const ITensorInfo *indices = nullptr);
-
- // Inherited methods overridden:
- void run_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue) override;
- BorderSize border_size() const override;
-
-public:
- PoolingLayerInfo _pool_info;
- DataLayout _data_layout;
- BorderSize _border_size;
- unsigned int _num_elems_processed_per_iteration;
-};
-} // namespace kernels
-} // namespace opencl
-} // namespace arm_compute
-#endif /* ARM_COMPUTE_CL_POOL2D_KERNEL_H */
diff --git a/src/core/gpu/cl/kernels/ClQuantizeKernel.cpp b/src/core/gpu/cl/kernels/ClQuantizeKernel.cpp
deleted file mode 100644
index 48d351d536..0000000000
--- a/src/core/gpu/cl/kernels/ClQuantizeKernel.cpp
+++ /dev/null
@@ -1,175 +0,0 @@
-/*
- * Copyright (c) 2017-2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "src/core/gpu/cl/kernels/ClQuantizeKernel.h"
-
-#include "arm_compute/core/CL/CLHelpers.h"
-#include "arm_compute/core/CL/CLKernelLibrary.h"
-#include "arm_compute/core/CL/ICLTensor.h"
-#include "arm_compute/core/Error.h"
-#include "arm_compute/core/TensorInfo.h"
-#include "arm_compute/core/Utils.h"
-#include "arm_compute/core/Validate.h"
-#include "arm_compute/core/utils/quantization/AsymmHelpers.h"
-
-#include "src/core/CL/CLValidate.h"
-#include "src/core/helpers/WindowHelpers.h"
-
-#include "support/Cast.h"
-#include "support/StringSupport.h"
-
-namespace arm_compute
-{
-namespace opencl
-{
-namespace kernels
-{
-namespace
-{
-Status validate_arguments(const ITensorInfo *src, const ITensorInfo *dst)
-{
- ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, dst);
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::F32, DataType::F16);
- ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(src);
-
- // Output must always be initialized
- ARM_COMPUTE_RETURN_ERROR_ON(dst->tensor_shape().total_size() == 0);
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(dst, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::QASYMM16);
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(src, dst);
-
- return Status{};
-}
-} // namespace
-
-void ClQuantizeKernel::configure(const CLCompileContext &compile_context, const ITensorInfo *src, ITensorInfo *dst)
-{
- ARM_COMPUTE_ERROR_ON_NULLPTR(src, dst);
-
- auto padding_info = get_padding_info({ src, dst });
-
- ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(src, dst));
-
- const int vec_size_x = 16 / src->element_size();
- const int input_width_x = src->tensor_shape().x();
- const bool multi_access_x = (input_width_x / vec_size_x > 0);
-
- const UniformQuantizationInfo qinfo = dst->quantization_info().uniform();
- const DataType output_data_type = dst->data_type();
-
- float scale_to_apply = qinfo.scale;
- int32_t offset_to_apply = qinfo.offset;
- if(is_data_type_quantized_asymmetric(src->data_type()))
- {
- /*
- * In case of requantization of a quantized input tensor to an output tensor with a different quantization,
- * instead of applying a dequantization followed by a quantization function, we just compute the new scale
- * and offset to apply.
- *
- * Assuming:
- * - q_i as input quantized value
- * - q_o as output quantized value
- * - z_i as input quantization offset value
- * - z_o as output quantization offset value
- * - s_i as input quantization scale value
- * - s_o as output quantization scale value
- * - z_n as new quantization offset value
- * - s_n as new quantization scale value
- *
- * q_o = ( q_i - z_i ) * s_i / s_o + z_o
- *
- * We can rewrite the formula as:
- *
- * q_o = ( q_i * s_i / s_o ) - z_i * s_i / s_o + z_o
- *
- * q_o = q_i / s_n + z_n
- *
- * Where:
- *
- * s_n = s_o / s_i
- *
- * z_n = - z_i * s_i / s_o + z_o
- *
- */
- const UniformQuantizationInfo qinfo_in = src->quantization_info().uniform();
- scale_to_apply /= qinfo_in.scale;
- // In order to minimize flooring we convert the offset to a float,
- // then compute the new offset in the float domain,
- // and finally convert it back to an int32_t
- offset_to_apply -= static_cast<int32_t>(static_cast<float>(qinfo_in.offset) * qinfo_in.scale / qinfo.scale);
- }
-
- // Create kernel
- CLBuildOptions build_opts;
- build_opts.add_option_if(is_data_type_float(src->data_type()), "-DIS_FLOAT");
- build_opts.add_option("-DSCALE=" + float_to_string_with_full_precision(scale_to_apply));
- build_opts.add_option("-DOFFSET=" + support::cpp11::to_string(offset_to_apply));
- build_opts.add_option("-DVEC_SIZE=" + support::cpp11::to_string(vec_size_x));
- build_opts.add_option("-DDATA_TYPE_IN=" + get_cl_type_from_data_type(src->data_type()));
- build_opts.add_option("-DDATA_TYPE_OUT=" + get_cl_type_from_data_type(output_data_type));
- build_opts.add_option_if(multi_access_x, "-DLAST_ACCESSED_X=" + support::cpp11::to_string(std::max<int>(input_width_x - vec_size_x, 0)));
- std::pair<int, int> min_max_quant_values = quantization::get_min_max_values_from_quantized_data_type(output_data_type);
- build_opts.add_option("-DMIN_QUANT_VAL=" + support::cpp11::to_string(min_max_quant_values.first));
- build_opts.add_option("-DMAX_QUANT_VAL=" + support::cpp11::to_string(min_max_quant_values.second));
-
- _kernel = create_kernel(compile_context, "quantization_layer", build_opts.options());
-
- // Configure kernel window
- Window win = calculate_max_window(*src, Steps());
- if(multi_access_x)
- {
- win.set(Window::DimX, Window::Dimension(win.x().start(), ceil_to_multiple(win.x().end(), vec_size_x), vec_size_x));
- }
- ICLKernel::configure_internal(win);
-
- ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
-}
-
-Status ClQuantizeKernel::validate(const ITensorInfo *src, const ITensorInfo *dst)
-{
- ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(src, dst));
- return Status{};
-}
-
-void ClQuantizeKernel::run_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue)
-{
- ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
- ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window);
-
- auto src = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC));
- auto dst = utils::cast::polymorphic_downcast<ICLTensor *>(tensors.get_tensor(TensorType::ACL_DST));
-
- Window window_collapsed = window.collapse_if_possible(ICLKernel::window(), 3);
- Window slice = window_collapsed.first_slice_window_3D();
-
- do
- {
- unsigned int idx = 0;
- add_3D_tensor_argument(idx, src, slice);
- add_3D_tensor_argument(idx, dst, slice);
- enqueue(queue, *this, slice, lws_hint());
- }
- while(window_collapsed.slide_window_slice_3D(slice));
-}
-} // namespace kernels
-} // namespace opencl
-} // namespace arm_compute
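The comment block in ClQuantizeKernel::configure above folds a requantization into a single scale and offset. A small numeric check of that derivation, with values invented purely for illustration:

#include <cmath>
#include <cstdio>

int main()
{
    const float s_i = 0.5f, s_o = 0.25f; // input/output scales (illustrative)
    const int   z_i = 10,   z_o = 3;     // input/output zero points (illustrative)
    const int   q_i = 42;                // an input quantized value

    // Reference path: dequantize, then quantize with the output parameters.
    const float real  = (q_i - z_i) * s_i;
    const int   q_ref = static_cast<int>(std::lround(real / s_o)) + z_o;

    // Folded path used by the kernel: one scale and one offset in the quantized domain.
    const float s_n    = s_o / s_i;
    const float z_n    = z_o - z_i * s_i / s_o;
    const int   q_fold = static_cast<int>(std::lround(q_i / s_n + z_n));

    std::printf("reference=%d folded=%d\n", q_ref, q_fold); // both print 67
    return 0;
}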
diff --git a/src/core/gpu/cl/kernels/ClQuantizeKernel.h b/src/core/gpu/cl/kernels/ClQuantizeKernel.h
deleted file mode 100644
index 8d37f33032..0000000000
--- a/src/core/gpu/cl/kernels/ClQuantizeKernel.h
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- * Copyright (c) 2017-2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_CL_QUANTIZE_KERNEL_H
-#define ARM_COMPUTE_CL_QUANTIZE_KERNEL_H
-
-#include "src/core/common/Macros.h"
-#include "src/core/gpu/cl/ClCompileContext.h"
-#include "src/core/gpu/cl/IClKernel.h"
-
-namespace arm_compute
-{
-namespace opencl
-{
-namespace kernels
-{
-/** Interface for the quantization layer kernel.
- *
- * @note The implementation supports only 3D input tensors.
- */
-class ClQuantizeKernel : public IClKernel
-{
-public:
- /** Default constructor */
- ClQuantizeKernel() = default;
- ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(ClQuantizeKernel);
- /** Set the input, output.
- *
- * @param[in] compile_context The compile context to be used.
- * @param[in] src Source tensor info. Data types supported: QASYMM8/QASYMM8_SIGNED/F32/F16.
- * @param[out] dst Destination tensor info with the same dimensions of input. Data types supported: QASYMM8/QASYMM8_SIGNED/QASYMM16.
- *
- * @note Output auto initialization is not supported by this kernel
- */
- void configure(const CLCompileContext &compile_context, const ITensorInfo *src, ITensorInfo *dst);
- /** Static function to check if given info will lead to a valid configuration
- *
- * Similar to @ref ClQuantizeKernel::configure()
- *
- * @return a status
- */
- static Status validate(const ITensorInfo *src, const ITensorInfo *dst);
-
- // Inherited methods overridden:
- void run_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue) override;
-};
-} // namespace kernels
-} // namespace opencl
-} // namespace arm_compute
-#endif /* ARM_COMPUTE_CL_QUANTIZE_KERNEL_H */
diff --git a/src/core/gpu/cl/kernels/ClReshapeKernel.cpp b/src/core/gpu/cl/kernels/ClReshapeKernel.cpp
deleted file mode 100644
index 923b9cb264..0000000000
--- a/src/core/gpu/cl/kernels/ClReshapeKernel.cpp
+++ /dev/null
@@ -1,129 +0,0 @@
-/*
- * Copyright (c) 2017-2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "src/core/gpu/cl/kernels/ClReshapeKernel.h"
-
-#include "arm_compute/core/CL/CLHelpers.h"
-#include "arm_compute/core/CL/CLKernelLibrary.h"
-#include "arm_compute/core/CL/ICLTensor.h"
-#include "arm_compute/core/CL/OpenCL.h"
-#include "arm_compute/core/Helpers.h"
-#include "arm_compute/core/TensorInfo.h"
-#include "arm_compute/core/Utils.h"
-#include "src/core/CL/CLValidate.h"
-#include "src/core/helpers/WindowHelpers.h"
-#include "support/Cast.h"
-
-#include <string>
-
-/** [ClReshapeKernel Kernel] **/
-namespace arm_compute
-{
-namespace opencl
-{
-namespace kernels
-{
-namespace
-{
-Status validate_arguments(const ITensorInfo *src, const ITensorInfo *dst)
-{
- ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, dst);
- ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(src);
- ARM_COMPUTE_RETURN_ERROR_ON(src->data_type() == DataType::UNKNOWN);
-
- if(dst->tensor_shape().total_size() != 0)
- {
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src, dst);
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_QUANTIZATION_INFO(src, dst);
- ARM_COMPUTE_RETURN_ERROR_ON(src->tensor_shape().total_size() != dst->tensor_shape().total_size());
- }
-
- return Status{};
-}
-} // namespace
-
-void ClReshapeKernel::configure(const CLCompileContext &compile_context, const ITensorInfo *src, ITensorInfo *dst)
-{
- ARM_COMPUTE_ERROR_ON_NULLPTR(src, dst);
- ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(src, dst));
-
- auto padding_info = get_padding_info({ src, dst });
-
- // Create kernel
- std::set<std::string> build_opts = { "-DDATA_TYPE=" + get_cl_unsigned_type_from_element_size(src->element_size()) };
- _kernel = create_kernel(compile_context, "reshape_layer", build_opts);
-
- // Add static arguments
- const cl_int2 src_shape =
- {
- {
- static_cast<cl_int>(src->tensor_shape()[0]),
- static_cast<cl_int>(src->tensor_shape()[1])
- }
- };
- const cl_int2 dst_shape =
- {
- {
- static_cast<cl_int>(dst->tensor_shape()[0]),
- static_cast<cl_int>(dst->tensor_shape()[1])
- }
- };
- unsigned int idx = 2 * num_arguments_per_3D_tensor(); // Skip the src and dst parameters
- _kernel.setArg<cl_int2>(idx++, src_shape);
- _kernel.setArg<cl_int2>(idx++, dst_shape);
-
- // Configure kernel window
- Window win = calculate_max_window(*src);
- ICLKernel::configure_internal(win);
-
- ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
-}
-
-Status ClReshapeKernel::validate(const ITensorInfo *src, const ITensorInfo *dst)
-{
- ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(src, dst));
-
- return Status{};
-}
-
-void ClReshapeKernel::run_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue)
-{
- ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
- ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(IKernel::window(), window);
-
- Window window_collapsed = window.collapse_if_possible(ICLKernel::window(), Window::DimZ);
- Window slice = window_collapsed.first_slice_window_3D();
-
- const auto src = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC));
- auto dst = utils::cast::polymorphic_downcast<ICLTensor *>(tensors.get_tensor(TensorType::ACL_DST));
-
- // Set srcs
- unsigned int idx = 0;
- add_3D_tensor_argument(idx, src, window_collapsed);
- add_3D_tensor_argument(idx, dst, window_collapsed);
- enqueue(queue, *this, slice, lws_hint());
-}
-} // namespace kernels
-} // namespace opencl
-} // namespace arm_compute
-/** [ClReshapeKernel Kernel] **/
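The reshape kernel above passes the 2D src and dst shapes as static arguments and leaves each element at the same linear offset, only re-deriving its (x, y, z) coordinates. An illustrative sketch of that remapping, not the actual OpenCL kernel:

#include <array>
#include <cstdio>

// Map a source coordinate to its destination coordinate by preserving the linear offset.
std::array<int, 3> remap(int x, int y, int z, std::array<int, 2> src_shape, std::array<int, 2> dst_shape)
{
    const int linear = x + y * src_shape[0] + z * src_shape[0] * src_shape[1];
    return { linear % dst_shape[0], (linear / dst_shape[0]) % dst_shape[1], linear / (dst_shape[0] * dst_shape[1]) };
}

int main()
{
    // Reshaping a 4x6 plane into 8x3: element (3, 5, 0) keeps linear offset 23 -> (7, 2, 0).
    const auto c = remap(3, 5, 0, { 4, 6 }, { 8, 3 });
    std::printf("(%d, %d, %d)\n", c[0], c[1], c[2]);
    return 0;
}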
diff --git a/src/core/gpu/cl/kernels/ClReshapeKernel.h b/src/core/gpu/cl/kernels/ClReshapeKernel.h
deleted file mode 100644
index 0501b93f40..0000000000
--- a/src/core/gpu/cl/kernels/ClReshapeKernel.h
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- * Copyright (c) 2017-2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_CL_RESHAPE_KERNEL_H
-#define ARM_COMPUTE_CL_RESHAPE_KERNEL_H
-
-#include "src/core/common/Macros.h"
-#include "src/core/gpu/cl/ClCompileContext.h"
-#include "src/core/gpu/cl/IClKernel.h"
-
-namespace arm_compute
-{
-namespace opencl
-{
-namespace kernels
-{
-/** Interface for the kernel to perform tensor reshaping */
-class ClReshapeKernel : public IClKernel
-{
-public:
- ClReshapeKernel() = default;
- ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(ClReshapeKernel);
- /** Set the src and dst of the kernel
- *
- * @param[in] compile_context The compile context to be used.
- * @param[in] src Source tensor info. Data type supported: All.
- * @param[out] dst Destination tensor info. Data type supported: Same as @p src
- */
- void configure(const CLCompileContext &compile_context, const ITensorInfo *src, ITensorInfo *dst);
-
- /** Static function to check if given info will lead to a valid configuration of @ref ClReshapeKernel
- *
- * @param[in] src Source tensor info. Data type supported: All
- * @param[in] dst Destination tensor info. Data type supported: Same as @p src
- *
- * @return a status
- */
- static Status validate(const ITensorInfo *src, const ITensorInfo *dst);
-
- // Inherited methods overridden:
- void run_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue) override;
-};
-} // namespace kernels
-} // namespace opencl
-} // namespace arm_compute
-#endif /*ARM_COMPUTE_CL_RESHAPE_KERNEL_H */
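
For illustration, a minimal validate-only sketch of the ClReshapeKernel interface removed above; the 8x4 -> 32x1 shapes and the standalone main() are assumptions for this example, and only the static validate() path is exercised, so no OpenCL device is needed:

    // Hypothetical example: reshape is valid whenever src and dst hold the same
    // total number of elements and share data type and quantization info.
    #include "arm_compute/core/Error.h"
    #include "arm_compute/core/TensorInfo.h"
    #include "src/core/gpu/cl/kernels/ClReshapeKernel.h"

    int main()
    {
        using namespace arm_compute;
        const TensorInfo src(TensorShape(8U, 4U), 1, DataType::F32);  // 32 elements
        const TensorInfo dst(TensorShape(32U, 1U), 1, DataType::F32); // 32 elements
        const Status     st = opencl::kernels::ClReshapeKernel::validate(&src, &dst);
        return st.error_code() == ErrorCode::OK ? 0 : 1;
    }
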
diff --git a/src/core/gpu/cl/kernels/ClScaleKernel.cpp b/src/core/gpu/cl/kernels/ClScaleKernel.cpp
deleted file mode 100644
index 7fb5d2a5d3..0000000000
--- a/src/core/gpu/cl/kernels/ClScaleKernel.cpp
+++ /dev/null
@@ -1,266 +0,0 @@
-/*
- * Copyright (c) 2016-2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "src/core/gpu/cl/kernels/ClScaleKernel.h"
-
-#include "arm_compute/core/CL/ICLTensor.h"
-#include "arm_compute/core/TensorInfo.h"
-#include "src/core/AccessWindowStatic.h"
-#include "src/core/CL/CLValidate.h"
-#include "src/core/helpers/WindowHelpers.h"
-#include "src/core/utils/ScaleUtils.h"
-#include "support/Cast.h"
-
-namespace arm_compute
-{
-namespace opencl
-{
-namespace kernels
-{
-namespace
-{
-inline std::pair<float, float> calculate_scale_factors(const ITensorInfo *src, const ITensorInfo *dst, DataLayout data_layout, bool align_corners)
-{
- const int idx_width = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
- const int idx_height = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
-
- // Compute the ratio between source width/height and destination width/height
- const unsigned int src_width = src->dimension(idx_width);
- const unsigned int src_height = src->dimension(idx_height);
- const unsigned int dst_width = dst->dimension(idx_width);
- const unsigned int dst_height = dst->dimension(idx_height);
-
- float wr = arm_compute::scale_utils::calculate_resize_ratio(src_width, dst_width, align_corners);
- float hr = arm_compute::scale_utils::calculate_resize_ratio(src_height, dst_height, align_corners);
-
- return std::make_pair(wr, hr);
-}
-
-Status validate_arguments(const ITensorInfo *src, const ITensorInfo *dst, const ScaleKernelInfo &info)
-{
- ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, dst);
- ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(src);
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::U8, DataType::S16, DataType::F16, DataType::F32);
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src, dst);
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_QUANTIZATION_INFO(src, dst);
- ARM_COMPUTE_RETURN_ERROR_ON(dst == src);
- ARM_COMPUTE_RETURN_ERROR_ON(info.align_corners && !arm_compute::scale_utils::is_align_corners_allowed_sampling_policy(info.sampling_policy));
-
- float wr = 0.f;
- float hr = 0.f;
- const DataLayout data_layout = info.data_layout == DataLayout::UNKNOWN ? src->data_layout() : info.data_layout;
- std::tie(wr, hr) = calculate_scale_factors(src, dst, data_layout, info.align_corners);
-
- ARM_COMPUTE_RETURN_ERROR_ON(info.interpolation_policy == InterpolationPolicy::AREA && (wr > 1.f || hr > 1.f));
-
- return Status{};
-}
-
-std::pair<Status, Window> validate_and_configure_window(ITensorInfo *src, ITensorInfo *dst, const ScaleKernelInfo &info, BorderSize &border)
-{
- Window win{};
- bool window_changed{};
- unsigned int num_elems_processed_per_iteration = 0;
- const DataLayout data_layout = info.data_layout == DataLayout::UNKNOWN ? src->data_layout() : info.data_layout;
-
- switch(data_layout)
- {
- case DataLayout::NCHW:
- {
- if(info.border_mode == BorderMode::UNDEFINED)
- {
- border = BorderSize(0);
- }
-
- num_elems_processed_per_iteration = 4;
- // Configure kernel window
- win = calculate_max_window(*dst, Steps(num_elems_processed_per_iteration));
- AccessWindowStatic input_access(src,
- -border.left, -border.top,
- src->dimension(0) + border.right,
- src->dimension(1) + border.bottom);
- AccessWindowHorizontal output_access(dst, 0, num_elems_processed_per_iteration);
-
- output_access.set_valid_region(win, calculate_valid_region_scale(*src,
- dst->tensor_shape(),
- info.interpolation_policy,
- info.sampling_policy,
- info.border_mode == BorderMode::UNDEFINED));
-
- window_changed = update_window_and_padding(win, input_access, output_access);
- }
- break;
- case DataLayout::NHWC:
- {
- // Configure kernel window
- win = calculate_max_window(*dst, Steps());
- }
- break;
- default:
- ARM_COMPUTE_ERROR("Data layout not supported");
- }
-
- Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
- return std::make_pair(err, win);
-}
-} // namespace
-
-BorderSize ClScaleKernel::border_size() const
-{
- return BorderSize(static_cast<size_t>(_data_layout == DataLayout::NCHW));
-}
-
-Status ClScaleKernel::validate(const ITensorInfo *src, const ITensorInfo *dst, const ScaleKernelInfo &info)
-{
- ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(src, dst, info));
- const DataLayout data_layout = info.data_layout == DataLayout::UNKNOWN ? src->data_layout() : info.data_layout;
- BorderSize border = BorderSize(static_cast<size_t>(data_layout == DataLayout::NCHW));
- ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(src->clone().get(), dst->clone().get(), info, border).first);
-
- return Status{};
-}
-
-void ClScaleKernel::configure(const CLCompileContext &compile_context, ITensorInfo *src, ITensorInfo *dst, const ScaleKernelInfo &info)
-{
- ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(src, dst, info));
- auto padding_info = get_padding_info({ src, dst });
-
- // Info required for the static tuning
- _data_layout = info.data_layout == DataLayout::UNKNOWN ? src->data_layout() : info.data_layout;
-
- float wr = 0.f;
- float hr = 0.f;
- std::tie(wr, hr) = calculate_scale_factors(src, dst, _data_layout, info.align_corners);
- const bool call_quantized_kernel = is_data_type_quantized_asymmetric(src->data_type()) && info.interpolation_policy == InterpolationPolicy::BILINEAR;
-
- // Compute actual border size
- BorderSize border = border_size();
- const bool is_nhwc = _data_layout == DataLayout::NHWC;
-
- // Area interpolation behaves as Nearest Neighbour in case of up-sampling
- auto interpolation_policy_to_use = info.interpolation_policy;
- if(info.interpolation_policy == InterpolationPolicy::AREA && wr <= 1.f && hr <= 1.f)
- {
- interpolation_policy_to_use = InterpolationPolicy::NEAREST_NEIGHBOR;
- }
-
- // Configure kernel window
- auto win_config = validate_and_configure_window(src, dst, info, border);
- ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
- ICLKernel::configure_internal(win_config.second);
-
- // Create kernel
- CLBuildOptions build_opts;
- build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(src->data_type()));
- build_opts.add_option("-DCONSTANT_VALUE=" + string_from_pixel_value(info.constant_border_value, src->data_type()));
- build_opts.add_option("-DBORDER_SIZE=" + support::cpp11::to_string(border.right));
- build_opts.add_option_if(info.border_mode == BorderMode::REPLICATE, "-DBORDER_MODE_REPLICATE");
- build_opts.add_option_if(is_nhwc, "-DDEPTH_OUT=" + support::cpp11::to_string(dst->dimension(2)));
- build_opts.add_option_if_else(info.sampling_policy == SamplingPolicy::CENTER, "-DSAMPLING_POLICY_CENTER", "-DSAMPLING_POLICY_TOP_LEFT");
- build_opts.add_option_if(info.align_corners, "-DALIGN_CORNERS");
- if(call_quantized_kernel)
- {
- const UniformQuantizationInfo qinfo = src->quantization_info().uniform();
- build_opts.add_option("-DSCALE=" + support::cpp11::to_string(qinfo.scale));
- build_opts.add_option("-DOFFSET=" + support::cpp11::to_string(qinfo.offset));
- }
- std::string interpolation_name = string_from_interpolation_policy(interpolation_policy_to_use);
- std::transform(interpolation_name.begin(), interpolation_name.end(), interpolation_name.begin(), ::tolower);
- std::string kernel_name = "scale_" + interpolation_name;
- kernel_name += call_quantized_kernel ? "_quantized_" : "_";
- kernel_name += lower_string(string_from_data_layout(_data_layout));
-
- _kernel = create_kernel(compile_context, kernel_name, build_opts.options());
- if(is_nhwc)
- {
- ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
- }
-
- const int idx_width = get_data_layout_dimension_index(_data_layout, DataLayoutDimension::WIDTH);
- const int idx_height = get_data_layout_dimension_index(_data_layout, DataLayoutDimension::HEIGHT);
- unsigned int idx = is_nhwc ? 2 * num_arguments_per_4D_tensor() : 2 * num_arguments_per_2D_tensor(); // Skip the src and dst parameters
- const unsigned int src_width = src->dimension(idx_width);
- const unsigned int src_height = src->dimension(idx_height);
-
- _kernel.setArg<float>(idx++, src_width);
- _kernel.setArg<float>(idx++, src_height);
- _kernel.setArg<float>(idx++, wr);
- _kernel.setArg<float>(idx++, hr);
-
- // Set config_id for enabling LWS tuning
- _config_id = "scale_";
- _config_id += (info.border_mode == BorderMode::REPLICATE ? "Bord_rep" : "");
- _config_id += (info.sampling_policy == SamplingPolicy::CENTER ? "center" : "topleft");
- _config_id += (is_nhwc ? "nhwc" : "nchw");
- _config_id += "_";
- _config_id += support::cpp11::to_string(dst->dimension(0));
- _config_id += "_";
- _config_id += support::cpp11::to_string(dst->dimension(1));
- _config_id += "_";
- _config_id += support::cpp11::to_string(dst->dimension(2));
- _config_id += "_";
- _config_id += support::cpp11::to_string(dst->dimension(3));
-}
-
-void ClScaleKernel::run_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue)
-{
- ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
- ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window);
-
- auto src = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC));
- auto dst = utils::cast::polymorphic_downcast<ICLTensor *>(tensors.get_tensor(TensorType::ACL_DST));
-
- switch(_data_layout)
- {
- case DataLayout::NCHW:
- {
- Window slice = window.first_slice_window_2D();
-
- do
- {
- unsigned int idx = 0;
- add_2D_tensor_argument(idx, src, slice);
- add_2D_tensor_argument(idx, dst, slice);
- enqueue(queue, *this, slice, lws_hint());
- }
- while(window.slide_window_slice_2D(slice));
- break;
- }
- case DataLayout::NHWC:
- {
- Window collapsed = window.collapse(ICLKernel::window(), Window::DimZ);
- Window slice = collapsed.first_slice_window_4D();
-
- unsigned int idx = 0;
- add_4D_tensor_argument(idx, src, slice);
- add_4D_tensor_argument(idx, dst, slice);
- enqueue(queue, *this, slice, lws_hint());
- break;
- }
- default:
- ARM_COMPUTE_ERROR("Data layout not supported");
- }
-}
-} // namespace kernels
-} // namespace opencl
-} // namespace arm_compute
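
For reference, the configure() logic above composes the program name as "scale_" + lower-cased interpolation policy + ("_quantized_" for bilinear on QASYMM8/QASYMM8_SIGNED inputs, otherwise "_") + lower-cased data layout. For example, bilinear scaling of a QASYMM8 NHWC tensor would select "scale_bilinear_quantized_nhwc", while nearest-neighbour F32 NCHW would resolve to "scale_nearest_neighbour_nchw" (names derived from the string composition above, not re-checked against the .cl sources here).
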
diff --git a/src/core/gpu/cl/kernels/ClScaleKernel.h b/src/core/gpu/cl/kernels/ClScaleKernel.h
deleted file mode 100644
index ad7632c713..0000000000
--- a/src/core/gpu/cl/kernels/ClScaleKernel.h
+++ /dev/null
@@ -1,79 +0,0 @@
-/*
- * Copyright (c) 2016-2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_CL_SCALE_KERNEL_H
-#define ARM_COMPUTE_CL_SCALE_KERNEL_H
-
-#include "arm_compute/core/KernelDescriptors.h"
-#include "src/core/common/Macros.h"
-#include "src/core/gpu/cl/ClCompileContext.h"
-#include "src/core/gpu/cl/IClKernel.h"
-
-namespace arm_compute
-{
-class ICLTensor;
-
-namespace opencl
-{
-namespace kernels
-{
-/** Interface for the scale kernel */
-class ClScaleKernel : public IClKernel
-{
-public:
- /** Default constructor */
- ClScaleKernel() = default;
- ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(ClScaleKernel);
-
- /** Initialise the kernel's inputs, output and interpolation policy
- *
- * @param[in] compile_context The compile context to be used.
- * @param[in] src Source tensor info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/F16/F32
- * @param[out] dst Destination tensor info. Data types supported: Same as @p src
- * All but the lowest two dimensions must be the same size as in the input tensor, i.e. scaling is only performed within the XY-plane.
- * @param[in] info @ref ScaleKernelInfo Kernel descriptor to be used to configure.
- */
- void configure(const CLCompileContext &compile_context, ITensorInfo *src, ITensorInfo *dst, const ScaleKernelInfo &info);
-
- /** Static function to check if given info will lead to a valid configuration of @ref ClScaleKernel
- *
- * @param[in] src Source tensor info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/F16/F32
- * @param[in] dst Destination tensor info. Data types supported: Same as @p src
- * All but the lowest two dimensions must be the same size as in the input tensor, i.e. scaling is only performed within the XY-plane.
- * @param[in] info @ref ScaleKernelInfo Kernel descriptor to be used to validate
- *
- * @return a status
- */
- static Status validate(const ITensorInfo *src, const ITensorInfo *dst, const ScaleKernelInfo &info);
-
- // Inherited methods overridden:
- BorderSize border_size() const override;
- void run_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue) override;
-
-private:
- DataLayout _data_layout = DataLayout::UNKNOWN;
-};
-} // namespace kernels
-} // namespace opencl
-} // namespace arm_compute
-#endif /*ARM_COMPUTE_CL_SCALE_KERNEL_H */
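
A validate-only sketch of the interface above, with illustrative shapes and a ScaleKernelInfo that assumes the remaining descriptor fields keep their defaults:

    // Hypothetical example: 2x bilinear upscale of a U8 NCHW tensor, 16x16 -> 32x32.
    #include "arm_compute/core/Error.h"
    #include "arm_compute/core/KernelDescriptors.h"
    #include "arm_compute/core/TensorInfo.h"
    #include "src/core/gpu/cl/kernels/ClScaleKernel.h"

    int main()
    {
        using namespace arm_compute;
        const TensorInfo      src(TensorShape(16U, 16U, 3U), 1, DataType::U8);
        const TensorInfo      dst(TensorShape(32U, 32U, 3U), 1, DataType::U8);
        const ScaleKernelInfo info{ InterpolationPolicy::BILINEAR, BorderMode::REPLICATE };
        const Status          st = opencl::kernels::ClScaleKernel::validate(&src, &dst, info);
        return st.error_code() == ErrorCode::OK ? 0 : 1;
    }
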
diff --git a/src/core/gpu/cl/kernels/ClSoftmaxKernel.cpp b/src/core/gpu/cl/kernels/ClSoftmaxKernel.cpp
deleted file mode 100644
index 000c9ad04d..0000000000
--- a/src/core/gpu/cl/kernels/ClSoftmaxKernel.cpp
+++ /dev/null
@@ -1,355 +0,0 @@
-/*
- * Copyright (c) 2017-2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "src/core/gpu/cl/kernels/ClSoftmaxKernel.h"
-#include "arm_compute/core/CL/ICLTensor.h"
-#include "arm_compute/core/Utils.h"
-#include "arm_compute/core/experimental/Types.h"
-#include "arm_compute/core/utils/quantization/AsymmHelpers.h"
-#include "src/core/CL/CLValidate.h"
-#include "src/core/helpers/AutoConfiguration.h"
-#include "src/core/helpers/WindowHelpers.h"
-#include "support/Cast.h"
-#include "support/StringSupport.h"
-
-namespace arm_compute
-{
-namespace opencl
-{
-namespace kernels
-{
-namespace
-{
-/** Calculates softmax parameters from the quantized input scale and scaling factor for the exponent, and returns them as build options.
- *
- * Prepares these build options:
- * -INPUT_BETA_MULTIPLIER, INPUT_BETA_LEFT_SHIFT - quantized representation of beta multiplier.
- * -DIFF_MIN - threshold difference between maximum value of input data and current processed value,
- * it defines whether the value will be taken into account or not.
- *
- * @param[in] input_scale Input scaling factor
- * @param[in] beta Exponent scaling factor beta
- */
-CLBuildOptions prepare_quantized_softmax_build_options(float input_scale, float beta)
-{
- // Number of integer bits in temporary fixed-point representation of current-to-max difference
- static const int scaled_diff_int_bits = 5;
- // Number of integer bits used in temporary fixed-point representation of exponent accumulator
- static const int exp_accumulation_in_bits = 12;
-
- const double beta_multiplier = std::min(
- 1.0 * beta * input_scale * (1 << (31 - scaled_diff_int_bits)),
- (1LL << 31) - 1.0);
- int input_beta_multiplier;
- int input_beta_left_shift;
- quantization::calculate_quantized_multiplier_greater_than_one(beta_multiplier, &input_beta_multiplier, &input_beta_left_shift);
-
- const double max_input_rescaled = 1.0 * ((1 << scaled_diff_int_bits) - 1) * (1LL << (31 - scaled_diff_int_bits)) / (1LL << input_beta_left_shift);
- const int diff_min = -1.f * std::floor(max_input_rescaled);
-
- CLBuildOptions build_opts;
- build_opts.add_option("-DSCALED_DIFF_INT_BITS=" + support::cpp11::to_string(scaled_diff_int_bits));
- build_opts.add_option("-DEXP_ACCUMULATION_INT_BITS=" + support::cpp11::to_string(exp_accumulation_in_bits));
- build_opts.add_option("-DINPUT_BETA_MULTIPLIER=" + support::cpp11::to_string(input_beta_multiplier));
- build_opts.add_option("-DINPUT_BETA_LEFT_SHIFT=" + support::cpp11::to_string(input_beta_left_shift));
- build_opts.add_option("-DDIFF_MIN=" + support::cpp11::to_string(diff_min));
-
- return build_opts;
-}
-
-Status validate_arguments_1DMaxShiftExpSum(const ITensorInfo &src, const ITensorInfo &max, const ITensorInfo &dst, const ITensorInfo &sum)
-{
- ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(&src);
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&src, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::F16, DataType::F32);
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(&src, &max);
-
- const bool is_quantized_asymmetric = is_data_type_quantized_asymmetric(src.data_type());
-
- // Checks performed when output is configured
- if(dst.total_size() != 0)
- {
- if(is_quantized_asymmetric)
- {
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&dst, 1, DataType::S32);
- }
- else
- {
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(&src, &dst);
- }
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(&src, &dst);
- }
-
- // Checks performed when sum is configured
- if(sum.total_size() != 0)
- {
- if(is_quantized_asymmetric)
- {
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&sum, 1, DataType::S32);
- }
- else
- {
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(&max, &sum);
- }
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(&max, &sum);
- }
-
- return Status{};
-}
-
-Status validate_arguments_1DNorm(const ITensorInfo &src, const ITensorInfo &sum, const ITensorInfo &dst, const SoftmaxKernelInfo &info)
-{
- ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(&src);
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&src, 1, DataType::S32, DataType::F16, DataType::F32);
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(&src, &sum);
- ARM_COMPUTE_RETURN_ERROR_ON(info.is_log && !is_data_type_float(info.input_data_type));
-
- // Note: output should always have a scale of 1/256 and offset 0
- const QuantizationInfo allowed_quantization_info = get_softmax_output_quantization_info(info.input_data_type, info.is_log);
- const bool is_quantized_asymmetric = is_data_type_quantized_asymmetric(info.input_data_type);
-
- // Checks performed when output is configured
- if(dst.total_size() != 0)
- {
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(&src, &dst);
- if(!is_quantized_asymmetric)
- {
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(&src, &dst);
- }
- else
- {
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&dst, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED);
- ARM_COMPUTE_RETURN_ERROR_ON(dst.quantization_info() != allowed_quantization_info);
- }
- }
-
- return Status{};
-}
-} // namespace
-
-/**< Grid size (obtained through auto-tuning) */
-const unsigned int ClLogits1DMaxShiftExpSumKernel::_grid_size = 64;
-/**< Vector size in the serial case (obtained through auto-tuning) */
-const unsigned int ClLogits1DMaxShiftExpSumKernel::_serial_vector_size = 8;
-/**< Vector size in the parallel case (obtained through auto-tuning, enables the best memory access pattern for Bifrost) .*/
-const unsigned int ClLogits1DMaxShiftExpSumKernel::_parallel_vector_size = 4;
-
-void ClLogits1DMaxShiftExpSumKernel::configure(const CLCompileContext &compile_context, const ITensorInfo &src, ITensorInfo &max, ITensorInfo &dst, ITensorInfo &sum, const SoftmaxKernelInfo &info)
-{
- auto padding_info = get_padding_info({ &src, &max, &dst, &sum });
-
- // Output auto initialization if not yet initialized
- auto_init_if_empty(sum, src.clone()->set_tensor_shape(max.tensor_shape()));
- auto_init_if_empty(dst, *src.clone());
-
- // Perform validation step
- ARM_COMPUTE_ERROR_THROW_ON(validate_arguments_1DMaxShiftExpSum(src, max, dst, sum));
-
- const DataType dt = src.data_type();
- const UniformQuantizationInfo qinfo = src.quantization_info().uniform();
- const size_t reduction_dim_size = src.dimension(0);
- const float beta = info.beta;
- const auto is_signed_qasymm8 = is_data_type_quantized_asymmetric_signed(info.input_data_type);
- const int min_value = is_signed_qasymm8 ? CL_SCHAR_MIN : 0;
-
- ParallelReductionInfo parallel_reduction_info = is_parallel_reduction(reduction_dim_size);
- const unsigned int vector_size = adjust_vec_size(std::get<1>(parallel_reduction_info), reduction_dim_size);
-
- // Set build options
- CLBuildOptions build_opts;
- build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(dt));
- build_opts.add_option("-DMIN_VALUE=" + support::cpp11::to_string(min_value));
- build_opts.add_option("-DVECTOR_SIZE=" + support::cpp11::to_string(vector_size));
- build_opts.add_option("-DSRC_WIDTH=" + support::cpp11::to_string(reduction_dim_size));
- build_opts.add_option("-DVECTOR_SIZE_LEFTOVER=" + support::cpp11::to_string(reduction_dim_size % vector_size));
- build_opts.add_option("-DLOG_VECTOR_SIZE=" + support::cpp11::to_string(lround(log2(vector_size))));
- build_opts.add_option_if((reduction_dim_size % vector_size) != 0, "-DNON_MULTIPLE_OF_VECTOR_SIZE");
- build_opts.add_option_if(is_signed_qasymm8, "-DQASYMM8_SIGNED");
- build_opts.add_option_if(is_data_type_float(dt) && (beta != 1.0f), "-DBETA=" + float_to_string_with_full_precision(beta));
- build_opts.add_option_if(is_data_type_float(dt) && info.is_log, "-DLOG_SOFTMAX");
- build_opts.add_option_if(is_data_type_float(dt), "-DMINVAL=" + ((dt == DataType::F16) ? std::string("-HALF_MAX") : std::string("-FLT_MAX")));
- build_opts.add_options_if(is_data_type_quantized_asymmetric(dt), prepare_quantized_softmax_build_options(qinfo.scale, beta).options());
-
- cl::NDRange lws_hint(cl::NullRange);
- std::string kernel_name = std::string("softmax_layer_max_shift_exp_sum_") + (is_data_type_quantized_asymmetric(dt) ? "quantized_" : "");
-
- // Configure parallel kernel if needed
- if(std::get<0>(parallel_reduction_info))
- {
- kernel_name += "parallel";
- bool is_grid_size_pow2 = (_grid_size != 0) && ((_grid_size & (_grid_size - 1)) == 0);
- build_opts.add_option_if(is_grid_size_pow2 && _grid_size <= 256, "-DGRID_SIZE=" + support::cpp11::to_string(_grid_size));
-
- // Handle boundary conditions.
- const unsigned int multiple_grid_size = (reduction_dim_size / vector_size) % _grid_size;
- build_opts.add_option_if((multiple_grid_size != 0) || ((reduction_dim_size % vector_size) != 0), "-DNON_MULTIPLE_OF_GRID_SIZE");
- // Setting _lws_hint in this way can also communicate grid_size to ClLogits1DMaxShiftExpSumKernel::run().
- // A single workgroup performs reduction in dimension 0 in the parallel case, hence lws[0]==gws[0].
- lws_hint = cl::NDRange(_grid_size);
- }
- else
- {
- kernel_name += "serial";
- }
-
- // Create kernel.
- _kernel = create_kernel(compile_context, kernel_name, build_opts.options());
-
- // Configure window
- Window win = calculate_max_window(src, Steps(reduction_dim_size));
- IClKernel::configure_internal(win, lws_hint);
-
- ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
-}
-
-Status ClLogits1DMaxShiftExpSumKernel::validate(const ITensorInfo &src, const ITensorInfo &max, const ITensorInfo &dst, const ITensorInfo &sum)
-{
- ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments_1DMaxShiftExpSum(src, max, dst, sum));
- return Status{};
-}
-
-ClLogits1DMaxShiftExpSumKernel::ParallelReductionInfo ClLogits1DMaxShiftExpSumKernel::is_parallel_reduction(size_t size)
-{
- bool is_parallel_reduction = (size >= (_grid_size * _serial_vector_size)) && (_grid_size > 1);
- unsigned int vector_size = is_parallel_reduction ? _parallel_vector_size : _serial_vector_size;
- return std::make_tuple(is_parallel_reduction, vector_size);
-}
-
-void ClLogits1DMaxShiftExpSumKernel::run_op(ITensorPack &tensors, const Window &window, ::cl::CommandQueue &queue)
-{
- ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
- ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(IKernel::window(), window);
-
- auto src = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC));
- auto dst = utils::cast::polymorphic_downcast<ICLTensor *>(tensors.get_tensor(TensorType::ACL_DST));
- auto max = utils::cast::polymorphic_downcast<ICLTensor *>(tensors.get_tensor(TensorType::ACL_INT_0));
- auto sum = utils::cast::polymorphic_downcast<ICLTensor *>(tensors.get_tensor(TensorType::ACL_INT_1));
-
- ARM_COMPUTE_ERROR_ON_NULLPTR(src, dst, max, sum);
-
- // Collapse window in Z dimension
- Window window_collapsed = window.collapse_if_possible(IClKernel::window(), Window::DimZ);
-
- // Reconfigure window in case of parallel reduction
- ParallelReductionInfo parallel_reduction_info = is_parallel_reduction(src->info()->dimension(0));
- if(std::get<0>(parallel_reduction_info))
- {
- // Launch grid_size parallel work items
- window_collapsed.set(Window::DimX, Window::Dimension(0, _grid_size, 1));
- }
-
- // Get slices
- Window slice = window_collapsed.first_slice_window_3D();
- do
- {
- unsigned int idx = 0;
- // Set inputs
- add_3D_tensor_argument(idx, src, slice);
- add_3D_tensor_argument(idx, max, slice);
- add_3D_tensor_argument(idx, dst, slice);
- add_3D_tensor_argument(idx, sum, slice);
- enqueue(queue, *this, slice, lws_hint());
- }
- while(window_collapsed.slide_window_slice_3D(slice));
-}
-
-void ClLogits1DNormKernel::configure(const CLCompileContext &compile_context, const ITensorInfo &src, const ITensorInfo &sum, ITensorInfo &dst, const SoftmaxKernelInfo &info)
-{
- auto padding_info = get_padding_info({ &src, &dst, &sum });
-
- // Note: output should always have a scale of 1/256 and offset 0
- const bool is_quantized_asymmetric = is_data_type_quantized_asymmetric(info.input_data_type);
- const DataType output_data_type = info.input_data_type;
- const QuantizationInfo allowed_quantization_info = get_softmax_output_quantization_info(info.input_data_type, info.is_log);
- const UniformQuantizationInfo qinfo = src.quantization_info().uniform();
-
- // Output auto initialization if not yet initialized
- auto_init_if_empty(dst, src.clone()->set_data_type(output_data_type).set_quantization_info(allowed_quantization_info));
-
- // Perform validation step
- ARM_COMPUTE_ERROR_THROW_ON(validate_arguments_1DNorm(src, sum, dst, info));
-
- const auto is_signed_qasymm8 = is_data_type_quantized_asymmetric_signed(info.input_data_type);
- const int min_value = is_signed_qasymm8 ? CL_SCHAR_MIN : 0;
- const unsigned int vector_size = adjust_vec_size(16, src.dimension(0));
-
- // Set build options
- CLBuildOptions build_opts;
- build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(info.input_data_type));
- build_opts.add_option("-DMIN_VALUE=" + support::cpp11::to_string(min_value));
- build_opts.add_option("-DVECTOR_SIZE=" + support::cpp11::to_string(vector_size));
- build_opts.add_option("-DVECTOR_SIZE_LEFTOVER=" + support::cpp11::to_string(src.dimension(0) % vector_size));
- build_opts.add_option_if(is_data_type_quantized_asymmetric_signed(info.input_data_type), "-DQASYMM8_SIGNED");
- build_opts.add_options_if(is_quantized_asymmetric,
- prepare_quantized_softmax_build_options(qinfo.scale, info.beta).options());
- build_opts.add_option_if(info.is_log, "-DLOG_SOFTMAX");
-
- // Create kernel
- std::string kernel_name = std::string("softmax_layer_norm") + (is_quantized_asymmetric ? "_quantized" : "");
- _kernel = create_kernel(compile_context, kernel_name, build_opts.options());
-
- // Configure window
- auto win = calculate_max_window(src, Steps(vector_size));
- ICLKernel::configure_internal(win);
-
- ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
-}
-
-Status ClLogits1DNormKernel::validate(const ITensorInfo &src, const ITensorInfo &sum, const ITensorInfo &dst, const SoftmaxKernelInfo &info)
-{
- ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments_1DNorm(src, sum, dst, info));
-
- return Status{};
-}
-
-void ClLogits1DNormKernel::run_op(ITensorPack &tensors, const Window &window, ::cl::CommandQueue &queue)
-{
- ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
- ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(IKernel::window(), window);
-
- auto src = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC));
- auto dst = utils::cast::polymorphic_downcast<ICLTensor *>(tensors.get_tensor(TensorType::ACL_DST));
- auto sum = utils::cast::polymorphic_downcast<ICLTensor *>(tensors.get_tensor(TensorType::ACL_INT_0));
-
- ARM_COMPUTE_ERROR_ON_NULLPTR(src, dst, sum);
-
- Window window_collapsed = window.collapse_if_possible(ICLKernel::window(), Window::DimZ);
- Window slice = window_collapsed.first_slice_window_3D();
-
- do
- {
- Window sum_slice = slice;
- sum_slice.set(Window::DimX, Window::Dimension(0, 1, 1));
-
- unsigned int idx = 0;
- // Set inputs
- add_3D_tensor_argument(idx, src, slice);
- add_3D_tensor_argument(idx, sum, sum_slice);
- add_3D_tensor_argument(idx, dst, slice);
- enqueue(queue, *this, slice, lws_hint());
- }
- while(window_collapsed.slide_window_slice_3D(slice));
-}
-} // namespace kernels
-} // namespace opencl
-} // namespace arm_compute
\ No newline at end of file
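
To make the quantized build options above concrete, the following standalone snippet re-derives them for an assumed operating point of input_scale = 1/256 and beta = 1; the frexp() split stands in for calculate_quantized_multiplier_greater_than_one(), which is assumed here to follow the usual gemmlowp convention (Q0.31 mantissa plus left shift):

    // Illustrative only: expected output is
    // -DINPUT_BETA_MULTIPLIER=1073741824 -DINPUT_BETA_LEFT_SHIFT=19 -DDIFF_MIN=-3968
    #include <algorithm>
    #include <cmath>
    #include <cstdint>
    #include <cstdio>

    int main()
    {
        const int    scaled_diff_int_bits = 5;           // as in the deleted helper
        const double input_scale          = 1.0 / 256.0; // assumed QASYMM8 scale
        const double beta                 = 1.0;

        // beta * scale * 2^(31-5), clamped to 2^31 - 1: here 2^18 = 262144
        const double beta_multiplier =
            std::min(beta * input_scale * (1 << (31 - scaled_diff_int_bits)), (1LL << 31) - 1.0);

        int          left_shift = 0;
        const double mantissa   = std::frexp(beta_multiplier, &left_shift);                  // 0.5 and 19
        const auto   multiplier = static_cast<int32_t>(std::lround(mantissa * (1LL << 31))); // 2^30

        const double max_input_rescaled = ((1 << scaled_diff_int_bits) - 1)
                                          * double(1LL << (31 - scaled_diff_int_bits)) / double(1LL << left_shift);
        const int diff_min = static_cast<int>(-std::floor(max_input_rescaled)); // -3968

        std::printf("-DINPUT_BETA_MULTIPLIER=%d -DINPUT_BETA_LEFT_SHIFT=%d -DDIFF_MIN=%d\n",
                    multiplier, left_shift, diff_min);
        return 0;
    }
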
diff --git a/src/core/gpu/cl/kernels/ClSoftmaxKernel.h b/src/core/gpu/cl/kernels/ClSoftmaxKernel.h
deleted file mode 100644
index af980eaa8e..0000000000
--- a/src/core/gpu/cl/kernels/ClSoftmaxKernel.h
+++ /dev/null
@@ -1,126 +0,0 @@
-/*
- * Copyright (c) 2017-2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_CLSOFTMAXLAYERKERNEL_H
-#define ARM_COMPUTE_CLSOFTMAXLAYERKERNEL_H
-
-#include "arm_compute/core/Error.h"
-#include "arm_compute/core/KernelDescriptors.h"
-#include "src/core/common/Macros.h"
-#include "src/core/gpu/cl/ClCompileContext.h"
-#include "src/core/gpu/cl/IClKernel.h"
-
-namespace arm_compute
-{
-namespace opencl
-{
-namespace kernels
-{
-/** Interface for max, shifting, exponentiating and summing the logits */
-class ClLogits1DMaxShiftExpSumKernel : public IClKernel
-{
- /**< Grid size (obtained through auto-tuning) */
- static const unsigned int _grid_size;
- /**< Vector size in the serial case (obtained through auto-tuning) */
- static const unsigned int _serial_vector_size;
- /**< Vector size in the parallel case (obtained through auto-tuning, enables the best memory access pattern for Bifrost) .*/
- static const unsigned int _parallel_vector_size;
-
-public:
- /** Info for whether a parallel reduction will be run and the vector size of the execution. */
- using ParallelReductionInfo = std::tuple<bool, unsigned int>;
-
- /** Default constructor */
- ClLogits1DMaxShiftExpSumKernel() = default;
- ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(ClLogits1DMaxShiftExpSumKernel);
- /** Configure the kernel using the given information about tensors
- *
- * @param[in] compile_context The compile context to be used.
- * @param[in] src Source tensor. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32
- * @param[in,out] max Max values tensor. Data types supported: same as @p src
- * @param[out] dst Destination tensor. Data types supported: same as @p src
- * @param[out] sum Sum of 1D logits tensor. Data types supported: same as @p src
- * @param[in] info Contains information consumed by kernels for softmax described in @ref SoftmaxKernelInfo.
- */
- void configure(const CLCompileContext &compile_context, const ITensorInfo &src, ITensorInfo &max, ITensorInfo &dst, ITensorInfo &sum, const SoftmaxKernelInfo &info);
- /** Static function to check if given info will lead to a valid configuration of @ref ClLogits1DMaxShiftExpSumKernel
- *
- * @param[in] src Source tensor. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32
- * @param[in] max Max values tensor. Data types supported: same as @p src
- * @param[in] dst Destination tensor. Data types supported: same as @p src
- * @param[in] sum Sum of 1D logits tensor. Data types supported: same as @p src
- *
- * @return a status
- */
- static Status validate(const ITensorInfo &src, const ITensorInfo &max, const ITensorInfo &dst, const ITensorInfo &sum);
- /** Checks if the given size is eligible for parallel reduction
- *
- * @note Serial reduction is launched for width < (_grid_size * _serial_vector_size).
- * @note Parallel reduction is launched for width >= (_grid_size * _serial_vector_size) and vector_size is forced to 4.
- *
- * @param[in] size Size to check
- *
- * @return A two-element tuple where the first element is a boolean specifying if a parallel reduction will be run,
- * while the second element is the vector size of the execution.
- */
- static ParallelReductionInfo is_parallel_reduction(size_t size);
-
- // Inherited methods overridden:
- void run_op(ITensorPack &tensors, const Window &window, ::cl::CommandQueue &queue) override;
-};
-
-/** Interface for calculating the final step of the Softmax Layer where each logit value is multiplied by the inverse of the sum of the logits. */
-class ClLogits1DNormKernel : public IClKernel
-{
-public:
- /** Default constructor */
- ClLogits1DNormKernel() = default;
- ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(ClLogits1DNormKernel);
-
- /** Set the input and output tensors.
- *
- * @param[in] compile_context The compile context to be used.
- * @param[in] src Source tensor. Data types supported: S32/F16/F32. If this kernel is used for log softmax, only F32/F16 is supported.
- * @param[in] sum Sum tensor. Dimensions should be dim(src)-1. Data types supported: same as @p src
- * @param[out] dst Destination tensor. Data types supported: QASYMM8/QASYMM8_SIGNED for S32 @p src, or same as @p src
- * @param[in] info Contains information consumed by kernels for softmax described in @ref SoftmaxKernelInfo.
- */
- void configure(const CLCompileContext &compile_context, const ITensorInfo &src, const ITensorInfo &sum, ITensorInfo &dst, const SoftmaxKernelInfo &info);
- /** Static function to check if given info will lead to a valid configuration of @ref ClLogits1DNormKernel
- *
- * @param[in] src Source tensor. Data types supported: S32/F16/F32. If this kernel is used for log softmax, only F32/F16 is supported.
- * @param[in] sum Sum tensor. Dimensions should be dim(src)-1. Data types supported: same as @p src
- * @param[in] dst Destination tensor. Data types supported: QASYMM8/QASYMM8_SIGNED for S32 @p src, or same as @p src
- * @param[in] info Contains information consumed by kernels for softmax described in @ref SoftmaxKernelInfo.
- *
- * @return a status
- */
- static Status validate(const ITensorInfo &src, const ITensorInfo &sum, const ITensorInfo &dst, const SoftmaxKernelInfo &info);
-
- // Inherited methods overridden:
- void run_op(ITensorPack &tensors, const Window &window, ::cl::CommandQueue &queue) override;
-};
-} // namespace kernels
-} // namespace opencl
-} // namespace arm_compute
-#endif /*ARM_COMPUTE_CLSOFTMAXLAYERKERNEL_H */
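
Given the tuning constants documented above (_grid_size = 64, _serial_vector_size = 8), is_parallel_reduction() only picks the parallel variant once the reduction width reaches 64 * 8 = 512. A small sketch with assumed widths:

    // Hypothetical check of the reduction strategy picked for two row widths.
    #include <cstdio>
    #include <tuple>
    #include "src/core/gpu/cl/kernels/ClSoftmaxKernel.h"

    int main()
    {
        using arm_compute::opencl::kernels::ClLogits1DMaxShiftExpSumKernel;
        const auto narrow = ClLogits1DMaxShiftExpSumKernel::is_parallel_reduction(256);  // (false, 8)
        const auto wide   = ClLogits1DMaxShiftExpSumKernel::is_parallel_reduction(2048); // (true, 4)
        std::printf("256: parallel=%d vec=%u  2048: parallel=%d vec=%u\n",
                    std::get<0>(narrow), std::get<1>(narrow), std::get<0>(wide), std::get<1>(wide));
        return 0;
    }
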
diff --git a/src/core/gpu/cl/kernels/ClTransposeKernel.cpp b/src/core/gpu/cl/kernels/ClTransposeKernel.cpp
deleted file mode 100644
index 704d0152cf..0000000000
--- a/src/core/gpu/cl/kernels/ClTransposeKernel.cpp
+++ /dev/null
@@ -1,119 +0,0 @@
-/*
- * Copyright (c) 2017-2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "src/core/gpu/cl/kernels/ClTransposeKernel.h"
-
-#include "arm_compute/core/CL/CLHelpers.h"
-#include "arm_compute/core/CL/CLKernelLibrary.h"
-#include "arm_compute/core/CL/ICLTensor.h"
-#include "arm_compute/core/Helpers.h"
-#include "arm_compute/core/TensorInfo.h"
-#include "arm_compute/core/Utils.h"
-#include "arm_compute/core/Validate.h"
-#include "arm_compute/core/utils/misc/ShapeCalculator.h"
-#include "src/core/CL/CLValidate.h"
-#include "src/core/helpers/AutoConfiguration.h"
-#include "src/core/helpers/WindowHelpers.h"
-#include "support/Cast.h"
-#include "support/StringSupport.h"
-
-namespace arm_compute
-{
-namespace opencl
-{
-namespace kernels
-{
-void ClTransposeKernel::configure(const CLCompileContext &compile_context, const ITensorInfo *src, ITensorInfo *dst)
-{
- ARM_COMPUTE_ERROR_ON_NULLPTR(src, dst);
-
- // Output auto initialization if not yet initialized
- const TensorShape dst_shape = misc::shape_calculator::compute_transposed_shape(*src);
- auto_init_if_empty(*dst, src->clone()->set_tensor_shape(dst_shape));
-
- ARM_COMPUTE_ERROR_THROW_ON(ClTransposeKernel::validate(src, dst));
- auto padding_info = get_padding_info({ src, dst });
-
- // Create kernel
- const unsigned int vec_size_x = adjust_vec_size(max_cl_vector_width / src->element_size(), src->dimension(0));
- const int vec_size_x_leftovers = src->dimension(0) % vec_size_x;
- const unsigned int vec_size_y = adjust_vec_size(max_cl_vector_width / src->element_size(), src->dimension(1));
- const int vec_size_y_leftovers = src->dimension(1) % vec_size_y;
-
- CLBuildOptions build_opts;
- build_opts.add_option("-DDATA_TYPE_IN_BYTES=" + support::cpp11::to_string(src->element_size()));
- build_opts.add_option("-DVEC_SIZE_X=" + support::cpp11::to_string(vec_size_x));
- build_opts.add_option("-DVEC_SIZE_LEFTOVER_X=" + support::cpp11::to_string(vec_size_x_leftovers));
- build_opts.add_option("-DVEC_SIZE_Y=" + support::cpp11::to_string(vec_size_y));
- build_opts.add_option("-DVEC_SIZE_LEFTOVER_Y=" + support::cpp11::to_string(vec_size_y_leftovers));
-
- _kernel = create_kernel(compile_context, "transpose", build_opts.options());
-
- // Configure kernel window
- Window win = calculate_max_window(*src, Steps(vec_size_x, vec_size_y));
- ICLKernel::configure_internal(win, cl::NDRange(2, 8));
- ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
-}
-
-Status ClTransposeKernel::validate(const ITensorInfo *src, const ITensorInfo *dst)
-{
- ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, dst);
- ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(src);
- ARM_COMPUTE_RETURN_ERROR_ON(src->data_type() == DataType::UNKNOWN);
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(src->num_dimensions() > 2, "Only up to 2-D src tensors are supported");
-
- // Validate configured dst
- if(dst->total_size() != 0)
- {
- const TensorInfo dst_info = src->clone()->set_tensor_shape(misc::shape_calculator::compute_transposed_shape(*src));
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(dst, &dst_info);
-
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_QUANTIZATION_INFO(src, dst);
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src, dst);
- }
-
- return Status{};
-}
-
-void ClTransposeKernel::run_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue)
-{
- ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
- ARM_COMPUTE_ERROR_ON_MISMATCHING_WINDOWS(ICLKernel::window(), window);
-
- const auto src = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC));
- auto dst = utils::cast::polymorphic_downcast<ICLTensor *>(tensors.get_tensor(TensorType::ACL_DST));
-
- Window slice = window.first_slice_window_2D();
-
- do
- {
- unsigned int idx = 0;
- add_2D_tensor_argument(idx, src, slice);
- add_2D_tensor_argument(idx, dst, slice);
- enqueue(queue, *this, slice, lws_hint());
- }
- while(window.slide_window_slice_2D(slice));
-}
-} // namespace kernels
-} // namespace opencl
-} // namespace arm_compute
\ No newline at end of file
diff --git a/src/core/gpu/cl/kernels/ClTransposeKernel.h b/src/core/gpu/cl/kernels/ClTransposeKernel.h
deleted file mode 100644
index 21d4fd41f5..0000000000
--- a/src/core/gpu/cl/kernels/ClTransposeKernel.h
+++ /dev/null
@@ -1,65 +0,0 @@
-/*
- * Copyright (c) 2017-2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_CL_TRANSPOSE_KERNEL_H
-#define ARM_COMPUTE_CL_TRANSPOSE_KERNEL_H
-
-#include "src/core/common/Macros.h"
-#include "src/core/gpu/cl/ClCompileContext.h"
-#include "src/core/gpu/cl/IClKernel.h"
-
-namespace arm_compute
-{
-namespace opencl
-{
-namespace kernels
-{
-/** OpenCL kernel to transpose a 2D tensor. */
-class ClTransposeKernel : public IClKernel
-{
-public:
- ClTransposeKernel() = default;
- ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(ClTransposeKernel);
- /** Set the src and dst of the kernel.
- *
- * @param[in] compile_context The compile context to be used.
- * @param[in] src The src tensor info. Data types supported: All.
- * @param[in] dst The dst tensor info. Data types supported: Same as @p src
- */
- void configure(const CLCompileContext &compile_context, const ITensorInfo *src, ITensorInfo *dst);
- /** Static function to check if given info will lead to a valid configuration of @ref ClTransposeKernel
- *
- * @param[in] src The src tensor info. Data types supported: All.
- * @param[in] dst The dst tensor info. Data types supported: same as @p src.
- *
- * @return a status
- */
- static Status validate(const ITensorInfo *src, const ITensorInfo *dst);
-
- // Inherited methods overridden:
- void run_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue) override;
-};
-} // namespace kernels
-} // namespace opencl
-} // namespace arm_compute
-#endif /*ARM_COMPUTE_CL_TRANSPOSE_KERNEL_H */
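
A validate-only sketch of the transpose interface above, with assumed 2-D shapes (sources with more than two dimensions are rejected):

    // Hypothetical example: an 8x3 F32 matrix transposes to 3x8.
    #include "arm_compute/core/Error.h"
    #include "arm_compute/core/TensorInfo.h"
    #include "src/core/gpu/cl/kernels/ClTransposeKernel.h"

    int main()
    {
        using namespace arm_compute;
        const TensorInfo src(TensorShape(8U, 3U), 1, DataType::F32);
        const TensorInfo dst(TensorShape(3U, 8U), 1, DataType::F32);
        const Status     st = opencl::kernels::ClTransposeKernel::validate(&src, &dst);
        return st.error_code() == ErrorCode::OK ? 0 : 1;
    }
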
diff --git a/src/core/gpu/cl/kernels/ClWidthConcatenate2TensorsKernel.cpp b/src/core/gpu/cl/kernels/ClWidthConcatenate2TensorsKernel.cpp
deleted file mode 100644
index 9f970719ed..0000000000
--- a/src/core/gpu/cl/kernels/ClWidthConcatenate2TensorsKernel.cpp
+++ /dev/null
@@ -1,154 +0,0 @@
-/*
- * Copyright (c) 2018-2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "src/core/gpu/cl/kernels/ClWidthConcatenate2TensorsKernel.h"
-
-#include "arm_compute/core/CL/CLHelpers.h"
-#include "arm_compute/core/CL/CLKernelLibrary.h"
-#include "arm_compute/core/CL/ICLTensor.h"
-#include "arm_compute/core/Utils.h"
-#include "arm_compute/core/utils/misc/ShapeCalculator.h"
-#include "src/core/CL/CLValidate.h"
-#include "src/core/helpers/WindowHelpers.h"
-#include "src/core/utils/helpers/tensor_info.h"
-#include "support/Cast.h"
-
-#include "support/StringSupport.h"
-
-namespace arm_compute
-{
-namespace opencl
-{
-namespace kernels
-{
-namespace
-{
-Status validate_arguments(const ITensorInfo *src1, const ITensorInfo *src2, const ITensorInfo *dst)
-{
- ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src1, src2, dst);
- ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(src1);
- ARM_COMPUTE_RETURN_ERROR_ON(src1->data_type() == DataType::UNKNOWN);
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src1, src2, dst);
- ARM_COMPUTE_RETURN_ERROR_ON(src1->dimension(0) + src2->dimension(0) > dst->dimension(0));
-
- for(size_t i = 1; i < Coordinates::num_max_dimensions; ++i)
- {
- ARM_COMPUTE_RETURN_ERROR_ON(src1->dimension(i) != dst->dimension(i));
- ARM_COMPUTE_RETURN_ERROR_ON(src2->dimension(i) != dst->dimension(i));
- }
- ARM_COMPUTE_RETURN_ERROR_ON(src1->num_dimensions() > 4);
-
- return Status{};
-}
-} // namespace
-
-Status ClWidthConcatenate2TensorsKernel::validate(const ITensorInfo *src1, const ITensorInfo *src2, const ITensorInfo *dst)
-{
- ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(src1, src2, dst));
- return Status{};
-}
-
-void ClWidthConcatenate2TensorsKernel::configure(const CLCompileContext &compile_context, ITensorInfo *src1, ITensorInfo *src2, ITensorInfo *dst)
-{
- ARM_COMPUTE_ERROR_ON_NULLPTR(src1, src2, dst);
- ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(src1, src2, dst));
-
- auto padding_info = get_padding_info({ src1, src2, dst });
-
- const unsigned int min_dimension = std::min(src1->dimension(0), src2->dimension(0));
- const unsigned int num_elems_processed_per_iteration = adjust_vec_size(8, min_dimension);
- const unsigned int vec_size_leftover = dst->dimension(0) % num_elems_processed_per_iteration;
-
- // Add build options
- CLBuildOptions build_opts;
- build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(src1->data_type()));
- build_opts.add_option("-DVEC_SIZE=" + support::cpp11::to_string(num_elems_processed_per_iteration));
- build_opts.add_option("-DVEC_SIZE_LEFTOVER=" + support::cpp11::to_string(vec_size_leftover));
- build_opts.add_option("-DDEPTH=" + support::cpp11::to_string(src1->dimension(2)));
- build_opts.add_option("-DINPUT1_WIDTH=" + support::cpp11::to_string(src1->dimension(0)));
- build_opts.add_option("-DINPUT2_WIDTH=" + support::cpp11::to_string(src2->dimension(0)));
- build_opts.add_option("-DELEMENT_SIZE=" + support::cpp11::to_string(src1->element_size()));
- build_opts.add_option("-DINPUT1_ROTATE_N=" + support::cpp11::to_string((src1->dimension(0) - vec_size_leftover) % num_elems_processed_per_iteration));
-
- // If input have different quantization info set quantization parameters needed for the re-quantization process
- const bool have_different_qinfo = helpers::tensor_info::tensors_have_different_quantization_info(dst, src1, src2);
- if(is_data_type_quantized_asymmetric(src1->data_type()) && have_different_qinfo)
- {
- const UniformQuantizationInfo iq1_info = src1->quantization_info().uniform();
- const UniformQuantizationInfo iq2_info = src2->quantization_info().uniform();
- const UniformQuantizationInfo oq_info = dst->quantization_info().uniform();
-
- build_opts.add_option("-DOFFSET_IN1=" + float_to_string_with_full_precision(iq1_info.offset));
- build_opts.add_option("-DSCALE_IN1=" + float_to_string_with_full_precision(iq1_info.scale));
- build_opts.add_option("-DOFFSET_IN2=" + float_to_string_with_full_precision(iq2_info.offset));
- build_opts.add_option("-DSCALE_IN2=" + float_to_string_with_full_precision(iq2_info.scale));
- build_opts.add_option("-DOFFSET_OUT=" + float_to_string_with_full_precision(oq_info.offset));
- build_opts.add_option("-DSCALE_OUT=" + float_to_string_with_full_precision(oq_info.scale));
- }
-
- // Create kernel
- _kernel = create_kernel(compile_context, "concatenate_width_x2", build_opts.options());
-
- // Configure kernel window
- Window win = calculate_max_window(*dst, Steps(num_elems_processed_per_iteration));
- ICLKernel::configure_internal(win.collapse(win, Window::DimZ));
-
- ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
-
- // Set config_id for enabling LWS tuning
- _config_id = "concatenate_width_x2_";
- _config_id += lower_string(string_from_data_type(src1->data_type()));
- _config_id += "_";
- _config_id += support::cpp11::to_string(src1->dimension(0));
- _config_id += "_";
- _config_id += support::cpp11::to_string(src1->dimension(1));
- _config_id += "_";
- _config_id += support::cpp11::to_string(src2->dimension(0));
- _config_id += "_";
- _config_id += support::cpp11::to_string(src2->dimension(1));
-}
-
-void ClWidthConcatenate2TensorsKernel::run_op(ITensorPack &tensors, const Window &window, ::cl::CommandQueue &queue)
-{
- ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
- ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window);
-
- Window slice = window.first_slice_window_4D();
-
- const auto src0 = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC_VEC));
- const auto src1 = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC_VEC + 1));
- auto dst = utils::cast::polymorphic_downcast<ICLTensor *>(tensors.get_tensor(TensorType::ACL_DST));
-
- do
- {
- unsigned int idx = 0;
- add_4D_tensor_argument(idx, src0, slice);
- add_4D_tensor_argument(idx, src1, slice);
- add_4D_tensor_argument(idx, dst, slice);
- enqueue(queue, *this, window, lws_hint());
- }
- while(window.slide_window_slice_4D(slice));
-}
-} // namespace kernels
-} // namespace opencl
-} // namespace arm_compute
diff --git a/src/core/gpu/cl/kernels/ClWidthConcatenate2TensorsKernel.h b/src/core/gpu/cl/kernels/ClWidthConcatenate2TensorsKernel.h
deleted file mode 100644
index ddade29113..0000000000
--- a/src/core/gpu/cl/kernels/ClWidthConcatenate2TensorsKernel.h
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- * Copyright (c) 2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_CL_WIDTH_CONCATENATE_2TENSORS_KERNEL_H
-#define ARM_COMPUTE_CL_WIDTH_CONCATENATE_2TENSORS_KERNEL_H
-
-#include "src/core/common/Macros.h"
-#include "src/core/gpu/cl/ClCompileContext.h"
-#include "src/core/gpu/cl/IClKernel.h"
-
-namespace arm_compute
-{
-namespace opencl
-{
-namespace kernels
-{
-/** Interface for the width concatenate kernel of 2 tensors.
- * The src1 and src2 tensors will be concatenated into the dst tensor.
- */
-class ClWidthConcatenate2TensorsKernel : public IClKernel
-{
-public:
- /** Default constructor */
- ClWidthConcatenate2TensorsKernel() = default;
- ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(ClWidthConcatenate2TensorsKernel);
- /** Initialise the kernel's sources and destination
- *
- * @param[in] compile_context The compile context to be used.
- * @param[in] src1 First source tensor info. Data types supported: All.
- * @param[in] src2 Second source tensor info. Data types supported: same as @p src1
- * @param[out] dst Destination tensor info. Data types supported: Same as @p src1.
- */
- void configure(const CLCompileContext &compile_context, ITensorInfo *src1, ITensorInfo *src2, ITensorInfo *dst);
- /** Static function to check if given info will lead to a valid configuration of @ref ClWidthConcatenate2TensorsKernel
- *
- * @param[in] src1 First tensor info. Data types supported: All.
- * @param[in] src2 Second tensor info. Data types supported: same as @p src1
- * @param[in] dst Destination tensor info. Data types supported: Same as @p src1.
- *
- * @return a status
- */
- static Status validate(const ITensorInfo *src1, const ITensorInfo *src2, const ITensorInfo *dst);
-
- // Inherited methods overridden:
- void run_op(ITensorPack &tensors, const Window &window, ::cl::CommandQueue &queue) override;
-};
-} // namespace kernels
-} // namespace opencl
-} // namespace arm_compute
-#endif /* ARM_COMPUTE_CL_WIDTH_CONCATENATE_2TENSORS_KERNEL_H */
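For reference, a minimal sketch of driving the static validate() of the kernel removed above. The helper name and the shapes are illustrative assumptions; the in-tree include paths and the validate() signature are taken from the header as it stood at this revision, and no OpenCL context should be needed for plain F32 inputs.

#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/TensorShape.h"
#include "src/core/gpu/cl/kernels/ClWidthConcatenate2TensorsKernel.h"

// Checks whether two 3D tensors can be concatenated along the width (X) axis: 32 + 16 -> 48.
arm_compute::Status check_width_concat_x2()
{
    using namespace arm_compute;
    const TensorInfo src1(TensorShape(32U, 8U, 4U), 1, DataType::F32);
    const TensorInfo src2(TensorShape(16U, 8U, 4U), 1, DataType::F32);
    const TensorInfo dst(TensorShape(48U, 8U, 4U), 1, DataType::F32);
    // Static check; mirrors the data-type and per-dimension rules enforced by the kernel.
    return opencl::kernels::ClWidthConcatenate2TensorsKernel::validate(&src1, &src2, &dst);
}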
diff --git a/src/core/gpu/cl/kernels/ClWidthConcatenate4TensorsKernel.cpp b/src/core/gpu/cl/kernels/ClWidthConcatenate4TensorsKernel.cpp
deleted file mode 100644
index 281d190381..0000000000
--- a/src/core/gpu/cl/kernels/ClWidthConcatenate4TensorsKernel.cpp
+++ /dev/null
@@ -1,184 +0,0 @@
-/*
- * Copyright (c) 2018-2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "src/core/gpu/cl/kernels/ClWidthConcatenate4TensorsKernel.h"
-
-#include "arm_compute/core/CL/CLHelpers.h"
-#include "arm_compute/core/CL/CLKernelLibrary.h"
-#include "arm_compute/core/CL/ICLTensor.h"
-#include "arm_compute/core/Helpers.h"
-#include "arm_compute/core/Utils.h"
-#include "src/core/CL/CLValidate.h"
-#include "src/core/helpers/WindowHelpers.h"
-#include "src/core/utils/helpers/tensor_info.h"
-#include "support/Cast.h"
-
-#include "support/StringSupport.h"
-
-namespace arm_compute
-{
-namespace opencl
-{
-namespace kernels
-{
-namespace
-{
-Status validate_arguments(const ITensorInfo *src1, const ITensorInfo *src2, const ITensorInfo *src3, const ITensorInfo *src4, const ITensorInfo *dst)
-{
- ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src1, src2, src3, src4, dst);
- ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(src1);
- ARM_COMPUTE_RETURN_ERROR_ON(src1->data_type() == DataType::UNKNOWN);
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src1, src2, src3, src4, dst);
- ARM_COMPUTE_RETURN_ERROR_ON(src1->dimension(0) + src2->dimension(0) + src3->dimension(0) + src4->dimension(0) > dst->dimension(0));
-
- for(size_t i = 1; i < Coordinates::num_max_dimensions; ++i)
- {
- ARM_COMPUTE_RETURN_ERROR_ON(src1->dimension(i) != dst->dimension(i));
- ARM_COMPUTE_RETURN_ERROR_ON(src2->dimension(i) != dst->dimension(i));
- ARM_COMPUTE_RETURN_ERROR_ON(src3->dimension(i) != dst->dimension(i));
- ARM_COMPUTE_RETURN_ERROR_ON(src4->dimension(i) != dst->dimension(i));
- }
- ARM_COMPUTE_RETURN_ERROR_ON(src1->num_dimensions() > 4);
-
- return Status{};
-}
-} // namespace
-
-ClWidthConcatenate4TensorsKernel::ClWidthConcatenate4TensorsKernel()
-{
-}
-
-Status ClWidthConcatenate4TensorsKernel::validate(const ITensorInfo *src1, const ITensorInfo *src2, const ITensorInfo *src3, const ITensorInfo *src4, const ITensorInfo *dst)
-{
- ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(src1, src2, src3, src4, dst));
- return Status{};
-}
-
-void ClWidthConcatenate4TensorsKernel::configure(const CLCompileContext &compile_context,
- ITensorInfo *src1, ITensorInfo *src2,
- ITensorInfo *src3, ITensorInfo *src4,
- ITensorInfo *dst)
-{
- ARM_COMPUTE_ERROR_ON_NULLPTR(src1, src2, src3, src4, dst);
- ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(src1, src2, src3, src4, dst));
-
- auto padding_info = get_padding_info({ src1, src2, src3, src4, dst });
- const unsigned int min_dimension = std::min(std::min(src1->dimension(0), src2->dimension(0)), std::min(src3->dimension(0), src4->dimension(0)));
- const unsigned int num_elems_processed_per_iteration = adjust_vec_size(8, min_dimension);
- const unsigned int vec_size_leftover = dst->dimension(0) % num_elems_processed_per_iteration;
-
- // Add build options
- CLBuildOptions build_opts;
- build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(src1->data_type()));
- build_opts.add_option("-DVEC_SIZE=" + support::cpp11::to_string(num_elems_processed_per_iteration));
- build_opts.add_option("-DVEC_SIZE_LEFTOVER=" + support::cpp11::to_string(vec_size_leftover));
- build_opts.add_option("-DDEPTH=" + support::cpp11::to_string(src1->dimension(2)));
- build_opts.add_option("-DINPUT1_WIDTH=" + support::cpp11::to_string(src1->dimension(0)));
- build_opts.add_option("-DINPUT2_WIDTH=" + support::cpp11::to_string(src2->dimension(0)));
- build_opts.add_option("-DINPUT3_WIDTH=" + support::cpp11::to_string(src3->dimension(0)));
- build_opts.add_option("-DINPUT4_WIDTH=" + support::cpp11::to_string(src4->dimension(0)));
- build_opts.add_option("-DELEMENT_SIZE=" + support::cpp11::to_string(src1->element_size()));
- build_opts.add_option("-DINPUT1_ROTATE_N=" + support::cpp11::to_string((src1->dimension(0) - vec_size_leftover) % num_elems_processed_per_iteration));
- build_opts.add_option("-DINPUT2_ROTATE_N=" + support::cpp11::to_string((src1->dimension(0) + src2->dimension(0) - vec_size_leftover) % num_elems_processed_per_iteration));
- build_opts.add_option("-DINPUT3_ROTATE_N=" + support::cpp11::to_string((src1->dimension(0) + src2->dimension(0) + src3->dimension(0) - vec_size_leftover) % num_elems_processed_per_iteration));
-
-    // If the sources have different quantization info, set the quantization parameters needed for the re-quantization process
- const bool have_different_qinfo = helpers::tensor_info::tensors_have_different_quantization_info(dst, src1, src2, src3, src4);
- if(is_data_type_quantized_asymmetric(src1->data_type()) && have_different_qinfo)
- {
- const UniformQuantizationInfo iq1_info = src1->quantization_info().uniform();
- const UniformQuantizationInfo iq2_info = src2->quantization_info().uniform();
- const UniformQuantizationInfo iq3_info = src3->quantization_info().uniform();
- const UniformQuantizationInfo iq4_info = src4->quantization_info().uniform();
- const UniformQuantizationInfo oq_info = dst->quantization_info().uniform();
-
- build_opts.add_option("-DOFFSET_IN1=" + float_to_string_with_full_precision(iq1_info.offset));
- build_opts.add_option("-DSCALE_IN1=" + float_to_string_with_full_precision(iq1_info.scale));
- build_opts.add_option("-DOFFSET_IN2=" + float_to_string_with_full_precision(iq2_info.offset));
- build_opts.add_option("-DSCALE_IN2=" + float_to_string_with_full_precision(iq2_info.scale));
- build_opts.add_option("-DOFFSET_IN3=" + float_to_string_with_full_precision(iq3_info.offset));
- build_opts.add_option("-DSCALE_IN3=" + float_to_string_with_full_precision(iq3_info.scale));
- build_opts.add_option("-DOFFSET_IN4=" + float_to_string_with_full_precision(iq4_info.offset));
- build_opts.add_option("-DSCALE_IN4=" + float_to_string_with_full_precision(iq4_info.scale));
- build_opts.add_option("-DOFFSET_OUT=" + float_to_string_with_full_precision(oq_info.offset));
- build_opts.add_option("-DSCALE_OUT=" + float_to_string_with_full_precision(oq_info.scale));
- }
-
- // Create kernel
- _kernel = create_kernel(compile_context, "concatenate_width_x4", build_opts.options());
-
- // Configure kernel window
- Window win = calculate_max_window(*dst, Steps(num_elems_processed_per_iteration));
- ICLKernel::configure_internal(win.collapse(win, Window::DimZ));
-
- ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
-
- // Set config_id for enabling LWS tuning
- _config_id = "concatenate_width_x4_";
- _config_id += lower_string(string_from_data_type(src1->data_type()));
- _config_id += "_";
- _config_id += support::cpp11::to_string(src1->dimension(0));
- _config_id += "_";
- _config_id += support::cpp11::to_string(src1->dimension(1));
- _config_id += "_";
- _config_id += support::cpp11::to_string(src2->dimension(0));
- _config_id += "_";
- _config_id += support::cpp11::to_string(src2->dimension(1));
- _config_id += "_";
- _config_id += support::cpp11::to_string(src3->dimension(0));
- _config_id += "_";
- _config_id += support::cpp11::to_string(src3->dimension(1));
- _config_id += "_";
- _config_id += support::cpp11::to_string(src4->dimension(0));
- _config_id += "_";
- _config_id += support::cpp11::to_string(src4->dimension(1));
-}
-
-void ClWidthConcatenate4TensorsKernel::run_op(ITensorPack &tensors, const Window &window, ::cl::CommandQueue &queue)
-{
- ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
- ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window);
-
- const auto src0 = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC_VEC));
- const auto src1 = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC_VEC + 1));
- const auto src2 = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC_VEC + 2));
- const auto src3 = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC_VEC + 3));
- auto dst = utils::cast::polymorphic_downcast<ICLTensor *>(tensors.get_tensor(TensorType::ACL_DST));
-
- Window slice = window.first_slice_window_4D();
-
- do
- {
- unsigned int idx = 0;
- add_4D_tensor_argument(idx, src0, slice);
- add_4D_tensor_argument(idx, src1, slice);
- add_4D_tensor_argument(idx, src2, slice);
- add_4D_tensor_argument(idx, src3, slice);
- add_4D_tensor_argument(idx, dst, slice);
- enqueue(queue, *this, window, lws_hint());
- }
- while(window.slide_window_slice_4D(slice));
-}
-} // namespace kernels
-} // namespace opencl
-} // namespace arm_compute
diff --git a/src/core/gpu/cl/kernels/ClWidthConcatenate4TensorsKernel.h b/src/core/gpu/cl/kernels/ClWidthConcatenate4TensorsKernel.h
deleted file mode 100644
index 19bda65902..0000000000
--- a/src/core/gpu/cl/kernels/ClWidthConcatenate4TensorsKernel.h
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
- * Copyright (c) 2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#ifndef ARM_COMPUTE_CL_WIDTH_CONCATENATE_4TENSORS_KERNEL_H
-#define ARM_COMPUTE_CL_WIDTH_CONCATENATE_4TENSORS_KERNEL_H
-
-#include "src/core/common/Macros.h"
-#include "src/core/gpu/cl/ClCompileContext.h"
-#include "src/core/gpu/cl/IClKernel.h"
-
-namespace arm_compute
-{
-namespace opencl
-{
-namespace kernels
-{
-/** Interface for the width concatenate kernel of 4 tensors.
- * All source tensors will be concatenated into the destination tensor.
- */
-class ClWidthConcatenate4TensorsKernel : public IClKernel
-{
-public:
- /** Default constructor */
- ClWidthConcatenate4TensorsKernel();
- ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(ClWidthConcatenate4TensorsKernel);
- /** Initialise the kernel's sources and destination
- *
- * @param[in] compile_context The compile context to be used.
- * @param[in] src1 First source tensor info. Data types supported: All.
- * @param[in] src2 Second source tensor info. Data types supported: same as @p src1
- * @param[in] src3 Third source tensor info. Data types supported: same as @p src1
- * @param[in] src4 Fourth source tensor info. Data types supported: same as @p src1
- * @param[out] dst Destination tensor info. Data types supported: same as @p src1.
- */
- void configure(const CLCompileContext &compile_context, ITensorInfo *src1, ITensorInfo *src2, ITensorInfo *src3, ITensorInfo *src4, ITensorInfo *dst);
- /** Static function to check if given info will lead to a valid configuration of @ref ClWidthConcatenate4TensorsKernel
- *
- * @param[in] src1 First tensor info. Data types supported: All.
- * @param[in] src2 Second tensor info. Data types supported: same as @p src1
- * @param[in] src3 Third tensor info. Data types supported: same as @p src1
- * @param[in] src4 Fourth tensor info. Data types supported: same as @p src1
- * @param[in] dst Destination tensor info. Data types supported: same as @p src1.
- *
- * @return a status
- */
- static Status validate(const ITensorInfo *src1, const ITensorInfo *src2, const ITensorInfo *src3, const ITensorInfo *src4, const ITensorInfo *dst);
-
- // Inherited methods overridden:
- void run_op(ITensorPack &tensors, const Window &window, ::cl::CommandQueue &queue) override;
-};
-} // namespace kernels
-} // namespace opencl
-} // namespace arm_compute
-#endif /* ARM_COMPUTE_CL_WIDTH_CONCATENATE_4TENSORS_KERNEL_H */
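The run_op() shown earlier fetches its inputs from consecutive ACL_SRC_VEC slots. A small sketch of packing tensors at those ids follows, assuming the caller already owns allocated ICLTensor objects and that the header paths match this revision; the helper name is an illustrative assumption.

#include "arm_compute/core/CL/ICLTensor.h"
#include "arm_compute/core/ITensorPack.h"
#include "arm_compute/core/experimental/Types.h"

// Packs four sources and the destination at the slot ids expected by
// ClWidthConcatenate4TensorsKernel::run_op() (ACL_SRC_VEC, ACL_SRC_VEC + 1, ..., ACL_DST).
arm_compute::ITensorPack pack_width_concat_x4(const arm_compute::ICLTensor *s0,
                                              const arm_compute::ICLTensor *s1,
                                              const arm_compute::ICLTensor *s2,
                                              const arm_compute::ICLTensor *s3,
                                              arm_compute::ICLTensor       *dst)
{
    using namespace arm_compute;
    ITensorPack pack;
    pack.add_const_tensor(TensorType::ACL_SRC_VEC, s0);
    pack.add_const_tensor(TensorType::ACL_SRC_VEC + 1, s1);
    pack.add_const_tensor(TensorType::ACL_SRC_VEC + 2, s2);
    pack.add_const_tensor(TensorType::ACL_SRC_VEC + 3, s3);
    pack.add_tensor(TensorType::ACL_DST, dst);
    return pack;
}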
diff --git a/src/core/gpu/cl/kernels/ClWidthConcatenateKernel.cpp b/src/core/gpu/cl/kernels/ClWidthConcatenateKernel.cpp
deleted file mode 100644
index d188a5226b..0000000000
--- a/src/core/gpu/cl/kernels/ClWidthConcatenateKernel.cpp
+++ /dev/null
@@ -1,126 +0,0 @@
-/*
- * Copyright (c) 2018-2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "src/core/gpu/cl/kernels/ClWidthConcatenateKernel.h"
-
-#include "arm_compute/core/CL/CLHelpers.h"
-#include "arm_compute/core/CL/CLKernelLibrary.h"
-#include "arm_compute/core/CL/ICLTensor.h"
-#include "arm_compute/core/Helpers.h"
-#include "arm_compute/core/Utils.h"
-#include "src/core/CL/CLValidate.h"
-#include "src/core/helpers/WindowHelpers.h"
-#include "support/Cast.h"
-
-#include "support/StringSupport.h"
-
-namespace arm_compute
-{
-namespace opencl
-{
-namespace kernels
-{
-namespace
-{
-Status validate_arguments(const ITensorInfo *src, unsigned int width_offset, const ITensorInfo *dst)
-{
- ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, dst);
- ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(src);
- ARM_COMPUTE_RETURN_ERROR_ON(src->data_type() == DataType::UNKNOWN);
-
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src, dst);
- ARM_COMPUTE_RETURN_ERROR_ON(src->dimension(0) + width_offset > dst->dimension(0));
-
- for(size_t i = 1; i < Coordinates::num_max_dimensions; ++i)
- {
- ARM_COMPUTE_RETURN_ERROR_ON(src->dimension(i) != dst->dimension(i));
- }
- ARM_COMPUTE_RETURN_ERROR_ON(src->num_dimensions() > 4);
-
- return Status{};
-}
-} // namespace
-
-ClWidthConcatenateKernel::ClWidthConcatenateKernel()
-{
-}
-
-Status ClWidthConcatenateKernel::validate(const ITensorInfo *src, unsigned int width_offset, const ITensorInfo *dst)
-{
- ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(src, width_offset, dst));
- return Status{};
-}
-
-void ClWidthConcatenateKernel::configure(const CLCompileContext &compile_context, ITensorInfo *src, unsigned int width_offset, ITensorInfo *dst)
-{
- ARM_COMPUTE_ERROR_ON_NULLPTR(src, dst);
- ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(src, width_offset, dst));
-
- auto padding_info = get_padding_info({ src, dst });
-
- const unsigned int num_elems_processed_per_iteration = adjust_vec_size(16, src->dimension(0));
-
- // Add build options
- CLBuildOptions build_opts;
- build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(src->data_type()));
- build_opts.add_option("-DVEC_SIZE=" + support::cpp11::to_string(num_elems_processed_per_iteration));
- build_opts.add_option("-DVEC_SIZE_LEFTOVER=" + support::cpp11::to_string(src->dimension(0) % num_elems_processed_per_iteration));
- build_opts.add_option("-DWIDTH_OFFSET=" + support::cpp11::to_string(width_offset));
- build_opts.add_option("-DDEPTH=" + support::cpp11::to_string(src->dimension(2)));
-
- if(is_data_type_quantized_asymmetric(src->data_type()) && src->quantization_info() != dst->quantization_info())
- {
- const UniformQuantizationInfo iqinfo = src->quantization_info().uniform();
- const UniformQuantizationInfo oqinfo = dst->quantization_info().uniform();
-
- build_opts.add_option("-DOFFSET_IN1=" + float_to_string_with_full_precision(iqinfo.offset));
- build_opts.add_option("-DOFFSET_OUT=" + float_to_string_with_full_precision(oqinfo.offset));
- build_opts.add_option("-DSCALE_IN1=" + float_to_string_with_full_precision(iqinfo.scale));
- build_opts.add_option("-DSCALE_OUT=" + float_to_string_with_full_precision(oqinfo.scale));
- }
-
- // Create kernel
- _kernel = create_kernel(compile_context, "concatenate_width", build_opts.options());
- // Configure kernel window
- Window win = calculate_max_window(*src, Steps(num_elems_processed_per_iteration));
- ICLKernel::configure_internal(win.collapse(win, Window::DimZ));
-
- ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
-}
-
-void ClWidthConcatenateKernel::run_op(ITensorPack &tensors, const Window &window, ::cl::CommandQueue &queue)
-{
- ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
- ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window);
-
- const auto src = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC));
- auto dst = utils::cast::polymorphic_downcast<ICLTensor *>(tensors.get_tensor(TensorType::ACL_DST));
-
- unsigned int idx = 0;
- add_4D_tensor_argument(idx, src, window);
- add_4D_tensor_argument(idx, dst, window);
- enqueue(queue, *this, window, lws_hint());
-}
-} // namespace kernels
-} // namespace opencl
-} // namespace arm_compute
diff --git a/src/core/gpu/cl/kernels/ClWidthConcatenateKernel.h b/src/core/gpu/cl/kernels/ClWidthConcatenateKernel.h
deleted file mode 100644
index 6bc8e57a08..0000000000
--- a/src/core/gpu/cl/kernels/ClWidthConcatenateKernel.h
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * Copyright (c) 2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_CL_WIDTH_CONCATENATE_LAYER_KERNEL_H
-#define ARM_COMPUTE_CL_WIDTH_CONCATENATE_LAYER_KERNEL_H
-
-#include "src/core/common/Macros.h"
-#include "src/core/gpu/cl/ClCompileContext.h"
-#include "src/core/gpu/cl/IClKernel.h"
-
-namespace arm_compute
-{
-namespace opencl
-{
-namespace kernels
-{
-/** Interface for the width concatenate kernel.
- * The source tensor will be concatenated into the destination tensor.
- */
-class ClWidthConcatenateKernel : public IClKernel
-{
-public:
- /** Default constructor */
- ClWidthConcatenateKernel();
- ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(ClWidthConcatenateKernel);
- /** Initialise the kernel's source and destination
- *
- * @param[in] compile_context The compile context to be used.
- * @param[in] src Source tensor info. Data types supported: All.
- * @param[in] width_offset The offset on the X axis.
- * @param[in,out] dst Destination tensor info. Data types supported: same as @p src.
- *
- */
- void configure(const CLCompileContext &compile_context, ITensorInfo *src, unsigned int width_offset, ITensorInfo *dst);
- /** Static function to check if given info will lead to a valid configuration of @ref ClWidthConcatenateKernel
- *
- * @param[in] src Source tensor info. Data types supported: All.
- * @param[in] width_offset The offset on the X axis.
- * @param[in] dst Destination tensor info. Data types supported: same as @p src.
- *
- * @return a status
- */
- static Status validate(const ITensorInfo *src, unsigned int width_offset, const ITensorInfo *dst);
-
- // Inherited methods overridden:
- void run_op(ITensorPack &tensors, const Window &window, ::cl::CommandQueue &queue) override;
-};
-} // namespace kernels
-} // namespace opencl
-} // namespace arm_compute
-#endif /* ARM_COMPUTE_CL_WIDTH_CONCATENATE_LAYER_KERNEL_H */
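A sketch of validating one step of a width concatenation at a given X offset, mirroring the validate() signature above; the function name, shapes and offset are illustrative assumptions.

#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/TensorShape.h"
#include "src/core/gpu/cl/kernels/ClWidthConcatenateKernel.h"

// Validates copying a 16-wide tensor into a 48-wide destination starting at X offset 32,
// i.e. one step of a wider concatenation driven one input at a time.
arm_compute::Status check_width_concat_step()
{
    using namespace arm_compute;
    const TensorInfo   src(TensorShape(16U, 8U, 4U), 1, DataType::F32);
    const TensorInfo   dst(TensorShape(48U, 8U, 4U), 1, DataType::F32);
    const unsigned int width_offset = 32; // must satisfy src.dimension(0) + width_offset <= dst.dimension(0)
    return opencl::kernels::ClWidthConcatenateKernel::validate(&src, width_offset, &dst);
}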
diff --git a/src/core/gpu/cl/kernels/ClWinogradFilterTransformKernel.cpp b/src/core/gpu/cl/kernels/ClWinogradFilterTransformKernel.cpp
deleted file mode 100644
index 381b4bcae9..0000000000
--- a/src/core/gpu/cl/kernels/ClWinogradFilterTransformKernel.cpp
+++ /dev/null
@@ -1,151 +0,0 @@
-/*
- * Copyright (c) 2018-2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "src/core/gpu/cl/kernels/ClWinogradFilterTransformKernel.h"
-
-#include "arm_compute/core/CL/CLHelpers.h"
-#include "arm_compute/core/CL/CLKernelLibrary.h"
-#include "arm_compute/core/CL/ICLTensor.h"
-#include "arm_compute/core/Helpers.h"
-#include "arm_compute/core/TensorInfo.h"
-#include "arm_compute/core/Types.h"
-#include "arm_compute/core/Utils.h"
-#include "arm_compute/core/Validate.h"
-#include "arm_compute/core/Window.h"
-#include "arm_compute/core/utils/misc/ShapeCalculator.h"
-#include "src/core/CL/CLValidate.h"
-#include "src/core/helpers/AutoConfiguration.h"
-#include "src/core/helpers/WindowHelpers.h"
-#include "support/Cast.h"
-#include "support/StringSupport.h"
-
-using namespace arm_compute::misc::shape_calculator;
-
-namespace arm_compute
-{
-namespace opencl
-{
-namespace kernels
-{
-namespace
-{
-Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, const WinogradInfo &winograd_info)
-{
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F32, DataType::F16);
- ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(input);
-
- const Size2D kernel_size = winograd_info.kernel_size;
- const Size2D output_tile_size = winograd_info.output_tile_size;
-
- const size_t idx_w = get_data_layout_dimension_index(input->data_layout(), DataLayoutDimension::WIDTH);
- const size_t idx_h = get_data_layout_dimension_index(input->data_layout(), DataLayoutDimension::HEIGHT);
-
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(!cl_winograd_convolution_layer_supported(output_tile_size, kernel_size, input->data_layout()), "Winograd filter transform not supported");
- ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(idx_w) != kernel_size.width || input->dimension(idx_h) != kernel_size.height);
- ARM_COMPUTE_RETURN_ERROR_ON(input->num_dimensions() > 4);
-
- // Checks performed when output is configured
- if(output->total_size() != 0)
- {
- const TensorInfo tensor_info_output = input->clone()->set_tensor_shape(compute_winograd_filter_transform_shape(*input, winograd_info));
-
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(output, &tensor_info_output);
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
- }
-
- return Status{};
-}
-
-std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, ITensorInfo *output)
-{
- ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
- ARM_COMPUTE_UNUSED(output);
-
- const unsigned int num_elems_processed_per_iteration_x = input->data_layout() == DataLayout::NCHW ? input->dimension(0) : 1;
- const unsigned int num_elems_processed_per_iteration_y = input->dimension(1);
- const unsigned int num_elems_read_per_iteration_z = input->data_layout() == DataLayout::NCHW ? 1 : input->dimension(2);
-
- Window win = calculate_max_window(*input, Steps(num_elems_processed_per_iteration_x, num_elems_processed_per_iteration_y, num_elems_read_per_iteration_z));
- Window win_collapsed = win.collapse(win, Window::DimZ);
- return std::make_pair(Status{}, win_collapsed);
-}
-} // namespace
-
-void ClWinogradFilterTransformKernel::configure(const ClCompileContext &compile_context, ITensorInfo *src, ITensorInfo *dst, const WinogradInfo &winograd_info)
-{
- ARM_COMPUTE_ERROR_ON_NULLPTR(src, dst);
-
- // Output auto initialization if not yet initialized
- auto_init_if_empty(*dst, src->clone()->set_tensor_shape(compute_winograd_filter_transform_shape(*src, winograd_info)));
-
- ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(src, dst, winograd_info));
- auto padding_info = get_padding_info({ src, dst });
-
- // Set build options
- CLBuildOptions build_opts;
- build_opts.add_option("-DSRC_DIM_Z=" + support::cpp11::to_string(src->dimension(2)));
- build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(src->data_type()));
- build_opts.add_option_if(winograd_info.kernel_size.height == 1, "-DWINOGRAD_FILTER_TRANSFORM_HORIZONTAL");
- build_opts.add_option_if(winograd_info.kernel_size.width == 1, "-DWINOGRAD_FILTER_TRANSFORM_VERTICAL");
- const Size2D kernel_size = winograd_info.kernel_size;
- const Size2D output_tile_size = winograd_info.output_tile_size;
-
- // Create kernel
- std::string kernel_name = "winograd_filter_transform_" + output_tile_size.to_string() + "_" + kernel_size.to_string() + "_" + lower_string(string_from_data_layout(src->data_layout()));
- _kernel = create_kernel(compile_context, kernel_name, build_opts.options());
-
- // Configure kernel window
- auto win_config = validate_and_configure_window(src, dst);
- ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
- IClKernel::configure_internal(win_config.second);
- ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
-}
-
-Status ClWinogradFilterTransformKernel::validate(const ITensorInfo *src, const ITensorInfo *dst, const WinogradInfo &winograd_info)
-{
- ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(src, dst, winograd_info));
- ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(src->clone().get(), dst->clone().get()).first);
-
- return Status{};
-}
-
-void ClWinogradFilterTransformKernel::run_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue)
-{
- ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
- ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(IClKernel::window(), window);
-
- auto src = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC));
- auto dst = utils::cast::polymorphic_downcast<ICLTensor *>(tensors.get_tensor(TensorType::ACL_DST));
-
- // Setup output window
- Window window_out;
- window_out.use_tensor_dimensions(dst->info()->tensor_shape(), 0);
-
- unsigned int idx = 0;
- add_4D_tensor_argument(idx, src, window);
- add_3D_tensor_argument(idx, dst, window_out);
- enqueue(queue, *this, window, lws_hint());
-}
-} // namespace kernels
-} // namespace opencl
-} // namespace arm_compute
\ No newline at end of file
diff --git a/src/core/gpu/cl/kernels/ClWinogradFilterTransformKernel.h b/src/core/gpu/cl/kernels/ClWinogradFilterTransformKernel.h
deleted file mode 100644
index 2bc2ceb36e..0000000000
--- a/src/core/gpu/cl/kernels/ClWinogradFilterTransformKernel.h
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
- * Copyright (c) 2018-2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_CL_WINOGRAD_FILTER_TRANSFORM_KERNEL_H
-#define ARM_COMPUTE_CL_WINOGRAD_FILTER_TRANSFORM_KERNEL_H
-
-#include "arm_compute/core/KernelDescriptors.h"
-#include "src/core/common/Macros.h"
-#include "src/core/gpu/cl/ClCompileContext.h"
-#include "src/core/gpu/cl/IClKernel.h"
-
-namespace arm_compute
-{
-namespace opencl
-{
-namespace kernels
-{
-/** Interface for the Winograd filter transform kernel. */
-class ClWinogradFilterTransformKernel : public IClKernel
-{
-public:
- /** Default constructor */
- ClWinogradFilterTransformKernel() = default;
- ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(ClWinogradFilterTransformKernel);
- /** Set the input and output tensor.
- *
- * @note Winograd filter transform supports the following configurations for NCHW data layout
- * F(output tile, kernel size):F(2x2, 3x3), F(2x1, 3x1), F(1x2, 1x3),
- * F(4x4, 3x3), F(4x1, 3x1), F(1x4, 1x3),
- * F(4x4, 5x5), F(4x1, 5x1), F(1x4, 1x5)
- *
- * @note Winograd filter transform supports the following configurations for NHWC data layout
- * F(output tile, kernel size):F(4x4, 3x3), F(4x1, 3x1), F(1x4, 1x3),
- * F(4x4, 5x5), F(4x1, 5x1), F(1x4, 1x5)
- *
- * Strides: only unit strides
- *
- * @param[in] compile_context The compile context to be used.
- * @param[in] src Source tensor info. The input is a 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM] (NCHW data layout) or [IFM, kernel_x, kernel_y, OFM] (NHWC data layout). Data types supported: F16/F32.
- * @param[out] dst             The output tensor info. The shape for this tensor can be calculated using the utility function @p compute_winograd_filter_transform_shape. Data types supported: Same as @p src
- * @param[in] winograd_info Contains Winograd's information described in @ref WinogradInfo
- */
- void configure(const ClCompileContext &compile_context, ITensorInfo *src, ITensorInfo *dst, const WinogradInfo &winograd_info);
- /** Static function to check if given info will lead to a valid configuration
- *
- * Similar to ClWinogradFilterTransformKernel::configure()
- *
- * @return a status
- */
- static Status validate(const ITensorInfo *src, const ITensorInfo *dst, const WinogradInfo &winograd_info);
-
- // Inherited methods overridden:
- void run_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue) override;
-};
-} // namespace kernels
-} // namespace opencl
-} // namespace arm_compute
-#endif /*ARM_COMPUTE_CL_WINOGRAD_FILTER_TRANSFORM_KERNEL_H */
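A sketch of building a WinogradInfo for F(4x4, 3x3) in NHWC and passing it to the static validate() above. The weight shape follows the [IFM, kernel_x, kernel_y, OFM] layout documented in configure(); the function name and the concrete sizes are assumptions, and the destination info is left empty so that the output checks are deferred, as in the removed validate_arguments().

#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"
#include "src/core/gpu/cl/kernels/ClWinogradFilterTransformKernel.h"

// Validates an F(4x4, 3x3) filter transform for NHWC weights of shape [IFM, kx, ky, OFM].
arm_compute::Status check_winograd_filter_transform()
{
    using namespace arm_compute;
    const WinogradInfo winograd_info(Size2D(4U, 4U) /* output tile */, Size2D(3U, 3U) /* kernel */,
                                     Size2D(56U, 56U) /* input dims */,
                                     PadStrideInfo(1, 1, 1, 1) /* unit strides */, DataLayout::NHWC);
    TensorInfo weights(TensorShape(64U, 3U, 3U, 32U), 1, DataType::F32); // [IFM, kx, ky, OFM]
    weights.set_data_layout(DataLayout::NHWC);
    TensorInfo dst; // left empty: output shape/type checks are skipped until it is configured
    return opencl::kernels::ClWinogradFilterTransformKernel::validate(&weights, &dst, winograd_info);
}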
diff --git a/src/core/gpu/cl/kernels/ClWinogradInputTransformKernel.cpp b/src/core/gpu/cl/kernels/ClWinogradInputTransformKernel.cpp
deleted file mode 100644
index 17f0eb9e2c..0000000000
--- a/src/core/gpu/cl/kernels/ClWinogradInputTransformKernel.cpp
+++ /dev/null
@@ -1,278 +0,0 @@
-/*
- * Copyright (c) 2018-2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "src/core/gpu/cl/kernels/ClWinogradInputTransformKernel.h"
-
-#include "arm_compute/core/CL/CLHelpers.h"
-#include "arm_compute/core/CL/CLKernelLibrary.h"
-#include "arm_compute/core/CL/ICLTensor.h"
-#include "arm_compute/core/CL/OpenCL.h"
-#include "arm_compute/core/Error.h"
-#include "arm_compute/core/Helpers.h"
-#include "arm_compute/core/Types.h"
-#include "arm_compute/core/Utils.h"
-#include "arm_compute/core/utils/misc/ShapeCalculator.h"
-#include "src/core/AccessWindowStatic.h"
-#include "src/core/CL/CLValidate.h"
-#include "src/core/helpers/AutoConfiguration.h"
-#include "src/core/helpers/WindowHelpers.h"
-#include "support/Cast.h"
-#include "support/StringSupport.h"
-
-namespace arm_compute
-{
-namespace opencl
-{
-namespace kernels
-{
-namespace
-{
-Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, const WinogradInfo &winograd_info)
-{
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F32, DataType::F16);
- ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(input);
-
- const PadStrideInfo conv_info = winograd_info.convolution_info;
- const Size2D output_tile_size = winograd_info.output_tile_size;
- const Size2D kernel_size = winograd_info.kernel_size;
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(conv_info.stride().first != 1 || conv_info.stride().second != 1, "Winograd input transform only supports unit strides");
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(!cl_winograd_convolution_layer_supported(output_tile_size, kernel_size, input->data_layout()), "Winograd input transform not supported");
-
- ARM_COMPUTE_UNUSED(conv_info);
- ARM_COMPUTE_UNUSED(output_tile_size);
- ARM_COMPUTE_UNUSED(kernel_size);
-
- // Validate configured output
- if(output->total_size() != 0)
- {
- const TensorShape output_shape = misc::shape_calculator::compute_winograd_input_transform_shape(*input, winograd_info);
-
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(output->tensor_shape(), output_shape);
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
- }
-
- return Status{};
-}
-
-std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, ITensorInfo *output, const WinogradInfo &winograd_info)
-{
- ARM_COMPUTE_UNUSED(output);
- ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
-
- bool window_changed = false;
- Window win = calculate_max_window(*input, Steps(1, 1));
-
- if(input->data_layout() == DataLayout::NCHW)
- {
- const PadStrideInfo conv_info = winograd_info.convolution_info;
- const Size2D output_tile_size = winograd_info.output_tile_size;
- const Size2D kernel_size = winograd_info.kernel_size;
-
- unsigned int num_elems_read_per_iteration_x = output_tile_size.width + kernel_size.width - 1;
- unsigned int num_elems_read_per_iteration_y = output_tile_size.height + kernel_size.height - 1;
-
- AccessWindowRectangle input_access(input, -conv_info.pad_left(), -conv_info.pad_top(), num_elems_read_per_iteration_x, num_elems_read_per_iteration_y);
- window_changed = update_window_and_padding(win, input_access);
- }
-
- Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
- return std::make_pair(err, win);
-}
-} // namespace
-
-ClWinogradInputTransformKernel::ClWinogradInputTransformKernel()
- : _border_size(0), _data_layout(DataLayout::UNKNOWN), _num_tiles_x(0), _num_tiles_y(0), _step_z(1)
-{
-}
-
-BorderSize ClWinogradInputTransformKernel::border_size() const
-{
- return _border_size;
-}
-
-void ClWinogradInputTransformKernel::configure(const ClCompileContext &compile_context, ITensorInfo *src, ITensorInfo *dst, const WinogradInfo &winograd_info)
-{
- ARM_COMPUTE_ERROR_ON_NULLPTR(src, dst);
- ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(src, dst, winograd_info));
-
- auto padding_info = get_padding_info({ src, dst });
-
- const PadStrideInfo conv_info = winograd_info.convolution_info;
- const Size2D output_tile_size = winograd_info.output_tile_size;
- const Size2D kernel_size = winograd_info.kernel_size;
-
- _data_layout = src->data_layout();
-
- const size_t idx_w = get_data_layout_dimension_index(_data_layout, DataLayoutDimension::WIDTH);
- const size_t idx_h = get_data_layout_dimension_index(_data_layout, DataLayoutDimension::HEIGHT);
-
- // Compute the number of output tiles along the x and y direction of size "output_tile_size"
- const Size2D num_tiles = compute_winograd_convolution_tiles(Size2D(src->dimension(idx_w), src->dimension(idx_h)),
- kernel_size,
- output_tile_size,
- conv_info);
-
- _num_tiles_x = num_tiles.width;
- _num_tiles_y = num_tiles.height;
-
- const TensorShape output_shape = misc::shape_calculator::compute_winograd_input_transform_shape(*src, winograd_info);
-
- // Output auto initialization if not yet initialized
- auto_init_if_empty(*dst, src->clone()->set_tensor_shape(output_shape));
-
- ARM_COMPUTE_ERROR_ON(_num_tiles_x * _num_tiles_y != static_cast<int>(dst->dimension(1)));
- const size_t total_batches = src->tensor_shape().total_size_upper(3);
-
- CLBuildOptions build_opts;
- if(_data_layout == DataLayout::NHWC)
- {
- build_opts.add_option("-DNHWC");
- build_opts.add_option("-DSRC_WIDTH=" + support::cpp11::to_string(src->dimension(idx_w)));
- build_opts.add_option("-DSRC_HEIGHT=" + support::cpp11::to_string(src->dimension(idx_h)));
- build_opts.add_option("-DNUM_TILES_X=" + support::cpp11::to_string(_num_tiles_x));
- build_opts.add_option("-DNUM_TILES_Y=" + support::cpp11::to_string(_num_tiles_y));
- build_opts.add_option("-DPAD_LEFT=" + support::cpp11::to_string(conv_info.pad_left()));
- build_opts.add_option("-DPAD_TOP=" + support::cpp11::to_string(conv_info.pad_top()));
- build_opts.add_option("-DOUTPUT_TILE_W=" + support::cpp11::to_string(output_tile_size.width));
- build_opts.add_option("-DOUTPUT_TILE_H=" + support::cpp11::to_string(output_tile_size.height));
- build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(src->data_type()));
- build_opts.add_option_if(winograd_info.kernel_size.height == 1, "-DWINOGRAD_INPUT_TRANSFORM_HORIZONTAL");
- build_opts.add_option_if(winograd_info.kernel_size.width == 1, "-DWINOGRAD_INPUT_TRANSFORM_VERTICAL");
- }
- else
- {
- build_opts.add_option("-DNUM_TILES_X=" + support::cpp11::to_string(_num_tiles_x));
- build_opts.add_option("-DPAD_LEFT=" + support::cpp11::to_string(conv_info.pad_left()));
- build_opts.add_option("-DPAD_TOP=" + support::cpp11::to_string(conv_info.pad_top()));
- build_opts.add_option("-DOUTPUT_TILE_W=" + support::cpp11::to_string(output_tile_size.width));
- build_opts.add_option("-DOUTPUT_TILE_H=" + support::cpp11::to_string(output_tile_size.height));
- build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(src->data_type()));
- build_opts.add_option_if(winograd_info.kernel_size.height == 1, "-DWINOGRAD_INPUT_TRANSFORM_HORIZONTAL");
- build_opts.add_option_if(winograd_info.kernel_size.width == 1, "-DWINOGRAD_INPUT_TRANSFORM_VERTICAL");
- build_opts.add_option_if(total_batches > 1, "-DSRC_DEPTH=" + support::cpp11::to_string(src->dimension(2)));
- }
-
- // Create kernel
- std::string kernel_name = "winograd_input_transform_" + output_tile_size.to_string() + "_" + kernel_size.to_string();
-
- // Get the maximum dimension from the tile size
- const unsigned int tile_max_dim = std::max(output_tile_size.width, output_tile_size.height);
-
- // Check optimized kernel if output_dims == 2x2
- if((tile_max_dim == 2) && (_data_layout == DataLayout::NCHW))
- {
- _step_z = (src->dimension(2) % 2) != 0 ? 1 : 2;
- }
-
- // Append stepz and data layout
- kernel_name += "_stepz";
- kernel_name += support::cpp11::to_string(_step_z);
- kernel_name += "_" + lower_string(string_from_data_layout(_data_layout));
-
- _kernel = create_kernel(compile_context, kernel_name, build_opts.options());
-
- // Create window and update padding
- auto win_config = validate_and_configure_window(src, dst, winograd_info);
- ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
- IClKernel::configure_internal(win_config.second, cl::NDRange(1, 1, 8));
-
- _border_size = BorderSize(src->padding());
-
- ARM_COMPUTE_ERROR_ON((src->data_layout() == DataLayout::NHWC) && has_padding_changed(padding_info));
-
- _config_id = kernel_name;
- _config_id += support::cpp11::to_string(src->dimension(0));
- _config_id += "_";
- _config_id += support::cpp11::to_string(src->dimension(1));
- _config_id += "_";
- _config_id += support::cpp11::to_string(src->dimension(2));
- _config_id += "_";
- _config_id += support::cpp11::to_string(conv_info.pad_left());
- _config_id += "_";
- _config_id += support::cpp11::to_string(conv_info.pad_top());
- _config_id += "_";
- _config_id += lower_string(string_from_data_layout(_data_layout));
-}
-
-Status ClWinogradInputTransformKernel::validate(const ITensorInfo *src, const ITensorInfo *dst, const WinogradInfo &winograd_info)
-{
- ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, dst);
- ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(src, dst, winograd_info));
- ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(src->clone().get(), dst->clone().get(), winograd_info).first);
- return Status{};
-}
-
-void ClWinogradInputTransformKernel::run_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue)
-{
- ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
- ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(IKernel::window(), window);
-
- auto src = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC));
- auto dst = utils::cast::polymorphic_downcast<ICLTensor *>(tensors.get_tensor(TensorType::ACL_DST));
-
- const size_t idx_w = get_data_layout_dimension_index(_data_layout, DataLayoutDimension::WIDTH);
- const size_t idx_h = get_data_layout_dimension_index(_data_layout, DataLayoutDimension::HEIGHT);
- const size_t idx_c = get_data_layout_dimension_index(_data_layout, DataLayoutDimension::CHANNEL);
- const size_t total_batches = window.shape().total_size_upper(3);
-
- // Collapse window
- Window window_collapsed = window.collapse_if_possible(IClKernel::window(), Window::DimZ);
-
- if(_data_layout == DataLayout::NHWC)
- {
- Window slice = window_collapsed.first_slice_window_3D();
- slice.set(1, Window::Dimension(0, _num_tiles_x * _num_tiles_y, 1));
- slice.set(2, Window::Dimension(0, total_batches, 1));
-
- unsigned int idx = 0;
- add_4D_tensor_argument(idx, src, slice);
- add_4D_tensor_argument(idx, dst, slice);
- enqueue(queue, *this, slice, lws_hint());
- }
- else
- {
- Window slice = window_collapsed.first_slice_window_3D();
- slice.set(idx_w, Window::Dimension(0, _num_tiles_x, 1));
- slice.set(idx_h, Window::Dimension(0, _num_tiles_y, 1));
-
- ARM_COMPUTE_ERROR_ON(((slice[idx_c].end() - slice[idx_c].start()) % _step_z) != 0);
- slice.set(idx_c, Window::Dimension(slice[idx_c].start(), slice[idx_c].end(), _step_z));
-
- unsigned int idx = 2 * num_arguments_per_3D_tensor();
- _kernel.setArg<cl_uint>(idx++, static_cast<unsigned int>(src->info()->strides_in_bytes()[3]));
- _kernel.setArg<cl_uint>(idx++, static_cast<unsigned int>(dst->info()->strides_in_bytes()[3]));
-
- do
- {
- unsigned int idx = 0;
- add_3D_tensor_argument(idx, src, slice);
- add_3D_tensor_argument(idx, dst, slice);
-
- enqueue(queue, *this, slice, lws_hint());
- }
- while(window_collapsed.slide_window_slice_3D(slice));
- }
-}
-} // namespace kernels
-} // namespace opencl
-} // namespace arm_compute
\ No newline at end of file
diff --git a/src/core/gpu/cl/kernels/ClWinogradInputTransformKernel.h b/src/core/gpu/cl/kernels/ClWinogradInputTransformKernel.h
deleted file mode 100644
index 76b45279a4..0000000000
--- a/src/core/gpu/cl/kernels/ClWinogradInputTransformKernel.h
+++ /dev/null
@@ -1,88 +0,0 @@
-/*
- * Copyright (c) 2018-2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_CL_WINOGRAD_INPUT_TRANSFORM_KERNEL_H
-#define ARM_COMPUTE_CL_WINOGRAD_INPUT_TRANSFORM_KERNEL_H
-
-#include "arm_compute/core/KernelDescriptors.h"
-#include "src/core/common/Macros.h"
-#include "src/core/gpu/cl/ClCompileContext.h"
-#include "src/core/gpu/cl/IClKernel.h"
-
-namespace arm_compute
-{
-namespace opencl
-{
-namespace kernels
-{
-/** OpenCL kernel to perform Winograd input transform.*/
-class ClWinogradInputTransformKernel : public IClKernel
-{
-public:
- /** Default constructor */
- ClWinogradInputTransformKernel();
- ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(ClWinogradInputTransformKernel);
- /** Set the input and output of the kernel.
- *
- * @note Winograd input transform supports the following configurations for NCHW data layout
- * F(output tile, kernel size):F(2x2, 3x3), F(2x1, 3x1), F(1x2, 1x3),
- * F(4x4, 3x3), F(4x1, 3x1), F(1x4, 1x3),
- * F(4x4, 5x5), F(4x1, 5x1), F(1x4, 1x5)
- *
- * @note Winograd input transform supports the following configurations for NHWC data layout
- * F(output tile, kernel size):F(4x4, 3x3), F(4x1, 3x1), F(1x4, 1x3),
- * F(4x4, 5x5), F(4x1, 5x1), F(1x4, 1x5)
- *
- * Strides: only unit strides
- *
- * @param[in] compile_context The compile context to be used.
- * @param[in] src The input tensor info to transform. Data types supported: F16/F32
- * @param[out] dst             The output tensor info. The shape for this tensor can be calculated using the utility function @p compute_winograd_input_transform_shape. Data types supported: Same as @p src
- * @param[in] winograd_info Contains Winograd's information described in @ref WinogradInfo.
- */
- void configure(const ClCompileContext &compile_context, ITensorInfo *src, ITensorInfo *dst, const WinogradInfo &winograd_info);
- /** Static function to check if given info will lead to a valid configuration
- *
- * Similar to ClWinogradInputTransformKernel::configure()
- *
- * @return a status
- */
- static Status validate(const ITensorInfo *src, const ITensorInfo *dst, const WinogradInfo &winograd_info);
-
- // Inherited methods overridden:
- void run_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue) override;
- BorderSize border_size() const override;
-
-private:
- using WinogradKey = std::pair<std::pair<int, int>, std::pair<int, int>>;
-
- BorderSize _border_size;
- DataLayout _data_layout;
- int _num_tiles_x;
- int _num_tiles_y;
- unsigned int _step_z;
-};
-} // namespace kernels
-} // namespace opencl
-} // namespace arm_compute
-#endif /*ARM_COMPUTE_CL_WINOGRAD_INPUT_TRANSFORM_KERNEL_H */
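A similar sketch for the input transform, again using F(4x4, 3x3) in NHWC; the activation shape, padding and function name are illustrative assumptions, and the empty destination defers the output checks as above.

#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"
#include "src/core/gpu/cl/kernels/ClWinogradInputTransformKernel.h"

// Validates an F(4x4, 3x3) input transform on a 56x56x64 NHWC activation with 1-pixel padding.
arm_compute::Status check_winograd_input_transform()
{
    using namespace arm_compute;
    const PadStrideInfo conv_info(1, 1, 1, 1); // unit strides, 1-pixel padding for a 3x3 kernel
    const WinogradInfo  winograd_info(Size2D(4U, 4U), Size2D(3U, 3U), Size2D(56U, 56U), conv_info, DataLayout::NHWC);
    TensorInfo src(TensorShape(64U, 56U, 56U), 1, DataType::F32); // NHWC: [C, W, H]
    src.set_data_layout(DataLayout::NHWC);
    TensorInfo dst; // shape checks are deferred while the output is still unconfigured
    return opencl::kernels::ClWinogradInputTransformKernel::validate(&src, &dst, winograd_info);
}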
diff --git a/src/core/gpu/cl/kernels/ClWinogradOutputTransformKernel.cpp b/src/core/gpu/cl/kernels/ClWinogradOutputTransformKernel.cpp
deleted file mode 100644
index a6c05420ed..0000000000
--- a/src/core/gpu/cl/kernels/ClWinogradOutputTransformKernel.cpp
+++ /dev/null
@@ -1,263 +0,0 @@
-/*
- * Copyright (c) 2018-2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "src/core/gpu/cl/kernels/ClWinogradOutputTransformKernel.h"
-
-#include "arm_compute/core/CL/CLHelpers.h"
-#include "arm_compute/core/CL/CLKernelLibrary.h"
-#include "arm_compute/core/CL/ICLTensor.h"
-#include "arm_compute/core/Helpers.h"
-#include "arm_compute/core/IAccessWindow.h"
-#include "arm_compute/core/TensorInfo.h"
-#include "arm_compute/core/Types.h"
-#include "arm_compute/core/Utils.h"
-#include "arm_compute/core/Validate.h"
-#include "arm_compute/core/Window.h"
-#include "arm_compute/core/utils/misc/ShapeCalculator.h"
-#include "src/core/AccessWindowStatic.h"
-#include "src/core/CL/CLValidate.h"
-#include "src/core/helpers/AutoConfiguration.h"
-#include "src/core/helpers/WindowHelpers.h"
-#include "support/Cast.h"
-#include "support/StringSupport.h"
-
-#include <cmath>
-
-using namespace arm_compute::misc::shape_calculator;
-
-namespace arm_compute
-{
-namespace opencl
-{
-namespace kernels
-{
-namespace
-{
-Status validate_arguments(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output, const WinogradInfo &winograd_info, const ActivationLayerInfo &act_info)
-{
- ARM_COMPUTE_UNUSED(act_info);
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F32, DataType::F16);
- ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(input);
-
- ARM_COMPUTE_RETURN_ERROR_ON(output->data_layout() != winograd_info.output_data_layout);
-
- const PadStrideInfo conv_info = winograd_info.convolution_info;
- const Size2D output_tile_size = winograd_info.output_tile_size;
- const Size2D kernel_size = winograd_info.kernel_size;
- const Size2D input_dimensions = winograd_info.input_dimensions;
- const unsigned int num_channels = (winograd_info.kernel_size.width + winograd_info.output_tile_size.width - 1) * (winograd_info.kernel_size.height + winograd_info.output_tile_size.height - 1);
-
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(!cl_winograd_convolution_layer_supported(output_tile_size, kernel_size, winograd_info.output_data_layout), "Winograd output transform not supported");
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(input->dimension(2) != num_channels, "Wrong number of channels");
-
- // Compute number of elements to process in the X and Y direction
- // Compute the number of output tiles along the x and y direction of size "output_tile_size"
- const Size2D num_tiles = compute_winograd_convolution_tiles(input_dimensions,
- kernel_size,
- output_tile_size,
- conv_info);
-
- ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(1) != static_cast<unsigned int>((num_tiles.area())));
-
- if(bias != nullptr)
- {
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, bias);
- ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(0) != bias->dimension(0));
- }
-
- // Checks performed when output is configured
- if(output->total_size() != 0)
- {
- const TensorInfo tensor_info_output = input->clone()->set_tensor_shape(compute_winograd_output_transform_shape(*input, winograd_info));
-
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(output, &tensor_info_output);
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
- }
-
- return Status{};
-}
-
-std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, ITensorInfo *bias, ITensorInfo *output, const Size2D &output_tile_size)
-{
- ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
- ARM_COMPUTE_UNUSED(bias);
-
- constexpr unsigned int num_elems_processed_per_iteration = 1;
-
- Window win = calculate_max_window(*input, Steps(num_elems_processed_per_iteration));
- bool window_changed = false;
-
- if(output->data_layout() == DataLayout::NCHW)
- {
- const int output_static_window_end_x = ceil_to_multiple(output->dimension(0), output_tile_size.width);
- const int output_static_window_end_y = ceil_to_multiple(output->dimension(1), output_tile_size.height);
-
- AccessWindowRectangle input_access(input, 0, 0, num_elems_processed_per_iteration, num_elems_processed_per_iteration);
- AccessWindowStatic output_access(output, 0, 0, output_static_window_end_x, output_static_window_end_y);
- window_changed = update_window_and_padding(win, input_access, output_access);
- }
-
- Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
- return std::make_pair(err, win);
-}
-} // namespace
-
-void ClWinogradOutputTransformKernel::configure(const ClCompileContext &compile_context, ITensorInfo *src, ITensorInfo *bias, ITensorInfo *dst, const WinogradInfo &winograd_info,
- const ActivationLayerInfo &act_info)
-{
- ARM_COMPUTE_ERROR_ON_NULLPTR(src, dst);
-
- // Output tensor auto initialization if not yet initialized
- auto_init_if_empty(*dst, src->clone()->set_tensor_shape(compute_winograd_output_transform_shape(*src, winograd_info)));
-
- ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(src, bias, dst, winograd_info, act_info));
-
- // Configure kernel window
- auto win_config = validate_and_configure_window(src, bias, dst, winograd_info.output_tile_size);
- ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
- IClKernel::configure_internal(win_config.second);
-
- auto padding_info = get_padding_info({ src, bias, dst });
-
- _is_nhwc = winograd_info.output_data_layout == DataLayout::NHWC;
-
- // Compute num_tiles_x
- const Size2D input_dimensions = winograd_info.input_dimensions;
- const Size2D kernel_size = winograd_info.kernel_size;
- const Size2D output_tile_size = winograd_info.output_tile_size;
- const PadStrideInfo conv_info = winograd_info.convolution_info;
- const int idx_width = get_data_layout_dimension_index(winograd_info.output_data_layout, DataLayoutDimension::WIDTH);
- const int idx_height = get_data_layout_dimension_index(winograd_info.output_data_layout, DataLayoutDimension::HEIGHT);
-
- // Compute the number of output tiles along the x and y direction of size "output_tile_size"
- const Size2D num_tiles = compute_winograd_convolution_tiles(input_dimensions,
- kernel_size,
- output_tile_size,
- conv_info);
- const size_t total_batches = dst->tensor_shape().total_size_upper(3);
-
- // Set build options
- CLBuildOptions build_opts;
- build_opts.add_option("-DACTIVATION_TYPE=" + lower_string(string_from_activation_func(act_info.activation())));
- build_opts.add_option_if(act_info.enabled(), "-DA_VAL=" + float_to_string_with_full_precision(act_info.a()));
- build_opts.add_option_if(act_info.enabled(), "-DB_VAL=" + float_to_string_with_full_precision(act_info.b()));
-
- if((output_tile_size.x() == 2) || (output_tile_size.x() == 1 && output_tile_size.y() == 2))
- {
- build_opts.add_option("-DVEC_SIZE=2");
- }
- else if((output_tile_size.x() == 4) || (output_tile_size.x() == 1 && output_tile_size.y() == 4))
- {
- build_opts.add_option("-DVEC_SIZE=4");
- }
-
- build_opts.add_option_if(bias != nullptr, std::string("-DHAS_BIAS"));
- build_opts.add_option("-cl-fast-relaxed-math");
- build_opts.add_option("-DN0=" + support::cpp11::to_string(win_config.second.x().step()));
- build_opts.add_option("-DNUM_TILES_X=" + support::cpp11::to_string(num_tiles.width));
- build_opts.add_option("-DOUTPUT_TILE_W=" + support::cpp11::to_string(output_tile_size.width));
- build_opts.add_option("-DOUTPUT_TILE_H=" + support::cpp11::to_string(output_tile_size.height));
- build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(src->data_type()));
- build_opts.add_option("-DSRC_HEIGHT=" + support::cpp11::to_string(src->dimension(1)));
- build_opts.add_option("-DDST_WIDTH=" + support::cpp11::to_string(dst->dimension(idx_width)));
- build_opts.add_option("-DDST_HEIGHT=" + support::cpp11::to_string(dst->dimension(idx_height)));
- build_opts.add_option_if(total_batches > 1, "-DSRC_DEPTH=" + support::cpp11::to_string(src->dimension(2)));
- build_opts.add_option_if(winograd_info.kernel_size.height == 1, "-DWINOGRAD_OUTPUT_TRANSFORM_HORIZONTAL");
- build_opts.add_option_if(winograd_info.kernel_size.width == 1, "-DWINOGRAD_OUTPUT_TRANSFORM_VERTICAL");
-
- // Create kernel
- std::string kernel_name = "winograd_output_transform_" + output_tile_size.to_string() + "_" + kernel_size.to_string() + "_" + lower_string(string_from_data_layout(winograd_info.output_data_layout));
- _kernel = create_kernel(compile_context, kernel_name, build_opts.options());
-
- // Set config_id for enabling LWS tuning
- _config_id = kernel_name;
- _config_id += "_";
- _config_id += lower_string(string_from_data_type(src->data_type()));
- _config_id += "_";
- _config_id += support::cpp11::to_string(src->dimension(0));
- _config_id += "_";
- _config_id += support::cpp11::to_string(src->dimension(1));
- _config_id += "_";
- _config_id += support::cpp11::to_string(dst->dimension(0));
- _config_id += "_";
- _config_id += support::cpp11::to_string(dst->dimension(1));
- _config_id += "_";
- _config_id += lower_string(string_from_data_layout(winograd_info.output_data_layout));
-
- ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info) && _is_nhwc);
-}
-
-Status ClWinogradOutputTransformKernel::validate(const ITensorInfo *src, const ITensorInfo *bias, const ITensorInfo *dst, const WinogradInfo &winograd_info, const ActivationLayerInfo &act_info)
-{
- ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(src, (bias != nullptr ? bias->clone().get() : nullptr), dst, winograd_info, act_info));
- ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(src->clone().get(), (bias != nullptr ? bias->clone().get() : nullptr), dst->clone().get(), winograd_info.output_tile_size).first);
- return Status{};
-}
-
-void ClWinogradOutputTransformKernel::run_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue)
-{
- ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
- ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(IClKernel::window(), window);
-
- auto src = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC_0));
- auto bias = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC_1));
- auto dst = utils::cast::polymorphic_downcast<ICLTensor *>(tensors.get_tensor(TensorType::ACL_DST));
-
- // Collapse window
- Window window_collapsed = window.collapse_if_possible(IClKernel::window(), Window::DimZ);
-
- // Get initial windows
- Window slice = window_collapsed.first_slice_window_4D();
- slice.set(Window::DimZ, Window::Dimension(0, 1, 1));
-
- // Setup output slice
- Window slice_out(slice);
- slice_out.set(Window::DimX, Window::Dimension(0, 0, 0));
- slice_out.set(Window::DimY, Window::Dimension(0, 0, 0));
-
- if(bias != nullptr)
- {
- unsigned int idx1 = 2 * num_arguments_per_4D_tensor();
- Window slice_biases;
- slice_biases.use_tensor_dimensions(bias->info()->tensor_shape());
- add_1D_tensor_argument(idx1, bias, slice_biases);
- }
-
- if(_is_nhwc)
- {
- unsigned int idx2 = 2 * num_arguments_per_4D_tensor() + ((bias != nullptr) ? num_arguments_per_1D_tensor() : 0);
- _kernel.setArg(idx2, static_cast<int>(dst->info()->total_size() - dst->info()->strides_in_bytes().y()));
- }
-
- do
- {
- unsigned int idx = 0;
- add_4D_tensor_argument(idx, src, slice);
- add_4D_tensor_argument(idx, dst, slice_out);
- enqueue(queue, *this, slice, lws_hint());
- }
- while(window.slide_window_slice_3D(slice) && window.slide_window_slice_3D(slice_out));
-}
-} // namespace kernels
-} // namespace opencl
-} // namespace arm_compute
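
For reference, a minimal standalone sketch of the shape arithmetic that validate_arguments() above enforces on the output-transform input: dimension 2 must equal (kernel_w + output_tile_w - 1) * (kernel_h + output_tile_h - 1) and dimension 1 must equal the number of output tiles. The convolution output size used below is hypothetical, and the per-axis ceiling division stands in for compute_winograd_convolution_tiles().

// Standalone sketch (not library code), assuming F(4x4, 3x3) and a
// hypothetical 56x56 convolution output.
#include <cstddef>
#include <iostream>

int main()
{
    const std::size_t kernel_w = 3, kernel_h = 3; // kernel size
    const std::size_t tile_w = 4, tile_h = 4;     // output tile size
    const std::size_t out_w = 56, out_h = 56;     // hypothetical convolution output size

    // Dimension 2 of the transform input carries the Winograd "channels"
    const std::size_t num_channels = (kernel_w + tile_w - 1) * (kernel_h + tile_h - 1); // 36

    // Dimension 1 carries one element per output tile (ceiling division per axis)
    const std::size_t num_tiles_x = (out_w + tile_w - 1) / tile_w; // 14
    const std::size_t num_tiles_y = (out_h + tile_h - 1) / tile_h; // 14
    const std::size_t num_tiles = num_tiles_x * num_tiles_y;       // 196

    std::cout << "expected src dim2: " << num_channels << ", expected src dim1: " << num_tiles << "\n";
    return 0;
}
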
diff --git a/src/core/gpu/cl/kernels/ClWinogradOutputTransformKernel.h b/src/core/gpu/cl/kernels/ClWinogradOutputTransformKernel.h
deleted file mode 100644
index 48b27e658c..0000000000
--- a/src/core/gpu/cl/kernels/ClWinogradOutputTransformKernel.h
+++ /dev/null
@@ -1,87 +0,0 @@
-/*
- * Copyright (c) 2018-2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_CL_WINOGRAD_OUTPUT_TRANSFORM_KERNEL_H
-#define ARM_COMPUTE_CL_WINOGRAD_OUTPUT_TRANSFORM_KERNEL_H
-
-#include "arm_compute/core/KernelDescriptors.h"
-#include "src/core/common/Macros.h"
-#include "src/core/gpu/cl/ClCompileContext.h"
-#include "src/core/gpu/cl/IClKernel.h"
-
-namespace arm_compute
-{
-namespace opencl
-{
-namespace kernels
-{
-/** Interface for the Winograd output transform kernel. */
-class ClWinogradOutputTransformKernel : public IClKernel
-{
-public:
- /** Default constructor */
- ClWinogradOutputTransformKernel() = default;
- ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(ClWinogradOutputTransformKernel);
- /** Set the input and output tensor.
- *
- * @note Winograd output transform supports the following configurations for NCHW data layout
- * F(output tile, kernel size):F(2x2, 3x3), F(2x1, 3x1), F(1x2, 1x3),
- * F(4x4, 3x3), F(4x1, 3x1), F(1x4, 1x3),
- * F(4x4, 5x5), F(4x1, 5x1), F(1x4, 1x5)
- *
- * @note Winograd output transform supports the following configurations for NHWC data layout
- * F(output tile, kernel size):F(4x4, 3x3), F(4x1, 3x1), F(1x4, 1x3),
- * F(4x4, 5x5), F(4x1, 5x1), F(1x4, 1x5)
- *
- * Strides: only unit strides
- *
- * @param[in] compile_context The compile context to be used.
- * @param[in] src Source tensor info with shape [C, N, K, batches]. Data types supported: F16/F32.
- * @param[in] bias Biases tensor info. Shared biases supported. Biases are 1D tensor with dimensions [OFM]. It can be a nullptr. Data type supported: as @p src
- * @param[out] dst The output tensor info. The shape for this tensor can be calculated using the utility function @p compute_winograd_output_transform_shape. Data types supported: Same as @p src
- * @param[in] winograd_info Contains Winograd's information described in @ref WinogradInfo
- * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
- */
- void configure(const ClCompileContext &compile_context, ITensorInfo *src, ITensorInfo *bias, ITensorInfo *dst, const WinogradInfo &winograd_info,
- const ActivationLayerInfo &act_info = ActivationLayerInfo());
-
- /** Static function to check if given info will lead to a valid configuration
- *
- * Similar to ClWinogradOutputTransformKernel::configure()
- *
- * @return a status
- */
- static Status validate(const ITensorInfo *src, const ITensorInfo *bias, const ITensorInfo *dst, const WinogradInfo &winograd_info, const ActivationLayerInfo &act_info = ActivationLayerInfo());
-
- // Inherited methods overridden:
- void run_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue) override;
-
-private:
- using WinogradKey = std::pair<std::pair<int, int>, std::pair<int, int>>;
-
- bool _is_nhwc{ false };
-};
-} // namespace kernels
-} // namespace opencl
-} // namespace arm_compute
-#endif /*ARM_COMPUTE_CL_WINOGRAD_OUTPUT_TRANSFORM_KERNEL_H */
diff --git a/src/core/gpu/cl/kernels/gemm/ClGemmHelpers.cpp b/src/core/gpu/cl/kernels/gemm/ClGemmHelpers.cpp
deleted file mode 100644
index 7866ccb679..0000000000
--- a/src/core/gpu/cl/kernels/gemm/ClGemmHelpers.cpp
+++ /dev/null
@@ -1,116 +0,0 @@
-/*
- * Copyright (c) 2019-2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "src/core/gpu/cl/kernels/gemm/ClGemmHelpers.h"
-
-#include "arm_compute/core/CL/CLHelpers.h"
-#include "arm_compute/core/CL/CLKernelLibrary.h"
-#include "arm_compute/core/CL/OpenCL.h"
-#include "arm_compute/core/utils/misc/ShapeCalculator.h"
-
-#include <utility>
-
-namespace arm_compute
-{
-namespace opencl
-{
-namespace kernels
-{
-namespace gemm
-{
-std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> configure_lhs_rhs_info(unsigned int m, unsigned int n, unsigned int m0, unsigned int n0, unsigned int k0, unsigned int v0, unsigned int h0,
- bool lhs_interleave, bool rhs_interleave, bool lhs_transpose, bool rhs_transpose, bool export_to_cl_image)
-{
- ARM_COMPUTE_ERROR_ON(m0 == 0 || n0 == 0);
- v0 = std::max(std::min(static_cast<int>(m / m0), static_cast<int>(v0)), static_cast<int>(1));
- h0 = std::max(std::min(static_cast<int>(n / n0), static_cast<int>(h0)), static_cast<int>(1));
-
- const GEMMLHSMatrixInfo lhs_info(m0, k0, v0, lhs_transpose, lhs_interleave);
- const GEMMRHSMatrixInfo rhs_info(n0, k0, h0, rhs_transpose, rhs_interleave, export_to_cl_image);
-
- return std::make_pair(lhs_info, rhs_info);
-}
-
-std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> select_lhs_rhs_info(std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> info_img,
- std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> info_buf,
- unsigned int n, unsigned int k, unsigned int b, DataType data_type)
-{
- const TensorInfo tensor_rhs_info(TensorShape(n, k, b), 1, data_type);
- const TensorShape shape = misc::shape_calculator::compute_rhs_reshaped_shape(tensor_rhs_info, info_img.second);
- const TensorInfo tensor_reshaped_info(shape, 1, data_type);
-
- if(bool(validate_image2d_support_on_rhs(tensor_reshaped_info, info_img.second)))
- {
- return info_img;
- }
- else
- {
- return info_buf;
- }
-}
-
-void update_padding_for_cl_image(ITensorInfo *tensor)
-{
- constexpr unsigned int num_floats_per_pixel = 4;
-
- const unsigned int stride_y_in_elements = tensor->strides_in_bytes()[1] / tensor->element_size();
- const unsigned int pixel_alignment = get_cl_image_pitch_alignment(CLKernelLibrary::get().get_device());
-
- ARM_COMPUTE_ERROR_ON_MSG(pixel_alignment == 0, "Cannot retrieve cl_image pitch alignment");
- if(pixel_alignment == 0)
- {
- return;
- }
-
- const unsigned int row_pitch_alignment = pixel_alignment * num_floats_per_pixel;
- const unsigned int round_up_width = ((stride_y_in_elements + row_pitch_alignment - 1) / row_pitch_alignment) * row_pitch_alignment;
- const unsigned int padding = round_up_width - stride_y_in_elements;
-
- tensor->extend_padding(PaddingSize(0, tensor->padding().right + padding, 0, 0));
-}
-
-Status validate_image2d_support_on_rhs(const ITensorInfo &tensor_reshaped_info, const GEMMRHSMatrixInfo &rhs_info)
-{
- if(rhs_info.export_to_cl_image)
- {
- ARM_COMPUTE_RETURN_ERROR_ON_MSG((rhs_info.n0 == 2) || (rhs_info.n0 == 3), "Export to cl_image only supported with n0 = 4, 8 or 16");
- ARM_COMPUTE_RETURN_ERROR_ON_MSG((rhs_info.k0 == 2) || (rhs_info.k0 == 3), "Export to cl_image only supported with k0 = 4, 8 or 16");
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_NOT_IN(&tensor_reshaped_info, DataType::F32, DataType::F16);
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(!image2d_from_buffer_supported(CLKernelLibrary::get().get_device()), "The extension cl_khr_image2d_from_buffer is not supported on the target platform");
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(get_cl_image_pitch_alignment(CLKernelLibrary::get().get_device()) == 0, "Impossible to retrieve the cl_image pitch alignment");
-
- // Check the width and height of the output tensor.
- // Since we cannot create a 3d image from a buffer, the third dimension is collapsed on the second dimension
- const size_t max_image_w = CLKernelLibrary::get().get_device().getInfo<CL_DEVICE_IMAGE2D_MAX_WIDTH>();
- const size_t max_image_h = CLKernelLibrary::get().get_device().getInfo<CL_DEVICE_IMAGE2D_MAX_HEIGHT>();
-
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(tensor_reshaped_info.tensor_shape()[0] > max_image_w * 4, "Not supported width for cl_image");
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(tensor_reshaped_info.tensor_shape()[1] * tensor_reshaped_info.tensor_shape()[2] > max_image_h, "Not supported height for cl_image");
- }
-
- return Status{};
-}
-} // namespace gemm
-} // namespace kernels
-} // namespace opencl
-} // namespace arm_compute
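
For reference, a minimal standalone sketch of the row-pitch padding arithmetic in update_padding_for_cl_image() above; the pixel alignment and row stride values are hypothetical stand-ins for what the device and the RHS reshaped tensor would report.

// Standalone sketch (not library code) of the cl_image row-pitch rounding.
#include <iostream>

int main()
{
    const unsigned int num_floats_per_pixel = 4;    // each image pixel packs a float4
    const unsigned int pixel_alignment = 16;        // hypothetical device pitch alignment, in pixels
    const unsigned int stride_y_in_elements = 1000; // hypothetical row stride of the RHS reshaped tensor

    const unsigned int row_pitch_alignment = pixel_alignment * num_floats_per_pixel; // 64 elements
    const unsigned int round_up_width =
        ((stride_y_in_elements + row_pitch_alignment - 1) / row_pitch_alignment) * row_pitch_alignment; // 1024
    const unsigned int padding = round_up_width - stride_y_in_elements; // extra right padding: 24 elements

    std::cout << "right padding to add (elements): " << padding << "\n";
    return 0;
}
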
diff --git a/src/core/gpu/cl/kernels/gemm/ClGemmHelpers.h b/src/core/gpu/cl/kernels/gemm/ClGemmHelpers.h
deleted file mode 100644
index 3fce8c9173..0000000000
--- a/src/core/gpu/cl/kernels/gemm/ClGemmHelpers.h
+++ /dev/null
@@ -1,95 +0,0 @@
-/*
- * Copyright (c) 2019-2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_CL_GEMM_HELPERS_H
-#define ARM_COMPUTE_CL_GEMM_HELPERS_H
-
-#include "arm_compute/core/TensorInfo.h"
-#include "arm_compute/core/Types.h"
-
-namespace arm_compute
-{
-namespace opencl
-{
-namespace kernels
-{
-namespace gemm
-{
-/** Configure @ref GEMMLHSMatrixInfo and @ref GEMMRHSMatrixInfo
- *
- * @param[in] m Number of rows (M) in the LHS matrix not reshaped
- * @param[in] n Number of columns (N) in the RHS matrix not reshaped
- * @param[in] m0 Number of rows processed by each thread/work-item
- * @param[in] n0 Number of columns processed by each thread/work-item
- * @param[in] k0 Number of inner accumulations performed by each thread/work-item
- * @param[in] v0 Number of vertical blocks of size (m0xk0) stored on the same output row
- * @param[in] h0 Number of horizontal blocks of size (k0xn0) stored on the same output row
- * @param[in] lhs_interleave True if the v0 (m0xk0) blocks have to be interleaved in the output row
- * @param[in] rhs_interleave True if the h0 (k0xn0) blocks have to be interleaved in the output row
- * @param[in] lhs_transpose True if the (m0xk0) block has to be transposed before being stored
- * @param[in] rhs_transpose True if the (k0xn0) block has to be transposed before being stored
- * @param[in] export_to_cl_image (Optional) True if the RHS reshaped matrix has to be exported to cl_image
- *
- * @return @ref GEMMLHSMatrixInfo and @ref GEMMRHSMatrixInfo
- */
-std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> configure_lhs_rhs_info(unsigned int m, unsigned int n, unsigned int m0, unsigned int n0, unsigned int k0, unsigned int v0, unsigned int h0,
- bool lhs_interleave, bool rhs_interleave, bool lhs_transpose, bool rhs_transpose, bool export_to_cl_image = false);
-
-/** Select @ref GEMMLHSMatrixInfo and @ref GEMMRHSMatrixInfo
- *
- * This function accepts two pairs of GEMMLHSMatrixInfo/GEMMRHSMatrixInfo, where only the first has cl_image2d support,
- * and selects between them by validating the GEMMRHSMatrixInfo. If the validation passes, the function returns
- * the first pair (the one with cl_image2d support); otherwise it falls back to the second.
- *
- * @param[in] info_img GEMMLHSMatrixInfo/GEMMRHSMatrixInfo with cl_image2d support
- * @param[in] info_buf GEMMLHSMatrixInfo/GEMMRHSMatrixInfo to fall-back if cl_image2d cannot be used
- * @param[in] n Number of columns (N) in the RHS matrix not reshaped
- * @param[in] k Number of rows (K) in the RHS matrix not reshaped
- * @param[in] b Batch size
- * @param[in] data_type Data type
- *
- * @return @ref GEMMLHSMatrixInfo and @ref GEMMRHSMatrixInfo
- */
-std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> select_lhs_rhs_info(std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> info_img,
- std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> info_buf,
- unsigned int n, unsigned int k, unsigned int b, DataType data_type);
-
-/** Update padding required to export the OpenCL buffer to OpenCL image2d
- *
- * @param[in,out] tensor ITensorInfo of the tensor required to be exported to OpenCL image2d
- */
-void update_padding_for_cl_image(ITensorInfo *tensor);
-
-/** Utility function to validate the image2d OpenCL object support on the RHS reshaped matrix
- *
- * @param[in] tensor_reshaped_info TensorInfo for the RHS reshaped matrix
- * @param[in] rhs_info @ref GEMMRHSMatrixInfo
- *
- * @return Status reporting if we can use the image2d OpenCL object on the RHS reshaped matrix
- */
-Status validate_image2d_support_on_rhs(const ITensorInfo &tensor_reshaped_info, const GEMMRHSMatrixInfo &rhs_info);
-} // namespace gemm
-} // namespace kernels
-} // namespace opencl
-} // namespace arm_compute
-#endif /* ARM_COMPUTE_CL_GEMM_HELPERS_H */
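
For reference, the image-or-buffer fallback documented for select_lhs_rhs_info() above, reduced to a self-contained sketch; the validation predicate below is only a placeholder for validate_image2d_support_on_rhs().

// Standalone sketch (not library code) of the select-with-fallback pattern.
#include <iostream>

struct Config
{
    int  n0;
    int  k0;
    bool export_to_cl_image;
};

// Placeholder for validate_image2d_support_on_rhs(): n0/k0 must be 4, 8 or 16
static bool image2d_config_valid(const Config &c)
{
    const bool n0_ok = (c.n0 == 4) || (c.n0 == 8) || (c.n0 == 16);
    const bool k0_ok = (c.k0 == 4) || (c.k0 == 8) || (c.k0 == 16);
    return c.export_to_cl_image && n0_ok && k0_ok;
}

// Prefer the cl_image2d configuration when it validates, otherwise fall back to the buffer one
static Config select_config(const Config &info_img, const Config &info_buf)
{
    return image2d_config_valid(info_img) ? info_img : info_buf;
}

int main()
{
    const Config img{ 2, 4, true };   // n0 = 2 is not exportable to cl_image
    const Config buf{ 2, 4, false };
    std::cout << "export_to_cl_image: " << std::boolalpha
              << select_config(img, buf).export_to_cl_image << "\n"; // false
    return 0;
}
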
diff --git a/src/core/gpu/cl/kernels/gemm/IClGemmKernelConfig.h b/src/core/gpu/cl/kernels/gemm/IClGemmKernelConfig.h
deleted file mode 100644
index a49836cfda..0000000000
--- a/src/core/gpu/cl/kernels/gemm/IClGemmKernelConfig.h
+++ /dev/null
@@ -1,123 +0,0 @@
-/*
- * Copyright (c) 2019-2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_ICL_GEMM_KERNEL_CONFIG_H
-#define ARM_COMPUTE_ICL_GEMM_KERNEL_CONFIG_H
-
-#include "arm_compute/core/GPUTarget.h"
-#include "arm_compute/core/Types.h"
-#include "src/core/common/Macros.h"
-
-#include <array>
-namespace arm_compute
-{
-namespace opencl
-{
-namespace kernels
-{
-namespace gemm
-{
-/** Basic container for the OpenCL GEMM configuration functions */
-template <class T>
-class CLGEMMConfigArray
-{
-public:
- /** Alias for F32 index */
- static constexpr size_t DT_F32 = 0;
- /** Alias for F16 index */
- static constexpr size_t DT_F16 = 1;
- /** Alias for Int8 index */
- static constexpr size_t DT_INT8 = 2;
-
- /** Constructor
- *
- * @param[in] func_f32 Function to call for GEMM F32
- * @param[in] func_f16 Function to call for GEMM F16
- * @param[in] func_int8 Function to call for GEMM Int8 (QASYMM8, QASYMM8_SIGNED, QSYMM8_PER_CHANNEL)
- *
- */
- CLGEMMConfigArray(T func_f32, T func_f16, T func_int8)
- : _configs{ func_f32, func_f16, func_int8 }
- {
- }
-
- /** Method to return the GEMM configuration function based on data type
- *
- * @param[in] data_type Input data type
- *
- * @return The configuration function for the given data type, or nullptr if the data type is not valid
- */
- T get_function(DataType data_type)
- {
- switch(data_type)
- {
- case DataType::F32:
- return _configs.at(DT_F32);
- case DataType::F16:
- return _configs.at(DT_F16);
- case DataType::QASYMM8:
- case DataType::QASYMM8_SIGNED:
- case DataType::QSYMM8_PER_CHANNEL:
- return _configs.at(DT_INT8);
- default:
- return nullptr;
- }
- }
-
-private:
- std::array<T, 3> _configs;
-};
-
-/** Basic interface for the GEMM kernel configuration */
-class IClGemmKernelConfig
-{
-public:
- /** Constructor
- *
- * @param[in] arch GPU target
- */
- IClGemmKernelConfig(GPUTarget arch)
- : _target(arch)
- {
- }
- ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(IClGemmKernelConfig);
- /** Virtual destructor */
- virtual ~IClGemmKernelConfig() = default;
- /** Given M, N, K and B, this method returns the @ref GEMMLHSMatrixInfo and @ref GEMMRHSMatrixInfo to be used
- *
- * @param[in] m Number of rows LHS matrix
- * @param[in] n Number of columns RHS matrix
- * @param[in] k Number of columns LHS matrix or number of rows RHS matrix
- * @param[in] b Batch size
- * @param[in] data_type Data type
- */
- virtual std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> configure(unsigned int m, unsigned int n, unsigned int k, unsigned int b, DataType data_type) = 0;
-
-protected:
- GPUTarget _target;
-};
-} // namespace gemm
-} // namespace kernels
-} // namespace opencl
-} // namespace arm_compute
-#endif /* ARM_COMPUTE_ICL_GEMM_KERNEL_CONFIG_H */
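
For reference, the per-data-type dispatch that CLGEMMConfigArray implements for the heuristic classes below, reduced to a self-contained sketch with placeholder types and block sizes.

// Standalone sketch (not library code) of the member-function-pointer dispatch.
#include <array>
#include <cstddef>
#include <iostream>
#include <utility>

enum class DT : std::size_t { F32 = 0, F16 = 1, INT8 = 2 };
using Cfg = std::pair<int, int>; // stand-in for the (m0, n0) part of the LHS/RHS info

class ExampleConfig
{
public:
    Cfg configure(unsigned int m, unsigned int n, DT dt)
    {
        using Fn = Cfg (ExampleConfig::*)(unsigned int, unsigned int);
        // One entry per data-type family; F16 reuses the F32 heuristic, as the Bifrost class does
        const std::array<Fn, 3> table{ &ExampleConfig::cfg_f32, &ExampleConfig::cfg_f32, &ExampleConfig::cfg_int8 };
        const Fn fn = table.at(static_cast<std::size_t>(dt));
        return (this->*fn)(m, n);
    }

private:
    Cfg cfg_f32(unsigned int m, unsigned int n) { return (m == 1 && n < 2048) ? Cfg{ 1, 2 } : Cfg{ 5, 4 }; }
    Cfg cfg_int8(unsigned int m, unsigned int n) { return (m == 1 && n < 2048) ? Cfg{ 1, 2 } : Cfg{ 5, 2 }; }
};

int main()
{
    ExampleConfig cfg;
    const Cfg c = cfg.configure(1, 1024, DT::F32);
    std::cout << "m0=" << c.first << ", n0=" << c.second << "\n"; // m0=1, n0=2
    return 0;
}
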
diff --git a/src/core/gpu/cl/kernels/gemm/native/ClGemmDefaultConfigNativeBifrost.cpp b/src/core/gpu/cl/kernels/gemm/native/ClGemmDefaultConfigNativeBifrost.cpp
deleted file mode 100644
index 9d11006703..0000000000
--- a/src/core/gpu/cl/kernels/gemm/native/ClGemmDefaultConfigNativeBifrost.cpp
+++ /dev/null
@@ -1,246 +0,0 @@
-/*
- * Copyright (c) 2019-2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "src/core/gpu/cl/kernels/gemm/native/ClGemmDefaultConfigNativeBifrost.h"
-
-#include "arm_compute/core/CL/CLHelpers.h"
-#include "arm_compute/core/CL/CLKernelLibrary.h"
-#include "arm_compute/core/GPUTarget.h"
-#include "src/core/gpu/cl/kernels/gemm/ClGemmHelpers.h"
-
-#include <utility>
-
-namespace arm_compute
-{
-namespace opencl
-{
-namespace kernels
-{
-namespace gemm
-{
-ClGemmDefaultConfigNativeBifrost::ClGemmDefaultConfigNativeBifrost(GPUTarget gpu)
- : IClGemmKernelConfig(gpu)
-{
-}
-
-std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> ClGemmDefaultConfigNativeBifrost::configure(unsigned int m, unsigned int n, unsigned int k, unsigned int b, DataType data_type)
-{
- using ConfigurationFunctionExecutorPtr = std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> (ClGemmDefaultConfigNativeBifrost::*)(unsigned int m, unsigned int n, unsigned int k,
- unsigned int b);
-
- CLGEMMConfigArray<ConfigurationFunctionExecutorPtr> configs_G71(&ClGemmDefaultConfigNativeBifrost::configure_G71_f32,
- &ClGemmDefaultConfigNativeBifrost::configure_G71_f32, // We use the F32 heuristic
- &ClGemmDefaultConfigNativeBifrost::configure_G71_u8);
-
- CLGEMMConfigArray<ConfigurationFunctionExecutorPtr> configs_G76(&ClGemmDefaultConfigNativeBifrost::configure_G76_f32,
- &ClGemmDefaultConfigNativeBifrost::configure_G76_f32, // We use the F32 heuristic
- &ClGemmDefaultConfigNativeBifrost::configure_G76_u8);
-
- CLGEMMConfigArray<ConfigurationFunctionExecutorPtr> configs_G7x(&ClGemmDefaultConfigNativeBifrost::configure_default_f32,
- &ClGemmDefaultConfigNativeBifrost::configure_default_f32, // We use the F32 heuristic
- &ClGemmDefaultConfigNativeBifrost::configure_default_u8);
-
- ConfigurationFunctionExecutorPtr func = nullptr;
-
- switch(_target)
- {
- case GPUTarget::G76:
- func = configs_G76.get_function(data_type);
- break;
- case GPUTarget::G71:
- func = configs_G71.get_function(data_type);
- break;
- default:
- func = configs_G7x.get_function(data_type);
- break;
- }
-
- ARM_COMPUTE_ERROR_ON_MSG(func == nullptr, "Data type not supported for GEMM");
- return (this->*func)(m, n, k, b);
-}
-
-std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> ClGemmDefaultConfigNativeBifrost::configure_G71_f32(unsigned int m, unsigned int n, unsigned int k, unsigned int b)
-{
- ARM_COMPUTE_UNUSED(k);
- ARM_COMPUTE_UNUSED(b);
-
- if(m == 1)
- {
- if(n < 2048)
- {
- return configure_lhs_rhs_info(m, n, 1, 2, 4, 1, 1, false, false, false, false);
- }
- else if(n >= 2048 && n < 8192)
- {
- return configure_lhs_rhs_info(m, n, 1, 4, 4, 1, 1, false, false, false, false);
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 1, 8, 4, 1, 1, false, false, false, false);
- }
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 5, 4, 2, 1, 1, false, false, false, false);
- }
-}
-
-std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> ClGemmDefaultConfigNativeBifrost::configure_G71_u8(unsigned int m, unsigned int n, unsigned int k, unsigned int b)
-{
- ARM_COMPUTE_UNUSED(k);
- ARM_COMPUTE_UNUSED(b);
-
- if(dot8_supported(CLKernelLibrary::get().get_device()))
- {
- if(m == 1)
- {
- if(n < 2048)
- {
- return configure_lhs_rhs_info(m, n, 1, 2, 16, 1, 1, false, false, false, false);
- }
- else if(n >= 2048 && n < 16384)
- {
- return configure_lhs_rhs_info(m, n, 1, 4, 16, 1, 1, false, false, false, false);
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 1, 8, 16, 1, 1, false, false, false, false);
- }
- }
- else
- {
- if(m < 64)
- {
- return configure_lhs_rhs_info(m, n, 2, 2, 16, 1, 1, false, false, false, false);
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 5, 2, 16, 1, 1, false, false, false, false);
- }
- }
- }
- else
- {
- if(m == 1)
- {
- if(n < 8192)
- {
- return configure_lhs_rhs_info(m, n, 1, 4, 16, 1, 1, false, false, false, false);
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 1, 8, 16, 1, 1, false, false, false, false);
- }
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 2, 8, 16, 1, 1, false, false, false, false);
- }
- }
-}
-
-std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> ClGemmDefaultConfigNativeBifrost::configure_G76_f32(unsigned int m, unsigned int n, unsigned int k, unsigned int b)
-{
- ARM_COMPUTE_UNUSED(k);
- ARM_COMPUTE_UNUSED(b);
-
- if(m == 1)
- {
- if(n > 4196)
- {
- return configure_lhs_rhs_info(m, n, 1, 4, 2, 1, 1, false, false, false, false);
- }
- else
- {
- if(k < 2048)
- {
- return configure_lhs_rhs_info(m, n, 1, 2, 2, 1, 1, false, false, false, false);
- }
- else if(k >= 2048 && k < 16384)
- {
- return configure_lhs_rhs_info(m, n, 1, 2, 4, 1, 1, false, false, false, false);
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 1, 2, 8, 1, 1, false, false, false, false);
- }
- }
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 2, 8, 2, 1, 1, false, false, false, false);
- }
-}
-
-std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> ClGemmDefaultConfigNativeBifrost::configure_G76_u8(unsigned int m, unsigned int n, unsigned int k, unsigned int b)
-{
- ARM_COMPUTE_UNUSED(k);
- ARM_COMPUTE_UNUSED(b);
-
- if(m == 1)
- {
- if(n < 2048)
- {
- return configure_lhs_rhs_info(m, n, 1, 2, 16, 1, 1, false, false, false, false);
- }
- else if(n >= 2048 && n < 16384)
- {
- return configure_lhs_rhs_info(m, n, 1, 4, 16, 1, 1, false, false, false, false);
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 1, 8, 16, 1, 1, false, false, false, false);
- }
- }
- else
- {
- if(m < 64)
- {
- return configure_lhs_rhs_info(m, n, 2, 2, 16, 1, 1, false, false, false, false);
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 5, 2, 16, 1, 1, false, false, false, false);
- }
- }
-}
-
-std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> ClGemmDefaultConfigNativeBifrost::configure_default_f32(unsigned int m, unsigned int n, unsigned int k, unsigned int b)
-{
- ARM_COMPUTE_UNUSED(k);
- ARM_COMPUTE_UNUSED(b);
-
- return configure_lhs_rhs_info(m, n, 5, 4, 4, 1, 1, false, false, false, false);
-}
-
-std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> ClGemmDefaultConfigNativeBifrost::configure_default_u8(unsigned int m, unsigned int n, unsigned int k, unsigned int b)
-{
- ARM_COMPUTE_UNUSED(k);
- ARM_COMPUTE_UNUSED(b);
-
- return configure_lhs_rhs_info(m, n, 5, 2, 16, 1, 1, false, false, false, false);
-}
-} // namespace gemm
-} // namespace kernels
-} // namespace opencl
-} // namespace arm_compute \ No newline at end of file
diff --git a/src/core/gpu/cl/kernels/gemm/native/ClGemmDefaultConfigNativeBifrost.h b/src/core/gpu/cl/kernels/gemm/native/ClGemmDefaultConfigNativeBifrost.h
deleted file mode 100644
index 385b96e40e..0000000000
--- a/src/core/gpu/cl/kernels/gemm/native/ClGemmDefaultConfigNativeBifrost.h
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Copyright (c) 2019-2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_CL_GEMM_DEFAULT_CONFIG_NATIVE_BIFROST_H
-#define ARM_COMPUTE_CL_GEMM_DEFAULT_CONFIG_NATIVE_BIFROST_H
-
-#include "src/core/gpu/cl/kernels/gemm/IClGemmKernelConfig.h"
-
-namespace arm_compute
-{
-namespace opencl
-{
-namespace kernels
-{
-namespace gemm
-{
-/** Bifrost based OpenCL GEMMNative configuration */
-class ClGemmDefaultConfigNativeBifrost final : public IClGemmKernelConfig
-{
-public:
- /** Constructor
- *
- * @param[in] gpu GPU target
- */
- ClGemmDefaultConfigNativeBifrost(GPUTarget gpu);
-
- // Inherited overridden method
- std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> configure(unsigned int m, unsigned int n, unsigned int k, unsigned int b, DataType data_type) override;
-
-private:
- std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> configure_G71_f32(unsigned int m, unsigned int n, unsigned int k, unsigned int b);
- std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> configure_G71_u8(unsigned int m, unsigned int n, unsigned int k, unsigned int b);
- std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> configure_G76_f32(unsigned int m, unsigned int n, unsigned int k, unsigned int b);
- std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> configure_G76_u8(unsigned int m, unsigned int n, unsigned int k, unsigned int b);
- std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> configure_default_f32(unsigned int m, unsigned int n, unsigned int k, unsigned int b);
- std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> configure_default_u8(unsigned int m, unsigned int n, unsigned int k, unsigned int b);
-};
-} // namespace gemm
-} // namespace kernels
-} // namespace opencl
-} // namespace arm_compute
-#endif /* ARM_COMPUTE_CL_GEMM_DEFAULT_CONFIG_NATIVE_BIFROST_H */
diff --git a/src/core/gpu/cl/kernels/gemm/native/ClGemmDefaultConfigNativeMidgard.cpp b/src/core/gpu/cl/kernels/gemm/native/ClGemmDefaultConfigNativeMidgard.cpp
deleted file mode 100644
index e3c129e3be..0000000000
--- a/src/core/gpu/cl/kernels/gemm/native/ClGemmDefaultConfigNativeMidgard.cpp
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- * Copyright (c) 2020-2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "src/core/gpu/cl/kernels/gemm/native/ClGemmDefaultConfigNativeMidgard.h"
-
-#include "arm_compute/core/CL/CLHelpers.h"
-#include "arm_compute/core/CL/CLKernelLibrary.h"
-#include "arm_compute/core/GPUTarget.h"
-#include "src/core/gpu/cl/kernels/gemm/ClGemmHelpers.h"
-
-#include <utility>
-
-namespace arm_compute
-{
-namespace opencl
-{
-namespace kernels
-{
-namespace gemm
-{
-ClGemmDefaultConfigNativeMidgard::ClGemmDefaultConfigNativeMidgard(GPUTarget gpu)
- : IClGemmKernelConfig(gpu)
-{
-}
-
-std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> ClGemmDefaultConfigNativeMidgard::configure(unsigned int m, unsigned int n, unsigned int k, unsigned int b, DataType data_type)
-{
- using ConfigurationFunctionExecutorPtr = std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> (ClGemmDefaultConfigNativeMidgard::*)(unsigned int m, unsigned int n, unsigned int k,
- unsigned int b);
-
- CLGEMMConfigArray<ConfigurationFunctionExecutorPtr> configs_default(nullptr,
- nullptr,
- &ClGemmDefaultConfigNativeMidgard::default_q8);
-
- auto func = configs_default.get_function(data_type);
- ARM_COMPUTE_ERROR_ON_MSG(func == nullptr, "Data type not supported for GEMM");
- return (this->*func)(m, n, k, b);
-}
-
-std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> ClGemmDefaultConfigNativeMidgard::default_q8(unsigned int m, unsigned int n, unsigned int k, unsigned int b)
-{
- ARM_COMPUTE_UNUSED(k);
- ARM_COMPUTE_UNUSED(b);
-
- const unsigned int m0 = std::min(m, static_cast<unsigned int>(4));
- const unsigned int n0 = std::min(n, static_cast<unsigned int>(4));
-
- return configure_lhs_rhs_info(m, n, m0, n0, 2, 1, 1, false, false, false, false);
-}
-} // namespace gemm
-} // namespace kernels
-} // namespace opencl
-} // namespace arm_compute \ No newline at end of file
diff --git a/src/core/gpu/cl/kernels/gemm/native/ClGemmDefaultConfigNativeMidgard.h b/src/core/gpu/cl/kernels/gemm/native/ClGemmDefaultConfigNativeMidgard.h
deleted file mode 100644
index 0ff5471f7c..0000000000
--- a/src/core/gpu/cl/kernels/gemm/native/ClGemmDefaultConfigNativeMidgard.h
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * Copyright (c) 2020-2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_CL_GEMM_DEFAULT_CONFIG_NATIVE_MIDGARD_H
-#define ARM_COMPUTE_CL_GEMM_DEFAULT_CONFIG_NATIVE_MIDGARD_H
-
-#include "src/core/gpu/cl/kernels/gemm/IClGemmKernelConfig.h"
-
-namespace arm_compute
-{
-namespace opencl
-{
-namespace kernels
-{
-namespace gemm
-{
-/** Midgard based OpenCL GEMMNative configuration */
-class ClGemmDefaultConfigNativeMidgard final : public IClGemmKernelConfig
-{
-public:
- /** Constructor
- *
- * @param[in] gpu GPU target
- */
- ClGemmDefaultConfigNativeMidgard(GPUTarget gpu);
-
- // Inherited overridden method
- std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> configure(unsigned int m, unsigned int n, unsigned int k, unsigned int b, DataType data_type) override;
-
-private:
- std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> default_q8(unsigned int m, unsigned int n, unsigned int k, unsigned int b);
-};
-} // namespace gemm
-} // namespace kernels
-} // namespace opencl
-} // namespace arm_compute
-#endif /* ARM_COMPUTE_CL_GEMM_DEFAULT_CONFIG_NATIVE_MIDGARD_H */
diff --git a/src/core/gpu/cl/kernels/gemm/native/ClGemmDefaultConfigNativeValhall.cpp b/src/core/gpu/cl/kernels/gemm/native/ClGemmDefaultConfigNativeValhall.cpp
deleted file mode 100644
index 92767aca52..0000000000
--- a/src/core/gpu/cl/kernels/gemm/native/ClGemmDefaultConfigNativeValhall.cpp
+++ /dev/null
@@ -1,168 +0,0 @@
-/*
- * Copyright (c) 2020-2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "src/core/gpu/cl/kernels/gemm/native/ClGemmDefaultConfigNativeValhall.h"
-
-#include "arm_compute/core/CL/CLHelpers.h"
-#include "arm_compute/core/CL/CLKernelLibrary.h"
-#include "arm_compute/core/GPUTarget.h"
-#include "src/core/gpu/cl/kernels/gemm/ClGemmHelpers.h"
-
-#include <utility>
-
-namespace arm_compute
-{
-namespace opencl
-{
-namespace kernels
-{
-namespace gemm
-{
-ClGemmDefaultConfigNativeValhall::ClGemmDefaultConfigNativeValhall(GPUTarget gpu)
- : IClGemmKernelConfig(gpu)
-{
-}
-
-std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> ClGemmDefaultConfigNativeValhall::configure(unsigned int m, unsigned int n, unsigned int k, unsigned int b, DataType data_type)
-{
- using ConfigurationFunctionExecutorPtr = std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> (ClGemmDefaultConfigNativeValhall::*)(unsigned int m, unsigned int n, unsigned int k,
- unsigned int b);
-
- CLGEMMConfigArray<ConfigurationFunctionExecutorPtr> configs_default(&ClGemmDefaultConfigNativeValhall::configure_G77_f32,
- &ClGemmDefaultConfigNativeValhall::configure_G77_f16,
- &ClGemmDefaultConfigNativeValhall::configure_G77_u8);
-
- auto func = configs_default.get_function(data_type);
- ARM_COMPUTE_ERROR_ON_MSG(func == nullptr, "Data type not supported for GEMM");
- return (this->*func)(m, n, k, b);
-}
-
-std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> ClGemmDefaultConfigNativeValhall::configure_G77_f32(unsigned int m, unsigned int n, unsigned int k, unsigned int b)
-{
- ARM_COMPUTE_UNUSED(k);
- ARM_COMPUTE_UNUSED(b);
-
- if(m == 1)
- {
- if(n < 2048)
- {
- return configure_lhs_rhs_info(m, n, 1, 2, 4, 1, 1, false, false, false, false);
- }
- else if(n >= 2048 && n < 8192)
- {
- return configure_lhs_rhs_info(m, n, 1, 4, 4, 1, 1, false, false, false, false);
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 1, 8, 4, 1, 1, false, false, false, false);
- }
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 5, 4, 2, 1, 1, false, false, false, false);
- }
-}
-
-std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> ClGemmDefaultConfigNativeValhall::configure_G77_f16(unsigned int m, unsigned int n, unsigned int k, unsigned int b)
-{
- ARM_COMPUTE_UNUSED(k);
- ARM_COMPUTE_UNUSED(b);
-
- if(m == 1)
- {
- if(n < 2048)
- {
- return configure_lhs_rhs_info(m, n, 1, 2, 4, 1, 1, false, false, false, false);
- }
- else if(n >= 2048 && n < 8192)
- {
- return configure_lhs_rhs_info(m, n, 1, 4, 4, 1, 1, false, false, false, false);
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 1, 8, 4, 1, 1, false, false, false, false);
- }
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 4, 8, 2, 1, 1, false, false, false, false);
- }
-}
-
-std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> ClGemmDefaultConfigNativeValhall::configure_G77_u8(unsigned int m, unsigned int n, unsigned int k, unsigned int b)
-{
- ARM_COMPUTE_UNUSED(k);
- ARM_COMPUTE_UNUSED(b);
-
- if(dot8_supported(CLKernelLibrary::get().get_device()))
- {
- if(m == 1)
- {
- if(n < 2048)
- {
- return configure_lhs_rhs_info(m, n, 1, 2, 16, 1, 1, false, false, false, false);
- }
- else if(n >= 2048 && n < 16384)
- {
- return configure_lhs_rhs_info(m, n, 1, 4, 16, 1, 1, false, false, false, false);
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 1, 8, 16, 1, 1, false, false, false, false);
- }
- }
- else
- {
- if(m < 64)
- {
- return configure_lhs_rhs_info(m, n, 2, 2, 16, 1, 1, false, false, false, false);
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 5, 2, 16, 1, 1, false, false, false, false);
- }
- }
- }
- else
- {
- if(m == 1)
- {
- if(n < 8192)
- {
- return configure_lhs_rhs_info(m, n, 1, 4, 16, 1, 1, false, false, false, false);
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 1, 8, 16, 1, 1, false, false, false, false);
- }
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 2, 8, 16, 1, 1, false, false, false, false);
- }
- }
-}
-} // namespace gemm
-} // namespace kernels
-} // namespace opencl
-} // namespace arm_compute \ No newline at end of file
diff --git a/src/core/gpu/cl/kernels/gemm/native/ClGemmDefaultConfigNativeValhall.h b/src/core/gpu/cl/kernels/gemm/native/ClGemmDefaultConfigNativeValhall.h
deleted file mode 100644
index 17e4c9d339..0000000000
--- a/src/core/gpu/cl/kernels/gemm/native/ClGemmDefaultConfigNativeValhall.h
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * Copyright (c) 2020-2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_CL_GEMM_DEFAULT_CONFIG_NATIVE_VALHALL_H
-#define ARM_COMPUTE_CL_GEMM_DEFAULT_CONFIG_NATIVE_VALHALL_H
-
-#include "src/core/gpu/cl/kernels/gemm/IClGemmKernelConfig.h"
-
-namespace arm_compute
-{
-namespace opencl
-{
-namespace kernels
-{
-namespace gemm
-{
-/** Valhall based OpenCL GEMMNative configuration */
-class ClGemmDefaultConfigNativeValhall final : public IClGemmKernelConfig
-{
-public:
- /** Constructor
- *
- * @param[in] gpu GPU target
- */
- ClGemmDefaultConfigNativeValhall(GPUTarget gpu);
-
- // Inherited overridden method
- std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> configure(unsigned int m, unsigned int n, unsigned int k, unsigned int b, DataType data_type) override;
-
-private:
- std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> configure_G77_f32(unsigned int m, unsigned int n, unsigned int k, unsigned int b);
- std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> configure_G77_f16(unsigned int m, unsigned int n, unsigned int k, unsigned int b);
- std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> configure_G77_u8(unsigned int m, unsigned int n, unsigned int k, unsigned int b);
-};
-} // namespace gemm
-} // namespace kernels
-} // namespace opencl
-} // namespace arm_compute
-#endif /* ARM_COMPUTE_CL_GEMM_DEFAULT_CONFIG_NATIVE_VALHALL_H */
diff --git a/src/core/gpu/cl/kernels/gemm/native/ClGemmNativeKernelConfig.h b/src/core/gpu/cl/kernels/gemm/native/ClGemmNativeKernelConfig.h
deleted file mode 100644
index ff6a0128af..0000000000
--- a/src/core/gpu/cl/kernels/gemm/native/ClGemmNativeKernelConfig.h
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * Copyright (c) 2019-2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_CL_GEMM_NATIVE_KERNEL_CONFIGURATION_H
-#define ARM_COMPUTE_CL_GEMM_NATIVE_KERNEL_CONFIGURATION_H
-
-#include "src/core/gpu/cl/kernels/gemm/IClGemmKernelConfig.h"
-#include "src/core/gpu/cl/kernels/gemm/native/ClGemmDefaultConfigNativeBifrost.h"
-#include "src/core/gpu/cl/kernels/gemm/native/ClGemmDefaultConfigNativeMidgard.h"
-#include "src/core/gpu/cl/kernels/gemm/native/ClGemmDefaultConfigNativeValhall.h"
-
-#include <memory>
-
-namespace arm_compute
-{
-namespace opencl
-{
-namespace kernels
-{
-namespace gemm
-{
-/** CLGEMMNative factory class */
-class ClGemmNativeKernelConfigurationFactory final
-{
-public:
- /** Static method to construct a CLGEMMNative kernel configuration object according to the GPU target
- *
- * @param[in] gpu GPU target
- *
- * @return CLGEMMNative kernel configuration class
- */
- static std::unique_ptr<IClGemmKernelConfig> create(GPUTarget gpu)
- {
- switch(get_arch_from_target(gpu))
- {
- case GPUTarget::MIDGARD:
- return std::make_unique<ClGemmDefaultConfigNativeMidgard>(gpu);
- case GPUTarget::BIFROST:
- return std::make_unique<ClGemmDefaultConfigNativeBifrost>(gpu);
- case GPUTarget::VALHALL:
- return std::make_unique<ClGemmDefaultConfigNativeValhall>(gpu);
- default:
- ARM_COMPUTE_ERROR("Not supported GPU target");
- }
- }
-};
-} // namespace gemm
-} // namespace kernels
-} // namespace opencl
-} // namespace arm_compute
-#endif /*ARM_COMPUTE_CL_GEMM_NATIVE_KERNEL_CONFIGURATION_H */
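
For reference, a hedged usage sketch of the factory above, written against the tree as it existed before this removal; the GPU target, data type and problem sizes are hypothetical.

// Sketch (not library code) of how a caller selected a native GEMM heuristic
// before this patch; header paths refer to the pre-removal tree.
#include <utility>

#include "arm_compute/core/GPUTarget.h"
#include "arm_compute/core/Types.h"
#include "src/core/gpu/cl/kernels/gemm/native/ClGemmNativeKernelConfig.h"

std::pair<arm_compute::GEMMLHSMatrixInfo, arm_compute::GEMMRHSMatrixInfo>
pick_native_gemm_config(unsigned int m, unsigned int n, unsigned int k, unsigned int b)
{
    using namespace arm_compute;
    using namespace arm_compute::opencl::kernels::gemm;

    // Pick the heuristic class for the GPU architecture, then query the LHS/RHS
    // block sizes to use with the native GEMM kernel for an F32 problem.
    auto config = ClGemmNativeKernelConfigurationFactory::create(GPUTarget::G76);
    return config->configure(m, n, k, b, DataType::F32);
}
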
diff --git a/src/core/gpu/cl/kernels/gemm/reshaped/ClGemmDefaultConfigReshapedBifrost.cpp b/src/core/gpu/cl/kernels/gemm/reshaped/ClGemmDefaultConfigReshapedBifrost.cpp
deleted file mode 100644
index b030913a87..0000000000
--- a/src/core/gpu/cl/kernels/gemm/reshaped/ClGemmDefaultConfigReshapedBifrost.cpp
+++ /dev/null
@@ -1,356 +0,0 @@
-/*
- * Copyright (c) 2019-2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "src/core/gpu/cl/kernels/gemm/reshaped/ClGemmDefaultConfigReshapedBifrost.h"
-
-#include "arm_compute/core/CL/CLHelpers.h"
-#include "arm_compute/core/CL/CLKernelLibrary.h"
-#include "arm_compute/core/GPUTarget.h"
-#include "arm_compute/core/TensorInfo.h"
-#include "arm_compute/core/TensorShape.h"
-#include "arm_compute/core/utils/misc/ShapeCalculator.h"
-#include "src/core/gpu/cl/kernels/gemm/ClGemmHelpers.h"
-
-#include <utility>
-
-namespace arm_compute
-{
-namespace opencl
-{
-namespace kernels
-{
-namespace gemm
-{
-using namespace arm_compute::misc::shape_calculator;
-
-ClGemmDefaultConfigReshapedBifrost::ClGemmDefaultConfigReshapedBifrost(GPUTarget gpu)
- : IClGemmKernelConfig(gpu)
-{
-}
-
-std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> ClGemmDefaultConfigReshapedBifrost::configure(unsigned int m, unsigned int n, unsigned int k, unsigned int b, DataType data_type)
-{
- using ConfigurationFunctionExecutorPtr = std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> (ClGemmDefaultConfigReshapedBifrost::*)(unsigned int m, unsigned int n, unsigned int k, unsigned int b);
-
- CLGEMMConfigArray<ConfigurationFunctionExecutorPtr> configs_G7x(&ClGemmDefaultConfigReshapedBifrost::configure_G7x_f32,
- &ClGemmDefaultConfigReshapedBifrost::configure_G7x_f16,
- &ClGemmDefaultConfigReshapedBifrost::configure_G7x_u8);
-
- CLGEMMConfigArray<ConfigurationFunctionExecutorPtr> configs_G52(&ClGemmDefaultConfigReshapedBifrost::configure_G52_f32,
- &ClGemmDefaultConfigReshapedBifrost::configure_G52_f16,
- &ClGemmDefaultConfigReshapedBifrost::configure_G7x_u8);
-
- CLGEMMConfigArray<ConfigurationFunctionExecutorPtr> configs_G76(&ClGemmDefaultConfigReshapedBifrost::configure_G76_f32,
- &ClGemmDefaultConfigReshapedBifrost::configure_G76_f16,
- &ClGemmDefaultConfigReshapedBifrost::configure_G76_u8);
-
- ConfigurationFunctionExecutorPtr func = nullptr;
-
- switch(_target)
- {
- case GPUTarget::G76:
- func = configs_G76.get_function(data_type);
- break;
- case GPUTarget::G52:
- func = configs_G52.get_function(data_type);
- break;
- default:
- func = configs_G7x.get_function(data_type);
- break;
- }
-
- ARM_COMPUTE_ERROR_ON_MSG(func == nullptr, "Data type not supported for GEMM");
- return (this->*func)(m, n, k, b);
-}
-
-std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> ClGemmDefaultConfigReshapedBifrost::configure_G7x_f32(unsigned int m, unsigned int n, unsigned int k, unsigned int b)
-{
- ARM_COMPUTE_UNUSED(k);
- ARM_COMPUTE_UNUSED(b);
-
- if(n <= 4)
- {
- return configure_lhs_rhs_info(m, n, 4, 2, 8, 16, 16, true, false, false, true);
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 5, 4, 4, 2, 16, false, true, false, true);
- }
-}
-
-std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> ClGemmDefaultConfigReshapedBifrost::configure_G7x_f16(unsigned int m, unsigned int n, unsigned int k, unsigned int b)
-{
- ARM_COMPUTE_UNUSED(k);
- ARM_COMPUTE_UNUSED(b);
-
- if(n <= 4)
- {
- return configure_lhs_rhs_info(m, n, 4, 2, 8, 8, 2, true, true, true, false);
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 4, 8, 4, 4, 2, true, true, true, false);
- }
-}
-
-std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> ClGemmDefaultConfigReshapedBifrost::configure_G7x_u8(unsigned int m, unsigned int n, unsigned int k, unsigned int b)
-{
- ARM_COMPUTE_UNUSED(k);
- ARM_COMPUTE_UNUSED(b);
-
- if(dot8_supported(CLKernelLibrary::get().get_device()))
- {
- if(n <= 4)
- {
- return configure_lhs_rhs_info(m, n, 4, 2, 16, 2, 2, true, false, false, true);
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 4, 4, 16, 2, 2, true, false, false, true);
- }
- }
- else
- {
- if(n <= 4)
- {
- return configure_lhs_rhs_info(m, n, 4, 2, 8, 2, 2, true, false, false, true);
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 6, 4, 4, 2, 2, true, true, false, true);
- }
- }
-}
-
-std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> ClGemmDefaultConfigReshapedBifrost::configure_G52_f32(unsigned int m, unsigned int n, unsigned int k, unsigned int b)
-{
- const float r_mn = static_cast<float>(m) / static_cast<float>(n);
- const float workload = (static_cast<float>(m) * static_cast<float>(n) * static_cast<float>(b)) / 20.0f;
- const float r_mk = static_cast<float>(m) / static_cast<float>(k);
- const float r_nk = static_cast<float>(n) / static_cast<float>(k);
-
- GEMMLHSMatrixInfo lhs_info_buf;
- GEMMRHSMatrixInfo rhs_info_buf;
- GEMMLHSMatrixInfo lhs_info_img;
- GEMMRHSMatrixInfo rhs_info_img;
-
- if(workload <= 274.4000f)
- {
- if(r_nk <= 0.7461f)
- {
- if(r_mn <= 21.1667f)
- {
- return configure_lhs_rhs_info(m, n, 4, 2, 4, 4, 4, false, true, true, false, false);
- }
- else
- {
- std::tie(lhs_info_img, rhs_info_img) = configure_lhs_rhs_info(m, n, 4, 4, 4, 4, 2, true, true, false, true, true);
- std::tie(lhs_info_buf, rhs_info_buf) = configure_lhs_rhs_info(m, n, 4, 4, 4, 4, 2, true, true, false, true, false);
-
- return select_lhs_rhs_info(std::make_pair(lhs_info_img, rhs_info_img),
- std::make_pair(lhs_info_buf, rhs_info_buf),
- n, k, b, DataType::F32);
- }
- }
- else
- {
- std::tie(lhs_info_img, rhs_info_img) = configure_lhs_rhs_info(m, n, 4, 4, 4, 4, 2, true, true, false, true, true);
- std::tie(lhs_info_buf, rhs_info_buf) = configure_lhs_rhs_info(m, n, 4, 4, 4, 4, 2, true, true, false, true, false);
-
- return select_lhs_rhs_info(std::make_pair(lhs_info_img, rhs_info_img),
- std::make_pair(lhs_info_buf, rhs_info_buf),
- n, k, b, DataType::F32);
- }
- }
- else
- {
- if(r_mk <= 17.3926f)
- {
- if(workload <= 542.4000f)
- {
- std::tie(lhs_info_img, rhs_info_img) = configure_lhs_rhs_info(m, n, 4, 4, 4, 4, 2, true, true, false, true, true);
- std::tie(lhs_info_buf, rhs_info_buf) = configure_lhs_rhs_info(m, n, 4, 4, 4, 4, 2, true, true, false, true, false);
-
- return select_lhs_rhs_info(std::make_pair(lhs_info_img, rhs_info_img),
- std::make_pair(lhs_info_buf, rhs_info_buf),
- n, k, b, DataType::F32);
- }
- else
- {
- std::tie(lhs_info_img, rhs_info_img) = configure_lhs_rhs_info(m, n, 4, 4, 4, 2, 1, true, true, false, true, true);
- std::tie(lhs_info_buf, rhs_info_buf) = configure_lhs_rhs_info(m, n, 4, 4, 4, 2, 1, true, true, false, true, false);
-
- return select_lhs_rhs_info(std::make_pair(lhs_info_img, rhs_info_img),
- std::make_pair(lhs_info_buf, rhs_info_buf),
- n, k, b, DataType::F32);
- }
- }
- else
- {
- if(r_nk <= 0.5463f)
- {
- if(workload <= 11767.6001f)
- {
- std::tie(lhs_info_img, rhs_info_img) = configure_lhs_rhs_info(m, n, 4, 4, 4, 4, 2, true, true, false, true, true);
- std::tie(lhs_info_buf, rhs_info_buf) = configure_lhs_rhs_info(m, n, 4, 4, 4, 4, 2, true, true, false, true, false);
-
- return select_lhs_rhs_info(std::make_pair(lhs_info_img, rhs_info_img),
- std::make_pair(lhs_info_buf, rhs_info_buf),
- n, k, b, DataType::F32);
- }
- else
- {
- std::tie(lhs_info_img, rhs_info_img) = configure_lhs_rhs_info(m, n, 4, 4, 4, 2, 1, true, true, false, true, true);
- std::tie(lhs_info_buf, rhs_info_buf) = configure_lhs_rhs_info(m, n, 4, 4, 4, 2, 1, true, true, false, true, false);
-
- return select_lhs_rhs_info(std::make_pair(lhs_info_img, rhs_info_img),
- std::make_pair(lhs_info_buf, rhs_info_buf),
- n, k, b, DataType::F32);
- }
- }
- else
- {
- std::tie(lhs_info_img, rhs_info_img) = configure_lhs_rhs_info(m, n, 4, 4, 4, 4, 2, true, true, false, true, true);
- std::tie(lhs_info_buf, rhs_info_buf) = configure_lhs_rhs_info(m, n, 4, 4, 4, 4, 2, true, true, false, true, false);
-
- return select_lhs_rhs_info(std::make_pair(lhs_info_img, rhs_info_img),
- std::make_pair(lhs_info_buf, rhs_info_buf),
- n, k, b, DataType::F32);
- }
- }
- }
-}
-
-std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> ClGemmDefaultConfigReshapedBifrost::configure_G52_f16(unsigned int m, unsigned int n, unsigned int k, unsigned int b)
-{
- ARM_COMPUTE_UNUSED(k);
-
- const float workload = (static_cast<float>(m) * static_cast<float>(n) * static_cast<float>(b)) / 20.0f;
-
- if(workload <= 323.4000f)
- {
- return configure_lhs_rhs_info(m, n, 2, 2, 8, 4, 8, false, false, false, true, false);
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 4, 8, 4, 2, 2, true, true, true, false, false);
- }
-}
-
-std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> ClGemmDefaultConfigReshapedBifrost::configure_G76_f32(unsigned int m, unsigned int n, unsigned int k, unsigned int b)
-{
- ARM_COMPUTE_UNUSED(k);
- ARM_COMPUTE_UNUSED(b);
-
- GEMMLHSMatrixInfo lhs_info_buf;
- GEMMRHSMatrixInfo rhs_info_buf;
- GEMMLHSMatrixInfo lhs_info_img;
- GEMMRHSMatrixInfo rhs_info_img;
-
- // Get lhs_info/rhs_info in case of OpenCL buffer
- if(n <= 4)
- {
- std::tie(lhs_info_buf, rhs_info_buf) = configure_lhs_rhs_info(m, n, 4, 2, 8, 16, 16, true, false, false, true);
- }
- else
- {
- std::tie(lhs_info_buf, rhs_info_buf) = configure_lhs_rhs_info(m, n, 4, 4, 2, 8, 16, false, false, false, true);
- }
-
- // Get lhs_info/rhs_info in case of OpenCL image
- // Condition on the GPU workload
- if((m / 4) * (n / 4) >= 2560)
- {
- // Big workload
- std::tie(lhs_info_img, rhs_info_img) = configure_lhs_rhs_info(m, n, 4, 4, 4, 2, 8, true, true, true, false, true);
- }
- else
- {
- // Small workload
- std::tie(lhs_info_img, rhs_info_img) = configure_lhs_rhs_info(m, n, 2, 4, 4, 1, 1, true, true, true, false, true);
- }
-
- const TensorInfo tensor_rhs_info(TensorShape(n, k, b), 1, DataType::F32);
- const TensorShape shape = compute_rhs_reshaped_shape(tensor_rhs_info, rhs_info_img);
- const TensorInfo tensor_reshaped_info(shape, 1, DataType::F32);
-
- // In case of vector by matrix with few work-items, we use the OpenCL buffer rather than the OpenCL image2d
- const bool use_cl_image2d = (n <= 4) ? false : true;
-
- if(bool(validate_image2d_support_on_rhs(tensor_reshaped_info, rhs_info_img)) && use_cl_image2d)
- {
- return std::make_pair(lhs_info_img, rhs_info_img);
- }
- else
- {
- return std::make_pair(lhs_info_buf, rhs_info_buf);
- }
-}
-
-std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> ClGemmDefaultConfigReshapedBifrost::configure_G76_f16(unsigned int m, unsigned int n, unsigned int k, unsigned int b)
-{
- const float workload = (static_cast<float>(m) * static_cast<float>(n) * static_cast<float>(b)) / 20.0f;
- const float r_mk = static_cast<float>(m) / static_cast<float>(k);
-
- if(workload <= 1595.2000f)
- {
- if(r_mk <= 2.1044f)
- {
- if(workload <= 870.4000f)
- {
- return configure_lhs_rhs_info(m, n, 2, 4, 4, 1, 2, true, false, true, false, false);
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 4, 2, 4, 2, 2, false, false, true, false, false);
- }
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 4, 2, 4, 2, 2, false, false, true, false, false);
- }
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 4, 8, 4, 4, 2, true, true, true, false, false);
- }
-}
-
-std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> ClGemmDefaultConfigReshapedBifrost::configure_G76_u8(unsigned int m, unsigned int n, unsigned int k, unsigned int b)
-{
- ARM_COMPUTE_UNUSED(k);
- ARM_COMPUTE_UNUSED(b);
-
- if(n <= 4)
- {
- return configure_lhs_rhs_info(m, n, 4, 2, 16, 4, 1, false, false, false, true);
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 4, 4, 16, 2, 2, false, true, false, true);
- }
-}
-} // namespace gemm
-} // namespace kernels
-} // namespace opencl
-} // namespace arm_compute
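The decision trees in the file above branch on simple shape statistics: workload = (m * n * b) / 20 and the ratios r_mn = m / n, r_mk = m / k, r_nk = n / k. As an illustration (shape chosen arbitrarily), tracing configure_G52_f32 for m = 64, n = 64, k = 256, b = 1 gives:

    workload = (64 * 64 * 1) / 20 = 204.8  -> workload <= 274.4, take the first branch
    r_nk     = 64 / 256           = 0.25   -> r_nk <= 0.7461
    r_mn     = 64 / 64            = 1.0    -> r_mn <= 21.1667
    => configure_lhs_rhs_info(m, n, 4, 2, 4, 4, 4, false, true, true, false, false)

Larger shapes fall through to branches that build both a buffer and an image2d candidate and let select_lhs_rhs_info, together with validate_image2d_support_on_rhs, decide at run time whether the reshaped RHS can be exported to a cl_image.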
diff --git a/src/core/gpu/cl/kernels/gemm/reshaped/ClGemmDefaultConfigReshapedBifrost.h b/src/core/gpu/cl/kernels/gemm/reshaped/ClGemmDefaultConfigReshapedBifrost.h
deleted file mode 100644
index 52e6ce3f48..0000000000
--- a/src/core/gpu/cl/kernels/gemm/reshaped/ClGemmDefaultConfigReshapedBifrost.h
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Copyright (c) 2019-2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_CL_GEMM_DEFAULT_CONFIG_RESHAPED_BIFROST_H
-#define ARM_COMPUTE_CL_GEMM_DEFAULT_CONFIG_RESHAPED_BIFROST_H
-
-#include "src/core/gpu/cl/kernels/gemm/IClGemmKernelConfig.h"
-
-namespace arm_compute
-{
-namespace opencl
-{
-namespace kernels
-{
-namespace gemm
-{
-/** Bifrost based OpenCL GEMMReshaped configuration */
-class ClGemmDefaultConfigReshapedBifrost final : public IClGemmKernelConfig
-{
-public:
- /** Constructor
- *
- * @param[in] gpu GPU target
- */
- ClGemmDefaultConfigReshapedBifrost(GPUTarget gpu);
-
- // Inherited overridden method
- std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> configure(unsigned int m, unsigned int n, unsigned int k, unsigned int b, DataType data_type) override;
-
-private:
- std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> configure_G7x_f32(unsigned int m, unsigned int n, unsigned int k, unsigned int b);
- std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> configure_G52_f32(unsigned int m, unsigned int n, unsigned int k, unsigned int b);
- std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> configure_G76_f32(unsigned int m, unsigned int n, unsigned int k, unsigned int b);
- std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> configure_G7x_f16(unsigned int m, unsigned int n, unsigned int k, unsigned int b);
- std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> configure_G52_f16(unsigned int m, unsigned int n, unsigned int k, unsigned int b);
- std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> configure_G76_f16(unsigned int m, unsigned int n, unsigned int k, unsigned int b);
- std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> configure_G7x_u8(unsigned int m, unsigned int n, unsigned int k, unsigned int b);
- std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> configure_G76_u8(unsigned int m, unsigned int n, unsigned int k, unsigned int b);
-};
-} // namespace gemm
-} // namespace kernels
-} // namespace opencl
-} // namespace arm_compute
-#endif /* ARM_COMPUTE_CL_GEMM_DEFAULT_CONFIG_RESHAPED_BIFROST_H */
diff --git a/src/core/gpu/cl/kernels/gemm/reshaped/ClGemmDefaultConfigReshapedValhall.cpp b/src/core/gpu/cl/kernels/gemm/reshaped/ClGemmDefaultConfigReshapedValhall.cpp
deleted file mode 100644
index 57e42c92b3..0000000000
--- a/src/core/gpu/cl/kernels/gemm/reshaped/ClGemmDefaultConfigReshapedValhall.cpp
+++ /dev/null
@@ -1,538 +0,0 @@
-/*
- * Copyright (c) 2020-2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "src/core/gpu/cl/kernels/gemm/reshaped/ClGemmDefaultConfigReshapedValhall.h"
-
-#include "arm_compute/core/CL/CLHelpers.h"
-#include "arm_compute/core/CL/CLKernelLibrary.h"
-#include "arm_compute/core/GPUTarget.h"
-#include "src/core/gpu/cl/kernels/gemm/ClGemmHelpers.h"
-
-#include <utility>
-
-namespace arm_compute
-{
-namespace opencl
-{
-namespace kernels
-{
-namespace gemm
-{
-ClGemmDefaultConfigReshapedValhall::ClGemmDefaultConfigReshapedValhall(GPUTarget gpu)
- : IClGemmKernelConfig(gpu)
-{
-}
-
-std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> ClGemmDefaultConfigReshapedValhall::configure(unsigned int m, unsigned int n, unsigned int k, unsigned int b, DataType data_type)
-{
- using ConfigurationFunctionExecutorPtr = std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> (ClGemmDefaultConfigReshapedValhall::*)(unsigned int m, unsigned int n, unsigned int k, unsigned int b);
-
- CLGEMMConfigArray<ConfigurationFunctionExecutorPtr> configs_G77(&ClGemmDefaultConfigReshapedValhall::configure_G77_f32,
- &ClGemmDefaultConfigReshapedValhall::configure_G77_f16,
- &ClGemmDefaultConfigReshapedValhall::configure_G77_u8);
-
- CLGEMMConfigArray<ConfigurationFunctionExecutorPtr> configs_G78(&ClGemmDefaultConfigReshapedValhall::configure_G78_f32,
- &ClGemmDefaultConfigReshapedValhall::configure_G78_f16,
- &ClGemmDefaultConfigReshapedValhall::configure_G77_u8);
-
- ConfigurationFunctionExecutorPtr func = nullptr;
-
- switch(_target)
- {
- case GPUTarget::G78:
- func = configs_G78.get_function(data_type);
- break;
- case GPUTarget::G77:
- default:
- func = configs_G77.get_function(data_type);
- break;
- }
-
- ARM_COMPUTE_ERROR_ON_MSG(func == nullptr, "Data type not supported for GEMM");
- return (this->*func)(m, n, k, b);
-}
-
-std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> ClGemmDefaultConfigReshapedValhall::configure_G77_f32(unsigned int m, unsigned int n, unsigned int k, unsigned int b)
-{
- ARM_COMPUTE_UNUSED(k);
- ARM_COMPUTE_UNUSED(b);
-
- if(n <= 4)
- {
- return configure_lhs_rhs_info(m, n, 4, 2, 8, 16, 16, 1, 0, 0, 1);
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 5, 4, 4, 2, 16, 0, 1, 0, 1);
- }
-}
-
-std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> ClGemmDefaultConfigReshapedValhall::configure_G77_f16(unsigned int m, unsigned int n, unsigned int k, unsigned int b)
-{
- ARM_COMPUTE_UNUSED(k);
- ARM_COMPUTE_UNUSED(b);
-
- const float r_mn = static_cast<float>(m) / static_cast<float>(n);
- const float workload = (static_cast<float>(m) * static_cast<float>(n) * static_cast<float>(b)) / 20.0f;
- const float r_mk = static_cast<float>(m) / static_cast<float>(k);
- const float r_nk = static_cast<float>(n) / static_cast<float>(k);
-
- GEMMLHSMatrixInfo lhs_info_buf;
- GEMMRHSMatrixInfo rhs_info_buf;
- GEMMLHSMatrixInfo lhs_info_img;
- GEMMRHSMatrixInfo rhs_info_img;
-
- std::tie(lhs_info_buf, rhs_info_buf) = configure_lhs_rhs_info(m, n, 4, 4, 4, 4, 4, 0, 0, 1, 0, 0);
-
- if(r_mk <= 0.11824845522642136)
- {
- if(workload <= 880.0)
- {
- return configure_lhs_rhs_info(m, n, 2, 4, 4, 1, 4, 0, 0, 1, 0, 0);
- }
- else
- {
- if(r_nk <= 0.42521367967128754)
- {
- if(workload <= 1726.4000244140625)
- {
- return configure_lhs_rhs_info(m, n, 4, 4, 4, 2, 2, 0, 0, 1, 0, 0);
- }
- else
- {
- std::tie(lhs_info_img, rhs_info_img) = configure_lhs_rhs_info(m, n, 4, 4, 4, 2, 1, 0, 1, 1, 0, 1);
-
- return select_lhs_rhs_info(std::make_pair(lhs_info_img, rhs_info_img),
- std::make_pair(lhs_info_buf, rhs_info_buf),
- n, k, b, DataType::F16);
- }
- }
- else
- {
- if(workload <= 1241.6000366210938)
- {
- return configure_lhs_rhs_info(m, n, 2, 4, 4, 1, 4, 0, 0, 1, 0, 0);
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 4, 4, 4, 4, 4, 0, 0, 1, 0, 0);
- }
- }
- }
- }
- else
- {
- if(workload <= 11404.7998046875)
- {
- if(r_mk <= 1.0126488208770752)
- {
- if(r_mn <= 2.545312523841858)
- {
- std::tie(lhs_info_img, rhs_info_img) = configure_lhs_rhs_info(m, n, 4, 4, 4, 2, 1, 0, 1, 1, 0, 1);
-
- return select_lhs_rhs_info(std::make_pair(lhs_info_img, rhs_info_img),
- std::make_pair(lhs_info_buf, rhs_info_buf),
- n, k, b, DataType::F16);
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 2, 4, 4, 1, 4, 0, 0, 1, 0, 0);
- }
- }
- else
- {
- if(workload <= 2881.199951171875)
- {
- std::tie(lhs_info_img, rhs_info_img) = configure_lhs_rhs_info(m, n, 4, 4, 4, 4, 2, 0, 0, 1, 0, 1);
-
- return select_lhs_rhs_info(std::make_pair(lhs_info_img, rhs_info_img),
- std::make_pair(lhs_info_buf, rhs_info_buf),
- n, k, b, DataType::F16);
- }
- else
- {
- std::tie(lhs_info_img, rhs_info_img) = configure_lhs_rhs_info(m, n, 4, 4, 4, 2, 1, 0, 1, 1, 0, 1);
-
- return select_lhs_rhs_info(std::make_pair(lhs_info_img, rhs_info_img),
- std::make_pair(lhs_info_buf, rhs_info_buf),
- n, k, b, DataType::F16);
- }
- }
- }
- else
- {
- if(r_nk <= 0.5765306055545807)
- {
- if(r_mn <= 6.010416746139526)
- {
- std::tie(lhs_info_img, rhs_info_img) = configure_lhs_rhs_info(m, n, 4, 4, 4, 2, 1, 0, 1, 1, 0, 1);
-
- return select_lhs_rhs_info(std::make_pair(lhs_info_img, rhs_info_img),
- std::make_pair(lhs_info_buf, rhs_info_buf),
- n, k, b, DataType::F16);
- }
- else
- {
- std::tie(lhs_info_img, rhs_info_img) = configure_lhs_rhs_info(m, n, 4, 4, 4, 2, 1, 1, 0, 1, 0, 1);
-
- return select_lhs_rhs_info(std::make_pair(lhs_info_img, rhs_info_img),
- std::make_pair(lhs_info_buf, rhs_info_buf),
- n, k, b, DataType::F16);
- }
- }
- else
- {
- std::tie(lhs_info_img, rhs_info_img) = configure_lhs_rhs_info(m, n, 4, 4, 4, 2, 1, 1, 0, 1, 0, 1);
-
- return select_lhs_rhs_info(std::make_pair(lhs_info_img, rhs_info_img),
- std::make_pair(lhs_info_buf, rhs_info_buf),
- n, k, b, DataType::F16);
- }
- }
- }
-}
-
-std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> ClGemmDefaultConfigReshapedValhall::configure_G78_f32(unsigned int m, unsigned int n, unsigned int k, unsigned int b)
-{
- const float r_mn = static_cast<float>(m) / static_cast<float>(n);
- const float r_mk = static_cast<float>(m) / static_cast<float>(k);
- const float r_nk = static_cast<float>(n) / static_cast<float>(k);
- const float workload = (static_cast<float>(m) * static_cast<float>(n) * static_cast<float>(b)) / 20.0f;
-
- if(workload <= 1288.0000f)
- {
- if(workload <= 505.6000f)
- {
- if(r_mn <= 0.4466f)
- {
- if(r_nk <= 0.2384f)
- {
- return configure_lhs_rhs_info(m, n, 2, 4, 8, 4, 4, 0, 0, 1, 0, 1);
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 2, 2, 4, 2, 2, 0, 0, 1, 0, 0);
- }
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 2, 2, 4, 2, 2, 0, 0, 1, 0, 0);
- }
- }
- else
- {
- if(r_mn <= 0.2250f)
- {
- if(r_mn <= 0.1599f)
- {
- return configure_lhs_rhs_info(m, n, 2, 4, 8, 4, 4, 0, 0, 1, 0, 1);
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 4, 4, 4, 2, 2, 0, 0, 1, 0, 1);
- }
- }
- else
- {
- if(r_mk <= 0.7609f)
- {
- if(r_mn <= 2.5453f)
- {
- if(workload <= 1089.6000f)
- {
- return configure_lhs_rhs_info(m, n, 2, 4, 8, 4, 4, 0, 0, 1, 0, 1);
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 2, 4, 8, 2, 4, 0, 0, 1, 0, 1);
- }
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 2, 4, 16, 4, 4, 0, 0, 1, 0, 1);
- }
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 2, 4, 8, 4, 4, 0, 0, 1, 0, 1);
- }
- }
- }
- }
- else
- {
- if(workload <= 5434.4001f)
- {
- if(workload <= 1603.2000f)
- {
- return configure_lhs_rhs_info(m, n, 4, 4, 4, 2, 2, 0, 0, 1, 0, 1);
- }
- else
- {
- if(r_nk <= 0.6192f)
- {
- if(r_mn <= 16.1016f)
- {
- return configure_lhs_rhs_info(m, n, 4, 4, 4, 2, 2, 0, 0, 1, 0, 1);
- }
- else
- {
- if(workload <= 2750.0000f)
- {
- return configure_lhs_rhs_info(m, n, 4, 4, 4, 2, 2, 0, 0, 1, 0, 1);
- }
- else
- {
- if(r_mk <= 6.3151f)
- {
- return configure_lhs_rhs_info(m, n, 4, 4, 4, 4, 4, 0, 0, 0, 1, 1);
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 4, 4, 4, 2, 2, 0, 0, 1, 0, 1);
- }
- }
- }
- }
- else
- {
- if(r_mk <= 0.0387f)
- {
- return configure_lhs_rhs_info(m, n, 4, 4, 4, 4, 4, 0, 0, 1, 0, 1);
- }
- else
- {
- if(r_mk <= 2.5859f)
- {
- if(r_mk <= 0.2734f)
- {
- return configure_lhs_rhs_info(m, n, 4, 4, 4, 4, 4, 0, 0, 1, 0, 1);
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 4, 4, 4, 2, 2, 0, 0, 1, 0, 1);
- }
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 4, 4, 4, 2, 2, 0, 0, 1, 0, 1);
- }
- }
- }
- }
- }
- else
- {
- if(r_mk <= 25.7500f)
- {
- if(r_mk <= 0.3615f)
- {
- if(r_mn <= 0.0913f)
- {
- if(r_mk <= 0.0683f)
- {
- return configure_lhs_rhs_info(m, n, 8, 4, 4, 4, 2, 0, 0, 1, 0, 1);
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 2, 4, 8, 4, 4, 0, 0, 1, 0, 1);
- }
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 8, 4, 4, 2, 2, 0, 0, 1, 0, 1);
- }
- }
- else
- {
- if(workload <= 11174.3999f)
- {
- if(r_mk <= 0.8047f)
- {
- return configure_lhs_rhs_info(m, n, 8, 4, 4, 2, 2, 0, 0, 1, 0, 1);
- }
- else
- {
- if(workload <= 7185.5999f)
- {
- return configure_lhs_rhs_info(m, n, 4, 4, 4, 4, 4, 0, 0, 1, 0, 1);
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 8, 4, 4, 4, 2, 0, 0, 1, 0, 1);
- }
- }
- }
- else
- {
- if(workload <= 17917.5000f)
- {
- if(r_mk <= 1.5078f)
- {
- return configure_lhs_rhs_info(m, n, 4, 4, 4, 2, 2, 0, 0, 1, 0, 1);
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 4, 4, 4, 4, 4, 0, 0, 1, 0, 1);
- }
- }
- else
- {
- if(workload <= 34449.6016f)
- {
- return configure_lhs_rhs_info(m, n, 4, 4, 4, 2, 2, 0, 0, 1, 0, 1);
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 8, 4, 4, 2, 4, 0, 0, 1, 0, 1);
- }
- }
- }
- }
- }
- else
- {
- if(r_mk <= 331.1111f)
- {
- if(workload <= 53397.5996f)
- {
- if(r_mn <= 57.8063f)
- {
- return configure_lhs_rhs_info(m, n, 4, 4, 4, 2, 2, 0, 0, 1, 0, 1);
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 4, 4, 4, 4, 4, 0, 0, 0, 1, 1);
- }
- }
- else
- {
- if(r_nk <= 0.9211f)
- {
- return configure_lhs_rhs_info(m, n, 8, 4, 4, 4, 2, 0, 0, 1, 0, 1);
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 4, 4, 4, 4, 4, 0, 0, 0, 1, 1);
- }
- }
- }
- else
- {
- if(workload <= 38070.4004f)
- {
- return configure_lhs_rhs_info(m, n, 4, 4, 4, 4, 4, 0, 0, 0, 1, 1);
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 4, 4, 4, 2, 2, 0, 0, 1, 0, 1);
- }
- }
- }
- }
- }
-}
-
-std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> ClGemmDefaultConfigReshapedValhall::configure_G78_f16(unsigned int m, unsigned int n, unsigned int k, unsigned int b)
-{
- const float r_mn = static_cast<float>(m) / static_cast<float>(n);
- const float r_nk = static_cast<float>(n) / static_cast<float>(k);
- const float workload = (static_cast<float>(m) * static_cast<float>(n) * static_cast<float>(b)) / 20.0f;
-
- if(workload <= 801.6000f)
- {
- return configure_lhs_rhs_info(m, n, 8, 4, 4, 1, 1, 0, 0, 1, 0, 1);
- }
- else
- {
- if(r_mn <= 0.1211f)
- {
- if(workload <= 3296.0000f)
- {
- return configure_lhs_rhs_info(m, n, 8, 4, 4, 2, 2, 0, 0, 1, 0, 1);
- }
- else
- {
- if(r_nk <= 1.0625f)
- {
- return configure_lhs_rhs_info(m, n, 8, 4, 4, 2, 2, 0, 0, 1, 0, 1);
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 8, 4, 4, 2, 4, 0, 0, 1, 0, 1);
- }
- }
- }
- else
- {
- if(workload <= 5068.8000f)
- {
- return configure_lhs_rhs_info(m, n, 8, 4, 4, 1, 1, 0, 0, 1, 0, 1);
- }
- else
- {
- if(r_nk <= 0.2361f)
- {
- if(workload <= 12630.0000f)
- {
- return configure_lhs_rhs_info(m, n, 8, 4, 4, 1, 1, 0, 0, 1, 0, 1);
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 8, 4, 4, 2, 1, 0, 0, 1, 0, 1);
- }
- }
- else
- {
- if(workload <= 178790.3984f)
- {
- return configure_lhs_rhs_info(m, n, 8, 4, 4, 2, 2, 0, 0, 1, 0, 1);
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 8, 4, 4, 1, 1, 0, 0, 1, 0, 1);
- }
- }
- }
- }
- }
-}
-
-std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> ClGemmDefaultConfigReshapedValhall::configure_G77_u8(unsigned int m, unsigned int n, unsigned int k, unsigned int b)
-{
- ARM_COMPUTE_UNUSED(k);
- ARM_COMPUTE_UNUSED(b);
-
- if(n <= 4)
- {
- return configure_lhs_rhs_info(m, n, 4, 2, 16, 4, 1, 0, 0, 0, 1);
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 4, 4, 16, 2, 2, 0, 1, 0, 1);
- }
-}
-} // namespace gemm
-} // namespace kernels
-} // namespace opencl
-} // namespace arm_compute
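The Valhall trees in the file above follow the same scheme with per-GPU thresholds. For example (shape chosen arbitrarily), configure_G78_f16 with m = 512, n = 512, k = 512, b = 1 evaluates:

    workload = (512 * 512 * 1) / 20 = 13107.2  -> > 801.6 and > 5068.8
    r_mn     = 512 / 512            = 1.0      -> > 0.1211
    r_nk     = 512 / 512            = 1.0      -> > 0.2361
    workload <= 178790.3984                    -> true
    => configure_lhs_rhs_info(m, n, 8, 4, 4, 2, 2, 0, 0, 1, 0, 1)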
diff --git a/src/core/gpu/cl/kernels/gemm/reshaped/ClGemmDefaultConfigReshapedValhall.h b/src/core/gpu/cl/kernels/gemm/reshaped/ClGemmDefaultConfigReshapedValhall.h
deleted file mode 100644
index 588cd64e0e..0000000000
--- a/src/core/gpu/cl/kernels/gemm/reshaped/ClGemmDefaultConfigReshapedValhall.h
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * Copyright (c) 2020-2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_CL_GEMM_DEFAULT_CONFIG_RESHAPED_VALHALL_H
-#define ARM_COMPUTE_CL_GEMM_DEFAULT_CONFIG_RESHAPED_VALHALL_H
-
-#include "src/core/gpu/cl/kernels/gemm/IClGemmKernelConfig.h"
-
-namespace arm_compute
-{
-namespace opencl
-{
-namespace kernels
-{
-namespace gemm
-{
-/** Valhall based OpenCL GEMMReshaped configuration */
-class ClGemmDefaultConfigReshapedValhall final : public IClGemmKernelConfig
-{
-public:
- /** Constructor
- *
- * @param[in] gpu GPU target
- */
- ClGemmDefaultConfigReshapedValhall(GPUTarget gpu);
-
- // Inherited overridden method
- std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> configure(unsigned int m, unsigned int n, unsigned int k, unsigned int b, DataType data_type) override;
-
-private:
- std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> configure_G77_f32(unsigned int m, unsigned int n, unsigned int k, unsigned int b);
- std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> configure_G77_f16(unsigned int m, unsigned int n, unsigned int k, unsigned int b);
- std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> configure_G78_f32(unsigned int m, unsigned int n, unsigned int k, unsigned int b);
- std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> configure_G78_f16(unsigned int m, unsigned int n, unsigned int k, unsigned int b);
- std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> configure_G77_u8(unsigned int m, unsigned int n, unsigned int k, unsigned int b);
-};
-} // namespace gemm
-} // namespace kernels
-} // namespace opencl
-} // namespace arm_compute
-#endif /* ARM_COMPUTE_CL_GEMM_DEFAULT_CONFIG_RESHAPED_VALHALL_H */
diff --git a/src/core/gpu/cl/kernels/gemm/reshaped/ClGemmReshapedKernelConfig.h b/src/core/gpu/cl/kernels/gemm/reshaped/ClGemmReshapedKernelConfig.h
deleted file mode 100644
index c990c89a91..0000000000
--- a/src/core/gpu/cl/kernels/gemm/reshaped/ClGemmReshapedKernelConfig.h
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * Copyright (c) 2019-2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_CL_GEMM_RESHAPED_KERNEL_CONFIGURATION_H
-#define ARM_COMPUTE_CL_GEMM_RESHAPED_KERNEL_CONFIGURATION_H
-
-#include "src/core/gpu/cl/kernels/gemm/IClGemmKernelConfig.h"
-#include "src/core/gpu/cl/kernels/gemm/reshaped/ClGemmDefaultConfigReshapedBifrost.h"
-#include "src/core/gpu/cl/kernels/gemm/reshaped/ClGemmDefaultConfigReshapedValhall.h"
-
-#include <memory>
-
-namespace arm_compute
-{
-namespace opencl
-{
-namespace kernels
-{
-namespace gemm
-{
-/** CLGEMMReshaped factory class */
-class ClGemmReshapedKernelConfigurationFactory final
-{
-public:
- /** Static method to call the CLGEMMReshaped kernel configuration class according to the GPU target
- *
- * @param[in] gpu GPU target
- *
- * @return CLGEMMReshaped kernel configuration class
- */
- static std::unique_ptr<IClGemmKernelConfig> create(GPUTarget gpu)
- {
- switch(get_arch_from_target(gpu))
- {
- case GPUTarget::MIDGARD:
- case GPUTarget::BIFROST:
- return std::make_unique<ClGemmDefaultConfigReshapedBifrost>(gpu);
- case GPUTarget::VALHALL:
- return std::make_unique<ClGemmDefaultConfigReshapedValhall>(gpu);
- default:
- ARM_COMPUTE_ERROR("Not supported GPU target");
- }
- }
-};
-} // namespace gemm
-} // namespace kernels
-} // namespace opencl
-} // namespace arm_compute
-#endif /* ARM_COMPUTE_CL_GEMM_RESHAPED_KERNEL_CONFIGURATION_H */
diff --git a/src/core/gpu/cl/kernels/gemm/reshaped_only_rhs/ClGemmDefaultConfigReshapedRhsOnlyBifrost.cpp b/src/core/gpu/cl/kernels/gemm/reshaped_only_rhs/ClGemmDefaultConfigReshapedRhsOnlyBifrost.cpp
deleted file mode 100644
index 7ed6b39f3e..0000000000
--- a/src/core/gpu/cl/kernels/gemm/reshaped_only_rhs/ClGemmDefaultConfigReshapedRhsOnlyBifrost.cpp
+++ /dev/null
@@ -1,518 +0,0 @@
-/*
- * Copyright (c) 2019-2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "src/core/gpu/cl/kernels/gemm/reshaped_only_rhs/ClGemmDefaultConfigReshapedRhsOnlyBifrost.h"
-
-#include "arm_compute/core/CL/CLHelpers.h"
-#include "arm_compute/core/CL/CLKernelLibrary.h"
-#include "arm_compute/core/GPUTarget.h"
-#include "arm_compute/core/TensorInfo.h"
-#include "arm_compute/core/TensorShape.h"
-#include "arm_compute/core/utils/misc/ShapeCalculator.h"
-#include "src/core/gpu/cl/kernels/gemm/ClGemmHelpers.h"
-
-#include <utility>
-
-namespace arm_compute
-{
-namespace opencl
-{
-namespace kernels
-{
-namespace gemm
-{
-using namespace arm_compute::misc::shape_calculator;
-
-ClGemmDefaultConfigReshapedRhsOnlyBifrost::ClGemmDefaultConfigReshapedRhsOnlyBifrost(GPUTarget gpu)
- : IClGemmKernelConfig(gpu)
-{
-}
-
-std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> ClGemmDefaultConfigReshapedRhsOnlyBifrost::configure(unsigned int m, unsigned int n, unsigned int k, unsigned int b, DataType data_type)
-{
- using ConfigurationFunctionExecutorPtr = std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> (ClGemmDefaultConfigReshapedRhsOnlyBifrost::*)(unsigned int m, unsigned int n, unsigned int k,
- unsigned int b);
-
- CLGEMMConfigArray<ConfigurationFunctionExecutorPtr> configs_G51(&ClGemmDefaultConfigReshapedRhsOnlyBifrost::configure_G51_f32,
- &ClGemmDefaultConfigReshapedRhsOnlyBifrost::configure_G51_f16,
- &ClGemmDefaultConfigReshapedRhsOnlyBifrost::configure_G51_u8);
-
- CLGEMMConfigArray<ConfigurationFunctionExecutorPtr> configs_G52(&ClGemmDefaultConfigReshapedRhsOnlyBifrost::configure_G52_f32,
- &ClGemmDefaultConfigReshapedRhsOnlyBifrost::configure_G52_f16,
- &ClGemmDefaultConfigReshapedRhsOnlyBifrost::configure_G7x_u8);
-
- CLGEMMConfigArray<ConfigurationFunctionExecutorPtr> configs_G76(&ClGemmDefaultConfigReshapedRhsOnlyBifrost::configure_G76_f32,
- &ClGemmDefaultConfigReshapedRhsOnlyBifrost::configure_G76_f16,
- &ClGemmDefaultConfigReshapedRhsOnlyBifrost::configure_G76_u8);
-
- CLGEMMConfigArray<ConfigurationFunctionExecutorPtr> configs_G7x(&ClGemmDefaultConfigReshapedRhsOnlyBifrost::configure_G7x_f32,
- &ClGemmDefaultConfigReshapedRhsOnlyBifrost::configure_G7x_f16,
- &ClGemmDefaultConfigReshapedRhsOnlyBifrost::configure_G7x_u8);
-
- ConfigurationFunctionExecutorPtr func = nullptr;
-
- switch(_target)
- {
- case GPUTarget::G76:
- func = configs_G76.get_function(data_type);
- break;
- case GPUTarget::G51:
- func = configs_G51.get_function(data_type);
- break;
- case GPUTarget::G52:
- func = configs_G52.get_function(data_type);
- break;
- default:
- func = configs_G7x.get_function(data_type);
- break;
- }
-
- ARM_COMPUTE_ERROR_ON_MSG(func == nullptr, "Data type not supported for GEMM");
- return (this->*func)(m, n, k, b);
-}
-
-std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> ClGemmDefaultConfigReshapedRhsOnlyBifrost::configure_G7x_f32(unsigned int m, unsigned int n, unsigned int k, unsigned int b)
-{
- ARM_COMPUTE_UNUSED(k);
- ARM_COMPUTE_UNUSED(b);
-
- if(m == 1)
- {
- if(n <= 2548)
- {
- return configure_lhs_rhs_info(m, n, 1, 2, 16, 1, 4, false, true, false, true, false);
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 1, 4, 16, 1, 8, false, true, false, true, false);
- }
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 4, 4, 4, 1, 4, false, true, false, true);
- }
-}
-
-std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> ClGemmDefaultConfigReshapedRhsOnlyBifrost::configure_G76_f32(unsigned int m, unsigned int n, unsigned int k, unsigned int b)
-{
- ARM_COMPUTE_UNUSED(k);
- ARM_COMPUTE_UNUSED(b);
-
- GEMMLHSMatrixInfo lhs_info_buf;
- GEMMRHSMatrixInfo rhs_info_buf;
- GEMMLHSMatrixInfo lhs_info_img;
- GEMMRHSMatrixInfo rhs_info_img;
-
- const bool is_workload_big = ((m * n * b) / 16) >= 2048;
-
- if(m == 1)
- {
- if(n >= 8192)
- {
- const unsigned int h0 = std::max(n / 4, 1U);
- return configure_lhs_rhs_info(m, n, 1, 4, 8, 1, h0, false, true, false, true, false);
- }
- else
- {
- const unsigned int h0 = std::max(n / 2, 1U);
- if(n <= 204)
- {
- return configure_lhs_rhs_info(m, n, 1, 2, 16, 1, h0, false, true, false, true, false);
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 1, 2, 8, 1, h0, false, true, false, true, false);
- }
- }
- }
- else
- {
- const int h0 = std::max(std::min(static_cast<int>(n / 4), static_cast<int>(16)), static_cast<int>(1));
- if(is_workload_big)
- {
- std::tie(lhs_info_buf, rhs_info_buf) = configure_lhs_rhs_info(m, n, 4, 4, 4, 1, h0, false, true, false, true);
- }
- else
- {
- std::tie(lhs_info_buf, rhs_info_buf) = configure_lhs_rhs_info(m, n, 2, 4, 8, 1, h0, false, true, false, true);
- }
- }
-
- // Get lhs_info/rhs_info in case of OpenCL image
- const int h0 = std::max(std::min(static_cast<int>(n / 4), static_cast<int>(16)), static_cast<int>(1));
- if(is_workload_big)
- {
- std::tie(lhs_info_img, rhs_info_img) = configure_lhs_rhs_info(m, n, 4, 4, 4, 1, h0, false, true, false, false, true);
- }
- else
- {
- std::tie(lhs_info_img, rhs_info_img) = configure_lhs_rhs_info(m, n, 2, 4, 8, 1, h0, false, true, false, true, true);
- }
-
- const TensorInfo tensor_rhs_info(TensorShape(n, k, b), 1, DataType::F32);
- const TensorShape shape = compute_rhs_reshaped_shape(tensor_rhs_info, rhs_info_img);
- const TensorInfo tensor_reshaped_info(shape, 1, DataType::F32);
-
- // In case of vector by matrix or small workloads, we use the OpenCL buffer rather than the OpenCL image2d
- const bool use_cl_image2d = ((m == 1) || ((((m * n * b) / 16) < 2048) && n < 128)) ? false : true;
-
- if(bool(validate_image2d_support_on_rhs(tensor_reshaped_info, rhs_info_img)) && use_cl_image2d)
- {
- return std::make_pair(lhs_info_img, rhs_info_img);
- }
- else
- {
- return std::make_pair(lhs_info_buf, rhs_info_buf);
- }
-}
-
-std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> ClGemmDefaultConfigReshapedRhsOnlyBifrost::configure_G52_f32(unsigned int m, unsigned int n, unsigned int k, unsigned int b)
-{
- const float workload = (static_cast<float>(m) * static_cast<float>(n) * static_cast<float>(b)) / 20.0f;
- const float r_nk = static_cast<float>(n) / static_cast<float>(k);
-
- GEMMLHSMatrixInfo lhs_info_buf;
- GEMMRHSMatrixInfo rhs_info_buf;
- GEMMLHSMatrixInfo lhs_info_img;
- GEMMRHSMatrixInfo rhs_info_img;
-
- if(m == 1)
- {
- if(r_nk <= 0.4664f)
- {
- return configure_lhs_rhs_info(m, n, 1, 2, 16, 1, 16, false, true, false, true, false);
- }
- else
- {
- std::tie(lhs_info_img, rhs_info_img) = configure_lhs_rhs_info(m, n, 1, 4, 8, 1, 16, false, true, false, true, true);
- std::tie(lhs_info_buf, rhs_info_buf) = configure_lhs_rhs_info(m, n, 1, 4, 8, 1, 16, false, true, false, true, false);
-
- return select_lhs_rhs_info(std::make_pair(lhs_info_img, rhs_info_img),
- std::make_pair(lhs_info_buf, rhs_info_buf),
- n, k, b, DataType::F32);
- }
- }
- else
- {
- if(workload <= 274.4000f)
- {
- return configure_lhs_rhs_info(m, n, 2, 2, 4, 1, 16, false, false, false, true, false);
- }
- else
- {
- std::tie(lhs_info_img, rhs_info_img) = configure_lhs_rhs_info(m, n, 4, 4, 4, 1, 2, false, false, false, true, true);
- std::tie(lhs_info_buf, rhs_info_buf) = configure_lhs_rhs_info(m, n, 4, 4, 4, 1, 2, false, false, false, true, false);
-
- return select_lhs_rhs_info(std::make_pair(lhs_info_img, rhs_info_img),
- std::make_pair(lhs_info_buf, rhs_info_buf),
- n, k, b, DataType::F32);
- }
- }
-}
-
-std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> ClGemmDefaultConfigReshapedRhsOnlyBifrost::configure_G51_f32(unsigned int m, unsigned int n, unsigned int k, unsigned int b)
-{
- ARM_COMPUTE_UNUSED(k);
- ARM_COMPUTE_UNUSED(b);
-
- if(m == 1)
- {
- const unsigned int n0 = n < 1280 ? 2 : 4;
- const unsigned int h0 = std::max(n / n0, 1U);
- return configure_lhs_rhs_info(m, n, 1, n0, 4, 1, h0, false, true, false, true);
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 4, 4, 4, 1, 2, false, true, false, true);
- }
-}
-
-std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> ClGemmDefaultConfigReshapedRhsOnlyBifrost::configure_G7x_f16(unsigned int m, unsigned int n, unsigned int k, unsigned int b)
-{
- ARM_COMPUTE_UNUSED(k);
- ARM_COMPUTE_UNUSED(b);
-
- if(m == 1)
- {
- if(n > 2048)
- {
- const unsigned int h0 = std::max(n / 4, 1U);
- return configure_lhs_rhs_info(m, n, 1, 4, 4, 1, h0, false, true, false, true);
- }
- else
- {
- const unsigned int h0 = std::max(n / 2, 1U);
- return configure_lhs_rhs_info(m, n, 1, 2, 8, 1, h0, false, true, false, true);
- }
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 4, 4, 4, 1, 4, false, true, false, true);
- }
-}
-
-std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> ClGemmDefaultConfigReshapedRhsOnlyBifrost::configure_G52_f16(unsigned int m, unsigned int n, unsigned int k, unsigned int b)
-{
- const float r_mn = static_cast<float>(m) / static_cast<float>(n);
- const float workload = (static_cast<float>(m) * static_cast<float>(n) * static_cast<float>(b)) / 20.0f;
- const float r_mk = static_cast<float>(m) / static_cast<float>(k);
- const float r_nk = static_cast<float>(n) / static_cast<float>(k);
-
- GEMMLHSMatrixInfo lhs_info_buf;
- GEMMRHSMatrixInfo rhs_info_buf;
- GEMMLHSMatrixInfo lhs_info_img;
- GEMMRHSMatrixInfo rhs_info_img;
-
- if(m == 1)
- {
- std::tie(lhs_info_buf, rhs_info_buf) = configure_lhs_rhs_info(m, n, 1, 4, 16, 1, 16, false, true, false, false, false);
-
- if(r_mk <= 0.0026f)
- {
- if(r_nk <= 0.4664f)
- {
- return configure_lhs_rhs_info(m, n, 1, 2, 16, 1, 32, false, true, false, true, false);
- }
- else
- {
- std::tie(lhs_info_img, rhs_info_img) = configure_lhs_rhs_info(m, n, 1, 4, 16, 1, 16, false, true, false, false, true);
- return select_lhs_rhs_info(std::make_pair(lhs_info_img, rhs_info_img),
- std::make_pair(lhs_info_buf, rhs_info_buf),
- n, k, b, DataType::F16);
- }
- }
- else
- {
- if(r_mk <= 0.0148f)
- {
- return configure_lhs_rhs_info(m, n, 1, 2, 16, 1, 32, false, true, false, true, false);
- }
- else
- {
- std::tie(lhs_info_img, rhs_info_img) = configure_lhs_rhs_info(m, n, 1, 4, 16, 1, 16, false, true, false, false, true);
- return select_lhs_rhs_info(std::make_pair(lhs_info_img, rhs_info_img),
- std::make_pair(lhs_info_buf, rhs_info_buf),
- n, k, b, DataType::F16);
- }
- }
- }
- else
- {
- std::tie(lhs_info_buf, rhs_info_buf) = configure_lhs_rhs_info(m, n, 5, 8, 4, 1, 2, false, false, false, false, false);
-
- if(workload <= 362.6000f)
- {
- return configure_lhs_rhs_info(m, n, 2, 2, 8, 1, 16, false, false, false, true, false);
- }
- else
- {
- if(r_mn <= 22.6067f)
- {
- if(workload <= 708.8000f)
- {
- std::tie(lhs_info_img, rhs_info_img) = configure_lhs_rhs_info(m, n, 5, 4, 4, 1, 2, false, false, false, false, true);
- return select_lhs_rhs_info(std::make_pair(lhs_info_img, rhs_info_img),
- std::make_pair(lhs_info_buf, rhs_info_buf),
- n, k, b, DataType::F16);
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 5, 8, 2, 1, 16, false, false, false, false, false);
- }
- }
- else
- {
- if(r_nk <= 0.0917f)
- {
- return configure_lhs_rhs_info(m, n, 2, 2, 8, 1, 16, false, false, false, true, false);
- }
- else
- {
- std::tie(lhs_info_img, rhs_info_img) = configure_lhs_rhs_info(m, n, 5, 4, 4, 1, 2, false, false, false, false, true);
- return select_lhs_rhs_info(std::make_pair(lhs_info_img, rhs_info_img),
- std::make_pair(lhs_info_buf, rhs_info_buf),
- n, k, b, DataType::F16);
- }
- }
- }
- }
-}
-
-std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> ClGemmDefaultConfigReshapedRhsOnlyBifrost::configure_G76_f16(unsigned int m, unsigned int n, unsigned int k, unsigned int b)
-{
- ARM_COMPUTE_UNUSED(k);
-
- if(m == 1)
- {
- return configure_lhs_rhs_info(m, n, 1, 2, 16, 1, 32, false, true, false, true, false);
- }
- else
- {
- const float r_mn = static_cast<float>(m) / static_cast<float>(n);
- const float workload = (static_cast<float>(m) * static_cast<float>(n) * static_cast<float>(b)) / 20.0f;
-
- if(workload <= 7449.60f)
- {
- if(workload <= 691.60f)
- {
- return configure_lhs_rhs_info(m, n, 2, 2, 8, 1, 8, false, false, false, false, false);
- }
- else
- {
- if(workload <= 4155.20f)
- {
- return configure_lhs_rhs_info(m, n, 5, 2, 8, 1, 16, false, false, false, false, false);
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 5, 8, 2, 1, 32, false, false, false, false, false);
- }
- }
- }
- else
- {
- if(workload <= 16300.80f)
- {
- if(r_mn <= 44.56f)
- {
- GEMMLHSMatrixInfo lhs_info_buf;
- GEMMRHSMatrixInfo rhs_info_buf;
- GEMMLHSMatrixInfo lhs_info_img;
- GEMMRHSMatrixInfo rhs_info_img;
-
- std::tie(lhs_info_img, rhs_info_img) = configure_lhs_rhs_info(m, n, 8, 4, 4, 1, 1, false, true, false, false, true);
- std::tie(lhs_info_buf, rhs_info_buf) = configure_lhs_rhs_info(m, n, 5, 2, 8, 1, 16, false, false, false, false, false);
-
- return select_lhs_rhs_info(std::make_pair(lhs_info_img, rhs_info_img),
- std::make_pair(lhs_info_buf, rhs_info_buf),
- n, k, b, DataType::F16);
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 5, 2, 8, 1, 16, false, false, false, false, false);
- }
- }
- else
- {
- GEMMLHSMatrixInfo lhs_info_buf;
- GEMMRHSMatrixInfo rhs_info_buf;
- GEMMLHSMatrixInfo lhs_info_img;
- GEMMRHSMatrixInfo rhs_info_img;
-
- std::tie(lhs_info_img, rhs_info_img) = configure_lhs_rhs_info(m, n, 5, 4, 4, 1, 2, false, true, false, false, true);
- std::tie(lhs_info_buf, rhs_info_buf) = configure_lhs_rhs_info(m, n, 5, 2, 8, 1, 16, false, false, false, false, false);
-
- return select_lhs_rhs_info(std::make_pair(lhs_info_img, rhs_info_img),
- std::make_pair(lhs_info_buf, rhs_info_buf),
- n, k, b, DataType::F16);
- }
- }
- }
-}
-
-std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> ClGemmDefaultConfigReshapedRhsOnlyBifrost::configure_G51_f16(unsigned int m, unsigned int n, unsigned int k, unsigned int b)
-{
- ARM_COMPUTE_UNUSED(k);
- ARM_COMPUTE_UNUSED(b);
-
- if(m == 1)
- {
- const unsigned int n0 = n < 1280 ? 2 : 4;
- const unsigned int h0 = std::max(n / n0, 1U);
- return configure_lhs_rhs_info(m, n, 1, n0, 8, 1, h0, false, true, false, true);
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 4, 4, 4, 1, 2, false, true, false, true);
- }
-}
-
-std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> ClGemmDefaultConfigReshapedRhsOnlyBifrost::configure_G7x_u8(unsigned int m, unsigned int n, unsigned int k, unsigned int b)
-{
- ARM_COMPUTE_UNUSED(k);
- ARM_COMPUTE_UNUSED(b);
-
- if(dot8_supported(CLKernelLibrary::get().get_device()))
- {
- if(m == 1)
- {
- const unsigned int h0 = std::max(n / 2, 1U);
- return configure_lhs_rhs_info(m, n, 1, 2, 16, 1, h0, false, true, false, true);
- }
- else
- {
- const unsigned int h0 = std::max(n / 4, 1U);
- return configure_lhs_rhs_info(m, n, 4, 4, 16, 1, h0, false, true, false, true);
- }
- }
- else
- {
- const int h0 = std::max(std::min(static_cast<int>(n / 2), static_cast<int>(128)), static_cast<int>(1));
- if(m == 1)
- {
- return configure_lhs_rhs_info(m, n, 1, 2, 4, 1, h0, false, true, false, true);
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 4, 2, 16, 1, h0, false, true, false, true);
- }
- }
-}
-
-std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> ClGemmDefaultConfigReshapedRhsOnlyBifrost::configure_G76_u8(unsigned int m, unsigned int n, unsigned int k, unsigned int b)
-{
- ARM_COMPUTE_UNUSED(k);
- ARM_COMPUTE_UNUSED(b);
-
- if(m == 1)
- {
- const unsigned int h0 = std::max(n / 2, 1U);
- return configure_lhs_rhs_info(m, n, 1, 2, 16, 1, h0, false, true, false, true);
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 4, 4, 16, 1, 2, false, true, false, true);
- }
-}
-
-std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> ClGemmDefaultConfigReshapedRhsOnlyBifrost::configure_G51_u8(unsigned int m, unsigned int n, unsigned int k, unsigned int b)
-{
- ARM_COMPUTE_UNUSED(k);
- ARM_COMPUTE_UNUSED(b);
-
- if(m == 1)
- {
- const unsigned int h0 = std::max(n / 2, 1U);
- return configure_lhs_rhs_info(m, n, 1, 4, 16, 1, h0, false, true, false, true);
- }
- else
- {
- const unsigned int h0 = std::max(n / 2, 1U);
- return configure_lhs_rhs_info(m, n, 4, 2, 16, 1, h0, false, true, false, true);
- }
-}
-
-} // namespace gemm
-} // namespace kernels
-} // namespace opencl
-} // namespace arm_compute
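A recurring pattern in the reshaped-only-RHS heuristics above is the special case m == 1 (matrix-vector products), which is routed to narrower block configurations than the general case. For illustration only (shapes chosen arbitrarily, and assuming the usual parameter order of the configure_lhs_rhs_info helper), configure_G7x_f32 resolves as:

    m = 1,  n = 2048 (n <= 2548)  => configure_lhs_rhs_info(m, n, 1, 2, 16, 1, 4, false, true, false, true, false)
    m = 64, n = 2048              => configure_lhs_rhs_info(m, n, 4, 4, 4, 1, 4, false, true, false, true)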
diff --git a/src/core/gpu/cl/kernels/gemm/reshaped_only_rhs/ClGemmDefaultConfigReshapedRhsOnlyBifrost.h b/src/core/gpu/cl/kernels/gemm/reshaped_only_rhs/ClGemmDefaultConfigReshapedRhsOnlyBifrost.h
deleted file mode 100644
index 7b1a1fb04d..0000000000
--- a/src/core/gpu/cl/kernels/gemm/reshaped_only_rhs/ClGemmDefaultConfigReshapedRhsOnlyBifrost.h
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- * Copyright (c) 2019-2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_CL_GEMM_DEFAULT_CONFIG_RESHAPED_RHS_ONLY_BIFROST_H
-#define ARM_COMPUTE_CL_GEMM_DEFAULT_CONFIG_RESHAPED_RHS_ONLY_BIFROST_H
-
-#include "src/core/gpu/cl/kernels/gemm/IClGemmKernelConfig.h"
-
-namespace arm_compute
-{
-namespace opencl
-{
-namespace kernels
-{
-namespace gemm
-{
-/** Bifrost based OpenCL GEMMReshapedOnlyRHS configuration */
-class ClGemmDefaultConfigReshapedRhsOnlyBifrost final : public IClGemmKernelConfig
-{
-public:
- /** Constructor
- *
- * @param[in] gpu GPU target
- */
- ClGemmDefaultConfigReshapedRhsOnlyBifrost(GPUTarget gpu);
-
- // Inherited overridden method
- std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> configure(unsigned int m, unsigned int n, unsigned int k, unsigned int b, DataType data_type) override;
-
-private:
- std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> configure_G7x_f32(unsigned int m, unsigned int n, unsigned int k, unsigned int b);
- std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> configure_G76_f32(unsigned int m, unsigned int n, unsigned int k, unsigned int b);
- std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> configure_G52_f32(unsigned int m, unsigned int n, unsigned int k, unsigned int b);
- std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> configure_G51_f32(unsigned int m, unsigned int n, unsigned int k, unsigned int b);
- std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> configure_G7x_f16(unsigned int m, unsigned int n, unsigned int k, unsigned int b);
- std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> configure_G52_f16(unsigned int m, unsigned int n, unsigned int k, unsigned int b);
- std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> configure_G76_f16(unsigned int m, unsigned int n, unsigned int k, unsigned int b);
- std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> configure_G51_f16(unsigned int m, unsigned int n, unsigned int k, unsigned int b);
- std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> configure_G7x_u8(unsigned int m, unsigned int n, unsigned int k, unsigned int b);
- std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> configure_G76_u8(unsigned int m, unsigned int n, unsigned int k, unsigned int b);
- std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> configure_G51_u8(unsigned int m, unsigned int n, unsigned int k, unsigned int b);
-};
-} // namespace gemm
-} // namespace kernels
-} // namespace opencl
-} // namespace arm_compute
-#endif /* ARM_COMPUTE_CL_GEMM_DEFAULT_CONFIG_RESHAPED_RHS_ONLY_BIFROST_H */
diff --git a/src/core/gpu/cl/kernels/gemm/reshaped_only_rhs/ClGemmDefaultConfigReshapedRhsOnlyValhall.cpp b/src/core/gpu/cl/kernels/gemm/reshaped_only_rhs/ClGemmDefaultConfigReshapedRhsOnlyValhall.cpp
deleted file mode 100644
index 4c6e633896..0000000000
--- a/src/core/gpu/cl/kernels/gemm/reshaped_only_rhs/ClGemmDefaultConfigReshapedRhsOnlyValhall.cpp
+++ /dev/null
@@ -1,570 +0,0 @@
-/*
- * Copyright (c) 2020-2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "src/core/gpu/cl/kernels/gemm/reshaped_only_rhs/ClGemmDefaultConfigReshapedRhsOnlyValhall.h"
-
-#include "arm_compute/core/CL/CLHelpers.h"
-#include "arm_compute/core/CL/CLKernelLibrary.h"
-#include "arm_compute/core/GPUTarget.h"
-#include "arm_compute/core/TensorInfo.h"
-#include "arm_compute/core/TensorShape.h"
-#include "arm_compute/core/utils/misc/ShapeCalculator.h"
-#include "src/core/gpu/cl/kernels/gemm/ClGemmHelpers.h"
-
-#include <utility>
-
-namespace arm_compute
-{
-namespace opencl
-{
-namespace kernels
-{
-namespace gemm
-{
-using namespace arm_compute::misc::shape_calculator;
-
-ClGemmDefaultConfigReshapedRhsOnlyValhall::ClGemmDefaultConfigReshapedRhsOnlyValhall(GPUTarget gpu)
- : IClGemmKernelConfig(gpu)
-{
-}
-
-std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> ClGemmDefaultConfigReshapedRhsOnlyValhall::configure(unsigned int m, unsigned int n, unsigned int k, unsigned int b, DataType data_type)
-{
- using ConfigurationFunctionExecutorPtr = std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> (ClGemmDefaultConfigReshapedRhsOnlyValhall::*)(unsigned int m, unsigned int n, unsigned int k,
- unsigned int b);
-
- CLGEMMConfigArray<ConfigurationFunctionExecutorPtr> configs_G77(&ClGemmDefaultConfigReshapedRhsOnlyValhall::configure_G77_f32,
- &ClGemmDefaultConfigReshapedRhsOnlyValhall::configure_G77_f16,
- &ClGemmDefaultConfigReshapedRhsOnlyValhall::configure_G77_u8);
-
- CLGEMMConfigArray<ConfigurationFunctionExecutorPtr> configs_G78(&ClGemmDefaultConfigReshapedRhsOnlyValhall::configure_G78_f32,
- &ClGemmDefaultConfigReshapedRhsOnlyValhall::configure_G78_f16,
- &ClGemmDefaultConfigReshapedRhsOnlyValhall::configure_G77_u8);
-
- ConfigurationFunctionExecutorPtr func = nullptr;
-
- switch(_target)
- {
- case GPUTarget::G78:
- func = configs_G78.get_function(data_type);
- break;
- case GPUTarget::G77:
- default:
- func = configs_G77.get_function(data_type);
- break;
- }
-
- ARM_COMPUTE_ERROR_ON_MSG(func == nullptr, "Data type not supported for GEMM");
- return (this->*func)(m, n, k, b);
-}
-
-std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> ClGemmDefaultConfigReshapedRhsOnlyValhall::configure_G77_f32(unsigned int m, unsigned int n, unsigned int k, unsigned int b)
-{
- if(m == 1)
- {
- const float r_mn = static_cast<float>(m) / static_cast<float>(n);
- const float r_mk = static_cast<float>(m) / static_cast<float>(k);
-
- if(r_mk <= 0.0064484127797186375)
- {
- if(r_mn <= 0.0028273810748942196)
- {
- GEMMLHSMatrixInfo lhs_info_buf;
- GEMMRHSMatrixInfo rhs_info_buf;
- GEMMLHSMatrixInfo lhs_info_img;
- GEMMRHSMatrixInfo rhs_info_img;
-
- const unsigned int h0 = std::max(n / 4, 1U);
- std::tie(lhs_info_img, rhs_info_img) = configure_lhs_rhs_info(m, n, 1, 4, 8, 1, 16, 0, 1, 0, 0, 1);
- std::tie(lhs_info_buf, rhs_info_buf) = configure_lhs_rhs_info(m, n, 1, 4, 4, 1, h0, 0, 1, 0, 1, 0);
-
- return select_lhs_rhs_info(std::make_pair(lhs_info_img, rhs_info_img),
- std::make_pair(lhs_info_buf, rhs_info_buf),
- n, k, b, DataType::F32);
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 1, 2, 16, 1, 8, 0, 1, 0, 0, 0);
- }
- }
- else
- {
- if(r_mk <= 0.020312500186264515)
- {
- return configure_lhs_rhs_info(m, n, 1, 2, 16, 1, 4, 0, 1, 0, 0, 0);
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 1, 4, 16, 1, 16, 0, 1, 0, 1, 0);
- }
- }
- }
- else
- {
- const float r_mn = static_cast<float>(m) / static_cast<float>(n);
- const float workload = (static_cast<float>(m) * static_cast<float>(n) * static_cast<float>(b)) / 20.0f;
- const float r_mk = static_cast<float>(m) / static_cast<float>(k);
-
- if(workload <= 1999.2000122070312)
- {
- if(workload <= 747.1999816894531)
- {
- return configure_lhs_rhs_info(m, n, 2, 2, 4, 1, 8, 0, 1, 0, 1, 0);
- }
- else
- {
- GEMMLHSMatrixInfo lhs_info_buf;
- GEMMRHSMatrixInfo rhs_info_buf;
- GEMMLHSMatrixInfo lhs_info_img;
- GEMMRHSMatrixInfo rhs_info_img;
- std::tie(lhs_info_img, rhs_info_img) = configure_lhs_rhs_info(m, n, 2, 4, 8, 1, 2, 0, 0, 0, 1, 1);
- std::tie(lhs_info_buf, rhs_info_buf) = configure_lhs_rhs_info(m, n, 2, 2, 4, 1, 8, 0, 1, 0, 1, 0);
-
- return select_lhs_rhs_info(std::make_pair(lhs_info_img, rhs_info_img),
- std::make_pair(lhs_info_buf, rhs_info_buf),
- n, k, b, DataType::F32);
- }
- }
- else
- {
- if(r_mn <= 0.03348214365541935)
- {
- if(r_mk <= 0.028125000186264515)
- {
- return configure_lhs_rhs_info(m, n, 2, 2, 4, 1, 8, 0, 1, 0, 1, 0);
- }
- else
- {
- GEMMLHSMatrixInfo lhs_info_buf;
- GEMMRHSMatrixInfo rhs_info_buf;
- GEMMLHSMatrixInfo lhs_info_img;
- GEMMRHSMatrixInfo rhs_info_img;
- std::tie(lhs_info_img, rhs_info_img) = configure_lhs_rhs_info(m, n, 2, 4, 8, 1, 2, 0, 0, 0, 1, 1);
- std::tie(lhs_info_buf, rhs_info_buf) = configure_lhs_rhs_info(m, n, 2, 2, 4, 1, 8, 0, 1, 0, 1, 0);
-
- return select_lhs_rhs_info(std::make_pair(lhs_info_img, rhs_info_img),
- std::make_pair(lhs_info_buf, rhs_info_buf),
- n, k, b, DataType::F32);
- }
- }
- else
- {
- GEMMLHSMatrixInfo lhs_info_buf;
- GEMMRHSMatrixInfo rhs_info_buf;
- GEMMLHSMatrixInfo lhs_info_img;
- GEMMRHSMatrixInfo rhs_info_img;
- std::tie(lhs_info_img, rhs_info_img) = configure_lhs_rhs_info(m, n, 4, 4, 4, 1, 2, 0, 1, 0, 0, 1);
- std::tie(lhs_info_buf, rhs_info_buf) = configure_lhs_rhs_info(m, n, 4, 4, 4, 1, 16, 0, 1, 0, 1, 0);
-
- return select_lhs_rhs_info(std::make_pair(lhs_info_img, rhs_info_img),
- std::make_pair(lhs_info_buf, rhs_info_buf),
- n, k, b, DataType::F32);
- }
- }
- }
-}
-
-std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> ClGemmDefaultConfigReshapedRhsOnlyValhall::configure_G77_f16(unsigned int m, unsigned int n, unsigned int k, unsigned int b)
-{
- ARM_COMPUTE_UNUSED(k);
- ARM_COMPUTE_UNUSED(b);
-
- if(m == 1)
- {
- const unsigned int h0 = std::max(n / 2, 1U);
- if(n <= 836.0)
- {
- return configure_lhs_rhs_info(m, n, 1, 2, 16, 1, h0, 0, 1, 0, 1, 0);
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 1, 2, 8, 1, h0, 0, 1, 0, 1, 0);
- }
- }
- else if(m < 128)
- {
- const int h0 = std::max(std::min(static_cast<int>(n / 4), static_cast<int>(256)), static_cast<int>(1));
- if(k >= 512)
- {
- return configure_lhs_rhs_info(m, n, 2, 4, 16, 1, h0, 0, 1, 0, 0);
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 2, 4, 8, 1, h0, 0, 1, 0, 0);
- }
- }
- else
- {
- const int h0 = std::max(std::min(static_cast<int>(n / 4), static_cast<int>(256)), static_cast<int>(1));
- if(n >= 64)
- {
- return configure_lhs_rhs_info(m, n, 4, 8, 4, 1, h0, 0, 1, 0, 0);
- }
- else
- {
- if(k >= 512)
- {
- return configure_lhs_rhs_info(m, n, 2, 4, 16, 1, h0, 0, 1, 0, 0);
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 2, 4, 8, 1, h0, 0, 1, 0, 0);
- }
- }
- }
-}
-
-std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> ClGemmDefaultConfigReshapedRhsOnlyValhall::configure_G77_u8(unsigned int m, unsigned int n, unsigned int k, unsigned int b)
-{
- ARM_COMPUTE_UNUSED(k);
- ARM_COMPUTE_UNUSED(b);
-
- if(m == 1)
- {
- const unsigned int h0 = std::max(n / 2, 1U);
- return configure_lhs_rhs_info(m, n, 1, 4, 16, 1, h0, 0, 1, 0, 1);
- }
- else
- {
- const int h0 = std::max(std::min(static_cast<int>(n / 4), static_cast<int>(256)), static_cast<int>(1));
- if(m >= 28)
- {
- return configure_lhs_rhs_info(m, n, 4, 4, 16, 1, h0, 0, 1, 0, 1);
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 2, 4, 16, 1, h0, 0, 1, 0, 1);
- }
- }
-}
-
-std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> ClGemmDefaultConfigReshapedRhsOnlyValhall::configure_G78_f32(unsigned int m, unsigned int n, unsigned int k, unsigned int b)
-{
- const float r_mn = static_cast<float>(m) / static_cast<float>(n);
- const float r_mk = static_cast<float>(m) / static_cast<float>(k);
- const float r_nk = static_cast<float>(n) / static_cast<float>(k);
- const float workload = (static_cast<float>(m) * static_cast<float>(n) * static_cast<float>(b)) / 20.0f;
-
- if(m == 1)
- {
- if(workload <= 278.7000f)
- {
- if(workload <= 7.5000f)
- {
- return configure_lhs_rhs_info(m, n, 1, 2, 8, 1, 2, 0, 1, 1, 0, 0);
- }
- else
- {
- if(r_mn <= 0.0031f)
- {
- if(workload <= 256.6000f)
- {
- if(workload <= 16.7500f)
- {
- if(r_nk <= 1.6671f)
- {
- return configure_lhs_rhs_info(m, n, 1, 2, 2, 1, 32, 0, 0, 0, 1, 0);
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 1, 2, 8, 1, 2, 0, 1, 1, 0, 0);
- }
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 1, 2, 2, 1, 32, 0, 0, 0, 1, 0);
- }
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 1, 2, 2, 1, 32, 0, 0, 0, 1, 0);
- }
- }
- else
- {
- if(r_mk <= 0.0027f)
- {
- if(r_mk <= 0.0014f)
- {
- return configure_lhs_rhs_info(m, n, 1, 2, 2, 1, 32, 0, 0, 0, 1, 0);
- }
- else
- {
- if(workload <= 8.9500f)
- {
- return configure_lhs_rhs_info(m, n, 1, 2, 8, 1, 2, 0, 1, 1, 0, 0);
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 1, 2, 2, 1, 32, 0, 0, 0, 1, 0);
- }
- }
- }
- else
- {
- if(workload <= 14.1500f)
- {
- return configure_lhs_rhs_info(m, n, 1, 2, 8, 1, 2, 0, 1, 1, 0, 0);
- }
- else
- {
- if(r_mk <= 0.0041f)
- {
- return configure_lhs_rhs_info(m, n, 1, 2, 2, 1, 32, 0, 0, 0, 1, 0);
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 1, 2, 8, 1, 2, 0, 1, 1, 0, 0);
- }
- }
- }
- }
- }
- }
- else
- {
- if(workload <= 363.7000f)
- {
- if(r_mk <= 0.0031f)
- {
- return configure_lhs_rhs_info(m, n, 1, 4, 2, 1, 32, 0, 1, 0, 1, 0);
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 1, 4, 4, 1, 32, 0, 1, 0, 1, 0);
- }
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 1, 4, 2, 1, 32, 0, 1, 0, 1, 0);
- }
- }
- }
- else
- {
- if(workload <= 1384.8000f)
- {
- if(workload <= 704.0000f)
- {
- return configure_lhs_rhs_info(m, n, 2, 2, 4, 1, 32, 0, 1, 0, 1, 0);
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 2, 4, 8, 1, 4, 0, 0, 0, 1, 1);
- }
- }
- else
- {
- if(workload <= 16761.6006f)
- {
- if(r_mn <= 187.1250f)
- {
- return configure_lhs_rhs_info(m, n, 4, 4, 4, 1, 16, 0, 0, 0, 1, 1);
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 2, 4, 8, 1, 4, 0, 0, 0, 1, 1);
- }
- }
- else
- {
- if(r_mk <= 432.4630f)
- {
- return configure_lhs_rhs_info(m, n, 5, 4, 4, 1, 16, 0, 0, 0, 1, 1);
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 2, 4, 4, 1, 16, 0, 1, 0, 1, 1);
- }
- }
- }
- }
-}
-
-std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> ClGemmDefaultConfigReshapedRhsOnlyValhall::configure_G78_f16(unsigned int m, unsigned int n, unsigned int k, unsigned int b)
-{
- const float r_mn = static_cast<float>(m) / static_cast<float>(n);
- const float r_mk = static_cast<float>(m) / static_cast<float>(k);
- const float r_nk = static_cast<float>(n) / static_cast<float>(k);
- const float workload = (static_cast<float>(m) * static_cast<float>(n) * static_cast<float>(b)) / 20.0f;
-
- if(m == 1)
- {
- if(r_mn <= 0.0038f)
- {
- if(workload <= 353.9000f)
- {
- if(workload <= 278.7000f)
- {
- return configure_lhs_rhs_info(m, n, 1, 2, 4, 1, 32, 0, 0, 1, 0, 0);
- }
- else
- {
- if(r_mk <= 0.0004f)
- {
- return configure_lhs_rhs_info(m, n, 1, 2, 4, 1, 32, 0, 0, 1, 0, 0);
- }
- else
- {
- if(r_mk <= 0.0030f)
- {
- return configure_lhs_rhs_info(m, n, 1, 8, 4, 1, 8, 0, 1, 1, 0, 1);
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 1, 2, 4, 1, 32, 0, 0, 1, 0, 0);
- }
- }
- }
- }
- else
- {
- if(r_nk <= 1.9384f)
- {
- return configure_lhs_rhs_info(m, n, 1, 2, 4, 1, 32, 0, 0, 1, 0, 0);
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 1, 8, 4, 1, 8, 0, 1, 1, 0, 1);
- }
- }
- }
- else
- {
- if(r_nk <= 1.0368f)
- {
- return configure_lhs_rhs_info(m, n, 1, 2, 16, 1, 32, 0, 0, 1, 0, 0);
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 1, 2, 4, 1, 32, 0, 0, 1, 0, 0);
- }
- }
- }
- else
- {
- if(workload <= 1422.4000f)
- {
- if(workload <= 704.0000f)
- {
- return configure_lhs_rhs_info(m, n, 2, 2, 8, 1, 32, 0, 0, 1, 0, 0);
- }
- else
- {
- if(workload <= 1197.6000f)
- {
- return configure_lhs_rhs_info(m, n, 2, 4, 8, 1, 8, 0, 1, 1, 0, 1);
- }
- else
- {
- if(workload <= 1241.6000f)
- {
- return configure_lhs_rhs_info(m, n, 2, 8, 8, 1, 16, 0, 1, 1, 0, 0);
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 2, 4, 8, 1, 8, 0, 1, 1, 0, 1);
- }
- }
- }
- }
- else
- {
- if(workload <= 2769.6000f)
- {
- if(workload <= 1846.4000f)
- {
- if(r_mn <= 2.4927f)
- {
- return configure_lhs_rhs_info(m, n, 2, 8, 8, 1, 16, 0, 1, 1, 0, 0);
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 4, 4, 8, 1, 32, 0, 1, 1, 0, 0);
- }
- }
- else
- {
- if(r_mn <= 0.6261f)
- {
- return configure_lhs_rhs_info(m, n, 4, 4, 8, 1, 32, 0, 1, 1, 0, 0);
- }
- else
- {
- if(r_mk <= 3.4453f)
- {
- if(r_mn <= 1.4135f)
- {
- return configure_lhs_rhs_info(m, n, 2, 8, 8, 1, 16, 0, 1, 1, 0, 0);
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 4, 4, 8, 1, 32, 0, 1, 1, 0, 0);
- }
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 2, 8, 8, 1, 16, 0, 1, 1, 0, 0);
- }
- }
- }
- }
- else
- {
- if(r_nk <= 0.0302f)
- {
- return configure_lhs_rhs_info(m, n, 2, 4, 8, 1, 8, 0, 1, 1, 0, 1);
- }
- else
- {
- if(r_mk <= 181.3750f)
- {
- return configure_lhs_rhs_info(m, n, 4, 4, 8, 1, 32, 0, 1, 1, 0, 0);
- }
- else
- {
- if(workload <= 28035.2002f)
- {
- return configure_lhs_rhs_info(m, n, 2, 8, 8, 1, 16, 0, 1, 1, 0, 0);
- }
- else
- {
- if(r_mk <= 808.6667f)
- {
- return configure_lhs_rhs_info(m, n, 4, 4, 8, 1, 32, 0, 1, 1, 0, 0);
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 2, 8, 8, 1, 16, 0, 1, 1, 0, 0);
- }
- }
- }
- }
- }
- }
- }
-}
-} // namespace gemm
-} // namespace kernels
-} // namespace opencl
-} // namespace arm_compute
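Worked example for configure_G77_f32 above (illustrative sizes): with m = 64, n = 64, k = 64 and b = 1 the workload is (64 * 64 * 1) / 20 = 204.8, which passes both the 1999.2 and 747.2 thresholds, so the method returns configure_lhs_rhs_info(m, n, 2, 2, 4, 1, 8, 0, 1, 0, 1, 0); the trailing 0 is the flag that, in the buffer/image pairs elsewhere in this file, selects the plain OpenCL buffer rather than the image2d path.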
diff --git a/src/core/gpu/cl/kernels/gemm/reshaped_only_rhs/ClGemmDefaultConfigReshapedRhsOnlyValhall.h b/src/core/gpu/cl/kernels/gemm/reshaped_only_rhs/ClGemmDefaultConfigReshapedRhsOnlyValhall.h
deleted file mode 100644
index 6a11ddb748..0000000000
--- a/src/core/gpu/cl/kernels/gemm/reshaped_only_rhs/ClGemmDefaultConfigReshapedRhsOnlyValhall.h
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * Copyright (c) 2020-2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_CL_GEMM_DEFAULT_CONFIG_RESHAPED_RHS_ONLY_VALHALL_H
-#define ARM_COMPUTE_CL_GEMM_DEFAULT_CONFIG_RESHAPED_RHS_ONLY_VALHALL_H
-
-#include "src/core/gpu/cl/kernels/gemm/IClGemmKernelConfig.h"
-
-namespace arm_compute
-{
-namespace opencl
-{
-namespace kernels
-{
-namespace gemm
-{
-/** Valhall-based OpenCL GEMMReshapedOnlyRHS configuration */
-class ClGemmDefaultConfigReshapedRhsOnlyValhall final : public IClGemmKernelConfig
-{
-public:
- /** Constructor
- *
- * @param[in] gpu GPU target
- */
- ClGemmDefaultConfigReshapedRhsOnlyValhall(GPUTarget gpu);
-
- // Inherited overridden method
- std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> configure(unsigned int m, unsigned int n, unsigned int k, unsigned int b, DataType data_type) override;
-
-private:
- std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> configure_G77_f32(unsigned int m, unsigned int n, unsigned int k, unsigned int b);
- std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> configure_G77_f16(unsigned int m, unsigned int n, unsigned int k, unsigned int b);
- std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> configure_G78_f32(unsigned int m, unsigned int n, unsigned int k, unsigned int b);
- std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> configure_G78_f16(unsigned int m, unsigned int n, unsigned int k, unsigned int b);
- std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> configure_G77_u8(unsigned int m, unsigned int n, unsigned int k, unsigned int b);
-};
-} // namespace gemm
-} // namespace kernels
-} // namespace opencl
-} // namespace arm_compute
-#endif /* ARM_COMPUTE_CL_GEMM_DEFAULT_CONFIG_RESHAPED_RHS_ONLY_VALHALL_H */
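The configure() entry points in this class and the Bifrost one dispatch through pointers to member functions selected per GPU target and data type. Below is a self-contained sketch of that pattern with hypothetical names (CLGEMMConfigArray itself is not reproduced):

    #include <iostream>
    #include <utility>

    class ExampleConfig
    {
    public:
        using ConfigFn = std::pair<int, int> (ExampleConfig::*)(unsigned int m, unsigned int n);

        std::pair<int, int> configure(unsigned int m, unsigned int n, bool is_f16)
        {
            // Pick the specialised member function, then call it through 'this',
            // mirroring the (this->*func)(m, n, k, b) call used above.
            ConfigFn func = is_f16 ? &ExampleConfig::configure_f16 : &ExampleConfig::configure_f32;
            return (this->*func)(m, n);
        }

    private:
        std::pair<int, int> configure_f32(unsigned int m, unsigned int n) { return { int(m) * 4, int(n) * 4 }; }
        std::pair<int, int> configure_f16(unsigned int m, unsigned int n) { return { int(m) * 8, int(n) * 8 }; }
    };

    int main()
    {
        ExampleConfig cfg;
        const auto blocks = cfg.configure(64, 64, false);
        std::cout << blocks.first << " x " << blocks.second << "\n"; // prints 256 x 256
    }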
diff --git a/src/core/gpu/cl/kernels/gemm/reshaped_only_rhs/ClGemmDefaultReshapedRhsOnlyBifrost.cpp b/src/core/gpu/cl/kernels/gemm/reshaped_only_rhs/ClGemmDefaultReshapedRhsOnlyBifrost.cpp
deleted file mode 100644
index 7ed6b39f3e..0000000000
--- a/src/core/gpu/cl/kernels/gemm/reshaped_only_rhs/ClGemmDefaultReshapedRhsOnlyBifrost.cpp
+++ /dev/null
@@ -1,518 +0,0 @@
-/*
- * Copyright (c) 2019-2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "src/core/gpu/cl/kernels/gemm/reshaped_only_rhs/ClGemmDefaultConfigReshapedRhsOnlyBifrost.h"
-
-#include "arm_compute/core/CL/CLHelpers.h"
-#include "arm_compute/core/CL/CLKernelLibrary.h"
-#include "arm_compute/core/GPUTarget.h"
-#include "arm_compute/core/TensorInfo.h"
-#include "arm_compute/core/TensorShape.h"
-#include "arm_compute/core/utils/misc/ShapeCalculator.h"
-#include "src/core/gpu/cl/kernels/gemm/ClGemmHelpers.h"
-
-#include <utility>
-
-namespace arm_compute
-{
-namespace opencl
-{
-namespace kernels
-{
-namespace gemm
-{
-using namespace arm_compute::misc::shape_calculator;
-
-ClGemmDefaultConfigReshapedRhsOnlyBifrost::ClGemmDefaultConfigReshapedRhsOnlyBifrost(GPUTarget gpu)
- : IClGemmKernelConfig(gpu)
-{
-}
-
-std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> ClGemmDefaultConfigReshapedRhsOnlyBifrost::configure(unsigned int m, unsigned int n, unsigned int k, unsigned int b, DataType data_type)
-{
- using ConfigurationFunctionExecutorPtr = std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> (ClGemmDefaultConfigReshapedRhsOnlyBifrost::*)(unsigned int m, unsigned int n, unsigned int k,
- unsigned int b);
-
- CLGEMMConfigArray<ConfigurationFunctionExecutorPtr> configs_G51(&ClGemmDefaultConfigReshapedRhsOnlyBifrost::configure_G51_f32,
- &ClGemmDefaultConfigReshapedRhsOnlyBifrost::configure_G51_f16,
- &ClGemmDefaultConfigReshapedRhsOnlyBifrost::configure_G51_u8);
-
- CLGEMMConfigArray<ConfigurationFunctionExecutorPtr> configs_G52(&ClGemmDefaultConfigReshapedRhsOnlyBifrost::configure_G52_f32,
- &ClGemmDefaultConfigReshapedRhsOnlyBifrost::configure_G52_f16,
- &ClGemmDefaultConfigReshapedRhsOnlyBifrost::configure_G7x_u8);
-
- CLGEMMConfigArray<ConfigurationFunctionExecutorPtr> configs_G76(&ClGemmDefaultConfigReshapedRhsOnlyBifrost::configure_G76_f32,
- &ClGemmDefaultConfigReshapedRhsOnlyBifrost::configure_G76_f16,
- &ClGemmDefaultConfigReshapedRhsOnlyBifrost::configure_G76_u8);
-
- CLGEMMConfigArray<ConfigurationFunctionExecutorPtr> configs_G7x(&ClGemmDefaultConfigReshapedRhsOnlyBifrost::configure_G7x_f32,
- &ClGemmDefaultConfigReshapedRhsOnlyBifrost::configure_G7x_f16,
- &ClGemmDefaultConfigReshapedRhsOnlyBifrost::configure_G7x_u8);
-
- ConfigurationFunctionExecutorPtr func = nullptr;
-
- switch(_target)
- {
- case GPUTarget::G76:
- func = configs_G76.get_function(data_type);
- break;
- case GPUTarget::G51:
- func = configs_G51.get_function(data_type);
- break;
- case GPUTarget::G52:
- func = configs_G52.get_function(data_type);
- break;
- default:
- func = configs_G7x.get_function(data_type);
- break;
- }
-
- ARM_COMPUTE_ERROR_ON_MSG(func == nullptr, "Data type not supported for GEMM");
- return (this->*func)(m, n, k, b);
-}
-
-std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> ClGemmDefaultConfigReshapedRhsOnlyBifrost::configure_G7x_f32(unsigned int m, unsigned int n, unsigned int k, unsigned int b)
-{
- ARM_COMPUTE_UNUSED(k);
- ARM_COMPUTE_UNUSED(b);
-
- if(m == 1)
- {
- if(n <= 2548)
- {
- return configure_lhs_rhs_info(m, n, 1, 2, 16, 1, 4, false, true, false, true, false);
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 1, 4, 16, 1, 8, false, true, false, true, false);
- }
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 4, 4, 4, 1, 4, false, true, false, true);
- }
-}
-
-std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> ClGemmDefaultConfigReshapedRhsOnlyBifrost::configure_G76_f32(unsigned int m, unsigned int n, unsigned int k, unsigned int b)
-{
- ARM_COMPUTE_UNUSED(k);
- ARM_COMPUTE_UNUSED(b);
-
- GEMMLHSMatrixInfo lhs_info_buf;
- GEMMRHSMatrixInfo rhs_info_buf;
- GEMMLHSMatrixInfo lhs_info_img;
- GEMMRHSMatrixInfo rhs_info_img;
-
- const bool is_workload_big = ((m * n * b) / 16) >= 2048;
-
- if(m == 1)
- {
- if(n >= 8192)
- {
- const unsigned int h0 = std::max(n / 4, 1U);
- return configure_lhs_rhs_info(m, n, 1, 4, 8, 1, h0, false, true, false, true, false);
- }
- else
- {
- const unsigned int h0 = std::max(n / 2, 1U);
- if(n <= 204)
- {
- return configure_lhs_rhs_info(m, n, 1, 2, 16, 1, h0, false, true, false, true, false);
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 1, 2, 8, 1, h0, false, true, false, true, false);
- }
- }
- }
- else
- {
- const int h0 = std::max(std::min(static_cast<int>(n / 4), static_cast<int>(16)), static_cast<int>(1));
- if(is_workload_big)
- {
- std::tie(lhs_info_buf, rhs_info_buf) = configure_lhs_rhs_info(m, n, 4, 4, 4, 1, h0, false, true, false, true);
- }
- else
- {
- std::tie(lhs_info_buf, rhs_info_buf) = configure_lhs_rhs_info(m, n, 2, 4, 8, 1, h0, false, true, false, true);
- }
- }
-
- // Get lhs_info/rhs_info in case of OpenCL image
- const int h0 = std::max(std::min(static_cast<int>(n / 4), static_cast<int>(16)), static_cast<int>(1));
- if(is_workload_big)
- {
- std::tie(lhs_info_img, rhs_info_img) = configure_lhs_rhs_info(m, n, 4, 4, 4, 1, h0, false, true, false, false, true);
- }
- else
- {
- std::tie(lhs_info_img, rhs_info_img) = configure_lhs_rhs_info(m, n, 2, 4, 8, 1, h0, false, true, false, true, true);
- }
-
- const TensorInfo tensor_rhs_info(TensorShape(n, k, b), 1, DataType::F32);
- const TensorShape shape = compute_rhs_reshaped_shape(tensor_rhs_info, rhs_info_img);
- const TensorInfo tensor_reshaped_info(shape, 1, DataType::F32);
-
- // In case of vector by matrix or small workloads, we use the OpenCL buffer rather than the OpenCL image2d
- const bool use_cl_image2d = ((m == 1) || ((((m * n * b) / 16) < 2048) && n < 128)) ? false : true;
-
- if(bool(validate_image2d_support_on_rhs(tensor_reshaped_info, rhs_info_img)) && use_cl_image2d)
- {
- return std::make_pair(lhs_info_img, rhs_info_img);
- }
- else
- {
- return std::make_pair(lhs_info_buf, rhs_info_buf);
- }
-}
-
-std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> ClGemmDefaultConfigReshapedRhsOnlyBifrost::configure_G52_f32(unsigned int m, unsigned int n, unsigned int k, unsigned int b)
-{
- const float workload = (static_cast<float>(m) * static_cast<float>(n) * static_cast<float>(b)) / 20.0f;
- const float r_nk = static_cast<float>(n) / static_cast<float>(k);
-
- GEMMLHSMatrixInfo lhs_info_buf;
- GEMMRHSMatrixInfo rhs_info_buf;
- GEMMLHSMatrixInfo lhs_info_img;
- GEMMRHSMatrixInfo rhs_info_img;
-
- if(m == 1)
- {
- if(r_nk <= 0.4664f)
- {
- return configure_lhs_rhs_info(m, n, 1, 2, 16, 1, 16, false, true, false, true, false);
- }
- else
- {
- std::tie(lhs_info_img, rhs_info_img) = configure_lhs_rhs_info(m, n, 1, 4, 8, 1, 16, false, true, false, true, true);
- std::tie(lhs_info_buf, rhs_info_buf) = configure_lhs_rhs_info(m, n, 1, 4, 8, 1, 16, false, true, false, true, false);
-
- return select_lhs_rhs_info(std::make_pair(lhs_info_img, rhs_info_img),
- std::make_pair(lhs_info_buf, rhs_info_buf),
- n, k, b, DataType::F32);
- }
- }
- else
- {
- if(workload <= 274.4000f)
- {
- return configure_lhs_rhs_info(m, n, 2, 2, 4, 1, 16, false, false, false, true, false);
- }
- else
- {
- std::tie(lhs_info_img, rhs_info_img) = configure_lhs_rhs_info(m, n, 4, 4, 4, 1, 2, false, false, false, true, true);
- std::tie(lhs_info_buf, rhs_info_buf) = configure_lhs_rhs_info(m, n, 4, 4, 4, 1, 2, false, false, false, true, false);
-
- return select_lhs_rhs_info(std::make_pair(lhs_info_img, rhs_info_img),
- std::make_pair(lhs_info_buf, rhs_info_buf),
- n, k, b, DataType::F32);
- }
- }
-}
-
-std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> ClGemmDefaultConfigReshapedRhsOnlyBifrost::configure_G51_f32(unsigned int m, unsigned int n, unsigned int k, unsigned int b)
-{
- ARM_COMPUTE_UNUSED(k);
- ARM_COMPUTE_UNUSED(b);
-
- if(m == 1)
- {
- const unsigned int n0 = n < 1280 ? 2 : 4;
- const unsigned int h0 = std::max(n / n0, 1U);
- return configure_lhs_rhs_info(m, n, 1, n0, 4, 1, h0, false, true, false, true);
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 4, 4, 4, 1, 2, false, true, false, true);
- }
-}
-
-std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> ClGemmDefaultConfigReshapedRhsOnlyBifrost::configure_G7x_f16(unsigned int m, unsigned int n, unsigned int k, unsigned int b)
-{
- ARM_COMPUTE_UNUSED(k);
- ARM_COMPUTE_UNUSED(b);
-
- if(m == 1)
- {
- if(n > 2048)
- {
- const unsigned int h0 = std::max(n / 4, 1U);
- return configure_lhs_rhs_info(m, n, 1, 4, 4, 1, h0, false, true, false, true);
- }
- else
- {
- const unsigned int h0 = std::max(n / 2, 1U);
- return configure_lhs_rhs_info(m, n, 1, 2, 8, 1, h0, false, true, false, true);
- }
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 4, 4, 4, 1, 4, false, true, false, true);
- }
-}
-
-std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> ClGemmDefaultConfigReshapedRhsOnlyBifrost::configure_G52_f16(unsigned int m, unsigned int n, unsigned int k, unsigned int b)
-{
- const float r_mn = static_cast<float>(m) / static_cast<float>(n);
- const float workload = (static_cast<float>(m) * static_cast<float>(n) * static_cast<float>(b)) / 20.0f;
- const float r_mk = static_cast<float>(m) / static_cast<float>(k);
- const float r_nk = static_cast<float>(n) / static_cast<float>(k);
-
- GEMMLHSMatrixInfo lhs_info_buf;
- GEMMRHSMatrixInfo rhs_info_buf;
- GEMMLHSMatrixInfo lhs_info_img;
- GEMMRHSMatrixInfo rhs_info_img;
-
- if(m == 1)
- {
- std::tie(lhs_info_buf, rhs_info_buf) = configure_lhs_rhs_info(m, n, 1, 4, 16, 1, 16, false, true, false, false, false);
-
- if(r_mk <= 0.0026f)
- {
- if(r_nk <= 0.4664f)
- {
- return configure_lhs_rhs_info(m, n, 1, 2, 16, 1, 32, false, true, false, true, false);
- }
- else
- {
- std::tie(lhs_info_img, rhs_info_img) = configure_lhs_rhs_info(m, n, 1, 4, 16, 1, 16, false, true, false, false, true);
- return select_lhs_rhs_info(std::make_pair(lhs_info_img, rhs_info_img),
- std::make_pair(lhs_info_buf, rhs_info_buf),
- n, k, b, DataType::F16);
- }
- }
- else
- {
- if(r_mk <= 0.0148f)
- {
- return configure_lhs_rhs_info(m, n, 1, 2, 16, 1, 32, false, true, false, true, false);
- }
- else
- {
- std::tie(lhs_info_img, rhs_info_img) = configure_lhs_rhs_info(m, n, 1, 4, 16, 1, 16, false, true, false, false, true);
- return select_lhs_rhs_info(std::make_pair(lhs_info_img, rhs_info_img),
- std::make_pair(lhs_info_buf, rhs_info_buf),
- n, k, b, DataType::F16);
- }
- }
- }
- else
- {
- std::tie(lhs_info_buf, rhs_info_buf) = configure_lhs_rhs_info(m, n, 5, 8, 4, 1, 2, false, false, false, false, false);
-
- if(workload <= 362.6000f)
- {
- return configure_lhs_rhs_info(m, n, 2, 2, 8, 1, 16, false, false, false, true, false);
- }
- else
- {
- if(r_mn <= 22.6067f)
- {
- if(workload <= 708.8000f)
- {
- std::tie(lhs_info_img, rhs_info_img) = configure_lhs_rhs_info(m, n, 5, 4, 4, 1, 2, false, false, false, false, true);
- return select_lhs_rhs_info(std::make_pair(lhs_info_img, rhs_info_img),
- std::make_pair(lhs_info_buf, rhs_info_buf),
- n, k, b, DataType::F16);
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 5, 8, 2, 1, 16, false, false, false, false, false);
- }
- }
- else
- {
- if(r_nk <= 0.0917f)
- {
- return configure_lhs_rhs_info(m, n, 2, 2, 8, 1, 16, false, false, false, true, false);
- }
- else
- {
- std::tie(lhs_info_img, rhs_info_img) = configure_lhs_rhs_info(m, n, 5, 4, 4, 1, 2, false, false, false, false, true);
- return select_lhs_rhs_info(std::make_pair(lhs_info_img, rhs_info_img),
- std::make_pair(lhs_info_buf, rhs_info_buf),
- n, k, b, DataType::F16);
- }
- }
- }
- }
-}
-
-std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> ClGemmDefaultConfigReshapedRhsOnlyBifrost::configure_G76_f16(unsigned int m, unsigned int n, unsigned int k, unsigned int b)
-{
- ARM_COMPUTE_UNUSED(k);
-
- if(m == 1)
- {
- return configure_lhs_rhs_info(m, n, 1, 2, 16, 1, 32, false, true, false, true, false);
- }
- else
- {
- const float r_mn = static_cast<float>(m) / static_cast<float>(n);
- const float workload = (static_cast<float>(m) * static_cast<float>(n) * static_cast<float>(b)) / 20.0f;
-
- if(workload <= 7449.60f)
- {
- if(workload <= 691.60f)
- {
- return configure_lhs_rhs_info(m, n, 2, 2, 8, 1, 8, false, false, false, false, false);
- }
- else
- {
- if(workload <= 4155.20f)
- {
- return configure_lhs_rhs_info(m, n, 5, 2, 8, 1, 16, false, false, false, false, false);
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 5, 8, 2, 1, 32, false, false, false, false, false);
- }
- }
- }
- else
- {
- if(workload <= 16300.80f)
- {
- if(r_mn <= 44.56f)
- {
- GEMMLHSMatrixInfo lhs_info_buf;
- GEMMRHSMatrixInfo rhs_info_buf;
- GEMMLHSMatrixInfo lhs_info_img;
- GEMMRHSMatrixInfo rhs_info_img;
-
- std::tie(lhs_info_img, rhs_info_img) = configure_lhs_rhs_info(m, n, 8, 4, 4, 1, 1, false, true, false, false, true);
- std::tie(lhs_info_buf, rhs_info_buf) = configure_lhs_rhs_info(m, n, 5, 2, 8, 1, 16, false, false, false, false, false);
-
- return select_lhs_rhs_info(std::make_pair(lhs_info_img, rhs_info_img),
- std::make_pair(lhs_info_buf, rhs_info_buf),
- n, k, b, DataType::F16);
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 5, 2, 8, 1, 16, false, false, false, false, false);
- }
- }
- else
- {
- GEMMLHSMatrixInfo lhs_info_buf;
- GEMMRHSMatrixInfo rhs_info_buf;
- GEMMLHSMatrixInfo lhs_info_img;
- GEMMRHSMatrixInfo rhs_info_img;
-
- std::tie(lhs_info_img, rhs_info_img) = configure_lhs_rhs_info(m, n, 5, 4, 4, 1, 2, false, true, false, false, true);
- std::tie(lhs_info_buf, rhs_info_buf) = configure_lhs_rhs_info(m, n, 5, 2, 8, 1, 16, false, false, false, false, false);
-
- return select_lhs_rhs_info(std::make_pair(lhs_info_img, rhs_info_img),
- std::make_pair(lhs_info_buf, rhs_info_buf),
- n, k, b, DataType::F16);
- }
- }
- }
-}
-
-std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> ClGemmDefaultConfigReshapedRhsOnlyBifrost::configure_G51_f16(unsigned int m, unsigned int n, unsigned int k, unsigned int b)
-{
- ARM_COMPUTE_UNUSED(k);
- ARM_COMPUTE_UNUSED(b);
-
- if(m == 1)
- {
- const unsigned int n0 = n < 1280 ? 2 : 4;
- const unsigned int h0 = std::max(n / n0, 1U);
- return configure_lhs_rhs_info(m, n, 1, n0, 8, 1, h0, false, true, false, true);
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 4, 4, 4, 1, 2, false, true, false, true);
- }
-}
-
-std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> ClGemmDefaultConfigReshapedRhsOnlyBifrost::configure_G7x_u8(unsigned int m, unsigned int n, unsigned int k, unsigned int b)
-{
- ARM_COMPUTE_UNUSED(k);
- ARM_COMPUTE_UNUSED(b);
-
- if(dot8_supported(CLKernelLibrary::get().get_device()))
- {
- if(m == 1)
- {
- const unsigned int h0 = std::max(n / 2, 1U);
- return configure_lhs_rhs_info(m, n, 1, 2, 16, 1, h0, false, true, false, true);
- }
- else
- {
- const unsigned int h0 = std::max(n / 4, 1U);
- return configure_lhs_rhs_info(m, n, 4, 4, 16, 1, h0, false, true, false, true);
- }
- }
- else
- {
- const int h0 = std::max(std::min(static_cast<int>(n / 2), static_cast<int>(128)), static_cast<int>(1));
- if(m == 1)
- {
- return configure_lhs_rhs_info(m, n, 1, 2, 4, 1, h0, false, true, false, true);
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 4, 2, 16, 1, h0, false, true, false, true);
- }
- }
-}
-
-std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> ClGemmDefaultConfigReshapedRhsOnlyBifrost::configure_G76_u8(unsigned int m, unsigned int n, unsigned int k, unsigned int b)
-{
- ARM_COMPUTE_UNUSED(k);
- ARM_COMPUTE_UNUSED(b);
-
- if(m == 1)
- {
- const unsigned int h0 = std::max(n / 2, 1U);
- return configure_lhs_rhs_info(m, n, 1, 2, 16, 1, h0, false, true, false, true);
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 4, 4, 16, 1, 2, false, true, false, true);
- }
-}
-
-std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> ClGemmDefaultConfigReshapedRhsOnlyBifrost::configure_G51_u8(unsigned int m, unsigned int n, unsigned int k, unsigned int b)
-{
- ARM_COMPUTE_UNUSED(k);
- ARM_COMPUTE_UNUSED(b);
-
- if(m == 1)
- {
- const unsigned int h0 = std::max(n / 2, 1U);
- return configure_lhs_rhs_info(m, n, 1, 4, 16, 1, h0, false, true, false, true);
- }
- else
- {
- const unsigned int h0 = std::max(n / 2, 1U);
- return configure_lhs_rhs_info(m, n, 4, 2, 16, 1, h0, false, true, false, true);
- }
-}
-
-} // namespace gemm
-} // namespace kernels
-} // namespace opencl
-} // namespace arm_compute
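Worked example for configure_G76_f32 above (illustrative sizes): with m = 256, n = 256 and b = 1, (m * n * b) / 16 = 4096, so is_workload_big is true and both candidate configurations use h0 = min(n / 4, 16) = 16. Because m != 1 and the workload is not small, use_cl_image2d evaluates to true, so the image2d configuration (last argument true) is returned, provided validate_image2d_support_on_rhs() accepts the reshaped RHS tensor; otherwise the buffer configuration is used.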
diff --git a/src/core/gpu/cl/kernels/gemm/reshaped_only_rhs/ClGemmDefaultReshapedRhsOnlyValhall.cpp b/src/core/gpu/cl/kernels/gemm/reshaped_only_rhs/ClGemmDefaultReshapedRhsOnlyValhall.cpp
deleted file mode 100644
index 4c6e633896..0000000000
--- a/src/core/gpu/cl/kernels/gemm/reshaped_only_rhs/ClGemmDefaultReshapedRhsOnlyValhall.cpp
+++ /dev/null
@@ -1,570 +0,0 @@
-/*
- * Copyright (c) 2020-2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "src/core/gpu/cl/kernels/gemm/reshaped_only_rhs/ClGemmDefaultConfigReshapedRhsOnlyValhall.h"
-
-#include "arm_compute/core/CL/CLHelpers.h"
-#include "arm_compute/core/CL/CLKernelLibrary.h"
-#include "arm_compute/core/GPUTarget.h"
-#include "arm_compute/core/TensorInfo.h"
-#include "arm_compute/core/TensorShape.h"
-#include "arm_compute/core/utils/misc/ShapeCalculator.h"
-#include "src/core/gpu/cl/kernels/gemm/ClGemmHelpers.h"
-
-#include <utility>
-
-namespace arm_compute
-{
-namespace opencl
-{
-namespace kernels
-{
-namespace gemm
-{
-using namespace arm_compute::misc::shape_calculator;
-
-ClGemmDefaultConfigReshapedRhsOnlyValhall::ClGemmDefaultConfigReshapedRhsOnlyValhall(GPUTarget gpu)
- : IClGemmKernelConfig(gpu)
-{
-}
-
-std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> ClGemmDefaultConfigReshapedRhsOnlyValhall::configure(unsigned int m, unsigned int n, unsigned int k, unsigned int b, DataType data_type)
-{
- using ConfigurationFunctionExecutorPtr = std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> (ClGemmDefaultConfigReshapedRhsOnlyValhall::*)(unsigned int m, unsigned int n, unsigned int k,
- unsigned int b);
-
- CLGEMMConfigArray<ConfigurationFunctionExecutorPtr> configs_G77(&ClGemmDefaultConfigReshapedRhsOnlyValhall::configure_G77_f32,
- &ClGemmDefaultConfigReshapedRhsOnlyValhall::configure_G77_f16,
- &ClGemmDefaultConfigReshapedRhsOnlyValhall::configure_G77_u8);
-
- CLGEMMConfigArray<ConfigurationFunctionExecutorPtr> configs_G78(&ClGemmDefaultConfigReshapedRhsOnlyValhall::configure_G78_f32,
- &ClGemmDefaultConfigReshapedRhsOnlyValhall::configure_G78_f16,
- &ClGemmDefaultConfigReshapedRhsOnlyValhall::configure_G77_u8);
-
- ConfigurationFunctionExecutorPtr func = nullptr;
-
- switch(_target)
- {
- case GPUTarget::G78:
- func = configs_G78.get_function(data_type);
- break;
- case GPUTarget::G77:
- default:
- func = configs_G77.get_function(data_type);
- break;
- }
-
- ARM_COMPUTE_ERROR_ON_MSG(func == nullptr, "Data type not supported for GEMM");
- return (this->*func)(m, n, k, b);
-}
-
-std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> ClGemmDefaultConfigReshapedRhsOnlyValhall::configure_G77_f32(unsigned int m, unsigned int n, unsigned int k, unsigned int b)
-{
- if(m == 1)
- {
- const float r_mn = static_cast<float>(m) / static_cast<float>(n);
- const float r_mk = static_cast<float>(m) / static_cast<float>(k);
-
- if(r_mk <= 0.0064484127797186375)
- {
- if(r_mn <= 0.0028273810748942196)
- {
- GEMMLHSMatrixInfo lhs_info_buf;
- GEMMRHSMatrixInfo rhs_info_buf;
- GEMMLHSMatrixInfo lhs_info_img;
- GEMMRHSMatrixInfo rhs_info_img;
-
- const unsigned int h0 = std::max(n / 4, 1U);
- std::tie(lhs_info_img, rhs_info_img) = configure_lhs_rhs_info(m, n, 1, 4, 8, 1, 16, 0, 1, 0, 0, 1);
- std::tie(lhs_info_buf, rhs_info_buf) = configure_lhs_rhs_info(m, n, 1, 4, 4, 1, h0, 0, 1, 0, 1, 0);
-
- return select_lhs_rhs_info(std::make_pair(lhs_info_img, rhs_info_img),
- std::make_pair(lhs_info_buf, rhs_info_buf),
- n, k, b, DataType::F32);
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 1, 2, 16, 1, 8, 0, 1, 0, 0, 0);
- }
- }
- else
- {
- if(r_mk <= 0.020312500186264515)
- {
- return configure_lhs_rhs_info(m, n, 1, 2, 16, 1, 4, 0, 1, 0, 0, 0);
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 1, 4, 16, 1, 16, 0, 1, 0, 1, 0);
- }
- }
- }
- else
- {
- const float r_mn = static_cast<float>(m) / static_cast<float>(n);
- const float workload = (static_cast<float>(m) * static_cast<float>(n) * static_cast<float>(b)) / 20.0f;
- const float r_mk = static_cast<float>(m) / static_cast<float>(k);
-
- if(workload <= 1999.2000122070312)
- {
- if(workload <= 747.1999816894531)
- {
- return configure_lhs_rhs_info(m, n, 2, 2, 4, 1, 8, 0, 1, 0, 1, 0);
- }
- else
- {
- GEMMLHSMatrixInfo lhs_info_buf;
- GEMMRHSMatrixInfo rhs_info_buf;
- GEMMLHSMatrixInfo lhs_info_img;
- GEMMRHSMatrixInfo rhs_info_img;
- std::tie(lhs_info_img, rhs_info_img) = configure_lhs_rhs_info(m, n, 2, 4, 8, 1, 2, 0, 0, 0, 1, 1);
- std::tie(lhs_info_buf, rhs_info_buf) = configure_lhs_rhs_info(m, n, 2, 2, 4, 1, 8, 0, 1, 0, 1, 0);
-
- return select_lhs_rhs_info(std::make_pair(lhs_info_img, rhs_info_img),
- std::make_pair(lhs_info_buf, rhs_info_buf),
- n, k, b, DataType::F32);
- }
- }
- else
- {
- if(r_mn <= 0.03348214365541935)
- {
- if(r_mk <= 0.028125000186264515)
- {
- return configure_lhs_rhs_info(m, n, 2, 2, 4, 1, 8, 0, 1, 0, 1, 0);
- }
- else
- {
- GEMMLHSMatrixInfo lhs_info_buf;
- GEMMRHSMatrixInfo rhs_info_buf;
- GEMMLHSMatrixInfo lhs_info_img;
- GEMMRHSMatrixInfo rhs_info_img;
- std::tie(lhs_info_img, rhs_info_img) = configure_lhs_rhs_info(m, n, 2, 4, 8, 1, 2, 0, 0, 0, 1, 1);
- std::tie(lhs_info_buf, rhs_info_buf) = configure_lhs_rhs_info(m, n, 2, 2, 4, 1, 8, 0, 1, 0, 1, 0);
-
- return select_lhs_rhs_info(std::make_pair(lhs_info_img, rhs_info_img),
- std::make_pair(lhs_info_buf, rhs_info_buf),
- n, k, b, DataType::F32);
- }
- }
- else
- {
- GEMMLHSMatrixInfo lhs_info_buf;
- GEMMRHSMatrixInfo rhs_info_buf;
- GEMMLHSMatrixInfo lhs_info_img;
- GEMMRHSMatrixInfo rhs_info_img;
- std::tie(lhs_info_img, rhs_info_img) = configure_lhs_rhs_info(m, n, 4, 4, 4, 1, 2, 0, 1, 0, 0, 1);
- std::tie(lhs_info_buf, rhs_info_buf) = configure_lhs_rhs_info(m, n, 4, 4, 4, 1, 16, 0, 1, 0, 1, 0);
-
- return select_lhs_rhs_info(std::make_pair(lhs_info_img, rhs_info_img),
- std::make_pair(lhs_info_buf, rhs_info_buf),
- n, k, b, DataType::F32);
- }
- }
- }
-}
-
-std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> ClGemmDefaultConfigReshapedRhsOnlyValhall::configure_G77_f16(unsigned int m, unsigned int n, unsigned int k, unsigned int b)
-{
- ARM_COMPUTE_UNUSED(k);
- ARM_COMPUTE_UNUSED(b);
-
- if(m == 1)
- {
- const unsigned int h0 = std::max(n / 2, 1U);
- if(n <= 836.0)
- {
- return configure_lhs_rhs_info(m, n, 1, 2, 16, 1, h0, 0, 1, 0, 1, 0);
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 1, 2, 8, 1, h0, 0, 1, 0, 1, 0);
- }
- }
- else if(m < 128)
- {
- const int h0 = std::max(std::min(static_cast<int>(n / 4), static_cast<int>(256)), static_cast<int>(1));
- if(k >= 512)
- {
- return configure_lhs_rhs_info(m, n, 2, 4, 16, 1, h0, 0, 1, 0, 0);
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 2, 4, 8, 1, h0, 0, 1, 0, 0);
- }
- }
- else
- {
- const int h0 = std::max(std::min(static_cast<int>(n / 4), static_cast<int>(256)), static_cast<int>(1));
- if(n >= 64)
- {
- return configure_lhs_rhs_info(m, n, 4, 8, 4, 1, h0, 0, 1, 0, 0);
- }
- else
- {
- if(k >= 512)
- {
- return configure_lhs_rhs_info(m, n, 2, 4, 16, 1, h0, 0, 1, 0, 0);
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 2, 4, 8, 1, h0, 0, 1, 0, 0);
- }
- }
- }
-}
-
-std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> ClGemmDefaultConfigReshapedRhsOnlyValhall::configure_G77_u8(unsigned int m, unsigned int n, unsigned int k, unsigned int b)
-{
- ARM_COMPUTE_UNUSED(k);
- ARM_COMPUTE_UNUSED(b);
-
- if(m == 1)
- {
- const unsigned int h0 = std::max(n / 2, 1U);
- return configure_lhs_rhs_info(m, n, 1, 4, 16, 1, h0, 0, 1, 0, 1);
- }
- else
- {
- const int h0 = std::max(std::min(static_cast<int>(n / 4), static_cast<int>(256)), static_cast<int>(1));
- if(m >= 28)
- {
- return configure_lhs_rhs_info(m, n, 4, 4, 16, 1, h0, 0, 1, 0, 1);
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 2, 4, 16, 1, h0, 0, 1, 0, 1);
- }
- }
-}
-
-std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> ClGemmDefaultConfigReshapedRhsOnlyValhall::configure_G78_f32(unsigned int m, unsigned int n, unsigned int k, unsigned int b)
-{
- const float r_mn = static_cast<float>(m) / static_cast<float>(n);
- const float r_mk = static_cast<float>(m) / static_cast<float>(k);
- const float r_nk = static_cast<float>(n) / static_cast<float>(k);
- const float workload = (static_cast<float>(m) * static_cast<float>(n) * static_cast<float>(b)) / 20.0f;
-
- if(m == 1)
- {
- if(workload <= 278.7000f)
- {
- if(workload <= 7.5000f)
- {
- return configure_lhs_rhs_info(m, n, 1, 2, 8, 1, 2, 0, 1, 1, 0, 0);
- }
- else
- {
- if(r_mn <= 0.0031f)
- {
- if(workload <= 256.6000f)
- {
- if(workload <= 16.7500f)
- {
- if(r_nk <= 1.6671f)
- {
- return configure_lhs_rhs_info(m, n, 1, 2, 2, 1, 32, 0, 0, 0, 1, 0);
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 1, 2, 8, 1, 2, 0, 1, 1, 0, 0);
- }
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 1, 2, 2, 1, 32, 0, 0, 0, 1, 0);
- }
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 1, 2, 2, 1, 32, 0, 0, 0, 1, 0);
- }
- }
- else
- {
- if(r_mk <= 0.0027f)
- {
- if(r_mk <= 0.0014f)
- {
- return configure_lhs_rhs_info(m, n, 1, 2, 2, 1, 32, 0, 0, 0, 1, 0);
- }
- else
- {
- if(workload <= 8.9500f)
- {
- return configure_lhs_rhs_info(m, n, 1, 2, 8, 1, 2, 0, 1, 1, 0, 0);
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 1, 2, 2, 1, 32, 0, 0, 0, 1, 0);
- }
- }
- }
- else
- {
- if(workload <= 14.1500f)
- {
- return configure_lhs_rhs_info(m, n, 1, 2, 8, 1, 2, 0, 1, 1, 0, 0);
- }
- else
- {
- if(r_mk <= 0.0041f)
- {
- return configure_lhs_rhs_info(m, n, 1, 2, 2, 1, 32, 0, 0, 0, 1, 0);
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 1, 2, 8, 1, 2, 0, 1, 1, 0, 0);
- }
- }
- }
- }
- }
- }
- else
- {
- if(workload <= 363.7000f)
- {
- if(r_mk <= 0.0031f)
- {
- return configure_lhs_rhs_info(m, n, 1, 4, 2, 1, 32, 0, 1, 0, 1, 0);
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 1, 4, 4, 1, 32, 0, 1, 0, 1, 0);
- }
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 1, 4, 2, 1, 32, 0, 1, 0, 1, 0);
- }
- }
- }
- else
- {
- if(workload <= 1384.8000f)
- {
- if(workload <= 704.0000f)
- {
- return configure_lhs_rhs_info(m, n, 2, 2, 4, 1, 32, 0, 1, 0, 1, 0);
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 2, 4, 8, 1, 4, 0, 0, 0, 1, 1);
- }
- }
- else
- {
- if(workload <= 16761.6006f)
- {
- if(r_mn <= 187.1250f)
- {
- return configure_lhs_rhs_info(m, n, 4, 4, 4, 1, 16, 0, 0, 0, 1, 1);
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 2, 4, 8, 1, 4, 0, 0, 0, 1, 1);
- }
- }
- else
- {
- if(r_mk <= 432.4630f)
- {
- return configure_lhs_rhs_info(m, n, 5, 4, 4, 1, 16, 0, 0, 0, 1, 1);
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 2, 4, 4, 1, 16, 0, 1, 0, 1, 1);
- }
- }
- }
- }
-}
-
-std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> ClGemmDefaultConfigReshapedRhsOnlyValhall::configure_G78_f16(unsigned int m, unsigned int n, unsigned int k, unsigned int b)
-{
- const float r_mn = static_cast<float>(m) / static_cast<float>(n);
- const float r_mk = static_cast<float>(m) / static_cast<float>(k);
- const float r_nk = static_cast<float>(n) / static_cast<float>(k);
- const float workload = (static_cast<float>(m) * static_cast<float>(n) * static_cast<float>(b)) / 20.0f;
-
- if(m == 1)
- {
- if(r_mn <= 0.0038f)
- {
- if(workload <= 353.9000f)
- {
- if(workload <= 278.7000f)
- {
- return configure_lhs_rhs_info(m, n, 1, 2, 4, 1, 32, 0, 0, 1, 0, 0);
- }
- else
- {
- if(r_mk <= 0.0004f)
- {
- return configure_lhs_rhs_info(m, n, 1, 2, 4, 1, 32, 0, 0, 1, 0, 0);
- }
- else
- {
- if(r_mk <= 0.0030f)
- {
- return configure_lhs_rhs_info(m, n, 1, 8, 4, 1, 8, 0, 1, 1, 0, 1);
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 1, 2, 4, 1, 32, 0, 0, 1, 0, 0);
- }
- }
- }
- }
- else
- {
- if(r_nk <= 1.9384f)
- {
- return configure_lhs_rhs_info(m, n, 1, 2, 4, 1, 32, 0, 0, 1, 0, 0);
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 1, 8, 4, 1, 8, 0, 1, 1, 0, 1);
- }
- }
- }
- else
- {
- if(r_nk <= 1.0368f)
- {
- return configure_lhs_rhs_info(m, n, 1, 2, 16, 1, 32, 0, 0, 1, 0, 0);
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 1, 2, 4, 1, 32, 0, 0, 1, 0, 0);
- }
- }
- }
- else
- {
- if(workload <= 1422.4000f)
- {
- if(workload <= 704.0000f)
- {
- return configure_lhs_rhs_info(m, n, 2, 2, 8, 1, 32, 0, 0, 1, 0, 0);
- }
- else
- {
- if(workload <= 1197.6000f)
- {
- return configure_lhs_rhs_info(m, n, 2, 4, 8, 1, 8, 0, 1, 1, 0, 1);
- }
- else
- {
- if(workload <= 1241.6000f)
- {
- return configure_lhs_rhs_info(m, n, 2, 8, 8, 1, 16, 0, 1, 1, 0, 0);
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 2, 4, 8, 1, 8, 0, 1, 1, 0, 1);
- }
- }
- }
- }
- else
- {
- if(workload <= 2769.6000f)
- {
- if(workload <= 1846.4000f)
- {
- if(r_mn <= 2.4927f)
- {
- return configure_lhs_rhs_info(m, n, 2, 8, 8, 1, 16, 0, 1, 1, 0, 0);
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 4, 4, 8, 1, 32, 0, 1, 1, 0, 0);
- }
- }
- else
- {
- if(r_mn <= 0.6261f)
- {
- return configure_lhs_rhs_info(m, n, 4, 4, 8, 1, 32, 0, 1, 1, 0, 0);
- }
- else
- {
- if(r_mk <= 3.4453f)
- {
- if(r_mn <= 1.4135f)
- {
- return configure_lhs_rhs_info(m, n, 2, 8, 8, 1, 16, 0, 1, 1, 0, 0);
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 4, 4, 8, 1, 32, 0, 1, 1, 0, 0);
- }
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 2, 8, 8, 1, 16, 0, 1, 1, 0, 0);
- }
- }
- }
- }
- else
- {
- if(r_nk <= 0.0302f)
- {
- return configure_lhs_rhs_info(m, n, 2, 4, 8, 1, 8, 0, 1, 1, 0, 1);
- }
- else
- {
- if(r_mk <= 181.3750f)
- {
- return configure_lhs_rhs_info(m, n, 4, 4, 8, 1, 32, 0, 1, 1, 0, 0);
- }
- else
- {
- if(workload <= 28035.2002f)
- {
- return configure_lhs_rhs_info(m, n, 2, 8, 8, 1, 16, 0, 1, 1, 0, 0);
- }
- else
- {
- if(r_mk <= 808.6667f)
- {
- return configure_lhs_rhs_info(m, n, 4, 4, 8, 1, 32, 0, 1, 1, 0, 0);
- }
- else
- {
- return configure_lhs_rhs_info(m, n, 2, 8, 8, 1, 16, 0, 1, 1, 0, 0);
- }
- }
- }
- }
- }
- }
- }
-}
-} // namespace gemm
-} // namespace kernels
-} // namespace opencl
-} // namespace arm_compute
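Worked example for configure_G78_f16 above (illustrative sizes): with m = 128, n = 128 and b = 1 the workload is (128 * 128 * 1) / 20 = 819.2; since m != 1, it falls under the 1422.4 threshold, above 704.0, and under 1197.6, so the method returns configure_lhs_rhs_info(m, n, 2, 4, 8, 1, 8, 0, 1, 1, 0, 1).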
diff --git a/src/core/gpu/cl/kernels/gemm/reshaped_only_rhs/ClGemmReshapedOnlyRhsKernelConfig.h b/src/core/gpu/cl/kernels/gemm/reshaped_only_rhs/ClGemmReshapedOnlyRhsKernelConfig.h
deleted file mode 100644
index 8fd71276a0..0000000000
--- a/src/core/gpu/cl/kernels/gemm/reshaped_only_rhs/ClGemmReshapedOnlyRhsKernelConfig.h
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * Copyright (c) 2019-2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_CL_GEMM_RESHAPED_ONLY_RHS_KERNEL_CONFIGURATION_H
-#define ARM_COMPUTE_CL_GEMM_RESHAPED_ONLY_RHS_KERNEL_CONFIGURATION_H
-
-#include "src/core/gpu/cl/kernels/gemm/IClGemmKernelConfig.h"
-#include "src/core/gpu/cl/kernels/gemm/reshaped_only_rhs/ClGemmDefaultConfigReshapedRhsOnlyBifrost.h"
-#include "src/core/gpu/cl/kernels/gemm/reshaped_only_rhs/ClGemmDefaultConfigReshapedRhsOnlyValhall.h"
-
-#include <memory>
-
-namespace arm_compute
-{
-namespace opencl
-{
-namespace kernels
-{
-namespace gemm
-{
-/** CLGEMMReshapedOnlyRHS factory class */
-class ClGemmReshapedOnlyRhsKernelConfigurationFactory final
-{
-public:
- /** Static method to call the CLGEMMReshapedOnlyRHS kernel configuration class according to the GPU target
- *
- * @param[in] gpu GPU target
- *
- * @return CLGEMMReshapedOnlyRHS kernel configuration class
- */
- static std::unique_ptr<IClGemmKernelConfig> create(GPUTarget gpu)
- {
- switch(get_arch_from_target(gpu))
- {
- case GPUTarget::MIDGARD:
- case GPUTarget::BIFROST:
- return std::make_unique<ClGemmDefaultConfigReshapedRhsOnlyBifrost>(gpu);
- case GPUTarget::VALHALL:
- return std::make_unique<ClGemmDefaultConfigReshapedRhsOnlyValhall>(gpu);
- default:
- ARM_COMPUTE_ERROR("Not supported GPU target");
- }
- }
-};
-} // namespace gemm
-} // namespace kernels
-} // namespace opencl
-} // namespace arm_compute
-#endif /* ARM_COMPUTE_CL_GEMM_RESHAPED_ONLY_RHS_KERNEL_CONFIGURATION_H */
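A minimal sketch of how the factory above is typically used, assuming the caller already knows its GPUTarget (the target and sizes here are illustrative):

    // Illustrative only: pick the per-architecture heuristic and query it.
    std::unique_ptr<IClGemmKernelConfig> config =
        ClGemmReshapedOnlyRhsKernelConfigurationFactory::create(GPUTarget::G77);

    GEMMLHSMatrixInfo lhs_info;
    GEMMRHSMatrixInfo rhs_info;
    std::tie(lhs_info, rhs_info) = config->configure(24U, 88U, 16U, 1U, DataType::F16);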