-rw-r--r--   Android.bp                                                              |   2
-rw-r--r--   SConscript                                                              |   1
-rw-r--r--   arm_compute/core/utils/misc/ShapeCalculator.h                           |  29
-rw-r--r--   filelist.json                                                           |   1
-rw-r--r--   src/core/CL/cl_kernels/nhwc/indirect_convolution.cl                     |  93
-rw-r--r--   src/gpu/cl/ClKernelLibrary.cpp                                          |   5
-rw-r--r--   src/gpu/cl/kernels/ClIndirectConv2dAddressPrecalculationKernel.cpp      | 158
-rw-r--r--   src/gpu/cl/kernels/ClIndirectConv2dAddressPrecalculationKernel.h        |  80
-rw-r--r--   tests/validation/CL/IndirectConv2dAddressPrecalculation.cpp             |  89
-rw-r--r--   tests/validation/fixtures/IndirectConv2dAddressPrecalculationFixture.h  | 123
-rw-r--r--   tests/validation/reference/IndirectConv2dAddressPrecalculation.cpp      |  99
-rw-r--r--   tests/validation/reference/IndirectConv2dAddressPrecalculation.h        |  44
12 files changed, 724 insertions(+), 0 deletions(-)
diff --git a/Android.bp b/Android.bp
index 69a0affdb2..90d34179d6 100644
--- a/Android.bp
+++ b/Android.bp
@@ -108,6 +108,7 @@ opencl_srcs = [
"src/core/CL/cl_kernels/nhwc/dwc_native_fp_nhwc.cl",
"src/core/CL/cl_kernels/nhwc/dwc_native_quantized_nhwc.cl",
"src/core/CL/cl_kernels/nhwc/im2col.cl",
+ "src/core/CL/cl_kernels/nhwc/indirect_convolution.cl",
"src/core/CL/cl_kernels/nhwc/normalization_layer.cl",
"src/core/CL/cl_kernels/nhwc/normalize_planar_yuv_layer.cl",
"src/core/CL/cl_kernels/nhwc/normalize_planar_yuv_layer_quantized.cl",
@@ -637,6 +638,7 @@ cc_library_static {
"src/gpu/cl/kernels/ClGemmReshapeRhsMatrixKernel.cpp",
"src/gpu/cl/kernels/ClHeightConcatenateKernel.cpp",
"src/gpu/cl/kernels/ClIm2ColKernel.cpp",
+ "src/gpu/cl/kernels/ClIndirectConv2dAddressPrecalculationKernel.cpp",
"src/gpu/cl/kernels/ClMulKernel.cpp",
"src/gpu/cl/kernels/ClPermuteKernel.cpp",
"src/gpu/cl/kernels/ClPool2dKernel.cpp",
diff --git a/SConscript b/SConscript
index 908fbff626..45816cad4a 100644
--- a/SConscript
+++ b/SConscript
@@ -449,6 +449,7 @@ if env['opencl'] and env['embed_kernels']:
'src/core/CL/cl_kernels/nhwc/dwc_native_fp_nhwc.cl',
'src/core/CL/cl_kernels/nhwc/dwc_native_quantized_nhwc.cl',
'src/core/CL/cl_kernels/nhwc/im2col.cl',
+ 'src/core/CL/cl_kernels/nhwc/indirect_convolution.cl',
'src/core/CL/cl_kernels/nhwc/normalization_layer.cl',
'src/core/CL/cl_kernels/nhwc/normalize_planar_yuv_layer.cl',
'src/core/CL/cl_kernels/nhwc/normalize_planar_yuv_layer_quantized.cl',
diff --git a/arm_compute/core/utils/misc/ShapeCalculator.h b/arm_compute/core/utils/misc/ShapeCalculator.h
index 84c0ee5034..9e7c981814 100644
--- a/arm_compute/core/utils/misc/ShapeCalculator.h
+++ b/arm_compute/core/utils/misc/ShapeCalculator.h
@@ -746,6 +746,35 @@ inline TensorShape compute_deep_convolution_shape(const ITensorInfo &input, cons
return compute_deep_convolution_shape(input.tensor_shape(), input.data_layout(), weights.tensor_shape(), conv_info);
}
+/** Calculate the indirect buffer output shape used by the indirect convolution function
+ *
+ * @param[in] input_shape Input tensor shape
+ * @param[in] input_data_layout Input data layout
+ * @param[in] weights_shape Weights tensor shape
+ * @param[in] conv_info Contains padding and stride information
+ * @param[in] desc Contains the direct/indirect convolution compute arguments, such as the tiling dimensions
+ *
+ * @return the calculated shape
+ */
+inline TensorShape compute_indirect_buffer_shape(const TensorShape &input_shape, DataLayout input_data_layout, const TensorShape &weights_shape, const PadStrideInfo &conv_info,
+ const DirectConvComputeKernelInfo &desc)
+{
+ ARM_COMPUTE_ERROR_ON_MSG(input_data_layout != DataLayout::NHWC, "The data layout can only be NHWC");
+ ARM_COMPUTE_ERROR_ON_MSG(desc.m0 <= 0 || desc.m0 > 8, "M0 can only be greater than 0 and less than or equal to 8");
+
+ const unsigned int m0 = desc.m0;
+ const unsigned int kw = weights_shape[1];
+ const unsigned int kh = weights_shape[2];
+
+ TensorShape output_conv2d_shape = compute_deep_convolution_shape(input_shape, input_data_layout, weights_shape, conv_info);
+
+ const unsigned int output_w = m0 * kw * kh;
+ const unsigned int output_h = DIV_CEIL(output_conv2d_shape[1] * output_conv2d_shape[2], m0);
+ const unsigned int output_b = output_conv2d_shape[3];
+
+ return TensorShape(output_w, output_h, output_b);
+}
+
/** Calculate the min/max shape output shape of a tensor
*
* @param[in] input Input tensor info
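
For illustration, here is a minimal standalone C++ sketch of the indirect-buffer shape arithmetic introduced above. The configuration used (64x64 NHWC input, 3x3 kernel, stride 1, pad 1, batch 1, M0 = 4) is an illustrative assumption and is not taken from this patch.

#include <cstdio>

int main()
{
    // Illustrative conv2d configuration (NHWC): 64x64 input, 3x3 kernel, stride 1, pad 1, batch 1
    const unsigned int src_w = 64, src_h = 64;
    const unsigned int kw = 3, kh = 3;
    const unsigned int stride_x = 1, stride_y = 1;
    const unsigned int pad_left = 1, pad_top = 1;
    const unsigned int batch = 1;
    const unsigned int m0 = 4; // rows of the conv2d output processed by each work-item

    // Spatial size of the conv2d output; matches compute_deep_convolution_shape() for this
    // simple stride/pad combination
    const unsigned int conv_out_w = (src_w + 2 * pad_left - kw) / stride_x + 1; // 64
    const unsigned int conv_out_h = (src_h + 2 * pad_top - kh) / stride_y + 1;  // 64

    // Indirect buffer shape, as computed by compute_indirect_buffer_shape()
    const unsigned int buf_w = m0 * kw * kh;                            // 4 * 3 * 3 = 36
    const unsigned int buf_h = (conv_out_w * conv_out_h + m0 - 1) / m0; // ceil(4096 / 4) = 1024
    const unsigned int buf_b = batch;                                   // 1

    std::printf("indirect buffer shape: [%u, %u, %u]\n", buf_w, buf_h, buf_b);
    return 0;
}
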
diff --git a/filelist.json b/filelist.json
index 21bc35c644..513ee6e393 100644
--- a/filelist.json
+++ b/filelist.json
@@ -288,6 +288,7 @@
"src/gpu/cl/kernels/ClWinogradInputTransformKernel.cpp",
"src/gpu/cl/kernels/ClWinogradOutputTransformKernel.cpp",
"src/gpu/cl/kernels/ClIm2ColKernel.cpp",
+ "src/gpu/cl/kernels/ClIndirectConv2dAddressPrecalculationKernel.cpp",
"src/gpu/cl/kernels/ClCol2ImKernel.cpp",
"src/gpu/cl/operators/ClConv2d.cpp",
"src/gpu/cl/operators/ClDirectConv2d.cpp",
diff --git a/src/core/CL/cl_kernels/nhwc/indirect_convolution.cl b/src/core/CL/cl_kernels/nhwc/indirect_convolution.cl
new file mode 100644
index 0000000000..07c7212e77
--- /dev/null
+++ b/src/core/CL/cl_kernels/nhwc/indirect_convolution.cl
@@ -0,0 +1,93 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "helpers.h"
+#include "tile_helpers.h"
+
+//! @cond Doxygen_Suppress
+/** OpenCL kernel to compute the indirect convolution 2d indirect buffer.
+ *
+ * @note The convolution padding (left and top) must be passed at compile time using -DPAD_LEFT and -DPAD_TOP (e.g. -DPAD_LEFT=2, -DPAD_TOP=2)
+ * @note The convolution strides must be passed at compile time using -DSTRIDE_X and -DSTRIDE_Y (e.g. -DSTRIDE_X=2, -DSTRIDE_Y=2)
+ * @note The kernel width must be passed at compile time using -DWEI_CONV_WIDTH (e.g. -DWEI_CONV_WIDTH=9)
+ * @note The spatial dimensions of the source tensor used by conv2d must be passed at compile time using -DSRC_CONV_WIDTH and -DSRC_CONV_HEIGHT (e.g. -DSRC_CONV_WIDTH=96, -DSRC_CONV_HEIGHT=64)
+ * @note The width dimension of the destination tensor produced by conv2d must be passed at compile time using -DDST_CONV_WIDTH (e.g. -DDST_CONV_WIDTH=96)
+ * @note The tensor type ("BUFFER" only) of the destination tensor must be passed at compile time using -DDST_TENSOR_TYPE (e.g. -DDST_TENSOR_TYPE=BUFFER)
+ * @note The data type of the destination tensor must be passed at compile time using -DDST_DATA_TYPE (e.g. -DDST_DATA_TYPE=float)
+ * @note The number of M0 rows (width*height) to process must be passed at compile time using -DM0 (e.g. -DM0=2)
+ * - M0 = 1, 2, 3, 4, 5, 6, 7, and 8
+ *
+ * @param[out] dst_img CLImage object to the destination tensor (DST_TENSOR_TYPE=IMAGE only)
+ * @param[out] dst_ptr Pointer to the destination tensor. Supported data type: INT32
+ * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] dst_stride_w Stride of the destination tensor in W dimension (in bytes)
+ * @param[in] dst_c The size of the channels dimension of the destination tensor
+ * @param[in] dst_w The size of the width dimension of the destination tensor
+ * @param[in] dst_h The size of the height dimension of the destination tensor
+ * @param[in] dst_n The size of the batches dimension of the destination tensor
+ * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ */
+//! @endcond
+__kernel void indirect_convolution_address_precalculation(
+ TENSOR4D_T(dst, DST_TENSOR_TYPE))
+{
+ const int x = get_global_id(0);
+ const int y = get_global_id(1);
+ const int z = get_global_id(2);
+
+ // Note: WIDTH = M0 x KernelWidth x KernelHeight
+
+ // m index
+ const int mi = x % M0;
+ // Kernel index
+ const int ki = x / M0;
+ // Kernel width coordinate
+ const int xk = ki % WEI_CONV_WIDTH;
+    // Kernel height coordinate
+ const int yk = ki / WEI_CONV_WIDTH;
+
+ TILE(DST_DATA_TYPE, 1, 1, xi);
+ TILE(DST_DATA_TYPE, 1, 1, yi);
+ TILE(DST_DATA_TYPE, 1, 1, my);
+
+ const int mout = y * M0;
+
+ xi[0].s[0] = ((mout + mi) % DST_CONV_WIDTH) * STRIDE_X;
+ yi[0].s[0] = ((mout + mi) / DST_CONV_WIDTH) * STRIDE_Y;
+ xi[0].s[0] -= PAD_LEFT;
+ yi[0].s[0] -= PAD_TOP;
+
+ const int x_s = xi[0].s[0] + xk;
+ const int y_s = yi[0].s[0] + yk;
+ my[0].s[0] = x_s + y_s * SRC_CONV_WIDTH;
+ my[0].s[0] = my[0].s[0] + z * (int)(SRC_CONV_WIDTH * SRC_CONV_HEIGHT);
+ my[0].s[0] = select(-1, my[0].s[0], x_s >= 0);
+ my[0].s[0] = select(-1, my[0].s[0], x_s < SRC_CONV_WIDTH);
+ my[0].s[0] = select(-1, my[0].s[0], y_s >= 0);
+ my[0].s[0] = select(-1, my[0].s[0], y_s < SRC_CONV_HEIGHT);
+
+ VSTORE(1)
+ (my[0].s[0], 0, (__global DST_DATA_TYPE *)(dst_ptr + dst_offset_first_element_in_bytes + x * sizeof(DST_DATA_TYPE) + y * dst_stride_y + z * dst_stride_z));
+}
\ No newline at end of file
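
To make the compile-time interface listed in the notes above concrete, the defines below are what one would pass for the same illustrative configuration as earlier (64x64 NHWC input, 3x3 kernel, stride 1, pad 1, M0 = 4); in practice they are assembled automatically by ClIndirectConv2dAddressPrecalculationKernel::configure(), added later in this patch. DST_DATA_TYPE is int because the indirect buffer holds S32 offsets.

-DSRC_CONV_WIDTH=64 -DSRC_CONV_HEIGHT=64 -DDST_CONV_WIDTH=64 -DWEI_CONV_WIDTH=3 \
-DSTRIDE_X=1 -DSTRIDE_Y=1 -DPAD_LEFT=1 -DPAD_TOP=1 -DM0=4 \
-DDST_TENSOR_TYPE=BUFFER -DDST_DATA_TYPE=int
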
diff --git a/src/gpu/cl/ClKernelLibrary.cpp b/src/gpu/cl/ClKernelLibrary.cpp
index 4e036399db..0ff7dfe1f3 100644
--- a/src/gpu/cl/ClKernelLibrary.cpp
+++ b/src/gpu/cl/ClKernelLibrary.cpp
@@ -433,6 +433,7 @@ const std::map<std::string, std::string> ClKernelLibrary::_kernel_program_map =
{ "im2col3x3_nhwc", "nhwc/im2col.cl" },
{ "im2col9x9_nhwc", "nhwc/im2col.cl" },
{ "im2col_generic_nhwc", "nhwc/im2col.cl" },
+ { "indirect_convolution_address_precalculation", "nhwc/indirect_convolution.cl" },
{ "normalization_layer_cross_map_nhwc", "nhwc/normalization_layer.cl" },
{ "normalization_layer_in_map_nhwc", "nhwc/normalization_layer.cl" },
{ "normalize_planar_yuv_layer_nhwc", "nhwc/normalize_planar_yuv_layer.cl" },
@@ -908,6 +909,10 @@ const std::map<std::string, std::string> ClKernelLibrary::_program_source_map =
#include "./cl_kernels/nhwc/im2col.clembed"
},
{
+ "nhwc/indirect_convolution.cl",
+#include "./cl_kernels/nhwc/indirect_convolution.clembed"
+ },
+ {
"nhwc/batchnormalization_layer.cl",
#include "./cl_kernels/nhwc/batchnormalization_layer.clembed"
},
diff --git a/src/gpu/cl/kernels/ClIndirectConv2dAddressPrecalculationKernel.cpp b/src/gpu/cl/kernels/ClIndirectConv2dAddressPrecalculationKernel.cpp
new file mode 100644
index 0000000000..95186fe106
--- /dev/null
+++ b/src/gpu/cl/kernels/ClIndirectConv2dAddressPrecalculationKernel.cpp
@@ -0,0 +1,158 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "src/gpu/cl/kernels/ClIndirectConv2dAddressPrecalculationKernel.h"
+
+#include "arm_compute/core/CL/CLKernelLibrary.h"
+#include "arm_compute/core/CL/ICLTensor.h"
+#include "arm_compute/core/ITensor.h"
+#include "arm_compute/core/KernelDescriptors.h"
+#include "arm_compute/core/utils/misc/ShapeCalculator.h"
+#include "src/core/CL/CLValidate.h"
+#include "src/core/helpers/AutoConfiguration.h"
+#include "src/core/helpers/WindowHelpers.h"
+#include "support/Cast.h"
+#include "support/StringSupport.h"
+
+namespace arm_compute
+{
+namespace opencl
+{
+namespace kernels
+{
+namespace
+{
+Status validate_arguments(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *dst,
+ const PadStrideInfo &conv_info, const DirectConvComputeKernelInfo &desc)
+{
+ ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(src);
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src, 1, DataType::F16, DataType::F32);
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src, weights);
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_LAYOUT_NOT_IN(src, DataLayout::NHWC);
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(weights->dimension(0) != src->dimension(0), "Weights feature map dimension should match the respective src's one");
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(weights->num_dimensions() > 4, "Weights can be at most 4 dimensional");
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(desc.m0 <= 0 || desc.m0 > 8, "M0 can only be greater than 0 and less than or equal to 8");
+
+ // Checks performed when dst is configured
+ if(dst->total_size() != 0)
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(dst->tensor_shape(),
+ misc::shape_calculator::compute_indirect_buffer_shape(src->tensor_shape(),
+ src->data_layout(),
+ weights->tensor_shape(),
+ conv_info,
+ desc));
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(dst, 1, DataType::S32);
+ }
+
+ return Status{};
+}
+} // namespace
+
+ClIndirectConv2dAddressPrecalculationKernel::ClIndirectConv2dAddressPrecalculationKernel()
+{
+ _type = CLKernelType::ELEMENTWISE;
+}
+
+void ClIndirectConv2dAddressPrecalculationKernel::configure(const CLCompileContext &compile_context, ITensorInfo *src, ITensorInfo *weights, ITensorInfo *dst,
+ const PadStrideInfo &conv_info, const DirectConvComputeKernelInfo &desc)
+{
+ ARM_COMPUTE_ERROR_ON_NULLPTR(src, weights, dst);
+ ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(src, weights, dst, conv_info, desc));
+
+ constexpr unsigned int width_idx = 1;
+ constexpr unsigned int height_idx = 2;
+
+ // Get dst shape
+ TensorShape output_shape = misc::shape_calculator::compute_indirect_buffer_shape(src->tensor_shape(),
+ src->data_layout(),
+ weights->tensor_shape(),
+ conv_info,
+ desc);
+
+ TensorShape output_conv_shape = misc::shape_calculator::compute_deep_convolution_shape(*src, *weights, conv_info);
+
+    // Output auto initialization if not yet initialized
+ auto_init_if_empty(*dst, output_shape, 1, DataType::S32);
+
+ // Configure kernel window
+ Window win;
+
+ // Create window and update padding
+ win = calculate_max_window(output_shape, Steps(1));
+
+ ICLKernel::configure_internal(win);
+
+ std::stringstream kernel_name;
+ CLBuildOptions build_options;
+
+ kernel_name << "indirect_convolution_address_precalculation";
+
+ const unsigned int pad_left = conv_info.pad_left();
+ const unsigned int pad_top = conv_info.pad_top();
+ const unsigned int conv_stride_x = std::get<0>(conv_info.stride());
+ const unsigned int conv_stride_y = std::get<1>(conv_info.stride());
+ const auto dst_data_type = dst->data_type();
+
+ build_options.add_option("-DSRC_CONV_WIDTH=" + support::cpp11::to_string(src->dimension(width_idx)));
+ build_options.add_option("-DSRC_CONV_HEIGHT=" + support::cpp11::to_string(src->dimension(height_idx)));
+ build_options.add_option("-DDST_CONV_WIDTH=" + support::cpp11::to_string(output_conv_shape[width_idx]));
+ build_options.add_option("-DDST_CONV_HEIGHT=" + support::cpp11::to_string(output_conv_shape[height_idx]));
+ build_options.add_option("-DDST_TENSOR_TYPE=BUFFER");
+ build_options.add_option("-DDST_DATA_TYPE=" + get_cl_type_from_data_type(dst_data_type));
+ build_options.add_option("-DWEI_CONV_WIDTH=" + support::cpp11::to_string(weights->dimension(width_idx)));
+ build_options.add_option("-DSTRIDE_X=" + support::cpp11::to_string(conv_stride_x));
+ build_options.add_option("-DSTRIDE_Y=" + support::cpp11::to_string(conv_stride_y));
+ build_options.add_option("-DPAD_LEFT=" + support::cpp11::to_string(pad_left));
+ build_options.add_option("-DPAD_TOP=" + support::cpp11::to_string(pad_top));
+ build_options.add_option("-DM0=" + support::cpp11::to_string(desc.m0));
+
+ _kernel = create_kernel(compile_context, kernel_name.str(), build_options.options());
+
+ // Since this kernel should be called only once, we do not need to set the config_id for tuning
+}
+
+Status ClIndirectConv2dAddressPrecalculationKernel::validate(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *dst,
+ const PadStrideInfo &conv_info, const DirectConvComputeKernelInfo &desc)
+{
+ ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(src, weights, dst, conv_info, desc));
+ return Status{};
+}
+
+void ClIndirectConv2dAddressPrecalculationKernel::run_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue)
+{
+ ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
+ ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(IKernel::window(), window);
+
+ // Get initial windows
+ const Window slice = window.first_slice_window_3D();
+
+ auto dst = utils::cast::polymorphic_downcast<ICLTensor *>(tensors.get_tensor(TensorType::ACL_DST));
+
+ unsigned int idx = 0;
+ add_4d_tensor_nhwc_argument(idx, dst);
+ enqueue(queue, *this, slice);
+}
+} // namespace kernels
+} // namespace opencl
+} // namespace arm_compute
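
For orientation, below is a minimal usage sketch of the new kernel. It mirrors the flow of the validation fixture added later in this patch; the tensor shapes, the M0 value, and the explicit CLScheduler initialisation are illustrative assumptions rather than part of the patch.

#include "arm_compute/core/CL/CLKernelLibrary.h"
#include "arm_compute/core/Error.h"
#include "arm_compute/core/ITensorPack.h"
#include "arm_compute/core/KernelDescriptors.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "arm_compute/runtime/CL/CLTensor.h"
#include "src/gpu/cl/kernels/ClIndirectConv2dAddressPrecalculationKernel.h"

using namespace arm_compute;
using namespace arm_compute::opencl::kernels;

void precalculate_indirect_buffer()
{
    // Assumes a default CL context/queue is acceptable for this example
    CLScheduler::get().default_init();

    // Hypothetical NHWC shapes: [IFM, W, H, N] for the source, [IFM, Kw, Kh, OFM] for the weights
    TensorInfo src_info(TensorShape(16U, 64U, 64U, 1U), 1, DataType::F32, DataLayout::NHWC);
    TensorInfo wei_info(TensorShape(16U, 3U, 3U, 32U), 1, DataType::F32, DataLayout::NHWC);
    TensorInfo dst_info{}; // auto-initialized to the S32 indirect buffer shape by configure()

    DirectConvComputeKernelInfo desc;
    desc.m0 = 4; // only M0 is read by this kernel

    const PadStrideInfo conv_info(1, 1, 1, 1);

    ARM_COMPUTE_ERROR_THROW_ON(ClIndirectConv2dAddressPrecalculationKernel::validate(&src_info, &wei_info, &dst_info, conv_info, desc));

    ClIndirectConv2dAddressPrecalculationKernel kernel;
    kernel.configure(CLKernelLibrary::get().get_compile_context(), &src_info, &wei_info, &dst_info, conv_info, desc);

    // Only the destination (indirect buffer) tensor is needed at run time
    CLTensor dst;
    dst.allocator()->init(dst_info);
    dst.allocator()->allocate();

    ITensorPack pack = { { TensorType::ACL_DST, &dst } };
    kernel.run_op(pack, kernel.window(), CLScheduler::get().queue());
}
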
diff --git a/src/gpu/cl/kernels/ClIndirectConv2dAddressPrecalculationKernel.h b/src/gpu/cl/kernels/ClIndirectConv2dAddressPrecalculationKernel.h
new file mode 100644
index 0000000000..ff7f4be147
--- /dev/null
+++ b/src/gpu/cl/kernels/ClIndirectConv2dAddressPrecalculationKernel.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_CL_INDIRECT_CONV2D_ADDRESS_PRECALCULATION_KERNEL_H
+#define ARM_COMPUTE_CL_INDIRECT_CONV2D_ADDRESS_PRECALCULATION_KERNEL_H
+
+#include "src/core/common/Macros.h"
+#include "src/gpu/cl/ClCompileContext.h"
+#include "src/gpu/cl/IClKernel.h"
+
+namespace arm_compute
+{
+// Forward declarations
+struct DirectConvComputeKernelInfo;
+
+namespace opencl
+{
+namespace kernels
+{
+/** Interface for the indirect convolution 2d address precalculation kernel. */
+class ClIndirectConv2dAddressPrecalculationKernel : public IClKernel
+{
+public:
+ ClIndirectConv2dAddressPrecalculationKernel();
+ ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(ClIndirectConv2dAddressPrecalculationKernel);
+ /** Set the src, weights, biases and dst tensors info.
+ *
+     * @note When M0 is 5, 6, or 7, the kernel rounds M0 up to the nearest power of two (i.e. 8).
+     *       This allows the kernel to exploit native OpenCL stores.
+ *
+ * @param[in] compile_context The compile context to be used.
+ * @param[in] src The src tensor info to convolve. 3 lower dimensions represent a single src [IFM, width, height],
+ * while every optional dimension from 4 and above represent a batch of inputs. Data types supported: F16/F32.
+ * @param[in] weights Weights tensor info. Weights are 4D tensor with dimensions [IFM, kernel_x, kernel_y, OFM].
+ * The 1st dimension must be the same as the src's volume 1st dimension.
+ * Data type supported:Same as @p src.
+ * @param[out] dst Output tensor info where to store the precalculated offsets. Data types supported: S32.
+ * The output is a 3D tensor with the following dimensions: [M0 x Kw x Kh, ceil(M/M0), batch-size], where:
+ * Kw=Kernel width, Kh=Kernel height, M0=number of rows processed by each workitem, and M=dst_width x dst_height.
+ * @param[in] conv_info Contains padding and stride information described in @ref PadStrideInfo.
+ * @param[in] desc Direct convolution descriptor used to build the NHWC direct/indirect convolution kernel.
+ */
+ void configure(const CLCompileContext &compile_context, ITensorInfo *src, ITensorInfo *weights, ITensorInfo *dst,
+ const PadStrideInfo &conv_info, const DirectConvComputeKernelInfo &desc);
+ /** Static function to check if given info will lead to a valid configuration
+ *
+     * Similar to ClIndirectConv2dAddressPrecalculationKernel::configure()
+ *
+ * @return a status
+ */
+ static Status validate(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *dst,
+ const PadStrideInfo &conv_info, const DirectConvComputeKernelInfo &desc);
+
+ // Inherited methods overridden:
+ void run_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue) override;
+};
+} // namespace kernels
+} // namespace opencl
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_CL_INDIRECT_CONV2D_ADDRESS_PRECALCULATION_KERNEL_H */
diff --git a/tests/validation/CL/IndirectConv2dAddressPrecalculation.cpp b/tests/validation/CL/IndirectConv2dAddressPrecalculation.cpp
new file mode 100644
index 0000000000..67f70685d1
--- /dev/null
+++ b/tests/validation/CL/IndirectConv2dAddressPrecalculation.cpp
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/core/Types.h"
+#include "arm_compute/core/utils/misc/ShapeCalculator.h"
+#include "arm_compute/runtime/CL/CLTensor.h"
+#include "arm_compute/runtime/CL/CLTensorAllocator.h"
+#include "src/gpu/cl/kernels/ClIndirectConv2dAddressPrecalculationKernel.h"
+#include "tests/CL/CLAccessor.h"
+#include "tests/CL/Helper.h"
+#include "tests/PaddingCalculator.h"
+#include "tests/datasets/ShapeDatasets.h"
+#include "tests/framework/Asserts.h"
+#include "tests/framework/Macros.h"
+#include "tests/framework/datasets/Datasets.h"
+#include "tests/validation/Validation.h"
+#include "tests/validation/fixtures/IndirectConv2dAddressPrecalculationFixture.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+using namespace arm_compute::misc::shape_calculator;
+using namespace arm_compute::opencl::kernels;
+
+using CLIndirectConv2dAddressPrecalculation = CLSynthetizeOperator<ClIndirectConv2dAddressPrecalculationKernel>;
+
+using CLIndirectConv2dAddressPrecalculationFixture = IndirectConv2dAddressPrecalculationValidationFixture<CLTensor, CLAccessor, CLIndirectConv2dAddressPrecalculation>;
+
+// *INDENT-OFF*
+// clang-format off
+/** Data types */
+
+namespace
+{
+const auto src_w_values = framework::dataset::make("src_w", {91});
+const auto src_h_values = framework::dataset::make("src_h", {103});
+const auto src_b_values = framework::dataset::make("src_b", {1, 2});
+const auto wei_w_values = framework::dataset::make("wei_w", {3, 5});
+const auto wei_h_values = framework::dataset::make("wei_h", {1, 6});
+const auto pad_values = framework::dataset::make("pad", {1, 2, 3});
+const auto stride_values = framework::dataset::make("stride", {1, 2});
+const auto m0_values = framework::dataset::make("M0", { 1, 2, 4, 5, 7 });
+} // namespace
+
+TEST_SUITE(CL)
+TEST_SUITE(IndirectConv2dAddressPrecalculation)
+
+FIXTURE_DATA_TEST_CASE(RunSmall, CLIndirectConv2dAddressPrecalculationFixture, framework::DatasetMode::ALL,
+ combine(combine(combine(combine(combine(combine(combine(src_w_values,
+ src_h_values),
+ src_b_values),
+ wei_w_values),
+ wei_h_values),
+ pad_values),
+ stride_values),
+ m0_values))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference);
+}
+
+TEST_SUITE_END() // IndirectConv2dAddressPrecalculation
+TEST_SUITE_END() // CL
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
diff --git a/tests/validation/fixtures/IndirectConv2dAddressPrecalculationFixture.h b/tests/validation/fixtures/IndirectConv2dAddressPrecalculationFixture.h
new file mode 100644
index 0000000000..f595a8663d
--- /dev/null
+++ b/tests/validation/fixtures/IndirectConv2dAddressPrecalculationFixture.h
@@ -0,0 +1,123 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_TEST_INDIRECT_CONV2D_ADDRESS_PRECALCULATION_FIXTURE
+#define ARM_COMPUTE_TEST_INDIRECT_CONV2D_ADDRESS_PRECALCULATION_FIXTURE
+
+#include "arm_compute/core/TensorShape.h"
+#include "arm_compute/core/Types.h"
+#include "arm_compute/core/utils/misc/ShapeCalculator.h"
+#include "tests/Globals.h"
+#include "tests/framework/Fixture.h"
+#include "tests/validation/Helpers.h"
+#include "tests/validation/reference/IndirectConv2dAddressPrecalculation.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+using namespace arm_compute::misc::shape_calculator;
+
+template <typename TensorType, typename AccessorType, typename OperatorType>
+class IndirectConv2dAddressPrecalculationValidationFixture : public framework::Fixture
+{
+public:
+ template <typename...>
+ void setup(unsigned int src_w,
+ unsigned int src_h,
+ unsigned int src_b,
+ unsigned int wei_w,
+ unsigned int wei_h,
+ unsigned int pad,
+ unsigned int stride,
+ unsigned int m0)
+ {
+ DirectConvComputeKernelInfo desc;
+ desc.m0 = m0;
+ desc.n0 = 1; // Not used by the kernel
+ desc.k0 = 1; // Not used by the kernel
+ desc.export_weights_to_cl_image = false; // Not used by the kernel
+
+ const PadStrideInfo conv_info(stride, stride, pad, pad);
+
+ const TensorShape shape_conv_src(23, // The input channels are not used by the kernel
+ src_w,
+ src_h,
+ src_b);
+
+ const TensorShape shape_conv_wei(23, // The input channels are not used by the kernel
+ wei_w,
+ wei_h,
+ 23 // The output channels are not used by the kernel
+ );
+
+        // The result of the kernel does not depend on the data type, so we can fix it to FP16 for validation purposes
+ const DataType data_type = DataType::F16;
+
+ _target = compute_target(shape_conv_src, shape_conv_wei, data_type, conv_info, desc);
+ _reference = compute_reference(shape_conv_src, shape_conv_wei, data_type, conv_info, desc);
+ }
+
+protected:
+ TensorType compute_target(TensorShape shape_conv_src, TensorShape shape_conv_wei, DataType data_type, const PadStrideInfo &conv_info, const DirectConvComputeKernelInfo &desc)
+ {
+ TensorInfo src_conv_info(shape_conv_src, 1, data_type, DataLayout::NHWC);
+ TensorInfo wei_conv_info(shape_conv_wei, 1, data_type, DataLayout::NHWC);
+ TensorType dst;
+
+ // The output tensor will be auto-initialized within the function
+
+ // Create and configure function
+ OperatorType func;
+ func.configure(&src_conv_info, &wei_conv_info, dst.info(), conv_info, desc);
+
+ add_padding_x({ &dst });
+
+ // Allocate tensors
+ dst.allocator()->allocate();
+
+        // Run the address precalculation kernel
+ ITensorPack tensors = { { ACL_DST, &dst } };
+ func.run(tensors);
+
+ return dst;
+ }
+
+ SimpleTensor<int32_t> compute_reference(TensorShape shape_conv_src, TensorShape shape_conv_wei, DataType data_type, const PadStrideInfo &conv_info, const DirectConvComputeKernelInfo &desc)
+ {
+ ARM_COMPUTE_UNUSED(data_type);
+ TensorShape shape_out = compute_indirect_buffer_shape(shape_conv_src, DataLayout::NHWC, shape_conv_wei, conv_info, desc);
+ TensorShape output_conv_shape = compute_deep_convolution_shape(shape_conv_src, DataLayout::NHWC, shape_conv_wei, conv_info);
+
+ return reference::indirect_conv2d_addr_precalculation(shape_conv_src, shape_conv_wei, output_conv_shape, shape_out, conv_info);
+ }
+
+ TensorType _target{};
+ SimpleTensor<int32_t> _reference{};
+};
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_TEST_INDIRECT_CONV2D_ADDRESS_PRECALCULATION_FIXTURE */
\ No newline at end of file
diff --git a/tests/validation/reference/IndirectConv2dAddressPrecalculation.cpp b/tests/validation/reference/IndirectConv2dAddressPrecalculation.cpp
new file mode 100644
index 0000000000..240dcb686c
--- /dev/null
+++ b/tests/validation/reference/IndirectConv2dAddressPrecalculation.cpp
@@ -0,0 +1,99 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "IndirectConv2dAddressPrecalculation.h"
+
+#include "arm_compute/core/Types.h"
+
+#include "tests/validation/Helpers.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+namespace reference
+{
+SimpleTensor<int32_t> indirect_conv2d_addr_precalculation(const TensorShape &shape_conv_src, const TensorShape &shape_conv_wei, const TensorShape &shape_conv_dst, const TensorShape &shape_dst,
+ const PadStrideInfo &conv_info)
+{
+ SimpleTensor<int32_t> out{ shape_dst, DataType::S32 };
+
+ constexpr unsigned int width_idx = 1;
+    constexpr unsigned int height_idx = 2;
+
+ const int src_conv_width = static_cast<int32_t>(shape_conv_src[width_idx]); // NHWC
+    const int src_conv_height = static_cast<int32_t>(shape_conv_src[height_idx]); // NHWC
+ const int dst_conv_width = static_cast<int32_t>(shape_conv_dst[width_idx]); // NHWC
+ const int wei_conv_width = static_cast<int32_t>(shape_conv_wei[width_idx]); // NHWC
+    const int wei_conv_height = static_cast<int32_t>(shape_conv_wei[height_idx]); // NHWC
+ const int dst_width = static_cast<int32_t>(shape_dst[0]);
+ const int dst_height = static_cast<int32_t>(shape_dst[1]);
+ const int dst_batch = static_cast<int32_t>(shape_dst[2]);
+ const int ks = wei_conv_width * wei_conv_height;
+ const int stride_x = static_cast<int32_t>(conv_info.stride().first);
+ const int stride_y = static_cast<int32_t>(conv_info.stride().second);
+ const int pad_left = static_cast<int32_t>(conv_info.pad_left());
+ const int pad_top = static_cast<int32_t>(conv_info.pad_top());
+
+ const int m0 = dst_width / ks;
+
+ for(int z = 0; z < dst_batch; ++z)
+ {
+ for(int y = 0; y < dst_height; ++y)
+ {
+ const int mout = y * m0;
+ for(int ki = 0; ki < ks; ++ki)
+ {
+ const int xk = ki % wei_conv_width;
+ const int yk = ki / wei_conv_width;
+ for(int mi = 0; mi < m0; ++mi)
+ {
+ int xi = ((mout + mi) % dst_conv_width) * stride_x;
+ int yi = ((mout + mi) / dst_conv_width) * stride_y;
+ xi -= pad_left;
+ yi -= pad_top;
+ const int x_s = xi + xk;
+ const int y_s = yi + yk;
+ int my = x_s + y_s * src_conv_width;
+ my = my + z * src_conv_width * src_conv_height;
+ my = x_s >= 0 ? my : -1;
+ my = x_s < src_conv_width ? my : -1;
+ my = y_s >= 0 ? my : -1;
+ my = y_s < src_conv_height ? my : -1;
+
+ const unsigned int addr_out = mi + ki * m0 + y * (dst_width) + z * (dst_width * dst_height);
+
+ out[addr_out] = my;
+ }
+ }
+ }
+ }
+
+ return out;
+}
+} // namespace reference
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
\ No newline at end of file
diff --git a/tests/validation/reference/IndirectConv2dAddressPrecalculation.h b/tests/validation/reference/IndirectConv2dAddressPrecalculation.h
new file mode 100644
index 0000000000..f4a90dfd9f
--- /dev/null
+++ b/tests/validation/reference/IndirectConv2dAddressPrecalculation.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_TEST_INDIRECT_CONV2D_ADDRESS_PRECALCULATION_H
+#define ARM_COMPUTE_TEST_INDIRECT_CONV2D_ADDRESS_PRECALCULATION_H
+
+#include "tests/SimpleTensor.h"
+#include "tests/validation/Helpers.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+namespace reference
+{
+SimpleTensor<int32_t> indirect_conv2d_addr_precalculation(const TensorShape &shape_conv_src, const TensorShape &shape_conv_wei, const TensorShape &shape_conv_out, const TensorShape &shape_out,
+ const PadStrideInfo &conv_info);
+} // namespace reference
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_TEST_INDIRECT_CONV2D_ADDRESS_PRECALCULATION_H */
\ No newline at end of file