-rw-r--r--  Android.bp                                                            |   3
-rw-r--r--  arm_compute/runtime/CL/CLFunctions.h                                  |   1
-rw-r--r--  arm_compute/runtime/CL/functions/CLIndirectConvolutionLayer.h         | 120
-rw-r--r--  filelist.json                                                         |   3
-rw-r--r--  src/core/CL/cl_kernels/nhwc/direct_convolution.cl                     |  37
-rw-r--r--  src/core/CL/cl_kernels/nhwc/indirect_convolution.cl                   | 224
-rw-r--r--  src/core/CL/cl_kernels/nhwc/transposed_convolution.cl                 |  43
-rw-r--r--  src/core/CL/cl_kernels/tile_helpers.h                                 |  20
-rw-r--r--  src/gpu/cl/ClKernelLibrary.cpp                                        |   1
-rw-r--r--  src/gpu/cl/kernels/ClDirectConv2dKernel.cpp                           |   1
-rw-r--r--  src/gpu/cl/kernels/ClIndirectConv2dAddressPrecalculationKernel.cpp    |   7
-rw-r--r--  src/gpu/cl/kernels/ClIndirectConv2dKernel.cpp                         | 305
-rw-r--r--  src/gpu/cl/kernels/ClIndirectConv2dKernel.h                           |  82
-rw-r--r--  src/gpu/cl/operators/ClIndirectConv2d.cpp                             | 150
-rw-r--r--  src/gpu/cl/operators/ClIndirectConv2d.h                               | 110
-rw-r--r--  src/runtime/CL/functions/CLIndirectConvolutionLayer.cpp               |  86
-rw-r--r--  tests/validation/CL/IndirectConvolutionLayer.cpp                      | 268
-rw-r--r--  tests/validation/reference/IndirectConv2dAddressPrecalculation.cpp    |   3
18 files changed, 1413 insertions, 51 deletions
diff --git a/Android.bp b/Android.bp
index 20afbfc1d6..46cdb06a25 100644
--- a/Android.bp
+++ b/Android.bp
@@ -649,6 +649,7 @@ cc_library_static {
"src/gpu/cl/kernels/ClHeightConcatenateKernel.cpp",
"src/gpu/cl/kernels/ClIm2ColKernel.cpp",
"src/gpu/cl/kernels/ClIndirectConv2dAddressPrecalculationKernel.cpp",
+ "src/gpu/cl/kernels/ClIndirectConv2dKernel.cpp",
"src/gpu/cl/kernels/ClMulKernel.cpp",
"src/gpu/cl/kernels/ClPermuteKernel.cpp",
"src/gpu/cl/kernels/ClPool2dKernel.cpp",
@@ -697,6 +698,7 @@ cc_library_static {
"src/gpu/cl/operators/ClGemmConv2d.cpp",
"src/gpu/cl/operators/ClGemmLowpMatrixMultiplyCore.cpp",
"src/gpu/cl/operators/ClGemmLowpOutputStage.cpp",
+ "src/gpu/cl/operators/ClIndirectConv2d.cpp",
"src/gpu/cl/operators/ClLogicalNot.cpp",
"src/gpu/cl/operators/ClMul.cpp",
"src/gpu/cl/operators/ClPRelu.cpp",
@@ -772,6 +774,7 @@ cc_library_static {
"src/runtime/CL/functions/CLGEMMLowpOutputStage.cpp",
"src/runtime/CL/functions/CLGather.cpp",
"src/runtime/CL/functions/CLGenerateProposalsLayer.cpp",
+ "src/runtime/CL/functions/CLIndirectConvolutionLayer.cpp",
"src/runtime/CL/functions/CLInstanceNormalizationLayer.cpp",
"src/runtime/CL/functions/CLL2NormalizeLayer.cpp",
"src/runtime/CL/functions/CLLSTMLayer.cpp",
diff --git a/arm_compute/runtime/CL/CLFunctions.h b/arm_compute/runtime/CL/CLFunctions.h
index f42da5801c..e37134d454 100644
--- a/arm_compute/runtime/CL/CLFunctions.h
+++ b/arm_compute/runtime/CL/CLFunctions.h
@@ -69,6 +69,7 @@
#include "arm_compute/runtime/CL/functions/CLGEMMLowpOutputStage.h"
#include "arm_compute/runtime/CL/functions/CLGather.h"
#include "arm_compute/runtime/CL/functions/CLGenerateProposalsLayer.h"
+#include "arm_compute/runtime/CL/functions/CLIndirectConvolutionLayer.h"
#include "arm_compute/runtime/CL/functions/CLInstanceNormalizationLayer.h"
#include "arm_compute/runtime/CL/functions/CLL2NormalizeLayer.h"
#include "arm_compute/runtime/CL/functions/CLLSTMLayer.h"
diff --git a/arm_compute/runtime/CL/functions/CLIndirectConvolutionLayer.h b/arm_compute/runtime/CL/functions/CLIndirectConvolutionLayer.h
new file mode 100644
index 0000000000..8185f8df78
--- /dev/null
+++ b/arm_compute/runtime/CL/functions/CLIndirectConvolutionLayer.h
@@ -0,0 +1,120 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_CLINDIRECTCONVOLUTIONLAYER_H
+#define ARM_COMPUTE_CLINDIRECTCONVOLUTIONLAYER_H
+
+#include "arm_compute/core/Types.h"
+#include "arm_compute/runtime/IFunction.h"
+
+#include <memory>
+
+namespace arm_compute
+{
+class CLCompileContext;
+class ICLTensor;
+class ITensorInfo;
+
+/** Basic function to run the indirect convolution
+ */
+class CLIndirectConvolutionLayer : public IFunction
+{
+public:
+ /** Constructor */
+ CLIndirectConvolutionLayer();
+ /** Destructor */
+ ~CLIndirectConvolutionLayer();
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
+ CLIndirectConvolutionLayer(const CLIndirectConvolutionLayer &) = delete;
+ /** Default move constructor */
+ CLIndirectConvolutionLayer(CLIndirectConvolutionLayer &&);
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
+ CLIndirectConvolutionLayer &operator=(const CLIndirectConvolutionLayer &) = delete;
+ /** Default move assignment operator */
+ CLIndirectConvolutionLayer &operator=(CLIndirectConvolutionLayer &&);
+ /** Set the input and output tensors.
+ *
+ * Valid data layouts:
+ * - NHWC
+ *
+ * Valid data type configurations:
+ * |src0 |src1 |src2 |dst |
+ * |:--------------|:--------------|:------|:--------------|
+ * |F16 |F16 |F16 |F16 |
+ * |F32 |F32 |F32 |F32 |
+ *
+ * @param[in] input Source tensor. 3 lower dimensions represent a single input,
+ * while every optional dimension from 4 and above represent a batch of inputs.
+ * Data types supported: F16/F32.
+ * @param[in] weights Weights tensor. Weights are a 4D tensor with dimensions [IFM, kernel_x, kernel_y, OFM]. Data type supported: Same as @p input.
+ * @param[in] biases Biases tensor. Shared biases supported. Biases are 1D tensor with dimensions [OFM].
+ * Data type supported: Should match @p input data type.
+ * @param[out] output Destination tensor. 3 lower dimensions represent a single output, while the rest represent a batch of outputs.
+ * Data types supported: Same as @p input.
+ * @param[in] conv_info Contains padding and stride information described in @ref PadStrideInfo.
+ * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
+ */
+ void configure(ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info, const ActivationLayerInfo &act_info = ActivationLayerInfo());
+ /** Set the input and output tensors.
+ *
+ * @param[in] compile_context The compile context to be used.
+ * @param[in] input Source tensor. 3 lower dimensions represent a single input,
+ * while every optional dimension from 4 and above represent a batch of inputs.
+ * Data types supported: F16/F32.
+ * @param[in] weights Weights tensor. Weights are a 4D tensor with dimensions [IFM, kernel_x, kernel_y, OFM]. Data type supported: Same as @p input.
+ * @param[in] biases Biases tensor. Shared biases supported. Biases are 1D tensor with dimensions [OFM].
+ * Data type supported: Should match @p input data type.
+ * @param[out] output Destination tensor. 3 lower dimensions represent a single output, while the rest represent a batch of outputs.
+ * Data types supported: Same as @p input.
+ * @param[in] conv_info Contains padding and stride information described in @ref PadStrideInfo.
+ * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
+ */
+ void configure(const CLCompileContext &compile_context, ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info,
+ const ActivationLayerInfo &act_info = ActivationLayerInfo());
+ /** Static function to check if given info will lead to a valid configuration of @ref CLIndirectConvolutionLayer
+ *
+ * @param[in] input Source tensor. 3 lower dimensions represent a single input,
+ * while every optional dimension from 4 and above represent a batch of inputs.
+ * Data types supported: F16/F32.
+ * @param[in] weights Weights tensor. Weights are a 4D tensor with dimensions [IFM, kernel_x, kernel_y, OFM]. Data type supported: Same as @p input.
+ * @param[in] biases Biases tensor. Shared biases supported. Biases are 1D tensor with dimensions [OFM].
+ * Data type supported: Should match @p input data type.
+ * @param[in] output Destination tensor. 3 lower dimensions represent a single output, while the rest represent a batch of outputs.
+ * Data types supported: Same as @p input.
+ * @param[in] conv_info Contains padding and stride information described in @ref PadStrideInfo.
+ * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
+ *
+ * @return a status
+ */
+ static Status validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info,
+ const ActivationLayerInfo &act_info = ActivationLayerInfo());
+
+ // Inherited methods overridden:
+ void run() override;
+
+private:
+ struct Impl;
+ std::unique_ptr<Impl> _impl;
+};
+}
+#endif /* ARM_COMPUTE_CLINDIRECTCONVOLUTIONLAYER_H */
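For reference, a minimal usage sketch of the new public function. This is only a sketch: the 32x32 NHWC input with 64 channels, the 3x3 kernel with 128 output feature maps, the unit stride/padding and the fused ReLU are illustrative assumptions, not values taken from this patch.

    #include "arm_compute/runtime/CL/CLFunctions.h"
    #include "arm_compute/runtime/CL/CLScheduler.h"
    #include "arm_compute/runtime/CL/CLTensor.h"

    using namespace arm_compute;

    void run_indirect_convolution_example()
    {
        CLScheduler::get().default_init();

        // Illustrative NHWC shapes, expressed as [C, W, H, N] in Compute Library coordinates
        TensorInfo src_info(TensorShape(64U, 32U, 32U, 1U), 1, DataType::F32);
        TensorInfo wei_info(TensorShape(64U, 3U, 3U, 128U), 1, DataType::F32);   // [IFM, kernel_x, kernel_y, OFM]
        TensorInfo bia_info(TensorShape(128U), 1, DataType::F32);                // [OFM]
        TensorInfo dst_info(TensorShape(128U, 32U, 32U, 1U), 1, DataType::F32);
        src_info.set_data_layout(DataLayout::NHWC);
        wei_info.set_data_layout(DataLayout::NHWC);
        dst_info.set_data_layout(DataLayout::NHWC);

        CLTensor src, weights, biases, dst;
        src.allocator()->init(src_info);
        weights.allocator()->init(wei_info);
        biases.allocator()->init(bia_info);
        dst.allocator()->init(dst_info);

        // Configure the indirect convolution with a fused ReLU
        CLIndirectConvolutionLayer conv;
        conv.configure(&src, &weights, &biases, &dst, PadStrideInfo(1, 1, 1, 1),
                       ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));

        src.allocator()->allocate();
        weights.allocator()->allocate();
        biases.allocator()->allocate();
        dst.allocator()->allocate();

        // ... fill src/weights/biases, then:
        conv.run();
        CLScheduler::get().sync();
    }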
diff --git a/filelist.json b/filelist.json
index 42fd4182e3..beb6f77daf 100644
--- a/filelist.json
+++ b/filelist.json
@@ -288,17 +288,20 @@
"src/gpu/cl/kernels/ClWinogradInputTransformKernel.cpp",
"src/gpu/cl/kernels/ClWinogradOutputTransformKernel.cpp",
"src/gpu/cl/kernels/ClIm2ColKernel.cpp",
+ "src/gpu/cl/kernels/ClIndirectConv2dKernel.cpp",
"src/gpu/cl/kernels/ClIndirectConv2dAddressPrecalculationKernel.cpp",
"src/gpu/cl/kernels/ClCol2ImKernel.cpp",
"src/gpu/cl/operators/ClConv2d.cpp",
"src/gpu/cl/operators/ClDirectConv2d.cpp",
"src/gpu/cl/operators/ClGemmConv2d.cpp",
+ "src/gpu/cl/operators/ClIndirectConv2d.cpp",
"src/gpu/cl/operators/ClWinogradConv2d.cpp",
"src/gpu/cl/kernels/ClWeightsReshapeKernel.cpp",
"src/runtime/CL/functions/CLConvolutionLayer.cpp",
"src/runtime/CL/functions/CLDirectConvolutionLayer.cpp",
"src/runtime/CL/functions/CLFFTConvolutionLayer.cpp",
"src/runtime/CL/functions/CLGEMMConvolutionLayer.cpp",
+ "src/runtime/CL/functions/CLIndirectConvolutionLayer.cpp",
"src/runtime/CL/functions/CLWinogradConvolutionLayer.cpp"
]
}
diff --git a/src/core/CL/cl_kernels/nhwc/direct_convolution.cl b/src/core/CL/cl_kernels/nhwc/direct_convolution.cl
index 2e7ed5a4ca..8be8e00f0a 100644
--- a/src/core/CL/cl_kernels/nhwc/direct_convolution.cl
+++ b/src/core/CL/cl_kernels/nhwc/direct_convolution.cl
@@ -53,7 +53,7 @@
* @note The size of the partial store block in x must be passed at compile time using -DPARTIAL_N0 (e.g. -DPARTIAL_N0=1)
* @note The zero value must be passed at compile time using -DZERO_VALUE (e.g. -DZERO_VALUE=0)
* @note Only the following configurations of M0, N0 and K0 are currently supported:
- * - M0 = 1, 2, 3, 4, 5, .... n
+ * - M0 = 1, 2, 3, 4, 5, 6, 7, and 8
* - N0 = 2, 3, 4, 8, 16
* - K0 = 2, 3, 4, 8, 16 (only 4, 8 and 16 if WEI_TENSOR_TYPE=IMAGE)
*
@@ -137,16 +137,16 @@ __kernel void direct_convolution_nhwc(
// .v = access the whole vector (OpenCL vector)
// .s[x] = access the vector element at position x (scalar access)
- TILE(int, M0, 1, xi);
- TILE(int, M0, 1, yi);
+ TILE(int, 1, M0, xi);
+ TILE(int, 1, M0, yi);
// Convert the linear index to coordinate
LOOP_UNROLLING(int, i, 0, 1, M0,
{
- xi[i].v = ((mout + i) % _IDST_WIDTH) * STRIDE_X;
- yi[i].v = ((mout + i) / _IDST_WIDTH) * STRIDE_Y;
- xi[i].v -= PAD_LEFT;
- yi[i].v -= PAD_TOP;
+ xi[0].s[i] = ((mout + i) % _IDST_WIDTH) * STRIDE_X;
+ yi[0].s[i] = ((mout + i) / _IDST_WIDTH) * STRIDE_Y;
+ xi[0].s[i] -= PAD_LEFT;
+ yi[0].s[i] -= PAD_TOP;
})
// Initialize the accumulators
@@ -162,18 +162,18 @@ __kernel void direct_convolution_nhwc(
int xk = i % _IWEI_WIDTH;
int yk = i / _IWEI_WIDTH;
- TILE(int, M0, 1, my);
+ TILE(int, 1, M0, my);
LOOP_UNROLLING(int, i, 0, 1, M0,
{
- int x_s = xi[i].v + xk;
- int y_s = yi[i].v + yk;
- my[i].v = x_s + y_s *_ISRC_WIDTH;
- my[i].v = my[i].v + bout * (int)(_ISRC_WIDTH * _ISRC_HEIGHT);
- my[i].v = select(-1, my[i].v, x_s >= 0);
- my[i].v = select(-1, my[i].v, x_s < _ISRC_WIDTH);
- my[i].v = select(-1, my[i].v, y_s >= 0);
- my[i].v = select(-1, my[i].v, y_s < _ISRC_HEIGHT);
+ int x_s = xi[0].s[i] + xk;
+ int y_s = yi[0].s[i] + yk;
+ my[0].s[i] = x_s + y_s *_ISRC_WIDTH;
+ my[0].s[i] = my[0].s[i] + bout * (int)(_ISRC_WIDTH * _ISRC_HEIGHT);
+ my[0].s[i] = select(-1, my[0].s[i], x_s >= 0);
+ my[0].s[i] = select(-1, my[0].s[i], x_s < _ISRC_WIDTH);
+ my[0].s[i] = select(-1, my[0].s[i], y_s >= 0);
+ my[0].s[i] = select(-1, my[0].s[i], y_s < _ISRC_HEIGHT);
})
int ck = 0;
@@ -189,7 +189,7 @@ __kernel void direct_convolution_nhwc(
})
// Load tile from the src tensor
- T_LOAD2D_INDIRECT(SRC_DATA_TYPE, M0, K0, SRC_TENSOR_TYPE, src, bout, yk, xk, ck, _ISRC_WIDTH, _ISRC_HEIGHT, src_stride_y, my, a);
+ T_LOAD2D_INDIRECT(SRC_DATA_TYPE, M0, K0, SRC_TENSOR_TYPE, src, ck, src_stride_y, my, a);
// Load tile from the weights tensor
T_LOAD(WEI_DATA_TYPE, N0, K0, WEI_TENSOR_TYPE, wei, ck, cout * _IY_MULTIPLIER + i, _IY_MULTIPLIER, wei_stride_y, b);
@@ -202,7 +202,6 @@ __kernel void direct_convolution_nhwc(
T_OFFSET_CORRECTION(ACC_DATA_TYPE, M0, N0, K0, SRC_OFFSET, WEI_OFFSET, a, b, c);
}
- // We voluntarily use SRC_CHANNELS rather than _DSRC_CHANNELS
// This #if directive should be removed in case of dynamic tensor support
#if defined(LEFTOVER_LOOP)
// Left-over accumulations
@@ -223,7 +222,7 @@ __kernel void direct_convolution_nhwc(
})
// Load tile from the src tensor
- T_LOAD_NHWC_INDIRECT(SRC_DATA_TYPE, M0, 1, SRC_TENSOR_TYPE, src, bout, yk, xk, ck, _ISRC_WIDTH, _ISRC_HEIGHT, src_stride_y, xi, yi, a);
+ T_LOAD2D_INDIRECT(SRC_DATA_TYPE, M0, 1, SRC_TENSOR_TYPE, src, ck, src_stride_y, my, a);
// Load tile from the weights tensor
// The T_LOAD for the left-over elements can only use BUFFER because we load one element per iteration
diff --git a/src/core/CL/cl_kernels/nhwc/indirect_convolution.cl b/src/core/CL/cl_kernels/nhwc/indirect_convolution.cl
index 07c7212e77..c88f0034c5 100644
--- a/src/core/CL/cl_kernels/nhwc/indirect_convolution.cl
+++ b/src/core/CL/cl_kernels/nhwc/indirect_convolution.cl
@@ -21,13 +21,16 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-
+#include "activation_float_helpers.h"
#include "helpers.h"
#include "tile_helpers.h"
+#if defined(INDIRECT_CONVOLUTION_ADDRESS_PRECALCULATION)
//! @cond Doxygen_Suppress
/** OpenCL kernel to compute the indirect convolution 2d indirect buffer.
*
+ * @note This kernel only works for unit batch_size
+ *
* @note The convolution padding (left and top) must be passed at compile time using -DPAD_LEFT and -DPAD_TOP (e.g. -DPAD_LEFT=2, -DPAD_TOP=2)
* @note The convolution strides must be passed at compile time using -DSTRIDE_X and -DSTRIDE_Y (e.g. -DSTRIDE_X=2, -DSTRIDE_Y=2)
* @note The kernel width must be passed at compile time using -DWEI_CONV_WIDTH (e.g. -DWEI_CONV_WIDTH=9)
@@ -38,7 +41,7 @@
* @note The number of M0 rows (width*height) to process must be passed at compile time using -DM0 (e.g. -DM0=2)
* - M0 = 1, 2, 3, 4, 5, 6, 7, and 8
*
- * @param[out] dst_img CLImage object to the destination tensor (DST_TENSOR_TYPE=IMAGE only)
+ * @param[out] dst_img (Not supported) CLImage object to the destination tensor (DST_TENSOR_TYPE=IMAGE only)
* @param[out] dst_ptr Pointer to the destination tensor. Supported data type: INT32
* @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
* @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
@@ -90,4 +93,219 @@ __kernel void indirect_convolution_address_precalculation(
VSTORE(1)
(my[0].s[0], 0, (__global DST_DATA_TYPE *)(dst_ptr + dst_offset_first_element_in_bytes + x * sizeof(DST_DATA_TYPE) + y * dst_stride_y + z * dst_stride_z));
-}
\ No newline at end of file
+}
+#endif // defined(INDIRECT_CONVOLUTION_ADDRESS_PRECALCULATION)
+
+#if defined(INDIRECT_CONVOLUTION_NHWC)
+//! @cond Doxygen_Suppress
+/** OpenCL kernel to compute the indirect convolution.
+ *
+ * @note Data layout supported: NHWC
+ * @note Data type supported: F32/F16
+ * @note The spatial dimensions of the weights must be passed at compile time using -DWEI_WIDTH and -DWEI_HEIGHT (e.g. -DWEI_WIDTH=9, -DWEI_HEIGHT=9)
+ * @note The spatial dimensions of the destination tensor must be passed at compile time using -DDST_WIDTH and -DDST_HEIGHT (e.g. -DDST_WIDTH=96, -DDST_HEIGHT=64)
+ * @note The channels of the source tensor must be passed at compile time using -DSRC_CHANNELS (e.g. -DSRC_CHANNELS=64)
+ * @note The tensor type ("BUFFER" or "IMAGE") of the source tensor must be passed at compile time using -DSRC_TENSOR_TYPE (e.g. -DSRC_TENSOR_TYPE=BUFFER)
+ * @note The tensor type ("BUFFER" or "IMAGE") of the weights tensor must be passed at compile time using -DWEI_TENSOR_TYPE (e.g. -DWEI_TENSOR_TYPE=BUFFER)
+ * @note The tensor type ("BUFFER" or "IMAGE") of the destination tensor must be passed at compile time using -DDST_TENSOR_TYPE (e.g. -DDST_TENSOR_TYPE=BUFFER)
+ * @note The data type of the source tensor must be passed at compile time using -DSRC_DATA_TYPE (e.g. -DSRC_DATA_TYPE=float)
+ * @note The data type of the weights tensor must be passed at compile time using -DWEI_DATA_TYPE (e.g. -DWEI_DATA_TYPE=float)
+ * @note The data type of the destination tensor must be passed at compile time using -DDST_DATA_TYPE (e.g. -DDST_DATA_TYPE=float)
+ * @note The number of M0 rows (width*height) to process must be passed at compile time using -DM0 (e.g. -DM0=2)
+ * @note The number of N0 output channels to process must be passed at compile time using -DN0 (e.g. -DN0=2)
+ * @note The number of K0 inner accumulations must be passed at compile time using -DK0 (e.g. -DK0=2)
+ * @note The size of the partial store block in x must be passed at compile time using -DPARTIAL_N0 (e.g. -DPARTIAL_N0=1)
+ * @note The vector length used for loading the values from the indirect buffer should be passed at compile time using -DIND_BUFF_VEC_SIZE (e.g. -DIND_BUFF_VEC_SIZE=4)
+ * @note The activation function to fuse and corresponding A and B values should be passed at compile time using -DACTIVATION_TYPE, -DA_VAL, and -DB_VAL
+ * (e.g. -DACTIVATION_TYPE=lu_brelu_op, -DA_VAL=3.0, and -DB_VAL=1.0)
+ * @note Only the following configurations of M0, N0 and K0 are currently supported:
+ * - M0 = 1, 2, 3, 4, 5, 6, 7, and 8
+ * - N0 = 2, 3, 4, 8, 16
+ * - K0 = 2, 3, 4, 8, 16 (only 4, 8 and 16 if WEI_TENSOR_TYPE=IMAGE)
+ *
+ * @param[in] src_img (Not supported) CLImage object to the source tensor (SRC_TENSOR_TYPE=IMAGE only)
+ * @param[in] src_ptr Pointer to the source tensor. Supported data type: F16/F32
+ * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
+ * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes)
+ * @param[in] src_c The size of the channels dimension of the source tensor
+ * @param[in] src_w The size of the width dimension of the source tensor
+ * @param[in] src_h The size of the height dimension of the source tensor
+ * @param[in] src_n The size of the batches dimension of the source tensor
+ * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
+ * @param[in] off_img (Not supported) CLImage object to the indirect buffer tensor (OFF_TENSOR_TYPE=IMAGE only)
+ * @param[in] off_ptr Pointer to the indirect buffer tensor. Supported data type: INT32
+ * @param[in] off_stride_y Stride of the indirect buffer tensor in Y dimension (in bytes)
+ * @param[in] off_stride_z Stride of the indirect buffer tensor in Z dimension (in bytes)
+ * @param[in] off_stride_w Stride of the indirect buffer tensor in W dimension (in bytes)
+ * @param[in] off_c The size of the channels dimension of the indirect buffer tensor
+ * @param[in] off_w The size of the width dimension of the indirect buffer tensor
+ * @param[in] off_h The size of the height dimension of the indirect buffer tensor
+ * @param[in] off_n The size of the batches dimension of the indirect buffer tensor
+ * @param[in] off_offset_first_element_in_bytes The offset of the first element in the indirect buffer tensor
+ * @param[out] dst_img (Not supported) CLImage object to the destination tensor (DST_TENSOR_TYPE=IMAGE only)
+ * @param[out] dst_ptr Pointer to the destination tensor. Supported data type: same as the input tensor
+ * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] dst_stride_w Stride of the destination tensor in W dimension (in bytes)
+ * @param[in] dst_c The size of the channels dimension of the destination tensor
+ * @param[in] dst_w The size of the width dimension of the destination tensor
+ * @param[in] dst_h The size of the height dimension of the destination tensor
+ * @param[in] dst_n The size of the batches dimension of the destination tensor
+ * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ * @param[in] wei_img (Optional) CLImage object to the weights tensor (WEI_TENSOR_TYPE=IMAGE only)
+ * @param[in] wei_ptr Pointer to the weights tensor. Supported data type: same as the input tensor
+ * @param[in] wei_stride_y Stride of the weights tensor in Y dimension (in bytes)
+ * @param[in] wei_stride_z Stride of the weights tensor in Z dimension (in bytes)
+ * @param[in] wei_stride_w Stride of the weights tensor in W dimension (in bytes)
+ * @param[in] wei_c The size of the channels dimension of the weights tensor
+ * @param[in] wei_w The size of the width dimension of the weights tensor
+ * @param[in] wei_h The size of the height dimension of the weights tensor
+ * @param[in] wei_n The size of the batches dimension of the weights tensor
+ * @param[in] wei_offset_first_element_in_bytes The offset of the first element in the weights tensor
+ * @param[in] bia_img (Not supported) CLImage object to the bias tensor (BIA_TENSOR_TYPE=IMAGE only)
+ * @param[in] bia_ptr (Optional) Pointer to the bias tensor. Supported data type: same as the input tensor
+ * @param[in] bia_stride_y (Optional) Stride of the bias tensor in Y dimension (in bytes)
+ * @param[in] bia_stride_z (Optional) Stride of the bias tensor in Z dimension (in bytes)
+ * @param[in] bia_stride_w (Optional) Stride of the bias tensor in W dimension (in bytes)
+ * @param[in] bia_c (Optional) The size of the channels dimension of the bias tensor
+ * @param[in] bia_w (Optional) The size of the width dimension of the bias tensor
+ * @param[in] bia_h (Optional) The size of the height dimension of the bias tensor
+ * @param[in] bia_n (Optional) The size of the batches dimension of the bias tensor
+ * @param[in] bia_offset_first_element_in_bytes (Optional) The offset of the first element in the bias tensor
+ */
+//! @endcond
+__kernel void indirect_convolution_nhwc(
+ TENSOR4D_T(src, SRC_TENSOR_TYPE),
+ TENSOR4D_T(off, OFF_TENSOR_TYPE),
+ TENSOR4D_T(dst, DST_TENSOR_TYPE),
+ TENSOR4D_T(wei, WEI_TENSOR_TYPE)
+#if defined(HAS_BIAS)
+ ,
+ VECTOR_DECLARATION(bia)
+#endif // defined(HAS_BIAS)
+)
+{
+ // All the tensor dimensions are passed at compile time.
+ // In case of dynamic tensor support, the following dimensions should be passed as function argument.
+#define _IWEI_WIDTH WEI_WIDTH
+#define _IWEI_HEIGHT WEI_HEIGHT
+#define _ISRC_CHANNELS SRC_CHANNELS
+#define _IDST_WIDTH DST_WIDTH
+#define _IDST_HEIGHT DST_HEIGHT
+#define _IY_MULTIPLIER (_IWEI_WIDTH * _IWEI_HEIGHT)
+
+ const int cout = GET_SPATIAL_IDX(0, N0, PARTIAL_N0); // OFM
+ const int mout = GET_SPATIAL_IDX(1, M0, 0); // WIDTH x HEIGHT
+ const int bout = GET_SPATIAL_IDX(2, 1, 0); // BATCH SIZE IDX
+
+ off_offset_first_element_in_bytes += get_global_id(1) * off_stride_y;
+ off_offset_first_element_in_bytes += bout * off_stride_z;
+
+ // Initialize the accumulators
+ TILE(DST_DATA_TYPE, M0, N0, c);
+
+ LOOP_UNROLLING(int, i, 0, 1, M0,
+ {
+ c[i].v = 0;
+ })
+
+ for(int i = 0; i < (_IWEI_WIDTH * _IWEI_HEIGHT); ++i)
+ {
+ TILE(int, 1, IND_BUFF_VEC_SIZE, my);
+ T_LOAD(int, 1, IND_BUFF_VEC_SIZE, OFF_TENSOR_TYPE, off, i * M0, 0, 1, 0, my);
+
+ int ck = 0;
+ for(; ck <= (_ISRC_CHANNELS - K0); ck += K0)
+ {
+ TILE(SRC_DATA_TYPE, M0, K0, a);
+ TILE(WEI_DATA_TYPE, N0, K0, b);
+
+ // Initialize tiles
+ LOOP_UNROLLING(int, i, 0, 1, M0,
+ {
+ a[i].v = 0.0;
+ })
+
+ LOOP_UNROLLING(int, i, 0, 1, N0,
+ {
+ b[i].v = 0.0;
+ })
+
+ // Load tile from the src tensor
+ T_LOAD2D_INDIRECT(SRC_DATA_TYPE, M0, K0, SRC_TENSOR_TYPE, src, ck, src_stride_y, my, a);
+
+ // Load tile from the weights tensor
+ T_LOAD(WEI_DATA_TYPE, N0, K0, WEI_TENSOR_TYPE, wei, ck, cout * _IY_MULTIPLIER + i, _IY_MULTIPLIER, wei_stride_y, b);
+
+ // Compute the matrix multiplication between two tiles
+ T_MMUL(SRC_DATA_TYPE, WEI_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, NT, T, a, b, c);
+ }
+
+ // This #if directive should be removed in case of dynamic tensor support
+#if defined(LEFTOVER_LOOP)
+ // Left-over accumulations
+ for(; ck < _ISRC_CHANNELS; ++ck)
+ {
+ TILE(SRC_DATA_TYPE, M0, 1, a);
+ TILE(WEI_DATA_TYPE, N0, 1, b);
+
+ // Initialize tiles
+ LOOP_UNROLLING(int, i, 0, 1, M0,
+ {
+ a[i].v = 0.0;
+ })
+
+ LOOP_UNROLLING(int, i, 0, 1, N0,
+ {
+ b[i].v = 0.0;
+ })
+
+ // Load tile from the src tensor
+ T_LOAD2D_INDIRECT(SRC_DATA_TYPE, M0, 1, SRC_TENSOR_TYPE, src, ck, src_stride_y, my, a);
+
+ // Load tile from the weights tensor
+ // The T_LOAD for the left-over elements can only use BUFFER because we load one element per iteration
+ T_LOAD(WEI_DATA_TYPE, N0, 1, BUFFER, wei, ck, cout * _IY_MULTIPLIER + i, _IY_MULTIPLIER, wei_stride_y, b);
+
+ // Compute the matrix multiplication between two tiles
+ T_MMUL(SRC_DATA_TYPE, WEI_DATA_TYPE, DST_DATA_TYPE, M0, N0, 1, NT, T, a, b, c);
+ }
+#endif // defined(LEFTOVER_LOOP)
+ }
+
+#if defined(HAS_BIAS)
+ TILE(BIA_DATA_TYPE, 1, N0, bias0);
+
+ T_LOAD(BIA_DATA_TYPE, 1, N0, BUFFER, bia, cout, 0, 1, 0, bias0);
+
+ // c = c + bias[broadcasted]
+ T_ELTWISE_BROADCAST_ADD_X(DST_DATA_TYPE, M0, N0, c, bias0, c);
+
+#endif // HAS_BIAS
+
+ // Apply activation
+ T_ACTIVATION(DST_DATA_TYPE, M0, N0, ACTIVATION_TYPE, A_VAL, B_VAL, c, c);
+
+ TILE(uint, M0, 1, dst_indirect_y);
+
+ // Calculate the destination indirect Y
+ LOOP_UNROLLING(int, i, 0, 1, M0,
+ {
+ dst_indirect_y[i].v = (uint)min(mout + i, (int)(_IDST_WIDTH * _IDST_HEIGHT) - 1);
+ dst_indirect_y[i].v += bout * (int)(_IDST_WIDTH * _IDST_HEIGHT);
+ })
+
+ const bool x_cond = PARTIAL_N0 != 0 && get_global_id(0) == 0;
+
+ // Store the tile in reverse order so the invalid values are overwritten with the valid ones
+ T_STORE_INDIRECT_WIDTH_SELECT(DST_DATA_TYPE, M0, N0, PARTIAL_N0, DST_TENSOR_TYPE, dst, cout, dst_stride_y, x_cond, c, dst_indirect_y);
+
+#undef _IWEI_WIDTH
+#undef _IWEI_HEIGHT
+#undef _ISRC_CHANNELS
+#undef _IDST_WIDTH
+#undef _IDST_HEIGHT
+#undef _IY_MULTIPLIER
+}
+#endif // defined(INDIRECT_CONVOLUTION_NHWC)
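For reference, the set of compile-time definitions the indirect_convolution_nhwc kernel expects, as listed in the @note block above and mirrored by ClIndirectConv2dKernel::configure() later in this patch. The concrete values below (F32 buffers, 3x3 kernel, 64 input channels, 32x32 output, M0=N0=K0=4, fused ReLU with bias) are illustrative assumptions only:

    -DINDIRECT_CONVOLUTION_NHWC
    -DSRC_TENSOR_TYPE=BUFFER -DSRC_DATA_TYPE=float -DSRC_CHANNELS=64
    -DOFF_TENSOR_TYPE=BUFFER -DIND_BUFF_VEC_SIZE=4
    -DWEI_TENSOR_TYPE=BUFFER -DWEI_DATA_TYPE=float -DWEI_WIDTH=3 -DWEI_HEIGHT=3
    -DDST_TENSOR_TYPE=BUFFER -DDST_DATA_TYPE=float -DDST_WIDTH=32 -DDST_HEIGHT=32
    -DM0=4 -DN0=4 -DK0=4 -DPARTIAL_N0=0
    -DHAS_BIAS -DBIA_DATA_TYPE=float
    -DACTIVATION_TYPE=relu -DA_VAL=0.0 -DB_VAL=0.0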
diff --git a/src/core/CL/cl_kernels/nhwc/transposed_convolution.cl b/src/core/CL/cl_kernels/nhwc/transposed_convolution.cl
index 8872c31229..c01a44f117 100644
--- a/src/core/CL/cl_kernels/nhwc/transposed_convolution.cl
+++ b/src/core/CL/cl_kernels/nhwc/transposed_convolution.cl
@@ -114,18 +114,18 @@ __kernel void transposed_convolution_nhwc(
// .v = access the whole vector (OpenCL vector)
// .s[x] = access the vector element at position x (scalar access)
- TILE(int, M0, 1, xi);
- TILE(int, M0, 1, yi);
- TILE(int, M0, 1, xu);
- TILE(int, M0, 1, yu);
+ TILE(int, 1, M0, xi);
+ TILE(int, 1, M0, yi);
+ TILE(int, 1, M0, xu);
+ TILE(int, 1, M0, yu);
// Convert the linear index to coordinate
LOOP_UNROLLING(int, i, 0, 1, M0,
{
- xu[i].v = ((mout + i) % _IDST_WIDTH) - PAD_LEFT;
- yu[i].v = ((mout + i) / _IDST_WIDTH) - PAD_TOP;
- xi[i].v = ceil(xu[i].v / (float)STRIDE_X);
- yi[i].v = ceil(yu[i].v / (float)STRIDE_Y);
+ xu[0].s[i] = ((mout + i) % _IDST_WIDTH) - PAD_LEFT;
+ yu[0].s[i] = ((mout + i) / _IDST_WIDTH) - PAD_TOP;
+ xi[0].s[i] = ceil(xu[0].s[i] / (float)STRIDE_X);
+ yi[0].s[i] = ceil(yu[0].s[i] / (float)STRIDE_Y);
})
// Initialize the accumulators
@@ -137,8 +137,8 @@ __kernel void transposed_convolution_nhwc(
})
// Flipped indices
- const int x_start = _IWEI_WIDTH - (xi[0].v * STRIDE_X - xu[0].v) - 1;
- const int y_start = _IWEI_HEIGHT - (yi[0].v * STRIDE_Y - yu[0].v) - 1;
+ const int x_start = _IWEI_WIDTH - (xi[0].s[0] * STRIDE_X - xu[0].s[0]) - 1;
+ const int y_start = _IWEI_HEIGHT - (yi[0].s[0] * STRIDE_Y - yu[0].s[0]) - 1;
for(int yk = y_start, yi_step = 0; yk >= 0; yk -= STRIDE_Y, ++yi_step)
{
@@ -146,18 +146,18 @@ __kernel void transposed_convolution_nhwc(
{
int weights_y = cout * _IY_MULTIPLIER + yk * _IWEI_WIDTH + xk;
- TILE(int, M0, 1, my);
+ TILE(int, 1, M0, my);
LOOP_UNROLLING(int, i, 0, 1, M0,
{
- int x_s = xi[i].v + xi_step;
- int y_s = yi[i].v + yi_step;
- my[i].v = x_s + y_s *_ISRC_WIDTH;
- my[i].v = my[i].v + bout * (int)(_ISRC_WIDTH * _ISRC_HEIGHT);
- my[i].v = select(-1, my[i].v, x_s >= 0);
- my[i].v = select(-1, my[i].v, x_s < _ISRC_WIDTH);
- my[i].v = select(-1, my[i].v, y_s >= 0);
- my[i].v = select(-1, my[i].v, y_s < _ISRC_HEIGHT);
+ int x_s = xi[0].s[i] + xi_step;
+ int y_s = yi[0].s[i] + yi_step;
+ my[0].s[i] = x_s + y_s *_ISRC_WIDTH;
+ my[0].s[i] = my[0].s[i] + bout * (int)(_ISRC_WIDTH * _ISRC_HEIGHT);
+ my[0].s[i] = select(-1, my[0].s[i], x_s >= 0);
+ my[0].s[i] = select(-1, my[0].s[i], x_s < _ISRC_WIDTH);
+ my[0].s[i] = select(-1, my[0].s[i], y_s >= 0);
+ my[0].s[i] = select(-1, my[0].s[i], y_s < _ISRC_HEIGHT);
})
int ck = 0;
@@ -178,7 +178,7 @@ __kernel void transposed_convolution_nhwc(
})
// Load tile from the src tensor
- T_LOAD2D_INDIRECT(SRC_DATA_TYPE, M0, K0, SRC_TENSOR_TYPE, src, bout, yk, xk, ck, _ISRC_WIDTH, _ISRC_HEIGHT, src_stride_y, my, a);
+ T_LOAD2D_INDIRECT(SRC_DATA_TYPE, M0, K0, SRC_TENSOR_TYPE, src, ck, src_stride_y, my, a);
// Load tile from the weights tensor
T_LOAD(WEI_DATA_TYPE, N0, K0, WEI_TENSOR_TYPE, wei, ck, weights_y, _IY_MULTIPLIER, wei_stride_y, b);
@@ -187,7 +187,6 @@ __kernel void transposed_convolution_nhwc(
T_MMUL(SRC_DATA_TYPE, WEI_DATA_TYPE, ACC_DATA_TYPE, M0, N0, K0, NT, T, a, b, c);
}
- // We voluntarily use SRC_CHANNELS rather than _DSRC_CHANNELS
// This #if directive should be removed in case of dynamic tensor support
#if defined(LEFTOVER_LOOP)
// Left-over accumulations
@@ -204,7 +203,7 @@ __kernel void transposed_convolution_nhwc(
// Load tile from the src tensor
// The T_LOAD for the left-over elements can only use BUFFER because we load one element per iteration
- T_LOAD2D_INDIRECT(SRC_DATA_TYPE, M0, 1, BUFFER, src, bout, yk, xk, ck, _ISRC_WIDTH, _ISRC_HEIGHT, src_stride_y, my, a);
+ T_LOAD2D_INDIRECT(SRC_DATA_TYPE, M0, 1, BUFFER, src, ck, src_stride_y, my, a);
// Load tile from the weights tensor
// The T_LOAD for the left-over elements can only use BUFFER because we load one element per iteration
diff --git a/src/core/CL/cl_kernels/tile_helpers.h b/src/core/CL/cl_kernels/tile_helpers.h
index 998bc9efb2..861ea63eca 100644
--- a/src/core/CL/cl_kernels/tile_helpers.h
+++ b/src/core/CL/cl_kernels/tile_helpers.h
@@ -653,13 +653,27 @@
}) \
})
-#define T_LOAD2D_INDIRECT(DATA_TYPE, TILE_AREA, TILE_CHANNELS, TENSOR_TYPE, TENSOR, B, Y, X, C, TENSOR_WIDTH, TENSOR_HEIGHT, STRIDE_Y, yi, dst) \
+/** Load a tile from global memory (tensor) using an indirect buffer for the Y coordinates
+ *
+ * @param[in] DATA_TYPE Data type
+ * @param[in] TILE_AREA Number of elements to load from Y (height) dimension * Number of elements to load from X (width) dimension
+ * @param[in] TILE_CHANNELS Number of elements to load from C (channel) dimension
+ * @param[in] TENSOR_TYPE Type of cl_type used to store the tensor in global memory (BUFFER=cl_buffer, IMAGE=cl_image). Currently only BUFFER is supported
+ * In case of cl_image, only TILE_CHANNELS multiples of 4 are supported (4, 8, 16)
+ * @param[in] TENSOR Tensor basename
+ * @param[in] C Starting C index
+ * @param[in] STRIDE_Y Stride Y (in bytes)
+ * @param[in] yi A tile with TILE_AREA values holding the indirect Y coordinates.
+ * 16 is the maximum indirect buffer size.
+ * @param[out] dst Output tile
+ */
+#define T_LOAD2D_INDIRECT(DATA_TYPE, TILE_AREA, TILE_CHANNELS, TENSOR_TYPE, TENSOR, C, STRIDE_Y, yi, dst) \
({ \
LOOP_UNROLLING(int, _i, 0, 1, TILE_AREA, \
{ \
- if(yi[_i].v >= 0) \
+ if(yi[0].s[_i] >= 0) \
{ \
- dst[_i].v = V_LOAD(DATA_TYPE, TILE_CHANNELS, TENSOR_TYPE, TENSOR, C, yi[_i].v, STRIDE_Y); \
+ dst[_i].v = V_LOAD(DATA_TYPE, TILE_CHANNELS, TENSOR_TYPE, TENSOR, C, yi[0].s[_i], STRIDE_Y); \
} \
}) \
})
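The updated T_LOAD2D_INDIRECT contract is the one already exercised by the convolution kernels in this patch: the caller prepares a single row of indirect Y offsets, with -1 marking out-of-bound rows, and the macro loads TILE_CHANNELS values per valid row. A short OpenCL C sketch, assuming a kernel where src and src_stride_y come from a TENSOR4D_T declaration and M0/K0 are compile-time defines; float data is just an example:

    // Indirect Y offsets for the M0 rows of the tile; -1 flags rows that must keep their zero fill
    TILE(int, 1, M0, my);

    // Destination tile (M0 x K0), zero-initialised so skipped rows stay zero
    TILE(float, M0, K0, a);
    LOOP_UNROLLING(int, i, 0, 1, M0,
    {
        a[i].v = 0.0f;
    })

    // Load K0 channels starting at channel ck for every row whose offset in my is >= 0
    int ck = 0;
    T_LOAD2D_INDIRECT(float, M0, K0, BUFFER, src, ck, src_stride_y, my, a);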
diff --git a/src/gpu/cl/ClKernelLibrary.cpp b/src/gpu/cl/ClKernelLibrary.cpp
index 0ff7dfe1f3..5f2fcd61fa 100644
--- a/src/gpu/cl/ClKernelLibrary.cpp
+++ b/src/gpu/cl/ClKernelLibrary.cpp
@@ -433,6 +433,7 @@ const std::map<std::string, std::string> ClKernelLibrary::_kernel_program_map =
{ "im2col3x3_nhwc", "nhwc/im2col.cl" },
{ "im2col9x9_nhwc", "nhwc/im2col.cl" },
{ "im2col_generic_nhwc", "nhwc/im2col.cl" },
+ { "indirect_convolution_nhwc", "nhwc/indirect_convolution.cl" },
{ "indirect_convolution_address_precalculation", "nhwc/indirect_convolution.cl" },
{ "normalization_layer_cross_map_nhwc", "nhwc/normalization_layer.cl" },
{ "normalization_layer_in_map_nhwc", "nhwc/normalization_layer.cl" },
diff --git a/src/gpu/cl/kernels/ClDirectConv2dKernel.cpp b/src/gpu/cl/kernels/ClDirectConv2dKernel.cpp
index 781627117a..d9271e24d9 100644
--- a/src/gpu/cl/kernels/ClDirectConv2dKernel.cpp
+++ b/src/gpu/cl/kernels/ClDirectConv2dKernel.cpp
@@ -86,6 +86,7 @@ Status validate_arguments(const ITensorInfo *src, const ITensorInfo *weights, co
if(data_layout == DataLayout::NHWC)
{
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(desc.m0 <= 0 || desc.m0 > 8, "M0 can only be greater than 0 and less than or equal to 8");
ARM_COMPUTE_RETURN_ERROR_ON_MSG(desc.n0 != 1 && desc.n0 != 2 && desc.n0 != 3 && desc.n0 != 4 && desc.n0 != 8 && desc.n0 != 16,
"N0 can only be: 1, 2, 3, 4, 8, and 16");
ARM_COMPUTE_RETURN_ERROR_ON_MSG(desc.k0 != 1 && desc.k0 != 2 && desc.k0 != 3 && desc.k0 != 4 && desc.k0 != 8 && desc.k0 != 16,
diff --git a/src/gpu/cl/kernels/ClIndirectConv2dAddressPrecalculationKernel.cpp b/src/gpu/cl/kernels/ClIndirectConv2dAddressPrecalculationKernel.cpp
index 95186fe106..8e12f23fa6 100644
--- a/src/gpu/cl/kernels/ClIndirectConv2dAddressPrecalculationKernel.cpp
+++ b/src/gpu/cl/kernels/ClIndirectConv2dAddressPrecalculationKernel.cpp
@@ -80,8 +80,8 @@ void ClIndirectConv2dAddressPrecalculationKernel::configure(const CLCompileConte
ARM_COMPUTE_ERROR_ON_NULLPTR(src, weights, dst);
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(src, weights, dst, conv_info, desc));
- constexpr unsigned int width_idx = 1;
- constexpr unsigned int height_idx = 2;
+ constexpr unsigned int width_idx = 1;
+ constexpr unsigned int height_idx = 2;
// Get dst shape
TensorShape output_shape = misc::shape_calculator::compute_indirect_buffer_shape(src->tensor_shape(),
@@ -127,6 +127,9 @@ void ClIndirectConv2dAddressPrecalculationKernel::configure(const CLCompileConte
build_options.add_option("-DPAD_TOP=" + support::cpp11::to_string(pad_top));
build_options.add_option("-DM0=" + support::cpp11::to_string(desc.m0));
+ // A macro guard to compile ONLY the kernel of interest
+ build_options.add_option("-D" + upper_string(kernel_name.str()));
+
_kernel = create_kernel(compile_context, kernel_name.str(), build_options.options());
// Since this kernel should be called only once, we do not need to set the config_id for tuning
diff --git a/src/gpu/cl/kernels/ClIndirectConv2dKernel.cpp b/src/gpu/cl/kernels/ClIndirectConv2dKernel.cpp
new file mode 100644
index 0000000000..3448377cb5
--- /dev/null
+++ b/src/gpu/cl/kernels/ClIndirectConv2dKernel.cpp
@@ -0,0 +1,305 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "src/gpu/cl/kernels/ClIndirectConv2dKernel.h"
+
+#include "arm_compute/core/CL/CLKernelLibrary.h"
+#include "arm_compute/core/CL/ICLTensor.h"
+#include "arm_compute/core/KernelDescriptors.h"
+#include "arm_compute/core/Utils.h"
+#include "arm_compute/core/utils/misc/ShapeCalculator.h"
+#include "src/core/CL/CLUtils.h"
+#include "src/core/CL/CLValidate.h"
+#include "src/core/helpers/AutoConfiguration.h"
+#include "src/core/helpers/WindowHelpers.h"
+#include "src/gpu/cl/kernels/gemm/ClGemmHelpers.h"
+#include "support/Cast.h"
+#include "support/StringSupport.h"
+
+namespace arm_compute
+{
+namespace opencl
+{
+namespace kernels
+{
+namespace
+{
+Status validate_arguments(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *indirect_buffer, const ITensorInfo *dst,
+ const PadStrideInfo &conv_info, const ActivationLayerInfo &act_info, const DirectConvComputeKernelInfo &desc)
+{
+ ARM_COMPUTE_UNUSED(act_info);
+ ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(src);
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src, 1, DataType::F16, DataType::F32);
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(indirect_buffer, 1, DataType::S32);
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_LAYOUT_NOT_IN(src, DataLayout::NHWC);
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src, weights);
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(indirect_buffer->tensor_shape(),
+ misc::shape_calculator::compute_indirect_buffer_shape(src->tensor_shape(),
+ src->data_layout(),
+ weights->tensor_shape(),
+ conv_info,
+ desc));
+
+ constexpr int channel_idx = 0;
+ constexpr int batch_idx = 3;
+
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(weights->dimension(channel_idx) != src->dimension(channel_idx), "Weights feature map dimension should match the respective src's one");
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(weights->num_dimensions() > 4, "Weights can be at most 4 dimensional");
+
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(desc.m0 <= 0 || desc.m0 > 8, "M0 can only be greater than 0 and less than or equal to 8");
+
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(desc.n0 != 1 && desc.n0 != 2 && desc.n0 != 3 && desc.n0 != 4 && desc.n0 != 8 && desc.n0 != 16,
+ "N0 can only be: 1, 2, 3, 4, 8, and 16");
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(desc.k0 != 1 && desc.k0 != 2 && desc.k0 != 3 && desc.k0 != 4 && desc.k0 != 8 && desc.k0 != 16,
+ "K0 can only be: 1, 2, 3, 4, 8, and 16");
+
+ if(desc.export_weights_to_cl_image)
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(desc.k0 != 4 && desc.k0 != 8 && desc.k0 != 16,
+ "K0 can only be: 4, 8, and 16");
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(!export_to_cl_image(weights),
+ "Export to CLImage is not supported for this weight configuration");
+ }
+
+ if(biases != nullptr)
+ {
+ if(is_data_type_quantized_asymmetric(src->data_type()))
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(biases, 1, DataType::S32);
+ }
+ else
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(weights, biases);
+ }
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(biases->dimension(channel_idx) != weights->dimension(batch_idx),
+ "Biases size and number of dst feature maps should match");
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(biases->num_dimensions() > 1,
+ "Biases should be one dimensional");
+ }
+
+ // Checks performed when dst is configured
+ if(dst->total_size() != 0)
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(dst->tensor_shape(),
+ misc::shape_calculator::compute_deep_convolution_shape(*src, *weights, conv_info));
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src, dst);
+ }
+
+ return Status{};
+}
+} // namespace
+
+ClIndirectConv2dKernel::ClIndirectConv2dKernel()
+{
+ _type = CLKernelType::DIRECT;
+}
+
+void ClIndirectConv2dKernel::configure(const CLCompileContext &compile_context, ITensorInfo *src, ITensorInfo *weights, ITensorInfo *biases, ITensorInfo *indirect_buffer, ITensorInfo *dst,
+ const PadStrideInfo &conv_info, const ActivationLayerInfo &act_info, const DirectConvComputeKernelInfo &desc)
+{
+ ARM_COMPUTE_ERROR_ON_NULLPTR(src, weights, indirect_buffer, dst);
+
+ // Perform validation
+ ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(src, weights, biases, indirect_buffer, dst, conv_info, act_info, desc));
+
+ constexpr unsigned int channel_idx = 0;
+ constexpr unsigned int width_idx = 1;
+ constexpr unsigned int height_idx = 2;
+ const unsigned int kernel_width = weights->dimension(width_idx);
+ const unsigned int kernel_height = weights->dimension(height_idx);
+ const DataType data_type = src->data_type();
+
+ const GPUTarget gpu_target = get_target();
+
+ // Get dst shape
+ TensorShape output_shape = misc::shape_calculator::compute_deep_convolution_shape(*src, *weights, conv_info);
+
+ // Output auto initialization if not yet initialized
+ auto_init_if_empty(*dst, output_shape,
+ 1,
+ src->data_type(),
+ src->quantization_info());
+
+ // Configure kernel window
+ Window win;
+ output_shape.collapse(2U, 1U);
+ const unsigned int n0 = adjust_vec_size(desc.n0, output_shape[0]);
+ const unsigned int m0 = adjust_vec_size(desc.m0, output_shape[1]);
+ const unsigned int k0 = adjust_vec_size(desc.k0, src->dimension(channel_idx));
+
+ const unsigned int partial_store_n0 = dst->dimension(channel_idx) % n0;
+
+ // Create window and update padding
+ win = calculate_max_window(output_shape, Steps(n0, m0));
+
+ ICLKernel::configure_internal(win);
+
+ std::stringstream kernel_name;
+ CLBuildOptions build_options;
+
+ kernel_name << "indirect_convolution_nhwc";
+
+ _export_to_cl_image = desc.export_weights_to_cl_image;
+
+ // Update the padding for the weights tensor if we can export to cl_image
+ if(_export_to_cl_image)
+ {
+ gemm::update_padding_for_cl_image(weights);
+ }
+
+ // Add padding to indirect buffer to avoid out-of-bound reads
+ // When M0 is 5, 6, and 7, we use vload8 to fetch the data from the buffer
+ const unsigned int load_indirect_buf_size = m0 > 4 ? 8 : m0;
+ const unsigned int indirect_buf_width = indirect_buffer->tensor_shape()[0];
+ const unsigned int round_up_width = ((indirect_buf_width + load_indirect_buf_size - 1) / load_indirect_buf_size) * load_indirect_buf_size;
+ const unsigned int padding = round_up_width - indirect_buf_width;
+ indirect_buffer->extend_padding(PaddingSize(0, indirect_buffer->padding().right + padding, 0, 0));
+
+ if(biases != nullptr)
+ {
+ build_options.add_option(std::string("-DHAS_BIAS"));
+ build_options.add_option(std::string("-DBIA_DATA_TYPE=" + get_cl_type_from_data_type(biases->data_type())));
+ }
+
+ // Conditions of -cl-fast-relaxed-math causing accuracy issues can be traced from COMPMID-5324
+ const auto act_function = act_info.activation();
+
+ if((gpu_target != GPUTarget::G71 && (gpu_target & GPUTarget::GPU_ARCH_MASK) == GPUTarget::BIFROST)
+ && (act_function == ActivationLayerInfo::ActivationFunction::BOUNDED_RELU || act_function == ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU)
+ && (data_type == DataType::F32 || data_type == DataType::F16))
+ {
+ // -cl-fast-relaxed-math also sets -cl-finite-math-only and -cl-unsafe-math-optimizations
+ // to disable -cl-finite-math-only, we only include -cl-unsafe-math-optimizations
+ build_options.add_option("-cl-unsafe-math-optimizations");
+ }
+ else
+ {
+ build_options.add_option("-cl-fast-relaxed-math");
+ }
+
+ build_options.add_option("-DSRC_TENSOR_TYPE=BUFFER");
+ build_options.add_option("-DSRC_DATA_TYPE=" + get_cl_type_from_data_type(data_type));
+ build_options.add_option("-DSRC_CHANNELS=" + support::cpp11::to_string(src->dimension(channel_idx)));
+ build_options.add_option("-DOFF_TENSOR_TYPE=BUFFER");
+ build_options.add_option("-DDST_WIDTH=" + support::cpp11::to_string(dst->dimension(width_idx)));
+ build_options.add_option("-DDST_HEIGHT=" + support::cpp11::to_string(dst->dimension(height_idx)));
+ build_options.add_option("-DDST_TENSOR_TYPE=BUFFER");
+ build_options.add_option("-DDST_DATA_TYPE=" + get_cl_type_from_data_type(data_type));
+ build_options.add_option_if_else(_export_to_cl_image, "-DWEI_TENSOR_TYPE=IMAGE", "-DWEI_TENSOR_TYPE=BUFFER");
+ build_options.add_option("-DWEI_WIDTH=" + support::cpp11::to_string(kernel_width));
+ build_options.add_option("-DWEI_HEIGHT=" + support::cpp11::to_string(kernel_height));
+ build_options.add_option("-DWEI_DATA_TYPE=" + get_cl_type_from_data_type(data_type));
+ build_options.add_option("-DN0=" + support::cpp11::to_string(n0));
+ build_options.add_option("-DM0=" + support::cpp11::to_string(m0));
+ build_options.add_option("-DK0=" + support::cpp11::to_string(k0));
+ build_options.add_option("-DPARTIAL_N0=" + support::cpp11::to_string(partial_store_n0));
+ build_options.add_option("-DIND_BUFF_VEC_SIZE=" + support::cpp11::to_string(load_indirect_buf_size));
+ build_options.add_option_if((src->dimension(channel_idx) % k0) != 0, "-DLEFTOVER_LOOP");
+ build_options.add_option("-DACTIVATION_TYPE=" + lower_string(string_from_activation_func(act_function)));
+ build_options.add_option_if(act_info.enabled(), "-DA_VAL=" + float_to_string_with_full_precision(act_info.a()));
+ build_options.add_option_if(act_info.enabled(), "-DB_VAL=" + float_to_string_with_full_precision(act_info.b()));
+
+ // A macro guard to compile ONLY the kernel of interest
+ build_options.add_option("-D" + upper_string(kernel_name.str()));
+
+ if(compile_context.get_ddk_version() >= 30)
+ {
+ build_options.add_option("-fregister-allocation=64");
+ }
+
+ _kernel = create_kernel(compile_context, kernel_name.str(), build_options.options());
+
+ // Set config_id for enabling LWS tuning
+ _config_id = kernel_name.str();
+ _config_id += "_";
+ _config_id += lower_string(string_from_data_type(data_type));
+ _config_id += "_";
+ _config_id += support::cpp11::to_string(kernel_width);
+ _config_id += "_";
+ _config_id += support::cpp11::to_string(kernel_height);
+ _config_id += "_";
+ _config_id += support::cpp11::to_string(src->dimension(width_idx));
+ _config_id += "_";
+ _config_id += support::cpp11::to_string(src->dimension(height_idx));
+ _config_id += "_";
+ _config_id += support::cpp11::to_string(src->dimension(channel_idx));
+ _config_id += "_";
+ _config_id += support::cpp11::to_string(dst->dimension(width_idx));
+ _config_id += "_";
+ _config_id += support::cpp11::to_string(dst->dimension(height_idx));
+ _config_id += "_";
+ _config_id += support::cpp11::to_string(dst->dimension(channel_idx));
+}
+
+Status ClIndirectConv2dKernel::validate(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *indirect_buffer, const ITensorInfo *dst,
+ const PadStrideInfo &conv_info, const ActivationLayerInfo &act_info, const DirectConvComputeKernelInfo &desc)
+{
+ ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(src, weights, biases, indirect_buffer, dst, conv_info, act_info, desc));
+ return Status{};
+}
+
+void ClIndirectConv2dKernel::run_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue)
+{
+ ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
+ ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(IKernel::window(), window);
+
+ // Get initial windows
+ Window slice = window.first_slice_window_3D();
+
+ const auto src = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC_0));
+ const auto weights = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC_1));
+ const auto biases = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC_2));
+ const auto indirect_buffer = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC_3));
+ auto dst = utils::cast::polymorphic_downcast<ICLTensor *>(tensors.get_tensor(TensorType::ACL_DST));
+
+ cl::Image2D weights_cl_image;
+
+ if(_export_to_cl_image)
+ {
+ const size_t image_w = weights->info()->dimension(0) / 4;
+ const size_t image_h = weights->info()->dimension(1) * weights->info()->dimension(2) * weights->info()->dimension(3);
+ const TensorShape shape2d(image_w, image_h);
+ const size_t image_row_pitch = weights->info()->strides_in_bytes()[1];
+
+ // Export cl_buffer to cl_image
+ weights_cl_image = create_image2d_from_buffer(CLKernelLibrary::get().context(), weights->cl_buffer(), shape2d, weights->info()->data_type(), image_row_pitch);
+ }
+
+ unsigned int idx = 0;
+ add_4d_tensor_nhwc_argument(idx, src);
+ add_4d_tensor_nhwc_argument(idx, indirect_buffer);
+ add_4d_tensor_nhwc_argument(idx, dst);
+ if(_export_to_cl_image)
+ {
+ _kernel.setArg(idx++, weights_cl_image);
+ }
+ add_4d_tensor_nhwc_argument(idx, weights);
+ if(biases != nullptr)
+ {
+ add_1D_tensor_argument(idx, biases, slice);
+ }
+ enqueue(queue, *this, slice, lws_hint());
+}
+} // namespace kernels
+} // namespace opencl
+} // namespace arm_compute
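The indirect-buffer padding computed in configure() simply rounds the buffer width up to the vector length used by the kernel's indirect loads. A worked example with assumed numbers (M0 = 5, so the vload8 path is taken, and an indirect buffer width of 30):

    // Assumed values for illustration only
    const unsigned int m0                     = 5;
    const unsigned int indirect_buf_width     = 30;
    const unsigned int load_indirect_buf_size = m0 > 4 ? 8 : m0;                  // = 8 (vload8 path)
    const unsigned int round_up_width         = ((indirect_buf_width + load_indirect_buf_size - 1)
                                                 / load_indirect_buf_size) * load_indirect_buf_size; // = 32
    const unsigned int padding                = round_up_width - indirect_buf_width; // = 2 elements of right padding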
diff --git a/src/gpu/cl/kernels/ClIndirectConv2dKernel.h b/src/gpu/cl/kernels/ClIndirectConv2dKernel.h
new file mode 100644
index 0000000000..d86029c5c9
--- /dev/null
+++ b/src/gpu/cl/kernels/ClIndirectConv2dKernel.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_CL_INDIRECT_CONV2D_KERNEL_H
+#define ARM_COMPUTE_CL_INDIRECT_CONV2D_KERNEL_H
+
+#include "src/core/common/Macros.h"
+#include "src/gpu/cl/ClCompileContext.h"
+#include "src/gpu/cl/IClKernel.h"
+
+namespace arm_compute
+{
+// Forward declaration
+struct DirectConvComputeKernelInfo;
+
+namespace opencl
+{
+namespace kernels
+{
+/** Interface for the indirect convolution kernel. */
+class ClIndirectConv2dKernel : public IClKernel
+{
+public:
+ ClIndirectConv2dKernel();
+ ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(ClIndirectConv2dKernel);
+ /** Set the src, weights, biases, indirect buffer and dst tensors info.
+ *
+ * @param[in] compile_context The compile context to be used.
+ * @param[in] src The src tensor info to convolve. 3 lower dimensions represent a single src [IFM, width, height],
+ * while every optional dimension from 4 and above represent a batch of inputs. Data types supported: F16/F32.
+ * @param[in] weights Weights tensor info. Weights are 4D tensor with dimensions [IFM, kernel_x, kernel_y, OFM].
+ * Data type supported: Same as @p src.
+ * @param[in] biases Biases tensor info. Biases are 1D tensor with dimension [OFM].
+ * Data type supported: Same as @p src.
+ * @param[in] indirect_buffer The indirect buffer tensor info. Data types supported: S32.
+ * @param[out] dst Output tensor info.
+ * The 3rd dimension must be equal to the 4th dimension of the @p weights tensor. Data types supported: Same as @p src.
+ * @param[in] conv_info Contains padding and stride information described in @ref PadStrideInfo.
+ * @param[in] act_info Contains activation information described in @ref ActivationLayerInfo.
+ * @param[in] desc Direct convolution descriptor used to build the NHWC indirect convolution kernel.
+ */
+ void configure(const CLCompileContext &compile_context, ITensorInfo *src, ITensorInfo *weights, ITensorInfo *biases, ITensorInfo *indirect_buffer, ITensorInfo *dst,
+ const PadStrideInfo &conv_info, const ActivationLayerInfo &act_info, const DirectConvComputeKernelInfo &desc);
+ /** Static function to check if given info will lead to a valid configuration
+ *
+ * Similar to ClIndirectConv2dKernel::configure()
+ *
+ * @return a status
+ */
+ static Status validate(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *indirect_buffer, const ITensorInfo *dst,
+ const PadStrideInfo &conv_info, const ActivationLayerInfo &act_info, const DirectConvComputeKernelInfo &desc);
+
+ // Inherited methods overridden:
+ void run_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue) override;
+
+public:
+ bool _export_to_cl_image{ false };
+};
+} // namespace kernels
+} // namespace opencl
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_CL_INDIRECT_CONV2D_KERNEL_H */
diff --git a/src/gpu/cl/operators/ClIndirectConv2d.cpp b/src/gpu/cl/operators/ClIndirectConv2d.cpp
new file mode 100644
index 0000000000..26df6838ed
--- /dev/null
+++ b/src/gpu/cl/operators/ClIndirectConv2d.cpp
@@ -0,0 +1,150 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "src/gpu/cl/operators/ClIndirectConv2d.h"
+
+#include "arm_compute/core/KernelDescriptors.h"
+#include "arm_compute/core/Types.h"
+#include "arm_compute/core/utils/misc/ShapeCalculator.h"
+#include "arm_compute/runtime/CL/CLScheduler.h"
+#include "src/core/helpers/AutoConfiguration.h"
+#include "src/gpu/cl/kernels/ClIndirectConv2dAddressPrecalculationKernel.h"
+#include "src/gpu/cl/kernels/ClIndirectConv2dKernel.h"
+#include "src/gpu/cl/kernels/direct_conv/ClDirectConvKernelConfig.h"
+#include "src/gpu/cl/kernels/direct_conv/IClDirectConvKernelConfig.h"
+
+#include "src/core/helpers/MemoryHelpers.h"
+#include "src/gpu/cl/utils/ClAuxTensorHandler.h"
+
+#include "src/common/utils/Log.h"
+
+using namespace arm_compute::cl_direct_conv;
+
+namespace arm_compute
+{
+namespace opencl
+{
+using namespace arm_compute::experimental;
+
+namespace
+{
+DirectConvComputeKernelInfo config_direct_convolution_nhwc(const ITensorInfo *src, const ITensorInfo *weights, const PadStrideInfo &conv_info)
+{
+ // Get GPU target
+ GPUTarget gpu_target = CLScheduler::get().target();
+
+ std::unique_ptr<IClDirectConvKernelConfig> t = ClDirectConvKernelConfigurationFactory::create(gpu_target);
+
+ return t->configure(src, weights, conv_info);
+}
+
+} // namespace
+
+void ClIndirectConv2d::configure(const CLCompileContext &compile_context, ITensorInfo *src, ITensorInfo *weights, ITensorInfo *biases, ITensorInfo *dst,
+ const PadStrideInfo &conv_info, const ActivationLayerInfo &act_info)
+{
+ ARM_COMPUTE_ERROR_ON_NULLPTR(src);
+ ARM_COMPUTE_LOG_PARAMS(src, weights, biases, dst, conv_info, act_info);
+
+ // Reuse the direct convolution descriptor
+ const DirectConvComputeKernelInfo desc = config_direct_convolution_nhwc(src, weights, conv_info);
+
+ // Configure indirect convolution kernels
+ auto k0 = std::make_unique<kernels::ClIndirectConv2dAddressPrecalculationKernel>();
+ auto k1 = std::make_unique<kernels::ClIndirectConv2dKernel>();
+
+ k0->set_target(CLScheduler::get().target());
+ k1->set_target(CLScheduler::get().target());
+
+ k0->configure(compile_context, src, weights, &_indirect_buffer, conv_info, desc);
+ k1->configure(compile_context, src, weights, biases, &_indirect_buffer, dst, conv_info, act_info, desc);
+
+ _addr_precalculation_kernel = std::move(k0);
+ _indirect_conv_kernel = std::move(k1);
+ _is_prepared = false;
+
+ // Tune kernels
+ CLScheduler::get().tune_kernel_static(*_indirect_conv_kernel);
+
+ // Request memory for the indirect buffer
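+ // The buffer is filled once by prepare() and then reused across run() calls, hence the Persistent lifetime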
+ _aux_mem[IndirectBuffer] = MemoryInfo(offset_int_vec(IndirectBuffer), MemoryLifetime::Persistent, _indirect_buffer.total_size());
+}
+
+Status ClIndirectConv2d::validate(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *dst,
+ const PadStrideInfo &conv_info, const ActivationLayerInfo &act_info)
+{
+ // Initialize the direct convolution descriptor
+ const DirectConvComputeKernelInfo desc = config_direct_convolution_nhwc(src, weights, conv_info);
+
+ TensorShape ind_buffer_shape = misc::shape_calculator::compute_indirect_buffer_shape(src->tensor_shape(),
+ src->data_layout(),
+ weights->tensor_shape(),
+ conv_info,
+ desc);
+
+ TensorInfo indirect_buffer(ind_buffer_shape, 1, DataType::S32);
+
+ ARM_COMPUTE_RETURN_ON_ERROR(kernels::ClIndirectConv2dAddressPrecalculationKernel::validate(src, weights, &indirect_buffer, conv_info, desc));
+ ARM_COMPUTE_RETURN_ON_ERROR(kernels::ClIndirectConv2dKernel::validate(src, weights, biases, &indirect_buffer, dst, conv_info, act_info, desc));
+
+ return Status{};
+}
+
+void ClIndirectConv2d::run(ITensorPack &tensors)
+{
+ CLAuxTensorHandler indirect_buffer(offset_int_vec(IndirectBuffer), _indirect_buffer, tensors, true);
+
+ prepare(tensors);
+
+ ITensorPack indirect_conv2d_pack(tensors);
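+ // The indirect convolution kernel expects the precalculated offsets in slot ACL_SRC_3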
+ indirect_conv2d_pack.add_const_tensor(ACL_SRC_3, indirect_buffer.get());
+
+ // Run indirect convolution
+ CLScheduler::get().enqueue_op(*_indirect_conv_kernel, indirect_conv2d_pack, true);
+}
+
+void ClIndirectConv2d::prepare(ITensorPack &constants)
+{
+ if(!_is_prepared)
+ {
+ ICLTensor *indirect_buffer_aux = utils::cast::polymorphic_downcast<ICLTensor *>(constants.get_tensor(offset_int_vec(IndirectBuffer)));
+ ARM_COMPUTE_ERROR_ON(indirect_buffer_aux == nullptr);
+
+ ARM_COMPUTE_LOG_INFO_WITH_FUNCNAME_ACL("Preparing indirect buffer");
+
+ CLAuxTensorHandler indirect_buffer(_indirect_buffer, *indirect_buffer_aux);
+ ARM_COMPUTE_ERROR_ON(indirect_buffer.get()->cl_buffer().get() == nullptr);
+
+ ITensorPack indirect_buffer_pack{ { ACL_DST, indirect_buffer.get() } };
+ CLScheduler::get().enqueue_op(*_addr_precalculation_kernel, indirect_buffer_pack, true);
+
+ _is_prepared = true;
+ }
+}
+
+experimental::MemoryRequirements ClIndirectConv2d::workspace() const
+{
+ return _aux_mem;
+}
+} // namespace opencl
+} // namespace arm_compute
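For reference, a minimal sketch of how a caller might satisfy the operator's workspace() requirement and drive run(). The driver code, tensor names and shapes are illustrative only and are not part of this patch; the usual arm_compute and opencl headers are assumed.

    // Illustrative driver only: src/weights/biases/dst are assumed to be pre-allocated CLTensors.
    opencl::ClIndirectConv2d conv;
    conv.configure(CLKernelLibrary::get().get_compile_context(),
                   src.info(), weights.info(), biases.info(), dst.info(), conv_info);

    ITensorPack pack{ { ACL_SRC_0, &src }, { ACL_SRC_1, &weights }, { ACL_SRC_2, &biases }, { ACL_DST, &dst } };

    // One backing CLTensor per entry reported by workspace(). The indirect buffer entry is
    // Persistent, so it has to stay alive across run() calls (prepare() fills it only once).
    std::vector<std::unique_ptr<CLTensor>> aux_tensors;
    for(const auto &req : conv.workspace())
    {
        auto t = std::make_unique<CLTensor>();
        t->allocator()->init(TensorInfo(TensorShape(req.size), 1, DataType::U8));
        t->allocator()->allocate();
        pack.add_tensor(req.slot, t.get());
        aux_tensors.emplace_back(std::move(t));
    }

    conv.run(pack); // prepare() runs the address pre-calculation kernel on the first call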
diff --git a/src/gpu/cl/operators/ClIndirectConv2d.h b/src/gpu/cl/operators/ClIndirectConv2d.h
new file mode 100644
index 0000000000..917a67f421
--- /dev/null
+++ b/src/gpu/cl/operators/ClIndirectConv2d.h
@@ -0,0 +1,110 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_CL_INDIRECT_CONV2D_H
+#define ARM_COMPUTE_CL_INDIRECT_CONV2D_H
+
+#include "arm_compute/core/TensorInfo.h"
+#include "arm_compute/runtime/CL/CLTensor.h"
+#include "arm_compute/runtime/CL/CLTypes.h"
+
+#include "src/gpu/cl/ClCompileContext.h"
+#include "src/gpu/cl/IClKernel.h"
+#include "src/gpu/cl/IClOperator.h"
+
+#include <memory>
+
+namespace arm_compute
+{
+// Forward declaration
+struct DirectConvComputeKernelInfo;
+
+namespace opencl
+{
+/** Basic function to execute indirect convolution on OpenCL. This function calls the following OpenCL kernels:
+ *
+ * -# @ref kernels::ClIndirectConv2dAddressPrecalculationKernel
+ * -# @ref kernels::ClIndirectConv2dKernel
+ */
+class ClIndirectConv2d : public IClOperator
+{
+public:
+ ClIndirectConv2d() = default;
+ /** Initialise the kernel's inputs and output
+ *
+ * Valid data layouts:
+ * - NHWC
+ *
+ * Valid data type configurations:
+ * |src0 |src1 |src2 |dst |
+ * |:------------|:-----------|:---------|:--------------|
+ * |F32 |F32 |F32 |F32 |
+ * |F16 |F16 |F16 |F16 |
+ *
+ * @note All tensors must have the same data type.
+ *
+ * @param[in] compile_context The compile context to be used.
+ * @param[in] src Source tensor. The 3 lower dimensions represent a single src,
+ * while every optional dimension from 4 and above represents a batch of sources.
+ * Data types supported: F16/F32.
+ * @param[in] weights Weights tensor. Weights are a 4D tensor with dimensions [IFM, kernel_x, kernel_y, OFM]. Data type supported: Same as @p src.
+ * @param[in] biases Biases tensor. Shared biases are supported. Biases are a 1D tensor with dimensions [OFM].
+ * Data type supported: Should match @p src data type.
+ * @param[out] dst Destination tensor. The 3 lower dimensions represent a single dst, while the rest represent a batch of destinations.
+ * Data types supported: Same as @p src.
+ * @param[in] conv_info Contains padding and stride information described in @ref PadStrideInfo.
+ * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
+ *
+ */
+ void configure(const CLCompileContext &compile_context, ITensorInfo *src, ITensorInfo *weights, ITensorInfo *biases, ITensorInfo *dst, const PadStrideInfo &conv_info,
+ const ActivationLayerInfo &act_info = ActivationLayerInfo());
+ /** Static function to check if given info will lead to a valid configuration
+ *
+ * Similar to ClIndirectConv2d::configure()
+ *
+ * @return a status
+ */
+ static Status validate(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *dst, const PadStrideInfo &conv_info,
+ const ActivationLayerInfo &act_info = ActivationLayerInfo());
+
+ // Inherited methods overridden:
+ void run(ITensorPack &tensors) override;
+ void prepare(ITensorPack &constants) override;
+ experimental::MemoryRequirements workspace() const override;
+
+private:
+ enum AuxTensorIdx
+ {
+ IndirectBuffer = 0,
+ Count
+ };
+
+ std::unique_ptr<IClKernel> _indirect_conv_kernel{ nullptr };
+ std::unique_ptr<IClKernel> _addr_precalculation_kernel{ nullptr };
+ TensorInfo _indirect_buffer{};
+ bool _is_prepared{ false };
+ experimental::MemoryRequirements _aux_mem{ Count };
+};
+} // namespace opencl
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_CL_INDIRECT_CONV2D_H */
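As a quick illustration of the validate() entry point declared above, support can be checked with bare TensorInfo objects before anything is configured. The snippet is only a sketch; the shapes are hypothetical and mirror the NHWC NoBias test case further down.

    // NHWC shapes in [C, W, H] / [IFM, kernel_x, kernel_y, OFM] order; values are illustrative.
    TensorInfo src(TensorShape(8U, 27U, 13U), 1, DataType::F32);
    TensorInfo wei(TensorShape(8U, 3U, 3U, 4U), 1, DataType::F32);
    TensorInfo dst(TensorShape(4U, 25U, 11U), 1, DataType::F32);
    src.set_data_layout(DataLayout::NHWC);
    wei.set_data_layout(DataLayout::NHWC);
    dst.set_data_layout(DataLayout::NHWC);

    // Biases are optional, so nullptr is accepted for src2
    const Status status = opencl::ClIndirectConv2d::validate(&src, &wei, nullptr, &dst, PadStrideInfo(1, 1, 0, 0));
    if(!bool(status))
    {
        // Fall back to another convolution operator
    }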
diff --git a/src/runtime/CL/functions/CLIndirectConvolutionLayer.cpp b/src/runtime/CL/functions/CLIndirectConvolutionLayer.cpp
new file mode 100644
index 0000000000..90af36aa77
--- /dev/null
+++ b/src/runtime/CL/functions/CLIndirectConvolutionLayer.cpp
@@ -0,0 +1,86 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/runtime/CL/functions/CLIndirectConvolutionLayer.h"
+
+#include "arm_compute/core/CL/ICLTensor.h"
+#include "arm_compute/core/Utils.h"
+#include "arm_compute/core/Validate.h"
+#include "src/gpu/cl/operators/ClIndirectConv2d.h"
+
+#include "src/common/utils/Log.h"
+
+namespace arm_compute
+{
+struct CLIndirectConvolutionLayer::Impl
+{
+ const ICLTensor *src{ nullptr };
+ const ICLTensor *weights{ nullptr };
+ const ICLTensor *biases{ nullptr };
+ ICLTensor *dst{ nullptr };
+ std::unique_ptr<opencl::ClIndirectConv2d> op{ nullptr };
+};
+
+CLIndirectConvolutionLayer::CLIndirectConvolutionLayer()
+ : _impl(std::make_unique<Impl>())
+{
+}
+CLIndirectConvolutionLayer::CLIndirectConvolutionLayer(CLIndirectConvolutionLayer &&) = default;
+CLIndirectConvolutionLayer &CLIndirectConvolutionLayer::operator=(CLIndirectConvolutionLayer &&) = default;
+CLIndirectConvolutionLayer::~CLIndirectConvolutionLayer() = default;
+
+void CLIndirectConvolutionLayer::configure(ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info, const ActivationLayerInfo &act_info)
+{
+ configure(CLKernelLibrary::get().get_compile_context(), input, weights, biases, output, conv_info, act_info);
+}
+
+void CLIndirectConvolutionLayer::configure(const CLCompileContext &compile_context, ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output,
+ const PadStrideInfo &conv_info, const ActivationLayerInfo &act_info)
+{
+ ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights, output);
+ ARM_COMPUTE_LOG_PARAMS(input, weights, biases, output, conv_info, act_info);
+
+ _impl->src = input;
+ _impl->weights = weights;
+ _impl->biases = biases;
+ _impl->dst = output;
+ _impl->op = std::make_unique<opencl::ClIndirectConv2d>();
+ _impl->op->configure(compile_context, input->info(), weights->info(), (biases != nullptr) ? biases->info() : nullptr, output->info(), conv_info, act_info);
+}
+
+Status CLIndirectConvolutionLayer::validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info,
+ const ActivationLayerInfo &act_info)
+{
+ return opencl::ClIndirectConv2d::validate(input, weights, biases, output, conv_info, act_info);
+}
+
+void CLIndirectConvolutionLayer::run()
+{
+ ITensorPack pack;
+ pack.add_tensor(TensorType::ACL_SRC, _impl->src);
+ pack.add_tensor(TensorType::ACL_SRC_1, _impl->weights);
+ pack.add_tensor(TensorType::ACL_SRC_2, _impl->biases);
+ pack.add_tensor(TensorType::ACL_DST, _impl->dst);
+ _impl->op->run(pack);
+}
+} // namespace arm_compute
diff --git a/tests/validation/CL/IndirectConvolutionLayer.cpp b/tests/validation/CL/IndirectConvolutionLayer.cpp
new file mode 100644
index 0000000000..aedf070e6b
--- /dev/null
+++ b/tests/validation/CL/IndirectConvolutionLayer.cpp
@@ -0,0 +1,268 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/core/Types.h"
+#include "arm_compute/runtime/CL/CLTensor.h"
+#include "arm_compute/runtime/CL/CLTensorAllocator.h"
+#include "arm_compute/runtime/CL/functions/CLIndirectConvolutionLayer.h"
+#include "tests/CL/CLAccessor.h"
+#include "tests/datasets/ShapeDatasets.h"
+#include "tests/framework/Macros.h"
+#include "tests/validation/Validation.h"
+#include "tests/validation/fixtures/DirectConvolutionLayerFixture.h"
+
+// Note: Since the interface of indirect convolution is the same as that of direct convolution, we can reuse
+// the direct convolution fixture
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+namespace
+{
+RelativeTolerance<half> tolerance_fp16(half(0.2)); /**< Tolerance for half-precision floating point tests */
+RelativeTolerance<float> tolerance_fp32(0.05f); /**< Tolerance for single-precision floating point tests */
+constexpr float abs_tolerance_f32(0.0001f); /**< Absolute tolerance for FP32 tests */
+constexpr float tolerance_num = 0.07f; /**< Maximum allowed fraction of mismatched elements */
+
+/** Activation function Dataset*/
+const auto ActivationFunctionsDataset = framework::dataset::make("ActivationInfo",
+{ ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 0.5f) });
+} // namespace
+
+TEST_SUITE(CL)
+TEST_SUITE(IndirectConvolutionLayer)
+
+/** Check whether the configuration of an indirect convolution layer with no
+ * bias leads to a successful run.
+ */
+TEST_CASE(NoBias, framework::DatasetMode::PRECOMMIT)
+{
+ const TensorShape src_shape_nhwc = TensorShape(8U, 27U, 13U);
+ const TensorShape wei_shape_nhwc = TensorShape(8U, 3U, 3U, 4U);
+ const TensorShape bia_shape = TensorShape(4U);
+ const TensorShape dst_shape_nhwc = TensorShape(4U, 25U, 11U);
+ constexpr DataType dt = DataType::F32;
+ constexpr DataLayout data_layout = DataLayout::NHWC;
+
+ auto src_nhwc = create_tensor<CLTensor>(src_shape_nhwc, dt, 1, QuantizationInfo(), data_layout);
+ auto wei_nhwc = create_tensor<CLTensor>(wei_shape_nhwc, dt, 1, QuantizationInfo(), data_layout);
+ auto dst_nhwc = create_tensor<CLTensor>(dst_shape_nhwc, dt, 1, QuantizationInfo(), data_layout);
+
+ TensorShape src_shape_nchw = src_shape_nhwc;
+ TensorShape wei_shape_nchw = wei_shape_nhwc;
+ TensorShape dst_shape_nchw = dst_shape_nhwc;
+
+ permute(src_shape_nchw, PermutationVector(1U, 2U, 0U));
+ permute(wei_shape_nchw, PermutationVector(1U, 2U, 0U, 3U));
+ permute(dst_shape_nchw, PermutationVector(1U, 2U, 0U));
+
+ const PadStrideInfo conv_info = PadStrideInfo(1, 1, 0, 0);
+
+ // Create indirect convolution function
+ CLIndirectConvolutionLayer conv{};
+ conv.configure(&src_nhwc, &wei_nhwc, nullptr, &dst_nhwc, conv_info);
+
+ src_nhwc.allocator()->allocate();
+ wei_nhwc.allocator()->allocate();
+ dst_nhwc.allocator()->allocate();
+
+ library->fill_tensor_value(CLAccessor(src_nhwc), 1.f);
+ library->fill_tensor_value(CLAccessor(wei_nhwc), 1.f);
+
+ conv.run();
+
+ // Compute reference to compare
+ SimpleTensor<float> ref_src{ src_shape_nchw, dt };
+ SimpleTensor<float> ref_wei{ wei_shape_nchw, dt };
+ SimpleTensor<float> ref_bia{ bia_shape, dt };
+ library->fill_tensor_value(ref_src, 1.f);
+ library->fill_tensor_value(ref_wei, 1.f);
+ // No bias
+ library->fill_tensor_value(ref_bia, 0.f);
+ auto ref_dst = reference::convolution_layer<float>(ref_src, ref_wei, ref_bia, dst_shape_nchw, conv_info);
+
+ validate(CLAccessor(dst_nhwc), ref_dst);
+}
+
+/** Check whether the case of rectangular kernels, i.e. when the width and height of the weights are not equal,
+ * leads to a successful run
+ */
+TEST_CASE(NonSquareKernel, framework::DatasetMode::PRECOMMIT)
+{
+ const TensorShape src_shape_nhwc = TensorShape(3U, 33U, 27U);
+ const TensorShape wei_shape_nhwc = TensorShape(3U, 5U, 7U, 4U); // non-square kernel
+ const TensorShape bia_shape = TensorShape(4U);
+ const TensorShape dst_shape_nhwc = TensorShape(4U, 11U, 12U);
+ constexpr DataType dt = DataType::F32;
+ constexpr DataLayout data_layout = DataLayout::NHWC;
+
+ auto src_nhwc = create_tensor<CLTensor>(src_shape_nhwc, dt, 1, QuantizationInfo(), data_layout);
+ auto wei_nhwc = create_tensor<CLTensor>(wei_shape_nhwc, dt, 1, QuantizationInfo(), data_layout);
+ auto dst_nhwc = create_tensor<CLTensor>(dst_shape_nhwc, dt, 1, QuantizationInfo(), data_layout);
+
+ TensorShape src_shape_nchw = src_shape_nhwc;
+ TensorShape wei_shape_nchw = wei_shape_nhwc;
+ TensorShape dst_shape_nchw = dst_shape_nhwc;
+
+ permute(src_shape_nchw, PermutationVector(1U, 2U, 0U));
+ permute(wei_shape_nchw, PermutationVector(1U, 2U, 0U, 3U));
+ permute(dst_shape_nchw, PermutationVector(1U, 2U, 0U));
+
+ const PadStrideInfo conv_info = PadStrideInfo(3, 2, 1, 1, 2, 0, DimensionRoundingType::FLOOR);
+
+ // Create indirect convolution function
+ CLIndirectConvolutionLayer conv{};
+ conv.configure(&src_nhwc, &wei_nhwc, nullptr, &dst_nhwc, conv_info);
+
+ src_nhwc.allocator()->allocate();
+ wei_nhwc.allocator()->allocate();
+ dst_nhwc.allocator()->allocate();
+
+ library->fill_tensor_value(CLAccessor(src_nhwc), 1.f);
+ library->fill_tensor_value(CLAccessor(wei_nhwc), 1.f);
+
+ conv.run();
+
+ // Compute reference to compare
+ SimpleTensor<float> ref_src{ src_shape_nchw, dt };
+ SimpleTensor<float> ref_wei{ wei_shape_nchw, dt };
+ SimpleTensor<float> ref_bia{ bia_shape, dt };
+ library->fill_tensor_value(ref_src, 1.f);
+ library->fill_tensor_value(ref_wei, 1.f);
+ // No bias
+ library->fill_tensor_value(ref_bia, 0.f);
+ auto ref_dst = reference::convolution_layer<float>(ref_src, ref_wei, ref_bia, dst_shape_nchw, conv_info);
+
+ validate(CLAccessor(dst_nhwc), ref_dst);
+}
+// *INDENT-OFF*
+// clang-format off
+// Note: Since the interface of indirect convolution is the same as that of direct convolution, we can reuse
+// the direct convolution fixture
+template <typename T>
+using CLIndirectConvolutionLayerFixture = DirectConvolutionValidationFixture<CLTensor, CLAccessor, CLIndirectConvolutionLayer, T>;
+template <typename T>
+using CLIndirectConvolutionLayerMixedDataLayoutFixture = DirectConvolutionValidationFixture<CLTensor, CLAccessor, CLIndirectConvolutionLayer, T, true>;
+
+TEST_SUITE(NHWC)
+TEST_SUITE(FP16)
+FIXTURE_DATA_TEST_CASE(RunSmall, CLIndirectConvolutionLayerFixture<half>, framework::DatasetMode::PRECOMMIT,
+ combine(combine(combine(zip(zip(zip(zip(zip(zip(
+ framework::dataset::make("InputShape", { TensorShape(27U, 13U, 23U),
+ TensorShape(19U, 5U, 16U, 4U),
+ TensorShape(13U, 5U, 17U, 2U),
+ TensorShape(32U, 37U, 13U) } ),
+ framework::dataset::make("StrideX", { 1, 3, 1, 1 })),
+ framework::dataset::make("StrideY", { 1, 3, 2, 1 })),
+ framework::dataset::make("PadX", { 1, 3, 0, 4 })),
+ framework::dataset::make("PadY", { 1, 3, 0, 4 })),
+ framework::dataset::make("KernelSize", { 3, 8, 1, 9 })),
+ framework::dataset::make("NumKernels", { 17, 3, 1, 19 })),
+ framework::dataset::make("DataType", DataType::F16)),
+ framework::dataset::make("ActivationInfo", ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU) )),
+ framework::dataset::make("DataLayout", DataLayout::NHWC)))
+{
+ validate(CLAccessor(_target), _reference, tolerance_fp16, tolerance_num);
+}
+
+FIXTURE_DATA_TEST_CASE(RunLarge, CLIndirectConvolutionLayerFixture<half>, framework::DatasetMode::NIGHTLY,
+ combine(combine(combine(zip(zip(zip(zip(zip(zip(
+ framework::dataset::make("InputShape", { TensorShape(800U, 800U, 3U) } ),
+ framework::dataset::make("StrideX", { 1 })),
+ framework::dataset::make("StrideY", { 1 })),
+ framework::dataset::make("PadX", { 1 })),
+ framework::dataset::make("PadY", { 1 })),
+ framework::dataset::make("KernelSize", { 9 })),
+ framework::dataset::make("NumKernels", { 3 })),
+ framework::dataset::make("DataType", DataType::F16)),
+ framework::dataset::make("ActivationInfo", ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::IDENTITY) )),
+ framework::dataset::make("DataLayout", DataLayout::NHWC)))
+{
+ validate(CLAccessor(_target), _reference, tolerance_fp16, tolerance_num);
+}
+
+TEST_SUITE_END() // FP16
+
+TEST_SUITE(FP32)
+FIXTURE_DATA_TEST_CASE(RunSmall, CLIndirectConvolutionLayerFixture<float>, framework::DatasetMode::PRECOMMIT,
+ combine(combine(combine(zip(zip(zip(zip(zip(zip(
+ framework::dataset::make("InputShape", { TensorShape(27U, 13U, 23U),
+ TensorShape(19U, 5U, 16U, 4U),
+ TensorShape(13U, 5U, 17U, 2U),
+ TensorShape(32U, 37U, 13U) } ),
+ framework::dataset::make("StrideX", { 1, 3, 1, 1 })),
+ framework::dataset::make("StrideY", { 1, 3, 2, 1 })),
+ framework::dataset::make("PadX", { 1, 3, 0, 4 })),
+ framework::dataset::make("PadY", { 1, 3, 0, 4 })),
+ framework::dataset::make("KernelSize", { 3, 8, 1, 9 })),
+ framework::dataset::make("NumKernels", { 17, 3, 1, 19 })),
+ framework::dataset::make("DataType", DataType::F32)),
+ framework::dataset::make("ActivationInfo", ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU) )),
+ framework::dataset::make("DataLayout", DataLayout::NHWC)))
+{
+ validate(CLAccessor(_target), _reference, tolerance_fp32, 0.0, abs_tolerance_f32);
+}
+FIXTURE_DATA_TEST_CASE(RunMixedDataLayout, CLIndirectConvolutionLayerMixedDataLayoutFixture<float>, framework::DatasetMode::PRECOMMIT,
+ combine(combine(combine(zip(zip(zip(zip(zip(zip(
+ framework::dataset::make("InputShape", { TensorShape(27U, 13U, 23U),
+ TensorShape(19U, 5U, 16U, 4U),
+ TensorShape(13U, 5U, 17U, 2U),
+ TensorShape(32U, 37U, 13U) } ),
+ framework::dataset::make("StrideX", { 1 })),
+ framework::dataset::make("StrideY", { 2 })),
+ framework::dataset::make("PadX", { 1 })),
+ framework::dataset::make("PadY", { 3 })),
+ framework::dataset::make("KernelSize", { 3 })),
+ framework::dataset::make("NumKernels", { 3 })),
+ framework::dataset::make("DataType", DataType::F32)),
+ framework::dataset::make("ActivationInfo", ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU) )),
+ framework::dataset::make("DataLayout", DataLayout::NHWC)))
+{
+ validate(CLAccessor(_target), _reference, tolerance_fp32, 0.0, abs_tolerance_f32);
+}
+FIXTURE_DATA_TEST_CASE(RunLarge, CLIndirectConvolutionLayerFixture<float>, framework::DatasetMode::NIGHTLY,
+ combine(combine(combine(zip(zip(zip(zip(zip(zip(
+ framework::dataset::make("InputShape", { TensorShape(800U, 800U, 3U) } ),
+ framework::dataset::make("StrideX", { 1 })),
+ framework::dataset::make("StrideY", { 1 })),
+ framework::dataset::make("PadX", { 1 })),
+ framework::dataset::make("PadY", { 1 })),
+ framework::dataset::make("KernelSize", { 9 })),
+ framework::dataset::make("NumKernels", { 3 })),
+ framework::dataset::make("DataType", DataType::F32)),
+ framework::dataset::make("ActivationInfo", ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::IDENTITY) )),
+ framework::dataset::make("DataLayout", DataLayout::NHWC)))
+{
+ validate(CLAccessor(_target), _reference, tolerance_fp32, 0.0, abs_tolerance_f32);
+}
+TEST_SUITE_END() // FP32
+TEST_SUITE_END() // NHWC
+TEST_SUITE_END() // IndirectConvolutionLayer
+TEST_SUITE_END() // CL
+
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
diff --git a/tests/validation/reference/IndirectConv2dAddressPrecalculation.cpp b/tests/validation/reference/IndirectConv2dAddressPrecalculation.cpp
index 240dcb686c..7500560c91 100644
--- a/tests/validation/reference/IndirectConv2dAddressPrecalculation.cpp
+++ b/tests/validation/reference/IndirectConv2dAddressPrecalculation.cpp
@@ -84,8 +84,7 @@ SimpleTensor<int32_t> indirect_conv2d_addr_precalculation(const TensorShape &sha
my = y_s < src_conv_height ? my : -1;
const unsigned int addr_out = mi + ki * m0 + y * (dst_width) + z * (dst_width * dst_height);
-
- out[addr_out] = my;
+ out[addr_out] = my;
}
}
}