From 945ae9e4027655267170ecc56563c362d8110d1e Mon Sep 17 00:00:00 2001 From: Giorgio Arena Date: Wed, 13 Oct 2021 11:13:04 +0100 Subject: Implement CLDirectConv3D f32/f16 Resolve COMPMID-4660 Signed-off-by: Giorgio Arena Change-Id: Ibd66ec1eb6faa60086981b1e3a9c12561df3445f Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/6420 Tested-by: Arm Jenkins Comments-Addressed: Arm Jenkins Reviewed-by: Gian Marco Iodice --- Android.bp | 4 + SConscript | 1 + arm_compute/runtime/CL/CLFunctions.h | 1 + arm_compute/runtime/CL/functions/CLConv3D.h | 101 ++++++++ filelist.json | 3 + src/core/CL/cl_kernels/common/gemmlowp.cl | 6 +- .../CL/cl_kernels/nhwc/direct_convolution3d.cl | 245 ++++++++++++++++++ src/core/CL/cl_kernels/nhwc/dwc_native_fp_nhwc.cl | 3 +- src/core/CL/cl_kernels/tile_helpers.h | 62 ++++- src/gpu/cl/ClKernelLibrary.cpp | 5 + src/gpu/cl/kernels/ClDirectConv2dKernel.cpp | 18 +- src/gpu/cl/kernels/ClDirectConv2dKernel.h | 4 +- src/gpu/cl/kernels/ClDirectConv3dKernel.cpp | 205 +++++++++++++++ src/gpu/cl/kernels/ClDirectConv3dKernel.h | 86 +++++++ src/gpu/cl/operators/ClDirectConv2d.cpp | 1 - src/gpu/cl/operators/ClDirectConv3d.cpp | 55 +++++ src/gpu/cl/operators/ClDirectConv3d.h | 86 +++++++ src/runtime/CL/functions/CLConv3D.cpp | 82 ++++++ tests/validation/CL/Convolution3D.cpp | 275 +++++++++++++++++++++ .../fixtures/DirectConvolution3DFixture.h | 8 +- utils/TypePrinter.h | 128 ++++++++++ 21 files changed, 1345 insertions(+), 34 deletions(-) create mode 100644 arm_compute/runtime/CL/functions/CLConv3D.h create mode 100644 src/core/CL/cl_kernels/nhwc/direct_convolution3d.cl create mode 100644 src/gpu/cl/kernels/ClDirectConv3dKernel.cpp create mode 100644 src/gpu/cl/kernels/ClDirectConv3dKernel.h create mode 100644 src/gpu/cl/operators/ClDirectConv3d.cpp create mode 100644 src/gpu/cl/operators/ClDirectConv3d.h create mode 100644 src/runtime/CL/functions/CLConv3D.cpp create mode 100644 tests/validation/CL/Convolution3D.cpp diff --git a/Android.bp 
b/Android.bp index 8b73de5f2f..36d392de57 100644 --- a/Android.bp +++ b/Android.bp @@ -100,6 +100,7 @@ opencl_srcs = [ "src/core/CL/cl_kernels/nhwc/depth_to_space.cl", "src/core/CL/cl_kernels/nhwc/dequantization_layer.cl", "src/core/CL/cl_kernels/nhwc/direct_convolution.cl", + "src/core/CL/cl_kernels/nhwc/direct_convolution3d.cl", "src/core/CL/cl_kernels/nhwc/dwc_native_fp_nhwc.cl", "src/core/CL/cl_kernels/nhwc/dwc_native_quantized_nhwc.cl", "src/core/CL/cl_kernels/nhwc/im2col.cl", @@ -512,6 +513,7 @@ cc_library_static { "src/gpu/cl/kernels/ClDepthConcatenateKernel.cpp", "src/gpu/cl/kernels/ClDequantizeKernel.cpp", "src/gpu/cl/kernels/ClDirectConv2dKernel.cpp", + "src/gpu/cl/kernels/ClDirectConv3dKernel.cpp", "src/gpu/cl/kernels/ClElementwiseKernel.cpp", "src/gpu/cl/kernels/ClElementwiseUnaryKernel.cpp", "src/gpu/cl/kernels/ClFillKernel.cpp", @@ -566,6 +568,7 @@ cc_library_static { "src/gpu/cl/operators/ClCrop.cpp", "src/gpu/cl/operators/ClDequantize.cpp", "src/gpu/cl/operators/ClDirectConv2d.cpp", + "src/gpu/cl/operators/ClDirectConv3d.cpp", "src/gpu/cl/operators/ClElementwiseOperations.cpp", "src/gpu/cl/operators/ClElementwiseUnary.cpp", "src/gpu/cl/operators/ClFill.cpp", @@ -618,6 +621,7 @@ cc_library_static { "src/runtime/CL/functions/CLChannelShuffleLayer.cpp", "src/runtime/CL/functions/CLComparison.cpp", "src/runtime/CL/functions/CLConcatenateLayer.cpp", + "src/runtime/CL/functions/CLConv3D.cpp", "src/runtime/CL/functions/CLConvertFullyConnectedWeights.cpp", "src/runtime/CL/functions/CLConvolutionLayer.cpp", "src/runtime/CL/functions/CLCopy.cpp", diff --git a/SConscript b/SConscript index c88a86773c..6672caee9f 100644 --- a/SConscript +++ b/SConscript @@ -356,6 +356,7 @@ if env['opencl'] and env['embed_kernels']: 'src/core/CL/cl_kernels/nhwc/batchnormalization_layer.cl', 'src/core/CL/cl_kernels/nhwc/channel_shuffle.cl', 'src/core/CL/cl_kernels/nhwc/direct_convolution.cl', + 'src/core/CL/cl_kernels/nhwc/direct_convolution3d.cl', 
'src/core/CL/cl_kernels/nhwc/depth_to_space.cl', 'src/core/CL/cl_kernels/nhwc/dequantization_layer.cl', 'src/core/CL/cl_kernels/nhwc/dwc_native_fp_nhwc.cl', diff --git a/arm_compute/runtime/CL/CLFunctions.h b/arm_compute/runtime/CL/CLFunctions.h index 442d407660..3dd635b14b 100644 --- a/arm_compute/runtime/CL/CLFunctions.h +++ b/arm_compute/runtime/CL/CLFunctions.h @@ -38,6 +38,7 @@ #include "arm_compute/runtime/CL/functions/CLChannelShuffleLayer.h" #include "arm_compute/runtime/CL/functions/CLComparison.h" #include "arm_compute/runtime/CL/functions/CLConcatenateLayer.h" +#include "arm_compute/runtime/CL/functions/CLConv3D.h" #include "arm_compute/runtime/CL/functions/CLConvertFullyConnectedWeights.h" #include "arm_compute/runtime/CL/functions/CLConvolutionLayer.h" #include "arm_compute/runtime/CL/functions/CLCopy.h" diff --git a/arm_compute/runtime/CL/functions/CLConv3D.h b/arm_compute/runtime/CL/functions/CLConv3D.h new file mode 100644 index 0000000000..241481b8ba --- /dev/null +++ b/arm_compute/runtime/CL/functions/CLConv3D.h @@ -0,0 +1,101 @@ +/* + * Copyright (c) 2021 Arm Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#ifndef ARM_COMPUTE_CLCONVOLUTION3DLAYER_H +#define ARM_COMPUTE_CLCONVOLUTION3DLAYER_H + +#include "arm_compute/runtime/IFunction.h" + +#include + +namespace arm_compute +{ +class CLCompileContext; +class ICLTensor; +class ITensorInfo; +struct Conv3dInfo; +class Status; + +/** Basic function to compute the convolution3d layer. This function calls the following OpenCL kernels/functions: + * + * -# @ref opencl::ClDirectConv3d + */ +class CLConv3D : public IFunction +{ +public: + /** Construtor */ + CLConv3D(); + /** Destructor */ + ~CLConv3D(); + /** Prevent instances of this class from being copied (As this class contains pointers) */ + CLConv3D(const CLConv3D &) = delete; + /** Default move constructor */ + CLConv3D(CLConv3D &&) = default; + /** Prevent instances of this class from being copied (As this class contains pointers) */ + CLConv3D &operator=(const CLConv3D &) = delete; + /** Default move assignment operator */ + CLConv3D &operator=(CLConv3D &&) = default; + /** Set the src and dst tensors. + * + * Valid data layouts: + * - NDHWC + * + * Valid data type configurations: + * |src0 |src1 |src2 |dst | + * |:--------------|:--------------|:------|:--------------| + * |F16 |F16 |F16 |F16 | + * |F32 |F32 |F32 |F32 | + * + * @param[in] compile_context The compile context to be used. + * @param[in] src Source tensor. 4 lower dimensions represent a single src [IFM, width, height, depth], + * while every optional dimension from 5 and above represent a batch of srcs. + * @param[in] weights Weights tensor. Weights are 5D tensor with dimensions [OFM, IFM, kernel_w, kernel_h, kernel_d]. + * @param[in] biases Biases tensor. Shared biases supported. Biases are 1D tensor with dimensions [OFM]. 
+ * @param[out] dst Destination tensor. 4 lower dimensions represent a single dst [OFM, width, height, depth], while the rest represent batch of dsts. + * @param[in] conv3d_info Contains strides, padding, rounding, activation, dilation and fast math information. Activation and fast math are currently unused. + * + */ + void configure(const CLCompileContext &compile_context, const ICLTensor *src, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *dst, const Conv3dInfo &conv3d_info); + /** Set the src and dst tensors. + * + * Similar to CLConv3D::configure() but using the default compile context + * + */ + void configure(const ICLTensor *src, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *dst, const Conv3dInfo &conv3d_info); + /** Static function to check if given info will lead to a valid configuration of @ref CLConv3D + * + * Similar to CLConv3D::configure() + * + * @return a status + */ + static Status validate(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *dst, const Conv3dInfo &conv3d_info); + + // Inherited methods overridden: + void run() override; + +private: + struct Impl; + std::unique_ptr _impl; +}; +} +#endif /* ARM_COMPUTE_CLCONVOLUTION3DLAYER_H */ diff --git a/filelist.json b/filelist.json index 4b85408e3d..6a788400b5 100644 --- a/filelist.json +++ b/filelist.json @@ -284,6 +284,7 @@ "files": { "common": [ "src/gpu/cl/kernels/ClDirectConv2dKernel.cpp", + "src/gpu/cl/kernels/ClDirectConv3dKernel.cpp", "src/gpu/cl/kernels/ClWinogradFilterTransformKernel.cpp", "src/gpu/cl/kernels/ClWinogradInputTransformKernel.cpp", "src/gpu/cl/kernels/ClWinogradOutputTransformKernel.cpp", @@ -291,10 +292,12 @@ "src/gpu/cl/kernels/ClCol2ImKernel.cpp", "src/gpu/cl/operators/ClConv2d.cpp", "src/gpu/cl/operators/ClDirectConv2d.cpp", + "src/gpu/cl/operators/ClDirectConv3d.cpp", "src/gpu/cl/operators/ClGemmConv2d.cpp", "src/gpu/cl/operators/ClWinogradConv2d.cpp", 
"src/gpu/cl/kernels/ClWeightsReshapeKernel.cpp", "src/runtime/CL/functions/CLConvolutionLayer.cpp", + "src/runtime/CL/functions/CLConv3D.cpp", "src/runtime/CL/functions/CLDirectConvolutionLayer.cpp", "src/runtime/CL/functions/CLFFTConvolutionLayer.cpp", "src/runtime/CL/functions/CLGEMMConvolutionLayer.cpp", diff --git a/src/core/CL/cl_kernels/common/gemmlowp.cl b/src/core/CL/cl_kernels/common/gemmlowp.cl index 564cbf93cc..f9d18ec976 100644 --- a/src/core/CL/cl_kernels/common/gemmlowp.cl +++ b/src/core/CL/cl_kernels/common/gemmlowp.cl @@ -703,7 +703,7 @@ __kernel void gemmlowp_mm_reshaped_only_rhs_t a_offset_s32[0].v *= A_OFFSET; - T_ADD_BROADCAST_X(int, M0, 1, offset_s32, a_offset_s32, offset_s32); + T_ADD_BROADCAST_X(int, M0, N0, offset_s32, a_offset_s32, offset_s32); #endif // defined(A_OFFSET) #if defined(B_OFFSET) @@ -728,7 +728,7 @@ __kernel void gemmlowp_mm_reshaped_only_rhs_t T_LOAD(int, 1, N0, BUFFER, biases, xo, 0, 1, 0, bias); - T_ADD_BROADCAST_X(ACC_DATA_TYPE, M0, 1, offset_s32, bias, offset_s32); + T_ADD_BROADCAST_X(int, M0, N0, offset_s32, bias, offset_s32); #endif // defined(ADD_BIAS) LOOP_UNROLLING(int, i, 0, 1, M0, @@ -786,7 +786,7 @@ __kernel void gemmlowp_mm_reshaped_only_rhs_t #endif // defined(REINTERPRET_OUTPUT_AS_3D) }) - const bool cond_x = (xo > (N - N0)) && (PARTIAL_STORE_N0 != 0); + const bool cond_x = (xo > (N - N0)) & (PARTIAL_STORE_N0 != 0); #if defined(FUSED_OUTPUT_STAGE_FIXED_POINT) T_STORE_INDIRECT_WIDTH_SELECT(DATA_TYPE, M0, N0, PARTIAL_STORE_N0, BUFFER, dst, xo, dst_stride_y, cond_x, c_lp, dst_indirect_y); diff --git a/src/core/CL/cl_kernels/nhwc/direct_convolution3d.cl b/src/core/CL/cl_kernels/nhwc/direct_convolution3d.cl new file mode 100644 index 0000000000..df8be0091d --- /dev/null +++ b/src/core/CL/cl_kernels/nhwc/direct_convolution3d.cl @@ -0,0 +1,245 @@ +/* + * Copyright (c) 2021 Arm Limited. 
+ * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include "helpers.h" +#include "tile_helpers.h" + +//! @cond Doxygen_Suppress +/** OpenCL kernel to compute the direct convolution. + * + * @note Data layout supported: NDHWC + * @note Data type supported: F32/F16 + * @note The accumulation data type must be passed at compile time using -DDATA_TYPE (e.g. -DDATA_TYPE_PROMOTED=half) + * @note The convolution padding (left, top and front) must be passed at compile time using -DPAD_LEFT, -DPAD_TOP and -DPAD_FRONT (e.g. -DPAD_LEFT=2, -DPAD_TOP=2, -DPAD_FRONT=2) + * @note The convolution strides must be passed at compile time using -DSTRIDE_X, -DSTRIDE_Y and -DSTRIDE_Z (e.g. -DSTRIDE_X=2, -DSTRIDE_Y=2, -DSTRIDE_Z=2) + * @note The spatial dimensions of the weights must be passed at compile time using -DWEI_WIDTH, -DWEI_HEIGHT and -DWEI_DEPTH (e.g. 
-DWEI_WIDTH=9, -DWEI_HEIGHT=9, -DWEI_DEPTH=9) + * @note The spatial dimensions of the source tensor must be passed at compile time using -DSRC_WIDTH, -DSRC_HEIGHT and -DSRC_DEPTH (e.g. -DSRC_WIDTH=96, -DSRC_HEIGHT=64, -DSRC_DEPTH=32) + * @note The spatial dimensions of the destination tensor must be passed at compile time using -DDST_WIDTH, -DDST_HEIGHT and -DDST_DEPTH (e.g. -DDST_WIDTH=96, -DDST_HEIGHT=64, -DDST_DEPTH=32) + * @note The channels of the source tensor must be passed at compile time using -DSRC_CHANNELS (e.g. -DSRC_CHANNELS=64) + * @note The data type must be passed at compile time using -DDATA_TYPE (e.g. -DDATA_TYPE=float) + * @note The number of M0 rows (width*height) to process must be passed at compile time using -DM0 (e.g. -DM0=2) + * @note The number of N0 output channels to process must be passed at compile time using -DN0 (e.g. -DN0=2) + * @note The number of K0 inner accumulations must be passed at compile time using -DK0 (e.g. -DK0=2) + * @note The size of the partial store block in x must be passed at compile time using -DPARTIAL_N0 (e.g. -DPARTIAL_N0=1) + * @note Only the following configurations of M0, N0 and K0 are currently supported: + * - M0 = 1, 2, 3, 4, 5, .... n + * - N0 = 2, 3, 4, 8, 16 + * - K0 = 2, 3, 4, 8, 16 + * + * @note If biases are used then -DHAS_BIAS has to be passed at compile time + * + * @param[in] src_ptr Pointer to the source tensor. 
Supported data type: F16/F32 + * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes) + * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes) + * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes) + * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes) + * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes) + * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes) + * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes) + * @param[in] src_step_w src_stride_w * number of elements along W processed per workitem(in bytes) + * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor + * @param[out] dst_ptr Pointer to the destination tensor. Supported data type: same as @p src_ptr + * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes) + * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes) + * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes) + * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes) + * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes) + * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes) + * @param[in] dst_stride_w Stride of the destination tensor in W dimension (in bytes) + * @param[in] dst_step_w dst_stride_w * number of elements along W processed per workitem(in bytes) + * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor + * @param[in] wei_ptr Pointer to the weights tensor. 
Supported data type: same as @p src_ptr + * @param[in] wei_stride_x Stride of the weights tensor in X dimension (in bytes) + * @param[in] wei_step_x wei_stride_x * number of elements along X processed per workitem(in bytes) + * @param[in] wei_stride_y Stride of the weights tensor in Y dimension (in bytes) + * @param[in] wei_step_y wei_stride_y * number of elements along Y processed per workitem(in bytes) + * @param[in] wei_stride_z Stride of the weights tensor in Z dimension (in bytes) + * @param[in] wei_step_z wei_stride_z * number of elements along Z processed per workitem(in bytes) + * @param[in] wei_stride_w Stride of the weights tensor in W dimension (in bytes) + * @param[in] wei_step_w wei_stride_w * number of elements along W processed per workitem(in bytes) + * @param[in] wei_offset_first_element_in_bytes The offset of the first element in the weights matrix + * @param[in] bia_ptr (Optional) Pointer to the bias tensor Supported data type: same as @p src_ptr + * @param[in] bia_stride_x (Optional) Stride of the bias tensor in X dimension (in bytes) + * @param[in] bia_step_x (Optional) bia_stride_x * number of elements along X processed per workitem(in bytes) + * @param[in] bia_offset_first_element_in_bytes (Optional) The offset of the first element in the bias matrix + */ +//! 
@endcond +__kernel void direct_convolution3d_ndhwc( + TENSOR4D(src, BUFFER), + TENSOR4D(dst, BUFFER), + TENSOR4D(wei, BUFFER) +#if defined(HAS_BIAS) + , + VECTOR_DECLARATION(bia) +#endif // defined(HAS_BIAS) +) +{ +#define _IWEI_WIDTH WEI_WIDTH +#define _IWEI_HEIGHT WEI_HEIGHT +#define _IWEI_DEPTH WEI_DEPTH +#define _ISRC_WIDTH SRC_WIDTH +#define _ISRC_HEIGHT SRC_HEIGHT +#define _ISRC_DEPTH SRC_DEPTH +#define _ISRC_CHANNELS SRC_CHANNELS +#define _IDST_WIDTH DST_WIDTH +#define _IDST_HEIGHT DST_HEIGHT +#define _IDST_DEPTH DST_DEPTH +#define _IDST_CHANNELS DST_CHANNELS +#define _IY_MULTIPLIER (_IWEI_WIDTH * _IWEI_HEIGHT * _IWEI_DEPTH) + + const int cout = GET_SPATIAL_IDX(0, N0, PARTIAL_N0); // OFM + const int mout = GET_SPATIAL_IDX(1, M0, 0); // WIDTH x HEIGHT x DEPTH + const int bout = GET_SPATIAL_IDX(2, 1, 0); // BATCH SIZE IDX + + TILE(int, M0, 1, xi); + TILE(int, M0, 1, yi); + TILE(int, M0, 1, zi); + + // Convert the linear index to coordinate + LOOP_UNROLLING(int, i, 0, 1, M0, + { + xi[i].v = ((mout + i) % _IDST_WIDTH) * STRIDE_X; + yi[i].v = (((mout + i) / _IDST_WIDTH) % _IDST_HEIGHT) * STRIDE_Y; + zi[i].v = (((mout + i) / (_IDST_WIDTH * _IDST_HEIGHT)) % _IDST_DEPTH) * STRIDE_Z; + + xi[i].v -= PAD_LEFT; + yi[i].v -= PAD_TOP; + zi[i].v -= PAD_FRONT; + }) + + // Initialize the accumulators + TILE(ACC_DATA_TYPE, M0, N0, c); + + LOOP_UNROLLING(int, i, 0, 1, M0, + { + c[i].v = (ACC_DATA_TYPE)0; + }) + + for(int i = 0; i < _IY_MULTIPLIER; ++i) + { + int ck = 0; + int xk = i % _IWEI_WIDTH; + int yk = (i / _IWEI_WIDTH) % _IWEI_HEIGHT; + int zk = i / (_IWEI_WIDTH * _IWEI_HEIGHT); + + __global uchar *src_addr = src_ptr + src_offset_first_element_in_bytes; + + int k = 0; + for(; k <= (_ISRC_CHANNELS - K0); k += K0) + { + TILE(DATA_TYPE, M0, K0, a); + TILE(DATA_TYPE, N0, K0, b); + + LOOP_UNROLLING(int, i, 0, 1, M0, + { + a[i].v = (DATA_TYPE)0; + }) + + // Load tile from the src tensor + T_LOAD_NDHWC_INDIRECT(DATA_TYPE, M0, K0, BUFFER, src, bout, zk, yk, xk, ck, _ISRC_WIDTH, 
_ISRC_HEIGHT, _ISRC_DEPTH, src_stride_y, xi, yi, zi, a); + + // Load tile from the weights tensor + const int b_offs = k + (xk * _ISRC_CHANNELS) + (yk * _ISRC_CHANNELS * _IWEI_WIDTH) + (zk * _ISRC_CHANNELS * _IWEI_WIDTH * _IWEI_HEIGHT); + LOOP_UNROLLING(int, i, 0, 1, N0, + { + if((cout + i) < _IDST_CHANNELS) + { + LOOP_UNROLLING(int, j, 0, 1, K0, + { + b[i].s[j] = *(__global DATA_TYPE *)(wei_ptr + wei_offset_first_element_in_bytes + (cout + i) * sizeof(DATA_TYPE) + j * wei_stride_y + b_offs * wei_stride_y); + }) + } + }) + + // Compute the matrix multiplication between two tiles + T_MMUL(DATA_TYPE, DATA_TYPE, ACC_DATA_TYPE, M0, N0, K0, NT, T, a, b, c); + + ck += K0; + } + +#if((_ISRC_CHANNELS % K0) != 0) + // Left-over accumulations + for(; k < _ISRC_CHANNELS; ++k) + { + TILE(DATA_TYPE, M0, 1, a); + TILE(DATA_TYPE, N0, 1, b); + + LOOP_UNROLLING(int, i, 0, 1, M0, + { + a[i].v = (DATA_TYPE)0; + }) + + // Load tile from the src tensor + T_LOAD_NDHWC_INDIRECT(DATA_TYPE, M0, 1, BUFFER, src, bout, zk, yk, xk, ck, _ISRC_WIDTH, _ISRC_HEIGHT, _ISRC_DEPTH, src_stride_y, xi, yi, zi, a); + + // Load tile from the weights tensor + const int b_offs = k + (xk * _ISRC_CHANNELS) + (yk * _ISRC_CHANNELS * _IWEI_WIDTH) + (zk * _ISRC_CHANNELS * _IWEI_WIDTH * _IWEI_HEIGHT); + LOOP_UNROLLING(int, i, 0, 1, N0, + { + if((cout + i) < _IDST_CHANNELS) + { + b[i].v = *(__global DATA_TYPE *)(wei_ptr + wei_offset_first_element_in_bytes + (cout + i) * sizeof(DATA_TYPE) + b_offs * wei_stride_y); + } + }) + + // // Compute the matrix multiplication between two tiles + T_MMUL(DATA_TYPE, DATA_TYPE, ACC_DATA_TYPE, M0, N0, 1, NT, T, a, b, c); + + ++ck; + } +#endif // ((_ISRC_CHANNELS % K0) != 0) + } + +#if defined(HAS_BIAS) + TILE(DATA_TYPE, 1, N0, bias0); + + if((cout + N0) <= _IDST_CHANNELS) + { + bias0[0].v = VLOAD(N0)(0, (__global DATA_TYPE *)(bia_ptr + bia_offset_first_element_in_bytes + cout * sizeof(DATA_TYPE))); + } + else + { + VLOAD_PARTIAL(N0, PARTIAL_N0) + (bias0[0].v, 0, (__global 
DATA_TYPE *)(bia_ptr + bia_offset_first_element_in_bytes + cout * sizeof(DATA_TYPE))); + } + + // c = c + bias[broadcasted] + T_ADD_BROADCAST_X(ACC_DATA_TYPE, M0, N0, c, bias0, c); + +#endif // HAS_BIAS + + TILE(uint, M0, 1, dst_indirect_y); + + // Calculate the destination indirect Y + LOOP_UNROLLING(int, i, 0, 1, M0, + { + dst_indirect_y[i].v = (uint)min(mout + i, (int)(_IDST_WIDTH *_IDST_HEIGHT * _IDST_DEPTH) - 1); + dst_indirect_y[i].v += bout * (int)(_IDST_WIDTH *_IDST_HEIGHT * _IDST_DEPTH); + }) + + bool x_cond = PARTIAL_N0 != 0 && get_global_id(0) == 0; + + // Store the tile in reverse order so the invalid values are overwritten with the valid ones + T_STORE_INDIRECT_WIDTH_SELECT(DATA_TYPE, M0, N0, PARTIAL_N0, BUFFER, dst, cout, dst_stride_y, x_cond, c, dst_indirect_y); +} \ No newline at end of file diff --git a/src/core/CL/cl_kernels/nhwc/dwc_native_fp_nhwc.cl b/src/core/CL/cl_kernels/nhwc/dwc_native_fp_nhwc.cl index d2e7e45ada..58f01fa3ea 100644 --- a/src/core/CL/cl_kernels/nhwc/dwc_native_fp_nhwc.cl +++ b/src/core/CL/cl_kernels/nhwc/dwc_native_fp_nhwc.cl @@ -24,7 +24,6 @@ #include "activation_float_helpers.h" #include "helpers.h" -#include "helpers_asymm.h" #include "tile_helpers.h" #if defined(SRC_WIDTH) && defined(SRC_HEIGHT) && defined(DST_WIDTH) && defined(DST_HEIGHT) && defined(WEI_WIDTH) && defined(WEI_HEIGHT) && defined(N0) && defined(M0) && defined(DILATION_X) && defined(DILATION_Y) && defined(STRIDE_X) && defined(STRIDE_Y) && defined(PAD_LEFT) && defined(PAD_TOP) @@ -87,7 +86,7 @@ * @param[in] wei_stride_w Stride of the weights tensor in W dimension (in bytes) * @param[in] wei_step_w wei_stride_w * number of elements along W processed per workitem(in bytes) * @param[in] wei_offset_first_element_in_bytes The offset of the first element in the bias matrix - * @param[in] bia_ptr (Optional) Pointer to the bias tensor Supported data type: same as @p src_ptr (if F32/F16) or S32 (if QASYMM8/QASYMM8_SIGNED) + * @param[in] bia_ptr (Optional) Pointer to 
the bias tensor Supported data type: same as @p src_ptr * @param[in] bia_stride_x (Optional) Stride of the bias tensor in X dimension (in bytes) * @param[in] bia_step_x (Optional) bia_stride_x * number of elements along X processed per workitem(in bytes) * @param[in] bia_offset_first_element_in_bytes (Optional) The offset of the first element in the bias matrix diff --git a/src/core/CL/cl_kernels/tile_helpers.h b/src/core/CL/cl_kernels/tile_helpers.h index ecabecb8fb..f36f273e1d 100644 --- a/src/core/CL/cl_kernels/tile_helpers.h +++ b/src/core/CL/cl_kernels/tile_helpers.h @@ -542,8 +542,8 @@ * @param[in] Y Starting Y index * @param[in] X Starting X index * @param[in] C Starting C index - * @param[in] TENSOR_HEIGHT Number of elements to load from Y (height) dimension * @param[in] TENSOR_WIDTH Number of elements to load from X (width) dimension + * @param[in] TENSOR_HEIGHT Number of elements to load from Y (height) dimension * @param[in] STRIDE_Y Stride Y (in bytes) * @param[out] xi A tile with (TILE_WIDTH x TILE_HEIGHT) values with the indirect X coordinate * @param[out] yi A tile with (TILE_WIDTH x TILE_HEIGHT) values with the indirect Y coordinate @@ -563,6 +563,43 @@ }) \ }) +/** Load a tile from global memory (tensor) when the tensor is stored using a NDHWC layout using indirect X, Y and Z coordinates + * + * @param[in] DATA_TYPE Data type + * @param[in] TILE_AREA Number of elements to load from Y (height) dimension * Number of elements to load from X (width) dimension + * @param[in] TILE_CHANNELS Number of elements to load from C (channel) dimension + * @param[in] TENSOR_TYPE Type of cl_type used to store the tensor in global memory (BUFFER=cl_buffer, IMAGE=cl_image). 
Currently BUFFER only is supported + * In case of cl_image, only TILE_CHANNELS multiples of 4 are supported (4, 8, 16) + * @param[in] TENSOR Tensor basename + * @param[in] B Starting batch index + * @param[in] Z Starting Z index + * @param[in] Y Starting Y index + * @param[in] X Starting X index + * @param[in] C Starting C index + * @param[in] TENSOR_WIDTH Number of elements to load from X (width) dimension + * @param[in] TENSOR_HEIGHT Number of elements to load from Y (height) dimension + * @param[in] TENSOR_DEPTH Number of elements to load from Z (depth) dimension + * @param[in] STRIDE_Y Stride Y (in bytes) + * @param[out] xi A tile with (TILE_WIDTH x TILE_HEIGHT) values with the indirect X coordinate + * @param[out] yi A tile with (TILE_WIDTH x TILE_HEIGHT) values with the indirect Y coordinate + * @param[out] zi A tile with (TILE_WIDTH x TILE_HEIGHT) values with the indirect Z coordinate + * @param[out] dst Output tile + */ +#define T_LOAD_NDHWC_INDIRECT(DATA_TYPE, TILE_AREA, TILE_CHANNELS, TENSOR_TYPE, TENSOR, B, Z, Y, X, C, TENSOR_WIDTH, TENSOR_HEIGHT, TENSOR_DEPTH, STRIDE_Y, xi, yi, zi, dst) \ + ({ \ + LOOP_UNROLLING(int, _i, 0, 1, TILE_AREA, \ + { \ + int _src_y = (X) + xi[_i].v + ((Y) + yi[_i].v) * (TENSOR_WIDTH) + ((Z) + zi[_i].v) * (TENSOR_WIDTH * TENSOR_HEIGHT); \ + _src_y += (B) * (int)(TENSOR_WIDTH) * (int)(TENSOR_HEIGHT) * (int)(TENSOR_DEPTH); \ + int _src_valid_y = (((X) + xi[_i].v) >= 0 && ((X) + xi[_i].v) < (int)(TENSOR_WIDTH) && ((Y) + yi[_i].v) >= 0 && ((Y) + yi[_i].v) < (int)(TENSOR_HEIGHT) \ + && ((Z) + zi[_i].v) >= 0 && ((Z) + zi[_i].v) < (int)(TENSOR_DEPTH)); \ + if(_src_valid_y != 0) \ + { \ + dst[_i].v = V_LOAD(DATA_TYPE, TILE_CHANNELS, TENSOR_TYPE, TENSOR, C, _src_y, STRIDE_Y); \ + } \ + }) \ + }) + /** Store a tile to global memory (tensor) using an indirect Y index tile and conditionally use a different length for the store * * @note If WIDTH1_CONDITION is true, the store will use the WIDTH1 length for the store @@ -588,7 +625,7 @@ 
LOOP_UNROLLING(int, _i, 0, 1, HEIGHT, \ { \ VSTORE_PARTIAL(WIDTH0, WIDTH1) \ - (src[HEIGHT - 1 - _i].v, 0, (__global DATA_TYPE *)(TENSOR##_ptr + TENSOR##_offset_first_element_in_bytes + (X) * sizeof(DATA_TYPE) + (indirect_y[HEIGHT - 1 - _i].v) * STRIDE_Y)); \ + (CONVERT(src[HEIGHT - 1 - _i].v, VEC_DATA_TYPE(DATA_TYPE, WIDTH0)), 0, (__global DATA_TYPE *)(TENSOR##_ptr + TENSOR##_offset_first_element_in_bytes + (X) * sizeof(DATA_TYPE) + (indirect_y[HEIGHT - 1 - _i].v) * STRIDE_Y)); \ }) \ } \ else \ @@ -596,7 +633,7 @@ LOOP_UNROLLING(int, _i, 0, 1, HEIGHT, \ { \ VSTORE(WIDTH0) \ - (src[HEIGHT - 1 - _i].v, 0, (__global DATA_TYPE *)(TENSOR##_ptr + TENSOR##_offset_first_element_in_bytes + (X) * sizeof(DATA_TYPE) + (indirect_y[HEIGHT - 1 - _i].v) * STRIDE_Y)); \ + (CONVERT(src[HEIGHT - 1 - _i].v, VEC_DATA_TYPE(DATA_TYPE, WIDTH0)), 0, (__global DATA_TYPE *)(TENSOR##_ptr + TENSOR##_offset_first_element_in_bytes + (X) * sizeof(DATA_TYPE) + (indirect_y[HEIGHT - 1 - _i].v) * STRIDE_Y)); \ }) \ } \ }) @@ -892,18 +929,18 @@ * @note Performs: LHS + RHS[broadcasted] = DST * @note Both tiles must have same data type * - * @param[in] DATA_TYPE LHS/RHS/DST data type - * @param[in] M0 Number of LHS rows - * @param[in] N0 Number of LHS columns - * @param[in] lhs LHS tile - * @param[in] rhs RHS tile - * @param[out] dst DST tile + * @param[in] DST_DATA_TYPE DST data type + * @param[in] M0 Number of LHS rows + * @param[in] N0 Number of LHS columns + * @param[in] lhs LHS tile + * @param[in] rhs RHS tile + * @param[out] dst DST tile */ -#define T_ADD_BROADCAST_X(DATA_TYPE, M0, N0, lhs, rhs, dst) \ +#define T_ADD_BROADCAST_X(DST_DATA_TYPE, M0, N0, lhs, rhs, dst) \ ({ \ LOOP_UNROLLING(int, _m0, 0, 1, M0, \ { \ - dst[_m0].v = lhs[_m0].v + rhs[0].v; \ + dst[_m0].v = CONVERT(lhs[_m0].v, VEC_DATA_TYPE(DST_DATA_TYPE, N0)) + CONVERT(rhs[0].v, VEC_DATA_TYPE(DST_DATA_TYPE, N0)); \ }) \ }) @@ -926,6 +963,7 @@ #define T_MMUL(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, LHS_LAYOUT, 
RHS_LAYOUT, lhs, rhs, dst) T_MMUL_##LHS_LAYOUT##_##RHS_LAYOUT(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst) #define T_MMUL_NT_T(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst) T_MMUL_NT_T_##LHS_DATA_TYPE##_##RHS_DATA_TYPE##_##DST_DATA_TYPE(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst) #define T_MMUL_NT_T_float_float_float(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst) T_MMUL_NT_T_FLOAT(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst) +#define T_MMUL_NT_T_half_half_float(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst) T_MMUL_NT_T_FLOAT(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst) #define T_MMUL_NT_T_half_half_half(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst) T_MMUL_NT_T_FLOAT(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst) #define T_MMUL_NT_T_char_char_int(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst) T_MMUL_NT_T_INTEGER8(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst) #define T_MMUL_NT_T_uchar_uchar_uint(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst) T_MMUL_NT_T_INTEGER8(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst) @@ -938,7 +976,7 @@ { \ LOOP_UNROLLING(int, _k, 0, 1, K0, \ { \ - dst[_m].s[_n] = fma((lhs[_m].s[_k]), (rhs[_n].s[_k]), dst[_m].s[_n]); \ + dst[_m].s[_n] = fma((DST_DATA_TYPE)(lhs[_m].s[_k]), (DST_DATA_TYPE)(rhs[_n].s[_k]), dst[_m].s[_n]); \ }) \ }) \ }) \ diff --git a/src/gpu/cl/ClKernelLibrary.cpp b/src/gpu/cl/ClKernelLibrary.cpp index c05bb96753..4af42262b9 100644 --- a/src/gpu/cl/ClKernelLibrary.cpp +++ b/src/gpu/cl/ClKernelLibrary.cpp @@ -433,6 +433,7 @@ const std::map ClKernelLibrary::_kernel_program_map = { "dwc_native_fp_nhwc", "nhwc/dwc_native_fp_nhwc.cl" }, { "dwc_native_quantized_nhwc", "nhwc/dwc_native_quantized_nhwc.cl" 
}, { "direct_convolution_nhwc", "nhwc/direct_convolution.cl" }, + { "direct_convolution3d_ndhwc", "nhwc/direct_convolution3d.cl" }, { "im2col3x3_nhwc", "nhwc/im2col.cl" }, { "im2col9x9_nhwc", "nhwc/im2col.cl" }, { "im2col_generic_nhwc", "nhwc/im2col.cl" }, @@ -856,6 +857,10 @@ const std::map ClKernelLibrary::_program_source_map = { "nhwc/direct_convolution.cl", #include "./cl_kernels/nhwc/direct_convolution.clembed" + }, + { + "nhwc/direct_convolution3d.cl", +#include "./cl_kernels/nhwc/direct_convolution3d.clembed" }, { "nhwc/dwc_native_fp_nhwc.cl", diff --git a/src/gpu/cl/kernels/ClDirectConv2dKernel.cpp b/src/gpu/cl/kernels/ClDirectConv2dKernel.cpp index cbeb9c43e9..2d851a6982 100644 --- a/src/gpu/cl/kernels/ClDirectConv2dKernel.cpp +++ b/src/gpu/cl/kernels/ClDirectConv2dKernel.cpp @@ -60,19 +60,17 @@ Status validate_arguments(const ITensorInfo *src, const ITensorInfo *weights, co const int height_idx = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT); const int channel_idx = get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL); - ARM_COMPUTE_RETURN_ERROR_ON_MSG(weights->dimension(width_idx) != weights->dimension(height_idx), "Weights should have same width and height"); - ARM_COMPUTE_RETURN_ERROR_ON_MSG(weights->dimension(channel_idx) != src->dimension(channel_idx), - "Weights feature map dimension should match the respective src's one"); + ARM_COMPUTE_RETURN_ERROR_ON_MSG(weights->dimension(channel_idx) != src->dimension(channel_idx), "Weights feature map dimension should match the respective src's one"); ARM_COMPUTE_RETURN_ERROR_ON_MSG(weights->num_dimensions() > 4, "Weights can be at most 4 dimensional"); - ARM_COMPUTE_RETURN_ERROR_ON_MSG((weights->dimension(width_idx) == 1) && std::get<0>(conv_info.stride()) > 3, "Strides larger than 3 not supported for 1x1 convolution."); - ARM_COMPUTE_RETURN_ERROR_ON_MSG((weights->dimension(width_idx) == 3 || weights->dimension(width_idx) == 5 || weights->dimension(width_idx) 
== 9) - && std::get<0>(conv_info.stride()) > 2, - "Strides larger than 2 not supported for 3x3, 5x5, 9x9 convolution."); - ARM_COMPUTE_RETURN_ERROR_ON_MSG(data_layout != DataLayout::NHWC && !is_data_type_float(src->data_type()) && act_info.enabled(), - "Activation supported only for floating point and NHWC."); if(data_layout == DataLayout::NCHW) { + ARM_COMPUTE_RETURN_ERROR_ON_MSG(weights->dimension(width_idx) != weights->dimension(height_idx), "Weights should have same width and height"); + ARM_COMPUTE_RETURN_ERROR_ON_MSG((weights->dimension(width_idx) == 1) && std::get<0>(conv_info.stride()) > 3, "Strides larger than 3 not supported for 1x1 convolution."); + ARM_COMPUTE_RETURN_ERROR_ON_MSG((weights->dimension(width_idx) == 3 || weights->dimension(width_idx) == 5 || weights->dimension(width_idx) == 9) && std::get<0>(conv_info.stride()) > 2, + "Strides larger than 2 not supported for 3x3, 5x5, 9x9 convolution."); + ARM_COMPUTE_RETURN_ERROR_ON_MSG(!is_data_type_float(src->data_type()) && act_info.enabled(), "Activation supported only for floating point and NHWC."); + if(is_data_type_quantized(src->data_type())) { ARM_COMPUTE_RETURN_ERROR_ON_MSG(weights->dimension(width_idx) != 1 && weights->dimension(width_idx) != 3 && weights->dimension(width_idx) != 5 && weights->dimension(width_idx) != 9, @@ -96,7 +94,7 @@ Status validate_arguments(const ITensorInfo *src, const ITensorInfo *weights, co ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(weights, biases); } ARM_COMPUTE_RETURN_ERROR_ON_MSG(biases->dimension(0) != weights->dimension(3), - "Biases size and number of src feature maps should match"); + "Biases size and number of dst feature maps should match"); ARM_COMPUTE_RETURN_ERROR_ON_MSG(biases->num_dimensions() > 1, "Biases should be one dimensional"); } diff --git a/src/gpu/cl/kernels/ClDirectConv2dKernel.h b/src/gpu/cl/kernels/ClDirectConv2dKernel.h index 4041c7bf27..5624f3a0a7 100644 --- a/src/gpu/cl/kernels/ClDirectConv2dKernel.h +++ 
b/src/gpu/cl/kernels/ClDirectConv2dKernel.h @@ -42,9 +42,9 @@ public: ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(ClDirectConv2dKernel); /** Set the src, weights, biases and dst tensors info. * - * @note: Due to set_valid_region(), thus src/weights/biases cannot be const. Need to change this once the set_valid_region() is removed. + * @note: Due to set_valid_region() in NCHW, src/weights/biases cannot be const. Need to change this once the set_valid_region() is removed. * - * @note: DirectConvolution only works in the following configurations: + * @note: DirectConvolution only works in the following configurations for the NCHW data layout: * 1x1 convolution with stride_x = 1/2/3, stride_y = 1/2/3 * 3x3 convolution with stride_x = 1/2, stride_y = 1/2 * 5x5 convolution with stride_x = 1/2, stride_y = 1/2 diff --git a/src/gpu/cl/kernels/ClDirectConv3dKernel.cpp b/src/gpu/cl/kernels/ClDirectConv3dKernel.cpp new file mode 100644 index 0000000000..1c4326b494 --- /dev/null +++ b/src/gpu/cl/kernels/ClDirectConv3dKernel.cpp @@ -0,0 +1,205 @@ +/* + * Copyright (c) 2021 Arm Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#include "src/gpu/cl/kernels/ClDirectConv3dKernel.h" + +#include "arm_compute/core/CL/ICLTensor.h" +#include "arm_compute/core/utils/misc/ShapeCalculator.h" +#include "src/core/CL/CLValidate.h" +#include "src/core/helpers/WindowHelpers.h" +#include "support/Cast.h" + +namespace arm_compute +{ +namespace opencl +{ +namespace kernels +{ +namespace +{ +Status validate_arguments(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *dst, const Conv3dInfo &conv3d_info) +{ + ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_LAYOUT(src, weights, dst); + ARM_COMPUTE_RETURN_ERROR_ON_MSG(src->data_layout() != DataLayout::NDHWC, "Only NDHWC layout supported"); + ARM_COMPUTE_RETURN_ERROR_ON_MSG(conv3d_info.act_info.enabled(), "Fused activation not supported"); + + ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(src); + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src, 1, DataType::F16, DataType::F32); + ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src, weights); + + ARM_COMPUTE_RETURN_ERROR_ON_MSG(weights->dimension(1) != src->dimension(0), "Weights feature map dimension should match the respective src's one"); + ARM_COMPUTE_RETURN_ERROR_ON_MSG(weights->num_dimensions() > 5, "Weights can be at most 5 dimensional"); + + ARM_COMPUTE_RETURN_ERROR_ON(weights->dimension(2) > (src->dimension(1) + conv3d_info.padding.left + conv3d_info.padding.right)); + ARM_COMPUTE_RETURN_ERROR_ON(weights->dimension(3) > (src->dimension(2) + conv3d_info.padding.top + conv3d_info.padding.bottom)); + ARM_COMPUTE_RETURN_ERROR_ON(weights->dimension(4) > (src->dimension(3) + conv3d_info.padding.front + conv3d_info.padding.back)); + + if(biases != nullptr) + { + 
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(weights, biases); + ARM_COMPUTE_RETURN_ERROR_ON_MSG(biases->dimension(0) != weights->dimension(0), "Biases size and number of dst feature maps should match"); + ARM_COMPUTE_RETURN_ERROR_ON_MSG(biases->num_dimensions() > 1, "Biases should be one dimensional"); + } + + // Checks performed when dst is configured + if(dst->total_size() != 0) + { + ARM_COMPUTE_RETURN_ERROR_ON_MSG(dst->dimension(0) != weights->dimension(0), "Weights and dst OFMs should match"); + ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(dst->tensor_shape(), misc::shape_calculator::compute_conv3d_shape(src->tensor_shape(), weights->tensor_shape(), conv3d_info)); + ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src, dst); + } + + return Status{}; +} +} // namespace + +ClDirectConv3dKernel::ClDirectConv3dKernel() +{ + _type = CLKernelType::DIRECT; +} + +void ClDirectConv3dKernel::configure(const CLCompileContext &compile_context, const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, ITensorInfo *dst, + const Conv3dInfo &conv3d_info) +{ + ARM_COMPUTE_ERROR_ON_NULLPTR(src, weights, dst); + + // Perform validation + ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(src, weights, biases, dst, conv3d_info)); + + // Create window and update padding + const DataType data_type = src->data_type(); + const size_t src_width = src->dimension(1); + const size_t src_height = src->dimension(2); + const size_t src_depth = src->dimension(3); + const size_t src_channels = src->dimension(0); + const size_t dst_width = dst->dimension(1); + const size_t dst_height = dst->dimension(2); + const size_t dst_depth = dst->dimension(3); + const size_t dst_channels = dst->dimension(0); + const size_t weights_width = weights->dimension(2); + const size_t weights_height = weights->dimension(3); + const size_t weights_depth = weights->dimension(4); + const size_t pad_left = conv3d_info.padding.left; + const size_t pad_top = conv3d_info.padding.top; + 
const size_t pad_front = conv3d_info.padding.front; + const size_t conv_stride_x = conv3d_info.stride.x(); + const size_t conv_stride_y = conv3d_info.stride.y(); + const size_t conv_stride_z = conv3d_info.stride.z(); + + const size_t n0 = std::min(dst->dimension(0), static_cast(4u)); + const size_t m0 = (dst->tensor_shape()[0] > 16) ? ((data_type == DataType::F32) ? 2U : 4U) : 1U; + const size_t k0 = adjust_vec_size(8u, src->dimension(0)); + const size_t partial_store_n0 = dst->dimension(0) % n0; + + CLBuildOptions build_options; + build_options.add_option("-cl-fast-relaxed-math"); + build_options.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(data_type)); + build_options.add_option("-DACC_DATA_TYPE=float"); + build_options.add_option("-DSRC_WIDTH=" + support::cpp11::to_string(src_width)); + build_options.add_option("-DSRC_HEIGHT=" + support::cpp11::to_string(src_height)); + build_options.add_option("-DSRC_DEPTH=" + support::cpp11::to_string(src_depth)); + build_options.add_option("-DSRC_CHANNELS=" + support::cpp11::to_string(src_channels)); + build_options.add_option("-DDST_WIDTH=" + support::cpp11::to_string(dst_width)); + build_options.add_option("-DDST_HEIGHT=" + support::cpp11::to_string(dst_height)); + build_options.add_option("-DDST_DEPTH=" + support::cpp11::to_string(dst_depth)); + build_options.add_option("-DDST_CHANNELS=" + support::cpp11::to_string(dst_channels)); + build_options.add_option("-DWEI_WIDTH=" + support::cpp11::to_string(weights_width)); + build_options.add_option("-DWEI_HEIGHT=" + support::cpp11::to_string(weights_height)); + build_options.add_option("-DWEI_DEPTH=" + support::cpp11::to_string(weights_depth)); + build_options.add_option("-DSTRIDE_X=" + support::cpp11::to_string(conv_stride_x)); + build_options.add_option("-DSTRIDE_Y=" + support::cpp11::to_string(conv_stride_y)); + build_options.add_option("-DSTRIDE_Z=" + support::cpp11::to_string(conv_stride_z)); + build_options.add_option("-DPAD_LEFT=" + 
support::cpp11::to_string(pad_left)); + build_options.add_option("-DPAD_TOP=" + support::cpp11::to_string(pad_top)); + build_options.add_option("-DPAD_FRONT=" + support::cpp11::to_string(pad_front)); + build_options.add_option("-DN0=" + support::cpp11::to_string(n0)); + build_options.add_option("-DM0=" + support::cpp11::to_string(m0)); + build_options.add_option("-DK0=" + support::cpp11::to_string(k0)); + build_options.add_option("-DPARTIAL_N0=" + support::cpp11::to_string(partial_store_n0)); + build_options.add_option_if(biases != nullptr, std::string("-DHAS_BIAS")); + + std::string kernel_name = "direct_convolution3d_ndhwc"; + _kernel = create_kernel(compile_context, kernel_name, build_options.options()); + + // Configure kernel window + Window win = calculate_max_window(*dst, Steps(n0, m0)); + ICLKernel::configure_internal(win); + + // Set config_id for enabling LWS tuning + _config_id = kernel_name; + _config_id += "_"; + _config_id += lower_string(string_from_data_type(data_type)); + _config_id += "_"; + _config_id += support::cpp11::to_string(weights_width); + _config_id += "_"; + _config_id += support::cpp11::to_string(weights_height); + _config_id += "_"; + _config_id += support::cpp11::to_string(weights_depth); + _config_id += "_"; + _config_id += support::cpp11::to_string(conv_stride_x); + _config_id += "_"; + _config_id += support::cpp11::to_string(conv_stride_y); + _config_id += "_"; + _config_id += support::cpp11::to_string(conv_stride_z); + _config_id += "_"; + _config_id += support::cpp11::to_string(dst_width); + _config_id += "_"; + _config_id += support::cpp11::to_string(dst_height); + _config_id += "_"; + _config_id += support::cpp11::to_string(dst_channels); +} + +Status ClDirectConv3dKernel::validate(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *dst, const Conv3dInfo &conv3d_info) +{ + ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(src, weights, biases, dst, conv3d_info)); + return Status{}; +} 
+ +void ClDirectConv3dKernel::run_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue) +{ + ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this); + ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(IKernel::window(), window); + + const auto src = utils::cast::polymorphic_downcast(tensors.get_const_tensor(TensorType::ACL_SRC_0)); + const auto weights = utils::cast::polymorphic_downcast(tensors.get_const_tensor(TensorType::ACL_SRC_1)); + const auto biases = utils::cast::polymorphic_downcast(tensors.get_const_tensor(TensorType::ACL_SRC_2)); + auto dst = utils::cast::polymorphic_downcast(tensors.get_tensor(TensorType::ACL_DST)); + + // Get initial windows + Window slice = window.first_slice_window_3D(); + slice.set(Window::DimY, Window::Dimension(0, ceil_to_multiple(dst->info()->dimension(1) * dst->info()->dimension(2) * dst->info()->dimension(3), slice.y().step()), slice.y().step())); + slice.set(Window::DimZ, Window::Dimension(0, dst->info()->dimension(4), 1)); + + unsigned int idx = 0; + add_4D_tensor_argument(idx, src, slice); + add_4D_tensor_argument(idx, dst, slice); + add_4D_tensor_argument(idx, weights, slice); + if(biases != nullptr) + { + add_1D_tensor_argument(idx, biases, slice); + } + enqueue(queue, *this, slice, lws_hint()); +} +} // namespace kernels +} // namespace opencl +} // namespace arm_compute diff --git a/src/gpu/cl/kernels/ClDirectConv3dKernel.h b/src/gpu/cl/kernels/ClDirectConv3dKernel.h new file mode 100644 index 0000000000..9ac8f0d7b3 --- /dev/null +++ b/src/gpu/cl/kernels/ClDirectConv3dKernel.h @@ -0,0 +1,86 @@ +/* + * Copyright (c) 2021 Arm Limited. 
+ * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#ifndef ARM_COMPUTE_CL_DIRECT_CONV3D_KERNEL_H +#define ARM_COMPUTE_CL_DIRECT_CONV3D_KERNEL_H + +#include "src/gpu/cl/IClKernel.h" + +namespace arm_compute +{ +class CLCompileContext; +struct Conv3dInfo; + +namespace opencl +{ +namespace kernels +{ +/** Interface for the direct convolution 3d kernel. 
*/ +class ClDirectConv3dKernel : public IClKernel +{ +public: + /** Constructor */ + ClDirectConv3dKernel(); + /** Prevent instances of this class from being copied (As this class contains pointers) */ + ClDirectConv3dKernel(const ClDirectConv3dKernel &) = delete; + /** Prevent instances of this class from being copied (As this class contains pointers) */ + ClDirectConv3dKernel &operator=(const ClDirectConv3dKernel &) = delete; + /** Default move constructor */ + ClDirectConv3dKernel(ClDirectConv3dKernel &&) = default; + /** Default move assignment operator */ + ClDirectConv3dKernel &operator=(ClDirectConv3dKernel &&) = default; + /** Set the src, weights, biases and dst tensors info. + * + * Valid data layouts: + * - NDHWC + * + * Valid data type configurations: + * |src0 |src1 |src2 |dst | + * |:--------------|:--------------|:------|:--------------| + * |F16 |F16 |F16 |F16 | + * |F32 |F32 |F32 |F32 | + * + * @param[in] compile_context The compile context to be used. + * @param[in] src Source tensor. 4 lower dimensions represent a single src [IFM, width, height, depth], + * while every optional dimension from 5 and above represent a batch of srcs. + * @param[in] weights Weights tensor. Weights are 5D tensor with dimensions [OFM, IFM, kernel_w, kernel_h, kernel_d]. + * @param[in] biases Biases tensor. Shared biases supported. Biases are 1D tensor with dimensions [OFM]. + * @param[out] dst Destination tensor. 4 lower dimensions represent a single dst [OFM, width, height, depth], while the rest represent batch of dsts. + * @param[in] conv3d_info Contains strides, padding, rounding, activation, dilation and fast math information. Activation and fast math are currently unused. 
+ */ + void configure(const CLCompileContext &compile_context, const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, ITensorInfo *dst, const Conv3dInfo &conv3d_info); + /** Static function to check if given info will lead to a valid configuration + * + * Similar to ClDirectConv3dKernel::configure() + * + * @return a status + */ + static Status validate(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *dst, const Conv3dInfo &conv3d_info); + + // Inherited methods overridden: + void run_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue) override; +}; +} // namespace kernels +} // namespace opencl +} // namespace arm_compute +#endif /* ARM_COMPUTE_CL_DIRECT_CONV3D_KERNEL_H */ diff --git a/src/gpu/cl/operators/ClDirectConv2d.cpp b/src/gpu/cl/operators/ClDirectConv2d.cpp index 066959f400..d2e4049a09 100644 --- a/src/gpu/cl/operators/ClDirectConv2d.cpp +++ b/src/gpu/cl/operators/ClDirectConv2d.cpp @@ -25,7 +25,6 @@ #include "arm_compute/runtime/CL/CLScheduler.h" #include "src/core/CL/kernels/CLFillBorderKernel.h" -#include "src/gpu/cl/ClCompileContext.h" #include "src/gpu/cl/kernels/ClActivationKernel.h" #include "src/gpu/cl/kernels/ClDirectConv2dKernel.h" diff --git a/src/gpu/cl/operators/ClDirectConv3d.cpp b/src/gpu/cl/operators/ClDirectConv3d.cpp new file mode 100644 index 0000000000..d10165814b --- /dev/null +++ b/src/gpu/cl/operators/ClDirectConv3d.cpp @@ -0,0 +1,55 @@ +/* + * Copyright (c) 2021 Arm Limited. 
+ * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +#include "src/gpu/cl/operators/ClDirectConv3d.h" + +#include "arm_compute/runtime/CL/CLScheduler.h" +#include "src/gpu/cl/kernels/ClDirectConv3dKernel.h" + +namespace arm_compute +{ +namespace opencl +{ +void ClDirectConv3d::configure(const CLCompileContext &compile_context, const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, ITensorInfo *dst, const Conv3dInfo &conv3d_info) +{ + ARM_COMPUTE_ERROR_ON_NULLPTR(src); + + // Configure direct convolution 3d kernel + auto k = std::make_unique(); + k->configure(compile_context, src, weights, biases, dst, conv3d_info); + _direct_conv3d_kernel = std::move(k); +} + +Status ClDirectConv3d::validate(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *dst, const Conv3dInfo &conv3d_info) +{ + ARM_COMPUTE_RETURN_ON_ERROR(kernels::ClDirectConv3dKernel::validate(src, weights, biases, dst, conv3d_info)); + return Status{}; +} + +void ClDirectConv3d::run(ITensorPack &tensors) +{ + // Run direct convolution 3d + CLScheduler::get().enqueue_op(*_direct_conv3d_kernel.get(), tensors, true); +} +} // namespace opencl +} // namespace arm_compute diff --git a/src/gpu/cl/operators/ClDirectConv3d.h b/src/gpu/cl/operators/ClDirectConv3d.h new file mode 100644 index 0000000000..ce9135b812 --- /dev/null +++ b/src/gpu/cl/operators/ClDirectConv3d.h @@ -0,0 +1,86 @@ +/* + * Copyright (c) 2021 Arm Limited. 
+ * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#ifndef ARM_COMPUTE_CL_DIRECT_CONV3D_H +#define ARM_COMPUTE_CL_DIRECT_CONV3D_H + +#include "src/gpu/cl/IClKernel.h" +#include "src/gpu/cl/IClOperator.h" + +#include + +namespace arm_compute +{ +class CLCompileContext; +struct Conv3dInfo; +class IClKernel; + +namespace opencl +{ +/** Basic function to execute a direct convolution layer with 3 spatial dimensions. This function calls the following OpenCL kernels: + * + * -# @ref opencl::kernels::ClDirectConv3dKernel + */ +class ClDirectConv3d : public IClOperator +{ +public: + ClDirectConv3d() = default; + /** Set the src and dst tensors. 
+ * + * Valid data layouts: + * - NDHWC + * + * Valid data type configurations: + * |src0 |src1 |src2 |dst | + * |:--------------|:--------------|:------|:--------------| + * |F16 |F16 |F16 |F16 | + * |F32 |F32 |F32 |F32 | + * + * @param[in] compile_context The compile context to be used. + * @param[in] src Source tensor. 4 lower dimensions represent a single src [IFM, width, height, depth], + * while every optional dimension from 5 and above represent a batch of srcs. + * @param[in] weights Weights tensor. Weights are 5D tensor with dimensions [OFM, IFM, kernel_w, kernel_h, kernel_d]. + * @param[in] biases Biases tensor. Shared biases supported. Biases are 1D tensor with dimensions [OFM]. + * @param[out] dst Destination tensor. 4 lower dimensions represent a single dst [OFM, width, height, depth], while the rest represent batch of dsts. + * @param[in] conv3d_info Contains strides, padding, rounding, activation, dilation and fast math information. Activation and fast math are currently unused. 
+ * + */ + void configure(const CLCompileContext &compile_context, const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, ITensorInfo *dst, const Conv3dInfo &conv3d_info); + + /** Static function to check if given info will lead to a valid configuration + * + * Similar to ClDirectConv3d::configure() + * + * @return a status + */ + static Status validate(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *dst, const Conv3dInfo &conv3d_info); + + // Inherited method overridden + void run(ITensorPack &tensors) override; + +private: + std::unique_ptr _direct_conv3d_kernel{ nullptr }; +}; +} // namespace opencl +} // namespace arm_compute +#endif /* ARM_COMPUTE_CL_DIRECT_CONV3D_H */ \ No newline at end of file diff --git a/src/runtime/CL/functions/CLConv3D.cpp b/src/runtime/CL/functions/CLConv3D.cpp new file mode 100644 index 0000000000..729b973b6a --- /dev/null +++ b/src/runtime/CL/functions/CLConv3D.cpp @@ -0,0 +1,82 @@ +/* + * Copyright (c) 2021 Arm Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#include "arm_compute/runtime/CL/functions/CLConv3D.h" + +#include "arm_compute/core/CL/ICLTensor.h" +#include "src/gpu/cl/operators/ClDirectConv3d.h" + +namespace arm_compute +{ +using namespace arm_compute::experimental; + +struct CLConv3D::Impl +{ + const ICLTensor *src{ nullptr }; + const ICLTensor *weights{ nullptr }; + const ICLTensor *biases{ nullptr }; + ICLTensor *dst{ nullptr }; + std::unique_ptr op{ nullptr }; +}; + +CLConv3D::CLConv3D() + : _impl(std::make_unique()) +{ +} + +CLConv3D::~CLConv3D() = default; + +void CLConv3D::configure(const ICLTensor *src, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *dst, const Conv3dInfo &conv3d_info) +{ + configure(CLKernelLibrary::get().get_compile_context(), src, weights, biases, dst, conv3d_info); +} + +void CLConv3D::configure(const CLCompileContext &compile_context, const ICLTensor *src, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *dst, const Conv3dInfo &conv3d_info) +{ + ARM_COMPUTE_ERROR_ON_NULLPTR(src, weights, dst); + ARM_COMPUTE_ERROR_THROW_ON(CLConv3D::validate(src->info(), weights->info(), ((biases != nullptr) ? biases->info() : nullptr), dst->info(), conv3d_info)); + + _impl->src = src; + _impl->weights = weights; + _impl->biases = biases; + _impl->dst = dst; + + _impl->op = std::make_unique(); + _impl->op->configure(compile_context, _impl->src->info(), _impl->weights->info(), _impl->biases ? 
_impl->biases->info() : nullptr, _impl->dst->info(), conv3d_info); +} + +Status CLConv3D::validate(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *dst, const Conv3dInfo &conv3d_info) +{ + return opencl::ClDirectConv3d::validate(src, weights, biases, dst, conv3d_info); +} + +void CLConv3D::run() +{ + ITensorPack pack; + pack.add_tensor(TensorType::ACL_SRC_0, _impl->src); + pack.add_tensor(TensorType::ACL_SRC_1, _impl->weights); + pack.add_tensor(TensorType::ACL_SRC_2, _impl->biases); + pack.add_tensor(TensorType::ACL_DST, _impl->dst); + _impl->op->run(pack); +} +} // namespace arm_compute diff --git a/tests/validation/CL/Convolution3D.cpp b/tests/validation/CL/Convolution3D.cpp new file mode 100644 index 0000000000..75e2e99b03 --- /dev/null +++ b/tests/validation/CL/Convolution3D.cpp @@ -0,0 +1,275 @@ +/* + * Copyright (c) 2021 Arm Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#include "arm_compute/runtime/CL/CLTensor.h" +#include "arm_compute/runtime/CL/functions/CLConv3D.h" +#include "arm_compute/runtime/FunctionDescriptors.h" +#include "tests/CL/CLAccessor.h" +#include "tests/framework/Macros.h" +#include "tests/framework/datasets/Datasets.h" +#include "tests/validation/Validation.h" +#include "tests/validation/fixtures/DirectConvolution3DFixture.h" + +namespace arm_compute +{ +namespace test +{ +namespace validation +{ +namespace +{ +RelativeTolerance tolerance_fp16(half(0.2)); /**< Tolerance for floating point tests */ +RelativeTolerance tolerance_fp32(0.05f); /**< Tolerance for floating point tests */ +constexpr float abs_tolerance_f32(0.0001f); /**< Absolute tolerance for FP32 tests*/ +constexpr float tolerance_num = 0.07f; /**< Tolerance number */ +} // namespace + +TEST_SUITE(CL) +TEST_SUITE(DirectConvolution3D) + +// *INDENT-OFF* +// clang-format off +DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(zip(zip(zip( + framework::dataset::make("InputShape", { TensorShape(27U, 13U, 5U, 3U), // Unsupported data layout + TensorShape(27U, 13U, 5U, 3U), // Unsupported activation enabled + TensorShape(27U, 13U, 5U, 3U), // Mismatching data type + TensorShape(27U, 13U, 5U, 3U), // Unsupported data type + TensorShape(27U, 13U, 5U, 3U), // Mismatching input feature maps + TensorShape(27U, 13U, 5U, 3U), // Mismatching output feature maps + TensorShape(27U, 13U, 5U, 3U), // Mismatching bias shape + TensorShape(27U, 13U, 5U, 3U), // Unsupported number of weights dimensions + TensorShape(27U, 13U, 5U, 3U), // Unsupported number of biases dimensions + TensorShape(27U, 13U, 5U, 3U), // Mismatching output shape + TensorShape(27U, 13U, 5U, 3U) + }), 
+ framework::dataset::make("WeightsShape", { TensorShape(4U, 27U, 3U, 3U, 3U), + TensorShape(4U, 27U, 3U, 3U, 3U), + TensorShape(4U, 27U, 3U, 3U, 3U), + TensorShape(4U, 27U, 3U, 3U, 3U), + TensorShape(4U, 32U, 3U, 3U, 3U), + TensorShape(8U, 27U, 3U, 3U, 3U), + TensorShape(4U, 27U, 3U, 3U, 3U), + TensorShape(4U, 27U, 3U, 3U, 3U, 2U), + TensorShape(4U, 27U, 3U, 3U, 3U), + TensorShape(4U, 27U, 3U, 3U, 3U), + TensorShape(4U, 27U, 3U, 3U, 3U) + })), + framework::dataset::make("BiasesShape", { TensorShape(4U), + TensorShape(4U), + TensorShape(4U), + TensorShape(4U), + TensorShape(4U), + TensorShape(4U), + TensorShape(8U), + TensorShape(4U), + TensorShape(4U), + TensorShape(4U), + TensorShape(4U) + })), + framework::dataset::make("OutputShape", { TensorShape(4U, 13U, 5U, 3U), + TensorShape(4U, 13U, 5U, 3U), + TensorShape(4U, 13U, 5U, 3U), + TensorShape(4U, 13U, 5U, 3U), + TensorShape(4U, 13U, 5U, 3U), + TensorShape(4U, 13U, 5U, 3U), + TensorShape(4U, 13U, 5U, 3U), + TensorShape(4U, 13U, 5U, 3U), + TensorShape(4U, 13U, 5U, 3U, 2U), + TensorShape(4U, 11U, 5U, 3U), + TensorShape(4U, 13U, 5U, 3U) + })), + framework::dataset::make("Conv3dInfo", { Conv3dInfo(Size3D(1U, 1U, 1U), Padding3D(1U, 1U, 1U), ActivationLayerInfo(), Size3D(1U, 1U, 1U), DimensionRoundingType::FLOOR, false), + Conv3dInfo(Size3D(1U, 1U, 1U), Padding3D(1U, 1U, 1U), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU), Size3D(1U, 1U, 1U), DimensionRoundingType::FLOOR, false), + Conv3dInfo(Size3D(1U, 1U, 1U), Padding3D(1U, 1U, 1U), ActivationLayerInfo(), Size3D(1U, 1U, 1U), DimensionRoundingType::FLOOR, false), + Conv3dInfo(Size3D(1U, 1U, 1U), Padding3D(1U, 1U, 1U), ActivationLayerInfo(), Size3D(1U, 1U, 1U), DimensionRoundingType::FLOOR, false), + Conv3dInfo(Size3D(1U, 1U, 1U), Padding3D(1U, 1U, 1U), ActivationLayerInfo(), Size3D(1U, 1U, 1U), DimensionRoundingType::FLOOR, false), + Conv3dInfo(Size3D(1U, 1U, 1U), Padding3D(1U, 1U, 1U), ActivationLayerInfo(), Size3D(1U, 1U, 1U), 
DimensionRoundingType::FLOOR, false), + Conv3dInfo(Size3D(1U, 1U, 1U), Padding3D(1U, 1U, 1U), ActivationLayerInfo(), Size3D(1U, 1U, 1U), DimensionRoundingType::FLOOR, false), + Conv3dInfo(Size3D(1U, 1U, 1U), Padding3D(1U, 1U, 1U), ActivationLayerInfo(), Size3D(1U, 1U, 1U), DimensionRoundingType::FLOOR, false), + Conv3dInfo(Size3D(1U, 1U, 1U), Padding3D(1U, 1U, 1U), ActivationLayerInfo(), Size3D(1U, 1U, 1U), DimensionRoundingType::FLOOR, false), + Conv3dInfo(Size3D(1U, 1U, 1U), Padding3D(1U, 1U, 1U), ActivationLayerInfo(), Size3D(1U, 1U, 1U), DimensionRoundingType::FLOOR, false), + Conv3dInfo(Size3D(1U, 1U, 1U), Padding3D(1U, 1U, 1U), ActivationLayerInfo(), Size3D(1U, 1U, 1U), DimensionRoundingType::FLOOR, false) + })), + framework::dataset::make("SrcDataType", { DataType::F32, + DataType::F32, + DataType::F32, + DataType::QASYMM8, + DataType::F32, + DataType::F32, + DataType::F32, + DataType::F32, + DataType::F32, + DataType::F32, + DataType::F32 + })), + framework::dataset::make("WeightsDataType", { DataType::F32, + DataType::F32, + DataType::F16, + DataType::QASYMM8, + DataType::F32, + DataType::F32, + DataType::F32, + DataType::F32, + DataType::F32, + DataType::F32, + DataType::F32 + })), + framework::dataset::make("DataLayout", { DataLayout::NCDHW, + DataLayout::NDHWC, + DataLayout::NDHWC, + DataLayout::NDHWC, + DataLayout::NDHWC, + DataLayout::NDHWC, + DataLayout::NDHWC, + DataLayout::NDHWC, + DataLayout::NDHWC, + DataLayout::NDHWC, + DataLayout::NDHWC + })), + framework::dataset::make("Expected", { false, false, false, false, false, false, false, false, false, false, true })), + input_shape, weights_shape, biases_shape, output_shape, conv3d_info, src_data_type, weights_data_type, data_layout, expected) +{ + TensorInfo input_info = TensorInfo(input_shape, 1, src_data_type); + TensorInfo weights_info = TensorInfo(weights_shape, 1, weights_data_type); + TensorInfo biases_info = TensorInfo(biases_shape, 1, src_data_type); + TensorInfo output_info = 
TensorInfo(output_shape, 1, src_data_type); + + input_info.set_data_layout(data_layout); + weights_info.set_data_layout(data_layout); + biases_info.set_data_layout(data_layout); + output_info.set_data_layout(data_layout); + + bool is_valid = bool(CLConv3D::validate(&input_info.clone()->set_is_resizable(false), &weights_info.clone()->set_is_resizable(false), &biases_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false), conv3d_info)); + ARM_COMPUTE_EXPECT(is_valid == expected, framework::LogLevel::ERRORS); +} + +template +using CLDirectConvolution3DFixture = DirectConvolution3DValidationFixture; + +TEST_SUITE(NDHWC) +TEST_SUITE(FP16) +FIXTURE_DATA_TEST_CASE(RunSmall, CLDirectConvolution3DFixture, framework::DatasetMode::PRECOMMIT, + combine(combine(combine(zip(zip(zip(zip(zip(zip(zip(zip(zip(zip(zip( + framework::dataset::make("InputShape", { TensorShape(7U, 5U, 3U, 13U, 3U), + TensorShape(15U, 7U, 11U, 7U), + TensorShape(19U, 5U, 16U, 4U), + TensorShape(13U, 5U, 17U, 2U) + }), + framework::dataset::make("StrideX", { 1, 3, 2, 1 })), + framework::dataset::make("StrideY", { 2, 1, 3, 1 })), + framework::dataset::make("StrideZ", { 3, 2, 1, 1 })), + framework::dataset::make("PadX", { 0, 2, 1, 0 })), + framework::dataset::make("PadY", { 1, 0, 2, 0 })), + framework::dataset::make("PadZ", { 2, 1, 0, 0 })), + framework::dataset::make("KernelWidth", { 3, 7, 5, 1 })), + framework::dataset::make("KernelHeight", { 5, 3, 7, 1 })), + framework::dataset::make("KernelDepth", { 7, 5, 3, 1 })), + framework::dataset::make("NumKernels", { 5, 3, 1, 11 })), + framework::dataset::make("HasBias", { true, true, true, false })), + framework::dataset::make("Activation", ActivationLayerInfo())), + framework::dataset::make("DataType", DataType::F16)), + framework::dataset::make("DataLayout", DataLayout::NDHWC))) +{ + validate(CLAccessor(_target), _reference, tolerance_fp16, tolerance_num); +} + +FIXTURE_DATA_TEST_CASE(RunLarge, CLDirectConvolution3DFixture, 
framework::DatasetMode::NIGHTLY, + combine(combine(combine(zip(zip(zip(zip(zip(zip(zip(zip(zip(zip(zip( + framework::dataset::make("InputShape", { TensorShape(400U, 400U, 200U, 11U) }), + framework::dataset::make("StrideX", { 1 })), + framework::dataset::make("StrideY", { 1 })), + framework::dataset::make("StrideZ", { 1 })), + framework::dataset::make("PadX", { 1 })), + framework::dataset::make("PadY", { 1 })), + framework::dataset::make("PadZ", { 1 })), + framework::dataset::make("KernelWidth", { 5 })), + framework::dataset::make("KernelHeight", { 5 })), + framework::dataset::make("KernelDepth", { 5 })), + framework::dataset::make("NumKernels", { 300 })), + framework::dataset::make("HasBias", { true })), + framework::dataset::make("Activation", ActivationLayerInfo())), + framework::dataset::make("DataType", DataType::F16)), + framework::dataset::make("DataLayout", DataLayout::NDHWC))) +{ + validate(CLAccessor(_target), _reference, tolerance_fp16, tolerance_num); +} + +TEST_SUITE_END() // FP16 + +TEST_SUITE(FP32) +FIXTURE_DATA_TEST_CASE(RunSmall, CLDirectConvolution3DFixture, framework::DatasetMode::PRECOMMIT, + combine(combine(combine(zip(zip(zip(zip(zip(zip(zip(zip(zip(zip(zip( + framework::dataset::make("InputShape", { TensorShape(7U, 5U, 3U, 13U, 3U), + TensorShape(15U, 7U, 11U, 7U), + TensorShape(19U, 5U, 16U, 4U), + TensorShape(13U, 5U, 17U, 2U) + }), + framework::dataset::make("StrideX", { 1, 3, 2, 1 })), + framework::dataset::make("StrideY", { 2, 1, 3, 1 })), + framework::dataset::make("StrideZ", { 3, 2, 1, 1 })), + framework::dataset::make("PadX", { 0, 2, 1, 0 })), + framework::dataset::make("PadY", { 1, 0, 2, 0 })), + framework::dataset::make("PadZ", { 2, 1, 0, 0 })), + framework::dataset::make("KernelWidth", { 3, 7, 5, 1 })), + framework::dataset::make("KernelHeight", { 5, 3, 7, 1 })), + framework::dataset::make("KernelDepth", { 7, 5, 3, 1 })), + framework::dataset::make("NumKernels", { 5, 3, 1, 11 })), + framework::dataset::make("HasBias", { true, true, 
true, false })), + framework::dataset::make("Activation", ActivationLayerInfo())), + framework::dataset::make("DataType", DataType::F32)), + framework::dataset::make("DataLayout", DataLayout::NDHWC))) +{ + validate(CLAccessor(_target), _reference, tolerance_fp32, 0.0, abs_tolerance_f32); +} + +FIXTURE_DATA_TEST_CASE(RunLarge, CLDirectConvolution3DFixture, framework::DatasetMode::NIGHTLY, + combine(combine(combine(zip(zip(zip(zip(zip(zip(zip(zip(zip(zip(zip( + framework::dataset::make("InputShape", { TensorShape(400U, 400U, 200U, 11U) }), + framework::dataset::make("StrideX", { 1 })), + framework::dataset::make("StrideY", { 1 })), + framework::dataset::make("StrideZ", { 1 })), + framework::dataset::make("PadX", { 1 })), + framework::dataset::make("PadY", { 1 })), + framework::dataset::make("PadZ", { 1 })), + framework::dataset::make("KernelWidth", { 9 })), + framework::dataset::make("KernelHeight", { 9 })), + framework::dataset::make("KernelDepth", { 9 })), + framework::dataset::make("NumKernels", { 300 })), + framework::dataset::make("HasBias", { true })), + framework::dataset::make("Activation", ActivationLayerInfo())), + framework::dataset::make("DataType", DataType::F32)), + framework::dataset::make("DataLayout", DataLayout::NDHWC))) +{ + validate(CLAccessor(_target), _reference, tolerance_fp32, 0.0, abs_tolerance_f32); +} + +// clang-format on +// *INDENT-ON* +TEST_SUITE_END() // FP32 +TEST_SUITE_END() // NDHWC +TEST_SUITE_END() // DirectConvolution3D +TEST_SUITE_END() // CL + +} // namespace validation +} // namespace test +} // namespace arm_compute diff --git a/tests/validation/fixtures/DirectConvolution3DFixture.h b/tests/validation/fixtures/DirectConvolution3DFixture.h index 717f6f281e..3a675ac6d3 100644 --- a/tests/validation/fixtures/DirectConvolution3DFixture.h +++ b/tests/validation/fixtures/DirectConvolution3DFixture.h @@ -41,12 +41,12 @@ class DirectConvolution3DValidationGenericFixture : public framework::Fixture { public: template - void 
setup(TensorShape input_shape, int stride_x, int stride_y, int stride_z, int pad_x, int pad_y, int pad_z, unsigned int kernel_width, int kernel_height, int kernel_depth, - unsigned int num_kernels, bool has_bias, ActivationLayerInfo act_info, DataType data_type, DataLayout data_layout) + void setup(const TensorShape &input_shape, int stride_x, int stride_y, int stride_z, int pad_x, int pad_y, int pad_z, unsigned int kernel_width, int kernel_height, int kernel_depth, + unsigned int num_kernels, bool has_bias, const ActivationLayerInfo &act_info, const DataType &data_type, const DataLayout &data_layout) { ARM_COMPUTE_ERROR_ON(data_layout != DataLayout::NDHWC); - TensorShape weights_shape(num_kernels, input_shape[0], kernel_width, kernel_height, kernel_depth); + const TensorShape weights_shape(num_kernels, input_shape[0], kernel_width, kernel_height, kernel_depth); const TensorShape bias_shape(num_kernels); const Conv3dInfo conv3d_info(Size3D(stride_x, stride_y, stride_z), Padding3D(pad_x, pad_y, pad_z), act_info, Size3D(1U, 1U, 1U), DimensionRoundingType::FLOOR, false); const TensorShape output_shape = compute_conv3d_shape(input_shape, weights_shape, conv3d_info); @@ -78,7 +78,7 @@ protected: } } - TensorType compute_target(TensorShape input_shape, TensorShape weights_shape, const TensorShape &bias_shape, TensorShape output_shape, const Conv3dInfo &conv3d_info, + TensorType compute_target(const TensorShape &input_shape, const TensorShape &weights_shape, const TensorShape &bias_shape, const TensorShape &output_shape, const Conv3dInfo &conv3d_info, bool has_bias, const DataType &data_type, const DataLayout &data_layout) { // Create tensors diff --git a/utils/TypePrinter.h b/utils/TypePrinter.h index 0bea408519..3e73b906db 100644 --- a/utils/TypePrinter.h +++ b/utils/TypePrinter.h @@ -1982,6 +1982,33 @@ inline std::string to_string(const Size2D &type) return str.str(); } +/** Formatted output of the Size3D type. 
+ * + * @param[out] os Output stream + * @param[in] size Type to output + * + * @return Modified output stream. + */ +inline ::std::ostream &operator<<(::std::ostream &os, const Size3D &size) +{ + os << size.width << "x" << size.height << "x" << size.depth; + + return os; +} + +/** Formatted output of the Size3D type. + * + * @param[in] type Type to output + * + * @return Formatted string. + */ +inline std::string to_string(const Size3D &type) +{ + std::stringstream str; + str << type; + return str.str(); +} + /** Formatted output of the ConvolutionMethod type. * * @param[out] os Output stream @@ -2919,6 +2946,107 @@ inline std::string to_string(const BoxNMSLimitInfo &info) return str.str(); } +/** Formatted output of the DimensionRoundingType type. + * + * @param[out] os Output stream. + * @param[in] rounding_type DimensionRoundingType to output. + * + * @return Modified output stream. + */ +inline ::std::ostream &operator<<(::std::ostream &os, const DimensionRoundingType &rounding_type) +{ + switch(rounding_type) + { + case DimensionRoundingType::CEIL: + os << "CEIL"; + break; + case DimensionRoundingType::FLOOR: + os << "FLOOR"; + break; + default: + ARM_COMPUTE_ERROR("NOT_SUPPORTED!"); + } + return os; +} + +/** Converts a @ref DimensionRoundingType to string + * + * @param[in] rounding_type DimensionRoundingType value to be converted + * + * @return String representing the corresponding DimensionRoundingType + */ +inline std::string to_string(const DimensionRoundingType &rounding_type) +{ + std::stringstream str; + str << rounding_type; + return str.str(); +} + +/** Formatted output of the Padding3D type. + * + * @param[out] os Output stream. + * @param[in] padding3d Padding3D to output. + * + * @return Modified output stream. 
+ */ +inline ::std::ostream &operator<<(::std::ostream &os, const Padding3D &padding3d) +{ + os << padding3d.left << "," << padding3d.right << "," + << padding3d.top << "," << padding3d.bottom << "," + << padding3d.front << "," << padding3d.back; + return os; +} + +/** Converts a @ref Padding3D to string + * + * @param[in] padding3d Padding3D value to be converted + * + * @return String representing the corresponding Padding3D + */ +inline std::string to_string(const Padding3D &padding3d) +{ + std::stringstream str; + str << padding3d; + return str.str(); +} + +/** Formatted output of the Conv3dInfo type. + * + * @param[out] os Output stream. + * @param[in] conv3d_info Type to output. + * + * @return Modified output stream. + */ +inline ::std::ostream &operator<<(::std::ostream &os, const Conv3dInfo &conv3d_info) +{ + os << conv3d_info.stride; + os << ";"; + os << conv3d_info.padding; + os << ";"; + os << to_string(conv3d_info.act_info); + os << ";"; + os << conv3d_info.dilation; + os << ";"; + os << conv3d_info.round_type; + os << ";"; + os << conv3d_info.enable_fast_math; + + return os; +} + +/** Formatted output of the Conv3dInfo type. + * + * @param[in] conv3d_info Type to output. + * + * @return Formatted string. + */ +inline std::string to_string(const Conv3dInfo &conv3d_info) +{ + std::stringstream str; + str << conv3d_info; + return str.str(); +} + } // namespace arm_compute #endif /* __ARM_COMPUTE_TYPE_PRINTER_H__ */ -- cgit v1.2.1