aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorgiuros01 <giuseppe.rossini@arm.com>2019-04-01 13:50:22 +0100
committerGian Marco Iodice <gianmarco.iodice@arm.com>2019-05-10 08:26:44 +0000
commit46a49a0a8206f0efa7afd514940e180a88ffd732 (patch)
tree0ec53af4ef65037e357b1d8f6a1d1f65075659f7
parent879e8dd2fc8523e4059ba9ced9ea0edb57103778 (diff)
downloadComputeLibrary-46a49a0a8206f0efa7afd514940e180a88ffd732.tar.gz
COMPMID-1635: Optimize CLDeconvolutionLayer - Part III
Change-Id: Id2661e093a669ef3eaf2a5116cd278a80c1d5a89 Signed-off-by: giuros01 <giuseppe.rossini@arm.com> Reviewed-on: https://review.mlplatform.org/c/935 Reviewed-by: Gian Marco Iodice <gianmarco.iodice@arm.com> Comments-Addressed: Gian Marco Iodice <gianmarco.iodice@arm.com> Reviewed-by: Isabella Gottardi <isabella.gottardi@arm.com> Comments-Addressed: Isabella Gottardi <isabella.gottardi@arm.com> Tested-by: Isabella Gottardi <isabella.gottardi@arm.com> Tested-by: Arm Jenkins <bsgcomp@arm.com>
-rw-r--r--arm_compute/core/CL/CLKernelLibrary.h4
-rw-r--r--arm_compute/core/CL/CLKernels.h1
-rw-r--r--arm_compute/core/CL/kernels/CLDeconvolutionReshapeOutputKernel.h93
-rw-r--r--arm_compute/core/Types.h7
-rw-r--r--arm_compute/runtime/CL/CLFunctions.h1
-rw-r--r--arm_compute/runtime/CL/functions/CLDeconvolutionLayer.h10
-rw-r--r--arm_compute/runtime/CL/functions/CLGEMMDeconvolutionLayer.h144
-rw-r--r--src/core/CL/CLKernelLibrary.cpp3
-rw-r--r--src/core/CL/cl_kernels/deconvolution_layer.cl78
-rw-r--r--src/core/CL/kernels/CLDeconvolutionReshapeOutputKernel.cpp195
-rw-r--r--src/core/CL/kernels/CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel.cpp2
-rw-r--r--src/runtime/CL/functions/CLDeconvolutionLayer.cpp65
-rw-r--r--src/runtime/CL/functions/CLDirectDeconvolutionLayer.cpp13
-rw-r--r--src/runtime/CL/functions/CLGEMM.cpp2
-rw-r--r--src/runtime/CL/functions/CLGEMMDeconvolutionLayer.cpp373
-rw-r--r--tests/validation/CL/DeconvolutionLayer.cpp40
16 files changed, 1013 insertions, 18 deletions
diff --git a/arm_compute/core/CL/CLKernelLibrary.h b/arm_compute/core/CL/CLKernelLibrary.h
index c1999b45e1..741e47c65f 100644
--- a/arm_compute/core/CL/CLKernelLibrary.h
+++ b/arm_compute/core/CL/CLKernelLibrary.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2018 ARM Limited.
+ * Copyright (c) 2016-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -333,7 +333,7 @@ public:
* @param[in] built_program_name Name of the program
* @param[in] program Built program to add to the cache
*/
- void add_built_program(const std::string &built_program_name, cl::Program program);
+ void add_built_program(const std::string &built_program_name, const cl::Program &program);
/** Returns true if FP16 is supported by the CL device
*
diff --git a/arm_compute/core/CL/CLKernels.h b/arm_compute/core/CL/CLKernels.h
index 57498715c8..3f5a7dc241 100644
--- a/arm_compute/core/CL/CLKernels.h
+++ b/arm_compute/core/CL/CLKernels.h
@@ -48,6 +48,7 @@
#include "arm_compute/core/CL/kernels/CLCopyKernel.h"
#include "arm_compute/core/CL/kernels/CLCropKernel.h"
#include "arm_compute/core/CL/kernels/CLDeconvolutionLayerUpsampleKernel.h"
+#include "arm_compute/core/CL/kernels/CLDeconvolutionReshapeOutputKernel.h"
#include "arm_compute/core/CL/kernels/CLDepthConcatenateLayerKernel.h"
#include "arm_compute/core/CL/kernels/CLDepthConvertLayerKernel.h"
#include "arm_compute/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NCHWKernel.h"
diff --git a/arm_compute/core/CL/kernels/CLDeconvolutionReshapeOutputKernel.h b/arm_compute/core/CL/kernels/CLDeconvolutionReshapeOutputKernel.h
new file mode 100644
index 0000000000..4b6284cfff
--- /dev/null
+++ b/arm_compute/core/CL/kernels/CLDeconvolutionReshapeOutputKernel.h
@@ -0,0 +1,93 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_CLDECONVOLUTIONLAYERRESHAPEOUTPUTKERNEL_H__
+#define __ARM_COMPUTE_CLDECONVOLUTIONLAYERRESHAPEOUTPUTKERNEL_H__
+
+#include "arm_compute/core/CL/ICLSimpleKernel.h"
+
+namespace arm_compute
+{
+class ICLTensor;
+
+/** Interface for the OpenCL kernel to be used for reshaping the tensor before returning the result of deconvolution.
+ *
+ * The input tensor to this OpenCL kernel is expected to be the result of a @ref CLGEMM operation between the Deconvolution input and the Deconvolution filter.
+ *
+ * The input tensor should have the following shape: [filter_width * filter_height * ofms, width, height, batch_size]
+ *
+ * The output tensor should have the following shape: [stride_x * (input_width - 1) + filter_width - 2 * padx, stride_y * (input_height - 1) + filter_height - 2 * pady, ofms, batch_size]
+ *
+ * For example, given a tensor with dimensions [4, 2, 2] this function returns a tensor with dimensions [1, 4, 4].
+ *
+ */
+class CLDeconvolutionReshapeOutputKernel : public ICLSimpleKernel
+{
+public:
+ /** Default constructor */
+ CLDeconvolutionReshapeOutputKernel();
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
+ CLDeconvolutionReshapeOutputKernel(const CLDeconvolutionReshapeOutputKernel &) = delete;
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
+ CLDeconvolutionReshapeOutputKernel &operator=(const CLDeconvolutionReshapeOutputKernel &) = delete;
+ /** Allow instances of this class to be moved */
+ CLDeconvolutionReshapeOutputKernel(CLDeconvolutionReshapeOutputKernel &&) = default;
+ /** Allow instances of this class to be moved */
+ CLDeconvolutionReshapeOutputKernel &operator=(CLDeconvolutionReshapeOutputKernel &&) = default;
+ /** Default destructor */
+ ~CLDeconvolutionReshapeOutputKernel() = default;
+
+ /** Initialise the kernel's source and destination.
+ *
+ * @param[in] input Input tensor. Supported data types: F16/F32.
+ * @param[in] bias Bias tensor to be added directly during the reshape operation. Supported data types: same as @p input. Supported data layouts: same as @p input.
+ * @param[out] output Output tensor with the following shape: [stride_x * (input_width - 1) + filter_width - 2 * padx, stride_y * (input_height - 1) + filter_height - 2 * pady, ofms, batch_size]
+ * Supported data types: same as @p input. Supported data layouts: same as @p input.
+ * @param[in] input_info Deconvolution input tensor info. Supported data types: same as @p input. Supported data layouts: same as @p input.
+ * @param[in] weights_info Deconvolution weights tensor info. Supported data types: same as @p input. Supported data layouts: same as @p input.
+ * @param[in] deconv_info Contains padding and policies to be used in the deconvolution, this is described in @ref PadStrideInfo. This kernel supports only stride_x = weights.width && stride_y = weights.height. Moreover, padding is not supported.
+ */
+ void configure(const ICLTensor *input, const ICLTensor *bias, ICLTensor *output, const ITensorInfo *input_info, const ITensorInfo *weights_info, const PadStrideInfo &deconv_info);
+
+ /** Static function to check if given info will lead to a valid configuration of @ref CLDeconvolutionReshapeOutputKernel.
+ *
+ * @param[in] input GEMM output tensor info to be reshaped. Supported data types: F16/F32.
+ * @param[in] bias (Optional) Optional bias tensor info to be added directly during the reshape operation. Supported data types: same as @p input. Supported data layouts: same as @p input.
+ * @param[in] output Reshaped output tensor info. Supported data types: same as @p input. Supported data layouts: same as @p input.
+ * @param[in] input_info Original input tensor info. Supported data types: same as @p input. Supported data layouts: same as @p input.
+ * @param[in]  weights_info Original weights tensor info. Supported data types: same as @p input. Supported data layouts: same as @p input.
+ * @param[in] deconv_info Contains padding and policies to be used in the deconvolution, this is described in @ref PadStrideInfo. This kernel supports only stride_x = weights.width && stride_y = weights.height. Moreover, padding is not supported.
+ *
+ * @return a Status
+ */
+ static Status validate(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output, const ITensorInfo *input_info, const ITensorInfo *weights_info, const PadStrideInfo &deconv_info);
+
+ // Inherited methods overridden:
+ void run(const Window &window, cl::CommandQueue &queue) override;
+
+private:
+ bool _add_bias;
+ const ICLTensor *_bias;
+};
+} // namespace arm_compute
+#endif /*__ARM_COMPUTE_CLDECONVOLUTIONLAYERRESHAPEOUTPUTKERNEL_H__ */
diff --git a/arm_compute/core/Types.h b/arm_compute/core/Types.h
index 24e91bd3c5..a2dfbb7d82 100644
--- a/arm_compute/core/Types.h
+++ b/arm_compute/core/Types.h
@@ -134,6 +134,13 @@ enum class ConvolutionMethod
FFT /**< Convolution using FFT */
};
+/** Available DeconvolutionMethod*/
+enum class DeconvolutionMethod
+{
+ GEMM, /**< Deconvolution using GEMM */
+ DIRECT, /**< Direct deconvolution */
+};
+
/** Padding mode to use for PadLayer */
enum class PaddingMode
{
diff --git a/arm_compute/runtime/CL/CLFunctions.h b/arm_compute/runtime/CL/CLFunctions.h
index a4fcdc27ac..129be4b307 100644
--- a/arm_compute/runtime/CL/CLFunctions.h
+++ b/arm_compute/runtime/CL/CLFunctions.h
@@ -77,6 +77,7 @@
#include "arm_compute/runtime/CL/functions/CLFuseBatchNormalization.h"
#include "arm_compute/runtime/CL/functions/CLGEMM.h"
#include "arm_compute/runtime/CL/functions/CLGEMMConvolutionLayer.h"
+#include "arm_compute/runtime/CL/functions/CLGEMMDeconvolutionLayer.h"
#include "arm_compute/runtime/CL/functions/CLGEMMInterleave4x4.h"
#include "arm_compute/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.h"
#include "arm_compute/runtime/CL/functions/CLGEMMLowpOutputStage.h"
diff --git a/arm_compute/runtime/CL/functions/CLDeconvolutionLayer.h b/arm_compute/runtime/CL/functions/CLDeconvolutionLayer.h
index b613708c50..e5b406ee5e 100644
--- a/arm_compute/runtime/CL/functions/CLDeconvolutionLayer.h
+++ b/arm_compute/runtime/CL/functions/CLDeconvolutionLayer.h
@@ -25,6 +25,7 @@
#define __ARM_COMPUTE_CLDECONVOLUTIONLAYER_H__
#include "arm_compute/runtime/CL/functions/CLDirectDeconvolutionLayer.h"
+#include "arm_compute/runtime/CL/functions/CLGEMMDeconvolutionLayer.h"
#include "arm_compute/runtime/IFunction.h"
#include "arm_compute/runtime/IMemoryManager.h"
@@ -34,6 +35,7 @@ namespace arm_compute
{
/** Basic function to compute the deconvolution layer. This function calls the following OpenCL kernels/functions:
*
+ * -# @ref CLGEMMDeconvolutionLayer
* -# @ref CLDirectDeconvolutionLayer
*/
class CLDeconvolutionLayer : public IFunction
@@ -44,7 +46,7 @@ public:
/** Set the input, weights, biases and output tensors.
*
- * @deprecated This method is deprecated and will be removed in release 19.05
+ * @note This method will be deprecated in the next release.
*
* @param[in,out] input Input tensor. 3 lower dimensions represent a single input, and an optional 4th dimension for batch of inputs. Data types supported: QASYMM8/F16/F32.
* @param[in] weights The 4d weights with dimensions [width, height, IFM, OFM]. Data type supported: Same as @p input.
@@ -60,7 +62,7 @@ public:
unsigned int inner_border_right, unsigned int inner_border_top, const WeightsInfo &weights_info = WeightsInfo());
/** Static function to check if given info will lead to a valid configuration of @ref CLDeconvolutionLayer
*
- * @deprecated This method is deprecated and will be removed in release 19.05
+ * @note This method will be deprecated in the next release.
*
* @param[in] input Input tensor info. 3 lower dimensions represent a single input, and an optional 4th dimension for batch of inputs. Data types supported: QASYMM8/F16/F32.
* @param[in] weights The 4d weights info with dimensions [width, height, IFM, OFM]. Data type supported: Same as @p input.
@@ -101,6 +103,8 @@ public:
static Status validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *bias, ITensorInfo *output, const PadStrideInfo &deconv_info,
const WeightsInfo &weights_info = WeightsInfo());
+ static DeconvolutionMethod get_deconvolution_method(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *bias, ITensorInfo *output, const PadStrideInfo &deconv_info,
+ const WeightsInfo &weights_info);
// Inherited methods overridden:
void run() override;
void prepare() override;
@@ -109,5 +113,5 @@ private:
std::shared_ptr<IMemoryManager> _memory_manager;
std::unique_ptr<IFunction> _function;
};
-}
+} // namespace arm_compute
#endif /* __ARM_COMPUTE_CLDECONVOLUTIONLAYER_H__ */
diff --git a/arm_compute/runtime/CL/functions/CLGEMMDeconvolutionLayer.h b/arm_compute/runtime/CL/functions/CLGEMMDeconvolutionLayer.h
new file mode 100644
index 0000000000..b28fa0f3f5
--- /dev/null
+++ b/arm_compute/runtime/CL/functions/CLGEMMDeconvolutionLayer.h
@@ -0,0 +1,144 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_CLGEMMDECONVOLUTIONLAYER_H__
+#define __ARM_COMPUTE_CLGEMMDECONVOLUTIONLAYER_H__
+
+#include "arm_compute/core/CL/kernels/CLDeconvolutionReshapeOutputKernel.h"
+#include "arm_compute/runtime/CL/CLMemoryGroup.h"
+#include "arm_compute/runtime/CL/CLTensor.h"
+#include "arm_compute/runtime/CL/functions/CLConvolutionLayer.h"
+#include "arm_compute/runtime/CL/functions/CLPermute.h"
+#include "arm_compute/runtime/CL/functions/CLReshapeLayer.h"
+#include "arm_compute/runtime/CL/functions/CLSlice.h"
+#include "arm_compute/runtime/CL/functions/CLTranspose.h"
+#include "arm_compute/runtime/IFunction.h"
+#include "arm_compute/runtime/IMemoryManager.h"
+
+#include <memory>
+
+namespace arm_compute
+{
+class ICLTensor;
+/** Function to run the deconvolution layer through a call to GEMM.
+ *
+ * Deconvolution Layer is the backward pass of Convolution Layer. First we transform the input depending on the stride and pad info and then perform a 1x1
+ * convolution pass. Input stride defines how many zeroes we should put between each element of the input, pad is the amount of padding, and finally a is a
+ * user-specified value, where a < stride - 1, that increases the padding at the top and right of the input image.
+ *
+ * The relation between input to output is as follows:
+ * \f[
+ * width\_output = (width\_input - 1) \cdot stride\_x - 2 \cdot padding\_x + kernel\_x
+ * \f]
+ * \f[
+ * height\_output = (height\_input - 1) \cdot stride\_y - 2 \cdot padding\_y + kernel\_y
+ * \f]
+ *
+ * where:
+ * width_input is the size of the first input dimension.
+ * height_input is the size of the second input dimension.
+ * width_output is the size of the first output dimension.
+ * height_output is the size of the second output dimension.
+ * kernel_x and kernel_y are the convolution sizes in x and y.
+ * stride_x and stride_y is the input stride of the first and second dimension.
+ *
+ * The weights used by Deconvolution are supposed to be the same as the ones used for Convolution.
+ *
+ * This function calls the following OpenCL kernels/functions:
+ *
+ * -# @ref CLGEMMLowpMatrixMultiplyCore
+ * -# @ref CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPoint
+ * -# @ref CLPermute
+ * -# @ref CLPermute
+ * -# @ref CLReshapeLayer
+ * -# @ref CLTranspose
+ * -# @ref CLDeconvolutionReshapeOutputKernel
+ * -# @ref CLSlice
+ */
+class CLGEMMDeconvolutionLayer : public IFunction
+{
+public:
+ /** Constructor */
+ CLGEMMDeconvolutionLayer(std::shared_ptr<IMemoryManager> memory_manager = nullptr);
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
+ CLGEMMDeconvolutionLayer(const CLGEMMDeconvolutionLayer &) = delete;
+ /** Default move constructor */
+ CLGEMMDeconvolutionLayer(CLGEMMDeconvolutionLayer &&) = default;
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
+ CLGEMMDeconvolutionLayer &operator=(const CLGEMMDeconvolutionLayer &) = delete;
+ /** Default move assignment operator */
+ CLGEMMDeconvolutionLayer &operator=(CLGEMMDeconvolutionLayer &&) = default;
+ /** Set the input, weights, biases and output tensors.
+ *
+ * @param[in,out] input Input tensor. 3 lower dimensions represent a single input, and an optional 4th dimension for batch of inputs. Data types supported: F16/F32. Data layout supported: NHWC
+ * @param[in] weights The 4d weights with dimensions [width, height, IFM, OFM]. Data type supported: Same as @p input. Data layout supported: same as @p input.
+ * @param[in] bias (Optional) The biases have one dimension. Data type supported: Same as @p input. Data layout supported: same as @p input.
+ * @param[out] output Output tensor. The output has the same number of dimensions as the @p input. Data layout supported: same as @p input.
+ * @param[in] deconv_info Contains padding and policies to be used in the deconvolution, this is described in @ref PadStrideInfo. This function supports only stride_x = weights.width && stride_y = weights.height. Moreover, padding is not supported.
+ */
+ void configure(const ICLTensor *input, const ICLTensor *weights, const ICLTensor *bias, ICLTensor *output, const PadStrideInfo &deconv_info);
+ /** Static function to check if given info will lead to a valid configuration of @ref CLDeconvolutionLayer
+ *
+ * @param[in] input Input tensor info. 3 lower dimensions represent a single input, and an optional 4th dimension for batch of inputs. Data types supported: F16/F32. Data layout supported: NHWC
+ * @param[in] weights The 4d weights info with dimensions [width, height, IFM, OFM]. Data type supported: Same as @p input. Data layout supported: same as @p input.
+ * @param[in] bias (Optional) The biases have one dimension. Data type supported: Same as @p input. Data layout supported: same as @p input.
+ * @param[in] output Output tensor info. The output has the same number of dimensions as the @p input. Data layout supported: same as @p input.
+ * @param[in] deconv_info Contains padding and policies to be used in the deconvolution, this is described in @ref PadStrideInfo.
+ *
+ * @return a status
+ */
+ static Status validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *bias, const ITensorInfo *output, const PadStrideInfo &deconv_info);
+
+ // Inherited methods overridden:
+ void run() override;
+ void prepare() override;
+
+private:
+ CLMemoryGroup _memory_group;
+
+ CLGEMM _mm_gemm;
+ CLGEMMLowpMatrixMultiplyCore _mm_gemmlowp;
+ CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPoint _gemmlowp_output_stage;
+ CLPermute _permute_input_to_nhwc;
+ CLPermute _permute_weights_to_nhwc;
+ CLReshapeLayer _reshape_weights;
+ CLTranspose _transpose_weights;
+ CLDeconvolutionReshapeOutputKernel _deconv_reshape;
+ CLSlice _slice_gemm;
+
+ CLTensor _gemmlowp_final;
+ CLTensor _reshaped_weights;
+ CLTensor _reshaped_weights_t;
+ CLTensor _permuted_input;
+ CLTensor _permuted_weights;
+ CLTensor _gemm_output;
+ CLTensor _slice_gemm_input;
+
+ const ICLTensor *_original_weights;
+ bool _is_prepared;
+ bool _padded_input;
+ bool _is_nchw;
+ bool _is_quantized;
+};
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_CLGEMMDECONVOLUTIONLAYER_H__ */
diff --git a/src/core/CL/CLKernelLibrary.cpp b/src/core/CL/CLKernelLibrary.cpp
index 322ff517d9..df60001343 100644
--- a/src/core/CL/CLKernelLibrary.cpp
+++ b/src/core/CL/CLKernelLibrary.cpp
@@ -214,6 +214,7 @@ const std::map<std::string, std::string> CLKernelLibrary::_kernel_program_map =
{ "copy_planes_3p", "channel_combine.cl" },
{ "copy_to_keypoint", "fast_corners.cl" },
{ "crop_tensor", "crop_tensor.cl" },
+ { "deconvolution_reshape", "deconvolution_layer.cl" },
{ "deconvolution_upsample", "deconvolution_layer.cl" },
{ "depthwise_convolution_3x3", "depthwise_convolution.cl" },
{ "depthwise_convolution_3x3_f16", "depthwise_convolution.cl" },
@@ -1093,7 +1094,7 @@ Kernel CLKernelLibrary::create_kernel(const std::string &kernel_name, const Stri
return Kernel(kernel_name, cl_program);
}
-void CLKernelLibrary::add_built_program(const std::string &built_program_name, cl::Program program)
+void CLKernelLibrary::add_built_program(const std::string &built_program_name, const cl::Program &program)
{
_built_programs_map.emplace(built_program_name, program);
}
diff --git a/src/core/CL/cl_kernels/deconvolution_layer.cl b/src/core/CL/cl_kernels/deconvolution_layer.cl
index e5169f983f..ea2455c613 100644
--- a/src/core/CL/cl_kernels/deconvolution_layer.cl
+++ b/src/core/CL/cl_kernels/deconvolution_layer.cl
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018 ARM Limited.
+ * Copyright (c) 2017-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -52,3 +52,79 @@ __kernel void deconvolution_upsample(
// Store result
*((__global DATA_TYPE *)dst.ptr) = *((__global DATA_TYPE *)src.ptr);
}
+
+#if defined(FILTER_WIDTH) && defined(FILTER_HEIGHT) && defined(SRC_WIDTH) && defined(SRC_HEIGHT) && defined(DATA_TYPE)
+/** This kernel reshapes the deconvolution output tensor before returning the result of the Deconvolution. The deconvolution output tensor
+ * is the result of a @ref CLGEMM operation between the deconvolution input and the deconvolution filter
+ *
+ * @note Data type should be given as a preprocessor argument using -DDATA_TYPE=type, e.g., -DDATA_TYPE=F32
+ * @note The width of the filter should be given as a preprocessor argument using -DFILTER_WIDTH=width, e.g., -DFILTER_WIDTH=2
+ * @note The height of the filter should be given as a preprocessor argument using -DFILTER_HEIGHT=height, e.g., -DFILTER_HEIGHT=2
+ * @note The width of the input should be given as a preprocessor argument using -DSRC_WIDTH=width, e.g., -DSRC_WIDTH=10
+ * @note The height of the input should be given as a preprocessor argument using -DSRC_HEIGHT=height, e.g., -DSRC_HEIGHT=10
+ * @note The output data layout is NHWC if the preprocessor argument NUM_FILTERS is defined, NCHW if NUM_FILTERS is not defined
+ *
+ * @param[in] src_ptr Pointer to the source image. Supported data types: QASYMM8/F16/F32
+ * @param[in] src_stride_x Stride of the source image in X dimension (in bytes)
+ * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] src_stride_y Stride of the source image in Y dimension (in bytes)
+ * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source image
+ * @param[out] dst_ptr Pointer to the destination image. Supported data types: same as @p src_ptr
+ * @param[in] dst_stride_x Stride of the destination image in X dimension (in bytes)
+ * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] dst_stride_y Stride of the destination image in Y dimension (in bytes)
+ * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] dst_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination image
+ * @param[in] bias_ptr (Optional) Pointer to the biases vector. Supported data types: F16/F32/S32
+ * @param[in] bias_stride_x (Optional) Stride of the biases vector in X dimension (in bytes)
+ * @param[in] bias_step_x (Optional) biases_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] bias_offset_first_element_in_bytes (Optional) The offset of the first element in the biases vector
+ */
+__kernel void deconvolution_reshape(
+ TENSOR3D_DECLARATION(src),
+ TENSOR3D_DECLARATION(dst)
+#if defined(ADD_BIAS)
+ ,
+ VECTOR_DECLARATION(bias)
+#endif // defined(ADD_BIAS)
+)
+{
+#define FILTER_AREA ((FILTER_WIDTH) * (FILTER_HEIGHT))
+
+ Tensor3D src = CONVERT_TO_TENSOR3D_STRUCT(src);
+ Tensor3D dst = CONVERT_TO_TENSOR3D_STRUCT_NO_STEP(dst);
+ const DATA_TYPE data = *(__global DATA_TYPE *)src.ptr;
+
+ // Store result
+ const int x_in = get_global_id(0);
+ const int y_in = get_global_id(1);
+ const int z_in = get_global_id(2);
+
+#if defined(NUM_FILTERS)
+ const int bias_index = x_in / (FILTER_AREA);
+ const int z_out = bias_index + (NUM_FILTERS) * (z_in / (SRC_HEIGHT));
+ const int x_out = x_in % (FILTER_WIDTH) + y_in * (FILTER_WIDTH);
+ const int y_out = (FILTER_HEIGHT) * (z_in % (SRC_HEIGHT)) + ((x_in % (FILTER_AREA)) / (FILTER_WIDTH));
+#else // defined(NUM_FILTERS)
+ const int x_out = x_in / (FILTER_AREA);
+ const int y_out = x_in % (FILTER_WIDTH) + y_in * (FILTER_WIDTH);
+ const int z_out = (FILTER_HEIGHT) * z_in + ((x_in % (FILTER_AREA)) / (FILTER_WIDTH));
+ const int bias_index = x_out;
+#endif // defined(NUM_FILTERS)
+
+#if defined(ADD_BIAS)
+ Vector bias = CONVERT_TO_VECTOR_STRUCT_NO_STEP(bias);
+ const DATA_TYPE bias_val = *(__global DATA_TYPE *)vector_offset(&bias, bias_index);
+ *((__global DATA_TYPE *)tensor3D_offset(&dst, x_out, y_out, z_out)) = data + bias_val;
+#else // defined(ADD_BIAS)
+ *((__global DATA_TYPE *)tensor3D_offset(&dst, x_out, y_out, z_out)) = data;
+#endif // defined(ADD_BIAS)
+
+#undef FILTER_AREA
+}
+#endif // defined(FILTER_WIDTH) && defined(FILTER_HEIGHT) && defined(SRC_WIDTH) && defined(SRC_HEIGHT) && defined(DATA_TYPE)
diff --git a/src/core/CL/kernels/CLDeconvolutionReshapeOutputKernel.cpp b/src/core/CL/kernels/CLDeconvolutionReshapeOutputKernel.cpp
new file mode 100644
index 0000000000..71218f5b52
--- /dev/null
+++ b/src/core/CL/kernels/CLDeconvolutionReshapeOutputKernel.cpp
@@ -0,0 +1,195 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/core/CL/kernels/CLDeconvolutionReshapeOutputKernel.h"
+
+#include "arm_compute/core/CL/CLHelpers.h"
+#include "arm_compute/core/CL/CLKernelLibrary.h"
+#include "arm_compute/core/CL/ICLTensor.h"
+#include "arm_compute/core/Helpers.h"
+#include "arm_compute/core/Utils.h"
+#include "arm_compute/core/Validate.h"
+#include "arm_compute/core/utils/misc/ShapeCalculator.h"
+
+namespace arm_compute
+{
+namespace
+{
+Status validate_arguments(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output, const ITensorInfo *input_info, const ITensorInfo *weights_info,
+ const PadStrideInfo &deconv_info)
+{
+ ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output, input_info, weights_info);
+ const DataLayout data_layout = input_info->data_layout();
+ const unsigned int stride_x = deconv_info.stride().first;
+ const unsigned int stride_y = deconv_info.stride().second;
+
+ const size_t idx_w = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
+ const size_t idx_h = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
+ const size_t idx_b = get_data_layout_dimension_index(data_layout, DataLayoutDimension::BATCHES);
+
+ const bool is_qasymm = is_data_type_quantized_asymmetric(input_info->data_type());
+
+ ARM_COMPUTE_RETURN_ERROR_ON(weights_info->dimension(idx_w) != deconv_info.stride().first);
+ ARM_COMPUTE_RETURN_ERROR_ON(weights_info->dimension(idx_h) != deconv_info.stride().second);
+
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F32, DataType::F16, DataType::QASYMM8, DataType::S32);
+ if(!is_qasymm)
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, input_info, weights_info);
+ }
+ ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(0) != weights_info->dimension(idx_w) * weights_info->dimension(idx_h) * weights_info->dimension(idx_b));
+ ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(1) != input_info->dimension(idx_w));
+ ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(2) != input_info->dimension(idx_h));
+ ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(3) != input_info->dimension(idx_b));
+
+ if(bias != nullptr)
+ {
+ if(is_qasymm)
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(bias, 1, DataType::S32);
+ }
+ else
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(bias, input);
+ }
+ ARM_COMPUTE_RETURN_ERROR_ON(bias->dimension(0) != weights_info->dimension(idx_b));
+ }
+
+ if(output->total_size() != 0)
+ {
+ auto out_dims = deconvolution_output_dimensions(input_info->dimension(idx_w), input_info->dimension(idx_h), weights_info->dimension(idx_w), weights_info->dimension(idx_h),
+ 0, 0, stride_x, stride_y);
+
+ const TensorShape output_shape = misc::shape_calculator::compute_deconvolution_output_shape(out_dims, *input_info, *weights_info);
+
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(output->tensor_shape(), output_shape);
+ }
+ return Status{};
+}
+
+std::pair<Status, Window> validate_and_configure_window(const ITensorInfo *input, ITensorInfo *output, const ITensorInfo *input_info, const ITensorInfo *weights_info, const PadStrideInfo &deconv_info)
+{
+ ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
+
+ const DataLayout data_layout = input_info->data_layout();
+
+ const unsigned int stride_x = deconv_info.stride().first;
+ const unsigned int stride_y = deconv_info.stride().second;
+ const size_t idx_w = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
+ const size_t idx_h = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
+
+ auto out_dims = deconvolution_output_dimensions(input_info->dimension(idx_w), input_info->dimension(idx_h), weights_info->dimension(idx_w), weights_info->dimension(idx_h),
+ 0, 0, stride_x, stride_y);
+
+ const TensorShape output_shape = misc::shape_calculator::compute_deconvolution_output_shape(out_dims, *input_info, *weights_info);
+
+ auto_init_if_empty(*output, input->clone()->set_tensor_shape(output_shape).set_data_layout(data_layout).set_quantization_info(input->quantization_info()));
+
+ Window win = calculate_max_window(*input);
+
+ return std::make_pair(Status{}, win);
+}
+} // namespace
+
+CLDeconvolutionReshapeOutputKernel::CLDeconvolutionReshapeOutputKernel()
+ : _add_bias(false),
+ _bias(nullptr)
+{
+}
+
+void CLDeconvolutionReshapeOutputKernel::configure(const ICLTensor *input, const ICLTensor *bias, ICLTensor *output, const ITensorInfo *input_info, const ITensorInfo *weights_info,
+ const PadStrideInfo &deconv_info)
+{
+ ARM_COMPUTE_ERROR_ON_NULLPTR(input, output, input_info, weights_info);
+ ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), (bias != nullptr ? bias->info() : nullptr), output->info(), input_info, weights_info, deconv_info));
+
+ // Configure kernel window
+ auto win_config = validate_and_configure_window(input->info(), output->info(), input_info, weights_info, deconv_info);
+ ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
+
+ const DataLayout data_layout = input_info->data_layout();
+ const size_t idx_w = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
+ const size_t idx_h = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
+ const size_t idx_b = get_data_layout_dimension_index(data_layout, DataLayoutDimension::BATCHES);
+
+ _input = input;
+ _output = output;
+ _add_bias = (bias != nullptr);
+ _bias = bias;
+
+ const int filter_w = weights_info->dimension(idx_w);
+ const int filter_h = weights_info->dimension(idx_h);
+ const int filter_b = weights_info->dimension(idx_b);
+ const int img_w = input_info->dimension(idx_w);
+ const int img_h = input_info->dimension(idx_h);
+
+ CLBuildOptions build_opts;
+ build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(input->info()->data_type()));
+ build_opts.add_option("-DFILTER_WIDTH=" + support::cpp11::to_string(filter_w));
+ build_opts.add_option("-DFILTER_HEIGHT=" + support::cpp11::to_string(filter_h));
+ build_opts.add_option("-DSRC_WIDTH=" + support::cpp11::to_string(img_w));
+ build_opts.add_option("-DSRC_HEIGHT=" + support::cpp11::to_string(img_h));
+ build_opts.add_option_if(data_layout == DataLayout::NCHW, "-DNUM_FILTERS=" + support::cpp11::to_string(filter_b));
+ build_opts.add_option_if(_add_bias, "-DADD_BIAS");
+
+ _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("deconvolution_reshape", build_opts.options()));
+ ICLKernel::configure_internal(win_config.second);
+
+ // Set config_id for enabling LWS tuning
+ _config_id = "deconvolution_reshape_output_";
+ _config_id += lower_string(string_from_data_type(input->info()->data_type()));
+ _config_id += "_";
+ _config_id += lower_string(string_from_data_layout(input->info()->data_layout()));
+ _config_id += "_";
+ _config_id += support::cpp11::to_string(input->info()->dimension(0));
+ _config_id += "_";
+ _config_id += support::cpp11::to_string(input->info()->dimension(1));
+ _config_id += "_";
+ _config_id += support::cpp11::to_string(output->info()->dimension(0));
+ _config_id += "_";
+ _config_id += support::cpp11::to_string(output->info()->dimension(1));
+}
+
+Status CLDeconvolutionReshapeOutputKernel::validate(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output, const ITensorInfo *input_info, const ITensorInfo *weights_info,
+ const PadStrideInfo &deconv_info)
+{
+ ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, bias, output, input_info, weights_info, deconv_info));
+ return Status{};
+}
+
+void CLDeconvolutionReshapeOutputKernel::run(const Window &window, cl::CommandQueue &queue)
+{
+ ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
+ ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window);
+ Window collapsed = window.collapse_if_possible(ICLKernel::window(), Window::DimZ);
+
+ unsigned int idx = 0;
+ add_3D_tensor_argument(idx, _input, collapsed);
+ add_3D_tensor_argument(idx, _output, collapsed);
+ if(_add_bias)
+ {
+ add_1D_tensor_argument(idx, _bias, collapsed);
+ }
+ enqueue(queue, *this, collapsed, lws_hint());
+}
+} // namespace arm_compute
diff --git a/src/core/CL/kernels/CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel.cpp b/src/core/CL/kernels/CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel.cpp
index eca24169b9..923b9529fa 100644
--- a/src/core/CL/kernels/CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel.cpp
+++ b/src/core/CL/kernels/CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel.cpp
@@ -321,4 +321,4 @@ void CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel::run(const Window &window, cl
}
while(window.slide_window_slice_3D(slice));
}
-} // namespace arm_compute \ No newline at end of file
+} // namespace arm_compute
diff --git a/src/runtime/CL/functions/CLDeconvolutionLayer.cpp b/src/runtime/CL/functions/CLDeconvolutionLayer.cpp
index 2c17473fc7..c6f79d341f 100644
--- a/src/runtime/CL/functions/CLDeconvolutionLayer.cpp
+++ b/src/runtime/CL/functions/CLDeconvolutionLayer.cpp
@@ -44,11 +44,29 @@ CLDeconvolutionLayer::CLDeconvolutionLayer(std::shared_ptr<IMemoryManager> memor
void CLDeconvolutionLayer::configure(ICLTensor *input, ICLTensor *weights, const ICLTensor *bias, ICLTensor *output, const PadStrideInfo &deconv_info,
unsigned int inner_border_right, unsigned int inner_border_top, const WeightsInfo &weights_info)
{
- ARM_COMPUTE_UNUSED(inner_border_right, inner_border_top);
ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights, output);
- auto f = arm_compute::support::cpp14::make_unique<CLDirectDeconvolutionLayer>();
- f->configure(input, weights, bias, output, deconv_info, weights_info);
- _function = std::move(f);
+ ARM_COMPUTE_UNUSED(inner_border_right, inner_border_top);
+
+ switch(CLDeconvolutionLayer::get_deconvolution_method(input->info(), weights->info(), nullptr, output->info(), deconv_info, weights_info))
+ {
+ case DeconvolutionMethod::DIRECT:
+ {
+ auto f = arm_compute::support::cpp14::make_unique<CLDirectDeconvolutionLayer>();
+ f->configure(input, weights, bias, output, deconv_info, weights_info);
+ _function = std::move(f);
+ break;
+ }
+ case DeconvolutionMethod::GEMM:
+ {
+ auto f = arm_compute::support::cpp14::make_unique<CLGEMMDeconvolutionLayer>(_memory_manager);
+ f->configure(input, weights, bias, output, deconv_info);
+ _function = std::move(f);
+ break;
+ }
+ default:
+ ARM_COMPUTE_ERROR("Not supported.");
+ break;
+ }
}
Status CLDeconvolutionLayer::validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *bias, ITensorInfo *output, const PadStrideInfo &deconv_info,
@@ -56,10 +74,47 @@ Status CLDeconvolutionLayer::validate(const ITensorInfo *input, const ITensorInf
{
ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, weights, output);
ARM_COMPUTE_UNUSED(inner_border_right, inner_border_top);
- ARM_COMPUTE_RETURN_ON_ERROR(CLDirectDeconvolutionLayer::validate(input, weights, bias, output, deconv_info, weights_info));
+
+ switch(CLDeconvolutionLayer::get_deconvolution_method(input, weights, bias, output, deconv_info, weights_info))
+ {
+ case DeconvolutionMethod::DIRECT:
+ {
+            // Validate direct deconvolution layer
+ ARM_COMPUTE_RETURN_ON_ERROR(CLDirectDeconvolutionLayer::validate(input, weights, bias, output, deconv_info, weights_info));
+ break;
+ }
+ case DeconvolutionMethod::GEMM:
+ {
+            // Validate GEMM-based deconvolution layer
+ ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMDeconvolutionLayer::validate(input, weights, bias, output, deconv_info));
+ break;
+ }
+ default:
+ ARM_COMPUTE_ERROR("Not supported.");
+ break;
+ }
+
return Status{};
}
+DeconvolutionMethod CLDeconvolutionLayer::get_deconvolution_method(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *bias, ITensorInfo *output, const PadStrideInfo &deconv_info,
+ const WeightsInfo &weights_info)
+{
+ ARM_COMPUTE_UNUSED(output, bias, weights_info);
+
+ const DataLayout data_layout = input->data_layout();
+
+ const size_t idx_w = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
+ const size_t idx_h = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
+
+ if(weights->dimension(idx_w) != deconv_info.stride().first || weights->dimension(idx_h) != deconv_info.stride().second)
+ {
+ return DeconvolutionMethod::DIRECT;
+ }
+
+ return DeconvolutionMethod::GEMM;
+}
+
void CLDeconvolutionLayer::configure(ICLTensor *input, ICLTensor *weights, const ICLTensor *bias, ICLTensor *output, const PadStrideInfo &deconv_info,
const WeightsInfo &weights_info)
{
diff --git a/src/runtime/CL/functions/CLDirectDeconvolutionLayer.cpp b/src/runtime/CL/functions/CLDirectDeconvolutionLayer.cpp
index 721054dcf3..6e14e26cbd 100644
--- a/src/runtime/CL/functions/CLDirectDeconvolutionLayer.cpp
+++ b/src/runtime/CL/functions/CLDirectDeconvolutionLayer.cpp
@@ -28,7 +28,6 @@
#include "arm_compute/core/Validate.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
-#include "utils/TypePrinter.h"
#include <memory>
#include <tuple>
@@ -161,8 +160,16 @@ void CLDirectDeconvolutionLayer::configure(ICLTensor *input, ICLTensor *weights,
_flip_axis.allocator()->allocate();
_flip_axis.map(true);
auto axis_data = reinterpret_cast<uint32_t *>(_flip_axis.buffer());
- axis_data[0] = 0;
- axis_data[1] = 1;
+ if(weights->info()->data_layout() == DataLayout::NHWC)
+ {
+ axis_data[0] = 1;
+ axis_data[1] = 2;
+ }
+ else
+ {
+ axis_data[0] = 0;
+ axis_data[1] = 1;
+ }
_flip_axis.unmap();
}
diff --git a/src/runtime/CL/functions/CLGEMM.cpp b/src/runtime/CL/functions/CLGEMM.cpp
index 60bfbf24e5..492709f0d0 100644
--- a/src/runtime/CL/functions/CLGEMM.cpp
+++ b/src/runtime/CL/functions/CLGEMM.cpp
@@ -206,7 +206,7 @@ void CLGEMM::configure_reshaped_v2(const ICLTensor *a, const ICLTensor *b, const
_reshape_lhs_kernel.set_target(gpu_target);
_mm_kernel.set_target(gpu_target);
- GEMMReshapeInfo reshape_info(m, n, k, 1, 1, depth_output_gemm3d, reinterpret_input_as_3d);
+ GEMMReshapeInfo reshape_info(m, n, k, 1, 1, depth_output_gemm3d, false);
// Manage intermediate buffers
_memory_group.manage(&_tmp_a);
diff --git a/src/runtime/CL/functions/CLGEMMDeconvolutionLayer.cpp b/src/runtime/CL/functions/CLGEMMDeconvolutionLayer.cpp
new file mode 100644
index 0000000000..bcb91e052c
--- /dev/null
+++ b/src/runtime/CL/functions/CLGEMMDeconvolutionLayer.cpp
@@ -0,0 +1,373 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/runtime/CL/functions/CLGEMMDeconvolutionLayer.h"
+
+#include "arm_compute/core/Helpers.h"
+#include "arm_compute/core/Validate.h"
+#include "arm_compute/core/utils/misc/ShapeCalculator.h"
+#include "arm_compute/core/utils/quantization/AsymmHelpers.h"
+#include "arm_compute/runtime/CL/CLScheduler.h"
+#include "utils/TypePrinter.h"
+
+#include <memory>
+#include <tuple>
+
+namespace arm_compute
+{
+namespace
+{
+std::pair<Coordinates, Coordinates> compute_start_end_slice_coordinates(const ITensorInfo &output_info, const PadStrideInfo &deconv_info, bool is_nchw)
+{
+ Coordinates start;
+ Coordinates end;
+
+ if(is_nchw)
+ {
+ start.set(0, deconv_info.pad_left());
+ start.set(1, deconv_info.pad_top());
+ end.set(0, output_info.dimension(0) - deconv_info.pad_right());
+ end.set(1, output_info.dimension(1) - deconv_info.pad_bottom());
+ }
+ else
+ {
+ start.set(0, 0);
+ start.set(1, deconv_info.pad_left());
+ start.set(2, deconv_info.pad_top());
+
+ end.set(0, output_info.dimension(0));
+ end.set(1, output_info.dimension(1) - deconv_info.pad_right());
+ end.set(2, output_info.dimension(2) - deconv_info.pad_bottom());
+ }
+
+ return { start, end };
+}
+} // namespace
+
+CLGEMMDeconvolutionLayer::CLGEMMDeconvolutionLayer(std::shared_ptr<IMemoryManager> memory_manager) // NOLINT
+ : _memory_group(std::move(memory_manager)),
+ _mm_gemm(),
+ _mm_gemmlowp(),
+ _gemmlowp_output_stage(),
+ _permute_input_to_nhwc(),
+ _permute_weights_to_nhwc(),
+ _reshape_weights(),
+ _transpose_weights(),
+ _deconv_reshape(),
+ _slice_gemm(),
+ _gemmlowp_final(),
+ _reshaped_weights(),
+ _reshaped_weights_t(),
+ _permuted_input(),
+ _permuted_weights(),
+ _gemm_output(),
+ _slice_gemm_input(),
+ _original_weights(),
+ _is_prepared(false),
+ _padded_input(false),
+ _is_nchw(false),
+ _is_quantized(false)
+{
+}
+
+Status CLGEMMDeconvolutionLayer::validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *bias, const ITensorInfo *output, const PadStrideInfo &deconv_info)
+{
+ ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, weights, output);
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F32, DataType::F16, DataType::QASYMM8);
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, weights);
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_LAYOUT(input, weights);
+
+ DataLayout data_layout = input->data_layout();
+ const bool padded_input = deconv_info.pad_bottom() > 0 || deconv_info.pad_left() > 0 || deconv_info.pad_right() > 0 || deconv_info.pad_top() > 0;
+ const bool is_nchw = input->data_layout() == DataLayout::NCHW;
+ const bool is_quantized = is_data_type_quantized_asymmetric(input->data_type());
+
+ const size_t idx_w = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
+ const size_t idx_h = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
+ const size_t idx_b = get_data_layout_dimension_index(data_layout, DataLayoutDimension::BATCHES);
+
+ ARM_COMPUTE_RETURN_ERROR_ON(weights->dimension(idx_w) != deconv_info.stride().first);
+ ARM_COMPUTE_RETURN_ERROR_ON(weights->dimension(idx_h) != deconv_info.stride().second);
+
+ TensorShape nhwc_weights_shape = weights->tensor_shape();
+ TensorShape nhwc_input_shape = input->tensor_shape();
+
+ if(is_nchw)
+ {
+ permute(nhwc_weights_shape, PermutationVector(2, 0, 1));
+ permute(nhwc_input_shape, PermutationVector(2, 0, 1));
+
+ TensorInfo nhwc_input_info = input->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(nhwc_input_shape).set_data_layout(DataLayout::NCHW);
+
+ TensorInfo nhwc_weights_info = weights->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(nhwc_weights_shape).set_data_layout(DataLayout::NCHW);
+
+ CLPermute::validate(weights, &nhwc_weights_info, PermutationVector(2, 0, 1));
+ CLPermute::validate(input, &nhwc_input_info, PermutationVector(2, 0, 1));
+ }
+
+ const TensorShape reshaped_shape = TensorShape(nhwc_weights_shape[0], nhwc_weights_shape[1] * nhwc_weights_shape[2] * nhwc_weights_shape[3]);
+ const TensorInfo reshaped_info = weights->clone()->set_tensor_shape(reshaped_shape).set_data_layout(DataLayout::NCHW).set_is_resizable(true);
+ ARM_COMPUTE_RETURN_ON_ERROR(CLReshapeLayer::validate(weights, &reshaped_info));
+
+ TensorShape transposed_shape(reshaped_shape[1], reshaped_shape[0]);
+ const TensorInfo reshaped_t_info = reshaped_info.clone()->set_is_resizable(true).set_tensor_shape(transposed_shape);
+ ARM_COMPUTE_RETURN_ON_ERROR(CLTranspose::validate(&reshaped_info, &reshaped_t_info));
+
+ TensorShape gemm_output_shape(weights->dimension(idx_w) * weights->dimension(idx_h) * weights->dimension(idx_b),
+ input->dimension(idx_w),
+ input->dimension(idx_h),
+ input->dimension(idx_b));
+
+ TensorInfo gemm_output_info = reshaped_t_info.clone()->set_tensor_shape(gemm_output_shape).set_is_resizable(true);
+ GEMMInfo gemm_info(false, false, true, input->dimension(idx_h), true);
+
+ if(is_quantized)
+ {
+ ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMLowpMatrixMultiplyCore::validate(&input->clone()->set_tensor_shape(nhwc_input_shape), &reshaped_t_info, nullptr, &gemm_output_info.set_data_type(DataType::S32),
+ gemm_info));
+ }
+ else
+ {
+ ARM_COMPUTE_RETURN_ON_ERROR(CLGEMM::validate(&input->clone()->set_tensor_shape(nhwc_input_shape).set_is_resizable(true), &reshaped_t_info, nullptr, &gemm_output_info, 1.0f, 0.0f, gemm_info));
+ }
+
+ auto out_dims = deconvolution_output_dimensions(input->dimension(idx_w), input->dimension(idx_h), weights->dimension(idx_w), weights->dimension(idx_h),
+ 0, 0, deconv_info.stride().first, deconv_info.stride().second);
+ const TensorShape deconv_shape = misc::shape_calculator::compute_deconvolution_output_shape(out_dims, *input, *weights);
+ TensorInfo col2im_output_info = gemm_output_info.clone()->set_tensor_shape(deconv_shape).set_is_resizable(true);
+
+ if(padded_input && is_quantized)
+ {
+ const auto start_end = compute_start_end_slice_coordinates(col2im_output_info, deconv_info, is_nchw);
+ ARM_COMPUTE_RETURN_ON_ERROR(CLDeconvolutionReshapeOutputKernel::validate(&gemm_output_info, bias, &col2im_output_info, input, weights, deconv_info));
+ ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPoint::validate(&col2im_output_info, nullptr,
+ &col2im_output_info.clone()->set_is_resizable(true).set_data_type(DataType::QASYMM8)));
+ ARM_COMPUTE_RETURN_ON_ERROR(CLSlice::validate(&col2im_output_info.clone()->set_is_resizable(true).set_data_type(DataType::QASYMM8), output, start_end.first, start_end.second));
+ }
+ else if(padded_input)
+ {
+ const auto start_end = compute_start_end_slice_coordinates(col2im_output_info, deconv_info, is_nchw);
+ ARM_COMPUTE_RETURN_ON_ERROR(CLDeconvolutionReshapeOutputKernel::validate(&gemm_output_info, bias, &col2im_output_info, input, weights, deconv_info));
+ ARM_COMPUTE_RETURN_ON_ERROR(CLSlice::validate(&col2im_output_info, output, start_end.first, start_end.second));
+ }
+ else if(is_quantized)
+ {
+ ARM_COMPUTE_RETURN_ON_ERROR(CLDeconvolutionReshapeOutputKernel::validate(&gemm_output_info, bias, &col2im_output_info, input, weights, deconv_info));
+ ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPoint::validate(&col2im_output_info, nullptr, output));
+ }
+ else
+ {
+ ARM_COMPUTE_RETURN_ON_ERROR(CLDeconvolutionReshapeOutputKernel::validate(&gemm_output_info, bias, output, input, weights, deconv_info));
+ }
+
+ return Status{};
+}
+
+void CLGEMMDeconvolutionLayer::configure(const ICLTensor *input, const ICLTensor *weights, const ICLTensor *bias, ICLTensor *output, const PadStrideInfo &deconv_info)
+{
+ ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights, output);
+ ARM_COMPUTE_ERROR_THROW_ON(CLGEMMDeconvolutionLayer::validate(input->info(),
+ weights->info(),
+ bias != nullptr ? bias->info() : nullptr,
+ output->info(),
+ deconv_info));
+
+ _original_weights = weights;
+ _padded_input = deconv_info.pad_bottom() > 0 || deconv_info.pad_left() > 0 || deconv_info.pad_right() > 0 || deconv_info.pad_top() > 0;
+ _is_nchw = input->info()->data_layout() == DataLayout::NCHW;
+ _is_quantized = is_data_type_quantized_asymmetric(input->info()->data_type());
+
+ const ICLTensor *input_to_use = input;
+ const ICLTensor *weights_to_use = weights;
+
+    // If the data layout is NCHW, transform everything to NHWC. Another alternative could be to
+    // do an outer product in NCHW and then an accumulation through a reduction. This would have two
+    // drawbacks: first, the outer product is less efficient than a full GEMM. Second, the reduction
+    // might be slower than GEMM.
+ if(_is_nchw)
+ {
+ _memory_group.manage(&_permuted_input);
+ _permute_input_to_nhwc.configure(input, &_permuted_input, PermutationVector(2U, 0U, 1U));
+
+ _permute_weights_to_nhwc.configure(weights, &_permuted_weights, PermutationVector(2U, 0U, 1U));
+
+ input_to_use = &_permuted_input;
+ weights_to_use = &_permuted_weights;
+ }
+
+ // Reshape the input weights. The weights will be reshaped only once during the call to prepare()
+ _reshaped_weights.allocator()->init(TensorInfo(TensorShape(weights_to_use->info()->dimension(0),
+ weights_to_use->info()->dimension(1) * weights_to_use->info()->dimension(2) * weights_to_use->info()->dimension(3)),
+ 1,
+ input->info()->data_type(), weights->info()->quantization_info()));
+
+ _reshape_weights.configure(weights_to_use, &_reshaped_weights);
+ _transpose_weights.configure(&_reshaped_weights, &_reshaped_weights_t);
+
+ const size_t idx_h = get_data_layout_dimension_index(input->info()->data_layout(), DataLayoutDimension::HEIGHT);
+ GEMMInfo gemm_info(false, false, true, input->info()->dimension(idx_h), true);
+
+    // Configure the matrix multiplication: GEMMLowp for asymmetric quantized types, GEMM otherwise
+ if(_is_quantized)
+ {
+ _mm_gemmlowp.configure(input_to_use, &_reshaped_weights_t, nullptr, &_gemm_output, gemm_info);
+ }
+ else
+ {
+ _mm_gemm.configure(input_to_use, &_reshaped_weights_t, nullptr, &_gemm_output, 1.f, 0.0f, gemm_info);
+ }
+
+ if(_is_nchw)
+ {
+ _permuted_input.allocator()->allocate();
+ }
+
+ ICLTensor *deconv_reshape_output = nullptr;
+ ICLTensor *slice_output = nullptr;
+ ICLTensor *output_stage_output = nullptr;
+
+ if(_padded_input && _is_quantized)
+ {
+ _memory_group.manage(&_slice_gemm_input);
+ _memory_group.manage(&_gemmlowp_final);
+ deconv_reshape_output = &_gemmlowp_final;
+ output_stage_output = &_slice_gemm_input;
+ slice_output = output;
+ }
+ else if(_padded_input)
+ {
+ _memory_group.manage(&_slice_gemm_input);
+ deconv_reshape_output = &_slice_gemm_input;
+ slice_output = output;
+ }
+ else if(_is_quantized)
+ {
+ _memory_group.manage(&_gemmlowp_final);
+ deconv_reshape_output = &_gemmlowp_final;
+ output_stage_output = output;
+ }
+ else
+ {
+ deconv_reshape_output = output;
+ }
+
+    // Configure the deconvolution reshape kernel (Col2Im-like) to rearrange the GEMM output
+ _deconv_reshape.configure(&_gemm_output, bias, deconv_reshape_output, input->info(), weights->info(), deconv_info);
+ _gemm_output.allocator()->allocate();
+
+ if(_is_quantized)
+ {
+ float multiplier = input->info()->quantization_info().scale * weights->info()->quantization_info().scale / _gemmlowp_final.info()->quantization_info().scale;
+ int output_multiplier(0);
+ int output_shift(0);
+ quantization::calculate_quantized_multiplier_less_than_one(multiplier, &output_multiplier, &output_shift);
+ _gemmlowp_output_stage.configure(&_gemmlowp_final, nullptr, output_stage_output, output_multiplier, output_shift, _gemmlowp_final.info()->quantization_info().offset);
+ _gemmlowp_final.allocator()->allocate();
+ }
+
+ // If the input was padded, the output needs to be sliced.
+ if(_padded_input)
+ {
+ const auto start_end = compute_start_end_slice_coordinates(*deconv_reshape_output->info(), deconv_info, _is_nchw);
+ _slice_gemm.configure(&_slice_gemm_input, slice_output, start_end.first, start_end.second);
+ _slice_gemm_input.allocator()->allocate();
+ }
+}
+
+void CLGEMMDeconvolutionLayer::run()
+{
+ prepare();
+
+ MemoryGroupResourceScope scope_mg(_memory_group);
+
+ if(_is_nchw)
+ {
+ _permute_input_to_nhwc.run();
+ }
+
+ if(_is_quantized)
+ {
+ _mm_gemmlowp.run();
+ }
+ else
+ {
+ _mm_gemm.run();
+ }
+
+ CLScheduler::get().enqueue(_deconv_reshape, false);
+
+ if(_is_quantized)
+ {
+ _gemmlowp_output_stage.run();
+ }
+
+ if(_padded_input)
+ {
+ _slice_gemm.run();
+ }
+}
+
+void CLGEMMDeconvolutionLayer::prepare()
+{
+ if(!_is_prepared)
+ {
+ ARM_COMPUTE_ERROR_ON(!_original_weights->is_used());
+
+ if(_is_nchw)
+ {
+ _permuted_weights.allocator()->allocate();
+ _permute_weights_to_nhwc.run();
+ }
+
+ _reshaped_weights.allocator()->allocate();
+ _reshape_weights.run();
+
+ if(_is_nchw)
+ {
+ _permuted_weights.allocator()->free();
+ }
+
+ _reshaped_weights_t.allocator()->allocate();
+ _transpose_weights.run();
+
+ // Prepare gemm
+ if(!_is_quantized)
+ {
+ _mm_gemm.prepare();
+ }
+ else
+ {
+ _mm_gemmlowp.prepare();
+ }
+
+ // Free resources
+ if(!_reshaped_weights_t.is_used())
+ {
+ _reshaped_weights_t.allocator()->free();
+ }
+
+ _original_weights->mark_as_unused();
+ _is_prepared = true;
+ }
+}
+} // namespace arm_compute
diff --git a/tests/validation/CL/DeconvolutionLayer.cpp b/tests/validation/CL/DeconvolutionLayer.cpp
index 958a0e438a..60be0b68c3 100644
--- a/tests/validation/CL/DeconvolutionLayer.cpp
+++ b/tests/validation/CL/DeconvolutionLayer.cpp
@@ -58,10 +58,13 @@ const auto data3x3 = datasets::SmallDeconvolutionShapes() * framework::dataset::
const auto data3x3_precommit = datasets::SmallDeconvolutionShapes() * framework::dataset::make("StrideX", 1, 2) * framework::dataset::make("StrideY", 1, 2) * framework::dataset::make("PadX", 0, 2)
* framework::dataset::make("PadY", 0, 2) * framework::dataset::make("NumKernels", { 3 });
+const auto data2x2_precommit = datasets::SmallDeconvolutionShapes() * framework::dataset::make("StrideX", 2) * framework::dataset::make("StrideY", 2) * framework::dataset::make("PadX", 1)
+ * framework::dataset::make("PadY", 1) * framework::dataset::make("NumKernels", { 3 });
+
const auto data1x1 = datasets::SmallDeconvolutionShapes() * framework::dataset::make("StrideX", 1, 4) * framework::dataset::make("StrideY", 1, 4) * framework::dataset::make("PadX", 0, 1)
* framework::dataset::make("PadY", 0, 1) * framework::dataset::make("NumKernels", { 3 });
-const auto data_layouts_dataset = framework::dataset::make("DataLayout", { DataLayout::NCHW });
+const auto data_layouts_dataset = framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC });
} // namespace
TEST_SUITE(CL)
@@ -135,6 +138,9 @@ template <typename T>
using CLDeconvolutionLayerFixture3x3 = DeconvolutionValidationFixture<CLTensor, CLAccessor, CLDeconvolutionLayer, T, 3, 3>;
template <typename T>
+using CLDeconvolutionLayerFixture2x2 = DeconvolutionValidationFixture<CLTensor, CLAccessor, CLDeconvolutionLayer, T, 2, 2>;
+
+template <typename T>
using CLDeconvolutionLayerFixture1x1 = DeconvolutionValidationFixture<CLTensor, CLAccessor, CLDeconvolutionLayer, T, 1, 1>;
TEST_SUITE(Float)
@@ -164,6 +170,15 @@ FIXTURE_DATA_TEST_CASE(RunLarge, CLDeconvolutionLayerFixture3x3<float>, framewor
}
TEST_SUITE_END() // W3x3
+TEST_SUITE(W2x2)
+FIXTURE_DATA_TEST_CASE(RunSmall, CLDeconvolutionLayerFixture2x2<float>, framework::DatasetMode::PRECOMMIT, combine(combine(data2x2_precommit, framework::dataset::make("DataType", DataType::F32)),
+ data_layouts_dataset))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference, tolerance_fp32);
+}
+TEST_SUITE_END() // W2x2
+
TEST_SUITE(W1x1)
FIXTURE_DATA_TEST_CASE(Run, CLDeconvolutionLayerFixture1x1<float>, framework::DatasetMode::NIGHTLY, combine(combine(data1x1, framework::dataset::make("DataType", DataType::F32)),
data_layouts_dataset))
@@ -200,6 +215,15 @@ FIXTURE_DATA_TEST_CASE(RunLarge, CLDeconvolutionLayerFixture3x3<half>, framework
}
TEST_SUITE_END() // W3x3
+TEST_SUITE(W2x2)
+FIXTURE_DATA_TEST_CASE(RunSmall, CLDeconvolutionLayerFixture2x2<half>, framework::DatasetMode::PRECOMMIT, combine(combine(data2x2_precommit, framework::dataset::make("DataType", DataType::F16)),
+ data_layouts_dataset))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference, tolerance_fp32);
+}
+TEST_SUITE_END() // W2x2
+
TEST_SUITE(W1x1)
FIXTURE_DATA_TEST_CASE(Run, CLDeconvolutionLayerFixture1x1<half>, framework::DatasetMode::NIGHTLY, combine(combine(data1x1, framework::dataset::make("DataType", DataType::F16)), data_layouts_dataset))
{
@@ -218,6 +242,9 @@ template <typename T>
using CLDeconvolutionLayerQuantizedFixture3x3 = DeconvolutionValidationQuantizedFixture<CLTensor, CLAccessor, CLDeconvolutionLayer, T, 3, 3>;
template <typename T>
+using CLDeconvolutionLayerQuantizedFixture2x2 = DeconvolutionValidationQuantizedFixture<CLTensor, CLAccessor, CLDeconvolutionLayer, T, 2, 2>;
+
+template <typename T>
using CLDeconvolutionLayerQuantizedFixture1x1 = DeconvolutionValidationQuantizedFixture<CLTensor, CLAccessor, CLDeconvolutionLayer, T, 1, 1>;
TEST_SUITE(Quantized)
@@ -253,6 +280,17 @@ FIXTURE_DATA_TEST_CASE(RunLarge, CLDeconvolutionLayerQuantizedFixture3x3<uint8_t
}
TEST_SUITE_END() // W3x3
+TEST_SUITE(W2x2)
+FIXTURE_DATA_TEST_CASE(RunSmall, CLDeconvolutionLayerQuantizedFixture2x2<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(data2x2_precommit, framework::dataset::make("DataType",
+ DataType::QASYMM8)),
+ data_layouts_dataset),
+ framework::dataset::make("QuantizationInfo", QuantizationInfo(2.f / 255.f, 0))))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference, tolerance_fp32);
+}
+TEST_SUITE_END() // W2x2
+
TEST_SUITE(W1x1)
FIXTURE_DATA_TEST_CASE(Run, CLDeconvolutionLayerQuantizedFixture1x1<uint8_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(data1x1, framework::dataset::make("DataType",
DataType::QASYMM8)),