aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorGiorgio Arena <giorgio.arena@arm.com>2017-08-23 16:36:24 +0100
committerAnthony Barbier <anthony.barbier@arm.com>2018-11-02 16:35:24 +0000
commit9fe414430c3c989b1cdc79d41e031495aed2cb7c (patch)
tree5f893eeb81fe75824918273f49171c85f108fe4e
parent580f4c7f6c2cbe320c606eb6ba73b3f4a20791ef (diff)
downloadComputeLibrary-9fe414430c3c989b1cdc79d41e031495aed2cb7c.tar.gz
COMPMID-452 CL Generic Depthwise Convolution implementation.
Change-Id: I115e48fe6ce5e281f3791aa5d80fdc754cdd2b5e Reviewed-on: http://mpd-gerrit.cambridge.arm.com/85082 Tested-by: Kaizen <jeremy.johnson+kaizengerrit@arm.com> Reviewed-by: Gian Marco Iodice <gianmarco.iodice@arm.com>
-rw-r--r--arm_compute/core/CL/CLKernels.h5
-rw-r--r--arm_compute/core/CL/OpenCL.h2
-rw-r--r--arm_compute/core/CL/kernels/CLDepthwiseConvolution3x3Kernel.h (renamed from arm_compute/core/CL/kernels/CLDepthwiseConvolutionKernel.h)22
-rw-r--r--arm_compute/core/CL/kernels/CLDepthwiseIm2ColKernel.h70
-rw-r--r--arm_compute/core/CL/kernels/CLDepthwiseVectorToTensorKernel.h70
-rw-r--r--arm_compute/core/CL/kernels/CLDepthwiseWeightsReshapeKernel.h66
-rw-r--r--arm_compute/core/CL/kernels/CLGEMMMatrixVectorMultiplyKernel.h67
-rw-r--r--arm_compute/runtime/CL/functions/CLDepthwiseConvolution.h72
-rw-r--r--src/core/CL/CLKernelLibrary.cpp8
-rw-r--r--src/core/CL/OpenCL.cpp14
-rw-r--r--src/core/CL/cl_kernels/convolution_layer.cl7
-rw-r--r--src/core/CL/cl_kernels/depthwise_convolution.cl134
-rw-r--r--src/core/CL/cl_kernels/gemv.cl111
-rw-r--r--src/core/CL/kernels/CLDepthwiseConvolution3x3Kernel.cpp (renamed from src/core/CL/kernels/CLDepthwiseConvolutionKernel.cpp)12
-rw-r--r--src/core/CL/kernels/CLDepthwiseIm2ColKernel.cpp105
-rw-r--r--src/core/CL/kernels/CLDepthwiseVectorToTensorKernel.cpp92
-rw-r--r--src/core/CL/kernels/CLDepthwiseWeightsReshapeKernel.cpp95
-rw-r--r--src/core/CL/kernels/CLGEMMMatrixVectorMultiplyKernel.cpp125
-rw-r--r--src/runtime/CL/functions/CLDepthwiseConvolution.cpp85
-rw-r--r--tests/benchmark/CL/DepthwiseConvolution.cpp2
-rw-r--r--tests/benchmark/CL/DepthwiseSeparableConvolutionLayer.cpp2
-rw-r--r--tests/datasets/DepthwiseConvolutionDataset.h28
-rw-r--r--tests/datasets/LargeDepthwiseConvolutionDataset.h56
-rw-r--r--tests/datasets/SmallDepthwiseConvolutionDataset.h56
-rw-r--r--tests/validation/CL/DepthwiseConvolution.cpp9
-rw-r--r--tests/validation/CPP/DepthwiseConvolution.cpp47
26 files changed, 1181 insertions, 181 deletions
diff --git a/arm_compute/core/CL/CLKernels.h b/arm_compute/core/CL/CLKernels.h
index f8aa5f8968..de40b85080 100644
--- a/arm_compute/core/CL/CLKernels.h
+++ b/arm_compute/core/CL/CLKernels.h
@@ -44,7 +44,9 @@
#include "arm_compute/core/CL/kernels/CLConvolutionKernel.h"
#include "arm_compute/core/CL/kernels/CLDepthConcatenateKernel.h"
#include "arm_compute/core/CL/kernels/CLDepthConvertKernel.h"
-#include "arm_compute/core/CL/kernels/CLDepthwiseConvolutionKernel.h"
+#include "arm_compute/core/CL/kernels/CLDepthwiseConvolution3x3Kernel.h"
+#include "arm_compute/core/CL/kernels/CLDepthwiseIm2ColKernel.h"
+#include "arm_compute/core/CL/kernels/CLDepthwiseVectorToTensorKernel.h"
#include "arm_compute/core/CL/kernels/CLDerivativeKernel.h"
#include "arm_compute/core/CL/kernels/CLDilateKernel.h"
#include "arm_compute/core/CL/kernels/CLDirectConvolutionLayerKernel.h"
@@ -57,6 +59,7 @@
#include "arm_compute/core/CL/kernels/CLGEMMMatrixAccumulateBiasesKernel.h"
#include "arm_compute/core/CL/kernels/CLGEMMMatrixAdditionKernel.h"
#include "arm_compute/core/CL/kernels/CLGEMMMatrixMultiplyKernel.h"
+#include "arm_compute/core/CL/kernels/CLGEMMMatrixVectorMultiplyKernel.h"
#include "arm_compute/core/CL/kernels/CLGEMMTranspose1xWKernel.h"
#include "arm_compute/core/CL/kernels/CLGaussian3x3Kernel.h"
#include "arm_compute/core/CL/kernels/CLGaussian5x5Kernel.h"
diff --git a/arm_compute/core/CL/OpenCL.h b/arm_compute/core/CL/OpenCL.h
index 562f30bf14..897e9368f1 100644
--- a/arm_compute/core/CL/OpenCL.h
+++ b/arm_compute/core/CL/OpenCL.h
@@ -82,6 +82,7 @@ public:
using clReleaseKernel_func = cl_int (*)(cl_kernel kernel);
using clGetDeviceInfo_func = cl_int (*)(cl_device_id, cl_device_info, size_t, void *, size_t *);
using clGetDeviceIDs_func = cl_int (*)(cl_platform_id, cl_device_type, cl_uint, cl_device_id *, cl_uint *);
+ using clRetainEvent_func = cl_int (*)(cl_event);
clBuildProgram_func clBuildProgram = nullptr;
clEnqueueNDRangeKernel_func clEnqueueNDRangeKernel = nullptr;
@@ -111,6 +112,7 @@ public:
clReleaseMemObject_func clReleaseMemObject = nullptr;
clGetDeviceInfo_func clGetDeviceInfo = nullptr;
clGetDeviceIDs_func clGetDeviceIDs = nullptr;
+ clRetainEvent_func clRetainEvent = nullptr;
private:
std::pair<bool, bool> _loaded{ false, false };
diff --git a/arm_compute/core/CL/kernels/CLDepthwiseConvolutionKernel.h b/arm_compute/core/CL/kernels/CLDepthwiseConvolution3x3Kernel.h
index 0631f07667..4e69f551b8 100644
--- a/arm_compute/core/CL/kernels/CLDepthwiseConvolutionKernel.h
+++ b/arm_compute/core/CL/kernels/CLDepthwiseConvolution3x3Kernel.h
@@ -21,8 +21,8 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef __ARM_COMPUTE_CLDEPTHWISECONVOLUTIONKERNEL_H__
-#define __ARM_COMPUTE_CLDEPTHWISECONVOLUTIONKERNEL_H__
+#ifndef __ARM_COMPUTE_CLDEPTHWISECONVOLUTIONKERNEL3x3_H__
+#define __ARM_COMPUTE_CLDEPTHWISECONVOLUTIONKERNEL3x3_H__
#include "arm_compute/core/CL/ICLKernel.h"
@@ -32,25 +32,25 @@ class ICLTensor;
/** Interface for the kernel to run a 3x3 depthwise convolution on a tensor.
*/
-class CLDepthwiseConvolutionKernel : public ICLKernel
+class CLDepthwiseConvolution3x3Kernel : public ICLKernel
{
public:
/** Default constructor */
- CLDepthwiseConvolutionKernel();
+ CLDepthwiseConvolution3x3Kernel();
/** Prevent instances of this class from being copied (As this class contains pointers) */
- CLDepthwiseConvolutionKernel(const CLDepthwiseConvolutionKernel &) = delete;
+ CLDepthwiseConvolution3x3Kernel(const CLDepthwiseConvolution3x3Kernel &) = delete;
/** Prevent instances of this class from being copied (As this class contains pointers) */
- CLDepthwiseConvolutionKernel &operator=(const CLDepthwiseConvolutionKernel &) = delete;
+ CLDepthwiseConvolution3x3Kernel &operator=(const CLDepthwiseConvolution3x3Kernel &) = delete;
/** Default Move Constructor. */
- CLDepthwiseConvolutionKernel(CLDepthwiseConvolutionKernel &&) = default;
+ CLDepthwiseConvolution3x3Kernel(CLDepthwiseConvolution3x3Kernel &&) = default;
/** Default move assignment operator. */
- CLDepthwiseConvolutionKernel &operator=(CLDepthwiseConvolutionKernel &&) = default;
+ CLDepthwiseConvolution3x3Kernel &operator=(CLDepthwiseConvolution3x3Kernel &&) = default;
/** Initialize the function's source, destination, conv and border_size.
*
* @param[in] input Source tensor. DataType supported: F32.
- * @param[out] output Destination tensor. DataType supported: F32.
+ * @param[out] output Destination tensor. Data type supported: Same as @p input.
* @param[in] weights Weights tensor. These are 3D tensors with dimensions [3, 3, IFM]. Data type supported: Same as @p input.
- * @param[in] conv_info Padding and stride information to use for the convolution. DataType supported: F32.
+ * @param[in] conv_info Padding and stride information to use for the convolution.
*/
void configure(const ICLTensor *input, ICLTensor *output, const ICLTensor *weights, const PadStrideInfo &conv_info);
@@ -69,4 +69,4 @@ private:
unsigned int _conv_pad_y;
};
} // namespace arm_compute
-#endif /*__ARM_COMPUTE_CLDEPTHWISECONVOLUTIONKERNEL_H__ */
+#endif /*__ARM_COMPUTE_CLDEPTHWISECONVOLUTIONKERNEL3x3_H__ */
diff --git a/arm_compute/core/CL/kernels/CLDepthwiseIm2ColKernel.h b/arm_compute/core/CL/kernels/CLDepthwiseIm2ColKernel.h
new file mode 100644
index 0000000000..ae56adfa30
--- /dev/null
+++ b/arm_compute/core/CL/kernels/CLDepthwiseIm2ColKernel.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_CLDEPTHWISEIM2COLKERNEL_H__
+#define __ARM_COMPUTE_CLDEPTHWISEIM2COLKERNEL_H__
+
+#include "arm_compute/core/CL/ICLKernel.h"
+#include "arm_compute/core/Size2D.h"
+
+namespace arm_compute
+{
+class ICLTensor;
+
+/** Interface for the depthwise im2col reshape kernel.
+ * This kernel reshapes the input's low 3 dimensions to a new 3D shape, where the output's first dimension is
+ * the linear patch size (FILTER_WIDTH * FILTER_HEIGHT), the second dimension is the number of patches per image, and the third dimension is unchanged.
+ **/
+class CLDepthwiseIm2ColKernel : public ICLKernel
+{
+public:
+ /** Default constructor */
+ CLDepthwiseIm2ColKernel();
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
+ CLDepthwiseIm2ColKernel(const CLDepthwiseIm2ColKernel &) = delete;
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
+ CLDepthwiseIm2ColKernel &operator=(const CLDepthwiseIm2ColKernel &) = delete;
+ /** Allow instances of this class to be moved */
+ CLDepthwiseIm2ColKernel(CLDepthwiseIm2ColKernel &&) = default;
+ /** Allow instances of this class to be moved */
+ CLDepthwiseIm2ColKernel &operator=(CLDepthwiseIm2ColKernel &&) = default;
+ /** Set the input and output of the kernel.
+ *
+ * @param[in] input The input tensor to convert. 3 lower dimensions represent a single input [width, height, IFM],
+ * while every optional dimension from 4 and above represent a batch of inputs. Data types supported: F32
+ * @param[out] output The output tensor. First 3 lower dimensions represent a transform of each 3D input,
+ * while every dimension above 3 represents a batch. Data types supported: Same as @p input
+ * @param[in] kernel_dims The kernel dimensions (width and height).
+ * @param[in] conv_info Contains padding and stride information described in @ref PadStrideInfo.
+ */
+ void configure(const ICLTensor *input, ICLTensor *output, const Size2D &kernel_dims, const PadStrideInfo &conv_info);
+
+ // Inherited methods overridden:
+ void run(const Window &window, cl::CommandQueue &queue) override;
+
+private:
+ const ICLTensor *_input;
+ ICLTensor *_output;
+};
+} // arm_compute
+#endif /*__ARM_COMPUTE_CLDEPTHWISEIM2COLKERNEL_H__ */
diff --git a/arm_compute/core/CL/kernels/CLDepthwiseVectorToTensorKernel.h b/arm_compute/core/CL/kernels/CLDepthwiseVectorToTensorKernel.h
new file mode 100644
index 0000000000..1dae9b2b5f
--- /dev/null
+++ b/arm_compute/core/CL/kernels/CLDepthwiseVectorToTensorKernel.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_CLDEPTHWISEVECTORTOTENSORKERNEL_H__
+#define __ARM_COMPUTE_CLDEPTHWISEVECTORTOTENSORKERNEL_H__
+
+#include "arm_compute/core/CL/ICLKernel.h"
+
+namespace arm_compute
+{
+class ICLTensor;
+
+/** Interface for the depthwise vector to tensor kernel.
+ *
+ * This kernel takes the 1D tensor that's been produced by the MatrixVectorMultiply
+ * kernel and reshapes it to given width and height (previously calculated, based
+ * on input/weights dimensions and convolution strides and padding).
+ *
+ **/
+class CLDepthwiseVectorToTensorKernel : public ICLKernel
+{
+public:
+ /** Default constructor */
+ CLDepthwiseVectorToTensorKernel();
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
+ CLDepthwiseVectorToTensorKernel(const CLDepthwiseVectorToTensorKernel &) = delete;
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
+ CLDepthwiseVectorToTensorKernel &operator=(const CLDepthwiseVectorToTensorKernel &) = delete;
+ /** Allow instances of this class to be moved */
+ CLDepthwiseVectorToTensorKernel(CLDepthwiseVectorToTensorKernel &&) = default;
+ /** Allow instances of this class to be moved */
+ CLDepthwiseVectorToTensorKernel &operator=(CLDepthwiseVectorToTensorKernel &&) = default;
+ /** Set the input and output of the kernel.
+ *
+ * @param[in] input The input vector to convert. Data type supported: F32.
+ * @param[out] output The output tensor. 3 lower dimensions represent a single input [width, height, IFM]. Data type supported: same as @p input.
+ * @param[in] conv_w The converted tensor's width.
+ * @param[in] conv_h The converted tensor's height.
+ */
+ void configure(const ICLTensor *input, ICLTensor *output, size_t conv_w, size_t conv_h);
+
+ // Inherited methods overridden:
+ void run(const Window &window, cl::CommandQueue &queue) override;
+
+private:
+ const ICLTensor *_input;
+ ICLTensor *_output;
+};
+} // arm_compute
+#endif /*__ARM_COMPUTE_CLDEPTHWISEVECTORTOTENSORKERNEL_H__ */
diff --git a/arm_compute/core/CL/kernels/CLDepthwiseWeightsReshapeKernel.h b/arm_compute/core/CL/kernels/CLDepthwiseWeightsReshapeKernel.h
new file mode 100644
index 0000000000..d493d9f052
--- /dev/null
+++ b/arm_compute/core/CL/kernels/CLDepthwiseWeightsReshapeKernel.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_CLDEPTHWISEWEIGHTSRESHAPEKERNEL_H__
+#define __ARM_COMPUTE_CLDEPTHWISEWEIGHTSRESHAPEKERNEL_H__
+
+#include "arm_compute/core/CL/ICLKernel.h"
+
+namespace arm_compute
+{
+class ICLTensor;
+
+/** Interface for the depthwise weights reshape kernel.
+ * This kernel reshapes the original weights' low 2 dimensions into a single row and
+ * keeps the second dimension as the original depth size.
+ *
+ **/
+class CLDepthwiseWeightsReshapeKernel : public ICLKernel
+{
+public:
+ /** Default constructor */
+ CLDepthwiseWeightsReshapeKernel();
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
+ CLDepthwiseWeightsReshapeKernel(const CLDepthwiseWeightsReshapeKernel &) = delete;
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
+ CLDepthwiseWeightsReshapeKernel &operator=(const CLDepthwiseWeightsReshapeKernel &) = delete;
+ /** Allow instances of this class to be moved */
+ CLDepthwiseWeightsReshapeKernel(CLDepthwiseWeightsReshapeKernel &&) = default;
+ /** Allow instances of this class to be moved */
+ CLDepthwiseWeightsReshapeKernel &operator=(CLDepthwiseWeightsReshapeKernel &&) = default;
+ /** Set the input and output of the kernel.
+ *
+ * @param[in] input The input tensor to convert. 3 lower dimensions represent a single input [width, height, IFM]. Data type supported: F32.
+ * @param[out] output The output tensor. Data type supported: same as @p input.
+ */
+ void configure(const ICLTensor *input, ICLTensor *output);
+
+ // Inherited methods overridden:
+ void run(const Window &window, cl::CommandQueue &queue) override;
+
+private:
+ const ICLTensor *_input;
+ ICLTensor *_output;
+};
+} // arm_compute
+#endif /*__ARM_COMPUTE_CLDEPTHWISEWEIGHTSRESHAPEKERNEL_H__ */
diff --git a/arm_compute/core/CL/kernels/CLGEMMMatrixVectorMultiplyKernel.h b/arm_compute/core/CL/kernels/CLGEMMMatrixVectorMultiplyKernel.h
new file mode 100644
index 0000000000..580322fb51
--- /dev/null
+++ b/arm_compute/core/CL/kernels/CLGEMMMatrixVectorMultiplyKernel.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_CLGEMMMATRIXVECTORMULTIPLYKERNEL_H__
+#define __ARM_COMPUTE_CLGEMMMATRIXVECTORMULTIPLYKERNEL_H__
+
+#include "arm_compute/core/CL/ICLKernel.h"
+
+namespace arm_compute
+{
+class ICLTensor;
+
+/** Interface for the GEMM matrix vector multiply kernel. **/
+class CLGEMMMatrixVectorMultiplyKernel : public ICLKernel
+{
+public:
+ /** Default constructor */
+ CLGEMMMatrixVectorMultiplyKernel();
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
+ CLGEMMMatrixVectorMultiplyKernel(const CLGEMMMatrixVectorMultiplyKernel &) = delete;
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
+ CLGEMMMatrixVectorMultiplyKernel &operator=(const CLGEMMMatrixVectorMultiplyKernel &) = delete;
+ /** Allow instances of this class to be moved */
+ CLGEMMMatrixVectorMultiplyKernel(CLGEMMMatrixVectorMultiplyKernel &&) = default;
+ /** Allow instances of this class to be moved */
+ CLGEMMMatrixVectorMultiplyKernel &operator=(CLGEMMMatrixVectorMultiplyKernel &&) = default;
+ /** Set the input and output of the kernel.
+ *
+ * @param[in] input0 The reshaped input tensor. Data types supported: F16/F32
+ * @param[in] input1 The 2D reshaped weights tensor. Data type supported: Same as @p input0.
+ * @param[out] output The output 2D tensor. Data types supported: Same as @p input0
+ */
+ void configure(const ICLTensor *input0, const ICLTensor *input1, ICLTensor *output);
+
+ // Inherited methods overridden:
+ void run(const Window &window, cl::CommandQueue &queue) override;
+ BorderSize border_size() const override;
+
+private:
+ const ICLTensor *_input0;
+ const ICLTensor *_input1;
+ ICLTensor *_output;
+ int _num_rows_read_per_iteration;
+ BorderSize _border_size;
+};
+} // arm_compute
+#endif /*__ARM_COMPUTE_CLGEMMMATRIXVECTORMULTIPLYKERNEL_H__ */
diff --git a/arm_compute/runtime/CL/functions/CLDepthwiseConvolution.h b/arm_compute/runtime/CL/functions/CLDepthwiseConvolution.h
index cc11f9cc5a..53bc079cb2 100644
--- a/arm_compute/runtime/CL/functions/CLDepthwiseConvolution.h
+++ b/arm_compute/runtime/CL/functions/CLDepthwiseConvolution.h
@@ -21,26 +21,57 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef __ARM_COMPUTE_CL_DEPTHWISE_CONVOLUTION_H__
-#define __ARM_COMPUTE_CL_DEPTHWISE_CONVOLUTION_H__
+#ifndef __ARM_COMPUTE_CLDEPTHWISECONVOLUTION_H__
+#define __ARM_COMPUTE_CLDEPTHWISECONVOLUTION_H__
-#include "arm_compute/core/CL/kernels/CLDepthwiseConvolutionKernel.h"
+#include "arm_compute/core/CL/kernels/CLDepthwiseConvolution3x3Kernel.h"
+#include "arm_compute/core/CL/kernels/CLDepthwiseIm2ColKernel.h"
+#include "arm_compute/core/CL/kernels/CLDepthwiseVectorToTensorKernel.h"
+#include "arm_compute/core/CL/kernels/CLDepthwiseWeightsReshapeKernel.h"
#include "arm_compute/core/CL/kernels/CLFillBorderKernel.h"
+#include "arm_compute/core/CL/kernels/CLGEMMMatrixVectorMultiplyKernel.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/CL/CLTensor.h"
-#include "arm_compute/runtime/CL/ICLSimpleFunction.h"
#include "arm_compute/runtime/IFunction.h"
-#include <cstdint>
-
namespace arm_compute
{
class ICLTensor;
-/** Basic function to execute depthwise convolution. This function calls the following OpenCL kernels:
+/** Basic function to execute a depthwise convolution for kernel size 3x3xC. This function calls the following OpenCL kernels:
*
- * -# @ref CLFillBorderKernel (executed if border_mode == CONSTANT or border_mode == REPLICATE)
- * -# @ref CLDepthwiseConvolutionKernel
+ * -# @ref CLDepthwiseConvolution3x3Kernel
+ * -# @ref CLFillBorderKernel (if pad_x or pad_y > 0)
+ *
+ */
+class CLDepthwiseConvolution3x3 : public IFunction
+{
+public:
+ /** Default constructor */
+ CLDepthwiseConvolution3x3();
+ /** Initialize the function's source, destination, conv and border_size.
+ *
+ * @param[in, out] input Source tensor. Data type supported: F32. (Written to only for border filling).
+ * @param[out] output Destination tensor. Data type supported: same as @p input.
+ * @param[in] weights Weights tensor. These are 3D tensors with shape [3, 3, IFM]. Data type supported: Same as @p input.
+ * @param[in] conv_info Padding and stride information to use for the convolution.
+ */
+ void configure(ICLTensor *input, ICLTensor *output, const ICLTensor *weights, const PadStrideInfo &conv_info);
+
+ // Inherited methods overridden:
+ void run() override;
+
+private:
+ CLDepthwiseConvolution3x3Kernel _kernel;
+ CLFillBorderKernel _border_handler;
+};
+
+/** Basic function to execute a generic depthwise convolution. This function calls the following OpenCL kernels:
+ *
+ * -# @ref CLDepthwiseIm2ColKernel
+ * -# @ref CLGEMMMatrixVectorMultiplyKernel
+ * -# @ref CLDepthwiseWeightsReshapeKernel
+ * -# @ref CLFillBorderKernel (if pad_x or pad_y > 0)
*
*/
class CLDepthwiseConvolution : public IFunction
@@ -48,12 +79,12 @@ class CLDepthwiseConvolution : public IFunction
public:
/** Default constructor */
CLDepthwiseConvolution();
- /** Initialize the function's source, destination, conv and border_size.
+ /** Initialize the function's source, destination, weights and convolution information.
*
- * @param[in] input Source tensor. DataType supported: F32. (Written to only for border filling).
- * @param[out] output Destination tensor. DataType supported: F32.
- * @param[in] weights Weights tensor. These are 3D tensors with dimensions [3, 3, IFM]. Data type supported: Same as @p input.
- * @param[in] conv_info Padding and stride information to use for the convolution. DataType supported: F32.
+ * @param[in, out] input Source tensor. Data type supported: F32. (Written to only for border filling).
+ * @param[out] output Destination tensor. Data type supported: same as @p input.
+ * @param[in] weights Weights tensor. These are 3D tensors with shape [kernel_x, kernel_y, IFM]. Data type supported: Same as @p input.
+ * @param[in] conv_info Padding and stride information to use for the convolution.
*/
void configure(ICLTensor *input, ICLTensor *output, const ICLTensor *weights, const PadStrideInfo &conv_info);
@@ -61,8 +92,15 @@ public:
void run() override;
private:
- CLDepthwiseConvolutionKernel _kernel;
- CLFillBorderKernel _border_handler;
+ CLDepthwiseIm2ColKernel _im2col_kernel;
+ CLDepthwiseWeightsReshapeKernel _weights_reshape_kernel;
+ CLGEMMMatrixVectorMultiplyKernel _v2mm_kernel;
+ CLDepthwiseVectorToTensorKernel _vector_to_tensor_kernel;
+ CLFillBorderKernel _v2mm_input_fill_border;
+ CLFillBorderKernel _v2mm_weights_fill_border;
+ CLTensor _input_reshaped;
+ CLTensor _weights_reshaped;
+ CLTensor _v2mm_output;
};
}
-#endif /*__ARM_COMPUTE_CL_DEPTHWISE_CONVOLUTION_H__ */
+#endif /*__ARM_COMPUTE_CLDEPTHWISECONVOLUTION_H__ */
diff --git a/src/core/CL/CLKernelLibrary.cpp b/src/core/CL/CLKernelLibrary.cpp
index 4cd0a78a92..e165cf3350 100644
--- a/src/core/CL/CLKernelLibrary.cpp
+++ b/src/core/CL/CLKernelLibrary.cpp
@@ -145,6 +145,9 @@ const std::map<std::string, std::string> CLKernelLibrary::_kernel_program_map =
{ "copy_planes_3p", "channel_combine.cl" },
{ "copy_to_keypoint", "fast_corners.cl" },
{ "depthwise_convolution_3x3", "depthwise_convolution.cl" },
+ { "depthwise_im2col", "depthwise_convolution.cl" },
+ { "depthwise_vector_to_tensor", "depthwise_convolution.cl" },
+ { "depthwise_weights_reshape", "depthwise_convolution.cl" },
{ "dequantization_layer", "dequantization_layer.cl" },
{ "derivative", "derivative.cl" },
{ "dilate", "dilate.cl" },
@@ -170,6 +173,7 @@ const std::map<std::string, std::string> CLKernelLibrary::_kernel_program_map =
{ "gemm_ma_f32", "gemm.cl" },
{ "gemm_ma_qs8", "gemm.cl" },
{ "gemm_ma_qs16", "gemm.cl" },
+ { "gemm_mv", "gemv.cl" },
{ "gemm_mm_interleaved_transposed_u8", "gemm.cl" },
{ "gemm_mm_interleaved_transposed_f16", "gemm.cl" },
{ "gemm_mm_interleaved_transposed_f32_midgard", "gemm.cl" },
@@ -414,6 +418,10 @@ const std::map<std::string, std::string> CLKernelLibrary::_program_source_map =
#include "./cl_kernels/gemm.clembed"
},
{
+ "gemv.cl",
+#include "./cl_kernels/gemv.clembed"
+ },
+ {
"harris_corners.cl",
#include "./cl_kernels/harris_corners.clembed"
},
diff --git a/src/core/CL/OpenCL.cpp b/src/core/CL/OpenCL.cpp
index 0f44ad999f..c997116df5 100644
--- a/src/core/CL/OpenCL.cpp
+++ b/src/core/CL/OpenCL.cpp
@@ -99,6 +99,7 @@ bool CLSymbols::load(const std::string &library)
clReleaseMemObject = reinterpret_cast<clReleaseMemObject_func>(dlsym(handle, "clReleaseMemObject"));
clGetDeviceInfo = reinterpret_cast<clGetDeviceInfo_func>(dlsym(handle, "clGetDeviceInfo"));
clGetDeviceIDs = reinterpret_cast<clGetDeviceIDs_func>(dlsym(handle, "clGetDeviceIDs"));
+ clRetainEvent = reinterpret_cast<clRetainEvent_func>(dlsym(handle, "clRetainEvent"));
dlclose(handle);
@@ -617,3 +618,16 @@ cl_int clGetDeviceInfo(cl_device_id device,
return CL_OUT_OF_RESOURCES;
}
}
+
+cl_int clRetainEvent(cl_event event)
+{
+ auto func = arm_compute::CLSymbols::get().clRetainEvent;
+ if(func != nullptr)
+ {
+ return func(event);
+ }
+ else
+ {
+ return CL_OUT_OF_RESOURCES;
+ }
+}
diff --git a/src/core/CL/cl_kernels/convolution_layer.cl b/src/core/CL/cl_kernels/convolution_layer.cl
index 162632bce6..9e9d0b0ccc 100644
--- a/src/core/CL/cl_kernels/convolution_layer.cl
+++ b/src/core/CL/cl_kernels/convolution_layer.cl
@@ -117,6 +117,9 @@ __kernel void reshape_to_columns(
* @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
* @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
* @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ * @param[in] filter_depth The depth of the used filter
+ * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes).
+ * @param[in] dst_stride_w Stride of the destination tensor in W dimension (in bytes).
*/
__kernel void im2col_generic(
TENSOR3D_DECLARATION(src),
@@ -192,6 +195,9 @@ __kernel void im2col_generic(
* @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
* @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
* @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ * @param[in] filter_depth The depth of the used filter
+ * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes).
+ * @param[in] dst_stride_w Stride of the destination tensor in W dimension (in bytes).
*/
__kernel void im2col_kernel3x3_padx0_pady0(
TENSOR3D_DECLARATION(src),
@@ -279,6 +285,7 @@ __kernel void col2im(
*((__global DATA_TYPE *)(dst.ptr + idx)) = *((__global DATA_TYPE *)(src.ptr));
}
#endif // defined(WIDTH_OUTPUT)
+
/** This kernel reshapes the tensor's low three dimensions to single row for GEMM operation
*
* @note Datatype should be given as a preprocessor argument using -DDATA_TYPE=type. e.g. -DDATA_TYPE=float
diff --git a/src/core/CL/cl_kernels/depthwise_convolution.cl b/src/core/CL/cl_kernels/depthwise_convolution.cl
index cbcdbf2a34..9c2c3a5b37 100644
--- a/src/core/CL/cl_kernels/depthwise_convolution.cl
+++ b/src/core/CL/cl_kernels/depthwise_convolution.cl
@@ -24,6 +24,8 @@
#include "helpers.h"
+#if defined(CONV_STRIDE_X)
+
#if CONV_STRIDE_X == 1
#define convolution1x3 convolution1x3_stride_1
#elif CONV_STRIDE_X == 2
@@ -186,4 +188,134 @@ __kernel void depthwise_convolution_3x3(TENSOR3D_DECLARATION(src), TENSOR3D_DECL
weights_values2.s0, weights_values2.s1, weights_values2.s2);
vstore2(pixels, 0, (__global float *)dst.ptr);
-} \ No newline at end of file
+}
+
+#endif //defined(CONV_STRIDE_X)
+
+#if defined(SRC_WIDTH) && defined(DATA_TYPE)
+/** This kernel reshapes each of the tensor's low three dimensions to single rows.
+ *
+ * @note Datatype and source width should be given as a preprocessor argument using -DDATA_TYPE=type and -DSRC_WIDTH=width. e.g. -DSRC_WIDTH=128
+ *
+ * @param[in] src_ptr Pointer to the source tensor. Supported data types: F16/F32
+ * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
+ * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
+ * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
+ * @param[out] dst_ptr Pointer to the destination tensor. Same as @p src_ptr
+ * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
+ * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ */
+__kernel void depthwise_weights_reshape(TENSOR3D_DECLARATION(src), IMAGE_DECLARATION(dst))
+{
+ Tensor3D src = CONVERT_TO_TENSOR3D_STRUCT(src);
+
+ __global DATA_TYPE *input_ptr = (__global DATA_TYPE *)src.ptr;
+ __global uchar *output_ptr = dst_ptr + dst_offset_first_element_in_bytes + get_global_id(1) * SRC_WIDTH * dst_stride_x + get_global_id(2) * dst_stride_y;
+
+ for(int i = 0; i < SRC_WIDTH; ++i, ++input_ptr)
+ {
+ *((__global DATA_TYPE *)(output_ptr + i * dst_stride_x)) = *input_ptr;
+ }
+}
+#endif //defined(SRC_WIDTH) && defined(DATA_TYPE)
+
+#if defined(STRIDE_X) && defined(STRIDE_Y) && defined(PAD_X) && defined(PAD_Y) && defined(KERNEL_WIDTH) && defined(KERNEL_HEIGHT) && defined(SRC_WIDTH) && defined(SRC_HEIGHT) && defined(DATA_TYPE)
+/** This kernel performs a reshaping of the input tensor to a tensor used to perform depthwise convolution using vector to matrix multiplication.
+ *
+ * @note The data type must be passed at compile time using -DDATA_TYPE: e.g. -DDATA_TYPE=float
+ * @note The convolution information must be passed at compile time using -DSTRIDE_X, -DSTRIDE_Y, -DPAD_X, -DPAD_Y, -DKERNEL_WIDTH, -DKERNEL_HEIGHT, -DSRC_WIDTH, -DSRC_HEIGHT
+ *
+ * @param[in] src_ptr Pointer to the source tensor. Supported data types: QS8/QS16/F16/F32
+ * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
+ * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
+ * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
+ * @param[out] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
+ * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
+ * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ */
+
+__kernel void depthwise_im2col(TENSOR3D_DECLARATION(src), TENSOR3D_DECLARATION(dst))
+{
+ Tensor3D dst = CONVERT_TO_TENSOR3D_STRUCT(dst);
+
+ const int src_pixel_linear = get_global_id(1) * STRIDE_X;
+ const int full_length = SRC_WIDTH + 2 * PAD_X;
+ const int max_initial_x = STRIDE_X * (((full_length - KERNEL_WIDTH) / STRIDE_X) + 1);
+
+ const int src_x = -PAD_X + src_pixel_linear % max_initial_x;
+ const int src_y = -PAD_Y + src_pixel_linear / max_initial_x * STRIDE_Y;
+ const int src_z = get_global_id(2);
+
+ __global uchar *input_ptr = src_ptr + src_offset_first_element_in_bytes + src_z * src_stride_z;
+ __global DATA_TYPE *output_ptr = ((__global DATA_TYPE *)(dst.ptr));
+
+ for(int y = src_y; y < src_y + KERNEL_HEIGHT; ++y)
+ {
+ for(int x = src_x; x < src_x + KERNEL_WIDTH; ++x, ++output_ptr)
+ {
+ if(x < 0 || x >= SRC_WIDTH || y < 0 || y >= SRC_HEIGHT)
+ {
+ *output_ptr = 0;
+ }
+ else
+ {
+ *output_ptr = *((__global DATA_TYPE *)(input_ptr + x * src_stride_x + y * src_stride_y));
+ }
+ }
+ }
+}
+
+#endif //defined(STRIDE_X) && defined(STRIDE_Y) && defined(PAD_X) && defined(PAD_Y) && defined(KERNEL_WIDTH) && defined(KERNEL_HEIGHT) && defined(SRC_WIDTH) && defined(SRC_HEIGHT) && defined(DATA_TYPE)
+
+#if defined(CONV_WIDTH) && defined(CONV_HEIGHT) && defined(DATA_TYPE)
+
+/** This kernel performs a reshaping of the output of the depthwise generic convolution.
+ *
+ * @note The data type must be passed at compile time using -DDATA_TYPE: e.g. -DDATA_TYPE=float
+ * @note The convolution information must be passed at compile time using -DCONV_WIDTH, -DCONV_HEIGHT, e.g -DCONV_WIDTH=32, -DCONV_HEIGHT=42
+ *
+ * @param[in] src_ptr Pointer to the source tensor. Supported data types: QS8/QS16/F16/F32
+ * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
+ * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
+ * @param[out] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
+ * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
+ * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ */
+__kernel void depthwise_vector_to_tensor(
+ VECTOR_DECLARATION(src),
+ TENSOR3D_DECLARATION(dst))
+{
+ Vector src = CONVERT_TO_VECTOR_STRUCT(src);
+
+ const int patch_size = CONV_WIDTH * CONV_HEIGHT;
+ const int id0 = get_global_id(0);
+ const int z = id0 / patch_size;
+ const int index2D = id0 - z * patch_size;
+
+ __global uchar *out_ptr = dst_ptr + dst_offset_first_element_in_bytes + index2D % CONV_WIDTH * dst_stride_x + index2D / CONV_WIDTH * dst_stride_y + z * dst_stride_z;
+ *((__global DATA_TYPE *)out_ptr) = *((__global DATA_TYPE *)src.ptr);
+}
+
+#endif //defined(CONV_WIDTH) && defined(CONV_HEIGHT) && defined(DATA_TYPE)
diff --git a/src/core/CL/cl_kernels/gemv.cl b/src/core/CL/cl_kernels/gemv.cl
new file mode 100644
index 0000000000..76128f7033
--- /dev/null
+++ b/src/core/CL/cl_kernels/gemv.cl
@@ -0,0 +1,111 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "helpers.h"
+
+/** This kernel applies a dot product between each plane of the input tensor and the corresponding column of the reshaped weight tensor.
+ *
+ * @note Datatype and source width and height should be given as a preprocessor argument using -DDATA_TYPE=type, -DSRC_WIDTH=width and -DSRC_HEIGHT=height. e.g. -DDATA_TYPE=short
+ *
+ * @param[in] src_ptr Pointer to the source tensor. Supported data types: F16/F32
+ * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
+ * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
+ * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
+ * @param[in] weights_ptr Pointer to the weights tensor. Same as @p src_ptr
+ * @param[in] weights_stride_x Stride of the weights tensor in X dimension (in bytes)
+ * @param[in] weights_step_x weights_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] weights_stride_y Stride of the weights tensor in Y dimension (in bytes)
+ * @param[in] weights_step_y weights_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] weights_offset_first_element_in_bytes The offset of the first element in the weights tensor
+ * @param[out] dst_ptr Pointer to the destination tensor. Same as @p src_ptr
+ * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
+ * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ */
+__kernel void gemm_mv(TENSOR3D_DECLARATION(src), IMAGE_DECLARATION(weights), VECTOR_DECLARATION(dst))
+{
+ Tensor3D src = CONVERT_TO_TENSOR3D_STRUCT(src);
+
+ int y = get_global_id(1) * 4;
+ int z = get_global_id(2);
+
+ __global uchar *current_weights = weights_ptr + weights_offset_first_element_in_bytes + z * weights_stride_y;
+ __global uchar *input_ptr = src.ptr;
+
+ DATA_TYPE acc0 = (DATA_TYPE)0;
+ DATA_TYPE acc1 = (DATA_TYPE)0;
+ DATA_TYPE acc2 = (DATA_TYPE)0;
+ DATA_TYPE acc3 = (DATA_TYPE)0;
+
+ // This kernel handles 4 rows per thread so that it can reuse the weights
+ for(int i = 0; i < SRC_WIDTH; i += 4)
+ {
+ VEC_DATA_TYPE(DATA_TYPE, 4)
+ weights = vload4(0, (__global DATA_TYPE *)(current_weights + i * weights_stride_x));
+
+ int4 offset = (int4)i * (int4)src_stride_x + (int4)(0, 1, 2, 3) * (int4)src_stride_y;
+
+ VEC_DATA_TYPE(DATA_TYPE, 4)
+ tmp0 = vload4(0, (__global DATA_TYPE *)(input_ptr + offset.s0));
+ VEC_DATA_TYPE(DATA_TYPE, 4)
+ tmp1 = vload4(0, (__global DATA_TYPE *)(input_ptr + offset.s1));
+ VEC_DATA_TYPE(DATA_TYPE, 4)
+ tmp2 = vload4(0, (__global DATA_TYPE *)(input_ptr + offset.s2));
+ VEC_DATA_TYPE(DATA_TYPE, 4)
+ tmp3 = vload4(0, (__global DATA_TYPE *)(input_ptr + offset.s3));
+
+ acc0 += dot(weights, tmp0);
+ acc1 += dot(weights, tmp1);
+ acc2 += dot(weights, tmp2);
+ acc3 += dot(weights, tmp3);
+ }
+
+ __global uchar *output_ptr = dst_ptr + dst_offset_first_element_in_bytes + (y + z * SRC_HEIGHT) * dst_stride_x;
+
+ int rows_left = SRC_HEIGHT - (y + 4);
+
+ // This check handles the last few rows when SRC_HEIGHT is not divisible by four
+ if(rows_left >= 0)
+ {
+ VEC_DATA_TYPE(DATA_TYPE, 4)
+ out = (VEC_DATA_TYPE(DATA_TYPE, 4))(acc0, acc1, acc2, acc3);
+ vstore4(out, 0, (__global DATA_TYPE *)output_ptr);
+ }
+ else
+ {
+ switch(rows_left)
+ {
+ case -1: // three rows left; one is padding
+ *((__global DATA_TYPE *)(output_ptr + 2 * dst_stride_x)) = acc2;
+ case -2: // two rows left; two are padding
+ *((__global DATA_TYPE *)(output_ptr + 1 * dst_stride_x)) = acc1;
+ case -3: // one row left; three are padding
+ *((__global DATA_TYPE *)(output_ptr + 0 * dst_stride_x)) = acc0;
+ break;
+ }
+ }
+}
diff --git a/src/core/CL/kernels/CLDepthwiseConvolutionKernel.cpp b/src/core/CL/kernels/CLDepthwiseConvolution3x3Kernel.cpp
index a24e304359..c10e6bea12 100644
--- a/src/core/CL/kernels/CLDepthwiseConvolutionKernel.cpp
+++ b/src/core/CL/kernels/CLDepthwiseConvolution3x3Kernel.cpp
@@ -21,7 +21,7 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#include "arm_compute/core/CL/kernels/CLDepthwiseConvolutionKernel.h"
+#include "arm_compute/core/CL/kernels/CLDepthwiseConvolution3x3Kernel.h"
#include "arm_compute/core/AccessWindowStatic.h"
#include "arm_compute/core/CL/CLHelpers.h"
@@ -36,17 +36,17 @@
using namespace arm_compute;
-CLDepthwiseConvolutionKernel::CLDepthwiseConvolutionKernel()
+CLDepthwiseConvolution3x3Kernel::CLDepthwiseConvolution3x3Kernel()
: _border_size(0), _input(), _output(), _weights(), _conv_stride_x(0), _conv_stride_y(0), _conv_pad_x(0), _conv_pad_y(0)
{
}
-BorderSize CLDepthwiseConvolutionKernel::border_size() const
+BorderSize CLDepthwiseConvolution3x3Kernel::border_size() const
{
return _border_size;
}
-void CLDepthwiseConvolutionKernel::configure(const ICLTensor *input, ICLTensor *output, const ICLTensor *weights, const PadStrideInfo &conv_info)
+void CLDepthwiseConvolution3x3Kernel::configure(const ICLTensor *input, ICLTensor *output, const ICLTensor *weights, const PadStrideInfo &conv_info)
{
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F32);
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::F32);
@@ -91,7 +91,7 @@ void CLDepthwiseConvolutionKernel::configure(const ICLTensor *input, ICLTensor *
ICLKernel::configure(win);
}
-void CLDepthwiseConvolutionKernel::run(const Window &window, cl::CommandQueue &queue)
+void CLDepthwiseConvolution3x3Kernel::run(const Window &window, cl::CommandQueue &queue)
{
ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(IKernel::window(), window);
@@ -117,4 +117,4 @@ void CLDepthwiseConvolutionKernel::run(const Window &window, cl::CommandQueue &q
enqueue(queue, *this, slice_out);
}
while(window.slide_window_slice_3D(slice_out));
-} \ No newline at end of file
+}
diff --git a/src/core/CL/kernels/CLDepthwiseIm2ColKernel.cpp b/src/core/CL/kernels/CLDepthwiseIm2ColKernel.cpp
new file mode 100644
index 0000000000..0eaadb80c6
--- /dev/null
+++ b/src/core/CL/kernels/CLDepthwiseIm2ColKernel.cpp
@@ -0,0 +1,105 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/core/CL/kernels/CLDepthwiseIm2ColKernel.h"
+
+#include "arm_compute/core/CL/CLHelpers.h"
+#include "arm_compute/core/CL/CLKernelLibrary.h"
+#include "arm_compute/core/CL/ICLTensor.h"
+#include "arm_compute/core/CL/OpenCL.h"
+#include "arm_compute/core/Error.h"
+#include "arm_compute/core/Helpers.h"
+#include "arm_compute/core/Types.h"
+#include "support/ToolchainSupport.h"
+
+#include <tuple>
+
+using namespace arm_compute;
+
+CLDepthwiseIm2ColKernel::CLDepthwiseIm2ColKernel()
+ : _input(nullptr), _output(nullptr)
+{
+}
+
+void CLDepthwiseIm2ColKernel::configure(const ICLTensor *input, ICLTensor *output, const Size2D &kernel_dims, const PadStrideInfo &conv_info)
+{
+ ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F16, DataType::F32);
+ ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
+ ARM_COMPUTE_ERROR_ON_MISMATCHING_FIXED_POINT(input, output);
+ ARM_COMPUTE_ERROR_ON(input->info()->dimension(2) != output->info()->dimension(2));
+ ARM_COMPUTE_ERROR_ON(output->info()->dimension(0) != (kernel_dims.width * kernel_dims.height));
+
+ _input = input;
+ _output = output;
+
+ // Create kernel
+ std::set<std::string> build_opts;
+
+ build_opts.emplace("-DDATA_TYPE=" + get_cl_type_from_data_type(input->info()->data_type()));
+ build_opts.emplace("-DSTRIDE_X=" + support::cpp11::to_string(conv_info.stride().first));
+ build_opts.emplace("-DSTRIDE_Y=" + support::cpp11::to_string(conv_info.stride().second));
+ build_opts.emplace("-DPAD_X=" + support::cpp11::to_string(conv_info.pad().first));
+ build_opts.emplace("-DPAD_Y=" + support::cpp11::to_string(conv_info.pad().second));
+ build_opts.emplace("-DSRC_WIDTH=" + support::cpp11::to_string(input->info()->dimension(0)));
+ build_opts.emplace("-DSRC_HEIGHT=" + support::cpp11::to_string(input->info()->dimension(1)));
+ build_opts.emplace("-DKERNEL_WIDTH=" + support::cpp11::to_string(kernel_dims.width));
+ build_opts.emplace("-DKERNEL_HEIGHT=" + support::cpp11::to_string(kernel_dims.height));
+
+ _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("depthwise_im2col", build_opts));
+
+ // Configure kernel window
+ Window win = calculate_max_window(*input->info(), Steps());
+ // The CLDepthwiseIm2ColKernel doesn't need padding so update_window_and_padding() can be skipped
+ output->info()->set_valid_region(ValidRegion(Coordinates(), output->info()->tensor_shape()));
+
+ ICLKernel::configure(win);
+}
+
+void CLDepthwiseIm2ColKernel::run(const Window &window, cl::CommandQueue &queue)
+{
+ ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
+ ARM_COMPUTE_ERROR_ON_MISMATCHING_WINDOWS(ICLKernel::window(), window);
+
+ Window slice = window.first_slice_window_3D();
+ Window slice_in = window.first_slice_window_3D();
+
+ // Setup slice
+ slice.set(Window::DimX, Window::Dimension(0, _output->info()->dimension(0), _output->info()->dimension(0)));
+ slice.set(Window::DimY, Window::Dimension(0, _output->info()->dimension(1), 1));
+ slice.set(Window::DimZ, Window::Dimension(0, _output->info()->dimension(2), 1));
+
+ // Setup input slice
+ // The first three dimensions of the input are increased by the inner loops
+ slice_in.set(Window::DimX, Window::Dimension(0, 0, 0));
+ slice_in.set(Window::DimY, Window::Dimension(0, 0, 0));
+ slice_in.set(Window::DimZ, Window::Dimension(0, 0, 0));
+
+ do
+ {
+ unsigned int idx = 0;
+ add_3D_tensor_argument(idx, _input, slice_in);
+ add_3D_tensor_argument(idx, _output, slice);
+ enqueue(queue, *this, slice);
+ }
+ while(window.slide_window_slice_3D(slice) && window.slide_window_slice_3D(slice_in));
+}
diff --git a/src/core/CL/kernels/CLDepthwiseVectorToTensorKernel.cpp b/src/core/CL/kernels/CLDepthwiseVectorToTensorKernel.cpp
new file mode 100644
index 0000000000..2086b1de03
--- /dev/null
+++ b/src/core/CL/kernels/CLDepthwiseVectorToTensorKernel.cpp
@@ -0,0 +1,92 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/core/CL/kernels/CLDepthwiseVectorToTensorKernel.h"
+
+#include "arm_compute/core/CL/CLHelpers.h"
+#include "arm_compute/core/CL/CLKernelLibrary.h"
+#include "arm_compute/core/CL/ICLTensor.h"
+#include "arm_compute/core/CL/OpenCL.h"
+#include "arm_compute/core/Error.h"
+#include "arm_compute/core/Helpers.h"
+#include "arm_compute/core/Types.h"
+#include "support/ToolchainSupport.h"
+
+using namespace arm_compute;
+
+CLDepthwiseVectorToTensorKernel::CLDepthwiseVectorToTensorKernel()
+ : _input(nullptr), _output(nullptr)
+{
+}
+
+void CLDepthwiseVectorToTensorKernel::configure(const ICLTensor *input, ICLTensor *output, size_t conv_w, size_t conv_h)
+{
+ ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F16, DataType::F32);
+ ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
+ ARM_COMPUTE_ERROR_ON_MISMATCHING_FIXED_POINT(input, output);
+
+ _input = input;
+ _output = output;
+
+ // Create kernel
+ std::set<std::string> build_opts;
+ build_opts.emplace("-DDATA_TYPE=" + get_cl_type_from_data_type(input->info()->data_type()));
+ build_opts.emplace("-DCONV_WIDTH=" + support::cpp11::to_string(conv_w));
+ build_opts.emplace("-DCONV_HEIGHT=" + support::cpp11::to_string(conv_h));
+
+ _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("depthwise_vector_to_tensor", build_opts));
+
+ // Configure kernel window
+ Window win = calculate_max_window(*input->info(), Steps());
+ // The CLDepthwiseVectorToTensorKernel doesn't need padding so update_window_and_padding() can be skipped
+ output->info()->set_valid_region(ValidRegion(Coordinates(), output->info()->tensor_shape()));
+
+ ICLKernel::configure(win);
+}
+
+void CLDepthwiseVectorToTensorKernel::run(const Window &window, cl::CommandQueue &queue)
+{
+ ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
+ ARM_COMPUTE_ERROR_ON_MISMATCHING_WINDOWS(ICLKernel::window(), window);
+
+ Window slice = window.first_slice_window_1D();
+ Window slice_out = window.first_slice_window_3D();
+
+ // Setup slice
+ slice.set(Window::DimX, Window::Dimension(0, _input->info()->dimension(0), 1));
+
+ // Setup output slice
+ // The first three dimensions of the output are increased by the inner loops
+ slice_out.set(Window::DimX, Window::Dimension(0, 0, 0));
+ slice_out.set(Window::DimY, Window::Dimension(0, 0, 0));
+ slice_out.set(Window::DimZ, Window::Dimension(0, 0, 0));
+
+ do
+ {
+ unsigned int idx = 0;
+ add_1D_tensor_argument(idx, _input, slice);
+ add_3D_tensor_argument(idx, _output, slice_out);
+ enqueue(queue, *this, slice);
+ }
+ while(window.slide_window_slice_1D(slice) && window.slide_window_slice_3D(slice_out));
+}
diff --git a/src/core/CL/kernels/CLDepthwiseWeightsReshapeKernel.cpp b/src/core/CL/kernels/CLDepthwiseWeightsReshapeKernel.cpp
new file mode 100644
index 0000000000..68de68b4c5
--- /dev/null
+++ b/src/core/CL/kernels/CLDepthwiseWeightsReshapeKernel.cpp
@@ -0,0 +1,95 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/core/CL/kernels/CLDepthwiseWeightsReshapeKernel.h"
+
+#include "arm_compute/core/CL/CLHelpers.h"
+#include "arm_compute/core/CL/CLKernelLibrary.h"
+#include "arm_compute/core/CL/ICLTensor.h"
+#include "arm_compute/core/CL/OpenCL.h"
+#include "arm_compute/core/Error.h"
+#include "arm_compute/core/Helpers.h"
+#include "arm_compute/core/Types.h"
+#include "support/ToolchainSupport.h"
+
+using namespace arm_compute;
+
+CLDepthwiseWeightsReshapeKernel::CLDepthwiseWeightsReshapeKernel()
+ : _input(nullptr), _output(nullptr)
+{
+}
+
+void CLDepthwiseWeightsReshapeKernel::configure(const ICLTensor *input, ICLTensor *output)
+{
+ ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F16, DataType::F32);
+ ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
+ ARM_COMPUTE_ERROR_ON_MISMATCHING_FIXED_POINT(input, output);
+ ARM_COMPUTE_ERROR_ON(input->info()->dimension(2) != output->info()->dimension(1));
+ ARM_COMPUTE_ERROR_ON(output->info()->dimension(0) != input->info()->dimension(0) * input->info()->dimension(1));
+
+ _input = input;
+ _output = output;
+
+ // Create kernel
+ std::set<std::string> build_opts;
+
+ build_opts.emplace("-DDATA_TYPE=" + get_cl_type_from_data_type(input->info()->data_type()));
+ build_opts.emplace("-DSRC_WIDTH=" + support::cpp11::to_string(input->info()->dimension(0)));
+
+ _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("depthwise_weights_reshape", build_opts));
+
+ // Configure kernel window
+ Window win = calculate_max_window(*input->info(), Steps());
+ // The CLDepthwiseWeightsReshapeKernel doesn't need padding so update_window_and_padding() can be skipped
+ output->info()->set_valid_region(ValidRegion(Coordinates(), output->info()->tensor_shape()));
+
+ ICLKernel::configure(win);
+}
+
+void CLDepthwiseWeightsReshapeKernel::run(const Window &window, cl::CommandQueue &queue)
+{
+ ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
+ ARM_COMPUTE_ERROR_ON_MISMATCHING_WINDOWS(ICLKernel::window(), window);
+
+ Window slice = window.first_slice_window_3D();
+ Window slice_out = window.first_slice_window_2D();
+
+ // Setup slice
+ slice.set(Window::DimX, Window::Dimension(0, _input->info()->dimension(0), _input->info()->dimension(0)));
+ slice.set(Window::DimY, Window::Dimension(0, _input->info()->dimension(1), 1));
+ slice.set(Window::DimZ, Window::Dimension(0, _input->info()->dimension(2), 1));
+
+ // Setup output slice
+ // The first two dimensions of the output are increased by the inner loops
+ slice_out.set(Window::DimX, Window::Dimension(0, 0, 0));
+ slice_out.set(Window::DimY, Window::Dimension(0, 0, 0));
+
+ do
+ {
+ unsigned int idx = 0;
+ add_3D_tensor_argument(idx, _input, slice);
+ add_2D_tensor_argument(idx, _output, slice_out);
+ enqueue(queue, *this, slice);
+ }
+ while(window.slide_window_slice_3D(slice) && window.slide_window_slice_2D(slice_out));
+}
diff --git a/src/core/CL/kernels/CLGEMMMatrixVectorMultiplyKernel.cpp b/src/core/CL/kernels/CLGEMMMatrixVectorMultiplyKernel.cpp
new file mode 100644
index 0000000000..9b8a5fdb73
--- /dev/null
+++ b/src/core/CL/kernels/CLGEMMMatrixVectorMultiplyKernel.cpp
@@ -0,0 +1,125 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/core/CL/kernels/CLGEMMMatrixVectorMultiplyKernel.h"
+
+#include "arm_compute/core/AccessWindowStatic.h"
+#include "arm_compute/core/CL/CLHelpers.h"
+#include "arm_compute/core/CL/CLKernelLibrary.h"
+#include "arm_compute/core/CL/ICLTensor.h"
+#include "arm_compute/core/CL/OpenCL.h"
+#include "arm_compute/core/Error.h"
+#include "arm_compute/core/Helpers.h"
+#include "arm_compute/core/Types.h"
+
+using namespace arm_compute;
+
+CLGEMMMatrixVectorMultiplyKernel::CLGEMMMatrixVectorMultiplyKernel()
+ : _input0(nullptr), _input1(nullptr), _output(nullptr), _num_rows_read_per_iteration(0), _border_size(0)
+{
+}
+BorderSize CLGEMMMatrixVectorMultiplyKernel::border_size() const
+{
+ return _border_size;
+}
+
+void CLGEMMMatrixVectorMultiplyKernel::configure(const ICLTensor *input0, const ICLTensor *input1, ICLTensor *output)
+{
+ ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input0, 1, DataType::F16, DataType::F32);
+ ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input0, input1, output);
+ ARM_COMPUTE_ERROR_ON_MISMATCHING_FIXED_POINT(input0, input1, output);
+ ARM_COMPUTE_ERROR_ON(input0->info()->dimension(2) != input1->info()->dimension(1));
+
+ _input0 = input0;
+ _input1 = input1;
+ _output = output;
+
+ // Create kernel
+ std::set<std::string> build_opts;
+
+ build_opts.emplace("-DDATA_TYPE=" + get_cl_type_from_data_type(input0->info()->data_type()));
+ build_opts.emplace("-DSRC_WIDTH=" + support::cpp11::to_string(input0->info()->dimension(0)));
+ build_opts.emplace("-DSRC_HEIGHT=" + support::cpp11::to_string(input0->info()->dimension(1)));
+
+ _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("gemm_mv", build_opts));
+
+ // Configure kernel window
+ const unsigned int num_elems_read_per_iteration = 4;
+
+ _num_rows_read_per_iteration = 4;
+
+ const unsigned int border_x = num_elems_read_per_iteration - input0->info()->dimension(0) % num_elems_read_per_iteration;
+ const unsigned int border_y = _num_rows_read_per_iteration - input0->info()->dimension(1) % _num_rows_read_per_iteration;
+
+ _border_size = BorderSize(border_y, border_x);
+
+ Window win = calculate_max_window(*input0->info(), Steps(num_elems_read_per_iteration));
+
+ AccessWindowRectangle input0_access(input0->info(), 0, 0, border_size().right, border_size().bottom);
+ AccessWindowHorizontal input1_access(input1->info(), 0, num_elems_read_per_iteration);
+ AccessWindowStatic output_access(_output->info(), 0, 0, _output->info()->dimension(0) + border_x, _output->info()->dimension(1) + border_y);
+
+ update_window_and_padding(win, input0_access, input1_access, output_access);
+
+ _output->info()->set_valid_region(ValidRegion(Coordinates(), _output->info()->tensor_shape()));
+
+ ICLKernel::configure(win);
+}
+
+void CLGEMMMatrixVectorMultiplyKernel::run(const Window &window, cl::CommandQueue &queue)
+{
+ ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
+ ARM_COMPUTE_ERROR_ON_MISMATCHING_WINDOWS(ICLKernel::window(), window);
+
+ Window slice_in = window.first_slice_window_3D();
+ Window slice_in2 = window.first_slice_window_3D();
+ Window slice_out = window.first_slice_window_3D();
+
+ // Setup input0 slice
+ slice_in.set(Window::DimX, Window::Dimension(0, _input0->info()->dimension(0) + border_size().right, _input0->info()->dimension(0) + border_size().right));
+ slice_in.set(Window::DimY, Window::Dimension(0, _input0->info()->dimension(1) + border_size().bottom, _num_rows_read_per_iteration));
+ slice_in.set(Window::DimZ, Window::Dimension(0, _input0->info()->dimension(2), 1));
+
+ // Setup input1 and output slice. Their dimensions are increased in the cl kernel.
+ slice_in2.set(Window::DimX, Window::Dimension(0, 0, 0));
+ slice_in2.set(Window::DimY, Window::Dimension(0, 0, 0));
+ slice_in2.set(Window::DimZ, Window::Dimension(0, 0, 0));
+
+ slice_out.set(Window::DimX, Window::Dimension(0, 0, 0));
+ slice_out.set(Window::DimY, Window::Dimension(0, 0, 0));
+ slice_out.set(Window::DimZ, Window::Dimension(0, 0, 0));
+
+ unsigned int idx_1 = num_arguments_per_3D_tensor();
+
+ add_2D_tensor_argument(idx_1, _input1, slice_in2);
+
+ do
+ {
+ unsigned int idx_0 = 0;
+ unsigned int idx_2 = num_arguments_per_3D_tensor() + num_arguments_per_2D_tensor();
+ add_3D_tensor_argument(idx_0, _input0, slice_in);
+ add_1D_tensor_argument(idx_2, _output, slice_out);
+ enqueue(queue, *this, slice_in);
+ }
+ while(window.slide_window_slice_3D(slice_in) && window.slide_window_slice_3D(slice_out));
+}
diff --git a/src/runtime/CL/functions/CLDepthwiseConvolution.cpp b/src/runtime/CL/functions/CLDepthwiseConvolution.cpp
index 7dac885ed0..22c037fc2a 100644
--- a/src/runtime/CL/functions/CLDepthwiseConvolution.cpp
+++ b/src/runtime/CL/functions/CLDepthwiseConvolution.cpp
@@ -30,23 +30,98 @@
using namespace arm_compute;
-CLDepthwiseConvolution::CLDepthwiseConvolution()
+CLDepthwiseConvolution3x3::CLDepthwiseConvolution3x3()
: _kernel(), _border_handler()
{
}
-void CLDepthwiseConvolution::configure(ICLTensor *input, ICLTensor *output, const ICLTensor *weights, const PadStrideInfo &conv_info)
+void CLDepthwiseConvolution3x3::configure(ICLTensor *input, ICLTensor *output, const ICLTensor *weights, const PadStrideInfo &conv_info)
{
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F32);
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::F32);
- ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(weights, 1, DataType::F32);
+ ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input, weights);
_kernel.configure(input, output, weights, conv_info);
_border_handler.configure(input, _kernel.border_size(), BorderMode::CONSTANT, PixelValue(0));
}
-void CLDepthwiseConvolution::run()
+void CLDepthwiseConvolution3x3::run()
{
CLScheduler::get().enqueue(_border_handler);
CLScheduler::get().enqueue(_kernel);
-} \ No newline at end of file
+}
+
+CLDepthwiseConvolution::CLDepthwiseConvolution()
+ : _im2col_kernel(), _weights_reshape_kernel(), _v2mm_kernel(), _vector_to_tensor_kernel(), _v2mm_input_fill_border(), _v2mm_weights_fill_border(), _input_reshaped(), _weights_reshaped(),
+ _v2mm_output()
+{
+}
+
+void CLDepthwiseConvolution::configure(ICLTensor *input, ICLTensor *output, const ICLTensor *weights, const PadStrideInfo &conv_info)
+{
+ ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F32);
+ ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::F32);
+ ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input, weights);
+ ARM_COMPUTE_ERROR_ON(input->info()->dimension(2) != weights->info()->dimension(2));
+
+ const size_t weights_w = weights->info()->dimension(0);
+ const size_t weights_h = weights->info()->dimension(1);
+ const size_t weights_z = weights->info()->dimension(2);
+
+ unsigned int conv_w = 0;
+ unsigned int conv_h = 0;
+ std::tie(conv_w, conv_h) = scaled_dimensions(input->info()->dimension(0), input->info()->dimension(1), weights_w, weights_h, conv_info);
+
+ // Set up intermediate tensors
+ const size_t patch_size = weights_w * weights_h;
+ const size_t conv_size = conv_w * conv_h;
+
+ TensorShape shape_im2col = input->info()->tensor_shape();
+ shape_im2col.set(0, patch_size);
+ shape_im2col.set(1, conv_size);
+ shape_im2col.set(2, weights_z);
+
+ const TensorShape shape_weights_reshape(patch_size, weights_z);
+ TensorShape shape_v2mm_out = output->info()->tensor_shape();
+ shape_v2mm_out.set(0, conv_size * weights_z);
+ shape_v2mm_out.set(1, 1);
+ shape_v2mm_out.set(2, 1);
+
+ const TensorInfo info_im2col(shape_im2col, 1, input->info()->data_type(), input->info()->fixed_point_position());
+ const TensorInfo info_weights_reshape(shape_weights_reshape, 1, weights->info()->data_type(), weights->info()->fixed_point_position());
+ const TensorInfo info_v2mm_out(shape_v2mm_out, 1, input->info()->data_type(), input->info()->fixed_point_position());
+
+ _input_reshaped.allocator()->init(info_im2col);
+ _weights_reshaped.allocator()->init(info_weights_reshape);
+ _v2mm_output.allocator()->init(info_v2mm_out);
+
+ // Configure kernels
+ _im2col_kernel.configure(input, &_input_reshaped, Size2D(weights_w, weights_h), conv_info);
+ _weights_reshape_kernel.configure(weights, &_weights_reshaped);
+ _v2mm_kernel.configure(&_input_reshaped, &_weights_reshaped, &_v2mm_output);
+ _vector_to_tensor_kernel.configure(&_v2mm_output, output, conv_w, conv_h);
+
+ BorderSize border_size = _v2mm_kernel.border_size();
+ _v2mm_input_fill_border.configure(&_input_reshaped, border_size, BorderMode::CONSTANT, PixelValue(0));
+
+ border_size.bottom = 0;
+ _v2mm_weights_fill_border.configure(&_weights_reshaped, border_size, BorderMode::CONSTANT, PixelValue(0));
+
+ // Allocate intermediate tensors
+ _input_reshaped.allocator()->allocate();
+ _weights_reshaped.allocator()->allocate();
+ _v2mm_output.allocator()->allocate();
+}
+
+void CLDepthwiseConvolution::run()
+{
+ CLScheduler::get().enqueue(_im2col_kernel);
+
+ CLScheduler::get().enqueue(_weights_reshape_kernel);
+
+ CLScheduler::get().enqueue(_v2mm_input_fill_border);
+ CLScheduler::get().enqueue(_v2mm_weights_fill_border);
+ CLScheduler::get().enqueue(_v2mm_kernel);
+
+ CLScheduler::get().enqueue(_vector_to_tensor_kernel);
+}
diff --git a/tests/benchmark/CL/DepthwiseConvolution.cpp b/tests/benchmark/CL/DepthwiseConvolution.cpp
index acdc4019b4..a8c229fad8 100644
--- a/tests/benchmark/CL/DepthwiseConvolution.cpp
+++ b/tests/benchmark/CL/DepthwiseConvolution.cpp
@@ -44,7 +44,7 @@ TEST_SUITE(CL)
REGISTER_FIXTURE_DATA_TEST_CASE(MobileNetDepthwiseConvolution, CLDepthwiseConvolutionFixture, framework::DatasetMode::ALL,
framework::dataset::combine(framework::dataset::combine(datasets::MobileNetDepthwiseConvolutionDataset(), data_types),
- framework::dataset::make("Batches", { 1, 4, 8 })));
+ framework::dataset::make("Batches", { 1 })));
TEST_SUITE_END()
} // namespace test
diff --git a/tests/benchmark/CL/DepthwiseSeparableConvolutionLayer.cpp b/tests/benchmark/CL/DepthwiseSeparableConvolutionLayer.cpp
index 1ab8628435..0c4fbb18bd 100644
--- a/tests/benchmark/CL/DepthwiseSeparableConvolutionLayer.cpp
+++ b/tests/benchmark/CL/DepthwiseSeparableConvolutionLayer.cpp
@@ -44,7 +44,7 @@ TEST_SUITE(CL)
REGISTER_FIXTURE_DATA_TEST_CASE(MobileNetDepthwiseSeparableConvolutionLayer, CLDepthwiseSeparableConvolutionLayerFixture, framework::DatasetMode::ALL,
framework::dataset::combine(framework::dataset::combine(datasets::MobileNetDepthwiseSeparableConvolutionLayerDataset(), data_types),
- framework::dataset::make("Batches", { 1, 4, 8 })));
+ framework::dataset::make("Batches", { 1 })));
TEST_SUITE_END()
} // namespace test
diff --git a/tests/datasets/DepthwiseConvolutionDataset.h b/tests/datasets/DepthwiseConvolutionDataset.h
index 93da37532f..bdc949501e 100644
--- a/tests/datasets/DepthwiseConvolutionDataset.h
+++ b/tests/datasets/DepthwiseConvolutionDataset.h
@@ -113,6 +113,34 @@ private:
std::vector<TensorShape> _dst_shapes{};
std::vector<PadStrideInfo> _infos{};
};
+class SmallDepthwiseConvolutionDataset final : public DepthwiseConvolutionDataset
+{
+public:
+ SmallDepthwiseConvolutionDataset()
+ {
+ add_config(TensorShape(7U, 7U, 3U), TensorShape(3U, 3U, 3U), TensorShape(5U, 5U, 3U), PadStrideInfo(1, 1, 0, 0));
+ add_config(TensorShape(23U, 27U, 5U), TensorShape(3U, 5U, 5U), TensorShape(11U, 23U, 5U), PadStrideInfo(2, 1, 0, 0));
+ add_config(TensorShape(33U, 27U, 7U), TensorShape(7U, 3U, 7U), TensorShape(10U, 13U, 7U), PadStrideInfo(3, 2, 1, 0));
+ add_config(TensorShape(17U, 31U, 2U), TensorShape(5U, 9U, 2U), TensorShape(15U, 13U, 2U), PadStrideInfo(1, 2, 1, 1));
+ add_config(TensorShape(23U, 27U, 5U), TensorShape(11U, 3U, 5U), TensorShape(13U, 13U, 5U), PadStrideInfo(1, 2, 0, 0));
+ add_config(TensorShape(17U, 31U, 2U, 3U), TensorShape(5U, 9U, 2U), TensorShape(15U, 13U, 2U, 3U), PadStrideInfo(1, 2, 1, 1));
+ }
+};
+
+class LargeDepthwiseConvolutionDataset final : public DepthwiseConvolutionDataset
+{
+public:
+ LargeDepthwiseConvolutionDataset()
+ {
+ add_config(TensorShape(233U, 277U, 55U), TensorShape(3U, 3U, 55U), TensorShape(116U, 275U, 55U), PadStrideInfo(2, 1, 0, 0));
+ add_config(TensorShape(333U, 277U, 77U), TensorShape(3U, 3U, 77U), TensorShape(111U, 138U, 77U), PadStrideInfo(3, 2, 1, 0));
+ add_config(TensorShape(177U, 311U, 22U), TensorShape(3U, 3U, 22U), TensorShape(177U, 156U, 22U), PadStrideInfo(1, 2, 1, 1));
+ add_config(TensorShape(233U, 277U, 55U), TensorShape(3U, 3U, 55U), TensorShape(231U, 138U, 55U), PadStrideInfo(1, 2, 0, 0));
+ add_config(TensorShape(333U, 277U, 77U), TensorShape(3U, 3U, 77U), TensorShape(166U, 93U, 77U), PadStrideInfo(2, 3, 0, 1));
+ add_config(TensorShape(177U, 311U, 22U), TensorShape(3U, 3U, 22U), TensorShape(89U, 311U, 22U), PadStrideInfo(2, 1, 1, 1));
+ }
+};
+
} // namespace datasets
} // namespace test
} // namespace arm_compute
diff --git a/tests/datasets/LargeDepthwiseConvolutionDataset.h b/tests/datasets/LargeDepthwiseConvolutionDataset.h
deleted file mode 100644
index 22b1516d20..0000000000
--- a/tests/datasets/LargeDepthwiseConvolutionDataset.h
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- * Copyright (c) 2017 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_TEST_LARGE_DEPTHWISE_CONVOLUTION_DATASET
-#define ARM_COMPUTE_TEST_LARGE_DEPTHWISE_CONVOLUTION_DATASET
-
-#include "tests/datasets/DepthwiseConvolutionDataset.h"
-
-#include "tests/TypePrinter.h"
-
-#include "arm_compute/core/TensorShape.h"
-#include "arm_compute/core/Types.h"
-
-namespace arm_compute
-{
-namespace test
-{
-namespace datasets
-{
-class LargeDepthwiseConvolutionDataset final : public DepthwiseConvolutionDataset
-{
-public:
- LargeDepthwiseConvolutionDataset()
- {
- add_config(TensorShape(233U, 277U, 55U), TensorShape(3U, 3U, 55U), TensorShape(116U, 275U, 55U), PadStrideInfo(2, 1, 0, 0));
- add_config(TensorShape(333U, 277U, 77U), TensorShape(3U, 3U, 77U), TensorShape(111U, 138U, 77U), PadStrideInfo(3, 2, 1, 0));
- add_config(TensorShape(177U, 311U, 22U), TensorShape(3U, 3U, 22U), TensorShape(177U, 156U, 22U), PadStrideInfo(1, 2, 1, 1));
- add_config(TensorShape(233U, 277U, 55U), TensorShape(3U, 3U, 55U), TensorShape(231U, 138U, 55U), PadStrideInfo(1, 2, 0, 0));
- add_config(TensorShape(333U, 277U, 77U), TensorShape(3U, 3U, 77U), TensorShape(166U, 93U, 77U), PadStrideInfo(2, 3, 0, 1));
- add_config(TensorShape(177U, 311U, 22U), TensorShape(3U, 3U, 22U), TensorShape(89U, 311U, 22U), PadStrideInfo(2, 1, 1, 1));
- }
-};
-} // namespace datasets
-} // namespace test
-} // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_LARGE_DEPTHWISE_CONVOLUTION_DATASET */
diff --git a/tests/datasets/SmallDepthwiseConvolutionDataset.h b/tests/datasets/SmallDepthwiseConvolutionDataset.h
deleted file mode 100644
index 17d01fb5d6..0000000000
--- a/tests/datasets/SmallDepthwiseConvolutionDataset.h
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- * Copyright (c) 2017 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_TEST_SMALL_DEPTHWISE_CONVOLUTION_DATASET
-#define ARM_COMPUTE_TEST_SMALL_DEPTHWISE_CONVOLUTION_DATASET
-
-#include "tests/datasets/DepthwiseConvolutionDataset.h"
-
-#include "tests/TypePrinter.h"
-
-#include "arm_compute/core/TensorShape.h"
-#include "arm_compute/core/Types.h"
-
-namespace arm_compute
-{
-namespace test
-{
-namespace datasets
-{
-class SmallDepthwiseConvolutionDataset final : public DepthwiseConvolutionDataset
-{
-public:
- SmallDepthwiseConvolutionDataset()
- {
- add_config(TensorShape(23U, 27U, 5U), TensorShape(3U, 3U, 5U), TensorShape(11U, 25U, 5U), PadStrideInfo(2, 1, 0, 0));
- add_config(TensorShape(33U, 27U, 7U), TensorShape(3U, 3U, 7U), TensorShape(11U, 13U, 7U), PadStrideInfo(3, 2, 1, 0));
- add_config(TensorShape(17U, 31U, 2U), TensorShape(3U, 3U, 2U), TensorShape(17U, 16U, 2U), PadStrideInfo(1, 2, 1, 1));
- add_config(TensorShape(23U, 27U, 5U), TensorShape(3U, 3U, 5U), TensorShape(21U, 13U, 5U), PadStrideInfo(1, 2, 0, 0));
- add_config(TensorShape(33U, 27U, 7U), TensorShape(3U, 3U, 7U), TensorShape(16U, 9U, 7U), PadStrideInfo(2, 3, 0, 1));
- add_config(TensorShape(17U, 31U, 2U), TensorShape(3U, 3U, 2U), TensorShape(9U, 31U, 2U), PadStrideInfo(2, 1, 1, 1));
- }
-};
-} // namespace datasets
-} // namespace test
-} // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_SMALL_DEPTHWISE_CONVOLUTION_DATASET */
diff --git a/tests/validation/CL/DepthwiseConvolution.cpp b/tests/validation/CL/DepthwiseConvolution.cpp
index d689f95422..1646ab6157 100644
--- a/tests/validation/CL/DepthwiseConvolution.cpp
+++ b/tests/validation/CL/DepthwiseConvolution.cpp
@@ -27,8 +27,7 @@
#include "arm_compute/runtime/CL/functions/CLDepthwiseConvolution.h"
#include "tests/CL/CLAccessor.h"
#include "tests/PaddingCalculator.h"
-#include "tests/datasets/LargeDepthwiseConvolutionDataset.h"
-#include "tests/datasets/SmallDepthwiseConvolutionDataset.h"
+#include "tests/datasets/DepthwiseConvolutionDataset.h"
#include "tests/framework/Asserts.h"
#include "tests/framework/Macros.h"
#include "tests/framework/datasets/Datasets.h"
@@ -47,18 +46,18 @@ constexpr RelativeTolerance<float> tolerance_f32(0.01f); /**< Tolerance value fo
} // namespace
TEST_SUITE(CL)
-TEST_SUITE(DepthwiseConvolution)
+TEST_SUITE(DepthwiseConvolutionLayer)
template <typename T>
using CLDepthwiseConvolutionFixture = DepthwiseConvolutionValidationFixture<CLTensor, CLAccessor, CLDepthwiseConvolution, T>;
// FIXME: COMPMID-523 fix the bug in depthwise convolution
-DISABLED_FIXTURE_DATA_TEST_CASE(RunSmall, CLDepthwiseConvolutionFixture<float>, framework::DatasetMode::PRECOMMIT, datasets::SmallDepthwiseConvolutionDataset())
+FIXTURE_DATA_TEST_CASE(RunSmall, CLDepthwiseConvolutionFixture<float>, framework::DatasetMode::PRECOMMIT, datasets::SmallDepthwiseConvolutionDataset())
{
validate(CLAccessor(_target), _reference, tolerance_f32);
}
-DISABLED_FIXTURE_DATA_TEST_CASE(RunLarge, CLDepthwiseConvolutionFixture<float>, framework::DatasetMode::NIGHTLY, datasets::LargeDepthwiseConvolutionDataset())
+FIXTURE_DATA_TEST_CASE(RunLarge, CLDepthwiseConvolutionFixture<float>, framework::DatasetMode::NIGHTLY, datasets::LargeDepthwiseConvolutionDataset())
{
validate(CLAccessor(_target), _reference, tolerance_f32);
}
diff --git a/tests/validation/CPP/DepthwiseConvolution.cpp b/tests/validation/CPP/DepthwiseConvolution.cpp
index ce30bed640..ae54494c03 100644
--- a/tests/validation/CPP/DepthwiseConvolution.cpp
+++ b/tests/validation/CPP/DepthwiseConvolution.cpp
@@ -57,37 +57,42 @@ SimpleTensor<T> depthwise_convolution(const SimpleTensor<T> &src, const SimpleTe
const size_t input_width = src.shape().x();
const size_t input_height = src.shape().y();
const size_t input_depth = src.shape().z();
+ const int num_batches = src.shape().total_size() / (input_width * input_height * input_depth);
- const size_t filter_half_size = filter_width / 2;
- const size_t pad_x = std::min(filter_half_size, static_cast<size_t>(conv_info.pad().first));
- const size_t pad_y = std::min(filter_half_size, static_cast<size_t>(conv_info.pad().second));
- const size_t minimum_x = -pad_x + filter_half_size;
- const size_t minimum_y = -pad_y + filter_half_size;
+ const size_t filter_half_width = filter_width / 2;
+ const size_t filter_half_height = filter_height / 2;
+ const size_t pad_x = std::min(filter_half_width, static_cast<size_t>(conv_info.pad().first));
+ const size_t pad_y = std::min(filter_half_height, static_cast<size_t>(conv_info.pad().second));
+ const size_t minimum_x = -pad_x + filter_half_width;
+ const size_t minimum_y = -pad_y + filter_half_height;
int out_pos = 0;
- for(size_t z = 0; z < input_depth; ++z)
+ for(int r = 0; r < num_batches; ++r)
{
- for(size_t y = minimum_y; y < input_height + pad_y - filter_half_size; y += conv_info.stride().second)
+ for(size_t z = 0; z < input_depth; ++z)
{
- for(size_t x = minimum_x; x < input_width + pad_x - filter_half_size; x += conv_info.stride().first)
+ for(size_t y = minimum_y; y < input_height - minimum_y; y += conv_info.stride().second)
{
- Coordinates coords(static_cast<int>(x), static_cast<int>(y), static_cast<int>(z));
- size_t filter_offset = filter_plane * z;
-
- T val = 0;
- for(int j = y - filter_half_size; j <= static_cast<int>(y + filter_half_size); ++j)
+ for(size_t x = minimum_x; x < input_width - minimum_x; x += conv_info.stride().first)
{
- for(int i = x - filter_half_size; i <= static_cast<int>(x + filter_half_size); ++i)
+ Coordinates coords(static_cast<int>(x), static_cast<int>(y), static_cast<int>(z), static_cast<int>(r));
+ size_t filter_offset = filter_plane * z;
+
+ T val = 0;
+ for(int j = y - filter_half_height; j <= static_cast<int>(y + filter_half_height); ++j)
{
- coords.set(0, i);
- coords.set(1, j);
- val += *(weights.data() + filter_offset) * tensor_elem_at(src, coords, BorderMode::CONSTANT, 0.f);
- ++filter_offset;
+ for(int i = x - filter_half_width; i <= static_cast<int>(x + filter_half_width); ++i)
+ {
+ coords.set(0, i);
+ coords.set(1, j);
+ val += *(weights.data() + filter_offset) * tensor_elem_at(src, coords, BorderMode::CONSTANT, 0.f);
+ ++filter_offset;
+ }
}
+ coords.set(0, x);
+ coords.set(1, y);
+ dst[out_pos++] = saturate_cast<T>(val);
}
- coords.set(0, x);
- coords.set(1, y);
- dst[out_pos++] = saturate_cast<T>(val);
}
}
}