path: root/src
author      steniu01 <steven.niu@arm.com>               2017-07-18 17:37:43 +0100
committer   Anthony Barbier <anthony.barbier@arm.com>   2018-09-17 14:16:42 +0100
commit      27b386cb7596542a3296c32e41f7a5168b4d53be (patch)
tree        8c4eb09de748069f4426dd012798933fadc88e03 /src
parent      1b80b6c7255e41257fed3b4dd0fa018e2eeee4c2 (diff)
download    ComputeLibrary-27b386cb7596542a3296c32e41f7a5168b4d53be.tar.gz
COMPMID-355 Implement 3x3 CL direct convolution
Change-Id: I1b44dc375045964e65557f0ead57a7c12d6bf097
Reviewed-on: http://mpd-gerrit.cambridge.arm.com/81418
Tested-by: Kaizen <jeremy.johnson+kaizengerrit@arm.com>
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
Diffstat (limited to 'src')
-rw-r--r--   src/core/CL/CLKernelLibrary.cpp                            5
-rw-r--r--   src/core/CL/cl_kernels/direct_convolution.cl             227
-rw-r--r--   src/core/CL/kernels/CLDirectConvolutionLayerKernel.cpp   171
-rw-r--r--   src/runtime/CL/functions/CLDirectConvolutionLayer.cpp     50
4 files changed, 453 insertions, 0 deletions
diff --git a/src/core/CL/CLKernelLibrary.cpp b/src/core/CL/CLKernelLibrary.cpp
index 769d50992d..8f6ec20fc3 100644
--- a/src/core/CL/CLKernelLibrary.cpp
+++ b/src/core/CL/CLKernelLibrary.cpp
@@ -145,6 +145,7 @@ const std::map<std::string, std::string> CLKernelLibrary::_kernel_program_map =
{ "copy_to_keypoint", "fast_corners.cl" },
{ "derivative", "derivative.cl" },
{ "dilate", "dilate.cl" },
+ { "direct_convolution3x3", "direct_convolution.cl" },
{ "erode", "erode.cl" },
{ "fast_corners", "fast_corners.cl" },
{ "fill_image_borders_constant", "fill_border.cl" },
@@ -348,6 +349,10 @@ const std::map<std::string, std::string> CLKernelLibrary::_program_source_map =
#include "./cl_kernels/dilate.clembed"
},
{
+ "direct_convolution.cl",
+#include "./cl_kernels/direct_convolution.clembed"
+ },
+ {
"erode.cl",
#include "./cl_kernels/erode.clembed"
},
diff --git a/src/core/CL/cl_kernels/direct_convolution.cl b/src/core/CL/cl_kernels/direct_convolution.cl
new file mode 100644
index 0000000000..b5524e1d4b
--- /dev/null
+++ b/src/core/CL/cl_kernels/direct_convolution.cl
@@ -0,0 +1,227 @@
+/*
+ * Copyright (c) 2016, 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "helpers.h"
+
+#if STRIDE_X == 2
+#define CONVOLVE1x3(left_pixel_position, left_coeff, middle_coeff, right_coeff) convolution1x3_stride2(left_pixel_position, left_coeff, middle_coeff, right_coeff)
+#elif STRIDE_X == 1 /* STRIDE_X == 1 */
+#define CONVOLVE1x3(left_pixel_position, left_coeff, middle_coeff, right_coeff) convolution1x3_stride1(left_pixel_position, left_coeff, middle_coeff, right_coeff)
+#else /* STRIDE_X not equal to 1 or 2 */
+#error "STRIDE_X larger than 2 is not supported"
+#endif /* STRIDE_X == 2 */
+
+/** Compute a 1D horizontal convolution of size 3 with a stride of 1.
+ *
+ * @param[in] left_pixel Pointer to the left pixel.
+ * @param[in] left_coeff Weight of the left pixel
+ * @param[in] middle_coeff Weight of the middle pixel
+ * @param[in] right_coeff Weight of the right pixel
+ *
+ * @return the convolved values.
+ */
+inline VEC_DATA_TYPE(DATA_TYPE, 8) convolution1x3_stride1(__global const DATA_TYPE *left_pixel,
+ const DATA_TYPE left_coeff,
+ const DATA_TYPE middle_coeff,
+ const DATA_TYPE right_coeff)
+{
+ VEC_DATA_TYPE(DATA_TYPE, 16)
+ temp = vload16(0, left_pixel);
+
+ VEC_DATA_TYPE(DATA_TYPE, 8)
+ left = temp.s01234567;
+ VEC_DATA_TYPE(DATA_TYPE, 8)
+ middle = temp.s12345678;
+ VEC_DATA_TYPE(DATA_TYPE, 8)
+ right = temp.s23456789;
+
+ return left * (VEC_DATA_TYPE(DATA_TYPE, 8))left_coeff + middle * (VEC_DATA_TYPE(DATA_TYPE, 8))middle_coeff + right * (VEC_DATA_TYPE(DATA_TYPE, 8))right_coeff;
+}
+
+/** Compute a 1D horizontal convolution of size 3 with a stride of 2.
+ *
+ * @param[in] left_pixel Pointer to the left pixel.
+ * @param[in] left_coeff Weight of the left pixel
+ * @param[in] middle_coeff Weight of the middle pixel
+ * @param[in] right_coeff Weight of the right pixel
+ *
+ * @return the convolved values.
+ */
+inline VEC_DATA_TYPE(DATA_TYPE, 8) convolution1x3_stride2(__global const DATA_TYPE *left_pixel,
+ const DATA_TYPE left_coeff,
+ const DATA_TYPE middle_coeff,
+ const DATA_TYPE right_coeff)
+{
+ const int stride_size = 2;
+
+ VEC_DATA_TYPE(DATA_TYPE, 16)
+ temp1 = vload16(0, left_pixel);
+
+ VEC_DATA_TYPE(DATA_TYPE, 16)
+ temp2 = vload16(0, left_pixel + 8);
+
+ VEC_DATA_TYPE(DATA_TYPE, 8)
+ left = (VEC_DATA_TYPE(DATA_TYPE, 8))(temp1.s0246, temp2.s0246);
+
+ VEC_DATA_TYPE(DATA_TYPE, 8)
+ middle = (VEC_DATA_TYPE(DATA_TYPE, 8))(temp1.s1357, temp2.s1357);
+
+ VEC_DATA_TYPE(DATA_TYPE, 8)
+ right = (VEC_DATA_TYPE(DATA_TYPE, 8))(temp1.s2468, temp2.s2468);
+
+ return left * (VEC_DATA_TYPE(DATA_TYPE, 8))left_coeff + middle * (VEC_DATA_TYPE(DATA_TYPE, 8))middle_coeff + right * (VEC_DATA_TYPE(DATA_TYPE, 8))right_coeff;
+}
+
+/** Apply a 3x3 2D convolution matrix on the input and return the result.
+ *
+ * Convolution matrix layout:
+ *
+ * [ mat0, mat1, mat2 ]\n
+ * [ mat3, mat4, mat5 ]\n
+ * [ mat6, mat7, mat8 ]\n
+ *
+ * @param[in] src A pointer to source Image structure
+ * @param[in] mat0 Coefficient from the convolution matrix
+ * @param[in] mat1 Coefficient from the convolution matrix
+ * @param[in] mat2 Coefficient from the convolution matrix
+ * @param[in] mat3 Coefficient from the convolution matrix
+ * @param[in] mat4 Coefficient from the convolution matrix
+ * @param[in] mat5 Coefficient from the convolution matrix
+ * @param[in] mat6 Coefficient from the convolution matrix
+ * @param[in] mat7 Coefficient from the convolution matrix
+ * @param[in] mat8 Coefficient from the convolution matrix
+ *
+ * @return the convolved values.
+ */
+inline VEC_DATA_TYPE(DATA_TYPE, 8) convolution3x3(
+ Image *src,
+ const DATA_TYPE mat0, const DATA_TYPE mat1, const DATA_TYPE mat2,
+ const DATA_TYPE mat3, const DATA_TYPE mat4, const DATA_TYPE mat5,
+ const DATA_TYPE mat6, const DATA_TYPE mat7, const DATA_TYPE mat8)
+{
+ // Output pixels
+ VEC_DATA_TYPE(DATA_TYPE, 8)
+ pixels;
+
+ // Row 0
+ pixels = CONVOLVE1x3((__global DATA_TYPE *)offset(src, 0, 0), mat0, mat1, mat2);
+ // Row 1
+ pixels += CONVOLVE1x3((__global DATA_TYPE *)offset(src, 0, 1), mat3, mat4, mat5);
+ // Row 2
+ pixels += CONVOLVE1x3((__global DATA_TYPE *)offset(src, 0, 2), mat6, mat7, mat8);
+
+ return pixels;
+}
+
+/** This kernel performs a direct convolution to convolve the lower three dimensions.
+ *
+ * @note The data type must be passed at compile time using -DDATA_TYPE: e.g. -DDATA_TYPE=float
+ * @note The convolution stride x and stride y must be passed at compile time using -DSTRIDE_X and -DSTRIDE_Y: e.g. -DSTRIDE_X=1, -DSTRIDE_Y=1
+ * @note In case biases will be added to the convolution, -DHAS_BIAS has to be passed at compile time.
+ *
+ * @param[in] src_ptr Pointer to the source tensor. Supported data types: QS8/F16/F32
+ * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
+ * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
+ * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
+ * @param[out] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
+ * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
+ * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ * @param[in] weights_ptr Pointer to the weights tensor. Supported data types: same as @p src_ptr
+ * @param[in] weights_stride_x Stride of the weights tensor in X dimension (in bytes)
+ * @param[in] weights_step_x weights_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] weights_stride_y Stride of the weights tensor in Y dimension (in bytes)
+ * @param[in] weights_step_y weights_stride_y * number of elements along y processed per workitem(in bytes)
+ * @param[in] weights_stride_z Stride of the weights tensor in Z dimension (in bytes)
+ * @param[in] weights_step_z weights_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] weights_offset_first_element_in_bytes The offset of the first element in the weights tensor
+ * @param[in] biases_ptr Pointer to the biases tensor. Supported data types: same as @p src_ptr
+ * @param[in] biases_stride_x Stride of the biases tensor in X dimension (in bytes)
+ * @param[in] biases_step_x biases_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] biases_offset_first_element_in_bytes The offset of the first element in the biases tensor
+ * @param[in] weights_stride_w Stride of the weights tensor in W dimension
+ * @param[in] filter_depth The depth size of the filter
+ */
+__kernel void direct_convolution3x3(
+ TENSOR3D_DECLARATION(src),
+ TENSOR3D_DECLARATION(dst),
+ TENSOR3D_DECLARATION(weights),
+#ifdef HAS_BIAS
+ VECTOR_DECLARATION(biases),
+#endif /* defined(HAS_BIAS) */
+ unsigned int weights_stride_w,
+ unsigned int filter_depth)
+{
+ Image src = CONVERT_TO_IMAGE_STRUCT(src);
+ Tensor3D weights = CONVERT_TO_TENSOR3D_STRUCT_NO_STEP(weights);
+ Tensor3D dst = CONVERT_TO_TENSOR3D_STRUCT(dst);
+
+#ifdef HAS_BIAS
+ Vector biases = CONVERT_TO_VECTOR_STRUCT_NO_STEP(biases);
+#endif /* defined(HAS_BIAS) */
+
+ VEC_DATA_TYPE(DATA_TYPE, 8)
+ pixels = 0;
+
+ const uint z_index = get_global_id(2);
+
+ weights.ptr += z_index * weights_stride_w;
+
+ for(int d = 0; d < filter_depth; ++d)
+ {
+ VEC_DATA_TYPE(DATA_TYPE, 4)
+ weights_row1 = vload4(0, (__global DATA_TYPE *)tensor3D_offset(&weights, 0, 0, 0));
+ VEC_DATA_TYPE(DATA_TYPE, 4)
+ weights_row2 = vload4(0, (__global DATA_TYPE *)tensor3D_offset(&weights, 0, 1, 0));
+ VEC_DATA_TYPE(DATA_TYPE, 4)
+ weights_row3 = vload4(0, (__global DATA_TYPE *)tensor3D_offset(&weights, 0, 2, 0));
+
+ pixels += convolution3x3(&src, weights_row1.s0,
+ weights_row1.s1,
+ weights_row1.s2,
+ weights_row2.s0,
+ weights_row2.s1,
+ weights_row2.s2,
+ weights_row3.s0,
+ weights_row3.s1,
+ weights_row3.s2);
+
+ src.ptr += src_stride_z;
+ weights.ptr += weights_stride_z;
+ }
+
+#ifdef HAS_BIAS
+ pixels += (VEC_DATA_TYPE(DATA_TYPE, 8)) * ((__global DATA_TYPE *)(vector_offset(&biases, z_index)));
+#endif /* defined(HAS_BIAS) */
+
+ vstore8(pixels, 0, (__global DATA_TYPE *)dst.ptr);
+}
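For reference, the two vectorised 1x3 helpers in direct_convolution.cl above each produce 8 output pixels per call; the following is a scalar C++ sketch of the equivalent computation (illustrative only, not part of the patch):

// Scalar equivalent of convolution1x3_stride1 / convolution1x3_stride2:
// each call produces 8 outputs, out[i] = in[i*S]*l + in[i*S + 1]*m + in[i*S + 2]*r for S = STRIDE_X.
template <int StrideX>
void convolution1x3_ref(const float *left_pixel, float left_coeff, float middle_coeff, float right_coeff, float *out)
{
    for(int i = 0; i < 8; ++i)
    {
        const float *p = left_pixel + i * StrideX;
        out[i] = p[0] * left_coeff + p[1] * middle_coeff + p[2] * right_coeff;
    }
}
// convolution3x3 then sums three such rows, and direct_convolution3x3 accumulates that sum
// over the filter depth before adding the per-filter bias when HAS_BIAS is defined.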
diff --git a/src/core/CL/kernels/CLDirectConvolutionLayerKernel.cpp b/src/core/CL/kernels/CLDirectConvolutionLayerKernel.cpp
new file mode 100644
index 0000000000..7f9e9d20e1
--- /dev/null
+++ b/src/core/CL/kernels/CLDirectConvolutionLayerKernel.cpp
@@ -0,0 +1,171 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/core/CL/kernels/CLDirectConvolutionLayerKernel.h"
+
+#include "arm_compute/core/AccessWindowStatic.h"
+#include "arm_compute/core/CL/CLHelpers.h"
+#include "arm_compute/core/CL/CLKernelLibrary.h"
+#include "arm_compute/core/CL/ICLTensor.h"
+#include "arm_compute/core/Error.h"
+#include "arm_compute/core/Helpers.h"
+#include "arm_compute/core/IAccessWindow.h"
+#include "arm_compute/core/ITensor.h"
+#include "arm_compute/core/Types.h"
+#include "arm_compute/core/Validate.h"
+#include "support/ToolchainSupport.h"
+
+using namespace arm_compute;
+
+template <unsigned int kernel_size>
+CLDirectConvolutionLayerKernel<kernel_size>::CLDirectConvolutionLayerKernel()
+ : _input(nullptr), _biases(nullptr), _weights(nullptr), _output(nullptr), _border_size(0), _conv_pad_x(0), _conv_pad_y(0), _conv_stride_x(0), _conv_stride_y(0)
+{
+}
+
+template <unsigned int kernel_size>
+BorderSize CLDirectConvolutionLayerKernel<kernel_size>::border_size() const
+{
+ return _border_size;
+}
+
+template <unsigned int kernel_size>
+void CLDirectConvolutionLayerKernel<kernel_size>::configure(const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info)
+{
+ static_assert(kernel_size == 3, "Currently only 3x3 direct convolution is supported!");
+
+ ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F32);
+ ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input, weights, output);
+ ARM_COMPUTE_ERROR_ON(weights->info()->dimension(2) != input->info()->dimension(2));
+ ARM_COMPUTE_ERROR_ON(weights->info()->dimension(0) != weights->info()->dimension(1));
+ ARM_COMPUTE_ERROR_ON(weights->info()->num_dimensions() > 4);
+ ARM_COMPUTE_ERROR_ON_MSG((kernel_size == 3 && std::get<0>(conv_info.stride()) > 2), "Strides larger than 2 not supported in 3x3 direct convolution!");
+
+ ARM_COMPUTE_ERROR_ON(kernel_size != weights->info()->dimension(0));
+
+ if(biases != nullptr)
+ {
+ ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(weights, biases);
+ ARM_COMPUTE_ERROR_ON(biases->info()->dimension(0) != weights->info()->dimension(3));
+ ARM_COMPUTE_ERROR_ON(biases->info()->num_dimensions() > 1);
+ }
+
+ _conv_stride_x = std::get<0>(conv_info.stride());
+ _conv_stride_y = std::get<1>(conv_info.stride());
+ _conv_pad_x = std::get<0>(conv_info.pad());
+ _conv_pad_y = std::get<1>(conv_info.pad());
+
+ _input = input;
+ _weights = weights;
+ _output = output;
+ _biases = biases;
+ _border_size = BorderSize(_conv_pad_y, _conv_pad_x);
+
+ std::stringstream kernel_name;
+ std::set<std::string> options;
+ kernel_name << "direct_convolution" << kernel_size << "x" << kernel_size;
+
+ options.insert("-DDATA_TYPE=" + get_cl_type_from_data_type(input->info()->data_type()));
+
+ options.emplace("-DSTRIDE_X=" + support::cpp11::to_string(_conv_stride_x));
+
+ if(_biases != nullptr)
+ {
+ options.emplace("-DHAS_BIAS");
+ }
+
+ _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name.str(), options));
+
+ unsigned int idx = (_biases == nullptr) ? 3 * num_arguments_per_3D_tensor() : (num_arguments_per_1D_tensor() + 3 * num_arguments_per_3D_tensor());
+ _kernel.setArg<cl_uint>(idx++, _weights->info()->strides_in_bytes()[3]); // weights_stride_w
+ _kernel.setArg<cl_uint>(idx++, _weights->info()->dimension(2)); // filter depth
+
+ // Using this local workgroup size gives better performance than others that have been tried.
+ _lws_hint = cl::NDRange(4, 1, 8);
+
+ // Configure kernel window
+ Window win = calculate_max_window(*output->info());
+
+ unsigned int num_elems_read_per_iteration = 16 * _conv_stride_x;
+ unsigned int num_elems_written_per_iteration = 8;
+
+ // Calculate right and bottom border
+ const int input_width = input->info()->dimension(0);
+ const int input_height = input->info()->dimension(1);
+ const int upper_bound_w = ceil_to_multiple(((output->info()->dimension(0) - 1) * _conv_stride_x + kernel_size), num_elems_read_per_iteration) - _conv_pad_x - input_width;
+ const int upper_bound_h = ((output->info()->dimension(1) - 1) * _conv_stride_y - _conv_pad_y + kernel_size) - input_height;
+ const int padding_right = std::max(upper_bound_w, static_cast<int>(kernel_size));
+ const int padding_bottom = std::max(upper_bound_h, static_cast<int>(kernel_size));
+
+ // Create window and update padding
+ win = calculate_max_window(*output->info(), Steps(num_elems_written_per_iteration));
+ AccessWindowStatic input_access(input->info(), -_conv_pad_x, -_conv_pad_y, input_width + padding_right, input_height + padding_bottom);
+
+ AccessWindowStatic weights_access(weights->info(), 0, 0, kernel_size, kernel_size);
+ AccessWindowHorizontal output_access(output->info(), 0, num_elems_written_per_iteration);
+ update_window_and_padding(win, input_access, weights_access, output_access);
+
+ output_access.set_valid_region(win, ValidRegion(Coordinates(), output->info()->tensor_shape()));
+
+ ICLKernel::configure(win);
+}
+
+template <unsigned int kernel_size>
+void CLDirectConvolutionLayerKernel<kernel_size>::run(const Window &window, cl::CommandQueue &queue)
+{
+ ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
+ ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(IKernel::window(), window);
+
+ // Get initial windows
+ Window slice = window.first_slice_window_3D();
+ Window win_in = window;
+
+ win_in.adjust(Window::DimX, -_conv_pad_x, true);
+ win_in.adjust(Window::DimY, -_conv_pad_y, true);
+ win_in.set_dimension_step(Window::DimX, window.x().step() * _conv_stride_x);
+ win_in.set_dimension_step(Window::DimY, window.y().step() * _conv_stride_y);
+
+ Window slice_in = win_in.first_slice_window_3D();
+
+ unsigned int idx1 = 2 * num_arguments_per_3D_tensor();
+ add_3D_tensor_argument(idx1, _weights, slice);
+
+ if(_biases != nullptr)
+ {
+ Window slice_biases;
+ slice_biases.use_tensor_dimensions(_biases->info());
+ add_1D_tensor_argument(idx1, _biases, slice_biases);
+ }
+
+ do
+ {
+ unsigned int idx = 0;
+ add_3D_tensor_argument(idx, _input, slice_in);
+ add_3D_tensor_argument(idx, _output, slice);
+
+ enqueue(queue, *this, slice, _lws_hint);
+ }
+ while(window.slide_window_slice_3D(slice) && win_in.slide_window_slice_3D(slice_in));
+}
+
+template class arm_compute::CLDirectConvolutionLayerKernel<3>;
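As a worked example of the border arithmetic in configure() above, assuming a 32x32 F32 input, kernel_size 3, stride 1 and pad 1 (numbers chosen purely for illustration, not taken from the patch):

// Padding computed by configure() for input 32x32, kernel_size 3, stride 1, pad 1:
//   num_elems_read_per_iteration = 16 * 1                            = 16
//   upper_bound_w  = ceil_to_multiple((32 - 1) * 1 + 3, 16) - 1 - 32 = 48 - 33 = 15
//   upper_bound_h  = (32 - 1) * 1 - 1 + 3 - 32                       = 1
//   padding_right  = max(15, 3) = 15
//   padding_bottom = max(1, 3)  = 3
// The AccessWindowStatic on the input therefore spans x in [-1, 32 + 15) and y in [-1, 32 + 3),
// i.e. the right border is padded up to the 16-element vector read performed by the kernel.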
diff --git a/src/runtime/CL/functions/CLDirectConvolutionLayer.cpp b/src/runtime/CL/functions/CLDirectConvolutionLayer.cpp
new file mode 100644
index 0000000000..65be417afb
--- /dev/null
+++ b/src/runtime/CL/functions/CLDirectConvolutionLayer.cpp
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/runtime/CL/functions/CLDirectConvolutionLayer.h"
+
+#include "arm_compute/core/CL/kernels/CLDirectConvolutionLayerKernel.h"
+#include "arm_compute/core/PixelValue.h"
+#include "arm_compute/core/Utils.h"
+#include "arm_compute/core/Validate.h"
+#include "arm_compute/runtime/CL/CLScheduler.h"
+
+using namespace arm_compute;
+
+CLDirectConvolutionLayer::CLDirectConvolutionLayer()
+ : _direct_conv_kernel(), _input_border_handler()
+{
+}
+
+void CLDirectConvolutionLayer::configure(ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info)
+{
+ _direct_conv_kernel.configure(input, weights, biases, output, conv_info);
+
+ _input_border_handler.configure(input, _direct_conv_kernel.border_size(), BorderMode::CONSTANT, PixelValue(0));
+}
+
+void CLDirectConvolutionLayer::run()
+{
+ CLScheduler::get().enqueue(_input_border_handler, false);
+ CLScheduler::get().enqueue(_direct_conv_kernel);
+}
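Finally, a minimal host-side sketch of how the new CLDirectConvolutionLayer function could be driven; the tensor shapes, the main() scaffolding and the fill step are illustrative assumptions, not part of the patch:

// Minimal usage sketch (assumed shapes and setup, for illustration only):
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "arm_compute/runtime/CL/CLTensor.h"
#include "arm_compute/runtime/CL/functions/CLDirectConvolutionLayer.h"

using namespace arm_compute;

int main()
{
    CLScheduler::get().default_init();

    // 32x32 input with 16 channels, 8 filters of size 3x3x16, stride 1, pad 1 -> 32x32x8 output
    CLTensor src, weights, biases, dst;
    src.allocator()->init(TensorInfo(TensorShape(32U, 32U, 16U), 1, DataType::F32));
    weights.allocator()->init(TensorInfo(TensorShape(3U, 3U, 16U, 8U), 1, DataType::F32));
    biases.allocator()->init(TensorInfo(TensorShape(8U), 1, DataType::F32));
    dst.allocator()->init(TensorInfo(TensorShape(32U, 32U, 8U), 1, DataType::F32));

    CLDirectConvolutionLayer conv;
    conv.configure(&src, &weights, &biases, &dst, PadStrideInfo(1 /* stride_x */, 1 /* stride_y */, 1 /* pad_x */, 1 /* pad_y */));

    // Allocate after configure() so the padding requested by the kernel is taken into account
    src.allocator()->allocate();
    weights.allocator()->allocate();
    biases.allocator()->allocate();
    dst.allocator()->allocate();

    // ... map the tensors and fill src/weights/biases here ...

    conv.run();
    CLScheduler::get().sync();
    return 0;
}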