author     Dmitry Savenko <dsavenko@xored.com>          2017-11-20 22:00:08 +0700
committer  Anthony Barbier <anthony.barbier@arm.com>    2018-11-02 16:35:24 +0000
commit     d7295b7079f6b9126596cea998146ca9c6e87706 (patch)
tree       bcefca66765ec120090c437621388debe70ae21d /src
parent     900b78f599ea5997d60e7538831a906b92265ae0 (diff)
download   ComputeLibrary-d7295b7079f6b9126596cea998146ca9c6e87706.tar.gz
COMPMID-661: Add QASYMM8 support (and basic tests) to CLDepthwiseConvolution3x3 kernel (#28)
Change-Id: I51bebe74e3814c1245812ad575fe7854d460674f
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/109864
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
Tested-by: BSG Visual Compute Jenkins server to access repositories on http://mpd-gerrit.cambridge.arm.com <bsgcomp@arm.com>
Diffstat (limited to 'src')
-rw-r--r--  src/core/CL/CLKernelLibrary.cpp                            |   5
-rw-r--r--  src/core/CL/cl_kernels/depthwise_convolution_quantized.cl  | 258
-rw-r--r--  src/core/CL/cl_kernels/helpers_asymm.h                     |   2
-rw-r--r--  src/core/CL/kernels/CLDepthwiseConvolution3x3Kernel.cpp    |  45
-rw-r--r--  src/runtime/CL/functions/CLDepthwiseConvolution.cpp        |   6
5 files changed, 303 insertions, 13 deletions
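
For orientation, a minimal scalar sketch (plain C++, names illustrative, not part of the library) of the arithmetic the new quantized kernel performs. A QASYMM8 value q represents scale * (q - zero_point); the kernel accumulates in 32-bit integers and requantizes into the output range. The host side passes the input and weights zero points negated, so the kernel simply adds them.

#include <algorithm>
#include <cmath>
#include <cstdint>

// Reference for one output of a 3x3 depthwise convolution on QASYMM8 data.
// real_multiplier = input_scale * weights_scale / output_scale; the kernel applies it
// as a fixed-point multiplier plus right shift instead of this float multiply.
inline uint8_t depthwise3x3_qasymm8_ref(const uint8_t x[3][3], const uint8_t w[3][3],
                                        int input_zero_point, int weight_zero_point,
                                        int32_t bias, float real_multiplier, int output_zero_point)
{
    int32_t acc = bias;
    for(int i = 0; i < 3; ++i)
    {
        for(int j = 0; j < 3; ++j)
        {
            acc += (int32_t(x[i][j]) - input_zero_point) * (int32_t(w[i][j]) - weight_zero_point);
        }
    }
    const int out = int(std::lround(acc * real_multiplier)) + output_zero_point;
    return uint8_t(std::min(255, std::max(0, out)));
}
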
diff --git a/src/core/CL/CLKernelLibrary.cpp b/src/core/CL/CLKernelLibrary.cpp
index 94cc02a705..9a2bb81708 100644
--- a/src/core/CL/CLKernelLibrary.cpp
+++ b/src/core/CL/CLKernelLibrary.cpp
@@ -187,6 +187,7 @@ const std::map<std::string, std::string> CLKernelLibrary::_kernel_program_map =
{ "copy_planes_3p", "channel_combine.cl" },
{ "copy_to_keypoint", "fast_corners.cl" },
{ "depthwise_convolution_3x3", "depthwise_convolution.cl" },
+ { "depthwise_convolution_3x3_quantized", "depthwise_convolution_quantized.cl" },
{ "depthwise_im2col", "depthwise_convolution.cl" },
{ "depthwise_vector_to_tensor", "depthwise_convolution.cl" },
{ "depthwise_weights_reshape", "depthwise_convolution.cl" },
@@ -419,6 +420,10 @@ const std::map<std::string, std::string> CLKernelLibrary::_program_source_map =
#include "./cl_kernels/depthwise_convolution.clembed"
},
{
+ "depthwise_convolution_quantized.cl",
+#include "./cl_kernels/depthwise_convolution_quantized.clembed"
+ },
+ {
"dequantization_layer.cl",
#include "./cl_kernels/dequantization_layer.clembed"
},
diff --git a/src/core/CL/cl_kernels/depthwise_convolution_quantized.cl b/src/core/CL/cl_kernels/depthwise_convolution_quantized.cl
new file mode 100644
index 0000000000..19a509bd0a
--- /dev/null
+++ b/src/core/CL/cl_kernels/depthwise_convolution_quantized.cl
@@ -0,0 +1,258 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "helpers_asymm.h"
+
+#if defined(CONV_STRIDE_X)
+
+#if CONV_STRIDE_X == 1
+#define convolution1x3 convolution1x3_stride_1
+#elif CONV_STRIDE_X == 2
+#define convolution1x3 convolution1x3_stride_2
+#elif CONV_STRIDE_X == 3
+#define convolution1x3 convolution1x3_stride_3
+#else /* CONV_STRIDE_X */
+#error "Stride not supported"
+#endif /* CONV_STRIDE_X */
+
+/** Compute a 1D horizontal convolution of size 3 and stride 1 for uchar type.
+ *
+ * @param[in] left_pixel Pointer to the left pixel.
+ * @param[in] left_coeff Weight of the left pixel
+ * @param[in] middle_coeff Weight of the middle pixel
+ * @param[in] right_coeff Weight of the right pixel
+ * @param[in] input_offset Quantized offset of zero point of the input tensor data range
+ * @param[in] weight_offset Quantized offset of zero point of the weights tensor data range
+ *
+ * @return an int2 containing 2 convolved values.
+ */
+inline int2 convolution1x3_stride_1(__global const uchar *left_pixel,
+ const int left_coeff,
+ const int middle_coeff,
+ const int right_coeff,
+ const int input_offset,
+ const int weight_offset)
+{
+ int4 temp = CONVERT(vload4(0, left_pixel), int4);
+
+ int2 left = CONVERT(temp.s01, int2);
+ int2 middle = CONVERT(temp.s12, int2);
+ int2 right = CONVERT(temp.s23, int2);
+
+ return (left + input_offset) * (int2)(left_coeff + weight_offset) + (middle + input_offset) * (int2)(middle_coeff + weight_offset) + (right + input_offset) * (int2)(right_coeff + weight_offset);
+}
+
+/** Compute a 1D horizontal convolution of size 3 and stride 2 for uchar type.
+ *
+ * @param[in] left_pixel Pointer to the left pixel.
+ * @param[in] left_coeff Weight of the left pixel
+ * @param[in] middle_coeff Weight of the middle pixel
+ * @param[in] right_coeff Weight of the right pixel
+ * @param[in] input_offset Quantized offset of zero point of the input tensor data range
+ * @param[in] weight_offset Quantized offset of zero point of the weights tensor data range
+ *
+ * @return an int2 containing 2 convolved values.
+ */
+inline int2 convolution1x3_stride_2(__global const uchar *left_pixel,
+ const int left_coeff,
+ const int middle_coeff,
+ const int right_coeff,
+ const int input_offset,
+ const int weight_offset)
+{
+ int4 temp0 = CONVERT(vload4(0, left_pixel), int4);
+ int temp1 = CONVERT(*(left_pixel + 4 * sizeof(uchar)), int);
+
+ int2 left = CONVERT(temp0.s02, int2);
+ int2 middle = CONVERT(temp0.s13, int2);
+ int2 right = CONVERT((int2)(temp0.s2, temp1), int2);
+
+ return (left + input_offset) * (int2)(left_coeff + weight_offset) + (middle + input_offset) * (int2)(middle_coeff + weight_offset) + (right + input_offset) * (int2)(right_coeff + weight_offset);
+}
+
+/** Compute a 1D horizontal convolution of size 3 and stride 3 for uchar type.
+ *
+ * @param[in] left_pixel Pointer to the left pixel.
+ * @param[in] left_coeff Weight of the left pixel
+ * @param[in] middle_coeff Weight of the middle pixel
+ * @param[in] right_coeff Weight of the right pixel
+ * @param[in] input_offset Quantized offset of zero point of the input tensor data range
+ * @param[in] weight_offset Quantized offset of zero point of the weights tensor data range
+ *
+ * @return an int2 containing 2 convolved values.
+ */
+inline int2 convolution1x3_stride_3(__global const uchar *left_pixel,
+ const int left_coeff,
+ const int middle_coeff,
+ const int right_coeff,
+ const int input_offset,
+ const int weight_offset)
+{
+ int4 temp0 = CONVERT(vload4(0, left_pixel), int4);
+ int2 temp1 = CONVERT(vload2(0, (left_pixel + 4 * sizeof(uchar))), int2);
+
+ int2 left = CONVERT(temp0.s03, int2);
+ int2 middle = CONVERT((int2)(temp0.s1, temp1.s0), int2);
+ int2 right = CONVERT((int2)(temp0.s2, temp1.s1), int2);
+
+ return (left + input_offset) * (int2)(left_coeff + weight_offset) + (middle + input_offset) * (int2)(middle_coeff + weight_offset) + (right + input_offset) * (int2)(right_coeff + weight_offset);
+}
+
+/** Apply a 3x3 convolution matrix to a single channel QASYMM8 input image and return the result.
+ *
+ * Convolution matrix layout:
+ *
+ * [ mat0, mat1, mat2 ]\n
+ * [ mat3, mat4, mat5 ]\n
+ * [ mat6, mat7, mat8 ]\n
+ *
+ * @param[in] src A pointer to source Image structure
+ * @param[in] mat0 Coefficient from the convolution matrix
+ * @param[in] mat1 Coefficient from the convolution matrix
+ * @param[in] mat2 Coefficient from the convolution matrix
+ * @param[in] mat3 Coefficient from the convolution matrix
+ * @param[in] mat4 Coefficient from the convolution matrix
+ * @param[in] mat5 Coefficient from the convolution matrix
+ * @param[in] mat6 Coefficient from the convolution matrix
+ * @param[in] mat7 Coefficient from the convolution matrix
+ * @param[in] mat8 Coefficient from the convolution matrix
+ * @param[in] input_offset Quantized offset of zero point of the input tensor data range
+ * @param[in] weight_offset Quantized offset of zero point of the weights tensor data range
+ * @param[in] output_offset Quantized offset of zero point of the output tensor data range
+ * @param[in] output_multiplier Output scale multiplier
+ * @param[in] output_shift Output scale divisor exponent
+ * @param[in] bias (Optional) Bias value
+ *
+ * @return a uchar2 containing 2 convolved values.
+ */
+inline uchar2 convolution3x3(
+ Image *src,
+ const uchar mat0, const uchar mat1, const uchar mat2,
+ const uchar mat3, const uchar mat4, const uchar mat5,
+ const uchar mat6, const uchar mat7, const uchar mat8,
+ const int input_offset, const int weight_offset, const int output_offset,
+ const int output_multiplier, const int output_shift
+#if defined(HAS_BIAS)
+ ,
+ const int bias
+#endif //defined(HAS_BIAS)
+)
+{
+ int2 pixels;
+
+ pixels = convolution1x3(offset(src, 0, 0), mat0, mat1, mat2, input_offset, weight_offset);
+ pixels += convolution1x3(offset(src, 0, 1), mat3, mat4, mat5, input_offset, weight_offset);
+ pixels += convolution1x3(offset(src, 0, 2), mat6, mat7, mat8, input_offset, weight_offset);
+#if defined(HAS_BIAS)
+ pixels += (int2)(bias);
+#endif //defined(HAS_BIAS)
+
+ pixels = ASYMM_MULT_BY_QUANT_MULTIPLIER_LESS_THAN_ONE(pixels, output_multiplier, output_shift, 2);
+ pixels = pixels + output_offset;
+ pixels = clamp(pixels, 0, 255);
+
+ return CONVERT(pixels, uchar2);
+}
+
+/** This function computes a 3x3 depthwise convolution on a QASYMM8 input tensor.
+ *
+ * @param[in] src_ptr Pointer to the source image. Supported data types: QASYMM8
+ * @param[in] src_stride_x Stride of the source image in X dimension (in bytes)
+ * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] src_stride_y Stride of the source image in Y dimension (in bytes)
+ * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source image
+ * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] dst_ptr Pointer to the destination tensor. Supported data types: QASYMM8
+ * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
+ * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ * @param[in] weights_ptr Pointer to the weights tensor. Supported data types: QASYMM8
+ * @param[in] weights_stride_x Stride of the weights tensor in X dimension (in bytes)
+ * @param[in] weights_step_x weights_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] weights_stride_y Stride of the weights tensor in Y dimension (in bytes)
+ * @param[in] weights_step_y weights_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] weights_stride_z Stride of the weights tensor in Z dimension (in bytes)
+ * @param[in] weights_step_z weights_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] weights_offset_first_element_in_bytes The offset of the first element in the weights tensor
+ * @param[in] biases_ptr (Optional) Pointer to the biases vector. Supported data types: QASYMM8
+ * @param[in] biases_stride_x (Optional) Stride of the biases vector in X dimension (in bytes)
+ * @param[in] biases_step_x (Optional) biases_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] biases_offset_first_element_in_bytes (Optional) The offset of the first element in the biases vector
+ * @param[in] input_offset Quantized offset of zero point of the input tensor data range
+ * @param[in] weight_offset Quantized offset of zero point of the weights tensor data range
+ * @param[in] output_offset Quantized offset of zero point of the output tensor data range
+ * @param[in] output_multiplier Output scale multiplier
+ * @param[in] output_shift Output scale divisor exponent
+ */
+
+__kernel void depthwise_convolution_3x3_quantized(
+ TENSOR3D_DECLARATION(src),
+ TENSOR3D_DECLARATION(dst),
+ TENSOR3D_DECLARATION(weights),
+#if defined(HAS_BIAS)
+ VECTOR_DECLARATION(biases),
+#endif //defined(HAS_BIAS)
+ int input_offset,
+ int weight_offset,
+ int output_offset,
+ int output_multiplier,
+ int output_shift)
+{
+ Image src = CONVERT_TENSOR3D_TO_IMAGE_STRUCT(src);
+ Image dst = CONVERT_TENSOR3D_TO_IMAGE_STRUCT(dst);
+ Tensor3D weights = CONVERT_TO_TENSOR3D_STRUCT(weights);
+#if defined(HAS_BIAS)
+ Vector biases = CONVERT_TO_VECTOR_STRUCT_NO_STEP(biases);
+#endif //defined(HAS_BIAS)
+
+ uchar3 offset = (uchar3)(0, 1, 2) * (uchar3)weights_stride_y;
+ uchar3 weights_values0 = vload3(0, weights.ptr + offset.s0);
+ uchar3 weights_values1 = vload3(0, weights.ptr + offset.s1);
+ uchar3 weights_values2 = vload3(0, weights.ptr + offset.s2);
+
+#if defined(HAS_BIAS)
+ int bias_value = *((__global int *)(vector_offset(&biases, get_global_id(2))));
+#endif //defined(HAS_BIAS)
+
+ uchar2 pixels = convolution3x3(&src, weights_values0.s0, weights_values0.s1, weights_values0.s2,
+ weights_values1.s0, weights_values1.s1, weights_values1.s2,
+ weights_values2.s0, weights_values2.s1, weights_values2.s2,
+ input_offset, weight_offset, output_offset,
+ output_multiplier, output_shift
+#if defined(HAS_BIAS)
+ ,
+ bias_value
+#endif //defined(HAS_BIAS)
+ );
+
+ vstore2(pixels, 0, dst.ptr);
+}
+
+#endif //defined(CONV_STRIDE_X)
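
The three stride variants above differ only in which input columns feed each of the two outputs computed per work item. A plain C++ sketch of that indexing (illustrative only, not the library code):

#include <cstdint>
#include <utility>

// Two outputs of a 1x3 horizontal pass; `stride` selects the column step between them,
// mirroring the .s01/.s12/.s23 (stride 1), .s02/.s13 (stride 2) and .s03 (stride 3) swizzles.
// input_offset and weight_offset are the (negated) zero points, as in the kernel.
inline std::pair<int32_t, int32_t> conv1x3_ref(const uint8_t *left_pixel, int stride,
                                               int32_t left_coeff, int32_t middle_coeff, int32_t right_coeff,
                                               int32_t input_offset, int32_t weight_offset)
{
    const int32_t coeffs[3] = { left_coeff, middle_coeff, right_coeff };
    auto dot = [&](int base)
    {
        int32_t acc = 0;
        for(int k = 0; k < 3; ++k)
        {
            acc += (int32_t(left_pixel[base + k]) + input_offset) * (coeffs[k] + weight_offset);
        }
        return acc;
    };
    return { dot(0), dot(stride) };
}
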
diff --git a/src/core/CL/cl_kernels/helpers_asymm.h b/src/core/CL/cl_kernels/helpers_asymm.h
index 3c1d58bda1..b44d0f1fd2 100644
--- a/src/core/CL/cl_kernels/helpers_asymm.h
+++ b/src/core/CL/cl_kernels/helpers_asymm.h
@@ -44,6 +44,7 @@
return (x >> exponent) + select(zero, one, (x & mask) > threshold); \
}
+ASYMM_ROUNDING_DIVIDE_BY_POW2_IMPL(2)
ASYMM_ROUNDING_DIVIDE_BY_POW2_IMPL(8)
ASYMM_ROUNDING_DIVIDE_BY_POW2_IMPL(16)
@@ -80,6 +81,7 @@ ASYMM_ROUNDING_DIVIDE_BY_POW2_IMPL(16)
return select(ab_x2_high32, INT_MAX, overflow); \
}
+ASYMM_MULT_IMPL(2)
ASYMM_MULT_IMPL(8)
ASYMM_MULT_IMPL(16)
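
The kernel operates on int2 vectors, hence the new 2-element instantiations of these helpers. As a reference, a scalar C++ sketch of what the two macros expand to (hedged; the library's OpenCL macros generate the vectorized forms, and only part of their bodies is visible in this diff):

#include <climits>
#include <cstdint>

// Rounding (away from zero on ties) division by a power of two.
inline int32_t asymm_rounding_divide_by_pow2(int32_t x, int exponent)
{
    const int32_t mask      = (1 << exponent) - 1;
    const int32_t threshold = (mask >> 1) + (x < 0 ? 1 : 0);
    return (x >> exponent) + (((x & mask) > threshold) ? 1 : 0);
}

// Saturating rounding doubling high multiply of two Q31 fixed-point values.
inline int32_t asymm_mult(int32_t a, int32_t b)
{
    const bool    overflow     = (a == b) && (a == INT32_MIN);
    const int64_t ab_64        = int64_t(a) * int64_t(b);
    const int32_t nudge        = ab_64 >= 0 ? (1 << 30) : (1 - (1 << 30));
    const int32_t ab_x2_high32 = int32_t((ab_64 + nudge) / (int64_t(1) << 31));
    return overflow ? INT32_MAX : ab_x2_high32;
}

// ASYMM_MULT_BY_QUANT_MULTIPLIER_LESS_THAN_ONE(x, multiplier, shift, size), used by the
// new kernel, composes the two: asymm_rounding_divide_by_pow2(asymm_mult(x, multiplier), shift).
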
diff --git a/src/core/CL/kernels/CLDepthwiseConvolution3x3Kernel.cpp b/src/core/CL/kernels/CLDepthwiseConvolution3x3Kernel.cpp
index 5f42450b9f..208d06d7cd 100644
--- a/src/core/CL/kernels/CLDepthwiseConvolution3x3Kernel.cpp
+++ b/src/core/CL/kernels/CLDepthwiseConvolution3x3Kernel.cpp
@@ -33,6 +33,7 @@
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/core/Utils.h"
+#include "arm_compute/core/utils/quantization/AsymmHelpers.h"
using namespace arm_compute;
@@ -48,14 +49,22 @@ BorderSize CLDepthwiseConvolution3x3Kernel::border_size() const
void CLDepthwiseConvolution3x3Kernel::configure(const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info)
{
- ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F32);
- ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::F32);
- ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(weights, 1, DataType::F32);
+ ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::F32);
+ ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::QASYMM8, DataType::F32);
+ ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(weights, 1, DataType::QASYMM8, DataType::F32);
+ ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input, output, weights);
ARM_COMPUTE_ERROR_ON(weights->info()->dimension(0) != 3 || weights->info()->dimension(1) != 3);
if(biases != nullptr)
{
- ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(weights, biases);
+ if(is_data_type_quantized_asymmetric(weights->info()->data_type()))
+ {
+ ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(biases, 1, DataType::S32);
+ }
+ else
+ {
+ ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(weights, biases);
+ }
ARM_COMPUTE_ERROR_ON(biases->info()->dimension(0) != weights->info()->dimension(2));
ARM_COMPUTE_ERROR_ON(biases->info()->num_dimensions() > 1);
}
@@ -80,13 +89,12 @@ void CLDepthwiseConvolution3x3Kernel::configure(const ICLTensor *input, const IC
// Set build options
ARM_COMPUTE_ERROR_ON(_conv_stride_x < 1 || _conv_stride_x > 3);
- std::set<std::string> options{ "-DCONV_STRIDE_X=" + support::cpp11::to_string(_conv_stride_x) };
- if(_biases != nullptr)
- {
- options.emplace("-DHAS_BIAS");
- }
+ CLBuildOptions build_opts;
+ build_opts.add_option("-DCONV_STRIDE_X=" + support::cpp11::to_string(_conv_stride_x));
+ build_opts.add_option_if(_biases != nullptr, "-DHAS_BIAS");
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("depthwise_convolution_3x3", options));
+ std::string kernel_name = is_data_type_quantized_asymmetric(_input->info()->data_type()) ? "depthwise_convolution_3x3_quantized" : "depthwise_convolution_3x3";
+ _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name, build_opts.options()));
// Configure kernel window
const unsigned int num_elems_processed_per_iteration = 2;
@@ -105,6 +113,23 @@ void CLDepthwiseConvolution3x3Kernel::configure(const ICLTensor *input, const IC
output_access.set_valid_region(win, ValidRegion(Coordinates(), output->info()->tensor_shape()));
ICLKernel::configure(win);
+
+ // Set static arguments
+ if(is_data_type_quantized_asymmetric(_input->info()->data_type()))
+ {
+ float multiplier = _input->info()->quantization_info().scale * _weights->info()->quantization_info().scale / _output->info()->quantization_info().scale;
+ int output_multiplier = 0;
+ int output_shift = 0;
+ quantization::calculate_quantized_multiplier_less_than_one(multiplier, &output_multiplier, &output_shift);
+
+ unsigned int idx = 3 * num_arguments_per_3D_tensor() + ((_biases != nullptr) ? num_arguments_per_1D_tensor() : 0);
+
+ _kernel.setArg(idx++, -_input->info()->quantization_info().offset);
+ _kernel.setArg(idx++, -_weights->info()->quantization_info().offset);
+ _kernel.setArg(idx++, _output->info()->quantization_info().offset);
+ _kernel.setArg(idx++, output_multiplier);
+ _kernel.setArg(idx++, output_shift);
+ }
}
void CLDepthwiseConvolution3x3Kernel::run(const Window &window, cl::CommandQueue &queue)
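
The static kernel arguments set in configure() come from decomposing the real rescale factor input_scale * weights_scale / output_scale into a Q31 fixed-point multiplier and a right shift. A hedged sketch of that decomposition (illustrative; the library provides it as quantization::calculate_quantized_multiplier_less_than_one, whose implementation may differ in detail):

#include <cmath>
#include <cstdint>

// Express multiplier (expected in [0, 1)) as m_q31 * 2^-right_shift with m_q31 in [2^30, 2^31).
inline void quantize_multiplier_lt_one(float multiplier, int32_t *quantized_multiplier, int *right_shift)
{
    if(multiplier == 0.f)
    {
        *quantized_multiplier = 0;
        *right_shift          = 0;
        return;
    }
    int          exponent = 0;
    const double q        = std::frexp(multiplier, &exponent); // multiplier = q * 2^exponent, q in [0.5, 1)
    *right_shift          = -exponent;
    int64_t q_fixed       = int64_t(std::llround(q * (int64_t(1) << 31)));
    if(q_fixed == (int64_t(1) << 31)) // handle rounding up to exactly 1.0 in Q31
    {
        q_fixed /= 2;
        --*right_shift;
    }
    *quantized_multiplier = int32_t(q_fixed);
}

// The kernel then applies: acc = rounding_divide_by_pow2(saturating_doubling_high_mul(acc, m_q31), right_shift).
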
diff --git a/src/runtime/CL/functions/CLDepthwiseConvolution.cpp b/src/runtime/CL/functions/CLDepthwiseConvolution.cpp
index 156565950a..23a20a3011 100644
--- a/src/runtime/CL/functions/CLDepthwiseConvolution.cpp
+++ b/src/runtime/CL/functions/CLDepthwiseConvolution.cpp
@@ -37,9 +37,9 @@ CLDepthwiseConvolution3x3::CLDepthwiseConvolution3x3()
void CLDepthwiseConvolution3x3::configure(ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info)
{
- ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F32);
- ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::F32);
- ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input, weights);
+ ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::F32);
+ ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::QASYMM8, DataType::F32);
+ ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input, output, weights);
_kernel.configure(input, weights, biases, output, conv_info);
_border_handler.configure(input, _kernel.border_size(), BorderMode::CONSTANT, PixelValue(0));
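
For completeness, a rough host-side usage sketch of the updated function with QASYMM8 tensors (hypothetical shapes and quantization parameters; header paths and constructors are assumed from the public API and may differ in this version):

#include "arm_compute/runtime/CL/CLScheduler.h"
#include "arm_compute/runtime/CL/CLTensor.h"
#include "arm_compute/runtime/CL/functions/CLDepthwiseConvolution.h"

using namespace arm_compute;

int main()
{
    CLScheduler::get().default_init();

    // 16x16 input with 8 channels, 3x3 depthwise filter, stride 1, padding 1.
    CLTensor src, weights, biases, dst;
    src.allocator()->init(TensorInfo(TensorShape(16U, 16U, 8U), 1, DataType::QASYMM8, QuantizationInfo(0.5f, 10)));
    weights.allocator()->init(TensorInfo(TensorShape(3U, 3U, 8U), 1, DataType::QASYMM8, QuantizationInfo(0.25f, 128)));
    biases.allocator()->init(TensorInfo(TensorShape(8U), 1, DataType::S32)); // biases must be S32 for QASYMM8
    dst.allocator()->init(TensorInfo(TensorShape(16U, 16U, 8U), 1, DataType::QASYMM8, QuantizationInfo(1.f, 0)));

    CLDepthwiseConvolution3x3 dwc;
    dwc.configure(&src, &weights, &biases, &dst, PadStrideInfo(1, 1, 1, 1));

    src.allocator()->allocate();
    weights.allocator()->allocate();
    biases.allocator()->allocate();
    dst.allocator()->allocate();

    // ... fill src, weights and biases, then:
    dwc.run();
    CLScheduler::get().sync();
    return 0;
}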