author    Sang-Hoon Park <sang-hoon.park@arm.com>  2019-10-15 09:29:13 +0100
committer Sang-Hoon Park <sang-hoon.park@arm.com>  2019-10-21 08:21:47 +0000
commit    ab5b1a279284bed350d3bb75f3d9d3aec6edca0e (patch)
tree      f77d077dea99d2969d5065d25b9141ff70e0ea90
parent    422da26b44d517f38011a91888a1c5da4acfeed4 (diff)
COMPMID-2744 [CL] add support for 9x9 quantized direct convolution
Change-Id: I858ce5b9a530f8568e154f5d724d267e142ef9b2
Signed-off-by: Sang-Hoon Park <sang-hoon.park@arm.com>
Reviewed-on: https://review.mlplatform.org/c/2091
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Manuel Bottini <manuel.bottini@arm.com>
Reviewed-by: Michele Di Giorgio <michele.digiorgio@arm.com>
Reviewed-by: Giuseppe Rossini <giuseppe.rossini@arm.com>
-rw-r--r--  src/core/CL/CLKernelLibrary.cpp                                                                                           |  6
-rw-r--r--  src/core/CL/cl_kernels/direct_convolution_quantized.cl (renamed from src/core/CL/cl_kernels/direct_convolution_1x1_3x3_5x5_quantized.cl) | 63
-rw-r--r--  src/core/CL/kernels/CLDirectConvolutionLayerKernel.cpp                                                                    | 28
-rw-r--r--  tests/validation/CL/DirectConvolutionLayer.cpp                                                                            | 18
4 files changed, 104 insertions(+), 11 deletions(-)
diff --git a/src/core/CL/CLKernelLibrary.cpp b/src/core/CL/CLKernelLibrary.cpp
index 0cd6e49824..b2905a848b 100644
--- a/src/core/CL/CLKernelLibrary.cpp
+++ b/src/core/CL/CLKernelLibrary.cpp
@@ -249,7 +249,7 @@ const std::map<std::string, std::string> CLKernelLibrary::_kernel_program_map =
{ "direct_convolution5x5", "direct_convolution5x5.cl" },
{ "direct_convolution5x5_nhwc", "direct_convolution5x5.cl" },
{ "direct_convolution5x5_f32_bifrost", "direct_convolution5x5.cl" },
- { "direct_convolution_1x1_3x3_5x5_quantized", "direct_convolution_1x1_3x3_5x5_quantized.cl" },
+ { "direct_convolution_quantized", "direct_convolution_quantized.cl" },
{ "direct_convolution9x9_nhwc", "direct_convolution9x9.cl" },
{ "elementwise_operation_ADD", "elementwise_operation.cl" },
{ "elementwise_operation_SUB", "elementwise_operation.cl" },
@@ -717,8 +717,8 @@ const std::map<std::string, std::string> CLKernelLibrary::_program_source_map =
#include "./cl_kernels/direct_convolution5x5.clembed"
},
{
- "direct_convolution_1x1_3x3_5x5_quantized.cl",
-#include "./cl_kernels/direct_convolution_1x1_3x3_5x5_quantized.clembed"
+ "direct_convolution_quantized.cl",
+#include "./cl_kernels/direct_convolution_quantized.clembed"
},
{
"direct_convolution9x9.cl",
diff --git a/src/core/CL/cl_kernels/direct_convolution_1x1_3x3_5x5_quantized.cl b/src/core/CL/cl_kernels/direct_convolution_quantized.cl
index 5ad9afb23c..1182428cd5 100644
--- a/src/core/CL/cl_kernels/direct_convolution_1x1_3x3_5x5_quantized.cl
+++ b/src/core/CL/cl_kernels/direct_convolution_quantized.cl
@@ -27,7 +27,50 @@
#if defined(DATA_TYPE) && defined(STRIDE_X) && defined(WEIGHTS_DEPTH)
-#if KERNEL_SIZE == 5
+#if KERNEL_SIZE == 9
+
+#if STRIDE_X == 1
+#define CONVOLUTION1x9(acc, src_row_ptr, weights_row_ptr) CONVOLUTION1x9_STRIDE1(acc, src_row_ptr, weights_row_ptr)
+#elif STRIDE_X == 2
+#define CONVOLUTION1x9(acc, src_row_ptr, weights_row_ptr) CONVOLUTION1x9_STRIDE2(acc, src_row_ptr, weights_row_ptr)
+#else /* STRIDE_X not equals 1 or 2 */
+#error "STRIDE_X larger than 2 is not supported"
+#endif /* STRIDE_X */
+
+#define CONVOLUTION1x9_STRIDE1(acc, src_row_ptr, weights_row_ptr) \
+ ({ \
+ int8 weights_values0 = convert_int8(vload8(0, weights_row_ptr)); \
+ int weights_value1 = convert_int(*(weights_row_ptr + 8)); \
+ int16 src0 = convert_int16(vload16(0, src_row_ptr)); \
+ acc += (src0.lo + input_offset) * ((int8)weights_values0.s0 + weight_offset); \
+ acc += ((int8)(src0.s1234, src0.s5678) + input_offset) * ((int8)weights_values0.s1 + weight_offset); \
+ acc += ((int8)(src0.s2345, src0.s6789) + input_offset) * ((int8)weights_values0.s2 + weight_offset); \
+ acc += ((int8)(src0.s3456, src0.s789A) + input_offset) * ((int8)weights_values0.s3 + weight_offset); \
+ acc += ((int8)(src0.s4567, src0.s89AB) + input_offset) * ((int8)weights_values0.s4 + weight_offset); \
+ acc += ((int8)(src0.s5678, src0.s9ABC) + input_offset) * ((int8)weights_values0.s5 + weight_offset); \
+ acc += ((int8)(src0.s6789, src0.sABCD) + input_offset) * ((int8)weights_values0.s6 + weight_offset); \
+ acc += ((int8)(src0.s789A, src0.sBCDE) + input_offset) * ((int8)weights_values0.s7 + weight_offset); \
+ acc += ((int8)(src0.s89AB, src0.sCDEF) + input_offset) * ((int8)weights_value1 + weight_offset); \
+ })
+
+#define CONVOLUTION1x9_STRIDE2(acc, src_row_ptr, weights_row_ptr) \
+ ({ \
+ int8 weights_values0 = convert_int8(vload8(0, weights_row_ptr)); \
+ int weights_value1 = convert_int(*(weights_row_ptr + 8)); \
+ int16 src0 = convert_int16(vload16(0, src_row_ptr)); \
+ int8 src1 = convert_int8(vload8(0, src_row_ptr + 16)); \
+ acc += (src0.even + input_offset) * ((int8)weights_values0.s0 + weight_offset); \
+ acc += ((int8)(src0.s1357, src0.s9BDF) + input_offset) * ((int8)weights_values0.s1 + weight_offset); \
+ acc += ((int8)(src0.s2468, src0.sACE, src1.s0) + input_offset) * ((int8)weights_values0.s2 + weight_offset); \
+ acc += ((int8)(src0.s3579, src0.sBDF, src1.s1) + input_offset) * ((int8)weights_values0.s3 + weight_offset); \
+ acc += ((int8)(src0.s468A, src0.sCE, src1.s02) + input_offset) * ((int8)weights_values0.s4 + weight_offset); \
+ acc += ((int8)(src0.s579B, src0.sDF, src1.s13) + input_offset) * ((int8)weights_values0.s5 + weight_offset); \
+ acc += ((int8)(src0.s68AC, src0.sE, src1.s024) + input_offset) * ((int8)weights_values0.s6 + weight_offset); \
+ acc += ((int8)(src0.s79BD, src0.sF, src1.s135) + input_offset) * ((int8)weights_values0.s7 + weight_offset); \
+ acc += ((int8)(src0.s8ACE, src1.s0246) + input_offset) * ((int8)weights_value1 + weight_offset); \
+ })
+
+#elif KERNEL_SIZE == 5
#if STRIDE_X == 1
#define CONVOLUTION1x5(acc, src_row_ptr, weights_row_ptr) CONVOLUTION1x5_STRIDE1(acc, src_row_ptr, weights_row_ptr)
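
For readers of the new macros: each CONVOLUTION1x9 invocation accumulates one kernel row into 8 horizontally adjacent outputs, adding the QASYMM8 input and weight offsets before every multiply. A scalar C++ sketch of the stride-1 case (an illustrative reference, not code from the patch):

    #include <cstdint>

    // Scalar equivalent of one CONVOLUTION1x9_STRIDE1 invocation: 8 adjacent output
    // accumulators, each a 9-tap dot product along x, with the asymmetric-quantization
    // offsets applied to inputs and weights exactly as in the macro above.
    void convolution1x9_stride1_ref(int32_t acc[8], const uint8_t src_row[16], const uint8_t weights_row[9],
                                    int32_t input_offset, int32_t weight_offset)
    {
        for(int x = 0; x < 8; ++x)
        {
            for(int k = 0; k < 9; ++k)
            {
                acc[x] += (static_cast<int32_t>(src_row[x + k]) + input_offset) *
                          (static_cast<int32_t>(weights_row[k]) + weight_offset);
            }
        }
    }

For stride 2 the only change is that the source index becomes 2 * x + k, whose largest value is 22; that is why CONVOLUTION1x9_STRIDE2 loads 24 pixels via a vload16 plus a vload8 and uses 23 of them.
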
@@ -142,8 +185,8 @@ inline uchar8 extract_input_stride3(__global const uchar *input_pixel)
return (uchar8)(temp1.s0369, temp2.s0369);
}
-#else /* KERNEL_SIZE not equals 1, 3 or 5 */
-#error "Only kernel sizes 1, 3 and 5 are supported"
+#else /* KERNEL_SIZE not equals 1, 3 , 5, 9 */
+#error "Only kernel sizes 1, 3, 5 and 9 are supported"
#endif /* KERNEL_SIZE */
/** This kernel performs a direct convolution to convolve the low three dimensions.
@@ -187,7 +230,7 @@ inline uchar8 extract_input_stride3(__global const uchar *input_pixel)
* @param[in] output_multiplier Output integer multiplier quantization parameter
* @param[in] output_shift Output integer shift quantization parameter
*/
-__kernel void direct_convolution_1x1_3x3_5x5_quantized(
+__kernel void direct_convolution_quantized(
TENSOR3D_DECLARATION(src),
TENSOR3D_DECLARATION(dst),
TENSOR3D_DECLARATION(weights),
@@ -215,7 +258,17 @@ __kernel void direct_convolution_1x1_3x3_5x5_quantized(
for(volatile int d = 0; d < WEIGHTS_DEPTH; ++d)
{
-#if KERNEL_SIZE == 5
+#if KERNEL_SIZE == 9
+ CONVOLUTION1x9(pixels0, (__global uchar *)(src_addr + 0 * src_stride_y), (__global uchar *)(weights_addr + 0 * weights_stride_y));
+ CONVOLUTION1x9(pixels0, (__global uchar *)(src_addr + 1 * src_stride_y), (__global uchar *)(weights_addr + 1 * weights_stride_y));
+ CONVOLUTION1x9(pixels0, (__global uchar *)(src_addr + 2 * src_stride_y), (__global uchar *)(weights_addr + 2 * weights_stride_y));
+ CONVOLUTION1x9(pixels0, (__global uchar *)(src_addr + 3 * src_stride_y), (__global uchar *)(weights_addr + 3 * weights_stride_y));
+ CONVOLUTION1x9(pixels0, (__global uchar *)(src_addr + 4 * src_stride_y), (__global uchar *)(weights_addr + 4 * weights_stride_y));
+ CONVOLUTION1x9(pixels0, (__global uchar *)(src_addr + 5 * src_stride_y), (__global uchar *)(weights_addr + 5 * weights_stride_y));
+ CONVOLUTION1x9(pixels0, (__global uchar *)(src_addr + 6 * src_stride_y), (__global uchar *)(weights_addr + 6 * weights_stride_y));
+ CONVOLUTION1x9(pixels0, (__global uchar *)(src_addr + 7 * src_stride_y), (__global uchar *)(weights_addr + 7 * weights_stride_y));
+ CONVOLUTION1x9(pixels0, (__global uchar *)(src_addr + 8 * src_stride_y), (__global uchar *)(weights_addr + 8 * weights_stride_y));
+#elif KERNEL_SIZE == 5
CONVOLUTION1x5(pixels0, (__global uchar *)src_addr, (__global uchar *)weights_addr);
CONVOLUTION1x5(pixels0, (__global uchar *)(src_addr + 1 * src_stride_y), (__global uchar *)(weights_addr + 1 * weights_stride_y));
CONVOLUTION1x5(pixels0, (__global uchar *)(src_addr + 2 * src_stride_y), (__global uchar *)(weights_addr + 2 * weights_stride_y));
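
Putting the hunk above in context: for KERNEL_SIZE == 9 the kernel keeps a single int8 accumulator (pixels0) and, for every input channel of the WEIGHTS_DEPTH loop, accumulates nine 1x9 row convolutions at consecutive y offsets. A scalar sketch of that accumulation order, reusing convolution1x9_stride1_ref from the earlier sketch; the per-channel z-stride stepping and the final requantization comment are assumptions inferred from the macros and parameter documentation above, not copied from the patch:

    #include <cstddef>
    #include <cstdint>

    // Declared in the earlier sketch.
    void convolution1x9_stride1_ref(int32_t acc[8], const uint8_t src_row[16], const uint8_t weights_row[9],
                                    int32_t input_offset, int32_t weight_offset);

    // Illustrative accumulation order for KERNEL_SIZE == 9, stride 1:
    // 8 horizontal outputs, all input channels, 9 kernel rows.
    void direct_convolution9x9_quantized_ref(int32_t acc[8],
                                             const uint8_t *src, size_t src_stride_y, size_t src_stride_z,
                                             const uint8_t *weights, size_t weights_stride_y, size_t weights_stride_z,
                                             int weights_depth, int32_t input_offset, int32_t weight_offset)
    {
        for(int d = 0; d < weights_depth; ++d)      // the WEIGHTS_DEPTH loop in the kernel
        {
            for(int y = 0; y < 9; ++y)              // the nine CONVOLUTION1x9 calls above
            {
                convolution1x9_stride1_ref(acc,
                                           src + d * src_stride_z + y * src_stride_y,
                                           weights + d * weights_stride_z + y * weights_stride_y,
                                           input_offset, weight_offset);
            }
        }
        // Downstream (not shown here) the kernel adds the bias, requantizes with the
        // output multiplier/shift and adds the output offset, per the parameter
        // comments above, before storing the uchar results.
    }
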
diff --git a/src/core/CL/kernels/CLDirectConvolutionLayerKernel.cpp b/src/core/CL/kernels/CLDirectConvolutionLayerKernel.cpp
index 10119d8e8e..7b74a5a98c 100644
--- a/src/core/CL/kernels/CLDirectConvolutionLayerKernel.cpp
+++ b/src/core/CL/kernels/CLDirectConvolutionLayerKernel.cpp
@@ -56,14 +56,23 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *weights,
ARM_COMPUTE_RETURN_ERROR_ON_MSG(weights->dimension(width_idx) != weights->dimension(height_idx), "Weights should have same width and height");
ARM_COMPUTE_RETURN_ERROR_ON_MSG(weights->dimension(width_idx) != 1 && weights->dimension(width_idx) != 3 && weights->dimension(width_idx) != 5 && weights->dimension(width_idx) != 9,
"Kernel sizes other than 1x1, 3x3, 5x5 or 9x9 are not supported");
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(weights->dimension(width_idx) == 9 && input->data_type() == DataType::QASYMM8, "Kernel sizes of 9x9 is not supported for quantized types");
ARM_COMPUTE_RETURN_ERROR_ON_MSG(weights->dimension(channel_idx) != input->dimension(channel_idx),
"Weights feature map dimension should match the respective input's one");
ARM_COMPUTE_RETURN_ERROR_ON_MSG(weights->num_dimensions() > 4, "Weights can be at most 4 dimensional");
ARM_COMPUTE_RETURN_ERROR_ON_MSG((weights->dimension(width_idx) == 1) && std::get<0>(conv_info.stride()) > 3, "Strides larger than 3 not supported for 1x1 convolution.");
ARM_COMPUTE_RETURN_ERROR_ON_MSG((weights->dimension(width_idx) == 3 || weights->dimension(width_idx) == 5) && std::get<0>(conv_info.stride()) > 2,
"Strides larger than 2 not supported for 3x3 convolution.");
- ARM_COMPUTE_RETURN_ERROR_ON_MSG((weights->dimension(width_idx) == 9) && data_layout == DataLayout::NCHW, "Only NHWC layout is supported for 9x9 convolution.");
+
+ const auto data_type = input->data_type();
+
+ if(weights->dimension(width_idx) == 9)
+ {
+ const auto supported_data_layout = is_data_type_quantized(data_type) ? DataLayout::NCHW : DataLayout::NHWC;
+ const auto error_message = std::string("Only " + string_from_data_layout(supported_data_layout) + " layout is supported for 9x9 convolution with " + string_from_data_type(
+ data_type) + " type");
+
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG((supported_data_layout != data_layout), error_message.c_str());
+ }
if(biases != nullptr)
{
@@ -226,6 +235,19 @@ inline void setup_num_elems(unsigned int &num_elems_read_per_iteration_x, unsign
ARM_COMPUTE_ERROR("Invalid convolution stride X");
}
break;
+ case 9:
+ switch(conv_stride_x)
+ {
+ case 1:
+ num_elems_read_per_iteration_x = 16;
+ break;
+ case 2:
+ num_elems_read_per_iteration_x = 24;
+ break;
+ default:
+ ARM_COMPUTE_ERROR("Invalid convolution stride X");
+ }
+ break;
default:
ARM_COMPUTE_ERROR("Invalid direct convolution size");
}
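
A quick sanity check on the two new read widths (a sketch; the 8-outputs-per-iteration figure comes from the int8 accumulator used by the quantized kernel):

    // For N horizontal outputs and a 9-tap row, the last pixel touched per iteration
    // sits at index (N - 1) * stride + taps - 1, so the read width is:
    constexpr int pixels_touched(int outputs, int taps, int stride)
    {
        return (outputs - 1) * stride + taps;
    }

    static_assert(pixels_touched(8, 9, 1) == 16, "stride 1: matches the single vload16 in CONVOLUTION1x9_STRIDE1");
    static_assert(pixels_touched(8, 9, 2) == 23, "stride 2: 23 pixels, covered by the vload16 + vload8 (24) in CONVOLUTION1x9_STRIDE2");

Stride 2 therefore needs 23 pixels; 24 is the smallest count covered by the macro's vload16 + vload8 pair, which is why num_elems_read_per_iteration_x is 24 rather than 23.
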
@@ -487,7 +509,7 @@ void CLDirectConvolutionLayerKernel::configure(const ICLTensor *input, const ICL
}
build_options.add_option(std::string("-DDATA_TYPE_PROMOTED=" + get_cl_type_from_data_type(data_type)));
// Create kernel
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(is_quantized_asymm ? "direct_convolution_1x1_3x3_5x5_quantized" : kernel_name.str(),
+ _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(is_quantized_asymm ? "direct_convolution_quantized" : kernel_name.str(),
build_options.options()));
}
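
For completeness, a minimal usage sketch that exercises the newly allowed path (9x9 kernel, QASYMM8, NCHW) through the public CLDirectConvolutionLayer front-end. Shapes, strides and quantization values are arbitrary example numbers, and the runtime boilerplate is assumed from the library's usual setup rather than taken from this patch:

    #include "arm_compute/core/Types.h"
    #include "arm_compute/runtime/CL/CLScheduler.h"
    #include "arm_compute/runtime/CL/CLTensor.h"
    #include "arm_compute/runtime/CL/functions/CLDirectConvolutionLayer.h"

    using namespace arm_compute;

    int main()
    {
        CLScheduler::get().default_init();

        // TensorInfo defaults to NCHW here, which is the layout the validation above
        // requires for a 9x9 QASYMM8 direct convolution (9x9 float requires NHWC).
        CLTensor src, weights, biases, dst;
        const QuantizationInfo qinfo(2.f / 255, 10);

        src.allocator()->init(TensorInfo(TensorShape(64U, 64U, 3U), 1, DataType::QASYMM8, qinfo));
        weights.allocator()->init(TensorInfo(TensorShape(9U, 9U, 3U, 8U), 1, DataType::QASYMM8, qinfo));
        biases.allocator()->init(TensorInfo(TensorShape(8U), 1, DataType::S32));
        dst.allocator()->init(TensorInfo(TensorShape(56U, 56U, 8U), 1, DataType::QASYMM8, qinfo)); // 64 - 9 + 1 = 56

        CLDirectConvolutionLayer conv;
        conv.configure(&src, &weights, &biases, &dst, PadStrideInfo(1, 1, 0, 0)); // stride 1, no padding

        src.allocator()->allocate();
        weights.allocator()->allocate();
        biases.allocator()->allocate();
        dst.allocator()->allocate();

        conv.run();
        CLScheduler::get().sync();
        return 0;
    }
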
diff --git a/tests/validation/CL/DirectConvolutionLayer.cpp b/tests/validation/CL/DirectConvolutionLayer.cpp
index 6c46374b54..5007738785 100644
--- a/tests/validation/CL/DirectConvolutionLayer.cpp
+++ b/tests/validation/CL/DirectConvolutionLayer.cpp
@@ -257,6 +257,15 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLDirectConvolutionLayerQuantizedFixture<uint8_
// Validate output
validate(CLAccessor(_target), _reference, tolerance_qasymm8);
}
+FIXTURE_DATA_TEST_CASE(RunSmall9x9, CLDirectConvolutionLayerQuantizedFixture<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(data_precommit_9x9,
+ framework::dataset::make("DataType",
+ DataType::QASYMM8)),
+ framework::dataset::make("QuantizationInfo", { QuantizationInfo(2.f / 255, 10) })),
+ QuantizedActivationFunctionsDataset))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference, tolerance_qasymm8);
+}
FIXTURE_DATA_TEST_CASE(RunLarge, CLDirectConvolutionLayerQuantizedFixture<uint8_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(data_nightly, framework::dataset::make("DataType",
DataType::QASYMM8)),
framework::dataset::make("QuantizationInfo", { QuantizationInfo(2.f / 255, 10) })),
@@ -265,6 +274,15 @@ FIXTURE_DATA_TEST_CASE(RunLarge, CLDirectConvolutionLayerQuantizedFixture<uint8_
// Validate output
validate(CLAccessor(_target), _reference, tolerance_qasymm8);
}
+FIXTURE_DATA_TEST_CASE(RunLarge9x9, CLDirectConvolutionLayerQuantizedFixture<uint8_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(data_nightly_9x9, framework::dataset::make("DataType",
+ DataType::QASYMM8)),
+ framework::dataset::make("QuantizationInfo", { QuantizationInfo(2.f / 255, 10) })),
+ QuantizedActivationFunctionsDataset))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference, tolerance_qasymm8);
+}
+
TEST_SUITE_END() // QASYMM8
TEST_SUITE(QASYMM8_CustomDataset)