author    Pablo Tello <pablo.tello@arm.com>    2019-06-03 14:59:48 +0100
committer Pablo Marquez <pablo.tello@arm.com>  2019-06-20 16:24:33 +0000
commit    a28aebc5f80630683f50ce934e8584c6b0004beb (patch)
tree      0764f6b51f76985ac3c788d123bb8f1e50da9ff5
parent    e16c8906a2aedf00e910754a01fca8bc4189cfc7 (diff)
download  ComputeLibrary-a28aebc5f80630683f50ce934e8584c6b0004beb.tar.gz
COMPMID-2383: DepthwiseConv use output qinfo
There was a problem in depthwise_conv: when a permute was needed, auto_init was called to initialise the temporary tensor with the input's qinfo instead of the output's qinfo. This patch fixes that and adds tests to exercise the cases where the input and output have different qinfos.

Change-Id: I396fbb4d73f6aa5efe03dc2dd727b3e13154d6f5
Signed-off-by: Pablo Tello <pablo.tello@arm.com>
Reviewed-on: https://review.mlplatform.org/c/1270
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Giuseppe Rossini <giuseppe.rossini@arm.com>
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
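The sketch below is a minimal, standalone illustration of the bug and the fix described above; it is not part of the patch. The types and the auto_init_if_empty helper here are simplified stand-ins rather than the arm_compute API, and only show why the temporary tensor created for the permute path must inherit the destination's quantization info rather than the source's.

#include <iostream>

// Illustrative stand-ins only; not the arm_compute types.
struct QuantizationInfo
{
    float scale;
    int   offset;
};

struct TensorInfo
{
    QuantizationInfo qinfo;
    bool             initialised;
};

// Mirrors the intent of auto_init_if_empty(): fill in metadata only if the
// tensor info has not been configured yet.
void auto_init_if_empty(TensorInfo &dst, const QuantizationInfo &qinfo)
{
    if(!dst.initialised)
    {
        dst.qinfo       = qinfo;
        dst.initialised = true;
    }
}

int main()
{
    const TensorInfo input           = { { 0.5f, 10 }, true };
    const TensorInfo output          = { { 0.3f, 4 }, true };
    TensorInfo       permuted_output = { { 1.0f, 0 }, false }; // temporary used by the permute path

    // Behaviour fixed by this patch: the temporary used to inherit the
    // input's qinfo, so the destination scale/offset never reached the kernel.
    // auto_init_if_empty(permuted_output, input.qinfo);

    // Fixed behaviour: the temporary carries the output's qinfo.
    auto_init_if_empty(permuted_output, output.qinfo);

    std::cout << "permuted_output scale = " << permuted_output.qinfo.scale
              << ", offset = " << permuted_output.qinfo.offset << std::endl;
    return 0;
}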
-rw-r--r--  src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NCHWKernel.cpp                |  2
-rw-r--r--  src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NHWCKernel.cpp                |  7
-rw-r--r--  src/core/NEON/kernels/NEDepthwiseConvolutionLayer3x3Kernel.cpp                  |  2
-rw-r--r--  src/runtime/CL/functions/CLDepthwiseConvolutionLayer.cpp                        |  1
-rw-r--r--  src/runtime/NEON/functions/NEDepthwiseConvolutionLayer.cpp                      |  5
-rw-r--r--  src/runtime/NEON/functions/assembly/NEDepthwiseConvolutionAssemblyDispatch.cpp  |  2
-rw-r--r--  tests/validation/CL/DepthwiseConvolutionLayer.cpp                                | 72
-rw-r--r--  tests/validation/NEON/DepthwiseConvolutionLayer.cpp                              | 81
-rw-r--r--  tests/validation/fixtures/DepthwiseConvolutionLayerFixture.h                     | 52
9 files changed, 126 insertions, 98 deletions
diff --git a/src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NCHWKernel.cpp b/src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NCHWKernel.cpp
index 615327a7cc..fe796e96b8 100644
--- a/src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NCHWKernel.cpp
+++ b/src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NCHWKernel.cpp
@@ -88,7 +88,7 @@ std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, ITen
{
// Output auto inizialitation if not yet initialized
const TensorShape output_shape = compute_depthwise_convolution_shape(*input, *weights, conv_info, depth_multiplier, dilation);
- auto_init_if_empty(*output, input->clone()->set_tensor_shape(output_shape));
+ auto_init_if_empty(*output, input->clone()->set_tensor_shape(output_shape).set_quantization_info(output->quantization_info()));
const unsigned int conv_stride_x = conv_info.stride().first;
const unsigned int conv_stride_y = conv_info.stride().second;
diff --git a/src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NHWCKernel.cpp b/src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NHWCKernel.cpp
index f489e82cb9..779cf25fdf 100644
--- a/src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NHWCKernel.cpp
+++ b/src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NHWCKernel.cpp
@@ -109,12 +109,7 @@ std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, ITen
const TensorShape output_shape = arm_compute::misc::shape_calculator::compute_depthwise_convolution_shape(
*input, TensorInfo(TensorShape(weights_width, weights_height), 1, weights->data_type()).set_data_layout(DataLayout::NCHW), conv_info, depth_multiplier, dilation);
- // Output auto inizialitation if not yet initialized
- auto_init_if_empty(*output,
- output_shape,
- 1,
- input->data_type(),
- input->quantization_info());
+ auto_init_if_empty(*output, input->clone()->set_tensor_shape(output_shape).set_quantization_info(output->quantization_info()));
const bool is_qasymm = is_data_type_quantized_asymmetric(input->data_type());
const bool is_stride_1_dilation_1 = ((conv_info.stride().first == conv_info.stride().second) && (conv_info.stride().first == 1) && dilation.x() == 1 && dilation.y() == 1);
diff --git a/src/core/NEON/kernels/NEDepthwiseConvolutionLayer3x3Kernel.cpp b/src/core/NEON/kernels/NEDepthwiseConvolutionLayer3x3Kernel.cpp
index 385be04e4a..e47786525e 100644
--- a/src/core/NEON/kernels/NEDepthwiseConvolutionLayer3x3Kernel.cpp
+++ b/src/core/NEON/kernels/NEDepthwiseConvolutionLayer3x3Kernel.cpp
@@ -195,7 +195,7 @@ std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, ITen
const DataType output_dt = (input->data_type() == DataType::QASYMM8) ? DataType::S32 : input->data_type();
// Output auto inizialitation if not yet initialized
- auto_init_if_empty(*output, input->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(output_shape).set_data_type(output_dt));
+ auto_init_if_empty(*output, input->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(output_shape).set_data_type(output_dt).set_quantization_info(output->quantization_info()));
// Configure kernel window (generic)
const unsigned int conv_stride_x = conv_info.stride().first;
diff --git a/src/runtime/CL/functions/CLDepthwiseConvolutionLayer.cpp b/src/runtime/CL/functions/CLDepthwiseConvolutionLayer.cpp
index e912740d69..16082775c3 100644
--- a/src/runtime/CL/functions/CLDepthwiseConvolutionLayer.cpp
+++ b/src/runtime/CL/functions/CLDepthwiseConvolutionLayer.cpp
@@ -90,6 +90,7 @@ void CLDepthwiseConvolutionLayer3x3::configure(ICLTensor *input, const ICLTensor
// Configure the function to transform the weights tensor from HWI -> IHW
_permute_weights_to_nchw.configure(weights, &_permuted_weights, PermutationVector(1U, 2U, 0U));
_permuted_weights.info()->set_data_layout(DataLayout::NCHW);
+ _permuted_output.info()->set_quantization_info(output->info()->quantization_info());
input_to_use = &_permuted_input;
weights_to_use = &_permuted_weights;
diff --git a/src/runtime/NEON/functions/NEDepthwiseConvolutionLayer.cpp b/src/runtime/NEON/functions/NEDepthwiseConvolutionLayer.cpp
index 4bc8439d93..43288ec4c6 100644
--- a/src/runtime/NEON/functions/NEDepthwiseConvolutionLayer.cpp
+++ b/src/runtime/NEON/functions/NEDepthwiseConvolutionLayer.cpp
@@ -87,6 +87,7 @@ void NEDepthwiseConvolutionLayer3x3::configure_generic(ITensor
// Configure the function to transform the weights tensor from HWI -> IHW
_permute_weights.configure(weights, &_permuted_weights, PermutationVector(1U, 2U, 0U));
_permuted_weights.info()->set_data_layout(DataLayout::NCHW);
+ _permuted_output.info()->set_quantization_info(output->info()->quantization_info());
// Configure depthwise
_dwc_kernel.configure(&_permuted_input, &_permuted_weights, (_is_quantized) ? &_accumulator : &_permuted_output, conv_info, depth_multiplier, dilation);
@@ -164,6 +165,9 @@ void NEDepthwiseConvolutionLayer3x3::configure_optimized(const ITensor
_permute_weights.configure(weights, &_permuted_weights, PermutationVector(2U, 0U, 1U));
_permuted_weights.info()->set_data_layout(DataLayout::NHWC);
+ _permuted_output.info()->set_data_layout(DataLayout::NHWC);
+ _permuted_output.info()->set_quantization_info(output->info()->quantization_info());
+
// Configure optimized depthwise
_dwc_optimized_func.configure(&_permuted_input, &_permuted_weights, biases, &_permuted_output, conv_info, depth_multiplier, act_info_to_use);
@@ -423,6 +427,7 @@ void NEDepthwiseConvolutionLayer::configure(ITensor *input, const ITensor *weigh
permute(output_shape, PermutationVector(1U, 2U, 0U));
_permuted_output.allocator()->init(output->info()->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(output_shape));
_permuted_output.info()->set_data_layout(DataLayout::NCHW);
+ _permuted_output.info()->set_quantization_info(output->info()->quantization_info());
output_to_use = &_permuted_output;
}
diff --git a/src/runtime/NEON/functions/assembly/NEDepthwiseConvolutionAssemblyDispatch.cpp b/src/runtime/NEON/functions/assembly/NEDepthwiseConvolutionAssemblyDispatch.cpp
index 0499d9930f..5f57bbfe23 100644
--- a/src/runtime/NEON/functions/assembly/NEDepthwiseConvolutionAssemblyDispatch.cpp
+++ b/src/runtime/NEON/functions/assembly/NEDepthwiseConvolutionAssemblyDispatch.cpp
@@ -176,7 +176,7 @@ void NEDepthwiseConvolutionAssemblyDispatch::configure(const ITensor
// Output auto inizialitation if not yet initialized
const TensorShape output_shape = misc::shape_calculator::compute_depthwise_convolution_shape(*input->info(), *weights->info(), conv_info, depth_multiplier);
- auto_init_if_empty(*output->info(), input->info()->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(output_shape));
+ auto_init_if_empty(*output->info(), input->info()->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(output_shape).set_quantization_info(output->info()->quantization_info()));
_input = input;
_weights = weights;
diff --git a/tests/validation/CL/DepthwiseConvolutionLayer.cpp b/tests/validation/CL/DepthwiseConvolutionLayer.cpp
index 274a0f523a..0b60156baf 100644
--- a/tests/validation/CL/DepthwiseConvolutionLayer.cpp
+++ b/tests/validation/CL/DepthwiseConvolutionLayer.cpp
@@ -557,20 +557,22 @@ TEST_SUITE(Quantized)
TEST_SUITE(QASYMM8)
TEST_SUITE(Generic)
FIXTURE_DATA_TEST_CASE(RunSmall, CLDepthwiseConvolutionLayerQuantizedFixture<uint8_t>, framework::DatasetMode::PRECOMMIT,
- combine(combine(combine(combine(combine(datasets::SmallDepthwiseConvolutionLayerDataset(),
- depth_multipliers),
- framework::dataset::make("DataType", DataType::QASYMM8)),
- framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.5f, 10) })),
+ combine(combine(combine(combine(combine(combine(datasets::SmallDepthwiseConvolutionLayerDataset(),
+ depth_multipliers),
+ framework::dataset::make("DataType", DataType::QASYMM8)),
+ framework::dataset::make("SrcQuantizationInfo", { QuantizationInfo(0.5f, 10) })),
+ framework::dataset::make("DstQuantizationInfo", { QuantizationInfo(0.3f, 4) })),
framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
ActivationFunctionsDataset))
{
validate(CLAccessor(_target), _reference, tolerance_qasymm8);
}
FIXTURE_DATA_TEST_CASE(RunLarge, CLDepthwiseConvolutionLayerQuantizedFixture<uint8_t>, framework::DatasetMode::NIGHTLY,
- combine(combine(combine(combine(combine(datasets::LargeDepthwiseConvolutionLayerDataset(),
- depth_multipliers),
- framework::dataset::make("DataType", DataType::QASYMM8)),
- framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.5f, 10) })),
+ combine(combine(combine(combine(combine(combine(datasets::LargeDepthwiseConvolutionLayerDataset(),
+ depth_multipliers),
+ framework::dataset::make("DataType", DataType::QASYMM8)),
+ framework::dataset::make("SrcQuantizationInfo", { QuantizationInfo(0.5f, 10) })),
+ framework::dataset::make("DstQuantizationInfo", { QuantizationInfo(0.1f, 2) })),
framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
ActivationFunctionsDataset))
{
@@ -578,20 +580,22 @@ FIXTURE_DATA_TEST_CASE(RunLarge, CLDepthwiseConvolutionLayerQuantizedFixture<uin
}
TEST_SUITE(Dilation)
FIXTURE_DATA_TEST_CASE(RunSmall, CLDepthwiseConvolutionLayerQuantizedFixture<uint8_t>, framework::DatasetMode::PRECOMMIT,
- combine(combine(combine(combine(combine(datasets::SmallDepthwiseDilatedConvolutionLayerDataset(),
- depth_multipliers),
- framework::dataset::make("DataType", DataType::QASYMM8)),
- framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.5f, 10) })),
+ combine(combine(combine(combine(combine(combine(datasets::SmallDepthwiseDilatedConvolutionLayerDataset(),
+ depth_multipliers),
+ framework::dataset::make("DataType", DataType::QASYMM8)),
+ framework::dataset::make("SrcQuantizationInfo", { QuantizationInfo(0.5f, 10) })),
+ framework::dataset::make("DstQuantizationInfo", { QuantizationInfo(0.8, 1) })),
framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
ActivationFunctionsDataset))
{
validate(CLAccessor(_target), _reference, tolerance_qasymm8);
}
FIXTURE_DATA_TEST_CASE(RunLarge, CLDepthwiseConvolutionLayerQuantizedFixture<uint8_t>, framework::DatasetMode::NIGHTLY,
- combine(combine(combine(combine(combine(datasets::LargeDepthwiseDilatedConvolutionLayerDataset(),
- depth_multipliers),
- framework::dataset::make("DataType", DataType::QASYMM8)),
- framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.5f, 10) })),
+ combine(combine(combine(combine(combine(combine(datasets::LargeDepthwiseDilatedConvolutionLayerDataset(),
+ depth_multipliers),
+ framework::dataset::make("DataType", DataType::QASYMM8)),
+ framework::dataset::make("SrcQuantizationInfo", { QuantizationInfo(0.5f, 10) })),
+ framework::dataset::make("DstQuantizationInfo", { QuantizationInfo(0.9f, 11) })),
framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
ActivationFunctionsDataset))
{
@@ -601,20 +605,22 @@ TEST_SUITE_END() // Dilation
TEST_SUITE_END() // Generic
TEST_SUITE(W3x3)
FIXTURE_DATA_TEST_CASE(RunSmall, CLDepthwiseConvolutionLayerQuantizedFixture<uint8_t>, framework::DatasetMode::PRECOMMIT,
- combine(combine(combine(combine(combine(datasets::SmallDepthwiseConvolutionLayerDataset3x3(),
- depth_multipliers),
- framework::dataset::make("DataType", DataType::QASYMM8)),
- framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.5f, 10) })),
+ combine(combine(combine(combine(combine(combine(datasets::SmallDepthwiseConvolutionLayerDataset3x3(),
+ depth_multipliers),
+ framework::dataset::make("DataType", DataType::QASYMM8)),
+ framework::dataset::make("SrcQuantizationInfo", { QuantizationInfo(0.5f, 10) })),
+ framework::dataset::make("DstQuantizationInfo", { QuantizationInfo(0.3f, 10) })),
framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
ActivationFunctionsDataset))
{
validate(CLAccessor(_target), _reference, tolerance_qasymm8);
}
FIXTURE_DATA_TEST_CASE(RunLarge, CLDepthwiseConvolutionLayerQuantizedFixture<uint8_t>, framework::DatasetMode::NIGHTLY,
- combine(combine(combine(combine(combine(datasets::LargeDepthwiseConvolutionLayerDataset3x3(),
- depth_multipliers),
- framework::dataset::make("DataType", DataType::QASYMM8)),
- framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.5f, 10) })),
+ combine(combine(combine(combine(combine(combine(datasets::LargeDepthwiseConvolutionLayerDataset3x3(),
+ depth_multipliers),
+ framework::dataset::make("DataType", DataType::QASYMM8)),
+ framework::dataset::make("SrcQuantizationInfo", { QuantizationInfo(0.5f, 10) })),
+ framework::dataset::make("DstQuantizationInfo", { QuantizationInfo(0.5f, 10) })),
framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
ActivationFunctionsDataset))
{
@@ -622,20 +628,22 @@ FIXTURE_DATA_TEST_CASE(RunLarge, CLDepthwiseConvolutionLayerQuantizedFixture<uin
}
TEST_SUITE(Dilation)
FIXTURE_DATA_TEST_CASE(RunSmall, CLDepthwiseConvolutionLayerQuantizedFixture<uint8_t>, framework::DatasetMode::PRECOMMIT,
- combine(combine(combine(combine(combine(datasets::SmallDepthwiseDilatedConvolutionLayerDataset3x3(),
- depth_multipliers),
- framework::dataset::make("DataType", DataType::QASYMM8)),
- framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.5f, 10) })),
+ combine(combine(combine(combine(combine(combine(datasets::SmallDepthwiseDilatedConvolutionLayerDataset3x3(),
+ depth_multipliers),
+ framework::dataset::make("DataType", DataType::QASYMM8)),
+ framework::dataset::make("SrcQuantizationInfo", { QuantizationInfo(0.5f, 10) })),
+ framework::dataset::make("DstQuantizationInfo", { QuantizationInfo(0.5f, 10) })),
framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
ActivationFunctionsDataset))
{
validate(CLAccessor(_target), _reference, tolerance_qasymm8);
}
FIXTURE_DATA_TEST_CASE(RunLarge, CLDepthwiseConvolutionLayerQuantizedFixture<uint8_t>, framework::DatasetMode::NIGHTLY,
- combine(combine(combine(combine(combine(datasets::LargeDepthwiseDilatedConvolutionLayerDataset3x3(),
- depth_multipliers),
- framework::dataset::make("DataType", DataType::QASYMM8)),
- framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.5f, 10) })),
+ combine(combine(combine(combine(combine(combine(datasets::LargeDepthwiseDilatedConvolutionLayerDataset3x3(),
+ depth_multipliers),
+ framework::dataset::make("DataType", DataType::QASYMM8)),
+ framework::dataset::make("SrcQuantizationInfo", { QuantizationInfo(0.5f, 10) })),
+ framework::dataset::make("DstQuantizationInfo", { QuantizationInfo(0.5f, 10) })),
framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
ActivationFunctionsDataset))
{
diff --git a/tests/validation/NEON/DepthwiseConvolutionLayer.cpp b/tests/validation/NEON/DepthwiseConvolutionLayer.cpp
index 8eefec37d5..773ebdeacc 100644
--- a/tests/validation/NEON/DepthwiseConvolutionLayer.cpp
+++ b/tests/validation/NEON/DepthwiseConvolutionLayer.cpp
@@ -488,10 +488,11 @@ TEST_SUITE(Quantized)
TEST_SUITE(QASYMM8)
TEST_SUITE(Generic)
FIXTURE_DATA_TEST_CASE(RunSmall, NEDepthwiseConvolutionLayerQuantizedFixture<uint8_t>, framework::DatasetMode::PRECOMMIT,
- combine(combine(combine(combine(combine(datasets::SmallDepthwiseConvolutionLayerDataset(),
- depth_multipliers),
- framework::dataset::make("DataType", DataType::QASYMM8)),
- framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.5f, 10) })),
+ combine(combine(combine(combine(combine(combine(datasets::SmallDepthwiseConvolutionLayerDataset(),
+ depth_multipliers),
+ framework::dataset::make("DataType", DataType::QASYMM8)),
+ framework::dataset::make("SrcQuantizationInfo", { QuantizationInfo(0.5f, 10) })),
+ framework::dataset::make("DstQuantizationInfo", { QuantizationInfo(0.3f, 4) })),
framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
ActivationFunctionsDataset))
{
@@ -500,20 +501,22 @@ FIXTURE_DATA_TEST_CASE(RunSmall, NEDepthwiseConvolutionLayerQuantizedFixture<uin
TEST_SUITE(Dilation)
FIXTURE_DATA_TEST_CASE(RunSmall, NEDepthwiseConvolutionLayerQuantizedFixture<uint8_t>, framework::DatasetMode::PRECOMMIT,
- combine(combine(combine(combine(combine(datasets::SmallDepthwiseDilatedConvolutionLayerDataset(),
- depth_multipliers),
- framework::dataset::make("DataType", DataType::QASYMM8)),
- framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.5f, 10) })),
+ combine(combine(combine(combine(combine(combine(datasets::SmallDepthwiseDilatedConvolutionLayerDataset(),
+ depth_multipliers),
+ framework::dataset::make("DataType", DataType::QASYMM8)),
+ framework::dataset::make("SrcQuantizationInfo", { QuantizationInfo(0.5f, 10) })),
+ framework::dataset::make("DstQuantizationInfo", { QuantizationInfo(0.8f, 1) })),
framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
ActivationFunctionsDataset))
{
validate(Accessor(_target), _reference, tolerance_qasymm8);
}
FIXTURE_DATA_TEST_CASE(RunLarge, NEDepthwiseConvolutionLayerQuantizedFixture<uint8_t>, framework::DatasetMode::NIGHTLY,
- combine(combine(combine(combine(combine(datasets::LargeDepthwiseDilatedConvolutionLayerDataset(),
- depth_multipliers),
- framework::dataset::make("DataType", DataType::QASYMM8)),
- framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.5f, 10) })),
+ combine(combine(combine(combine(combine(combine(datasets::LargeDepthwiseDilatedConvolutionLayerDataset(),
+ depth_multipliers),
+ framework::dataset::make("DataType", DataType::QASYMM8)),
+ framework::dataset::make("SrcQuantizationInfo", { QuantizationInfo(0.5f, 10) })),
+ framework::dataset::make("DstQuantizationInfo", { QuantizationInfo(0.9f, 11) })),
framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
ActivationFunctionsDataset))
{
@@ -523,41 +526,45 @@ TEST_SUITE_END() //Dilation
TEST_SUITE_END() // Generic
TEST_SUITE(W3x3)
FIXTURE_DATA_TEST_CASE(RunSmall, NEDepthwiseConvolutionLayerQuantizedFixture3x3<uint8_t>, framework::DatasetMode::PRECOMMIT,
- combine(combine(combine(combine(combine(datasets::SmallDepthwiseConvolutionLayerDataset3x3(), depth_multipliers),
- framework::dataset::make("DataType", DataType::QASYMM8)),
- framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.5f, 10) })),
+ combine(combine(combine(combine(combine(combine(datasets::SmallDepthwiseConvolutionLayerDataset3x3(), depth_multipliers),
+ framework::dataset::make("DataType", DataType::QASYMM8)),
+ framework::dataset::make("SrcQuantizationInfo", { QuantizationInfo(0.5f, 10) })),
+ framework::dataset::make("DstQuantizationInfo", { QuantizationInfo(0.3f, 10) })),
framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
ActivationFunctionsDataset))
{
validate(Accessor(_target), _reference, tolerance_qasymm8);
}
FIXTURE_DATA_TEST_CASE(RunOptimizedSmall, NEDepthwiseConvolutionLayerQuantizedFixture3x3<uint8_t>, framework::DatasetMode::PRECOMMIT,
- combine(combine(combine(combine(combine(datasets::SmallOptimizedDepthwiseConvolutionLayerDataset3x3(),
- framework::dataset::make("DepthMultiplier", 1)),
- framework::dataset::make("DataType",
- DataType::QASYMM8)),
- framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.5f, 10) })),
+ combine(combine(combine(combine(combine(combine(datasets::SmallOptimizedDepthwiseConvolutionLayerDataset3x3(),
+ framework::dataset::make("DepthMultiplier", 1)),
+ framework::dataset::make("DataType",
+ DataType::QASYMM8)),
+ framework::dataset::make("SrcQuantizationInfo", { QuantizationInfo(0.5f, 10) })),
+ framework::dataset::make("DstQuantizationInfo", { QuantizationInfo(0.5f, 10) })),
framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
ActivationFunctionsDataset))
{
validate(Accessor(_target), _reference, tolerance_qasymm8);
}
FIXTURE_DATA_TEST_CASE(RunOptimizedLarge, NEDepthwiseConvolutionLayerQuantizedFixture3x3<uint8_t>, framework::DatasetMode::NIGHTLY,
- combine(combine(combine(combine(combine(datasets::LargeOptimizedDepthwiseConvolutionLayerDataset3x3(),
- framework::dataset::make("DepthMultiplier", 1)),
- framework::dataset::make("DataType",
- DataType::QASYMM8)),
- framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.5f, 10) })),
+ combine(combine(combine(combine(combine(combine(datasets::LargeOptimizedDepthwiseConvolutionLayerDataset3x3(),
+ framework::dataset::make("DepthMultiplier", 1)),
+ framework::dataset::make("DataType",
+ DataType::QASYMM8)),
+ framework::dataset::make("SrcQuantizationInfo", { QuantizationInfo(0.5f, 10) })),
+ framework::dataset::make("DstQuantizationInfo", { QuantizationInfo(0.5f, 10) })),
framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
ActivationFunctionsDataset))
{
validate(Accessor(_target), _reference, tolerance_qasymm8);
}
FIXTURE_DATA_TEST_CASE(RunLarge, NEDepthwiseConvolutionLayerQuantizedFixture3x3<uint8_t>, framework::DatasetMode::NIGHTLY,
- combine(combine(combine(combine(combine(datasets::LargeDepthwiseConvolutionLayerDataset3x3(),
- depth_multipliers),
- framework::dataset::make("DataType", DataType::QASYMM8)),
- framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.5f, 10) })),
+ combine(combine(combine(combine(combine(combine(datasets::LargeDepthwiseConvolutionLayerDataset3x3(),
+ depth_multipliers),
+ framework::dataset::make("DataType", DataType::QASYMM8)),
+ framework::dataset::make("SrcQuantizationInfo", { QuantizationInfo(0.5f, 10) })),
+ framework::dataset::make("DstQuantizationInfo", { QuantizationInfo(0.5f, 10) })),
framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
ActivationFunctionsDataset))
{
@@ -567,19 +574,21 @@ FIXTURE_DATA_TEST_CASE(RunLarge, NEDepthwiseConvolutionLayerQuantizedFixture3x3<
TEST_SUITE(Dilation)
FIXTURE_DATA_TEST_CASE(RunSmall, NEDepthwiseConvolutionLayerQuantizedFixture3x3<uint8_t>, framework::DatasetMode::PRECOMMIT,
- combine(combine(combine(combine(combine(datasets::SmallDepthwiseDilatedConvolutionLayerDataset3x3(), depth_multipliers),
- framework::dataset::make("DataType", DataType::QASYMM8)),
- framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.5f, 10) })),
+ combine(combine(combine(combine(combine(combine(datasets::SmallDepthwiseDilatedConvolutionLayerDataset3x3(), depth_multipliers),
+ framework::dataset::make("DataType", DataType::QASYMM8)),
+ framework::dataset::make("SrcQuantizationInfo", { QuantizationInfo(0.5f, 10) })),
+ framework::dataset::make("DstQuantizationInfo", { QuantizationInfo(0.7f, 10) })),
framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
ActivationFunctionsDataset))
{
validate(Accessor(_target), _reference, tolerance_qasymm8);
}
FIXTURE_DATA_TEST_CASE(RunLarge, NEDepthwiseConvolutionLayerQuantizedFixture3x3<uint8_t>, framework::DatasetMode::NIGHTLY,
- combine(combine(combine(combine(combine(datasets::LargeDepthwiseDilatedConvolutionLayerDataset3x3(),
- depth_multipliers),
- framework::dataset::make("DataType", DataType::QASYMM8)),
- framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.5f, 10) })),
+ combine(combine(combine(combine(combine(combine(datasets::LargeDepthwiseDilatedConvolutionLayerDataset3x3(),
+ depth_multipliers),
+ framework::dataset::make("DataType", DataType::QASYMM8)),
+ framework::dataset::make("SrcQuantizationInfo", { QuantizationInfo(0.5f, 10) })),
+ framework::dataset::make("DstQuantizationInfo", { QuantizationInfo(0.5f, 10) })),
framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
ActivationFunctionsDataset))
{
diff --git a/tests/validation/fixtures/DepthwiseConvolutionLayerFixture.h b/tests/validation/fixtures/DepthwiseConvolutionLayerFixture.h
index 9e6dd4bd28..04c073b521 100644
--- a/tests/validation/fixtures/DepthwiseConvolutionLayerFixture.h
+++ b/tests/validation/fixtures/DepthwiseConvolutionLayerFixture.h
@@ -56,10 +56,12 @@ public:
public:
template <typename...>
- void setup(TensorShape in_shape, Size2D kernel_size, PadStrideInfo pad_stride_info, Size2D dilation, unsigned int depth_multiplier, DataType data_type, QuantizationInfo quantization_info,
- DataLayout data_layout, ActivationLayerInfo act_info)
+ void setup(TensorShape in_shape, Size2D kernel_size, PadStrideInfo pad_stride_info, Size2D dilation, unsigned int depth_multiplier, DataType data_type,
+ QuantizationInfo input_quantization_info, QuantizationInfo output_quantization_info, DataLayout data_layout, ActivationLayerInfo act_info)
{
- _quantization_info = quantization_info;
+ _input_quantization_info = input_quantization_info;
+ _output_quantization_info = output_quantization_info;
+
_data_type = data_type;
const DataType bias_data_type = is_data_type_quantized_asymmetric(data_type) ? DataType::S32 : data_type;
@@ -72,8 +74,10 @@ public:
weights_shape.set(2, out_shape.z());
const TensorShape biases_shape(weights_shape[2]);
- _target = compute_target(in_shape, weights_shape, biases_shape, out_shape, pad_stride_info, dilation, depth_multiplier, data_type, bias_data_type, quantization_info, data_layout, act_info);
- _reference = compute_reference(in_shape, weights_shape, biases_shape, out_shape, pad_stride_info, dilation, depth_multiplier, data_type, bias_data_type, quantization_info, act_info);
+ _target = compute_target(in_shape, weights_shape, biases_shape, out_shape, pad_stride_info, dilation, depth_multiplier,
+ data_type, bias_data_type, input_quantization_info, output_quantization_info, data_layout, act_info);
+ _reference = compute_reference(in_shape, weights_shape, biases_shape, out_shape, pad_stride_info, dilation, depth_multiplier,
+ data_type, bias_data_type, input_quantization_info, output_quantization_info, act_info);
}
protected:
@@ -108,7 +112,9 @@ protected:
TensorType compute_target(TensorShape input_shape, TensorShape weights_shape, TensorShape biases_shape, TensorShape output_shape, PadStrideInfo &pad_stride_info, Size2D dilation,
unsigned int depth_multiplier,
- const DataType data_type, const DataType bias_data_type, const QuantizationInfo quantization_info, const DataLayout data_layout, ActivationLayerInfo act_info)
+ const DataType data_type, const DataType bias_data_type,
+ const QuantizationInfo input_quantization_info, const QuantizationInfo output_quantization_info,
+ const DataLayout data_layout, ActivationLayerInfo act_info)
{
if(data_layout == DataLayout::NHWC)
{
@@ -118,10 +124,10 @@ protected:
}
// Create tensors
- TensorType src = create_tensor<TensorType>(input_shape, data_type, 1, quantization_info, data_layout);
- TensorType weights = create_tensor<TensorType>(weights_shape, data_type, 1, quantization_info, data_layout);
- TensorType biases = create_tensor<TensorType>(biases_shape, bias_data_type, 1, quantization_info, data_layout);
- TensorType dst = create_tensor<TensorType>(output_shape, data_type, 1, quantization_info, data_layout);
+ TensorType src = create_tensor<TensorType>(input_shape, data_type, 1, input_quantization_info, data_layout);
+ TensorType weights = create_tensor<TensorType>(weights_shape, data_type, 1, input_quantization_info, data_layout);
+ TensorType biases = create_tensor<TensorType>(biases_shape, bias_data_type, 1, input_quantization_info, data_layout);
+ TensorType dst = create_tensor<TensorType>(output_shape, data_type, 1, output_quantization_info, data_layout);
// Create Depthwise Convolution configure function
FunctionType dwc;
@@ -154,26 +160,30 @@ protected:
return dst;
}
- SimpleTensor<T> compute_reference(const TensorShape &in_shape, const TensorShape &weights_shape, const TensorShape &biases_shape, const TensorShape &out_shape, const PadStrideInfo &pad_stride_info,
+ SimpleTensor<T> compute_reference(const TensorShape &in_shape, const TensorShape &weights_shape, const TensorShape &biases_shape, const TensorShape &out_shape,
+ const PadStrideInfo &pad_stride_info,
const Size2D &dilation, unsigned int depth_multiplier,
- const DataType data_type, const DataType bias_data_type, const QuantizationInfo quantization_info, ActivationLayerInfo act_info)
+ const DataType data_type, const DataType bias_data_type,
+ const QuantizationInfo input_quantization_info, const QuantizationInfo output_quantization_info,
+ ActivationLayerInfo act_info)
{
- SimpleTensor<T> src{ in_shape, data_type, 1, quantization_info };
- SimpleTensor<T> weights{ weights_shape, data_type, 1, quantization_info };
- SimpleTensor<TBias> biases{ biases_shape, bias_data_type, 1, quantization_info };
+ SimpleTensor<T> src{ in_shape, data_type, 1, input_quantization_info };
+ SimpleTensor<T> weights{ weights_shape, data_type, 1, input_quantization_info };
+ SimpleTensor<TBias> biases{ biases_shape, bias_data_type, 1, input_quantization_info };
fill(src, 0);
fill(weights, 1);
fill(biases, 2);
- SimpleTensor<T> depth_out = reference::depthwise_convolution(src, weights, biases, out_shape, pad_stride_info, depth_multiplier, dilation);
+ SimpleTensor<T> depth_out = reference::depthwise_convolution(src, weights, biases, out_shape, pad_stride_info, depth_multiplier, dilation, output_quantization_info);
return (act_info.enabled()) ? reference::activation_layer<T>(depth_out, act_info) : depth_out;
}
TensorType _target{};
SimpleTensor<T> _reference{};
DataType _data_type{};
- QuantizationInfo _quantization_info{};
+ QuantizationInfo _input_quantization_info{};
+ QuantizationInfo _output_quantization_info{};
};
template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
@@ -185,7 +195,7 @@ public:
ActivationLayerInfo act_info)
{
DepthwiseConvolutionLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(in_shape, kernel_size, pad_stride_info, dilation, depth_multiplier,
- data_type, QuantizationInfo(), data_layout, act_info);
+ data_type, QuantizationInfo(), QuantizationInfo(), data_layout, act_info);
}
};
@@ -194,11 +204,11 @@ class DepthwiseConvolutionLayerValidationQuantizedFixture : public DepthwiseConv
{
public:
template <typename...>
- void setup(TensorShape in_shape, Size2D kernel_size, PadStrideInfo pad_stride_info, Size2D dilation, unsigned int depth_multiplier, DataType data_type, QuantizationInfo quantization_info,
- DataLayout data_layout, ActivationLayerInfo act_info)
+ void setup(TensorShape in_shape, Size2D kernel_size, PadStrideInfo pad_stride_info, Size2D dilation, unsigned int depth_multiplier, DataType data_type,
+ QuantizationInfo input_quantization_info, QuantizationInfo output_quantization_info, DataLayout data_layout, ActivationLayerInfo act_info)
{
DepthwiseConvolutionLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(in_shape, kernel_size, pad_stride_info, dilation, depth_multiplier,
- data_type, quantization_info, data_layout, act_info);
+ data_type, input_quantization_info, output_quantization_info, data_layout, act_info);
}
};
} // namespace validation