From d2532e00deebede5e2a6148e79d4aa5c8bae25c5 Mon Sep 17 00:00:00 2001
From: Manuel Bottini
Date: Mon, 19 Aug 2019 13:31:38 +0100
Subject: COMPMID-2597: Checking bias type in NEDeconvolutionLayer::validate
 even when bias == nullptr

Change-Id: If7fb2e6d11e4653208b58ebdbfcc284d6c5a2e9a
Signed-off-by: Manuel Bottini
Reviewed-on: https://review.mlplatform.org/c/1761
Reviewed-by: Michele Di Giorgio
Comments-Addressed: Arm Jenkins
Tested-by: Arm Jenkins
---
 .../NEON/functions/NEDeconvolutionLayer.cpp   | 15 ++--
 tests/validation/CL/DeconvolutionLayer.cpp    | 85 ++++++++++++++--------
 tests/validation/NEON/DeconvolutionLayer.cpp  | 71 +++++++++++-------
 .../fixtures/DeconvolutionLayerFixture.h      | 72 ++++++++++++++----
 4 files changed, 165 insertions(+), 78 deletions(-)

diff --git a/src/runtime/NEON/functions/NEDeconvolutionLayer.cpp b/src/runtime/NEON/functions/NEDeconvolutionLayer.cpp
index 4db1346654..2a09ba4285 100644
--- a/src/runtime/NEON/functions/NEDeconvolutionLayer.cpp
+++ b/src/runtime/NEON/functions/NEDeconvolutionLayer.cpp
@@ -73,13 +73,16 @@ Status NEDeconvolutionLayer::validate(const ITensorInfo *input, const ITensorInf
                                                     info.pad().first, info.pad().second, stride_x, stride_y);
 
     ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, weights);
-    if(is_data_type_quantized_asymmetric(input->data_type()))
+    if(bias != nullptr)
     {
-        ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(bias, 1, DataType::S32);
-    }
-    else
-    {
-        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, bias);
+        if(is_data_type_quantized_asymmetric(input->data_type()))
+        {
+            ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(bias, 1, DataType::S32);
+        }
+        else
+        {
+            ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, bias);
+        }
     }
 
     if(output->tensor_shape().total_size() > 0)
diff --git a/tests/validation/CL/DeconvolutionLayer.cpp b/tests/validation/CL/DeconvolutionLayer.cpp
index f8a677cb34..44b3428c52 100644
--- a/tests/validation/CL/DeconvolutionLayer.cpp
+++ b/tests/validation/CL/DeconvolutionLayer.cpp
@@ -65,6 +65,8 @@ const auto data1x1 = datasets::SmallDeconvolutionShapes() * framework::dataset::
                      * framework::dataset::make("PadY", 0, 1) * framework::dataset::make("NumKernels", { 3 });
 
 const auto data_layouts_dataset = framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC });
+
+const auto add_bias_dataset = framework::dataset::make("AddBias", { true, false });
 } // namespace
 
 TEST_SUITE(CL)
@@ -133,8 +135,9 @@ TEST_SUITE(Float)
 TEST_SUITE(FP32)
 
 TEST_SUITE(W4x4)
-FIXTURE_DATA_TEST_CASE(Run, CLDeconvolutionLayerFixture4x4<float>, framework::DatasetMode::NIGHTLY, combine(combine(data4x4, framework::dataset::make("DataType", DataType::F32)),
-                                                                                                            data_layouts_dataset))
+FIXTURE_DATA_TEST_CASE(Run, CLDeconvolutionLayerFixture4x4<float>, framework::DatasetMode::NIGHTLY, combine(combine(combine(data4x4, framework::dataset::make("DataType", DataType::F32)),
+                                                                                                                    data_layouts_dataset),
+                                                                                                            add_bias_dataset))
 {
     // Validate output
     validate(CLAccessor(_target), _reference, tolerance_fp32);
@@ -142,14 +145,17 @@ FIXTURE_DATA_TEST_CASE(Run, CLDeconvolutionLayerFixture4x4<float>, framework::Da
 TEST_SUITE_END() // W4x4
 
 TEST_SUITE(W3x3)
-FIXTURE_DATA_TEST_CASE(RunSmall, CLDeconvolutionLayerFixture3x3<float>, framework::DatasetMode::PRECOMMIT, combine(combine(data3x3_precommit, framework::dataset::make("DataType", DataType::F32)),
-                                                                                                                   data_layouts_dataset))
+FIXTURE_DATA_TEST_CASE(RunSmall, CLDeconvolutionLayerFixture3x3<float>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(data3x3_precommit, framework::dataset::make("DataType",
DataType::F32)), + data_layouts_dataset), + add_bias_dataset)) { // Validate output validate(CLAccessor(_target), _reference, tolerance_fp32); } -FIXTURE_DATA_TEST_CASE(RunLarge, CLDeconvolutionLayerFixture3x3, framework::DatasetMode::NIGHTLY, combine(combine(data3x3, framework::dataset::make("DataType", DataType::F32)), - data_layouts_dataset)) +FIXTURE_DATA_TEST_CASE(RunLarge, CLDeconvolutionLayerFixture3x3, framework::DatasetMode::NIGHTLY, combine(combine(combine(data3x3, framework::dataset::make("DataType", DataType::F32)), + data_layouts_dataset), + add_bias_dataset)) { // Validate output validate(CLAccessor(_target), _reference, tolerance_fp32); @@ -157,8 +163,10 @@ FIXTURE_DATA_TEST_CASE(RunLarge, CLDeconvolutionLayerFixture3x3, framewor TEST_SUITE_END() // W3x3 TEST_SUITE(W2x2) -FIXTURE_DATA_TEST_CASE(RunSmall, CLDeconvolutionLayerFixture2x2, framework::DatasetMode::PRECOMMIT, combine(combine(data2x2_precommit, framework::dataset::make("DataType", DataType::F32)), - data_layouts_dataset)) +FIXTURE_DATA_TEST_CASE(RunSmall, CLDeconvolutionLayerFixture2x2, framework::DatasetMode::PRECOMMIT, combine(combine(combine(data2x2_precommit, framework::dataset::make("DataType", + DataType::F32)), + data_layouts_dataset), + add_bias_dataset)) { // Validate output validate(CLAccessor(_target), _reference, tolerance_fp32); @@ -166,8 +174,9 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLDeconvolutionLayerFixture2x2, framewor TEST_SUITE_END() // W2x2 TEST_SUITE(W1x1) -FIXTURE_DATA_TEST_CASE(Run, CLDeconvolutionLayerFixture1x1, framework::DatasetMode::NIGHTLY, combine(combine(data1x1, framework::dataset::make("DataType", DataType::F32)), - data_layouts_dataset)) +FIXTURE_DATA_TEST_CASE(Run, CLDeconvolutionLayerFixture1x1, framework::DatasetMode::NIGHTLY, combine(combine(combine(data1x1, framework::dataset::make("DataType", DataType::F32)), + data_layouts_dataset), + add_bias_dataset)) { // Validate output validate(CLAccessor(_target), _reference, tolerance_fp32); @@ -179,7 +188,9 @@ TEST_SUITE_END() // FP32 TEST_SUITE(FP16) TEST_SUITE(W4x4) -FIXTURE_DATA_TEST_CASE(Run, CLDeconvolutionLayerFixture4x4, framework::DatasetMode::NIGHTLY, combine(combine(data4x4, framework::dataset::make("DataType", DataType::F16)), data_layouts_dataset)) +FIXTURE_DATA_TEST_CASE(Run, CLDeconvolutionLayerFixture4x4, framework::DatasetMode::NIGHTLY, combine(combine(combine(data4x4, framework::dataset::make("DataType", DataType::F16)), + data_layouts_dataset), + add_bias_dataset)) { // Validate output validate(CLAccessor(_target), _reference, tolerance_f16, tolerance_num); @@ -187,14 +198,17 @@ FIXTURE_DATA_TEST_CASE(Run, CLDeconvolutionLayerFixture4x4, framework::Dat TEST_SUITE_END() // W4x4 TEST_SUITE(W3x3) -FIXTURE_DATA_TEST_CASE(RunSmall, CLDeconvolutionLayerFixture3x3, framework::DatasetMode::PRECOMMIT, combine(combine(data3x3_precommit, framework::dataset::make("DataType", DataType::F16)), - data_layouts_dataset)) +FIXTURE_DATA_TEST_CASE(RunSmall, CLDeconvolutionLayerFixture3x3, framework::DatasetMode::PRECOMMIT, combine(combine(combine(data3x3_precommit, framework::dataset::make("DataType", + DataType::F16)), + data_layouts_dataset), + add_bias_dataset)) { // Validate output validate(CLAccessor(_target), _reference, tolerance_f16, tolerance_num); } -FIXTURE_DATA_TEST_CASE(RunLarge, CLDeconvolutionLayerFixture3x3, framework::DatasetMode::NIGHTLY, combine(combine(data3x3, framework::dataset::make("DataType", DataType::F16)), - data_layouts_dataset)) +FIXTURE_DATA_TEST_CASE(RunLarge, CLDeconvolutionLayerFixture3x3, 
framework::DatasetMode::NIGHTLY, combine(combine(combine(data3x3, framework::dataset::make("DataType", DataType::F16)), + data_layouts_dataset), + add_bias_dataset)) { // Validate output validate(CLAccessor(_target), _reference, tolerance_f16, tolerance_num); @@ -202,8 +216,10 @@ FIXTURE_DATA_TEST_CASE(RunLarge, CLDeconvolutionLayerFixture3x3, framework TEST_SUITE_END() // W3x3 TEST_SUITE(W2x2) -FIXTURE_DATA_TEST_CASE(RunSmall, CLDeconvolutionLayerFixture2x2, framework::DatasetMode::PRECOMMIT, combine(combine(data2x2_precommit, framework::dataset::make("DataType", DataType::F16)), - data_layouts_dataset)) +FIXTURE_DATA_TEST_CASE(RunSmall, CLDeconvolutionLayerFixture2x2, framework::DatasetMode::PRECOMMIT, combine(combine(combine(data2x2_precommit, framework::dataset::make("DataType", + DataType::F16)), + data_layouts_dataset), + add_bias_dataset)) { // Validate output validate(CLAccessor(_target), _reference, tolerance_f16); @@ -211,7 +227,9 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLDeconvolutionLayerFixture2x2, framework TEST_SUITE_END() // W2x2 TEST_SUITE(W1x1) -FIXTURE_DATA_TEST_CASE(Run, CLDeconvolutionLayerFixture1x1, framework::DatasetMode::NIGHTLY, combine(combine(data1x1, framework::dataset::make("DataType", DataType::F16)), data_layouts_dataset)) +FIXTURE_DATA_TEST_CASE(Run, CLDeconvolutionLayerFixture1x1, framework::DatasetMode::NIGHTLY, combine(combine(combine(data1x1, framework::dataset::make("DataType", DataType::F16)), + data_layouts_dataset), + add_bias_dataset)) { // Validate output validate(CLAccessor(_target), _reference, tolerance_f16, tolerance_num); @@ -237,10 +255,11 @@ TEST_SUITE(Quantized) TEST_SUITE(QASYMM8) TEST_SUITE(W4x4) -FIXTURE_DATA_TEST_CASE(Run, CLDeconvolutionLayerQuantizedFixture4x4, framework::DatasetMode::NIGHTLY, combine(combine(combine(data4x4, framework::dataset::make("DataType", +FIXTURE_DATA_TEST_CASE(Run, CLDeconvolutionLayerQuantizedFixture4x4, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(data4x4, framework::dataset::make("DataType", DataType::QASYMM8)), data_layouts_dataset), - framework::dataset::make("QuantizationInfo", QuantizationInfo(2.f / 255.f, 0)))) + framework::dataset::make("QuantizationInfo", QuantizationInfo(2.f / 255.f, 0))), + add_bias_dataset)) { // Validate output validate(CLAccessor(_target), _reference, tolerance_qasymm8, tolerance_num); @@ -248,18 +267,21 @@ FIXTURE_DATA_TEST_CASE(Run, CLDeconvolutionLayerQuantizedFixture4x4, fr TEST_SUITE_END() // W4x4 TEST_SUITE(W3x3) -FIXTURE_DATA_TEST_CASE(RunSmall, CLDeconvolutionLayerQuantizedFixture3x3, framework::DatasetMode::PRECOMMIT, combine(combine(combine(data3x3_precommit, framework::dataset::make("DataType", - DataType::QASYMM8)), +FIXTURE_DATA_TEST_CASE(RunSmall, CLDeconvolutionLayerQuantizedFixture3x3, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(data3x3_precommit, + framework::dataset::make("DataType", + DataType::QASYMM8)), data_layouts_dataset), - framework::dataset::make("QuantizationInfo", QuantizationInfo(2.f / 255.f, 0)))) + framework::dataset::make("QuantizationInfo", QuantizationInfo(2.f / 255.f, 0))), + add_bias_dataset)) { // Validate output validate(CLAccessor(_target), _reference, tolerance_qasymm8, tolerance_num); } -FIXTURE_DATA_TEST_CASE(RunLarge, CLDeconvolutionLayerQuantizedFixture3x3, framework::DatasetMode::NIGHTLY, combine(combine(combine(data3x3, framework::dataset::make("DataType", +FIXTURE_DATA_TEST_CASE(RunLarge, CLDeconvolutionLayerQuantizedFixture3x3, framework::DatasetMode::NIGHTLY, 
combine(combine(combine(combine(data3x3, framework::dataset::make("DataType", DataType::QASYMM8)), data_layouts_dataset), - framework::dataset::make("QuantizationInfo", QuantizationInfo(2.f / 255.f, 0)))) + framework::dataset::make("QuantizationInfo", QuantizationInfo(2.f / 255.f, 0))), + add_bias_dataset)) { // Validate output validate(CLAccessor(_target), _reference, tolerance_qasymm8, tolerance_num); @@ -267,10 +289,12 @@ FIXTURE_DATA_TEST_CASE(RunLarge, CLDeconvolutionLayerQuantizedFixture3x3, framework::DatasetMode::PRECOMMIT, combine(combine(combine(data2x2_precommit, framework::dataset::make("DataType", - DataType::QASYMM8)), +FIXTURE_DATA_TEST_CASE(RunSmall, CLDeconvolutionLayerQuantizedFixture2x2, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(data2x2_precommit, + framework::dataset::make("DataType", + DataType::QASYMM8)), data_layouts_dataset), - framework::dataset::make("QuantizationInfo", QuantizationInfo(2.f / 255.f, 0)))) + framework::dataset::make("QuantizationInfo", QuantizationInfo(2.f / 255.f, 0))), + add_bias_dataset)) { // Validate output validate(CLAccessor(_target), _reference, tolerance_fp32); @@ -278,10 +302,11 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLDeconvolutionLayerQuantizedFixture2x2, framework::DatasetMode::NIGHTLY, combine(combine(combine(data1x1, framework::dataset::make("DataType", +FIXTURE_DATA_TEST_CASE(Run, CLDeconvolutionLayerQuantizedFixture1x1, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(data1x1, framework::dataset::make("DataType", DataType::QASYMM8)), data_layouts_dataset), - framework::dataset::make("QuantizationInfo", QuantizationInfo(2.f / 255.f, 0)))) + framework::dataset::make("QuantizationInfo", QuantizationInfo(2.f / 255.f, 0))), + add_bias_dataset)) { // Validate output validate(CLAccessor(_target), _reference, tolerance_qasymm8, tolerance_num); diff --git a/tests/validation/NEON/DeconvolutionLayer.cpp b/tests/validation/NEON/DeconvolutionLayer.cpp index 34a1bf5a45..727f501393 100644 --- a/tests/validation/NEON/DeconvolutionLayer.cpp +++ b/tests/validation/NEON/DeconvolutionLayer.cpp @@ -43,8 +43,8 @@ namespace validation { namespace { -constexpr AbsoluteTolerance tolerance_fp32(0.001f); /**< Tolerance for floating point tests */ -constexpr AbsoluteTolerance tolerance_qasymm8(0.0); /**< Tolerance value for comparing reference's output against implementation's output for quantized data types */ +constexpr AbsoluteTolerance tolerance_fp32(0.001f); /**< Tolerance for floating point tests */ +constexpr AbsoluteTolerance tolerance_qasymm8(1.0f); /**< Tolerance value for comparing reference's output against implementation's output for quantized data types */ #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC const RelativeTolerance tolerance_fp16(half_float::half(0.2f)); /**< Relative tolerance value for comparing reference's output against implementation's output for DataType::F16 */ #endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC*/ @@ -63,6 +63,8 @@ const auto data1x1 = datasets::SmallDeconvolutionShapes() * framework::dataset:: * framework::dataset::make("PadY", 0, 1) * framework::dataset::make("NumKernels", { 3 }); const auto data_layouts_dataset = framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC }); + +const auto add_bias_dataset = framework::dataset::make("AddBias", { true, false }); } // namespace TEST_SUITE(NEON) @@ -166,30 +168,35 @@ using NEDeconvolutionLayerFixture1x1 = DeconvolutionValidationFixture, framework::DatasetMode::NIGHTLY, combine(combine(data4x4, 
framework::dataset::make("DataType", DataType::F32)), - data_layouts_dataset)) +FIXTURE_DATA_TEST_CASE(Run, NEDeconvolutionLayerFixture4x4, framework::DatasetMode::NIGHTLY, combine(combine(combine(data4x4, framework::dataset::make("DataType", DataType::F32)), + data_layouts_dataset), + add_bias_dataset)) { // Validate output validate(Accessor(_target), _reference, tolerance_fp32); } TEST_SUITE_END() // W4x4 TEST_SUITE(W3x3) -FIXTURE_DATA_TEST_CASE(RunSmall, NEDeconvolutionLayerFixture3x3, framework::DatasetMode::PRECOMMIT, combine(combine(data3x3_precommit, framework::dataset::make("DataType", DataType::F32)), - data_layouts_dataset)) +FIXTURE_DATA_TEST_CASE(RunSmall, NEDeconvolutionLayerFixture3x3, framework::DatasetMode::PRECOMMIT, combine(combine(combine(data3x3_precommit, framework::dataset::make("DataType", + DataType::F32)), + data_layouts_dataset), + add_bias_dataset)) { // Validate output validate(Accessor(_target), _reference, tolerance_fp32); } -FIXTURE_DATA_TEST_CASE(RunLarge, NEDeconvolutionLayerFixture3x3, framework::DatasetMode::NIGHTLY, combine(combine(data3x3, framework::dataset::make("DataType", DataType::F32)), - data_layouts_dataset)) +FIXTURE_DATA_TEST_CASE(RunLarge, NEDeconvolutionLayerFixture3x3, framework::DatasetMode::NIGHTLY, combine(combine(combine(data3x3, framework::dataset::make("DataType", DataType::F32)), + data_layouts_dataset), + add_bias_dataset)) { // Validate output validate(Accessor(_target), _reference, tolerance_fp32); } TEST_SUITE_END() // W3x3 TEST_SUITE(W1x1) -FIXTURE_DATA_TEST_CASE(Run, NEDeconvolutionLayerFixture1x1, framework::DatasetMode::NIGHTLY, combine(combine(data1x1, framework::dataset::make("DataType", DataType::F32)), - data_layouts_dataset)) +FIXTURE_DATA_TEST_CASE(Run, NEDeconvolutionLayerFixture1x1, framework::DatasetMode::NIGHTLY, combine(combine(combine(data1x1, framework::dataset::make("DataType", DataType::F32)), + data_layouts_dataset), + add_bias_dataset)) { // Validate output validate(Accessor(_target), _reference, tolerance_fp32); @@ -200,30 +207,35 @@ TEST_SUITE_END() // FP32 #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC TEST_SUITE(FP16) TEST_SUITE(W4x4) -FIXTURE_DATA_TEST_CASE(Run, NEDeconvolutionLayerFixture4x4, framework::DatasetMode::NIGHTLY, combine(combine(data4x4, framework::dataset::make("DataType", DataType::F16)), - data_layouts_dataset)) +FIXTURE_DATA_TEST_CASE(Run, NEDeconvolutionLayerFixture4x4, framework::DatasetMode::NIGHTLY, combine(combine(combine(data4x4, framework::dataset::make("DataType", DataType::F16)), + data_layouts_dataset), + add_bias_dataset)) { // Validate output validate(Accessor(_target), _reference, tolerance_fp16); } TEST_SUITE_END() // W4x4 TEST_SUITE(W3x3) -FIXTURE_DATA_TEST_CASE(RunSmall, NEDeconvolutionLayerFixture3x3, framework::DatasetMode::PRECOMMIT, combine(combine(data3x3_precommit, framework::dataset::make("DataType", DataType::F16)), - data_layouts_dataset)) +FIXTURE_DATA_TEST_CASE(RunSmall, NEDeconvolutionLayerFixture3x3, framework::DatasetMode::PRECOMMIT, combine(combine(combine(data3x3_precommit, framework::dataset::make("DataType", + DataType::F16)), + data_layouts_dataset), + add_bias_dataset)) { // Validate output validate(Accessor(_target), _reference, tolerance_fp16); } -FIXTURE_DATA_TEST_CASE(RunLarge, NEDeconvolutionLayerFixture3x3, framework::DatasetMode::NIGHTLY, combine(combine(data3x3, framework::dataset::make("DataType", DataType::F16)), - data_layouts_dataset)) +FIXTURE_DATA_TEST_CASE(RunLarge, NEDeconvolutionLayerFixture3x3, framework::DatasetMode::NIGHTLY, 
combine(combine(combine(data3x3, framework::dataset::make("DataType", DataType::F16)), + data_layouts_dataset), + add_bias_dataset)) { // Validate output validate(Accessor(_target), _reference, tolerance_fp16); } TEST_SUITE_END() // W3x3 TEST_SUITE(W1x1) -FIXTURE_DATA_TEST_CASE(Run, NEDeconvolutionLayerFixture1x1, framework::DatasetMode::NIGHTLY, combine(combine(data1x1, framework::dataset::make("DataType", DataType::F16)), - data_layouts_dataset)) +FIXTURE_DATA_TEST_CASE(Run, NEDeconvolutionLayerFixture1x1, framework::DatasetMode::NIGHTLY, combine(combine(combine(data1x1, framework::dataset::make("DataType", DataType::F16)), + data_layouts_dataset), + add_bias_dataset)) { // Validate output validate(Accessor(_target), _reference, tolerance_fp16); @@ -247,10 +259,11 @@ TEST_SUITE(Quantized) TEST_SUITE(QASYMM8) TEST_SUITE(W4x4) -FIXTURE_DATA_TEST_CASE(Run, NEDeconvolutionLayerQuantizedFixture4x4, framework::DatasetMode::NIGHTLY, combine(combine(combine(data4x4, framework::dataset::make("DataType", +FIXTURE_DATA_TEST_CASE(Run, NEDeconvolutionLayerQuantizedFixture4x4, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(data4x4, framework::dataset::make("DataType", DataType::QASYMM8)), data_layouts_dataset), - framework::dataset::make("QuantizationInfo", QuantizationInfo(2.f / 255.f, 10)))) + framework::dataset::make("QuantizationInfo", QuantizationInfo(2.f / 255.f, 10))), + add_bias_dataset)) { // Validate output validate(Accessor(_target), _reference, tolerance_qasymm8, tolerance_num); @@ -258,18 +271,21 @@ FIXTURE_DATA_TEST_CASE(Run, NEDeconvolutionLayerQuantizedFixture4x4, fr TEST_SUITE_END() // W4x4 TEST_SUITE(W3x3) -FIXTURE_DATA_TEST_CASE(RunSmall, NEDeconvolutionLayerQuantizedFixture3x3, framework::DatasetMode::PRECOMMIT, combine(combine(combine(data3x3_precommit, framework::dataset::make("DataType", - DataType::QASYMM8)), +FIXTURE_DATA_TEST_CASE(RunSmall, NEDeconvolutionLayerQuantizedFixture3x3, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(data3x3_precommit, + framework::dataset::make("DataType", + DataType::QASYMM8)), data_layouts_dataset), - framework::dataset::make("QuantizationInfo", QuantizationInfo(2.f / 255.f, 10)))) + framework::dataset::make("QuantizationInfo", QuantizationInfo(2.f / 255.f, 10))), + add_bias_dataset)) { // Validate output validate(Accessor(_target), _reference, tolerance_qasymm8, tolerance_num); } -FIXTURE_DATA_TEST_CASE(RunLarge, NEDeconvolutionLayerQuantizedFixture3x3, framework::DatasetMode::NIGHTLY, combine(combine(combine(data3x3, framework::dataset::make("DataType", +FIXTURE_DATA_TEST_CASE(RunLarge, NEDeconvolutionLayerQuantizedFixture3x3, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(data3x3, framework::dataset::make("DataType", DataType::QASYMM8)), data_layouts_dataset), - framework::dataset::make("QuantizationInfo", QuantizationInfo(2.f / 255.f, 10)))) + framework::dataset::make("QuantizationInfo", QuantizationInfo(2.f / 255.f, 10))), + add_bias_dataset)) { // Validate output validate(Accessor(_target), _reference, tolerance_qasymm8, tolerance_num); @@ -277,10 +293,11 @@ FIXTURE_DATA_TEST_CASE(RunLarge, NEDeconvolutionLayerQuantizedFixture3x3, framework::DatasetMode::NIGHTLY, combine(combine(combine(data1x1, framework::dataset::make("DataType", +FIXTURE_DATA_TEST_CASE(Run, NEDeconvolutionLayerQuantizedFixture1x1, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(data1x1, framework::dataset::make("DataType", DataType::QASYMM8)), data_layouts_dataset), - 
framework::dataset::make("QuantizationInfo", QuantizationInfo(2.f / 255.f, 10)))) + framework::dataset::make("QuantizationInfo", QuantizationInfo(2.f / 255.f, 10))), + add_bias_dataset)) { // Validate output validate(Accessor(_target), _reference, tolerance_qasymm8, tolerance_num); diff --git a/tests/validation/fixtures/DeconvolutionLayerFixture.h b/tests/validation/fixtures/DeconvolutionLayerFixture.h index 8f15f04f0e..9f90f07c97 100644 --- a/tests/validation/fixtures/DeconvolutionLayerFixture.h +++ b/tests/validation/fixtures/DeconvolutionLayerFixture.h @@ -51,15 +51,15 @@ public: public: template void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, - DataType data_type, DataLayout data_layout, QuantizationInfo quantization_info) + DataType data_type, DataLayout data_layout, QuantizationInfo quantization_info, bool add_bias) { _data_type = data_type; _bias_data_type = is_data_type_quantized_asymmetric(data_type) ? DataType::S32 : data_type; _data_layout = data_layout; _quantization_info = quantization_info; - _target = compute_target(input_shape, weights_shape, bias_shape, output_shape, info); - _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info); + _target = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, add_bias); + _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, add_bias); } protected: @@ -93,8 +93,30 @@ protected: } } + template + void fill_zeros(U &&tensor) + { + switch(tensor.data_type()) + { + case DataType::S32: + { + const int32_t value = static_cast(tensor.quantization_info().uniform().offset); + library->fill_tensor_value(tensor, value); + break; + } + case DataType::F16: + library->fill_tensor_value(tensor, static_cast(0.0f)); + break; + case DataType::F32: + library->fill_tensor_value(tensor, static_cast(0.0f)); + break; + default: + ARM_COMPUTE_ERROR("Not supported"); + } + } + TensorType compute_target(TensorShape input_shape, TensorShape weights_shape, const TensorShape bias_shape, TensorShape output_shape, - const PadStrideInfo &info) + const PadStrideInfo &info, bool add_bias) { if(_data_layout == DataLayout::NHWC) { @@ -111,28 +133,40 @@ protected: // Create and configure function FunctionType conv; - conv.configure(&src, &weights, &bias, &dst, info); + conv.configure(&src, &weights, add_bias ? 
&bias : nullptr, &dst, info); ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS); ARM_COMPUTE_EXPECT(weights.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS); + if(add_bias) + { + ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS); + } ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS); // Allocate tensors src.allocator()->allocate(); weights.allocator()->allocate(); - bias.allocator()->allocate(); + if(add_bias) + { + bias.allocator()->allocate(); + } dst.allocator()->allocate(); ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS); ARM_COMPUTE_EXPECT(!weights.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!bias.info()->is_resizable(), framework::LogLevel::ERRORS); + if(add_bias) + { + ARM_COMPUTE_EXPECT(!bias.info()->is_resizable(), framework::LogLevel::ERRORS); + } ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS); // Fill tensors fill(AccessorType(src), 0); fill(AccessorType(weights), 1); - fill(AccessorType(bias), 2); + if(add_bias) + { + fill(AccessorType(bias), 2); + } // Compute DeconvolutionLayer function conv.run(); @@ -141,7 +175,7 @@ protected: } SimpleTensor compute_reference(const TensorShape &input_shape, const TensorShape &weights_shape, const TensorShape &bias_shape, const TensorShape &output_shape, - const PadStrideInfo &info) + const PadStrideInfo &info, bool add_bias) { // Create reference SimpleTensor src{ input_shape, _data_type, 1, _quantization_info }; @@ -151,7 +185,15 @@ protected: // Fill reference fill(src, 0); fill(weights, 1); - fill(bias, 2); + + if(add_bias) + { + fill(bias, 2); + } + else + { + fill_zeros(bias); + } return reference::deconvolution_layer(src, weights, bias, output_shape, info); } @@ -170,7 +212,7 @@ class DeconvolutionValidationFixture : public DeconvolutionLayerFixtureBase void setup(TensorShape input_shape, unsigned int sx, unsigned int sy, unsigned int padx, unsigned int pady, - unsigned int num_kernels, DataType data_type, DataLayout data_layout) + unsigned int num_kernels, DataType data_type, DataLayout data_layout, bool add_bias) { ARM_COMPUTE_ERROR_ON_MSG(kernel_size_x != kernel_size_y, "Only square kernels supported"); const TensorShape weights_shape(kernel_size_x, kernel_size_y, input_shape.z(), num_kernels); @@ -180,7 +222,7 @@ public: TensorInfo input_info(input_shape, 1, data_type); TensorInfo weights_info(weights_shape, 1, data_type); TensorShape output_shape = compute_deconvolution_output_shape(out_dim, input_info, weights_info); - DeconvolutionLayerFixtureBase::setup(input_shape, weights_shape, bias_shape, output_shape, info, data_type, data_layout, QuantizationInfo()); + DeconvolutionLayerFixtureBase::setup(input_shape, weights_shape, bias_shape, output_shape, info, data_type, data_layout, QuantizationInfo(), add_bias); } }; @@ -190,7 +232,7 @@ class DeconvolutionValidationQuantizedFixture : public DeconvolutionLayerFixture public: template void setup(TensorShape input_shape, unsigned int sx, unsigned int sy, unsigned int padx, unsigned int pady, - unsigned int num_kernels, DataType data_type, DataLayout data_layout, QuantizationInfo quantization_info) + unsigned int num_kernels, DataType data_type, DataLayout data_layout, QuantizationInfo quantization_info, bool add_bias) { ARM_COMPUTE_ERROR_ON_MSG(kernel_size_x != kernel_size_y, "Only square kernels supported"); const TensorShape 
@@ -200,7 +242,7 @@ public:
         TensorInfo input_info(input_shape, 1, data_type, quantization_info);
         TensorInfo weights_info(weights_shape, 1, data_type, quantization_info);
         TensorShape output_shape = compute_deconvolution_output_shape(out_dim, input_info, weights_info);
-        DeconvolutionLayerFixtureBase<TensorType, AccessorType, FunctionType, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, data_type, data_layout, quantization_info);
+        DeconvolutionLayerFixtureBase<TensorType, AccessorType, FunctionType, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, data_type, data_layout, quantization_info, add_bias);
     }
 };
 
-- 
cgit v1.2.1
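
The library-side change above means NEDeconvolutionLayer::validate() only type-checks the bias when one is actually supplied, so a no-bias deconvolution can be validated with a nullptr bias instead of dereferencing the null pointer inside the quantized/float checks. The snippet below is a minimal standalone sketch of that call; it is not part of the patch, and the tensor shapes, strides and the main() harness are illustrative assumptions.

#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/NEON/functions/NEDeconvolutionLayer.h"

using namespace arm_compute;

int main()
{
    // Illustrative NCHW shapes: 8x8 input with 16 channels, 3x3 kernels, 4 output channels.
    const TensorInfo input(TensorShape(8U, 8U, 16U), 1, DataType::F32);
    const TensorInfo weights(TensorShape(3U, 3U, 16U, 4U), 1, DataType::F32);
    // Transposed convolution with stride 1 and no padding: output side = (8 - 1) * 1 + 3 = 10.
    const TensorInfo output(TensorShape(10U, 10U, 4U), 1, DataType::F32);
    const PadStrideInfo deconv_info(1, 1, 0, 0);

    // bias == nullptr: with this patch the bias data-type checks are skipped entirely.
    const Status status = NEDeconvolutionLayer::validate(&input, &weights, nullptr, &output, deconv_info);
    return bool(status) ? 0 : 1;
}

The AddBias=false test cases added in this patch drive the same no-bias path through the fixtures, which pass add_bias ? &bias : nullptr to configure().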