From 6a2b6e835459ee91dbdf86be8dfdec0bc2421a84 Mon Sep 17 00:00:00 2001 From: Manuel Bottini Date: Mon, 25 Feb 2019 13:50:11 +0000 Subject: COMPMID-2010: Add support for QASYMM8 in NEArithmeticSubtractionKernel Change-Id: Ica65d5a13f5670d525bbb961a870b23a21d093d9 Signed-off-by: Manuel Bottini Reviewed-on: https://review.mlplatform.org/c/807 Tested-by: Arm Jenkins Reviewed-by: Pablo Marquez --- .../NEON/kernels/NEArithmeticSubtractionKernel.h | 41 ++++++----- .../NEON/functions/NEArithmeticSubtraction.h | 20 +++--- .../NEON/kernels/NEArithmeticSubtractionKernel.cpp | 54 ++++++++++++-- tests/validation/CL/ArithmeticAddition.cpp | 4 +- tests/validation/CL/ArithmeticSubtraction.cpp | 4 +- tests/validation/NEON/ArithmeticAddition.cpp | 6 +- tests/validation/NEON/ArithmeticSubtraction.cpp | 83 +++++++++++++++++++--- .../fixtures/ArithmeticOperationsFixture.h | 16 ++++- 8 files changed, 177 insertions(+), 51 deletions(-) diff --git a/arm_compute/core/NEON/kernels/NEArithmeticSubtractionKernel.h b/arm_compute/core/NEON/kernels/NEArithmeticSubtractionKernel.h index 64ad6e072d..937cee4a9d 100644 --- a/arm_compute/core/NEON/kernels/NEArithmeticSubtractionKernel.h +++ b/arm_compute/core/NEON/kernels/NEArithmeticSubtractionKernel.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-2018 ARM Limited. + * Copyright (c) 2016-2019 ARM Limited. * * SPDX-License-Identifier: MIT * @@ -56,26 +56,29 @@ public: * * Valid configurations (Input1,Input2) -> Output : * - * - (U8,U8) -> U8 - * - (U8,U8) -> S16 - * - (S16,U8) -> S16 - * - (U8,S16) -> S16 - * - (S16,S16) -> S16 - * - (F16,F16) -> F16 - * - (F32,F32) -> F32 + * - (U8,U8) -> U8 + * - (U8,U8) -> S16 + * - (QASYMM8, QASYMM8) -> QASYMM8 + * - (S16,U8) -> S16 + * - (U8,S16) -> S16 + * - (S16,S16) -> S16 + * - (F16,F16) -> F16 + * - (F32,F32) -> F32 * - * @param[in] input1 An input tensor. Data types supported: U8/S16/F16/F32 - * @param[in] input2 An input tensor. Data types supported: U8/S16/F16/F32 - * @param[out] output The output tensor. 
Data types supported: U8/S16/F16/F32. - * @param[in] policy Overflow policy. + * @param[in] input1 An input tensor. Data types supported: U8/QASYMM8/S16/F16/F32 + * @param[in] input2 An input tensor. Data types supported: U8/QASYMM8/S16/F16/F32 + * @param[out] output The output tensor. Data types supported: U8/QASYMM8/S16/F16/F32. + * @param[in] policy Overflow policy. Convert policy cannot be WRAP if datatype is QASYMM8 */ void configure(const ITensor *input1, const ITensor *input2, ITensor *output, ConvertPolicy policy); /** Static function to check if given info will lead to a valid configuration of @ref NEArithmeticSubtractionKernel * - * @param[in] input1 First tensor input. Data types supported: U8/S16/F16/F32 - * @param[in] input2 Second tensor input. Data types supported: U8/S16/F16/F32 - * @param[in] output Output tensor. Data types supported: U8/S16/F16/F32 - * @param[in] policy Policy to use to handle overflow. + * @note Convert policy cannot be WRAP if datatype is QASYMM8 + * + * @param[in] input1 First tensor input. Data types supported: U8/QASYMM8/S16/F16/F32 + * @param[in] input2 Second tensor input. Data types supported: U8/QASYMM8/S16/F16/F32 + * @param[in] output Output tensor. Data types supported: U8/QASYMM8/S16/F16/F32 + * @param[in] policy Policy to use to handle overflow. Convert policy cannot be WRAP if datatype is QASYMM8 * * @return a status */ @@ -88,9 +91,9 @@ public: private: /** Common signature for all the specialised sub functions * - * @param[in] input1 An input tensor. Data types supported: U8/S16/F16/F32 - * @param[in] input2 An input tensor. Data types supported: U8/S16/F16/F32 - * @param[out] output The output tensor. Data types supported: U8/S16/F16/F32. + * @param[in] input1 An input tensor. Data types supported: U8/QASYMM8/S16/F16/F32 + * @param[in] input2 An input tensor. Data types supported: U8/QASYMM8/S16/F16/F32 + * @param[out] output The output tensor. Data types supported: U8/QASYMM8/S16/F16/F32. 
* @param[in] window Region on which to execute the kernel. */ using SubFunction = void(const ITensor *input1, const ITensor *input2, ITensor *output, const Window &window); diff --git a/arm_compute/runtime/NEON/functions/NEArithmeticSubtraction.h b/arm_compute/runtime/NEON/functions/NEArithmeticSubtraction.h index 541756cd2c..e6ff4458f0 100644 --- a/arm_compute/runtime/NEON/functions/NEArithmeticSubtraction.h +++ b/arm_compute/runtime/NEON/functions/NEArithmeticSubtraction.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-2018 ARM Limited. + * Copyright (c) 2016-2019 ARM Limited. * * SPDX-License-Identifier: MIT * @@ -33,7 +33,7 @@ class ITensor; /** Basic function to run @ref NEArithmeticSubtractionKernel * - * @note The tensor data type for the inputs must be U8/S16/F16/F32. + * @note The tensor data type for the inputs must be U8/QASYMM8/S16/F16/F32. * @note The function performs an arithmetic subtraction between two tensors. * * This function calls the following kernels: @@ -45,18 +45,18 @@ class NEArithmeticSubtraction : public INESimpleFunction public: /** Initialise the kernel's inputs, output and conversion policy. * - * @param[in] input1 First tensor input. Data types supported: U8/S16/F16/F32 - * @param[in] input2 Second tensor input. Data types supported: U8/S16/F16/F32 - * @param[out] output Output tensor. Data types supported: U8/S16/F16/F32 - * @param[in] policy Policy to use to handle overflow. + * @param[in] input1 First tensor input. Data types supported: U8/QASYMM8/S16/F16/F32 + * @param[in] input2 Second tensor input. Data types supported: U8/QASYMM8/S16/F16/F32 + * @param[out] output Output tensor. Data types supported: U8/QASYMM8/S16/F16/F32 + * @param[in] policy Policy to use to handle overflow. Convert policy cannot be WRAP if datatype is QASYMM8. 
*/ void configure(ITensor *input1, ITensor *input2, ITensor *output, ConvertPolicy policy); /** Static function to check if given info will lead to a valid configuration of @ref NEArithmeticSubtraction * - * @param[in] input1 First tensor input. Data types supported: U8/S16/F16/F32 - * @param[in] input2 Second tensor input. Data types supported: U8/S16/F16/F32 - * @param[in] output Output tensor. Data types supported: U8/S16/F16/F32 - * @param[in] policy Policy to use to handle overflow. + * @param[in] input1 First tensor input. Data types supported: U8/QASYMM8/S16/F16/F32 + * @param[in] input2 Second tensor input. Data types supported: U8/QASYMM8/S16/F16/F32 + * @param[in] output Output tensor. Data types supported: U8/QASYMM8/S16/F16/F32 + * @param[in] policy Policy to use to handle overflow. Convert policy cannot be WRAP if datatype is QASYMM8 * * @return a status */ diff --git a/src/core/NEON/kernels/NEArithmeticSubtractionKernel.cpp b/src/core/NEON/kernels/NEArithmeticSubtractionKernel.cpp index ff8fb84958..ff5893de96 100644 --- a/src/core/NEON/kernels/NEArithmeticSubtractionKernel.cpp +++ b/src/core/NEON/kernels/NEArithmeticSubtractionKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-2018 ARM Limited. + * Copyright (c) 2016-2019 ARM Limited. 
 * * SPDX-License-Identifier: MIT * @@ -27,6 +27,7 @@ #include "arm_compute/core/Error.h" #include "arm_compute/core/Helpers.h" #include "arm_compute/core/ITensor.h" +#include "arm_compute/core/NEON/NEAsymm.h" #include "arm_compute/core/NEON/NEFixedPoint.h" #include "arm_compute/core/TensorInfo.h" #include "arm_compute/core/Validate.h" @@ -80,6 +81,34 @@ void sub_saturate_U8_U8_U8(const ITensor *in1, const ITensor *in2, ITensor *out, input1, input2, output); } +void sub_saturate_QAYSMM8_QAYSMM8_QAYSMM8(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window) +{ + Iterator input1(in1, window.broadcast_if_dimension_le_one(in1->info()->tensor_shape())); + Iterator input2(in2, window.broadcast_if_dimension_le_one(in2->info()->tensor_shape())); + Iterator output(out, window); + + execute_window_loop(window, [&](const Coordinates & id) + { + const float32x4x4_t ta1 = vdequantize(vld1q_u8(reinterpret_cast<const uint8_t *>(input1.ptr())), in1->info()->quantization_info()); + const float32x4x4_t ta2 = vdequantize(vld1q_u8(reinterpret_cast<const uint8_t *>(input2.ptr())), in2->info()->quantization_info()); + + const float32x4x4_t ta3 = + { + { + vsubq_f32(ta1.val[0], ta2.val[0]), + vsubq_f32(ta1.val[1], ta2.val[1]), + vsubq_f32(ta1.val[2], ta2.val[2]), + vsubq_f32(ta1.val[3], ta2.val[3]), + } + }; + + const uint8x16_t result = vquantize(ta3, out->info()->quantization_info()); + + vst1q_u8(reinterpret_cast<uint8_t *>(output.ptr()), result); + }, + input1, input2, output); +} + void sub_wrap_S16_S16_S16(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window) { Iterator input1(in1, window.broadcast_if_dimension_le_one(in1->info()->tensor_shape())); Iterator input2(in2, window.broadcast_if_dimension_le_one(in2->info()->tensor_shape())); @@ -324,18 +353,34 @@ inline Status validate_arguments(const ITensorInfo &input1, const ITensorInfo &i { ARM_COMPUTE_UNUSED(policy); ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(&input1); - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&input1, 1, DataType::U8, DataType::S16, DataType::F16, DataType::F32); - 
ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&input2, 1, DataType::U8, DataType::S16, DataType::F16, DataType::F32); - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&output, 1, DataType::U8, DataType::S16, DataType::F16, DataType::F32); + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&input1, 1, DataType::U8, DataType::QASYMM8, DataType::S16, DataType::F16, DataType::F32); + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&input2, 1, DataType::U8, DataType::QASYMM8, DataType::S16, DataType::F16, DataType::F32); + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&output, 1, DataType::U8, DataType::QASYMM8, DataType::S16, DataType::F16, DataType::F32); const TensorShape out_shape = TensorShape::broadcast_shape(input1.tensor_shape(), input2.tensor_shape()); ARM_COMPUTE_RETURN_ERROR_ON_MSG(out_shape.total_size() == 0, "Inputs are not broadcast compatible"); + ARM_COMPUTE_RETURN_ERROR_ON_MSG( + !(input1.data_type() == DataType::U8 && input2.data_type() == DataType::U8) + && !(input1.data_type() == DataType::QASYMM8 && input2.data_type() == DataType::QASYMM8) + && !(input1.data_type() == DataType::U8 && input2.data_type() == DataType::U8) + && !(input1.data_type() == DataType::U8 && input2.data_type() == DataType::S16) + && !(input1.data_type() == DataType::S16 && input2.data_type() == DataType::U8) + && !(input1.data_type() == DataType::S16 && input2.data_type() == DataType::S16) + && !(input1.data_type() == DataType::F32 && input2.data_type() == DataType::F32) + && !(input1.data_type() == DataType::F16 && input2.data_type() == DataType::F16), + "You called subtract with the wrong image formats"); + + ARM_COMPUTE_RETURN_ERROR_ON_MSG( + input1.data_type() == DataType::QASYMM8 && input2.data_type() == DataType::QASYMM8 && policy == ConvertPolicy::WRAP, + "Convert policy cannot be WRAP if datatype is QASYMM8"); + // Validate in case of configured output if(output.total_size() > 0) { ARM_COMPUTE_RETURN_ERROR_ON_MSG( !(input1.data_type() == 
DataType::U8 && input2.data_type() == DataType::U8 && output.data_type() == DataType::U8) + && !(input1.data_type() == DataType::QASYMM8 && input2.data_type() == DataType::QASYMM8 && output.data_type() == DataType::QASYMM8) && !(input1.data_type() == DataType::U8 && input2.data_type() == DataType::U8 && output.data_type() == DataType::S16) && !(input1.data_type() == DataType::U8 && input2.data_type() == DataType::S16 && output.data_type() == DataType::S16) && !(input1.data_type() == DataType::S16 && input2.data_type() == DataType::U8 && output.data_type() == DataType::S16) @@ -413,6 +458,7 @@ void NEArithmeticSubtractionKernel::configure(const ITensor *input1, const ITens { "sub_wrap_U8_U8_S16", &sub_wrap_U8_U8_S16 }, { "sub_saturate_U8_U8_U8", &sub_saturate_U8_U8_U8 }, { "sub_saturate_U8_U8_S16", &sub_saturate_U8_U8_S16 }, + { "sub_saturate_QASYMM8_QASYMM8_QASYMM8", &sub_saturate_QAYSMM8_QAYSMM8_QAYSMM8 }, { "sub_wrap_U8_S16_S16", &sub_wrap_U8_S16_S16 }, { "sub_wrap_S16_U8_S16", &sub_wrap_S16_U8_S16 }, { "sub_saturate_U8_S16_S16", &sub_saturate_U8_S16_S16 }, diff --git a/tests/validation/CL/ArithmeticAddition.cpp b/tests/validation/CL/ArithmeticAddition.cpp index 04c9c8587a..bed04af9e5 100644 --- a/tests/validation/CL/ArithmeticAddition.cpp +++ b/tests/validation/CL/ArithmeticAddition.cpp @@ -130,7 +130,7 @@ using CLArithmeticAdditionQuantizedFixture = ArithmeticAdditionValidationQuantiz TEST_SUITE(Quantized) TEST_SUITE(QASYMM8) -DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(datasets::SmallShapes(), framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })), +DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(datasets::SmallShapes(), framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE })), shape, policy) { // Create tensors @@ -155,7 +155,7 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(datasets::Sma FIXTURE_DATA_TEST_CASE(RunSmall, 
CLArithmeticAdditionQuantizedFixture, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(combine(datasets::SmallShapes(), ArithmeticAdditionQASYMM8Dataset), - framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })), + framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE })), framework::dataset::make("QuantizationInfo", { QuantizationInfo(5.f / 255.f, 20) })), framework::dataset::make("QuantizationInfo", { QuantizationInfo(2.f / 255.f, 10) })), framework::dataset::make("QuantizationInfo", { QuantizationInfo(1.f / 255.f, 5) })) diff --git a/tests/validation/CL/ArithmeticSubtraction.cpp b/tests/validation/CL/ArithmeticSubtraction.cpp index a8b54fdcb8..796486bf02 100644 --- a/tests/validation/CL/ArithmeticSubtraction.cpp +++ b/tests/validation/CL/ArithmeticSubtraction.cpp @@ -131,7 +131,7 @@ using CLArithmeticSubtractionQuantizedFixture = ArithmeticSubtractionValidationQ TEST_SUITE(Quantized) TEST_SUITE(QASYMM8) -DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(datasets::SmallShapes(), framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })), +DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(datasets::SmallShapes(), framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE })), shape, policy) { // Create tensors @@ -156,7 +156,7 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(datasets::Sma FIXTURE_DATA_TEST_CASE(RunSmall, CLArithmeticSubtractionQuantizedFixture, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(combine(datasets::SmallShapes(), ArithmeticSubtractionQASYMM8Dataset), - framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })), + framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE })), framework::dataset::make("QuantizationInfo", { QuantizationInfo(5.f / 255.f, 20) })), framework::dataset::make("QuantizationInfo", { 
QuantizationInfo(2.f / 255.f, 10) })), framework::dataset::make("QuantizationInfo", { QuantizationInfo(1.f / 255.f, 5) })) diff --git a/tests/validation/NEON/ArithmeticAddition.cpp b/tests/validation/NEON/ArithmeticAddition.cpp index bad0b33d37..4a72dfc923 100644 --- a/tests/validation/NEON/ArithmeticAddition.cpp +++ b/tests/validation/NEON/ArithmeticAddition.cpp @@ -248,7 +248,7 @@ using NEArithmeticAdditionQuantizedFixture = ArithmeticAdditionValidationQuantiz TEST_SUITE(Quantized) TEST_SUITE(QASYMM8) -DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(datasets::SmallShapes(), framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })), +DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(datasets::SmallShapes(), framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE })), shape, policy) { // Create tensors @@ -274,7 +274,7 @@ FIXTURE_DATA_TEST_CASE(RunSmall, NEArithmeticAdditionQuantizedFixture, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(combine(datasets::SmallShapes(), ArithmeticAdditionQASYMM8Dataset), - framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })), + framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE })), framework::dataset::make("QuantizationInfo", { QuantizationInfo(5.f / 255.f, 20) })), framework::dataset::make("QuantizationInfo", { QuantizationInfo(2.f / 255.f, 10) })), framework::dataset::make("QuantizationInfo", { QuantizationInfo(1.f / 255.f, 5) }))) @@ -282,7 +282,7 @@ FIXTURE_DATA_TEST_CASE(RunSmall, // Validate output #ifdef __aarch64__ validate(Accessor(_target), _reference); -#else //__aarch64__ +#else //__aarch64__ validate(Accessor(_target), _reference, tolerance_qasymm8); #endif //__aarch64__ } diff --git a/tests/validation/NEON/ArithmeticSubtraction.cpp b/tests/validation/NEON/ArithmeticSubtraction.cpp index 83ebf34867..2fc5448dc2 100644 --- 
a/tests/validation/NEON/ArithmeticSubtraction.cpp +++ b/tests/validation/NEON/ArithmeticSubtraction.cpp @@ -44,17 +44,29 @@ namespace validation namespace { /** Input data sets **/ -const auto ArithmeticSubtractionU8Dataset = combine(combine(framework::dataset::make("DataType", DataType::U8), framework::dataset::make("DataType", DataType::U8)), - framework::dataset::make("DataType", - DataType::U8)); -const auto ArithmeticSubtractionS16Dataset = combine(combine(framework::dataset::make("DataType", { DataType::U8, DataType::S16 }), framework::dataset::make("DataType", DataType::S16)), +const auto ArithmeticSubtractionQASYMM8Dataset = combine(combine(framework::dataset::make("DataType", DataType::QASYMM8), + framework::dataset::make("DataType", DataType::QASYMM8)), + framework::dataset::make("DataType", DataType::QASYMM8)); + +const auto ArithmeticSubtractionU8Dataset = combine(combine(framework::dataset::make("DataType", DataType::U8), + framework::dataset::make("DataType", DataType::U8)), + framework::dataset::make("DataType", DataType::U8)); + +const auto ArithmeticSubtractionS16Dataset = combine(combine(framework::dataset::make("DataType", { DataType::U8, DataType::S16 }), + framework::dataset::make("DataType", DataType::S16)), framework::dataset::make("DataType", DataType::S16)); #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC -const auto ArithmeticSubtractionFP16Dataset = combine(combine(framework::dataset::make("DataType", DataType::F16), framework::dataset::make("DataType", DataType::F16)), +const auto ArithmeticSubtractionFP16Dataset = combine(combine(framework::dataset::make("DataType", DataType::F16), + framework::dataset::make("DataType", DataType::F16)), framework::dataset::make("DataType", DataType::F16)); #endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */ -const auto ArithmeticSubtractionFP32Dataset = combine(combine(framework::dataset::make("DataType", DataType::F32), framework::dataset::make("DataType", DataType::F32)), +const auto 
ArithmeticSubtractionFP32Dataset = combine(combine(framework::dataset::make("DataType", DataType::F32), + framework::dataset::make("DataType", DataType::F32)), framework::dataset::make("DataType", DataType::F32)); + +const auto ArithmeticSubtractionQuantizationInfoDataset = combine(combine(framework::dataset::make("QuantizationInfoIn1", { QuantizationInfo(10, 120) }), + framework::dataset::make("QuantizationInfoIn2", { QuantizationInfo(20, 110) })), + framework::dataset::make("QuantizationInfoOut", { QuantizationInfo(15, 125) })); } // namespace TEST_SUITE(NEON) @@ -65,29 +77,41 @@ using NEArithmeticSubtractionFixture = ArithmeticSubtractionValidationFixtureset_is_resizable(false), &input2_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false), ConvertPolicy::WRAP)) == expected, framework::LogLevel::ERRORS); + ARM_COMPUTE_EXPECT(bool(NEArithmeticSubtraction::validate(&input1_info.clone()->set_is_resizable(false), &input2_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false), policy)) == expected, framework::LogLevel::ERRORS); } // clang-format on // *INDENT-ON* @@ -124,6 +148,45 @@ FIXTURE_DATA_TEST_CASE(RunSmall, NEArithmeticSubtractionFixture, framew } TEST_SUITE_END() // U8 +using NEArithmeticSubtractionQuantFixture = ArithmeticSubtractionQuantValidationFixture; + +TEST_SUITE(Quantized) +TEST_SUITE(QASYMM8) +DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(datasets::SmallShapes(), framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE })), + shape, policy) +{ + // Create tensors + Tensor ref_src1 = create_tensor(shape, DataType::QASYMM8); + Tensor ref_src2 = create_tensor(shape, DataType::QASYMM8); + Tensor dst = create_tensor(shape, DataType::QASYMM8); + + // Create and Configure function + NEArithmeticSubtraction sub; + sub.configure(&ref_src1, &ref_src2, &dst, policy); + + // Validate valid region + const ValidRegion valid_region = shape_to_valid_region(shape); + 
validate(dst.info()->valid_region(), valid_region); + + // Validate padding + const PaddingSize padding = PaddingCalculator(shape.x(), 16).required_padding(); + validate(ref_src1.info()->padding(), padding); + validate(ref_src2.info()->padding(), padding); + validate(dst.info()->padding(), padding); +} + +FIXTURE_DATA_TEST_CASE(RunSmall, NEArithmeticSubtractionQuantFixture, framework::DatasetMode::PRECOMMIT, combine(combine(combine( + datasets::SmallShapes(), + ArithmeticSubtractionQASYMM8Dataset), + framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE })), + ArithmeticSubtractionQuantizationInfoDataset)) +{ + // Validate output + validate(Accessor(_target), _reference); +} +TEST_SUITE_END() // QASYMM8 +TEST_SUITE_END() // Quantized + TEST_SUITE(S16) DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(datasets::SmallShapes(), framework::dataset::make("DataType", { DataType::U8, DataType::S16 })), framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })), diff --git a/tests/validation/fixtures/ArithmeticOperationsFixture.h b/tests/validation/fixtures/ArithmeticOperationsFixture.h index fb46a5185e..76f241cedb 100644 --- a/tests/validation/fixtures/ArithmeticOperationsFixture.h +++ b/tests/validation/fixtures/ArithmeticOperationsFixture.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2018 ARM Limited. + * Copyright (c) 2017-2019 ARM Limited. 
* * SPDX-License-Identifier: MIT * @@ -168,6 +168,20 @@ public: } }; +template +class ArithmeticSubtractionQuantValidationFixture : public ArithmeticOperationGenericFixture +{ +public: + template + void setup(const TensorShape &shape, DataType data_type0, DataType data_type1, DataType output_data_type, ConvertPolicy convert_policy, + QuantizationInfo in1_qua_info, QuantizationInfo in2_qua_info, QuantizationInfo out_qua_info) + { + ArithmeticOperationGenericFixture::setup(reference::ArithmeticOperation::SUB, shape, shape, + data_type0, data_type1, output_data_type, convert_policy, + in1_qua_info, in2_qua_info, out_qua_info); + } +}; + template class ArithmeticSubtractionValidationFixture : public ArithmeticOperationGenericFixture { -- cgit v1.2.1