From 81f0d15d6840a0ae8ef571114555a26da74c4a43 Mon Sep 17 00:00:00 2001
From: Michele Di Giorgio
Date: Tue, 11 Jul 2017 15:00:52 +0100
Subject: COMPMID-444: Add support for QS8/QS16 NEON Arithmetic Add/Sub/Mul.

Change-Id: Ia482498688ca1884272b5062e3415e736e03d36f
Reviewed-on: http://mpd-gerrit.cambridge.arm.com/80448
Reviewed-by: Georgios Pinitas
Tested-by: Kaizen
---
 tests/validation/NEON/ArithmeticAddition.cpp      | 79 +++++++++++++++++++---
 tests/validation/NEON/ArithmeticSubtraction.cpp   | 81 ++++++++++++++++++++---
 tests/validation/NEON/PixelWiseMultiplication.cpp | 48 +++++++++++++-
 tests/validation/Reference.cpp                    | 16 ++---
 tests/validation/Reference.h                      | 26 ++++----
 5 files changed, 210 insertions(+), 40 deletions(-)

diff --git a/tests/validation/NEON/ArithmeticAddition.cpp b/tests/validation/NEON/ArithmeticAddition.cpp
index 65c3295e18..6feb15ad34 100644
--- a/tests/validation/NEON/ArithmeticAddition.cpp
+++ b/tests/validation/NEON/ArithmeticAddition.cpp
@@ -51,20 +51,21 @@ namespace
 {
 /** Compute Neon arithmetic addition function.
  *
- * @param[in] shape  Shape of the input and output tensors.
- * @param[in] dt_in0 Data type of first input tensor.
- * @param[in] dt_in1 Data type of second input tensor.
- * @param[in] dt_out Data type of the output tensor.
- * @param[in] policy Overflow policy of the operation.
+ * @param[in] shape                Shape of the input and output tensors.
+ * @param[in] dt_in0               Data type of first input tensor.
+ * @param[in] dt_in1               Data type of second input tensor.
+ * @param[in] dt_out               Data type of the output tensor.
+ * @param[in] policy               Overflow policy of the operation.
+ * @param[in] fixed_point_position (Optional) Fixed point position that expresses the number of bits for the fractional part of the number when the tensor's data type is QS8 or QS16 (default = 0).
  *
  * @return Computed output tensor.
  */
-Tensor compute_arithmetic_addition(const TensorShape &shape, DataType dt_in0, DataType dt_in1, DataType dt_out, ConvertPolicy policy)
+Tensor compute_arithmetic_addition(const TensorShape &shape, DataType dt_in0, DataType dt_in1, DataType dt_out, ConvertPolicy policy, int fixed_point_position = 0)
 {
     // Create tensors
-    Tensor src1 = create_tensor(shape, dt_in0);
-    Tensor src2 = create_tensor(shape, dt_in1);
-    Tensor dst  = create_tensor(shape, dt_out);
+    Tensor src1 = create_tensor(shape, dt_in0, 1, fixed_point_position);
+    Tensor src2 = create_tensor(shape, dt_in1, 1, fixed_point_position);
+    Tensor dst  = create_tensor(shape, dt_out, 1, fixed_point_position);
 
     // Create and configure function
     NEArithmeticAddition add;
@@ -184,6 +185,66 @@ BOOST_DATA_TEST_CASE(RunLarge, LargeShapes() * boost::unit_test::data::make({ Da
 }
 BOOST_AUTO_TEST_SUITE_END()
 
+BOOST_AUTO_TEST_SUITE(Quantized)
+BOOST_AUTO_TEST_SUITE(QS8)
+BOOST_TEST_DECORATOR(*boost::unit_test::label("precommit"))
+BOOST_DATA_TEST_CASE(RunSmall, SmallShapes() * ConvertPolicies() * boost::unit_test::data::xrange(1, 7),
+                     shape, policy, fixed_point_position)
+{
+    // Compute function
+    Tensor dst = compute_arithmetic_addition(shape, DataType::QS8, DataType::QS8, DataType::QS8, policy, fixed_point_position);
+
+    // Compute reference
+    RawTensor ref_dst = Reference::compute_reference_arithmetic_addition(shape, DataType::QS8, DataType::QS8, DataType::QS8, policy, fixed_point_position);
+
+    // Validate output
+    validate(NEAccessor(dst), ref_dst);
+}
+BOOST_TEST_DECORATOR(*boost::unit_test::label("nightly"))
+BOOST_DATA_TEST_CASE(RunLarge, LargeShapes() * ConvertPolicies() * boost::unit_test::data::xrange(1, 7),
+                     shape, policy, fixed_point_position)
+{
+    // Compute function
+    Tensor dst = compute_arithmetic_addition(shape, DataType::QS8, DataType::QS8, DataType::QS8, policy, fixed_point_position);
+
+    // Compute reference
+    RawTensor ref_dst = Reference::compute_reference_arithmetic_addition(shape, DataType::QS8, DataType::QS8, DataType::QS8, policy, fixed_point_position);
+
+    // Validate output
+    validate(NEAccessor(dst), ref_dst);
+}
+BOOST_AUTO_TEST_SUITE_END()
+
+BOOST_AUTO_TEST_SUITE(QS16)
+BOOST_TEST_DECORATOR(*boost::unit_test::label("precommit"))
+BOOST_DATA_TEST_CASE(RunSmall, SmallShapes() * ConvertPolicies() * boost::unit_test::data::xrange(1, 15),
+                     shape, policy, fixed_point_position)
+{
+    // Compute function
+    Tensor dst = compute_arithmetic_addition(shape, DataType::QS16, DataType::QS16, DataType::QS16, policy, fixed_point_position);
+
+    // Compute reference
+    RawTensor ref_dst = Reference::compute_reference_arithmetic_addition(shape, DataType::QS16, DataType::QS16, DataType::QS16, policy, fixed_point_position);
+
+    // Validate output
+    validate(NEAccessor(dst), ref_dst);
+}
+BOOST_TEST_DECORATOR(*boost::unit_test::label("nightly"))
+BOOST_DATA_TEST_CASE(RunLarge, LargeShapes() * ConvertPolicies() * boost::unit_test::data::xrange(1, 15),
+                     shape, policy, fixed_point_position)
+{
+    // Compute function
+    Tensor dst = compute_arithmetic_addition(shape, DataType::QS16, DataType::QS16, DataType::QS16, policy, fixed_point_position);
+
+    // Compute reference
+    RawTensor ref_dst = Reference::compute_reference_arithmetic_addition(shape, DataType::QS16, DataType::QS16, DataType::QS16, policy, fixed_point_position);
+
+    // Validate output
+    validate(NEAccessor(dst), ref_dst);
+}
+BOOST_AUTO_TEST_SUITE_END()
+BOOST_AUTO_TEST_SUITE_END()
+
 #ifdef ARM_COMPUTE_ENABLE_FP16
 BOOST_AUTO_TEST_SUITE(F16)
 BOOST_DATA_TEST_CASE(RunSmall, SmallShapes(), shape)
diff --git a/tests/validation/NEON/ArithmeticSubtraction.cpp b/tests/validation/NEON/ArithmeticSubtraction.cpp
index 37240faaf6..54cd9f04ba 100644
--- a/tests/validation/NEON/ArithmeticSubtraction.cpp
+++ b/tests/validation/NEON/ArithmeticSubtraction.cpp
@@ -51,20 +51,21 @@ namespace
 {
 /** Compute Neon arithmetic subtraction function.
  *
- * @param[in] shape  Shape of the input and output tensors.
- * @param[in] dt_in0 Data type of first input tensor.
- * @param[in] dt_in1 Data type of second input tensor.
- * @param[in] dt_out Data type of the output tensor.
- * @param[in] policy Overflow policy of the operation.
+ * @param[in] shape                Shape of the input and output tensors.
+ * @param[in] dt_in0               Data type of first input tensor.
+ * @param[in] dt_in1               Data type of second input tensor.
+ * @param[in] dt_out               Data type of the output tensor.
+ * @param[in] policy               Overflow policy of the operation.
+ * @param[in] fixed_point_position (Optional) Fixed point position that expresses the number of bits for the fractional part of the number when the tensor's data type is QS8 or QS16 (default = 0).
  *
  * @return Computed output tensor.
  */
-Tensor compute_arithmetic_subtraction(const TensorShape &shape, DataType dt_in0, DataType dt_in1, DataType dt_out, ConvertPolicy policy)
+Tensor compute_arithmetic_subtraction(const TensorShape &shape, DataType dt_in0, DataType dt_in1, DataType dt_out, ConvertPolicy policy, int fixed_point_position = 0)
 {
     // Create tensors
-    Tensor src1 = create_tensor(shape, dt_in0);
-    Tensor src2 = create_tensor(shape, dt_in1);
-    Tensor dst  = create_tensor(shape, dt_out);
+    Tensor src1 = create_tensor(shape, dt_in0, 1, fixed_point_position);
+    Tensor src2 = create_tensor(shape, dt_in1, 1, fixed_point_position);
+    Tensor dst  = create_tensor(shape, dt_out, 1, fixed_point_position);
 
     // Create and configure function
     NEArithmeticSubtraction sub;
@@ -184,7 +185,67 @@ BOOST_DATA_TEST_CASE(RunLarge, LargeShapes() * boost::unit_test::data::make({ Da
 }
 BOOST_AUTO_TEST_SUITE_END()
 
-BOOST_AUTO_TEST_SUITE(F32)
+BOOST_AUTO_TEST_SUITE(Quantized)
+BOOST_AUTO_TEST_SUITE(QS8)
+BOOST_TEST_DECORATOR(*boost::unit_test::label("precommit"))
+BOOST_DATA_TEST_CASE(RunSmall, SmallShapes() * ConvertPolicies() * boost::unit_test::data::xrange(1, 7),
+                     shape, policy, fixed_point_position)
+{
+    // Compute function
+    Tensor dst = compute_arithmetic_subtraction(shape, DataType::QS8, DataType::QS8, DataType::QS8, policy, fixed_point_position);
+
+    // Compute reference
+    RawTensor ref_dst = Reference::compute_reference_arithmetic_subtraction(shape, DataType::QS8, DataType::QS8, DataType::QS8, policy, fixed_point_position);
+
+    // Validate output
+    validate(NEAccessor(dst), ref_dst);
+}
+BOOST_TEST_DECORATOR(*boost::unit_test::label("nightly"))
+BOOST_DATA_TEST_CASE(RunLarge, LargeShapes() * ConvertPolicies() * boost::unit_test::data::xrange(1, 7),
+                     shape, policy, fixed_point_position)
+{
+    // Compute function
+    Tensor dst = compute_arithmetic_subtraction(shape, DataType::QS8, DataType::QS8, DataType::QS8, policy, fixed_point_position);
+
+    // Compute reference
+    RawTensor ref_dst = Reference::compute_reference_arithmetic_subtraction(shape, DataType::QS8, DataType::QS8, DataType::QS8, policy, fixed_point_position);
+
+    // Validate output
+    validate(NEAccessor(dst), ref_dst);
+}
+BOOST_AUTO_TEST_SUITE_END()
+
+BOOST_AUTO_TEST_SUITE(QS16)
+BOOST_TEST_DECORATOR(*boost::unit_test::label("precommit"))
+BOOST_DATA_TEST_CASE(RunSmall, SmallShapes() * ConvertPolicies() * boost::unit_test::data::xrange(1, 15),
+                     shape, policy, fixed_point_position)
+{
+    // Compute function
+    Tensor dst = compute_arithmetic_subtraction(shape, DataType::QS16, DataType::QS16, DataType::QS16, policy, fixed_point_position);
+
+    // Compute reference
+    RawTensor ref_dst = Reference::compute_reference_arithmetic_subtraction(shape, DataType::QS16, DataType::QS16, DataType::QS16, policy, fixed_point_position);
+
+    // Validate output
+    validate(NEAccessor(dst), ref_dst);
+}
+BOOST_TEST_DECORATOR(*boost::unit_test::label("nightly"))
+BOOST_DATA_TEST_CASE(RunLarge, LargeShapes() * ConvertPolicies() * boost::unit_test::data::xrange(1, 15),
+                     shape, policy, fixed_point_position)
+{
+    // Compute function
+    Tensor dst = compute_arithmetic_subtraction(shape, DataType::QS16, DataType::QS16, DataType::QS16, policy, fixed_point_position);
+
+    // Compute reference
+    RawTensor ref_dst = Reference::compute_reference_arithmetic_subtraction(shape, DataType::QS16, DataType::QS16, DataType::QS16, policy, fixed_point_position);
+
+    // Validate output
+    validate(NEAccessor(dst), ref_dst);
+}
+BOOST_AUTO_TEST_SUITE_END()
+BOOST_AUTO_TEST_SUITE_END()
+
+BOOST_AUTO_TEST_SUITE(Float)
 BOOST_TEST_DECORATOR(*boost::unit_test::label("precommit") * boost::unit_test::label("nightly"))
 BOOST_DATA_TEST_CASE(Configuration, (SmallShapes() + LargeShapes()) * boost::unit_test::data::make({ ConvertPolicy::SATURATE, ConvertPolicy::WRAP }),
                      shape, policy)
diff --git a/tests/validation/NEON/PixelWiseMultiplication.cpp b/tests/validation/NEON/PixelWiseMultiplication.cpp
index 26ea38a52b..4bc4d6ca06 100644
--- a/tests/validation/NEON/PixelWiseMultiplication.cpp
+++ b/tests/validation/NEON/PixelWiseMultiplication.cpp
@@ -334,7 +334,7 @@ BOOST_DATA_TEST_CASE(RunSmall, SmallShapes() * (1.f / 255.f) * ConvertPolicies()
 BOOST_AUTO_TEST_SUITE_END()
 #endif /* ARM_COMPUTE_ENABLE_FP16 */
 
-BOOST_AUTO_TEST_SUITE(Float)
+BOOST_AUTO_TEST_SUITE(F32)
 BOOST_AUTO_TEST_SUITE(Scale255)
 BOOST_TEST_DECORATOR(*boost::unit_test::label("precommit") * boost::unit_test::label("nightly"))
 BOOST_DATA_TEST_CASE(Configuration, (SmallShapes() + LargeShapes()) * (1.f / 255.f) * ConvertPolicies()
@@ -428,6 +428,7 @@ BOOST_AUTO_TEST_SUITE_END()
 BOOST_AUTO_TEST_SUITE_END()
 
 BOOST_AUTO_TEST_SUITE(Quantized)
+BOOST_AUTO_TEST_SUITE(QS8)
 BOOST_TEST_DECORATOR(*boost::unit_test::label("precommit"))
 BOOST_DATA_TEST_CASE(RunSmall, SmallShapes() * DataType::QS8 *ConvertPolicies() * RoundingPolicy::TO_ZERO * boost::unit_test::data::xrange(1, 7),
                      shape, dt, convert_policy, rounding_policy, fixed_point_position)
@@ -441,6 +442,51 @@ BOOST_DATA_TEST_CASE(RunSmall, SmallShapes() * DataType::QS8 *ConvertPolicies()
     // Validate output
     validate(NEAccessor(dst), ref_dst);
 }
+
+BOOST_TEST_DECORATOR(*boost::unit_test::label("nightly"))
+BOOST_DATA_TEST_CASE(RunLarge, LargeShapes() * DataType::QS8 *ConvertPolicies() * RoundingPolicy::TO_ZERO * boost::unit_test::data::xrange(1, 7),
+                     shape, dt, convert_policy, rounding_policy, fixed_point_position)
+{
+    // Compute function
+    Tensor dst = compute_pixel_wise_multiplication(shape, dt, dt, dt, 1.f, convert_policy, rounding_policy);
+
+    // Compute reference
+    RawTensor ref_dst = Reference::compute_reference_pixel_wise_multiplication(shape, dt, dt, dt, 1.f, convert_policy, rounding_policy);
+
+    // Validate output
+    validate(NEAccessor(dst), ref_dst);
+}
+BOOST_AUTO_TEST_SUITE_END()
+
+BOOST_AUTO_TEST_SUITE(QS16)
+BOOST_TEST_DECORATOR(*boost::unit_test::label("precommit"))
+BOOST_DATA_TEST_CASE(RunSmall, SmallShapes() * DataType::QS16 *ConvertPolicies() * RoundingPolicy::TO_ZERO * boost::unit_test::data::xrange(1, 15),
+                     shape, dt, convert_policy, rounding_policy, fixed_point_position)
+{
+    // Compute function
+    Tensor dst = compute_pixel_wise_multiplication(shape, dt, dt, dt, 1.f, convert_policy, rounding_policy, fixed_point_position);
+
+    // Compute reference
+    RawTensor ref_dst = Reference::compute_reference_fixed_point_pixel_wise_multiplication(shape, dt, dt, dt, 1.f, fixed_point_position, convert_policy, rounding_policy);
+
+    // Validate output
+    validate(NEAccessor(dst), ref_dst);
+}
+
+BOOST_TEST_DECORATOR(*boost::unit_test::label("nightly"))
+BOOST_DATA_TEST_CASE(RunLarge, LargeShapes() * DataType::QS16 *ConvertPolicies() * RoundingPolicy::TO_ZERO * boost::unit_test::data::xrange(1, 15),
+                     shape, dt, convert_policy, rounding_policy, fixed_point_position)
+{
+    // Compute function
+    Tensor dst = compute_pixel_wise_multiplication(shape, dt, dt, dt, 1.f, convert_policy, rounding_policy);
+
+    // Compute reference
+    RawTensor ref_dst = Reference::compute_reference_pixel_wise_multiplication(shape, dt, dt, dt, 1.f, convert_policy, rounding_policy);
+
+    // Validate output
+    validate(NEAccessor(dst), ref_dst);
+}
+BOOST_AUTO_TEST_SUITE_END()
 BOOST_AUTO_TEST_SUITE_END()
 BOOST_AUTO_TEST_SUITE_END()
diff --git a/tests/validation/Reference.cpp b/tests/validation/Reference.cpp
index 1b941870ba..65705b17de 100644
--- a/tests/validation/Reference.cpp
+++ b/tests/validation/Reference.cpp
@@ -168,12 +168,12 @@ RawTensor Reference::compute_reference_accumulate_weighted(const TensorShape &sh
     return ref_dst;
 }
 
-RawTensor Reference::compute_reference_arithmetic_addition(const TensorShape &shape, DataType dt_in0, DataType dt_in1, DataType dt_out, ConvertPolicy convert_policy)
+RawTensor Reference::compute_reference_arithmetic_addition(const TensorShape &shape, DataType dt_in0, DataType dt_in1, DataType dt_out, ConvertPolicy convert_policy, int fixed_point_position)
 {
     // Create reference
-    RawTensor ref_src1 = library->get(shape, dt_in0);
-    RawTensor ref_src2 = library->get(shape, dt_in1);
-    RawTensor ref_dst  = library->get(shape, dt_out);
+    RawTensor ref_src1 = library->get(shape, dt_in0, 1, fixed_point_position);
+    RawTensor ref_src2 = library->get(shape, dt_in1, 1, fixed_point_position);
+    RawTensor ref_dst  = library->get(shape, dt_out, 1, fixed_point_position);
 
     // Fill reference
     library->fill_tensor_uniform(ref_src1, 0);
@@ -185,12 +185,12 @@ RawTensor Reference::compute_reference_arithmetic_addition(const TensorShape &sh
     return ref_dst;
 }
 
-RawTensor Reference::compute_reference_arithmetic_subtraction(const TensorShape &shape, DataType dt_in0, DataType dt_in1, DataType dt_out, ConvertPolicy convert_policy)
+RawTensor Reference::compute_reference_arithmetic_subtraction(const TensorShape &shape, DataType dt_in0, DataType dt_in1, DataType dt_out, ConvertPolicy convert_policy, int fixed_point_position)
 {
     // Create reference
-    RawTensor ref_src1 = library->get(shape, dt_in0);
-    RawTensor ref_src2 = library->get(shape, dt_in1);
-    RawTensor ref_dst  = library->get(shape, dt_out);
+    RawTensor ref_src1 = library->get(shape, dt_in0, 1, fixed_point_position);
+    RawTensor ref_src2 = library->get(shape, dt_in1, 1, fixed_point_position);
+    RawTensor ref_dst  = library->get(shape, dt_out, 1, fixed_point_position);
 
     // Fill reference
     library->fill_tensor_uniform(ref_src1, 0);
diff --git a/tests/validation/Reference.h b/tests/validation/Reference.h
index 902d04d151..3ad9814439 100644
--- a/tests/validation/Reference.h
+++ b/tests/validation/Reference.h
@@ -107,26 +107,28 @@ public:
     static RawTensor compute_reference_accumulate_weighted(const TensorShape &shape, float alpha);
     /** Compute reference arithmetic addition.
      *
-     * @param[in] shape          Shape of the input and output tensors.
-     * @param[in] dt_in0         Data type of first input tensor.
-     * @param[in] dt_in1         Data type of second input tensor.
-     * @param[in] dt_out         Data type of the output tensor.
-     * @param[in] convert_policy Overflow policy of the operation.
+     * @param[in] shape                Shape of the input and output tensors.
+     * @param[in] dt_in0               Data type of first input tensor.
+     * @param[in] dt_in1               Data type of second input tensor.
+     * @param[in] dt_out               Data type of the output tensor.
+     * @param[in] convert_policy       Overflow policy of the operation.
+     * @param[in] fixed_point_position (Optional) Number of bits for the fractional part of the fixed point numbers
      *
      * @return Computed raw tensor.
      */
-    static RawTensor compute_reference_arithmetic_addition(const TensorShape &shape, DataType dt_in0, DataType dt_in1, DataType dt_out, ConvertPolicy convert_policy);
+    static RawTensor compute_reference_arithmetic_addition(const TensorShape &shape, DataType dt_in0, DataType dt_in1, DataType dt_out, ConvertPolicy convert_policy, int fixed_point_position = 0);
     /** Compute reference arithmetic subtraction.
      *
-     * @param[in] shape          Shape of the input and output tensors.
-     * @param[in] dt_in0         Data type of first input tensor.
-     * @param[in] dt_in1         Data type of second input tensor.
-     * @param[in] dt_out         Data type of the output tensor.
-     * @param[in] convert_policy Overflow policy of the operation.
+     * @param[in] shape                Shape of the input and output tensors.
+     * @param[in] dt_in0               Data type of first input tensor.
+     * @param[in] dt_in1               Data type of second input tensor.
+     * @param[in] dt_out               Data type of the output tensor.
+     * @param[in] convert_policy       Overflow policy of the operation.
+     * @param[in] fixed_point_position (Optional) Number of bits for the fractional part of the fixed point numbers
      *
      * @return Computed raw tensor.
      */
-    static RawTensor compute_reference_arithmetic_subtraction(const TensorShape &shape, DataType dt_in0, DataType dt_in1, DataType dt_out, ConvertPolicy convert_policy);
+    static RawTensor compute_reference_arithmetic_subtraction(const TensorShape &shape, DataType dt_in0, DataType dt_in1, DataType dt_out, ConvertPolicy convert_policy, int fixed_point_position = 0);
     /** Compute reference bitwise and.
      *
      * @param[in] shape Shape of the input and output tensors.
--
cgit v1.2.1
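The sketch below is not part of the patch; it illustrates the fixed-point convention the new Quantized test suites exercise, assuming the usual Q-format reading in which a QS8/QS16 raw integer represents raw / 2^fixed_point_position. A signed 8-bit value can dedicate at most 7 bits to the fraction and a signed 16-bit value at most 15, which is why the fixed_point_position sweeps are bounded by 7 for QS8 and 15 for QS16. The helper names (qs8_to_float, qs8_saturating_add) are illustrative only and are not ARM Compute Library API.

// Standalone illustration of QS8 fixed-point semantics (assumed Q-format convention);
// helper names are hypothetical and not part of the library under test.
#include <algorithm>
#include <cstdint>
#include <iostream>

// Real value represented by a QS8 raw byte with the given number of fractional bits.
float qs8_to_float(int8_t raw, int fixed_point_position)
{
    return static_cast<float>(raw) / static_cast<float>(1 << fixed_point_position);
}

// Saturating add: the raw integer sum is clamped to the int8_t range.
// The fixed point position does not change the integer arithmetic itself;
// it only fixes the scale at which both operands and the result are read.
int8_t qs8_saturating_add(int8_t a, int8_t b)
{
    const int32_t sum = static_cast<int32_t>(a) + static_cast<int32_t>(b);
    return static_cast<int8_t>(std::min<int32_t>(std::max<int32_t>(sum, INT8_MIN), INT8_MAX));
}

int main()
{
    const int fixed_point_position = 5;        // QS8 allows between 1 and 7 fractional bits
    const int8_t a = 96;                       // 96 / 2^5 = 3.0
    const int8_t b = 64;                       // 64 / 2^5 = 2.0
    const int8_t c = qs8_saturating_add(a, b); // raw 96 + 64 = 160 saturates to 127

    std::cout << qs8_to_float(a, fixed_point_position) << " + "
              << qs8_to_float(b, fixed_point_position) << " = "
              << qs8_to_float(c, fixed_point_position) << " (saturated)\n";
    return 0;
}

Under ConvertPolicy::WRAP the clamp would be omitted and the raw sum simply truncated to 8 bits; both overflow policies reach the test cases above through the ConvertPolicies() dataset, presumably the same SATURATE/WRAP pair used elsewhere in these files.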