author    Michalis Spyrou <michalis.spyrou@arm.com>   2019-12-04 12:00:36 +0000
committer Michalis Spyrou <michalis.spyrou@arm.com>   2019-12-05 16:39:18 +0000
commit    6f58b37a18cfade5dbec38638926f7bd368756d9 (patch)
tree      65ab884fb1a5bce325db554cbcb73768907043ae
parent    8d4d1b85bc57d5f76f3939bb422e44df68dc2342 (diff)
COMPMID-2798 Add support for QASYMM8_SIGNED in NEArithmeticSubtraction
Change-Id: Ib90e0ce46f8dc006827d9ee9d95cf14e8b7832ad
Signed-off-by: Michalis Spyrou <michalis.spyrou@arm.com>
Reviewed-on: https://review.mlplatform.org/c/2415
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Michele Di Giorgio <michele.digiorgio@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
 -rw-r--r--  arm_compute/core/NEON/kernels/NEArithmeticSubtractionKernel.h    31
 -rw-r--r--  arm_compute/runtime/NEON/functions/NEArithmeticSubtraction.h     12
 -rw-r--r--  src/core/NEON/kernels/NEArithmeticSubtractionKernel.cpp          46
 -rw-r--r--  tests/validation/NEON/ArithmeticSubtraction.cpp                  118
 -rw-r--r--  tests/validation/fixtures/ArithmeticOperationsFixture.h          14
 5 files changed, 99 insertions(+), 122 deletions(-)
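
Usage illustration only (not part of the patch): the new signed quantized path is exercised through the existing function interface. Tensor shapes and the (scale, offset) pairs below are assumptions, chosen to mirror the test dataset added further down.

    Tensor in1, in2, dst;
    // Illustrative shapes and quantization parameters; QASYMM8_SIGNED is the data type this patch enables.
    in1.allocator()->init(TensorInfo(TensorShape(16U, 16U), 1, DataType::QASYMM8_SIGNED, QuantizationInfo(0.5f, 10)));
    in2.allocator()->init(TensorInfo(TensorShape(16U, 16U), 1, DataType::QASYMM8_SIGNED, QuantizationInfo(0.5f, 20)));
    dst.allocator()->init(TensorInfo(TensorShape(16U, 16U), 1, DataType::QASYMM8_SIGNED, QuantizationInfo(0.5f, 50)));

    NEArithmeticSubtraction sub;
    // Convert policy cannot be WRAP for quantized inputs, so SATURATE is used.
    sub.configure(&in1, &in2, &dst, ConvertPolicy::SATURATE);

    in1.allocator()->allocate();
    in2.allocator()->allocate();
    dst.allocator()->allocate();
    // ... fill in1 and in2 with quantized data, then:
    sub.run();
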
diff --git a/arm_compute/core/NEON/kernels/NEArithmeticSubtractionKernel.h b/arm_compute/core/NEON/kernels/NEArithmeticSubtractionKernel.h
index 97e0788146..e90c8b5fa2 100644
--- a/arm_compute/core/NEON/kernels/NEArithmeticSubtractionKernel.h
+++ b/arm_compute/core/NEON/kernels/NEArithmeticSubtractionKernel.h
@@ -56,19 +56,20 @@ public:
*
* Valid configurations (Input1,Input2) -> Output :
*
- * - (U8,U8) -> U8
- * - (U8,U8) -> S16
- * - (QASYMM8, QASYMM8) -> QASYMM8
- * - (S16,U8) -> S16
- * - (U8,S16) -> S16
- * - (S16,S16) -> S16
- * - (F16,F16) -> F16
- * - (F32,F32) -> F32
+ * - (U8,U8) -> U8
+ * - (U8,U8) -> S16
+ * - (QASYMM8, QASYMM8) -> QASYMM8
+ * - (QASYMM8_SIGNED, QASYMM8_SIGNED) -> QASYMM8_SIGNED
+ * - (S16,U8) -> S16
+ * - (U8,S16) -> S16
+ * - (S16,S16) -> S16
+ * - (F16,F16) -> F16
+ * - (F32,F32) -> F32
*
- * @param[in] input1 An input tensor. Data types supported: U8/QASYMM8/S16/F16/F32
- * @param[in] input2 An input tensor. Data types supported: U8/QASYMM8/S16/F16/F32
- * @param[out] output The output tensor. Data types supported: U8/QASYMM8/S16/F16/F32.
- * @param[in] policy Overflow policy. Convert policy cannot be WRAP if datatype is QASYMM8
+ * @param[in] input1 An input tensor. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/F16/F32
+ * @param[in] input2 An input tensor. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/F16/F32
+ * @param[out] output The output tensor. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/F16/F32.
+ * @param[in] policy Overflow policy. Convert policy cannot be WRAP if datatype is QASYMM8/QASYMM8_SIGNED
*/
void configure(const ITensor *input1, const ITensor *input2, ITensor *output, ConvertPolicy policy);
/** Static function to check if given info will lead to a valid configuration of @ref NEArithmeticSubtractionKernel
@@ -91,9 +92,9 @@ public:
private:
/** Common signature for all the specialised sub functions
*
- * @param[in] input1 An input tensor. Data types supported: U8/QASYMM8/S16/F16/F32
- * @param[in] input2 An input tensor. Data types supported: U8/QASYMM8/S16/F16/F32
- * @param[out] output The output tensor. Data types supported: U8/QASYMM8/S16/F16/F32.
+ * @param[in] input1 An input tensor. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/F16/F32
+ * @param[in] input2 An input tensor. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/F16/F32
+ * @param[out] output The output tensor. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/F16/F32.
* @param[in] window Region on which to execute the kernel.
*/
using SubFunction = void(const ITensor *input1, const ITensor *input2, ITensor *output, const Window &window);
diff --git a/arm_compute/runtime/NEON/functions/NEArithmeticSubtraction.h b/arm_compute/runtime/NEON/functions/NEArithmeticSubtraction.h
index 671a5dd61c..e2c6496416 100644
--- a/arm_compute/runtime/NEON/functions/NEArithmeticSubtraction.h
+++ b/arm_compute/runtime/NEON/functions/NEArithmeticSubtraction.h
@@ -45,17 +45,17 @@ class NEArithmeticSubtraction : public INESimpleFunction
public:
/** Initialise the kernel's inputs, output and conversion policy.
*
- * @param[in] input1 First tensor input. Data types supported: U8/QASYMM8/S16/F16/F32
- * @param[in] input2 Second tensor input. Data types supported: U8/QASYMM8/S16/F16/F32
- * @param[out] output Output tensor. Data types supported: U8/QASYMM8/S16/F16/F32
+ * @param[in] input1 First tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/F16/F32
+ * @param[in] input2 Second tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/F16/F32
+ * @param[out] output Output tensor. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/F16/F32
* @param[in] policy Policy to use to handle overflow. Convert policy cannot be WRAP if datatype is QASYMM8.
*/
void configure(ITensor *input1, ITensor *input2, ITensor *output, ConvertPolicy policy);
/** Static function to check if given info will lead to a valid configuration of @ref NEArithmeticSubtraction
*
- * @param[in] input1 First tensor input. Data types supported: U8/QASYMM8/S16/F16/F32
- * @param[in] input2 Second tensor input. Data types supported: U8/QASYMM8/S16/F16/F32
- * @param[in] output Output tensor. Data types supported: U8/QASYMM8/S16/F16/F32
+ * @param[in] input1 First tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/F16/F32
+ * @param[in] input2 Second tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/F16/F32
+ * @param[in] output Output tensor. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/F16/F32
* @param[in] policy Policy to use to handle overflow. Convert policy cannot be WRAP if datatype is QASYMM8
*
* @return a status
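
Before configuring, the static check can confirm whether a given combination is accepted. A minimal sketch, assuming the validate() overload documented above; the TensorInfo values are illustrative only.

    // Hypothetical tensor infos; shape and quantization parameters are not taken from the patch.
    const TensorInfo in1_info(TensorShape(32U), 1, DataType::QASYMM8_SIGNED, QuantizationInfo(0.5f, 10));
    const TensorInfo in2_info(TensorShape(32U), 1, DataType::QASYMM8_SIGNED, QuantizationInfo(0.5f, 20));
    const TensorInfo out_info(TensorShape(32U), 1, DataType::QASYMM8_SIGNED, QuantizationInfo(0.5f, 50));

    // SATURATE is accepted for QASYMM8_SIGNED inputs...
    const Status ok  = NEArithmeticSubtraction::validate(&in1_info, &in2_info, &out_info, ConvertPolicy::SATURATE);
    // ...while WRAP is rejected for quantized data types.
    const Status err = NEArithmeticSubtraction::validate(&in1_info, &in2_info, &out_info, ConvertPolicy::WRAP);
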
diff --git a/src/core/NEON/kernels/NEArithmeticSubtractionKernel.cpp b/src/core/NEON/kernels/NEArithmeticSubtractionKernel.cpp
index 8874b52e19..7a2601be26 100644
--- a/src/core/NEON/kernels/NEArithmeticSubtractionKernel.cpp
+++ b/src/core/NEON/kernels/NEArithmeticSubtractionKernel.cpp
@@ -113,6 +113,38 @@ void sub_saturate_QAYSMM8_QAYSMM8_QAYSMM8(const ITensor *in1, const ITensor *in2
input1, input2, output);
}
+void sub_saturate_QASYMM8_SIGNED_QASYMM8_SIGNED_QASYMM8_SIGNED(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window)
+{
+ Iterator input1(in1, window.broadcast_if_dimension_le_one(in1->info()->tensor_shape()));
+ Iterator input2(in2, window.broadcast_if_dimension_le_one(in2->info()->tensor_shape()));
+ Iterator output(out, window);
+
+ const UniformQuantizationInfo iq1_info = in1->info()->quantization_info().uniform();
+ const UniformQuantizationInfo iq2_info = in2->info()->quantization_info().uniform();
+ const UniformQuantizationInfo oq_info = out->info()->quantization_info().uniform();
+
+ execute_window_loop(window, [&](const Coordinates &)
+ {
+ const float32x4x4_t ta1 = vdequantize(vld1q_s8(reinterpret_cast<const qasymm8_signed_t *>(input1.ptr())), iq1_info);
+ const float32x4x4_t ta2 = vdequantize(vld1q_s8(reinterpret_cast<const qasymm8_signed_t *>(input2.ptr())), iq2_info);
+
+ const float32x4x4_t ta3 =
+ {
+ {
+ vsubq_f32(ta1.val[0], ta2.val[0]),
+ vsubq_f32(ta1.val[1], ta2.val[1]),
+ vsubq_f32(ta1.val[2], ta2.val[2]),
+ vsubq_f32(ta1.val[3], ta2.val[3]),
+ }
+ };
+
+ const int8x16_t result = vquantize_signed(ta3, oq_info);
+
+ vst1q_s8(reinterpret_cast<qasymm8_signed_t *>(output.ptr()), result);
+ },
+ input1, input2, output);
+}
+
void sub_wrap_S16_S16_S16(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window)
{
Iterator input1(in1, window.broadcast_if_dimension_le_one(in1->info()->tensor_shape()));
@@ -357,9 +389,9 @@ inline Status validate_arguments(const ITensorInfo &input1, const ITensorInfo &i
{
ARM_COMPUTE_UNUSED(policy);
ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(&input1);
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&input1, 1, DataType::U8, DataType::QASYMM8, DataType::S16, DataType::F16, DataType::F32);
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&input2, 1, DataType::U8, DataType::QASYMM8, DataType::S16, DataType::F16, DataType::F32);
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&output, 1, DataType::U8, DataType::QASYMM8, DataType::S16, DataType::F16, DataType::F32);
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&input1, 1, DataType::U8, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::S16, DataType::F16, DataType::F32);
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&input2, 1, DataType::U8, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::S16, DataType::F16, DataType::F32);
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&output, 1, DataType::U8, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::S16, DataType::F16, DataType::F32);
const TensorShape out_shape = TensorShape::broadcast_shape(input1.tensor_shape(), input2.tensor_shape());
ARM_COMPUTE_RETURN_ERROR_ON_MSG(out_shape.total_size() == 0, "Inputs are not broadcast compatible");
@@ -367,6 +399,7 @@ inline Status validate_arguments(const ITensorInfo &input1, const ITensorInfo &i
ARM_COMPUTE_RETURN_ERROR_ON_MSG(
!(input1.data_type() == DataType::U8 && input2.data_type() == DataType::U8)
&& !(input1.data_type() == DataType::QASYMM8 && input2.data_type() == DataType::QASYMM8)
+ && !(input1.data_type() == DataType::QASYMM8_SIGNED && input2.data_type() == DataType::QASYMM8_SIGNED)
&& !(input1.data_type() == DataType::U8 && input2.data_type() == DataType::U8)
&& !(input1.data_type() == DataType::U8 && input2.data_type() == DataType::S16)
&& !(input1.data_type() == DataType::S16 && input2.data_type() == DataType::U8)
@@ -376,8 +409,9 @@ inline Status validate_arguments(const ITensorInfo &input1, const ITensorInfo &i
"You called subtract with the wrong image formats");
ARM_COMPUTE_RETURN_ERROR_ON_MSG(
- input1.data_type() == DataType::QASYMM8 && input2.data_type() == DataType::QASYMM8 && policy == ConvertPolicy::WRAP,
- "Convert policy cannot be WRAP if datatype is QASYMM8");
+ (input1.data_type() == DataType::QASYMM8_SIGNED && input2.data_type() == DataType::QASYMM8_SIGNED && policy == ConvertPolicy::WRAP)
+ || (input1.data_type() == DataType::QASYMM8 && input2.data_type() == DataType::QASYMM8 && policy == ConvertPolicy::WRAP),
+ "Convert policy cannot be WRAP if datatype is QASYMM8 or QASYMM8_SIGNED");
// Validate in case of configured output
if(output.total_size() > 0)
@@ -385,6 +419,7 @@ inline Status validate_arguments(const ITensorInfo &input1, const ITensorInfo &i
ARM_COMPUTE_RETURN_ERROR_ON_MSG(
!(input1.data_type() == DataType::U8 && input2.data_type() == DataType::U8 && output.data_type() == DataType::U8)
&& !(input1.data_type() == DataType::QASYMM8 && input2.data_type() == DataType::QASYMM8 && output.data_type() == DataType::QASYMM8)
+ && !(input1.data_type() == DataType::QASYMM8_SIGNED && input2.data_type() == DataType::QASYMM8_SIGNED && output.data_type() == DataType::QASYMM8_SIGNED)
&& !(input1.data_type() == DataType::U8 && input2.data_type() == DataType::U8 && output.data_type() == DataType::S16)
&& !(input1.data_type() == DataType::U8 && input2.data_type() == DataType::S16 && output.data_type() == DataType::S16)
&& !(input1.data_type() == DataType::S16 && input2.data_type() == DataType::U8 && output.data_type() == DataType::S16)
@@ -463,6 +498,7 @@ void NEArithmeticSubtractionKernel::configure(const ITensor *input1, const ITens
{ "sub_saturate_U8_U8_U8", &sub_saturate_U8_U8_U8 },
{ "sub_saturate_U8_U8_S16", &sub_saturate_U8_U8_S16 },
{ "sub_saturate_QASYMM8_QASYMM8_QASYMM8", &sub_saturate_QAYSMM8_QAYSMM8_QAYSMM8 },
+ { "sub_saturate_QASYMM8_SIGNED_QASYMM8_SIGNED_QASYMM8_SIGNED", &sub_saturate_QASYMM8_SIGNED_QASYMM8_SIGNED_QASYMM8_SIGNED },
{ "sub_wrap_U8_S16_S16", &sub_wrap_U8_S16_S16 },
{ "sub_wrap_S16_U8_S16", &sub_wrap_S16_U8_S16 },
{ "sub_saturate_U8_S16_S16", &sub_saturate_U8_S16_S16 },
diff --git a/tests/validation/NEON/ArithmeticSubtraction.cpp b/tests/validation/NEON/ArithmeticSubtraction.cpp
index 650738c0f6..a57b113082 100644
--- a/tests/validation/NEON/ArithmeticSubtraction.cpp
+++ b/tests/validation/NEON/ArithmeticSubtraction.cpp
@@ -43,18 +43,21 @@ namespace validation
{
namespace
{
-
#ifdef __aarch64__
constexpr AbsoluteTolerance<float> tolerance_qasymm8(0); /**< Tolerance value for comparing reference's output against implementation's output for quantized data types */
-#else //__aarch64__
+#else //__aarch64__
constexpr AbsoluteTolerance<float> tolerance_qasymm8(1); /**< Tolerance value for comparing reference's output against implementation's output for quantized data types */
-#endif //__aarch64__
+#endif //__aarch64__
/** Input data sets **/
const auto ArithmeticSubtractionQASYMM8Dataset = combine(combine(framework::dataset::make("DataType", DataType::QASYMM8),
framework::dataset::make("DataType", DataType::QASYMM8)),
framework::dataset::make("DataType", DataType::QASYMM8));
+const auto ArithmeticSubtractionQASYMM8SIGNEDDataset = combine(combine(framework::dataset::make("DataType", DataType::QASYMM8_SIGNED),
+ framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)),
+ framework::dataset::make("DataType", DataType::QASYMM8_SIGNED));
+
const auto ArithmeticSubtractionU8Dataset = combine(combine(framework::dataset::make("DataType", DataType::U8),
framework::dataset::make("DataType", DataType::U8)),
framework::dataset::make("DataType", DataType::U8));
@@ -74,6 +77,9 @@ const auto ArithmeticSubtractionFP32Dataset = combine(combine(framework::dataset
const auto ArithmeticSubtractionQuantizationInfoDataset = combine(combine(framework::dataset::make("QuantizationInfoIn1", { QuantizationInfo(10, 120) }),
framework::dataset::make("QuantizationInfoIn2", { QuantizationInfo(20, 110) })),
framework::dataset::make("QuantizationInfoOut", { QuantizationInfo(15, 125) }));
+const auto ArithmeticSubtractionQuantizationInfoSignedDataset = combine(combine(framework::dataset::make("QuantizationInfoIn1", { QuantizationInfo(0.5f, 10) }),
+ framework::dataset::make("QuantizationInfoIn2", { QuantizationInfo(0.5f, 20) })),
+ framework::dataset::make("QuantizationInfoOut", { QuantizationInfo(0.5f, 50) }));
} // namespace
TEST_SUITE(NEON)
@@ -124,29 +130,6 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(
// *INDENT-ON*
TEST_SUITE(U8)
-DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(datasets::SmallShapes(), framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
- shape, policy)
-{
- // Create tensors
- Tensor ref_src1 = create_tensor<Tensor>(shape, DataType::U8);
- Tensor ref_src2 = create_tensor<Tensor>(shape, DataType::U8);
- Tensor dst = create_tensor<Tensor>(shape, DataType::U8);
-
- // Create and Configure function
- NEArithmeticSubtraction sub;
- sub.configure(&ref_src1, &ref_src2, &dst, policy);
-
- // Validate valid region
- const ValidRegion valid_region = shape_to_valid_region(shape);
- validate(dst.info()->valid_region(), valid_region);
-
- // Validate padding
- const PaddingSize padding = PaddingCalculator(shape.x(), 16).required_padding();
- validate(ref_src1.info()->padding(), padding);
- validate(ref_src2.info()->padding(), padding);
- validate(dst.info()->padding(), padding);
-}
-
FIXTURE_DATA_TEST_CASE(RunSmall, NEArithmeticSubtractionFixture<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallShapes(), ArithmeticSubtractionU8Dataset),
framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })))
{
@@ -155,33 +138,11 @@ FIXTURE_DATA_TEST_CASE(RunSmall, NEArithmeticSubtractionFixture<uint8_t>, framew
}
TEST_SUITE_END() // U8
-using NEArithmeticSubtractionQuantFixture = ArithmeticSubtractionQuantValidationFixture<Tensor, Accessor, NEArithmeticSubtraction>;
+using NEArithmeticSubtractionQuantFixture = ArithmeticSubtractionQuantValidationFixture<Tensor, Accessor, NEArithmeticSubtraction>;
+using NEArithmeticSubtractionQuantSignedFixture = ArithmeticSubtractionQuantSignedValidationFixture<Tensor, Accessor, NEArithmeticSubtraction>;
TEST_SUITE(Quantized)
TEST_SUITE(QASYMM8)
-DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(datasets::SmallShapes(), framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE })),
- shape, policy)
-{
- // Create tensors
- Tensor ref_src1 = create_tensor<Tensor>(shape, DataType::QASYMM8);
- Tensor ref_src2 = create_tensor<Tensor>(shape, DataType::QASYMM8);
- Tensor dst = create_tensor<Tensor>(shape, DataType::QASYMM8);
-
- // Create and Configure function
- NEArithmeticSubtraction sub;
- sub.configure(&ref_src1, &ref_src2, &dst, policy);
-
- // Validate valid region
- const ValidRegion valid_region = shape_to_valid_region(shape);
- validate(dst.info()->valid_region(), valid_region);
-
- // Validate padding
- const PaddingSize padding = PaddingCalculator(shape.x(), 16).required_padding();
- validate(ref_src1.info()->padding(), padding);
- validate(ref_src2.info()->padding(), padding);
- validate(dst.info()->padding(), padding);
-}
-
FIXTURE_DATA_TEST_CASE(RunSmall, NEArithmeticSubtractionQuantFixture, framework::DatasetMode::PRECOMMIT, combine(combine(combine(
datasets::SmallShapes(),
ArithmeticSubtractionQASYMM8Dataset),
@@ -192,33 +153,21 @@ FIXTURE_DATA_TEST_CASE(RunSmall, NEArithmeticSubtractionQuantFixture, framework:
validate(Accessor(_target), _reference, tolerance_qasymm8);
}
TEST_SUITE_END() // QASYMM8
-TEST_SUITE_END() // Quantized
-TEST_SUITE(S16)
-DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(datasets::SmallShapes(), framework::dataset::make("DataType", { DataType::U8, DataType::S16 })),
- framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
- shape, data_type, policy)
+TEST_SUITE(QASYMM8_SIGNED)
+FIXTURE_DATA_TEST_CASE(RunSmall, NEArithmeticSubtractionQuantSignedFixture, framework::DatasetMode::ALL, combine(combine(combine(
+ datasets::SmallShapes(),
+ ArithmeticSubtractionQASYMM8SIGNEDDataset),
+ framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE })),
+ ArithmeticSubtractionQuantizationInfoSignedDataset))
{
- // Create tensors
- Tensor ref_src1 = create_tensor<Tensor>(shape, data_type);
- Tensor ref_src2 = create_tensor<Tensor>(shape, DataType::S16);
- Tensor dst = create_tensor<Tensor>(shape, DataType::S16);
-
- // Create and Configure function
- NEArithmeticSubtraction sub;
- sub.configure(&ref_src1, &ref_src2, &dst, policy);
-
- // Validate valid region
- const ValidRegion valid_region = shape_to_valid_region(shape);
- validate(dst.info()->valid_region(), valid_region);
-
- // Validate padding
- const PaddingSize padding = PaddingCalculator(shape.x(), 16).required_padding();
- validate(ref_src1.info()->padding(), padding);
- validate(ref_src2.info()->padding(), padding);
- validate(dst.info()->padding(), padding);
+ // Validate output
+ validate(Accessor(_target), _reference, tolerance_qasymm8);
}
+TEST_SUITE_END() // QASYMM8_SIGNED
+TEST_SUITE_END() // Quantized
+TEST_SUITE(S16)
FIXTURE_DATA_TEST_CASE(RunSmall, NEArithmeticSubtractionFixture<int16_t>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallShapes(), ArithmeticSubtractionS16Dataset),
framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })))
{
@@ -247,29 +196,6 @@ TEST_SUITE_END() // F16
#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
TEST_SUITE(F32)
-DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(datasets::SmallShapes(), framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
- shape, policy)
-{
- // Create tensors
- Tensor ref_src1 = create_tensor<Tensor>(shape, DataType::F32);
- Tensor ref_src2 = create_tensor<Tensor>(shape, DataType::F32);
- Tensor dst = create_tensor<Tensor>(shape, DataType::F32);
-
- // Create and Configure function
- NEArithmeticSubtraction sub;
- sub.configure(&ref_src1, &ref_src2, &dst, policy);
-
- // Validate valid region
- const ValidRegion valid_region = shape_to_valid_region(shape);
- validate(dst.info()->valid_region(), valid_region);
-
- // Validate padding
- const PaddingSize padding = PaddingCalculator(shape.x(), 16).required_padding();
- validate(ref_src1.info()->padding(), padding);
- validate(ref_src2.info()->padding(), padding);
- validate(dst.info()->padding(), padding);
-}
-
FIXTURE_DATA_TEST_CASE(RunSmall, NEArithmeticSubtractionFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallShapes(), ArithmeticSubtractionFP32Dataset),
framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })))
{
diff --git a/tests/validation/fixtures/ArithmeticOperationsFixture.h b/tests/validation/fixtures/ArithmeticOperationsFixture.h
index 76f241cedb..086b52bc31 100644
--- a/tests/validation/fixtures/ArithmeticOperationsFixture.h
+++ b/tests/validation/fixtures/ArithmeticOperationsFixture.h
@@ -182,6 +182,20 @@ public:
}
};
+template <typename TensorType, typename AccessorType, typename FunctionType>
+class ArithmeticSubtractionQuantSignedValidationFixture : public ArithmeticOperationGenericFixture<TensorType, AccessorType, FunctionType, qasymm8_signed_t>
+{
+public:
+ template <typename...>
+ void setup(const TensorShape &shape, DataType data_type0, DataType data_type1, DataType output_data_type, ConvertPolicy convert_policy,
+ QuantizationInfo in1_qua_info, QuantizationInfo in2_qua_info, QuantizationInfo out_qua_info)
+ {
+ ArithmeticOperationGenericFixture<TensorType, AccessorType, FunctionType, qasymm8_signed_t>::setup(reference::ArithmeticOperation::SUB, shape, shape,
+ data_type0, data_type1, output_data_type, convert_policy,
+ in1_qua_info, in2_qua_info, out_qua_info);
+ }
+};
+
template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
class ArithmeticSubtractionValidationFixture : public ArithmeticOperationGenericFixture<TensorType, AccessorType, FunctionType, T>
{