From 19023835fa5a73dea2823edf667c711b03bc5060 Mon Sep 17 00:00:00 2001 From: Michele Di Giorgio Date: Wed, 17 Jun 2020 16:08:10 +0000 Subject: Revert "COMPMID-3480: Perform in-place computations in NEArithmeticAdditionKernel" This reverts commit 4a61653202afb018f4f259d3c144a735d73f0a20. Reason for revert: We will allow in-place computations by providing the same input1 (or input2) as output, thus avoiding changes in the interface. Change-Id: I7c8669e207e15731dc26dc366150bf960508a879 Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/3035 Tested-by: Arm Jenkins Reviewed-by: Georgios Pinitas Comments-Addressed: Arm Jenkins --- tests/validation/CL/ArithmeticAddition.cpp | 154 +++++++++++++++++++-- .../validation/GLES_COMPUTE/ArithmeticAddition.cpp | 7 +- tests/validation/NEON/ArithmeticAddition.cpp | 73 +++------- .../fixtures/ArithmeticOperationsFixture.h | 50 +++---- 4 files changed, 183 insertions(+), 101 deletions(-) mode change 100644 => 100755 tests/validation/GLES_COMPUTE/ArithmeticAddition.cpp (limited to 'tests/validation') diff --git a/tests/validation/CL/ArithmeticAddition.cpp b/tests/validation/CL/ArithmeticAddition.cpp index 229dcbc712..41415ee481 100644 --- a/tests/validation/CL/ArithmeticAddition.cpp +++ b/tests/validation/CL/ArithmeticAddition.cpp @@ -108,9 +108,31 @@ using CLArithmeticAdditionFixture = ArithmeticAdditionValidationFixture, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallShapes(), ArithmeticAdditionU8Dataset), - framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })), - framework::dataset::make("InPlace", { false }))) +DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(datasets::SmallShapes(), framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })), + shape, policy) +{ + // Create tensors + CLTensor ref_src1 = create_tensor(shape, DataType::U8); + CLTensor ref_src2 = create_tensor(shape, DataType::U8); + CLTensor dst = create_tensor(shape, DataType::U8); + + // Create and Configure function + CLArithmeticAddition add; + add.configure(&ref_src1, &ref_src2, &dst, policy); + + // Validate valid region + const ValidRegion valid_region = shape_to_valid_region(shape); + validate(dst.info()->valid_region(), valid_region); + + // Validate padding + const PaddingSize padding = PaddingCalculator(shape.x(), num_elems_processed_per_iteration).required_padding(); + validate(ref_src1.info()->padding(), padding); + validate(ref_src2.info()->padding(), padding); + validate(dst.info()->padding(), padding); +} + +FIXTURE_DATA_TEST_CASE(RunSmall, CLArithmeticAdditionFixture, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallShapes(), ArithmeticAdditionU8Dataset), + framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP }))) { // Validate output validate(CLAccessor(_target), _reference); @@ -118,17 +140,39 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLArithmeticAdditionFixture, framework TEST_SUITE_END() // U8 TEST_SUITE(S16) -FIXTURE_DATA_TEST_CASE(RunSmall, CLArithmeticAdditionFixture, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallShapes(), ArithmeticAdditionS16Dataset), - framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })), - framework::dataset::make("InPlace", { false }))) +DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(datasets::SmallShapes(), framework::dataset::make("DataType", { DataType::U8, DataType::S16 
})), + framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })), + shape, data_type, policy) +{ + // Create tensors + CLTensor ref_src1 = create_tensor(shape, data_type); + CLTensor ref_src2 = create_tensor(shape, DataType::S16); + CLTensor dst = create_tensor(shape, DataType::S16); + + // Create and Configure function + CLArithmeticAddition add; + add.configure(&ref_src1, &ref_src2, &dst, policy); + + // Validate valid region + const ValidRegion valid_region = shape_to_valid_region(shape); + validate(dst.info()->valid_region(), valid_region); + + // Validate padding + const PaddingSize padding = PaddingCalculator(shape.x(), num_elems_processed_per_iteration).required_padding(); + validate(ref_src1.info()->padding(), padding); + validate(ref_src2.info()->padding(), padding); + validate(dst.info()->padding(), padding); +} + +FIXTURE_DATA_TEST_CASE(RunSmall, CLArithmeticAdditionFixture, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallShapes(), ArithmeticAdditionS16Dataset), + framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP }))) { // Validate output validate(CLAccessor(_target), _reference); } -FIXTURE_DATA_TEST_CASE(RunLarge, CLArithmeticAdditionFixture, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeShapes(), ArithmeticAdditionS16Dataset), - framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })), - framework::dataset::make("InPlace", { false }))) +FIXTURE_DATA_TEST_CASE(RunLarge, CLArithmeticAdditionFixture, framework::DatasetMode::NIGHTLY, combine(combine(datasets::LargeShapes(), ArithmeticAdditionS16Dataset), + framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP }))) { // Validate output validate(CLAccessor(_target), _reference); @@ -141,6 +185,29 @@ using CLArithmeticAdditionQuantizedFixture = ArithmeticAdditionValidationQuantiz TEST_SUITE(Quantized) TEST_SUITE(QASYMM8) +DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(datasets::SmallShapes(), framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE })), + shape, policy) +{ + // Create tensors + CLTensor ref_src1 = create_tensor(shape, DataType::QASYMM8); + CLTensor ref_src2 = create_tensor(shape, DataType::QASYMM8); + CLTensor dst = create_tensor(shape, DataType::QASYMM8); + + // Create and Configure function + CLArithmeticAddition add; + add.configure(&ref_src1, &ref_src2, &dst, policy); + + // Validate valid region + const ValidRegion valid_region = shape_to_valid_region(shape); + validate(dst.info()->valid_region(), valid_region); + + // Validate padding + const PaddingSize padding = PaddingCalculator(shape.x(), num_elems_processed_per_iteration).required_padding(); + validate(ref_src1.info()->padding(), padding); + validate(ref_src2.info()->padding(), padding); + validate(dst.info()->padding(), padding); +} + FIXTURE_DATA_TEST_CASE(RunSmall, CLArithmeticAdditionQuantizedFixture, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(combine(datasets::SmallShapes(), ArithmeticAdditionQASYMM8Dataset), framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE })), @@ -153,6 +220,29 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLArithmeticAdditionQuantizedFixture, } TEST_SUITE_END() // QASYMM8 TEST_SUITE(QASYMM8_SIGNED) +DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(datasets::SmallShapes(), framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE })), + shape, policy) +{ + 
// Create tensors + CLTensor ref_src1 = create_tensor(shape, DataType::QASYMM8_SIGNED); + CLTensor ref_src2 = create_tensor(shape, DataType::QASYMM8_SIGNED); + CLTensor dst = create_tensor(shape, DataType::QASYMM8_SIGNED); + + // Create and Configure function + CLArithmeticAddition add; + add.configure(&ref_src1, &ref_src2, &dst, policy); + + // Validate valid region + const ValidRegion valid_region = shape_to_valid_region(shape); + validate(dst.info()->valid_region(), valid_region); + + // Validate padding + const PaddingSize padding = PaddingCalculator(shape.x(), num_elems_processed_per_iteration).required_padding(); + validate(ref_src1.info()->padding(), padding); + validate(ref_src2.info()->padding(), padding); + validate(dst.info()->padding(), padding); +} + FIXTURE_DATA_TEST_CASE(RunSmall, CLArithmeticAdditionQuantizedFixture, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(combine(datasets::SmallShapes(), ArithmeticAdditionQASYMM8SignedDataset), framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE })), @@ -165,6 +255,29 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLArithmeticAdditionQuantizedFixture, f } TEST_SUITE_END() // QASYMM8_SIGNED TEST_SUITE(QSYMM16) +DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(datasets::SmallShapes(), framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE })), + shape, policy) +{ + // Create tensors + CLTensor ref_src1 = create_tensor(shape, DataType::QSYMM16); + CLTensor ref_src2 = create_tensor(shape, DataType::QSYMM16); + CLTensor dst = create_tensor(shape, DataType::QSYMM16); + + // Create and Configure function + CLArithmeticAddition add; + add.configure(&ref_src1, &ref_src2, &dst, policy); + + // Validate valid region + const ValidRegion valid_region = shape_to_valid_region(shape); + validate(dst.info()->valid_region(), valid_region); + + // Validate padding + const PaddingSize padding = PaddingCalculator(shape.x(), num_elems_processed_per_iteration).required_padding(); + validate(ref_src1.info()->padding(), padding); + validate(ref_src2.info()->padding(), padding); + validate(dst.info()->padding(), padding); +} + FIXTURE_DATA_TEST_CASE(RunSmall, CLArithmeticAdditionQuantizedFixture, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(combine(datasets::SmallShapes(), ArithmeticAdditionQSYMM16Dataset), framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE })), @@ -200,6 +313,29 @@ FIXTURE_DATA_TEST_CASE(RunWithActivation, CLArithmeticAdditionFloatFixture TEST_SUITE_END() // FP16 TEST_SUITE(FP32) +DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(datasets::SmallShapes(), framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })), + shape, policy) +{ + // Create tensors + CLTensor ref_src1 = create_tensor(shape, DataType::F32); + CLTensor ref_src2 = create_tensor(shape, DataType::F32); + CLTensor dst = create_tensor(shape, DataType::F32); + + // Create and Configure function + CLArithmeticAddition add; + add.configure(&ref_src1, &ref_src2, &dst, policy); + + // Validate valid region + const ValidRegion valid_region = shape_to_valid_region(shape); + validate(dst.info()->valid_region(), valid_region); + + // Validate padding + const PaddingSize padding = PaddingCalculator(shape.x(), num_elems_processed_per_iteration).required_padding(); + validate(ref_src1.info()->padding(), padding); + validate(ref_src2.info()->padding(), padding); + validate(dst.info()->padding(), padding); +} + FIXTURE_DATA_TEST_CASE(RunSmall, 
CLArithmeticAdditionFloatFixture, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallShapes(), ArithmeticAdditionFP32Dataset), framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })), EmptyActivationFunctionsDataset)) diff --git a/tests/validation/GLES_COMPUTE/ArithmeticAddition.cpp b/tests/validation/GLES_COMPUTE/ArithmeticAddition.cpp old mode 100644 new mode 100755 index 336870ac8a..82946fa209 --- a/tests/validation/GLES_COMPUTE/ArithmeticAddition.cpp +++ b/tests/validation/GLES_COMPUTE/ArithmeticAddition.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2020 ARM Limited. + * Copyright (c) 2017-2018 ARM Limited. * * SPDX-License-Identifier: MIT * @@ -56,9 +56,8 @@ using GCArithmeticAdditionFixture = ArithmeticAdditionValidationFixture, framework::DatasetMode::ALL, combine(combine(combine(datasets::SmallShapes(), ArithmeticAdditionFP16Dataset), - framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })), - framework::dataset::make("InPlace", { false }))) +FIXTURE_DATA_TEST_CASE(RunSmall, GCArithmeticAdditionFixture, framework::DatasetMode::ALL, combine(combine(datasets::SmallShapes(), ArithmeticAdditionFP16Dataset), + framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP }))) { // Validate output validate(GCAccessor(_target), _reference); diff --git a/tests/validation/NEON/ArithmeticAddition.cpp b/tests/validation/NEON/ArithmeticAddition.cpp index 19ebfedcb5..72993172fd 100644 --- a/tests/validation/NEON/ArithmeticAddition.cpp +++ b/tests/validation/NEON/ArithmeticAddition.cpp @@ -104,33 +104,13 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip( ConvertPolicy::WRAP); ARM_COMPUTE_EXPECT(bool(s) == expected, framework::LogLevel::ERRORS); } - -DATA_TEST_CASE(ValidateInPlace, framework::DatasetMode::ALL, zip(zip( - framework::dataset::make("Input1Info", { TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), - TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::U8), - TensorInfo(TensorShape(1U, 13U, 2U), 1, DataType::U8), // Unsupported broadcast on input1 with in-place computation - }), - framework::dataset::make("Input2Info",{ TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), - TensorInfo(TensorShape(1U, 13U, 2U), 1, DataType::U8), - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), - })), - framework::dataset::make("Expected", { true, true, false })), - input1_info, input2_info, expected) -{ - Status s = NEArithmeticAddition::validate(&input1_info.clone()->set_is_resizable(false), - &input2_info.clone()->set_is_resizable(false), - nullptr, - ConvertPolicy::WRAP); - ARM_COMPUTE_EXPECT(bool(s) == expected, framework::LogLevel::ERRORS); -} // clang-format on // *INDENT-ON* TEST_SUITE(Integer) TEST_SUITE(U8) -FIXTURE_DATA_TEST_CASE(RunSmall, NEArithmeticAdditionFixture, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallShapes(), ArithmeticAdditionU8Dataset), - framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })), - framework::dataset::make("InPlace", { false, true }))) +FIXTURE_DATA_TEST_CASE(RunSmall, NEArithmeticAdditionFixture, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallShapes(), ArithmeticAdditionU8Dataset), + framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP }))) { // Validate output validate(Accessor(_target), _reference); @@ -138,17 +118,15 @@ FIXTURE_DATA_TEST_CASE(RunSmall, NEArithmeticAdditionFixture, 
framework TEST_SUITE_END() // U8 TEST_SUITE(S16) -FIXTURE_DATA_TEST_CASE(RunSmall, NEArithmeticAdditionFixture, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallShapes(), ArithmeticAdditionS16Dataset), - framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })), - framework::dataset::make("InPlace", { false }))) +FIXTURE_DATA_TEST_CASE(RunSmall, NEArithmeticAdditionFixture, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallShapes(), ArithmeticAdditionS16Dataset), + framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP }))) { // Validate output validate(Accessor(_target), _reference); } -FIXTURE_DATA_TEST_CASE(RunLarge, NEArithmeticAdditionFixture, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeShapes(), ArithmeticAdditionS16Dataset), - framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })), - framework::dataset::make("InPlace", { false }))) +FIXTURE_DATA_TEST_CASE(RunLarge, NEArithmeticAdditionFixture, framework::DatasetMode::NIGHTLY, combine(combine(datasets::LargeShapes(), ArithmeticAdditionS16Dataset), + framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP }))) { // Validate output validate(Accessor(_target), _reference); @@ -156,9 +134,8 @@ FIXTURE_DATA_TEST_CASE(RunLarge, NEArithmeticAdditionFixture, framework TEST_SUITE_END() // S16 TEST_SUITE(S32) -FIXTURE_DATA_TEST_CASE(RunSmall, NEArithmeticAdditionFixture, framework::DatasetMode::ALL, combine(combine(combine(datasets::SmallShapes(), ArithmeticAdditionS32Dataset), - framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })), - framework::dataset::make("InPlace", { false }))) +FIXTURE_DATA_TEST_CASE(RunSmall, NEArithmeticAdditionFixture, framework::DatasetMode::ALL, combine(combine(datasets::SmallShapes(), ArithmeticAdditionS32Dataset), + framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP }))) { // Validate output validate(Accessor(_target), _reference); @@ -169,9 +146,8 @@ TEST_SUITE_END() // Integer TEST_SUITE(Float) #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC TEST_SUITE(F16) -FIXTURE_DATA_TEST_CASE(RunSmall, NEArithmeticAdditionFixture, framework::DatasetMode::ALL, combine(combine(combine(datasets::SmallShapes(), ArithmeticAdditionFP16Dataset), - framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })), - framework::dataset::make("InPlace", { false }))) +FIXTURE_DATA_TEST_CASE(RunSmall, NEArithmeticAdditionFixture, framework::DatasetMode::ALL, combine(combine(datasets::SmallShapes(), ArithmeticAdditionFP16Dataset), + framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP }))) { // Validate output validate(Accessor(_target), _reference); @@ -180,17 +156,15 @@ TEST_SUITE_END() // F16 #endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */ TEST_SUITE(F32) -FIXTURE_DATA_TEST_CASE(RunSmall, NEArithmeticAdditionFixture, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallShapes(), ArithmeticAdditionFP32Dataset), - framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })), - framework::dataset::make("InPlace", { false }))) +FIXTURE_DATA_TEST_CASE(RunSmall, NEArithmeticAdditionFixture, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallShapes(), ArithmeticAdditionFP32Dataset), + framework::dataset::make("ConvertPolicy", { 
ConvertPolicy::SATURATE, ConvertPolicy::WRAP }))) { // Validate output validate(Accessor(_target), _reference); } -FIXTURE_DATA_TEST_CASE(RunLarge, NEArithmeticAdditionFixture, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeShapes(), ArithmeticAdditionFP32Dataset), - framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })), - framework::dataset::make("InPlace", { false }))) +FIXTURE_DATA_TEST_CASE(RunLarge, NEArithmeticAdditionFixture, framework::DatasetMode::NIGHTLY, combine(combine(datasets::LargeShapes(), ArithmeticAdditionFP32Dataset), + framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP }))) { // Validate output validate(Accessor(_target), _reference); @@ -199,28 +173,17 @@ FIXTURE_DATA_TEST_CASE(RunLarge, NEArithmeticAdditionFixture, framework:: template using NEArithmeticAdditionBroadcastFixture = ArithmeticAdditionBroadcastValidationFixture; -FIXTURE_DATA_TEST_CASE(RunSmallBroadcast, NEArithmeticAdditionBroadcastFixture, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallShapesBroadcast(), - ArithmeticAdditionFP32Dataset), - framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })), - framework::dataset::make("InPlace", { false }))) -{ - // Validate output - validate(Accessor(_target), _reference); -} - -FIXTURE_DATA_TEST_CASE(RunSmallBroadcastInPlace, NEArithmeticAdditionBroadcastFixture, framework::DatasetMode::ALL, combine(combine(combine(datasets::SmallShapesBroadcastInPlace(), +FIXTURE_DATA_TEST_CASE(RunSmallBroadcast, NEArithmeticAdditionBroadcastFixture, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallShapesBroadcast(), ArithmeticAdditionFP32Dataset), - framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })), - framework::dataset::make("InPlace", { true }))) + framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP }))) { // Validate output validate(Accessor(_target), _reference); } -FIXTURE_DATA_TEST_CASE(RunLargeBroadcast, NEArithmeticAdditionBroadcastFixture, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeShapesBroadcast(), +FIXTURE_DATA_TEST_CASE(RunLargeBroadcast, NEArithmeticAdditionBroadcastFixture, framework::DatasetMode::NIGHTLY, combine(combine(datasets::LargeShapesBroadcast(), ArithmeticAdditionFP32Dataset), - framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })), - framework::dataset::make("InPlace", { false }))) + framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP }))) { // Validate output validate(Accessor(_target), _reference); diff --git a/tests/validation/fixtures/ArithmeticOperationsFixture.h b/tests/validation/fixtures/ArithmeticOperationsFixture.h index 05c55a0b63..faf0aaa68b 100644 --- a/tests/validation/fixtures/ArithmeticOperationsFixture.h +++ b/tests/validation/fixtures/ArithmeticOperationsFixture.h @@ -48,10 +48,8 @@ public: template void setup(reference::ArithmeticOperation op, const TensorShape &shape0, const TensorShape &shape1, DataType data_type0, DataType data_type1, DataType output_data_type, ConvertPolicy convert_policy, - QuantizationInfo qinfo0, QuantizationInfo qinfo1, QuantizationInfo qinfo_out, ActivationLayerInfo act_info, - bool in_place) + QuantizationInfo qinfo0, QuantizationInfo qinfo1, QuantizationInfo qinfo_out, ActivationLayerInfo act_info) { - _in_place = in_place; _op = op; _act_info = 
act_info; _target = compute_target(shape0, shape1, data_type0, data_type1, output_data_type, convert_policy, qinfo0, qinfo1, qinfo_out); @@ -73,11 +71,9 @@ protected: TensorType ref_src2 = create_tensor(shape1, data_type1, 1, qinfo1); TensorType dst = create_tensor(TensorShape::broadcast_shape(shape0, shape1), output_data_type, 1, qinfo_out); - TensorType *dst_ptr = _in_place ? nullptr : &dst; - // Create and configure function FunctionType arith_op; - arith_op.configure(&ref_src1, &ref_src2, dst_ptr, convert_policy, _act_info); + arith_op.configure(&ref_src1, &ref_src2, &dst, convert_policy, _act_info); ARM_COMPUTE_EXPECT(ref_src1.info()->is_resizable(), framework::LogLevel::ERRORS); ARM_COMPUTE_EXPECT(ref_src2.info()->is_resizable(), framework::LogLevel::ERRORS); @@ -86,15 +82,11 @@ protected: // Allocate tensors ref_src1.allocator()->allocate(); ref_src2.allocator()->allocate(); + dst.allocator()->allocate(); ARM_COMPUTE_EXPECT(!ref_src1.info()->is_resizable(), framework::LogLevel::ERRORS); ARM_COMPUTE_EXPECT(!ref_src2.info()->is_resizable(), framework::LogLevel::ERRORS); - - if(!_in_place) - { - dst.allocator()->allocate(); - ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS); - } + ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS); // Fill tensors fill(AccessorType(ref_src1), 0); @@ -103,14 +95,7 @@ protected: // Compute function arith_op.run(); - if(_in_place) - { - return ref_src1; - } - else - { - return dst; - } + return dst; } SimpleTensor compute_reference(const TensorShape &shape0, const TensorShape &shape1, @@ -134,7 +119,6 @@ protected: SimpleTensor _reference{}; reference::ArithmeticOperation _op{ reference::ArithmeticOperation::ADD }; ActivationLayerInfo _act_info{}; - bool _in_place{}; }; template @@ -142,10 +126,10 @@ class ArithmeticAdditionBroadcastValidationFixture : public ArithmeticOperationG { public: template - void setup(const TensorShape &shape0, const TensorShape &shape1, DataType data_type0, DataType data_type1, DataType output_data_type, ConvertPolicy convert_policy, bool in_place) + void setup(const TensorShape &shape0, const TensorShape &shape1, DataType data_type0, DataType data_type1, DataType output_data_type, ConvertPolicy convert_policy) { ArithmeticOperationGenericFixture::setup(reference::ArithmeticOperation::ADD, shape0, shape1, data_type0, data_type1, - output_data_type, convert_policy, QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), ActivationLayerInfo(), in_place); + output_data_type, convert_policy, QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), ActivationLayerInfo()); } }; @@ -154,10 +138,10 @@ class ArithmeticAdditionValidationFixture : public ArithmeticOperationGenericFix { public: template - void setup(const TensorShape &shape, DataType data_type0, DataType data_type1, DataType output_data_type, ConvertPolicy convert_policy, bool in_place) + void setup(const TensorShape &shape, DataType data_type0, DataType data_type1, DataType output_data_type, ConvertPolicy convert_policy) { ArithmeticOperationGenericFixture::setup(reference::ArithmeticOperation::ADD, shape, shape, data_type0, data_type1, - output_data_type, convert_policy, QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), ActivationLayerInfo(), in_place); + output_data_type, convert_policy, QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), ActivationLayerInfo()); } }; @@ -169,7 +153,7 @@ public: void setup(const TensorShape &shape0, const TensorShape &shape1, DataType data_type0, DataType 
data_type1, DataType output_data_type, ConvertPolicy convert_policy, ActivationLayerInfo act_info) { ArithmeticOperationGenericFixture::setup(reference::ArithmeticOperation::ADD, shape0, shape1, data_type0, data_type1, - output_data_type, convert_policy, QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), act_info, false); + output_data_type, convert_policy, QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), act_info); } }; @@ -181,7 +165,7 @@ public: void setup(const TensorShape &shape, DataType data_type0, DataType data_type1, DataType output_data_type, ConvertPolicy convert_policy, ActivationLayerInfo act_info) { ArithmeticOperationGenericFixture::setup(reference::ArithmeticOperation::ADD, shape, shape, data_type0, data_type1, - output_data_type, convert_policy, QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), act_info, false); + output_data_type, convert_policy, QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), act_info); } }; @@ -195,7 +179,7 @@ public: { ArithmeticOperationGenericFixture::setup(reference::ArithmeticOperation::ADD, shape, shape, data_type0, data_type1, - output_data_type, convert_policy, qinfo0, qinfo1, qinfo_out, ActivationLayerInfo(), false); + output_data_type, convert_policy, qinfo0, qinfo1, qinfo_out, ActivationLayerInfo()); } }; @@ -208,7 +192,7 @@ public: { ArithmeticOperationGenericFixture::setup(reference::ArithmeticOperation::SUB, shape0, shape1, data_type0, data_type1, output_data_type, convert_policy, - QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), ActivationLayerInfo(), false); + QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), ActivationLayerInfo()); } }; @@ -221,7 +205,7 @@ public: { ArithmeticOperationGenericFixture::setup(reference::ArithmeticOperation::SUB, shape0, shape1, data_type0, data_type1, output_data_type, convert_policy, - QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), act_info, false); + QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), act_info); } }; @@ -234,7 +218,7 @@ public: { ArithmeticOperationGenericFixture::setup(reference::ArithmeticOperation::SUB, shape, shape, data_type0, data_type1, output_data_type, convert_policy, - QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), ActivationLayerInfo(), false); + QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), ActivationLayerInfo()); } }; @@ -247,7 +231,7 @@ public: { ArithmeticOperationGenericFixture::setup(reference::ArithmeticOperation::SUB, shape, shape, data_type0, data_type1, output_data_type, convert_policy, - QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), act_info, false); + QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), act_info); } }; @@ -262,7 +246,7 @@ public: { ArithmeticOperationGenericFixture::setup(reference::ArithmeticOperation::SUB, shape, shape, data_type0, data_type1, output_data_type, - convert_policy, qinfo0, qinfo1, qinfo_out, ActivationLayerInfo(), false); + convert_policy, qinfo0, qinfo1, qinfo_out, ActivationLayerInfo()); } }; -- cgit v1.2.1
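
For context on the "Reason for revert" above: the idea is that in-place addition needs no extra `in_place` flag or interface change, because a caller can simply pass the same tensor as `input1` (or `input2`) and as `output`. The following is a minimal sketch of that usage pattern, not part of this patch; the `NEArithmeticAddition::configure` signature and the `TensorInfo`/`TensorShape` values are taken from the test code in the diff, but whether a given library version actually accepts this aliasing is an assumption here.

```cpp
// Hypothetical illustration of the in-place pattern described in the revert
// message: the same tensor is passed as input1 and as output, so the result
// overwrites src1 and no interface change is required. Support for this
// aliasing depends on the library version (assumption), so treat it as a
// sketch rather than a guaranteed-supported call.
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/NEON/functions/NEArithmeticAddition.h"
#include "arm_compute/runtime/Tensor.h"

using namespace arm_compute;

int main()
{
    Tensor src1{};
    Tensor src2{};

    // Shape and data type mirror the Validate test cases above.
    src1.allocator()->init(TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8));
    src2.allocator()->init(TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8));

    NEArithmeticAddition add;
    // In-place: src1 doubles as the destination tensor.
    add.configure(&src1, &src2, &src1, ConvertPolicy::SATURATE);

    src1.allocator()->allocate();
    src2.allocator()->allocate();

    // ... fill src1 and src2 with data ...

    add.run(); // src1 now holds src1 + src2

    return 0;
}
```

Note that the removed `ValidateInPlace` test case in the NEON file encoded one caveat of this approach: broadcasting on the tensor that is reused as the output (e.g. a `1U x 13U x 2U` input1 written in place) cannot work, since the output shape would differ from the aliased input's shape.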