author     Michele Di Giorgio <michele.digiorgio@arm.com>    2020-06-04 15:05:38 +0100
committer  Michele Di Giorgio <michele.digiorgio@arm.com>    2020-06-15 13:59:04 +0000
commit     4a61653202afb018f4f259d3c144a735d73f0a20 (patch)
tree       082fd42e91cc0914dcacc0746bbe3e117d74210c /tests
parent     ccd94966cc58ef5148577e71ba1a4ff5aae1f3bb (diff)
download   ComputeLibrary-4a61653202afb018f4f259d3c144a735d73f0a20.tar.gz
COMPMID-3480: Perform in-place computations in NEArithmeticAdditionKernel
Change-Id: I0089657dd95d7c7b8592984def8e8de1d7e6d085
Signed-off-by: Michele Di Giorgio <michele.digiorgio@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/3308
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Diffstat (limited to 'tests')
-rw-r--r--                tests/datasets/ShapeDatasets.h                              24
-rw-r--r--                tests/validation/CL/ArithmeticAddition.cpp                 154
-rw-r--r-- [-rwxr-xr-x]   tests/validation/GLES_COMPUTE/ArithmeticAddition.cpp         7
-rw-r--r--                tests/validation/NEON/ArithmeticAddition.cpp                 73
-rw-r--r--                tests/validation/fixtures/ArithmeticOperationsFixture.h     50
5 files changed, 125 insertions, 183 deletions
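The patch makes the test fixtures request in-place computation by passing a null output tensor to configure(), so the result is written back into the first input (see tests/validation/fixtures/ArithmeticOperationsFixture.h below). A minimal sketch, not part of this patch, of how a caller might use the in-place path outside the test framework; the header paths and the function name in_place_add_sketch are assumptions, while the configure()/validate() behaviour with a null output is taken from the fixture change below:

    #include "arm_compute/core/TensorInfo.h"
    #include "arm_compute/core/Types.h"
    #include "arm_compute/runtime/NEON/functions/NEArithmeticAddition.h"
    #include "arm_compute/runtime/Tensor.h"

    using namespace arm_compute;

    void in_place_add_sketch()
    {
        // Illustrative shapes and data type only.
        Tensor src1, src2;
        src1.allocator()->init(TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32));
        src2.allocator()->init(TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32));

        NEArithmeticAddition add;
        // Passing a null output requests the in-place path: the result is written back into src1.
        add.configure(&src1, &src2, nullptr, ConvertPolicy::SATURATE);

        src1.allocator()->allocate();
        src2.allocator()->allocate();
        // ... fill src1 and src2 ...

        add.run(); // src1 now holds the element-wise sum of src1 and src2
    }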
diff --git a/tests/datasets/ShapeDatasets.h b/tests/datasets/ShapeDatasets.h
index 087342d3b8..0ea8091258 100644
--- a/tests/datasets/ShapeDatasets.h
+++ b/tests/datasets/ShapeDatasets.h
@@ -241,6 +241,30 @@ public:
}
};
+/** Data set containing pairs of small tensor shapes that are broadcast compatible for in-place computations.
+ * When doing in-place computations, only the second input can be broadcast.
+ */
+class SmallShapesBroadcastInPlace final : public framework::dataset::ZipDataset<ShapeDataset, ShapeDataset>
+{
+public:
+ SmallShapesBroadcastInPlace()
+ : ZipDataset<ShapeDataset, ShapeDataset>(
+ ShapeDataset("Shape0",
+ {
+ TensorShape{ 9U, 9U, 3U, 4U },
+ TensorShape{ 27U, 13U, 2U, 4U },
+ TensorShape{ 1U, 16U, 10U, 2U, 128U },
+ }),
+ ShapeDataset("Shape1",
+ {
+ TensorShape{ 9U, 1U, 3U },
+ TensorShape{ 1U },
+ TensorShape{ 1U, 1U, 1U, 1U, 128U },
+ }))
+ {
+ }
+};
+
/** Data set containing medium tensor shapes. */
class MediumShapes final : public ShapeDataset
{
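Note on the SmallShapesBroadcastInPlace pairs added above: only the second input may be broadcast because, with a null output, the first input doubles as the destination and must therefore already have the full broadcast output shape. A hedged sketch mirroring the NEON ValidateInPlace expectations added further down in this patch (the helper name and header paths are assumptions):

    #include "arm_compute/core/TensorInfo.h"
    #include "arm_compute/core/Types.h"
    #include "arm_compute/runtime/NEON/functions/NEArithmeticAddition.h"

    using namespace arm_compute;

    void in_place_broadcast_rules_sketch()
    {
        const TensorInfo full(TensorShape(32U, 13U, 2U), 1, DataType::U8);
        const TensorInfo bcast(TensorShape(1U, 13U, 2U), 1, DataType::U8);

        // Broadcasting the second input in place is accepted: 'full' can hold the result.
        const bool ok = bool(NEArithmeticAddition::validate(&full, &bcast, nullptr, ConvertPolicy::WRAP));  // true

        // Broadcasting the first input in place is rejected: 'bcast' cannot hold a 32x13x2 result.
        const bool bad = bool(NEArithmeticAddition::validate(&bcast, &full, nullptr, ConvertPolicy::WRAP)); // false

        (void)ok;
        (void)bad;
    }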
diff --git a/tests/validation/CL/ArithmeticAddition.cpp b/tests/validation/CL/ArithmeticAddition.cpp
index 41415ee481..229dcbc712 100644
--- a/tests/validation/CL/ArithmeticAddition.cpp
+++ b/tests/validation/CL/ArithmeticAddition.cpp
@@ -108,31 +108,9 @@ using CLArithmeticAdditionFixture = ArithmeticAdditionValidationFixture<CLTensor
TEST_SUITE(Integer)
TEST_SUITE(U8)
-DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(datasets::SmallShapes(), framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
- shape, policy)
-{
- // Create tensors
- CLTensor ref_src1 = create_tensor<CLTensor>(shape, DataType::U8);
- CLTensor ref_src2 = create_tensor<CLTensor>(shape, DataType::U8);
- CLTensor dst = create_tensor<CLTensor>(shape, DataType::U8);
-
- // Create and Configure function
- CLArithmeticAddition add;
- add.configure(&ref_src1, &ref_src2, &dst, policy);
-
- // Validate valid region
- const ValidRegion valid_region = shape_to_valid_region(shape);
- validate(dst.info()->valid_region(), valid_region);
-
- // Validate padding
- const PaddingSize padding = PaddingCalculator(shape.x(), num_elems_processed_per_iteration).required_padding();
- validate(ref_src1.info()->padding(), padding);
- validate(ref_src2.info()->padding(), padding);
- validate(dst.info()->padding(), padding);
-}
-
-FIXTURE_DATA_TEST_CASE(RunSmall, CLArithmeticAdditionFixture<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallShapes(), ArithmeticAdditionU8Dataset),
- framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })))
+FIXTURE_DATA_TEST_CASE(RunSmall, CLArithmeticAdditionFixture<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallShapes(), ArithmeticAdditionU8Dataset),
+ framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
+ framework::dataset::make("InPlace", { false })))
{
// Validate output
validate(CLAccessor(_target), _reference);
@@ -140,39 +118,17 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLArithmeticAdditionFixture<uint8_t>, framework
TEST_SUITE_END() // U8
TEST_SUITE(S16)
-DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(datasets::SmallShapes(), framework::dataset::make("DataType", { DataType::U8, DataType::S16 })),
- framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
- shape, data_type, policy)
-{
- // Create tensors
- CLTensor ref_src1 = create_tensor<CLTensor>(shape, data_type);
- CLTensor ref_src2 = create_tensor<CLTensor>(shape, DataType::S16);
- CLTensor dst = create_tensor<CLTensor>(shape, DataType::S16);
-
- // Create and Configure function
- CLArithmeticAddition add;
- add.configure(&ref_src1, &ref_src2, &dst, policy);
-
- // Validate valid region
- const ValidRegion valid_region = shape_to_valid_region(shape);
- validate(dst.info()->valid_region(), valid_region);
-
- // Validate padding
- const PaddingSize padding = PaddingCalculator(shape.x(), num_elems_processed_per_iteration).required_padding();
- validate(ref_src1.info()->padding(), padding);
- validate(ref_src2.info()->padding(), padding);
- validate(dst.info()->padding(), padding);
-}
-
-FIXTURE_DATA_TEST_CASE(RunSmall, CLArithmeticAdditionFixture<int16_t>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallShapes(), ArithmeticAdditionS16Dataset),
- framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })))
+FIXTURE_DATA_TEST_CASE(RunSmall, CLArithmeticAdditionFixture<int16_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallShapes(), ArithmeticAdditionS16Dataset),
+ framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
+ framework::dataset::make("InPlace", { false })))
{
// Validate output
validate(CLAccessor(_target), _reference);
}
-FIXTURE_DATA_TEST_CASE(RunLarge, CLArithmeticAdditionFixture<int16_t>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::LargeShapes(), ArithmeticAdditionS16Dataset),
- framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })))
+FIXTURE_DATA_TEST_CASE(RunLarge, CLArithmeticAdditionFixture<int16_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeShapes(), ArithmeticAdditionS16Dataset),
+ framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
+ framework::dataset::make("InPlace", { false })))
{
// Validate output
validate(CLAccessor(_target), _reference);
@@ -185,29 +141,6 @@ using CLArithmeticAdditionQuantizedFixture = ArithmeticAdditionValidationQuantiz
TEST_SUITE(Quantized)
TEST_SUITE(QASYMM8)
-DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(datasets::SmallShapes(), framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE })),
- shape, policy)
-{
- // Create tensors
- CLTensor ref_src1 = create_tensor<CLTensor>(shape, DataType::QASYMM8);
- CLTensor ref_src2 = create_tensor<CLTensor>(shape, DataType::QASYMM8);
- CLTensor dst = create_tensor<CLTensor>(shape, DataType::QASYMM8);
-
- // Create and Configure function
- CLArithmeticAddition add;
- add.configure(&ref_src1, &ref_src2, &dst, policy);
-
- // Validate valid region
- const ValidRegion valid_region = shape_to_valid_region(shape);
- validate(dst.info()->valid_region(), valid_region);
-
- // Validate padding
- const PaddingSize padding = PaddingCalculator(shape.x(), num_elems_processed_per_iteration).required_padding();
- validate(ref_src1.info()->padding(), padding);
- validate(ref_src2.info()->padding(), padding);
- validate(dst.info()->padding(), padding);
-}
-
FIXTURE_DATA_TEST_CASE(RunSmall, CLArithmeticAdditionQuantizedFixture<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(combine(datasets::SmallShapes(),
ArithmeticAdditionQASYMM8Dataset),
framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE })),
@@ -220,29 +153,6 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLArithmeticAdditionQuantizedFixture<uint8_t>,
}
TEST_SUITE_END() // QASYMM8
TEST_SUITE(QASYMM8_SIGNED)
-DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(datasets::SmallShapes(), framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE })),
- shape, policy)
-{
- // Create tensors
- CLTensor ref_src1 = create_tensor<CLTensor>(shape, DataType::QASYMM8_SIGNED);
- CLTensor ref_src2 = create_tensor<CLTensor>(shape, DataType::QASYMM8_SIGNED);
- CLTensor dst = create_tensor<CLTensor>(shape, DataType::QASYMM8_SIGNED);
-
- // Create and Configure function
- CLArithmeticAddition add;
- add.configure(&ref_src1, &ref_src2, &dst, policy);
-
- // Validate valid region
- const ValidRegion valid_region = shape_to_valid_region(shape);
- validate(dst.info()->valid_region(), valid_region);
-
- // Validate padding
- const PaddingSize padding = PaddingCalculator(shape.x(), num_elems_processed_per_iteration).required_padding();
- validate(ref_src1.info()->padding(), padding);
- validate(ref_src2.info()->padding(), padding);
- validate(dst.info()->padding(), padding);
-}
-
FIXTURE_DATA_TEST_CASE(RunSmall, CLArithmeticAdditionQuantizedFixture<int8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(combine(datasets::SmallShapes(),
ArithmeticAdditionQASYMM8SignedDataset),
framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE })),
@@ -255,29 +165,6 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLArithmeticAdditionQuantizedFixture<int8_t>, f
}
TEST_SUITE_END() // QASYMM8_SIGNED
TEST_SUITE(QSYMM16)
-DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(datasets::SmallShapes(), framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE })),
- shape, policy)
-{
- // Create tensors
- CLTensor ref_src1 = create_tensor<CLTensor>(shape, DataType::QSYMM16);
- CLTensor ref_src2 = create_tensor<CLTensor>(shape, DataType::QSYMM16);
- CLTensor dst = create_tensor<CLTensor>(shape, DataType::QSYMM16);
-
- // Create and Configure function
- CLArithmeticAddition add;
- add.configure(&ref_src1, &ref_src2, &dst, policy);
-
- // Validate valid region
- const ValidRegion valid_region = shape_to_valid_region(shape);
- validate(dst.info()->valid_region(), valid_region);
-
- // Validate padding
- const PaddingSize padding = PaddingCalculator(shape.x(), num_elems_processed_per_iteration).required_padding();
- validate(ref_src1.info()->padding(), padding);
- validate(ref_src2.info()->padding(), padding);
- validate(dst.info()->padding(), padding);
-}
-
FIXTURE_DATA_TEST_CASE(RunSmall, CLArithmeticAdditionQuantizedFixture<int16_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(combine(datasets::SmallShapes(),
ArithmeticAdditionQSYMM16Dataset),
framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE })),
@@ -313,29 +200,6 @@ FIXTURE_DATA_TEST_CASE(RunWithActivation, CLArithmeticAdditionFloatFixture<half>
TEST_SUITE_END() // FP16
TEST_SUITE(FP32)
-DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(datasets::SmallShapes(), framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
- shape, policy)
-{
- // Create tensors
- CLTensor ref_src1 = create_tensor<CLTensor>(shape, DataType::F32);
- CLTensor ref_src2 = create_tensor<CLTensor>(shape, DataType::F32);
- CLTensor dst = create_tensor<CLTensor>(shape, DataType::F32);
-
- // Create and Configure function
- CLArithmeticAddition add;
- add.configure(&ref_src1, &ref_src2, &dst, policy);
-
- // Validate valid region
- const ValidRegion valid_region = shape_to_valid_region(shape);
- validate(dst.info()->valid_region(), valid_region);
-
- // Validate padding
- const PaddingSize padding = PaddingCalculator(shape.x(), num_elems_processed_per_iteration).required_padding();
- validate(ref_src1.info()->padding(), padding);
- validate(ref_src2.info()->padding(), padding);
- validate(dst.info()->padding(), padding);
-}
-
FIXTURE_DATA_TEST_CASE(RunSmall, CLArithmeticAdditionFloatFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallShapes(), ArithmeticAdditionFP32Dataset),
framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
EmptyActivationFunctionsDataset))
diff --git a/tests/validation/GLES_COMPUTE/ArithmeticAddition.cpp b/tests/validation/GLES_COMPUTE/ArithmeticAddition.cpp
index 82946fa209..336870ac8a 100755..100644
--- a/tests/validation/GLES_COMPUTE/ArithmeticAddition.cpp
+++ b/tests/validation/GLES_COMPUTE/ArithmeticAddition.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018 ARM Limited.
+ * Copyright (c) 2017-2020 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -56,8 +56,9 @@ using GCArithmeticAdditionFixture = ArithmeticAdditionValidationFixture<GCTensor
TEST_SUITE(Float)
TEST_SUITE(FP16)
-FIXTURE_DATA_TEST_CASE(RunSmall, GCArithmeticAdditionFixture<half>, framework::DatasetMode::ALL, combine(combine(datasets::SmallShapes(), ArithmeticAdditionFP16Dataset),
- framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })))
+FIXTURE_DATA_TEST_CASE(RunSmall, GCArithmeticAdditionFixture<half>, framework::DatasetMode::ALL, combine(combine(combine(datasets::SmallShapes(), ArithmeticAdditionFP16Dataset),
+ framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
+ framework::dataset::make("InPlace", { false })))
{
// Validate output
validate(GCAccessor(_target), _reference);
diff --git a/tests/validation/NEON/ArithmeticAddition.cpp b/tests/validation/NEON/ArithmeticAddition.cpp
index 72993172fd..19ebfedcb5 100644
--- a/tests/validation/NEON/ArithmeticAddition.cpp
+++ b/tests/validation/NEON/ArithmeticAddition.cpp
@@ -104,13 +104,33 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
ConvertPolicy::WRAP);
ARM_COMPUTE_EXPECT(bool(s) == expected, framework::LogLevel::ERRORS);
}
+
+DATA_TEST_CASE(ValidateInPlace, framework::DatasetMode::ALL, zip(zip(
+ framework::dataset::make("Input1Info", { TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8),
+ TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::U8),
+ TensorInfo(TensorShape(1U, 13U, 2U), 1, DataType::U8), // Unsupported broadcast on input1 with in-place computation
+ }),
+ framework::dataset::make("Input2Info",{ TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8),
+ TensorInfo(TensorShape(1U, 13U, 2U), 1, DataType::U8),
+ TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8),
+ })),
+ framework::dataset::make("Expected", { true, true, false })),
+ input1_info, input2_info, expected)
+{
+ Status s = NEArithmeticAddition::validate(&input1_info.clone()->set_is_resizable(false),
+ &input2_info.clone()->set_is_resizable(false),
+ nullptr,
+ ConvertPolicy::WRAP);
+ ARM_COMPUTE_EXPECT(bool(s) == expected, framework::LogLevel::ERRORS);
+}
// clang-format on
// *INDENT-ON*
TEST_SUITE(Integer)
TEST_SUITE(U8)
-FIXTURE_DATA_TEST_CASE(RunSmall, NEArithmeticAdditionFixture<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallShapes(), ArithmeticAdditionU8Dataset),
- framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })))
+FIXTURE_DATA_TEST_CASE(RunSmall, NEArithmeticAdditionFixture<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallShapes(), ArithmeticAdditionU8Dataset),
+ framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
+ framework::dataset::make("InPlace", { false, true })))
{
// Validate output
validate(Accessor(_target), _reference);
@@ -118,15 +138,17 @@ FIXTURE_DATA_TEST_CASE(RunSmall, NEArithmeticAdditionFixture<uint8_t>, framework
TEST_SUITE_END() // U8
TEST_SUITE(S16)
-FIXTURE_DATA_TEST_CASE(RunSmall, NEArithmeticAdditionFixture<int16_t>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallShapes(), ArithmeticAdditionS16Dataset),
- framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })))
+FIXTURE_DATA_TEST_CASE(RunSmall, NEArithmeticAdditionFixture<int16_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallShapes(), ArithmeticAdditionS16Dataset),
+ framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
+ framework::dataset::make("InPlace", { false })))
{
// Validate output
validate(Accessor(_target), _reference);
}
-FIXTURE_DATA_TEST_CASE(RunLarge, NEArithmeticAdditionFixture<int16_t>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::LargeShapes(), ArithmeticAdditionS16Dataset),
- framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })))
+FIXTURE_DATA_TEST_CASE(RunLarge, NEArithmeticAdditionFixture<int16_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeShapes(), ArithmeticAdditionS16Dataset),
+ framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
+ framework::dataset::make("InPlace", { false })))
{
// Validate output
validate(Accessor(_target), _reference);
@@ -134,8 +156,9 @@ FIXTURE_DATA_TEST_CASE(RunLarge, NEArithmeticAdditionFixture<int16_t>, framework
TEST_SUITE_END() // S16
TEST_SUITE(S32)
-FIXTURE_DATA_TEST_CASE(RunSmall, NEArithmeticAdditionFixture<int32_t>, framework::DatasetMode::ALL, combine(combine(datasets::SmallShapes(), ArithmeticAdditionS32Dataset),
- framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })))
+FIXTURE_DATA_TEST_CASE(RunSmall, NEArithmeticAdditionFixture<int32_t>, framework::DatasetMode::ALL, combine(combine(combine(datasets::SmallShapes(), ArithmeticAdditionS32Dataset),
+ framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
+ framework::dataset::make("InPlace", { false })))
{
// Validate output
validate(Accessor(_target), _reference);
@@ -146,8 +169,9 @@ TEST_SUITE_END() // Integer
TEST_SUITE(Float)
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
TEST_SUITE(F16)
-FIXTURE_DATA_TEST_CASE(RunSmall, NEArithmeticAdditionFixture<half>, framework::DatasetMode::ALL, combine(combine(datasets::SmallShapes(), ArithmeticAdditionFP16Dataset),
- framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })))
+FIXTURE_DATA_TEST_CASE(RunSmall, NEArithmeticAdditionFixture<half>, framework::DatasetMode::ALL, combine(combine(combine(datasets::SmallShapes(), ArithmeticAdditionFP16Dataset),
+ framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
+ framework::dataset::make("InPlace", { false })))
{
// Validate output
validate(Accessor(_target), _reference);
@@ -156,15 +180,17 @@ TEST_SUITE_END() // F16
#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
TEST_SUITE(F32)
-FIXTURE_DATA_TEST_CASE(RunSmall, NEArithmeticAdditionFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallShapes(), ArithmeticAdditionFP32Dataset),
- framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })))
+FIXTURE_DATA_TEST_CASE(RunSmall, NEArithmeticAdditionFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallShapes(), ArithmeticAdditionFP32Dataset),
+ framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
+ framework::dataset::make("InPlace", { false })))
{
// Validate output
validate(Accessor(_target), _reference);
}
-FIXTURE_DATA_TEST_CASE(RunLarge, NEArithmeticAdditionFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::LargeShapes(), ArithmeticAdditionFP32Dataset),
- framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })))
+FIXTURE_DATA_TEST_CASE(RunLarge, NEArithmeticAdditionFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeShapes(), ArithmeticAdditionFP32Dataset),
+ framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
+ framework::dataset::make("InPlace", { false })))
{
// Validate output
validate(Accessor(_target), _reference);
@@ -173,17 +199,28 @@ FIXTURE_DATA_TEST_CASE(RunLarge, NEArithmeticAdditionFixture<float>, framework::
template <typename T>
using NEArithmeticAdditionBroadcastFixture = ArithmeticAdditionBroadcastValidationFixture<Tensor, Accessor, NEArithmeticAddition, T>;
-FIXTURE_DATA_TEST_CASE(RunSmallBroadcast, NEArithmeticAdditionBroadcastFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallShapesBroadcast(),
+FIXTURE_DATA_TEST_CASE(RunSmallBroadcast, NEArithmeticAdditionBroadcastFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallShapesBroadcast(),
+ ArithmeticAdditionFP32Dataset),
+ framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
+ framework::dataset::make("InPlace", { false })))
+{
+ // Validate output
+ validate(Accessor(_target), _reference);
+}
+
+FIXTURE_DATA_TEST_CASE(RunSmallBroadcastInPlace, NEArithmeticAdditionBroadcastFixture<float>, framework::DatasetMode::ALL, combine(combine(combine(datasets::SmallShapesBroadcastInPlace(),
ArithmeticAdditionFP32Dataset),
- framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })))
+ framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
+ framework::dataset::make("InPlace", { true })))
{
// Validate output
validate(Accessor(_target), _reference);
}
-FIXTURE_DATA_TEST_CASE(RunLargeBroadcast, NEArithmeticAdditionBroadcastFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::LargeShapesBroadcast(),
+FIXTURE_DATA_TEST_CASE(RunLargeBroadcast, NEArithmeticAdditionBroadcastFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeShapesBroadcast(),
ArithmeticAdditionFP32Dataset),
- framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })))
+ framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
+ framework::dataset::make("InPlace", { false })))
{
// Validate output
validate(Accessor(_target), _reference);
diff --git a/tests/validation/fixtures/ArithmeticOperationsFixture.h b/tests/validation/fixtures/ArithmeticOperationsFixture.h
index 4a6b0bd3f3..1019e60233 100644
--- a/tests/validation/fixtures/ArithmeticOperationsFixture.h
+++ b/tests/validation/fixtures/ArithmeticOperationsFixture.h
@@ -48,8 +48,10 @@ public:
template <typename...>
void setup(reference::ArithmeticOperation op, const TensorShape &shape0, const TensorShape &shape1,
DataType data_type0, DataType data_type1, DataType output_data_type, ConvertPolicy convert_policy,
- QuantizationInfo qinfo0, QuantizationInfo qinfo1, QuantizationInfo qinfo_out, ActivationLayerInfo act_info)
+ QuantizationInfo qinfo0, QuantizationInfo qinfo1, QuantizationInfo qinfo_out, ActivationLayerInfo act_info,
+ bool in_place)
{
+ _in_place = in_place;
_op = op;
_act_info = act_info;
_target = compute_target(shape0, shape1, data_type0, data_type1, output_data_type, convert_policy, qinfo0, qinfo1, qinfo_out);
@@ -71,9 +73,11 @@ protected:
TensorType ref_src2 = create_tensor<TensorType>(shape1, data_type1, 1, qinfo1);
TensorType dst = create_tensor<TensorType>(TensorShape::broadcast_shape(shape0, shape1), output_data_type, 1, qinfo_out);
+ TensorType *dst_ptr = _in_place ? nullptr : &dst;
+
// Create and configure function
FunctionType arith_op;
- arith_op.configure(&ref_src1, &ref_src2, &dst, convert_policy, _act_info);
+ arith_op.configure(&ref_src1, &ref_src2, dst_ptr, convert_policy, _act_info);
ARM_COMPUTE_EXPECT(ref_src1.info()->is_resizable(), framework::LogLevel::ERRORS);
ARM_COMPUTE_EXPECT(ref_src2.info()->is_resizable(), framework::LogLevel::ERRORS);
@@ -82,11 +86,15 @@ protected:
// Allocate tensors
ref_src1.allocator()->allocate();
ref_src2.allocator()->allocate();
- dst.allocator()->allocate();
ARM_COMPUTE_EXPECT(!ref_src1.info()->is_resizable(), framework::LogLevel::ERRORS);
ARM_COMPUTE_EXPECT(!ref_src2.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+
+ if(!_in_place)
+ {
+ dst.allocator()->allocate();
+ ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ }
// Fill tensors
fill(AccessorType(ref_src1), 0);
@@ -95,7 +103,14 @@ protected:
// Compute function
arith_op.run();
- return dst;
+ if(_in_place)
+ {
+ return ref_src1;
+ }
+ else
+ {
+ return dst;
+ }
}
SimpleTensor<T> compute_reference(const TensorShape &shape0, const TensorShape &shape1,
@@ -119,6 +134,7 @@ protected:
SimpleTensor<T> _reference{};
reference::ArithmeticOperation _op{ reference::ArithmeticOperation::ADD };
ActivationLayerInfo _act_info{};
+ bool _in_place{};
};
template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
@@ -126,10 +142,10 @@ class ArithmeticAdditionBroadcastValidationFixture : public ArithmeticOperationG
{
public:
template <typename...>
- void setup(const TensorShape &shape0, const TensorShape &shape1, DataType data_type0, DataType data_type1, DataType output_data_type, ConvertPolicy convert_policy)
+ void setup(const TensorShape &shape0, const TensorShape &shape1, DataType data_type0, DataType data_type1, DataType output_data_type, ConvertPolicy convert_policy, bool in_place)
{
ArithmeticOperationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(reference::ArithmeticOperation::ADD, shape0, shape1, data_type0, data_type1,
- output_data_type, convert_policy, QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), ActivationLayerInfo());
+ output_data_type, convert_policy, QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), ActivationLayerInfo(), in_place);
}
};
@@ -138,10 +154,10 @@ class ArithmeticAdditionValidationFixture : public ArithmeticOperationGenericFix
{
public:
template <typename...>
- void setup(const TensorShape &shape, DataType data_type0, DataType data_type1, DataType output_data_type, ConvertPolicy convert_policy)
+ void setup(const TensorShape &shape, DataType data_type0, DataType data_type1, DataType output_data_type, ConvertPolicy convert_policy, bool in_place)
{
ArithmeticOperationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(reference::ArithmeticOperation::ADD, shape, shape, data_type0, data_type1,
- output_data_type, convert_policy, QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), ActivationLayerInfo());
+ output_data_type, convert_policy, QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), ActivationLayerInfo(), in_place);
}
};
@@ -153,7 +169,7 @@ public:
void setup(const TensorShape &shape0, const TensorShape &shape1, DataType data_type0, DataType data_type1, DataType output_data_type, ConvertPolicy convert_policy, ActivationLayerInfo act_info)
{
ArithmeticOperationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(reference::ArithmeticOperation::ADD, shape0, shape1, data_type0, data_type1,
- output_data_type, convert_policy, QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), act_info);
+ output_data_type, convert_policy, QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), act_info, false);
}
};
@@ -165,7 +181,7 @@ public:
void setup(const TensorShape &shape, DataType data_type0, DataType data_type1, DataType output_data_type, ConvertPolicy convert_policy, ActivationLayerInfo act_info)
{
ArithmeticOperationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(reference::ArithmeticOperation::ADD, shape, shape, data_type0, data_type1,
- output_data_type, convert_policy, QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), act_info);
+ output_data_type, convert_policy, QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), act_info, false);
}
};
@@ -179,7 +195,7 @@ public:
{
ArithmeticOperationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(reference::ArithmeticOperation::ADD, shape, shape, data_type0, data_type1,
- output_data_type, convert_policy, qinfo0, qinfo1, qinfo_out, ActivationLayerInfo());
+ output_data_type, convert_policy, qinfo0, qinfo1, qinfo_out, ActivationLayerInfo(), false);
}
};
@@ -192,7 +208,7 @@ public:
{
ArithmeticOperationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(reference::ArithmeticOperation::SUB, shape0, shape1,
data_type0, data_type1, output_data_type, convert_policy,
- QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), ActivationLayerInfo());
+ QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), ActivationLayerInfo(), false);
}
};
@@ -205,7 +221,7 @@ public:
{
ArithmeticOperationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(reference::ArithmeticOperation::SUB, shape0, shape1,
data_type0, data_type1, output_data_type, convert_policy,
- QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), act_info);
+ QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), act_info, false);
}
};
@@ -218,7 +234,7 @@ public:
{
ArithmeticOperationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(reference::ArithmeticOperation::SUB, shape, shape,
data_type0, data_type1, output_data_type, convert_policy,
- QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), ActivationLayerInfo());
+ QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), ActivationLayerInfo(), false);
}
};
@@ -231,7 +247,7 @@ public:
{
ArithmeticOperationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(reference::ArithmeticOperation::SUB, shape, shape,
data_type0, data_type1, output_data_type, convert_policy,
- QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), act_info);
+ QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), act_info, false);
}
};
@@ -246,7 +262,7 @@ public:
{
ArithmeticOperationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(reference::ArithmeticOperation::SUB, shape, shape,
data_type0, data_type1, output_data_type,
- convert_policy, qinfo0, qinfo1, qinfo_out, ActivationLayerInfo());
+ convert_policy, qinfo0, qinfo1, qinfo_out, ActivationLayerInfo(), false);
}
};
} // namespace validation