From da816752cad76c8e1b367e8e9c648994a1af599a Mon Sep 17 00:00:00 2001 From: Georgios Pinitas Date: Fri, 2 Jul 2021 09:22:14 +0100 Subject: Remove redundant implementations of Add/Sub operators Allows only implementations where inputs/output are of the same data type and removes legacy Computer Vision ones. Signed-off-by: Georgios Pinitas Change-Id: Ia2b3d23a04236aab682f0c36a1110a30f7c06d1c Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/5900 Tested-by: Arm Jenkins Reviewed-by: Michele Di Giorgio Comments-Addressed: Arm Jenkins --- tests/validation/CL/ArithmeticAddition.cpp | 76 +++++++++++--------------- tests/validation/CL/ArithmeticSubtraction.cpp | 63 ++++++++------------- tests/validation/CL/ElementwiseMax.cpp | 11 ++-- tests/validation/CL/ElementwiseMin.cpp | 11 ++-- tests/validation/CL/ElementwiseSquaredDiff.cpp | 11 ++-- tests/validation/CL/PReluLayer.cpp | 9 +-- 6 files changed, 69 insertions(+), 112 deletions(-) (limited to 'tests/validation/CL') diff --git a/tests/validation/CL/ArithmeticAddition.cpp b/tests/validation/CL/ArithmeticAddition.cpp index c74f6a3b23..9e3d9afc36 100644 --- a/tests/validation/CL/ArithmeticAddition.cpp +++ b/tests/validation/CL/ArithmeticAddition.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2020 Arm Limited. + * Copyright (c) 2017-2021 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -44,23 +44,6 @@ namespace validation namespace { /** Input data sets **/ -const auto ArithmeticAdditionU8Dataset = combine(combine(framework::dataset::make("DataType", DataType::U8), framework::dataset::make("DataType", DataType::U8)), framework::dataset::make("DataType", - DataType::U8)); -const auto ArithmeticAdditionQASYMM8Dataset = combine(combine(framework::dataset::make("DataType", DataType::QASYMM8), framework::dataset::make("DataType", DataType::QASYMM8)), - framework::dataset::make("DataType", - DataType::QASYMM8)); -const auto ArithmeticAdditionQASYMM8SignedDataset = combine(combine(framework::dataset::make("DataType", DataType::QASYMM8_SIGNED), framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)), - framework::dataset::make("DataType", - DataType::QASYMM8_SIGNED)); -const auto ArithmeticAdditionQSYMM16Dataset = combine(combine(framework::dataset::make("DataType", DataType::QSYMM16), framework::dataset::make("DataType", DataType::QSYMM16)), - framework::dataset::make("DataType", - DataType::QSYMM16)); -const auto ArithmeticAdditionS16Dataset = combine(combine(framework::dataset::make("DataType", { DataType::U8, DataType::S16 }), framework::dataset::make("DataType", DataType::S16)), - framework::dataset::make("DataType", DataType::S16)); -const auto ArithmeticAdditionFP16Dataset = combine(combine(framework::dataset::make("DataType", DataType::F16), framework::dataset::make("DataType", DataType::F16)), - framework::dataset::make("DataType", DataType::F16)); -const auto ArithmeticAdditionFP32Dataset = combine(combine(framework::dataset::make("DataType", DataType::F32), framework::dataset::make("DataType", DataType::F32)), - framework::dataset::make("DataType", DataType::F32)); const auto EmptyActivationFunctionsDataset = framework::dataset::make("ActivationInfo", { ActivationLayerInfo() }); const auto ActivationFunctionsDataset = framework::dataset::make("ActivationInfo", @@ -76,22 +59,19 @@ TEST_SUITE(ArithmeticAddition) // *INDENT-OFF* // clang-format off DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip( - framework::dataset::make("Input1Info", { TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), - 
TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), + framework::dataset::make("Input1Info", { TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), // Invalid data type combination TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), // Mismatching shapes }), - framework::dataset::make("Input2Info",{ TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), + framework::dataset::make("Input2Info",{ TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::S16), TensorInfo(TensorShape(48U, 11U, 2U), 1, DataType::F32), })), - framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::S16), - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), + framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), TensorInfo(TensorShape(48U, 11U, 2U), 1, DataType::F32), })), - framework::dataset::make("Expected", { true, true, false, false})), + framework::dataset::make("Expected", { true, false, false})), input1_info, input2_info, output_info, expected) { ARM_COMPUTE_EXPECT(bool(CLArithmeticAddition::validate(&input1_info.clone()->set_is_resizable(false), &input2_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false), ConvertPolicy::WRAP)) == expected, framework::LogLevel::ERRORS); @@ -129,7 +109,7 @@ using CLArithmeticAdditionFixture = ArithmeticAdditionValidationFixture, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallShapes(), ArithmeticAdditionU8Dataset), +FIXTURE_DATA_TEST_CASE(RunSmall, CLArithmeticAdditionFixture, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallShapes(), framework::dataset::make("DataType", DataType::U8)), framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP }))) { // Validate output @@ -138,14 +118,14 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLArithmeticAdditionFixture, framework TEST_SUITE_END() // U8 TEST_SUITE(S16) -FIXTURE_DATA_TEST_CASE(RunSmall, CLArithmeticAdditionFixture, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallShapes(), ArithmeticAdditionS16Dataset), +FIXTURE_DATA_TEST_CASE(RunSmall, CLArithmeticAdditionFixture, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallShapes(), framework::dataset::make("DataType", DataType::S16)), framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP }))) { // Validate output validate(CLAccessor(_target), _reference); } -FIXTURE_DATA_TEST_CASE(RunLarge, CLArithmeticAdditionFixture, framework::DatasetMode::NIGHTLY, combine(combine(datasets::LargeShapes(), ArithmeticAdditionS16Dataset), +FIXTURE_DATA_TEST_CASE(RunLarge, CLArithmeticAdditionFixture, framework::DatasetMode::NIGHTLY, combine(combine(datasets::LargeShapes(), framework::dataset::make("DataType", DataType::S16)), framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP }))) { // Validate output @@ -160,7 +140,7 @@ using CLArithmeticAdditionQuantizedFixture = ArithmeticAdditionValidationQuantiz TEST_SUITE(Quantized) TEST_SUITE(QASYMM8) FIXTURE_DATA_TEST_CASE(RunSmall, CLArithmeticAdditionQuantizedFixture, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(combine(datasets::SmallShapes(), - ArithmeticAdditionQASYMM8Dataset), + framework::dataset::make("DataType", 
DataType::QASYMM8)), framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE })), framework::dataset::make("Src0QInfo", { QuantizationInfo(5.f / 255.f, 20) })), framework::dataset::make("Src1QInfo", { QuantizationInfo(2.f / 255.f, 10) })), @@ -171,12 +151,13 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLArithmeticAdditionQuantizedFixture, } template using CLArithmeticAdditionBroadcastQuantizedFixture = ArithmeticAdditionValidationQuantizedBroadcastFixture; -FIXTURE_DATA_TEST_CASE(RunSmallBroadcast, CLArithmeticAdditionBroadcastQuantizedFixture, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(combine(datasets::SmallShapesBroadcast(), - ArithmeticAdditionQASYMM8Dataset), - framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE })), - framework::dataset::make("Src0QInfo", { QuantizationInfo(5.f / 255.f, 20) })), - framework::dataset::make("Src1QInfo", { QuantizationInfo(2.f / 255.f, 10) })), - framework::dataset::make("OutQInfo", { QuantizationInfo(1.f / 255.f, 5) }))) +FIXTURE_DATA_TEST_CASE(RunSmallBroadcast, CLArithmeticAdditionBroadcastQuantizedFixture, framework::DatasetMode::PRECOMMIT, + combine(combine(combine(combine(combine(datasets::SmallShapesBroadcast(), + framework::dataset::make("DataType", DataType::QASYMM8)), + framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE })), + framework::dataset::make("Src0QInfo", { QuantizationInfo(5.f / 255.f, 20) })), + framework::dataset::make("Src1QInfo", { QuantizationInfo(2.f / 255.f, 10) })), + framework::dataset::make("OutQInfo", { QuantizationInfo(1.f / 255.f, 5) }))) { // Validate output validate(CLAccessor(_target), _reference); @@ -184,7 +165,7 @@ FIXTURE_DATA_TEST_CASE(RunSmallBroadcast, CLArithmeticAdditionBroadcastQuantized TEST_SUITE_END() // QASYMM8 TEST_SUITE(QASYMM8_SIGNED) FIXTURE_DATA_TEST_CASE(RunSmall, CLArithmeticAdditionQuantizedFixture, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(combine(datasets::SmallShapes(), - ArithmeticAdditionQASYMM8SignedDataset), + framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)), framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE })), framework::dataset::make("Src0QInfo", { QuantizationInfo(5.f / 255.f, 10) })), framework::dataset::make("Src1QInfo", { QuantizationInfo(2.f / 255.f, 10) })), @@ -196,7 +177,7 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLArithmeticAdditionQuantizedFixture, f TEST_SUITE_END() // QASYMM8_SIGNED TEST_SUITE(QSYMM16) FIXTURE_DATA_TEST_CASE(RunSmall, CLArithmeticAdditionQuantizedFixture, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(combine(datasets::SmallShapes(), - ArithmeticAdditionQSYMM16Dataset), + framework::dataset::make("DataType", DataType::QSYMM16)), framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE })), framework::dataset::make("Src0QInfo", { QuantizationInfo(1.f / 32768.f, 0), QuantizationInfo(5.f / 32768.f, 0) })), framework::dataset::make("Src1QInfo", { QuantizationInfo(2.f / 32768.f, 0), QuantizationInfo(5.f / 32768.f, 0) })), @@ -213,14 +194,16 @@ using CLArithmeticAdditionFloatFixture = ArithmeticAdditionValidationFloatFixtur TEST_SUITE(Float) TEST_SUITE(FP16) -FIXTURE_DATA_TEST_CASE(RunSmall, CLArithmeticAdditionFloatFixture, framework::DatasetMode::ALL, combine(combine(combine(datasets::SmallShapes(), ArithmeticAdditionFP16Dataset), +FIXTURE_DATA_TEST_CASE(RunSmall, CLArithmeticAdditionFloatFixture, framework::DatasetMode::ALL, combine(combine(combine(datasets::SmallShapes(), framework::dataset::make("DataType", + 
DataType::F16)), framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })), EmptyActivationFunctionsDataset)) { // Validate output validate(CLAccessor(_target), _reference); } -FIXTURE_DATA_TEST_CASE(RunWithActivation, CLArithmeticAdditionFloatFixture, framework::DatasetMode::ALL, combine(combine(combine(datasets::TinyShapes(), ArithmeticAdditionFP16Dataset), +FIXTURE_DATA_TEST_CASE(RunWithActivation, CLArithmeticAdditionFloatFixture, framework::DatasetMode::ALL, combine(combine(combine(datasets::TinyShapes(), framework::dataset::make("DataType", + DataType::F16)), framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })), ActivationFunctionsDataset)) { @@ -230,14 +213,16 @@ FIXTURE_DATA_TEST_CASE(RunWithActivation, CLArithmeticAdditionFloatFixture TEST_SUITE_END() // FP16 TEST_SUITE(FP32) -FIXTURE_DATA_TEST_CASE(RunSmall, CLArithmeticAdditionFloatFixture, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallShapes(), ArithmeticAdditionFP32Dataset), +FIXTURE_DATA_TEST_CASE(RunSmall, CLArithmeticAdditionFloatFixture, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallShapes(), framework::dataset::make("DataType", + DataType::F32)), framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })), EmptyActivationFunctionsDataset)) { // Validate output validate(CLAccessor(_target), _reference); } -FIXTURE_DATA_TEST_CASE(RunWithActivation, CLArithmeticAdditionFloatFixture, framework::DatasetMode::ALL, combine(combine(combine(datasets::TinyShapes(), ArithmeticAdditionFP32Dataset), +FIXTURE_DATA_TEST_CASE(RunWithActivation, CLArithmeticAdditionFloatFixture, framework::DatasetMode::ALL, combine(combine(combine(datasets::TinyShapes(), framework::dataset::make("DataType", + DataType::F32)), framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })), ActivationFunctionsDataset)) { @@ -245,7 +230,8 @@ FIXTURE_DATA_TEST_CASE(RunWithActivation, CLArithmeticAdditionFloatFixture, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeShapes(), ArithmeticAdditionFP32Dataset), +FIXTURE_DATA_TEST_CASE(RunLarge, CLArithmeticAdditionFloatFixture, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeShapes(), framework::dataset::make("DataType", + DataType::F32)), framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })), EmptyActivationFunctionsDataset)) { @@ -257,7 +243,7 @@ template using CLArithmeticAdditionBroadcastFloatFixture = ArithmeticAdditionBroadcastValidationFloatFixture; FIXTURE_DATA_TEST_CASE(RunSmallBroadcast, CLArithmeticAdditionBroadcastFloatFixture, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallShapesBroadcast(), - ArithmeticAdditionFP32Dataset), + framework::dataset::make("DataType", DataType::F32)), framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })), EmptyActivationFunctionsDataset)) { @@ -265,7 +251,7 @@ FIXTURE_DATA_TEST_CASE(RunSmallBroadcast, CLArithmeticAdditionBroadcastFloatFixt validate(CLAccessor(_target), _reference); } FIXTURE_DATA_TEST_CASE(RunWithActivationBroadcast, CLArithmeticAdditionBroadcastFloatFixture, framework::DatasetMode::ALL, combine(combine(combine(datasets::TinyShapesBroadcast(), - ArithmeticAdditionFP32Dataset), + framework::dataset::make("DataType", DataType::F32)), framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, 
ConvertPolicy::WRAP })), ActivationFunctionsDataset)) { @@ -274,7 +260,7 @@ FIXTURE_DATA_TEST_CASE(RunWithActivationBroadcast, CLArithmeticAdditionBroadcast } FIXTURE_DATA_TEST_CASE(RunLargeBroadcast, CLArithmeticAdditionBroadcastFloatFixture, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeShapesBroadcast(), - ArithmeticAdditionFP32Dataset), + framework::dataset::make("DataType", DataType::F32)), framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })), EmptyActivationFunctionsDataset)) { diff --git a/tests/validation/CL/ArithmeticSubtraction.cpp b/tests/validation/CL/ArithmeticSubtraction.cpp index 2709fcaedb..00eba7f92a 100644 --- a/tests/validation/CL/ArithmeticSubtraction.cpp +++ b/tests/validation/CL/ArithmeticSubtraction.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2020 Arm Limited. + * Copyright (c) 2017-2021 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -44,24 +44,6 @@ namespace validation namespace { /** Input data sets **/ -const auto ArithmeticSubtractionU8Dataset = combine(combine(framework::dataset::make("DataType", DataType::U8), framework::dataset::make("DataType", DataType::U8)), - framework::dataset::make("DataType", - DataType::U8)); -const auto ArithmeticSubtractionQASYMM8Dataset = combine(combine(framework::dataset::make("DataType", DataType::QASYMM8), framework::dataset::make("DataType", DataType::QASYMM8)), - framework::dataset::make("DataType", - DataType::QASYMM8)); -const auto ArithmeticSubtractionQASYMM8SignedDataset = combine(combine(framework::dataset::make("DataType", DataType::QASYMM8_SIGNED), framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)), - framework::dataset::make("DataType", - DataType::QASYMM8_SIGNED)); -const auto ArithmeticSubtractionQSYMM16Dataset = combine(combine(framework::dataset::make("DataType", DataType::QSYMM16), framework::dataset::make("DataType", DataType::QSYMM16)), - framework::dataset::make("DataType", - DataType::QSYMM16)); -const auto ArithmeticSubtractionS16Dataset = combine(combine(framework::dataset::make("DataType", { DataType::U8, DataType::S16 }), framework::dataset::make("DataType", DataType::S16)), - framework::dataset::make("DataType", DataType::S16)); -const auto ArithmeticSubtractionFP16Dataset = combine(combine(framework::dataset::make("DataType", DataType::F16), framework::dataset::make("DataType", DataType::F16)), - framework::dataset::make("DataType", DataType::F16)); -const auto ArithmeticSubtractionFP32Dataset = combine(combine(framework::dataset::make("DataType", DataType::F32), framework::dataset::make("DataType", DataType::F32)), - framework::dataset::make("DataType", DataType::F32)); const auto EmptyActivationFunctionsDataset = framework::dataset::make("ActivationInfo", { ActivationLayerInfo() }); const auto ActivationFunctionsDataset = framework::dataset::make("ActivationInfo", @@ -79,22 +61,19 @@ TEST_SUITE(ArithmeticSubtraction) // *INDENT-OFF* // clang-format off DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip( - framework::dataset::make("Input1Info", { TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), + framework::dataset::make("Input1Info", { TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), // Invalid data type combination TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), // Mismatching shapes }), - framework::dataset::make("Input2Info",{ TensorInfo(TensorShape(32U, 13U, 2U), 
1, DataType::U8), - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), + framework::dataset::make("Input2Info",{ TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::S16), TensorInfo(TensorShape(48U, 11U, 2U), 1, DataType::F32), })), - framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::S16), - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), + framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), TensorInfo(TensorShape(48U, 11U, 2U), 1, DataType::F32), })), - framework::dataset::make("Expected", { true, true, false, false})), + framework::dataset::make("Expected", { true, false, false})), input1_info, input2_info, output_info, expected) { ARM_COMPUTE_EXPECT(bool(CLArithmeticSubtraction::validate(&input1_info.clone()->set_is_resizable(false), &input2_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false), ConvertPolicy::WRAP)) == expected, framework::LogLevel::ERRORS); @@ -159,7 +138,8 @@ using CLArithmeticSubtractionFixture = ArithmeticSubtractionValidationFixture, framework::DatasetMode::ALL, combine(combine(combine(datasets::SmallShapes(), ArithmeticSubtractionU8Dataset), +FIXTURE_DATA_TEST_CASE(RunSmall, CLArithmeticSubtractionFixture, framework::DatasetMode::ALL, combine(combine(combine(datasets::SmallShapes(), framework::dataset::make("DataType", + DataType::U8)), framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })), OutOfPlaceDataSet)) { @@ -169,7 +149,8 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLArithmeticSubtractionFixture, framew TEST_SUITE_END() // U8 TEST_SUITE(S16) -FIXTURE_DATA_TEST_CASE(RunSmall, CLArithmeticSubtractionFixture, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallShapes(), ArithmeticSubtractionS16Dataset), +FIXTURE_DATA_TEST_CASE(RunSmall, CLArithmeticSubtractionFixture, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallShapes(), framework::dataset::make("DataType", + DataType::S16)), framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })), OutOfPlaceDataSet)) { @@ -177,7 +158,8 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLArithmeticSubtractionFixture, framew validate(CLAccessor(_target), _reference); } -FIXTURE_DATA_TEST_CASE(RunLarge, CLArithmeticSubtractionFixture, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeShapes(), ArithmeticSubtractionS16Dataset), +FIXTURE_DATA_TEST_CASE(RunLarge, CLArithmeticSubtractionFixture, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeShapes(), framework::dataset::make("DataType", + DataType::S16)), framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })), OutOfPlaceDataSet)) { @@ -193,7 +175,7 @@ using CLArithmeticSubtractionQuantizedFixture = ArithmeticSubtractionValidationQ TEST_SUITE(Quantized) TEST_SUITE(QASYMM8) FIXTURE_DATA_TEST_CASE(RunSmall, CLArithmeticSubtractionQuantizedFixture, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(combine(combine(datasets::SmallShapes(), - ArithmeticSubtractionQASYMM8Dataset), + framework::dataset::make("DataType", DataType::QASYMM8)), framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE })), framework::dataset::make("Src0QInfo", { QuantizationInfo(5.f / 255.f, 20) })), framework::dataset::make("Src1QInfo", { 
QuantizationInfo(2.f / 255.f, 10) })), @@ -206,7 +188,7 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLArithmeticSubtractionQuantizedFixture, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(combine(combine(datasets::SmallShapes(), - ArithmeticSubtractionQASYMM8SignedDataset), + framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)), framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE })), framework::dataset::make("Src0QInfo", { QuantizationInfo(5.f / 255.f, 10) })), framework::dataset::make("Src1QInfo", { QuantizationInfo(2.f / 255.f, 10) })), @@ -219,7 +201,7 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLArithmeticSubtractionQuantizedFixture TEST_SUITE_END() // QASYMM8_SIGNED TEST_SUITE(QSYMM16) FIXTURE_DATA_TEST_CASE(RunSmall, CLArithmeticSubtractionQuantizedFixture, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(combine(combine(datasets::SmallShapes(), - ArithmeticSubtractionQSYMM16Dataset), + framework::dataset::make("DataType", DataType::QSYMM16)), framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE })), framework::dataset::make("Src0QInfo", { QuantizationInfo(1.f / 32768.f, 0), QuantizationInfo(5.f / 32768.f, 0) })), framework::dataset::make("Src1QInfo", { QuantizationInfo(2.f / 32768.f, 0), QuantizationInfo(5.f / 32768.f, 0) })), @@ -237,7 +219,8 @@ using CLArithmeticSubtractionFloatFixture = ArithmeticSubtractionValidationFloat TEST_SUITE(Float) TEST_SUITE(FP16) -FIXTURE_DATA_TEST_CASE(RunSmall, CLArithmeticSubtractionFloatFixture, framework::DatasetMode::ALL, combine(combine(combine(combine(datasets::SmallShapes(), ArithmeticSubtractionFP16Dataset), +FIXTURE_DATA_TEST_CASE(RunSmall, CLArithmeticSubtractionFloatFixture, framework::DatasetMode::ALL, combine(combine(combine(combine(datasets::SmallShapes(), framework::dataset::make("DataType", + DataType::F16)), framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })), EmptyActivationFunctionsDataset), OutOfPlaceDataSet)) @@ -246,7 +229,7 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLArithmeticSubtractionFloatFixture, fram validate(CLAccessor(_target), _reference); } FIXTURE_DATA_TEST_CASE(RunWithActivation, CLArithmeticSubtractionFloatFixture, framework::DatasetMode::ALL, combine(combine(combine(combine(datasets::TinyShapes(), - ArithmeticSubtractionFP16Dataset), + framework::dataset::make("DataType", DataType::F16)), framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })), ActivationFunctionsDataset), InPlaceDataSet)) @@ -258,7 +241,7 @@ TEST_SUITE_END() // FP16 TEST_SUITE(FP32) FIXTURE_DATA_TEST_CASE(RunSmall, CLArithmeticSubtractionFloatFixture, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(datasets::SmallShapes(), - ArithmeticSubtractionFP32Dataset), + framework::dataset::make("DataType", DataType::F32)), framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })), EmptyActivationFunctionsDataset), OutOfPlaceDataSet)) @@ -267,7 +250,7 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLArithmeticSubtractionFloatFixture, fra validate(CLAccessor(_target), _reference); } FIXTURE_DATA_TEST_CASE(RunWithActivation, CLArithmeticSubtractionFloatFixture, framework::DatasetMode::ALL, combine(combine(combine(combine(datasets::TinyShapes(), - ArithmeticSubtractionFP32Dataset), + framework::dataset::make("DataType", DataType::F32)), framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })), ActivationFunctionsDataset), InPlaceDataSet)) @@ 
-277,7 +260,7 @@ FIXTURE_DATA_TEST_CASE(RunWithActivation, CLArithmeticSubtractionFloatFixture, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(datasets::LargeShapes(), - ArithmeticSubtractionFP32Dataset), + framework::dataset::make("DataType", DataType::F32)), framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })), EmptyActivationFunctionsDataset), OutOfPlaceDataSet)) @@ -290,7 +273,7 @@ template using CLArithmeticSubtractionBroadcastFloatFixture = ArithmeticSubtractionBroadcastValidationFloatFixture; FIXTURE_DATA_TEST_CASE(RunSmallBroadcast, CLArithmeticSubtractionBroadcastFloatFixture, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(datasets::SmallShapesBroadcast(), - ArithmeticSubtractionFP32Dataset), + framework::dataset::make("DataType", DataType::F32)), framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })), EmptyActivationFunctionsDataset), OutOfPlaceDataSet)) @@ -299,7 +282,7 @@ FIXTURE_DATA_TEST_CASE(RunSmallBroadcast, CLArithmeticSubtractionBroadcastFloatF validate(CLAccessor(_target), _reference); } FIXTURE_DATA_TEST_CASE(RunWithActivationBroadcast, CLArithmeticSubtractionBroadcastFloatFixture, framework::DatasetMode::ALL, combine(combine(combine(combine(datasets::TinyShapesBroadcast(), - ArithmeticSubtractionFP32Dataset), + framework::dataset::make("DataType", DataType::F32)), framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })), ActivationFunctionsDataset), OutOfPlaceDataSet)) @@ -309,7 +292,7 @@ FIXTURE_DATA_TEST_CASE(RunWithActivationBroadcast, CLArithmeticSubtractionBroadc } FIXTURE_DATA_TEST_CASE(RunLargeBroadcast, CLArithmeticSubtractionBroadcastFloatFixture, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(datasets::LargeShapesBroadcast(), - ArithmeticSubtractionFP32Dataset), + framework::dataset::make("DataType", DataType::F32)), framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })), EmptyActivationFunctionsDataset), OutOfPlaceDataSet)) diff --git a/tests/validation/CL/ElementwiseMax.cpp b/tests/validation/CL/ElementwiseMax.cpp index b9444b2795..225968efd1 100644 --- a/tests/validation/CL/ElementwiseMax.cpp +++ b/tests/validation/CL/ElementwiseMax.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2020 Arm Limited. + * Copyright (c) 2018-2021 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -58,7 +58,7 @@ const auto ElementwiseMaxQASYMM8SignedDataset = combine(combine(framework::datas const auto ElementwiseMaxQSYMM16Dataset = combine(combine(framework::dataset::make("DataType", DataType::QSYMM16), framework::dataset::make("DataType", DataType::QSYMM16)), framework::dataset::make("DataType", DataType::QSYMM16)); -const auto ElementwiseMaxS16Dataset = combine(combine(framework::dataset::make("DataType", { DataType::U8, DataType::S16 }), framework::dataset::make("DataType", DataType::S16)), +const auto ElementwiseMaxS16Dataset = combine(combine(framework::dataset::make("DataType", { DataType::S16 }), framework::dataset::make("DataType", DataType::S16)), framework::dataset::make("DataType", DataType::S16)); const auto ElementwiseMaxFP16Dataset = combine(combine(framework::dataset::make("DataType", DataType::F16), framework::dataset::make("DataType", DataType::F16)), framework::dataset::make("DataType", DataType::F16)); @@ -80,21 +80,18 @@ TEST_SUITE(ElementwiseMax) // clang-format off DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip( framework::dataset::make("Input1Info", { TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), // Invalid data type combination TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), // Mismatching shapes }), framework::dataset::make("Input2Info",{ TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::S16), TensorInfo(TensorShape(48U, 11U, 2U), 1, DataType::F32), })), - framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::S16), - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), + framework::dataset::make("OutputInfo",{TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), TensorInfo(TensorShape(48U, 11U, 2U), 1, DataType::F32), })), - framework::dataset::make("Expected", { true, true, false, false})), + framework::dataset::make("Expected", { true, false, false})), input1_info, input2_info, output_info, expected) { ARM_COMPUTE_EXPECT(bool(CLElementwiseMax::validate(&input1_info.clone()->set_is_resizable(false), &input2_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false))) == expected, framework::LogLevel::ERRORS); diff --git a/tests/validation/CL/ElementwiseMin.cpp b/tests/validation/CL/ElementwiseMin.cpp index 8f53b241ab..2a066908fa 100644 --- a/tests/validation/CL/ElementwiseMin.cpp +++ b/tests/validation/CL/ElementwiseMin.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2020 Arm Limited. + * Copyright (c) 2018-2021 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -58,7 +58,7 @@ const auto ElementwiseMinQASYMM8SignedDataset = combine(combine(framework::datas const auto ElementwiseMinQSYMM16Dataset = combine(combine(framework::dataset::make("DataType", DataType::QSYMM16), framework::dataset::make("DataType", DataType::QSYMM16)), framework::dataset::make("DataType", DataType::QSYMM16)); -const auto ElementwiseMinS16Dataset = combine(combine(framework::dataset::make("DataType", { DataType::U8, DataType::S16 }), framework::dataset::make("DataType", DataType::S16)), +const auto ElementwiseMinS16Dataset = combine(combine(framework::dataset::make("DataType", { DataType::S16 }), framework::dataset::make("DataType", DataType::S16)), framework::dataset::make("DataType", DataType::S16)); const auto ElementwiseMinFP16Dataset = combine(combine(framework::dataset::make("DataType", DataType::F16), framework::dataset::make("DataType", DataType::F16)), framework::dataset::make("DataType", DataType::F16)); @@ -80,21 +80,18 @@ TEST_SUITE(ElementwiseMin) // clang-format off DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip( framework::dataset::make("Input1Info", { TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), // Invalid data type combination TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), // Mismatching shapes }), framework::dataset::make("Input2Info",{ TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::S16), TensorInfo(TensorShape(48U, 11U, 2U), 1, DataType::F32), })), - framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::S16), - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), + framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), TensorInfo(TensorShape(48U, 11U, 2U), 1, DataType::F32), })), - framework::dataset::make("Expected", { true, true, false, false})), + framework::dataset::make("Expected", { true, false, false})), input1_info, input2_info, output_info, expected) { ARM_COMPUTE_EXPECT(bool(CLElementwiseMin::validate(&input1_info.clone()->set_is_resizable(false), &input2_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false))) == expected, framework::LogLevel::ERRORS); diff --git a/tests/validation/CL/ElementwiseSquaredDiff.cpp b/tests/validation/CL/ElementwiseSquaredDiff.cpp index 0a4ab6627b..4c732b0885 100644 --- a/tests/validation/CL/ElementwiseSquaredDiff.cpp +++ b/tests/validation/CL/ElementwiseSquaredDiff.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2020 Arm Limited. + * Copyright (c) 2018-2021 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -57,7 +57,7 @@ const auto ElementwiseSquaredDiffQASYMM8Dataset = combine(combine(framework::dat const auto ElementwiseSquaredDiffQSYMM16Dataset = combine(combine(framework::dataset::make("DataType", DataType::QSYMM16), framework::dataset::make("DataType", DataType::QSYMM16)), framework::dataset::make("DataType", DataType::QSYMM16)); -const auto ElementwiseSquaredDiffS16Dataset = combine(combine(framework::dataset::make("DataType", { DataType::U8, DataType::S16 }), framework::dataset::make("DataType", DataType::S16)), +const auto ElementwiseSquaredDiffS16Dataset = combine(combine(framework::dataset::make("DataType", { DataType::S16 }), framework::dataset::make("DataType", DataType::S16)), framework::dataset::make("DataType", DataType::S16)); const auto ElementwiseSquaredDiffFP16Dataset = combine(combine(framework::dataset::make("DataType", DataType::F16), framework::dataset::make("DataType", DataType::F16)), framework::dataset::make("DataType", DataType::F16)); @@ -79,21 +79,18 @@ TEST_SUITE(ElementwiseSquaredDiff) // clang-format off DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip( framework::dataset::make("Input1Info", { TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), // Invalid data type combination TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), // Mismatching shapes }), framework::dataset::make("Input2Info",{ TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::S16), TensorInfo(TensorShape(48U, 11U, 2U), 1, DataType::F32), })), - framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::S16), - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), + framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), TensorInfo(TensorShape(48U, 11U, 2U), 1, DataType::F32), })), - framework::dataset::make("Expected", { true, true, false, false})), + framework::dataset::make("Expected", { true, false, false})), input1_info, input2_info, output_info, expected) { ARM_COMPUTE_EXPECT(bool(CLElementwiseSquaredDiff::validate(&input1_info.clone()->set_is_resizable(false), &input2_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false))) == expected, framework::LogLevel::ERRORS); diff --git a/tests/validation/CL/PReluLayer.cpp b/tests/validation/CL/PReluLayer.cpp index 043262d891..f3f1c8b1b8 100644 --- a/tests/validation/CL/PReluLayer.cpp +++ b/tests/validation/CL/PReluLayer.cpp @@ -56,7 +56,7 @@ const auto PReluLayerQASYMM8Dataset = combine(combine(framework::dataset::make(" const auto PReluLayerQASYMM8SIGNEDDataset = combine(combine(framework::dataset::make("DataType", DataType::QASYMM8_SIGNED), framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)), framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)); -const auto PReluLayerS16Dataset = combine(combine(framework::dataset::make("DataType", { DataType::U8, DataType::S16 }), framework::dataset::make("DataType", DataType::S16)), +const auto PReluLayerS16Dataset = combine(combine(framework::dataset::make("DataType", { DataType::S16 }), framework::dataset::make("DataType", DataType::S16)), framework::dataset::make("DataType", DataType::S16)); const auto PReluLayerFP16Dataset = 
combine(combine(framework::dataset::make("DataType", DataType::F16), framework::dataset::make("DataType", DataType::F16)), framework::dataset::make("DataType", DataType::F16)); @@ -71,21 +71,18 @@ TEST_SUITE(PReluLayer) // clang-format off DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip( framework::dataset::make("Input1Info", { TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), // Invalid data type combination TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), // Mismatching shapes }), framework::dataset::make("Input2Info",{ TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::S16), TensorInfo(TensorShape(48U, 11U, 2U), 1, DataType::F32), })), - framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::S16), - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), + framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), TensorInfo(TensorShape(48U, 11U, 2U), 1, DataType::F32), })), - framework::dataset::make("Expected", { true, true, false, false})), + framework::dataset::make("Expected", { true, false, false})), input1_info, input2_info, output_info, expected) { ARM_COMPUTE_EXPECT(bool(CLPReluLayer::validate(&input1_info.clone()->set_is_resizable(false), &input2_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false))) == expected, framework::LogLevel::ERRORS); -- cgit v1.2.1
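
A minimal sketch of the same-data-type contract the updated Validate tests exercise, assuming the public CLArithmeticAddition API already called in the tests above; the header paths and helper function names below are assumptions for illustration only and are not part of the patch.

// Sketch only: mirrors the Validate cases in the updated tests above.
// Assumed header locations; adjust to the library layout if they differ.
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/CL/functions/CLElementwiseOperations.h"

using namespace arm_compute;

// Same shape and data type on both inputs and the output: expected to validate.
bool addition_same_type_is_valid()
{
    const TensorInfo src0(TensorShape(32U, 13U, 2U), 1, DataType::F32);
    const TensorInfo src1(TensorShape(32U, 13U, 2U), 1, DataType::F32);
    const TensorInfo dst(TensorShape(32U, 13U, 2U), 1, DataType::F32);
    return bool(CLArithmeticAddition::validate(&src0, &src1, &dst, ConvertPolicy::WRAP));
}

// Mixed U8/S16 inputs: the legacy mixed-type path is removed by this change,
// so validation is expected to fail.
bool addition_mixed_type_is_valid()
{
    const TensorInfo src0(TensorShape(32U, 13U, 2U), 1, DataType::U8);
    const TensorInfo src1(TensorShape(32U, 13U, 2U), 1, DataType::S16);
    const TensorInfo dst(TensorShape(32U, 13U, 2U), 1, DataType::S16);
    return bool(CLArithmeticAddition::validate(&src0, &src1, &dst, ConvertPolicy::WRAP));
}

The same principle drives the test simplification itself: the per-type dataset constants (e.g. ArithmeticAdditionFP32Dataset), which combined three separate DataType entries for the two inputs and the output, are replaced by a single inline framework::dataset::make("DataType", ...) entry, since only matching input/output types remain valid.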