author     Giorgio Arena <giorgio.arena@arm.com>  2020-02-11 17:21:31 +0000
committer  SiCong Li <sicong.li@arm.com>          2020-04-03 08:51:12 +0000
commit     8b2a7d3aa119e7f1d6a03690d05eb27c5d178b9f (patch)
tree       9fb4f4f328f7a17de13bef109834e8ad8a21d2ee /tests
parent     15e4d876643c37e1db36ee1190ec52319479ffaf (diff)
download   ComputeLibrary-8b2a7d3aa119e7f1d6a03690d05eb27c5d178b9f.tar.gz
COMPMID-3101 Fuse activation with floating point elementwise operation layers in CL
Signed-off-by: Giorgio Arena <giorgio.arena@arm.com>
Change-Id: I1693f8664ba7c0dc8c076bbe7365cef1e667bd25
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/2718
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Gian Marco Iodice <gianmarco.iodice@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
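The core of the change: the CL elementwise functions' configure() methods gain an ActivationLayerInfo parameter (visible in the ArithmeticOperationsFixture.h hunk below, where the fixture now calls arith_op.configure(&ref_src1, &ref_src2, &dst, convert_policy, _act_info)), so a floating point activation can be fused into the elementwise kernel instead of running as a separate CLActivationLayer pass. A minimal usage sketch, with illustrative shapes and a standard CLTensor setup that is not taken from this patch:

#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/CL/CLFunctions.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "arm_compute/runtime/CL/CLTensor.h"

using namespace arm_compute;

int main()
{
    CLScheduler::get().default_init();

    // Two F32 inputs and one output of matching shape.
    CLTensor src0, src1, dst;
    src0.allocator()->init(TensorInfo(TensorShape(16U, 16U), 1, DataType::F32));
    src1.allocator()->init(TensorInfo(TensorShape(16U, 16U), 1, DataType::F32));
    dst.allocator()->init(TensorInfo(TensorShape(16U, 16U), 1, DataType::F32));

    // Addition with a bounded ReLU fused into the same kernel; passing a
    // default ActivationLayerInfo() keeps the old unfused behaviour.
    CLArithmeticAddition add;
    add.configure(&src0, &src1, &dst, ConvertPolicy::SATURATE,
                  ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 0.75f, 0.25f));

    src0.allocator()->allocate();
    src1.allocator()->allocate();
    dst.allocator()->allocate();

    add.run();
    CLScheduler::get().sync();
    return 0;
}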
Diffstat (limited to 'tests')
-rw-r--r--  tests/datasets/ShapeDatasets.h                              |  23
-rw-r--r--  tests/validation/CL/ArithmeticAddition.cpp                  |  65
-rw-r--r--  tests/validation/CL/ArithmeticDivision.cpp                  |  51
-rw-r--r--  tests/validation/CL/ArithmeticSubtraction.cpp               |  65
-rw-r--r--  tests/validation/CL/ElementwiseMax.cpp                      |  55
-rw-r--r--  tests/validation/CL/ElementwiseMin.cpp                      |  55
-rw-r--r--  tests/validation/CL/ElementwisePower.cpp                    |  55
-rw-r--r--  tests/validation/CL/ElementwiseSquaredDiff.cpp              |  44
-rw-r--r--  tests/validation/CL/PixelWiseMultiplication.cpp             |  32
-rw-r--r--  tests/validation/NEON/ElementwiseDivision.cpp               |   6
-rw-r--r--  tests/validation/fixtures/ArithmeticOperationsFixture.h     |  72
-rw-r--r--  tests/validation/fixtures/ElementwiseOperationsFixture.h    | 229
-rw-r--r--  tests/validation/fixtures/PixelWiseMultiplicationFixture.h  |  66
-rw-r--r--  tests/validation/reference/ActivationLayer.cpp              |   3
-rw-r--r--  tests/validation/reference/ActivationLayer.h                |   2
15 files changed, 657 insertions(+), 166 deletions(-)
diff --git a/tests/datasets/ShapeDatasets.h b/tests/datasets/ShapeDatasets.h
index 2a2047480f..087342d3b8 100644
--- a/tests/datasets/ShapeDatasets.h
+++ b/tests/datasets/ShapeDatasets.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2019 ARM Limited.
+ * Copyright (c) 2017-2020 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -156,7 +156,7 @@ public:
}
};
-/** Data set containing small tensor shapes. */
+/** Data set containing tiny tensor shapes. */
class TinyShapes final : public ShapeDataset
{
public:
@@ -190,6 +190,25 @@ public:
}
};
+/** Data set containing pairs of tiny tensor shapes that are broadcast compatible. */
+class TinyShapesBroadcast final : public framework::dataset::ZipDataset<ShapeDataset, ShapeDataset>
+{
+public:
+ TinyShapesBroadcast()
+ : ZipDataset<ShapeDataset, ShapeDataset>(
+ ShapeDataset("Shape0",
+ {
+ TensorShape{ 9U, 9U },
+ TensorShape{ 10U, 2U, 14U, 2U },
+ }),
+ ShapeDataset("Shape1",
+ {
+ TensorShape{ 9U, 1U, 9U },
+ TensorShape{ 10U },
+ }))
+ {
+ }
+};
/** Data set containing pairs of small tensor shapes that are broadcast compatible. */
class SmallShapesBroadcast final : public framework::dataset::ZipDataset<ShapeDataset, ShapeDataset>
{
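The TinyShapesBroadcast dataset above zips shape pairs that are broadcast compatible: dimension by dimension, the sizes either match or one of them is 1, with missing trailing dimensions treated as 1. A standalone sketch of that rule follows (an illustrative helper, not this library's API; the library keeps equivalent logic in TensorShape):

#include <algorithm>
#include <cstddef>
#include <vector>

// Returns true when the two shapes can be broadcast against each other.
bool broadcast_compatible(const std::vector<size_t> &a, const std::vector<size_t> &b)
{
    const size_t n = std::max(a.size(), b.size());
    for(size_t i = 0; i < n; ++i)
    {
        const size_t da = i < a.size() ? a[i] : 1; // missing dims act as 1
        const size_t db = i < b.size() ? b[i] : 1;
        if(da != db && da != 1 && db != 1)
        {
            return false;
        }
    }
    return true;
}

// The pairs above pass: {9, 9} vs {9, 1, 9} (9 == 9, then 9 vs 1, then an
// implicit 1 vs 9) and {10, 2, 14, 2} vs {10} (10 == 10, rest vs implicit 1s).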
diff --git a/tests/validation/CL/ArithmeticAddition.cpp b/tests/validation/CL/ArithmeticAddition.cpp
index 5702360e45..41415ee481 100644
--- a/tests/validation/CL/ArithmeticAddition.cpp
+++ b/tests/validation/CL/ArithmeticAddition.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2019 ARM Limited.
+ * Copyright (c) 2017-2020 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -51,8 +51,8 @@ const auto ArithmeticAdditionQASYMM8Dataset = combine(combine(framework::dataset
framework::dataset::make("DataType",
DataType::QASYMM8));
const auto ArithmeticAdditionQASYMM8SignedDataset = combine(combine(framework::dataset::make("DataType", DataType::QASYMM8_SIGNED), framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)),
- framework::dataset::make("DataType",
- DataType::QASYMM8_SIGNED));
+ framework::dataset::make("DataType",
+ DataType::QASYMM8_SIGNED));
const auto ArithmeticAdditionQSYMM16Dataset = combine(combine(framework::dataset::make("DataType", DataType::QSYMM16), framework::dataset::make("DataType", DataType::QSYMM16)),
framework::dataset::make("DataType",
DataType::QSYMM16));
@@ -62,6 +62,13 @@ const auto ArithmeticAdditionFP16Dataset = combine(combine(framework::dataset::m
framework::dataset::make("DataType", DataType::F16));
const auto ArithmeticAdditionFP32Dataset = combine(combine(framework::dataset::make("DataType", DataType::F32), framework::dataset::make("DataType", DataType::F32)),
framework::dataset::make("DataType", DataType::F32));
+const auto EmptyActivationFunctionsDataset = framework::dataset::make("ActivationInfo",
+{ ActivationLayerInfo() });
+const auto ActivationFunctionsDataset = framework::dataset::make("ActivationInfo",
+{
+ ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 0.75f, 0.25f),
+ ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LOGISTIC, 0.75f, 0.25f)
+});
} // namespace
TEST_SUITE(CL)
@@ -284,10 +291,21 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLArithmeticAdditionQuantizedFixture<int16_t>,
TEST_SUITE_END() // QSYMM16
TEST_SUITE_END() // Quantized
+template <typename T>
+using CLArithmeticAdditionFloatFixture = ArithmeticAdditionValidationFloatFixture<CLTensor, CLAccessor, CLArithmeticAddition, T>;
+
TEST_SUITE(Float)
TEST_SUITE(FP16)
-FIXTURE_DATA_TEST_CASE(RunSmall, CLArithmeticAdditionFixture<half>, framework::DatasetMode::ALL, combine(combine(datasets::SmallShapes(), ArithmeticAdditionFP16Dataset),
- framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })))
+FIXTURE_DATA_TEST_CASE(RunSmall, CLArithmeticAdditionFloatFixture<half>, framework::DatasetMode::ALL, combine(combine(combine(datasets::SmallShapes(), ArithmeticAdditionFP16Dataset),
+ framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
+ EmptyActivationFunctionsDataset))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference);
+}
+FIXTURE_DATA_TEST_CASE(RunWithActivation, CLArithmeticAdditionFloatFixture<half>, framework::DatasetMode::ALL, combine(combine(combine(datasets::TinyShapes(), ArithmeticAdditionFP16Dataset),
+ framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
+ ActivationFunctionsDataset))
{
// Validate output
validate(CLAccessor(_target), _reference);
@@ -318,34 +336,53 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(datasets::Sma
validate(dst.info()->padding(), padding);
}
-FIXTURE_DATA_TEST_CASE(RunSmall, CLArithmeticAdditionFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallShapes(), ArithmeticAdditionFP32Dataset),
- framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })))
+FIXTURE_DATA_TEST_CASE(RunSmall, CLArithmeticAdditionFloatFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallShapes(), ArithmeticAdditionFP32Dataset),
+ framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
+ EmptyActivationFunctionsDataset))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference);
+}
+FIXTURE_DATA_TEST_CASE(RunWithActivation, CLArithmeticAdditionFloatFixture<float>, framework::DatasetMode::ALL, combine(combine(combine(datasets::TinyShapes(), ArithmeticAdditionFP32Dataset),
+ framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
+ ActivationFunctionsDataset))
{
// Validate output
validate(CLAccessor(_target), _reference);
}
-FIXTURE_DATA_TEST_CASE(RunLarge, CLArithmeticAdditionFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::LargeShapes(), ArithmeticAdditionFP32Dataset),
- framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })))
+FIXTURE_DATA_TEST_CASE(RunLarge, CLArithmeticAdditionFloatFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeShapes(), ArithmeticAdditionFP32Dataset),
+ framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
+ EmptyActivationFunctionsDataset))
{
// Validate output
validate(CLAccessor(_target), _reference);
}
template <typename T>
-using CLArithmeticAdditionBroadcastFixture = ArithmeticAdditionBroadcastValidationFixture<CLTensor, CLAccessor, CLArithmeticAddition, T>;
+using CLArithmeticAdditionBroadcastFloatFixture = ArithmeticAdditionBroadcastValidationFloatFixture<CLTensor, CLAccessor, CLArithmeticAddition, T>;
-FIXTURE_DATA_TEST_CASE(RunSmallBroadcast, CLArithmeticAdditionBroadcastFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallShapesBroadcast(),
+FIXTURE_DATA_TEST_CASE(RunSmallBroadcast, CLArithmeticAdditionBroadcastFloatFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallShapesBroadcast(),
+ ArithmeticAdditionFP32Dataset),
+ framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
+ EmptyActivationFunctionsDataset))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference);
+}
+FIXTURE_DATA_TEST_CASE(RunWithActivationBroadcast, CLArithmeticAdditionBroadcastFloatFixture<float>, framework::DatasetMode::ALL, combine(combine(combine(datasets::TinyShapesBroadcast(),
ArithmeticAdditionFP32Dataset),
- framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })))
+ framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
+ ActivationFunctionsDataset))
{
// Validate output
validate(CLAccessor(_target), _reference);
}
-FIXTURE_DATA_TEST_CASE(RunLargeBroadcast, CLArithmeticAdditionBroadcastFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::LargeShapesBroadcast(),
+FIXTURE_DATA_TEST_CASE(RunLargeBroadcast, CLArithmeticAdditionBroadcastFloatFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeShapesBroadcast(),
ArithmeticAdditionFP32Dataset),
- framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })))
+ framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
+ EmptyActivationFunctionsDataset))
{
// Validate output
validate(CLAccessor(_target), _reference);
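Each suite now runs twice per precision: RunSmall with EmptyActivationFunctionsDataset (fusion disabled) and RunWithActivation over the tiny shapes with BOUNDED_RELU and LOGISTIC at a = 0.75f, b = 0.25f. As a scalar sketch of what those two activations compute, following their usual Compute Library definitions (BOUNDED_RELU uses only a; LOGISTIC uses neither parameter):

#include <algorithm>
#include <cmath>

// BOUNDED_RELU: f(x) = min(a, max(0, x))
float bounded_relu(float x, float a)
{
    return std::min(a, std::max(0.0f, x));
}

// LOGISTIC: f(x) = 1 / (1 + exp(-x))
float logistic(float x)
{
    return 1.0f / (1.0f + std::exp(-x));
}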
diff --git a/tests/validation/CL/ArithmeticDivision.cpp b/tests/validation/CL/ArithmeticDivision.cpp
index 8df770ab98..d970c31daa 100644
--- a/tests/validation/CL/ArithmeticDivision.cpp
+++ b/tests/validation/CL/ArithmeticDivision.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2019 ARM Limited.
+ * Copyright (c) 2018-2020 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -52,6 +52,13 @@ const auto ArithmeticDivisionFP16Dataset = combine(combine(framework::dataset::m
framework::dataset::make("DataType", DataType::F16));
const auto ArithmeticDivisionFP32Dataset = combine(combine(framework::dataset::make("DataType", DataType::F32), framework::dataset::make("DataType", DataType::F32)),
framework::dataset::make("DataType", DataType::F32));
+const auto EmptyActivationFunctionsDataset = framework::dataset::make("ActivationInfo",
+{ ActivationLayerInfo() });
+const auto ActivationFunctionsDataset = framework::dataset::make("ActivationInfo",
+{
+ ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 0.75f, 0.25f),
+ ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LOGISTIC, 0.75f, 0.25f)
+});
} // namespace
TEST_SUITE(CL)
@@ -87,11 +94,18 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
// *INDENT-ON*
template <typename T>
-using CLArithmeticDivisionFixture = ArithmeticDivisionValidationFixture<CLTensor, CLAccessor, CLArithmeticDivision, T>;
+using CLArithmeticDivisionFloatFixture = ArithmeticDivisionValidationFloatFixture<CLTensor, CLAccessor, CLArithmeticDivision, T>;
TEST_SUITE(Float)
TEST_SUITE(FP16)
-FIXTURE_DATA_TEST_CASE(RunSmall, CLArithmeticDivisionFixture<half>, framework::DatasetMode::ALL, combine(datasets::SmallShapes(), ArithmeticDivisionFP16Dataset))
+FIXTURE_DATA_TEST_CASE(RunSmall, CLArithmeticDivisionFloatFixture<half>, framework::DatasetMode::ALL, combine(combine(datasets::SmallShapes(), ArithmeticDivisionFP16Dataset),
+ EmptyActivationFunctionsDataset))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference, tolerance_fp16, 0.01);
+}
+FIXTURE_DATA_TEST_CASE(RunWithActivation, CLArithmeticDivisionFloatFixture<half>, framework::DatasetMode::ALL, combine(combine(datasets::TinyShapes(), ArithmeticDivisionFP16Dataset),
+ ActivationFunctionsDataset))
{
// Validate output
validate(CLAccessor(_target), _reference, tolerance_fp16, 0.01);
@@ -122,30 +136,47 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, datasets::SmallShapes
validate(dst.info()->padding(), padding);
}
-FIXTURE_DATA_TEST_CASE(RunSmall, CLArithmeticDivisionFixture<float>, framework::DatasetMode::PRECOMMIT, combine(datasets::SmallShapes(), ArithmeticDivisionFP32Dataset))
+FIXTURE_DATA_TEST_CASE(RunSmall, CLArithmeticDivisionFloatFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallShapes(), ArithmeticDivisionFP32Dataset),
+ EmptyActivationFunctionsDataset))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference, tolerance_fp32);
+}
+FIXTURE_DATA_TEST_CASE(RunWithActivation, CLArithmeticDivisionFloatFixture<float>, framework::DatasetMode::ALL, combine(combine(datasets::TinyShapes(), ArithmeticDivisionFP32Dataset),
+ ActivationFunctionsDataset))
{
// Validate output
validate(CLAccessor(_target), _reference, tolerance_fp32);
}
-FIXTURE_DATA_TEST_CASE(RunLarge, CLArithmeticDivisionFixture<float>, framework::DatasetMode::NIGHTLY, combine(datasets::LargeShapes(), ArithmeticDivisionFP32Dataset))
+FIXTURE_DATA_TEST_CASE(RunLarge, CLArithmeticDivisionFloatFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::LargeShapes(), ArithmeticDivisionFP32Dataset),
+ EmptyActivationFunctionsDataset))
{
// Validate output
validate(CLAccessor(_target), _reference, tolerance_fp32);
}
template <typename T>
-using CLArithmeticDivisionBroadcastFixture = ArithmeticDivisionBroadcastValidationFixture<CLTensor, CLAccessor, CLArithmeticDivision, T>;
+using CLArithmeticDivisionBroadcastFloatFixture = ArithmeticDivisionBroadcastValidationFloatFixture<CLTensor, CLAccessor, CLArithmeticDivision, T>;
-FIXTURE_DATA_TEST_CASE(RunSmallBroadcast, CLArithmeticDivisionBroadcastFixture<float>, framework::DatasetMode::PRECOMMIT, combine(datasets::SmallShapesBroadcast(),
- ArithmeticDivisionFP32Dataset))
+FIXTURE_DATA_TEST_CASE(RunSmallBroadcast, CLArithmeticDivisionBroadcastFloatFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallShapesBroadcast(),
+ ArithmeticDivisionFP32Dataset),
+ EmptyActivationFunctionsDataset))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference, tolerance_fp32);
+}
+FIXTURE_DATA_TEST_CASE(RunWithActivationBroadcast, CLArithmeticDivisionBroadcastFloatFixture<float>, framework::DatasetMode::ALL, combine(combine(datasets::TinyShapesBroadcast(),
+ ArithmeticDivisionFP32Dataset),
+ ActivationFunctionsDataset))
{
// Validate output
validate(CLAccessor(_target), _reference, tolerance_fp32);
}
-FIXTURE_DATA_TEST_CASE(RunLargeBroadcast, CLArithmeticDivisionBroadcastFixture<float>, framework::DatasetMode::NIGHTLY, combine(datasets::LargeShapesBroadcast(),
- ArithmeticDivisionFP32Dataset))
+FIXTURE_DATA_TEST_CASE(RunLargeBroadcast, CLArithmeticDivisionBroadcastFloatFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::LargeShapesBroadcast(),
+ ArithmeticDivisionFP32Dataset),
+ EmptyActivationFunctionsDataset))
{
// Validate output
validate(CLAccessor(_target), _reference, tolerance_fp32);
diff --git a/tests/validation/CL/ArithmeticSubtraction.cpp b/tests/validation/CL/ArithmeticSubtraction.cpp
index 592a7ed1a4..897ae1ab09 100644
--- a/tests/validation/CL/ArithmeticSubtraction.cpp
+++ b/tests/validation/CL/ArithmeticSubtraction.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2019 ARM Limited.
+ * Copyright (c) 2017-2020 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -52,8 +52,8 @@ const auto ArithmeticSubtractionQASYMM8Dataset = combine(combine(framework::data
framework::dataset::make("DataType",
DataType::QASYMM8));
const auto ArithmeticSubtractionQASYMM8SignedDataset = combine(combine(framework::dataset::make("DataType", DataType::QASYMM8_SIGNED), framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)),
- framework::dataset::make("DataType",
- DataType::QASYMM8_SIGNED));
+ framework::dataset::make("DataType",
+ DataType::QASYMM8_SIGNED));
const auto ArithmeticSubtractionQSYMM16Dataset = combine(combine(framework::dataset::make("DataType", DataType::QSYMM16), framework::dataset::make("DataType", DataType::QSYMM16)),
framework::dataset::make("DataType",
DataType::QSYMM16));
@@ -63,6 +63,13 @@ const auto ArithmeticSubtractionFP16Dataset = combine(combine(framework::dataset
framework::dataset::make("DataType", DataType::F16));
const auto ArithmeticSubtractionFP32Dataset = combine(combine(framework::dataset::make("DataType", DataType::F32), framework::dataset::make("DataType", DataType::F32)),
framework::dataset::make("DataType", DataType::F32));
+const auto EmptyActivationFunctionsDataset = framework::dataset::make("ActivationInfo",
+{ ActivationLayerInfo() });
+const auto ActivationFunctionsDataset = framework::dataset::make("ActivationInfo",
+{
+ ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 0.75f, 0.25f),
+ ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LOGISTIC, 0.75f, 0.25f)
+});
} // namespace
TEST_SUITE(CL)
@@ -285,10 +292,21 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLArithmeticSubtractionQuantizedFixture<int16_t
TEST_SUITE_END() // QSYMM16
TEST_SUITE_END() // Quantized
+template <typename T>
+using CLArithmeticSubtractionFloatFixture = ArithmeticSubtractionValidationFloatFixture<CLTensor, CLAccessor, CLArithmeticSubtraction, T>;
+
TEST_SUITE(Float)
TEST_SUITE(FP16)
-FIXTURE_DATA_TEST_CASE(RunSmall, CLArithmeticSubtractionFixture<half>, framework::DatasetMode::ALL, combine(combine(datasets::SmallShapes(), ArithmeticSubtractionFP16Dataset),
- framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })))
+FIXTURE_DATA_TEST_CASE(RunSmall, CLArithmeticSubtractionFloatFixture<half>, framework::DatasetMode::ALL, combine(combine(combine(datasets::SmallShapes(), ArithmeticSubtractionFP16Dataset),
+ framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
+ EmptyActivationFunctionsDataset))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference);
+}
+FIXTURE_DATA_TEST_CASE(RunWithActivation, CLArithmeticSubtractionFloatFixture<half>, framework::DatasetMode::ALL, combine(combine(combine(datasets::TinyShapes(), ArithmeticSubtractionFP16Dataset),
+ framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
+ ActivationFunctionsDataset))
{
// Validate output
validate(CLAccessor(_target), _reference);
@@ -319,34 +337,53 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(datasets::Sma
validate(dst.info()->padding(), padding);
}
-FIXTURE_DATA_TEST_CASE(RunSmall, CLArithmeticSubtractionFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallShapes(), ArithmeticSubtractionFP32Dataset),
- framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })))
+FIXTURE_DATA_TEST_CASE(RunSmall, CLArithmeticSubtractionFloatFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallShapes(), ArithmeticSubtractionFP32Dataset),
+ framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
+ EmptyActivationFunctionsDataset))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference);
+}
+FIXTURE_DATA_TEST_CASE(RunWithActivation, CLArithmeticSubtractionFloatFixture<float>, framework::DatasetMode::ALL, combine(combine(combine(datasets::TinyShapes(), ArithmeticSubtractionFP32Dataset),
+ framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
+ ActivationFunctionsDataset))
{
// Validate output
validate(CLAccessor(_target), _reference);
}
-FIXTURE_DATA_TEST_CASE(RunLarge, CLArithmeticSubtractionFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::LargeShapes(), ArithmeticSubtractionFP32Dataset),
- framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })))
+FIXTURE_DATA_TEST_CASE(RunLarge, CLArithmeticSubtractionFloatFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeShapes(), ArithmeticSubtractionFP32Dataset),
+ framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
+ EmptyActivationFunctionsDataset))
{
// Validate output
validate(CLAccessor(_target), _reference);
}
template <typename T>
-using CLArithmeticSubtractionBroadcastFixture = ArithmeticSubtractionBroadcastValidationFixture<CLTensor, CLAccessor, CLArithmeticSubtraction, T>;
+using CLArithmeticSubtractionBroadcastFloatFixture = ArithmeticSubtractionBroadcastValidationFloatFixture<CLTensor, CLAccessor, CLArithmeticSubtraction, T>;
-FIXTURE_DATA_TEST_CASE(RunSmallBroadcast, CLArithmeticSubtractionBroadcastFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallShapesBroadcast(),
+FIXTURE_DATA_TEST_CASE(RunSmallBroadcast, CLArithmeticSubtractionBroadcastFloatFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallShapesBroadcast(),
+ ArithmeticSubtractionFP32Dataset),
+ framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
+ EmptyActivationFunctionsDataset))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference);
+}
+FIXTURE_DATA_TEST_CASE(RunWithActivationBroadcast, CLArithmeticSubtractionBroadcastFloatFixture<float>, framework::DatasetMode::ALL, combine(combine(combine(datasets::TinyShapesBroadcast(),
ArithmeticSubtractionFP32Dataset),
- framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })))
+ framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
+ ActivationFunctionsDataset))
{
// Validate output
validate(CLAccessor(_target), _reference);
}
-FIXTURE_DATA_TEST_CASE(RunLargeBroadcast, CLArithmeticSubtractionBroadcastFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::LargeShapesBroadcast(),
+FIXTURE_DATA_TEST_CASE(RunLargeBroadcast, CLArithmeticSubtractionBroadcastFloatFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeShapesBroadcast(),
ArithmeticSubtractionFP32Dataset),
- framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })))
+ framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
+ EmptyActivationFunctionsDataset))
{
// Validate output
validate(CLAccessor(_target), _reference);
diff --git a/tests/validation/CL/ElementwiseMax.cpp b/tests/validation/CL/ElementwiseMax.cpp
index c7b4284e19..879e732cb0 100644
--- a/tests/validation/CL/ElementwiseMax.cpp
+++ b/tests/validation/CL/ElementwiseMax.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2019 ARM Limited.
+ * Copyright (c) 2018-2020 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -54,8 +54,8 @@ const auto ElementwiseMaxQASYMM8Dataset = combine(combine(framework::dataset::ma
framework::dataset::make("DataType",
DataType::QASYMM8));
const auto ElementwiseMaxQASYMM8SignedDataset = combine(combine(framework::dataset::make("DataType", DataType::QASYMM8_SIGNED), framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)),
- framework::dataset::make("DataType",
- DataType::QASYMM8_SIGNED));
+ framework::dataset::make("DataType",
+ DataType::QASYMM8_SIGNED));
const auto ElementwiseMaxQSYMM16Dataset = combine(combine(framework::dataset::make("DataType", DataType::QSYMM16), framework::dataset::make("DataType", DataType::QSYMM16)),
framework::dataset::make("DataType",
DataType::QSYMM16));
@@ -65,6 +65,13 @@ const auto ElementwiseMaxFP16Dataset = combine(combine(framework::dataset::make(
framework::dataset::make("DataType", DataType::F16));
const auto ElementwiseMaxFP32Dataset = combine(combine(framework::dataset::make("DataType", DataType::F32), framework::dataset::make("DataType", DataType::F32)),
framework::dataset::make("DataType", DataType::F32));
+const auto EmptyActivationFunctionsDataset = framework::dataset::make("ActivationInfo",
+{ ActivationLayerInfo() });
+const auto ActivationFunctionsDataset = framework::dataset::make("ActivationInfo",
+{
+ ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 0.75f, 0.25f),
+ ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LOGISTIC, 0.75f, 0.25f)
+});
} // namespace
TEST_SUITE(CL)
@@ -229,10 +236,10 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, datasets::SmallShapes
}
FIXTURE_DATA_TEST_CASE(RunSmall, CLElementwiseMaxQuantizedFixture<int8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(datasets::SmallShapes(),
- ElementwiseMaxQASYMM8SignedDataset),
- framework::dataset::make("Src0QInfo", { QuantizationInfo(5.f / 255.f, 20) })),
- framework::dataset::make("Src1QInfo", { QuantizationInfo(2.f / 255.f, 10) })),
- framework::dataset::make("OutQInfo", { QuantizationInfo(1.f / 255.f, 5) })))
+ ElementwiseMaxQASYMM8SignedDataset),
+ framework::dataset::make("Src0QInfo", { QuantizationInfo(5.f / 255.f, 20) })),
+ framework::dataset::make("Src1QInfo", { QuantizationInfo(2.f / 255.f, 10) })),
+ framework::dataset::make("OutQInfo", { QuantizationInfo(1.f / 255.f, 5) })))
{
// Validate output
validate(CLAccessor(_target), _reference);
@@ -274,9 +281,18 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLElementwiseMaxQuantizedFixture<int16_t>, fram
TEST_SUITE_END()
TEST_SUITE_END()
+template <typename T>
+using CLElementwiseMaxFloatFixture = ElementwiseMaxValidationFloatFixture<CLTensor, CLAccessor, CLElementwiseMax, T>;
+
TEST_SUITE(Float)
TEST_SUITE(FP16)
-FIXTURE_DATA_TEST_CASE(RunSmall, CLElementwiseMaxFixture<half>, framework::DatasetMode::ALL, combine(datasets::SmallShapes(), ElementwiseMaxFP16Dataset))
+FIXTURE_DATA_TEST_CASE(RunSmall, CLElementwiseMaxFloatFixture<half>, framework::DatasetMode::ALL, combine(combine(datasets::SmallShapes(), ElementwiseMaxFP16Dataset), EmptyActivationFunctionsDataset))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference, tolerance_fp16, 0.01);
+}
+FIXTURE_DATA_TEST_CASE(RunWithActivation, CLElementwiseMaxFloatFixture<half>, framework::DatasetMode::ALL, combine(combine(datasets::TinyShapes(), ElementwiseMaxFP16Dataset),
+ ActivationFunctionsDataset))
{
// Validate output
validate(CLAccessor(_target), _reference, tolerance_fp16, 0.01);
@@ -307,17 +323,32 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, datasets::SmallShapes
validate(dst.info()->padding(), padding);
}
-FIXTURE_DATA_TEST_CASE(RunSmall, CLElementwiseMaxFixture<float>, framework::DatasetMode::ALL, combine(datasets::SmallShapes(), ElementwiseMaxFP32Dataset))
+FIXTURE_DATA_TEST_CASE(RunSmall, CLElementwiseMaxFloatFixture<float>, framework::DatasetMode::ALL, combine(combine(datasets::SmallShapes(), ElementwiseMaxFP32Dataset),
+ EmptyActivationFunctionsDataset))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference, tolerance_fp32);
+}
+FIXTURE_DATA_TEST_CASE(RunWithActivation, CLElementwiseMaxFloatFixture<float>, framework::DatasetMode::ALL, combine(combine(datasets::TinyShapes(), ElementwiseMaxFP32Dataset),
+ ActivationFunctionsDataset))
{
// Validate output
validate(CLAccessor(_target), _reference, tolerance_fp32);
}
template <typename T>
-using CLElementwiseMaxBroadcastFixture = ElementwiseMaxBroadcastValidationFixture<CLTensor, CLAccessor, CLElementwiseMax, T>;
+using CLElementwiseMaxBroadcastFloatFixture = ElementwiseMaxBroadcastValidationFloatFixture<CLTensor, CLAccessor, CLElementwiseMax, T>;
-FIXTURE_DATA_TEST_CASE(RunSmallBroadcast, CLElementwiseMaxBroadcastFixture<float>, framework::DatasetMode::ALL, combine(datasets::SmallShapesBroadcast(),
- ElementwiseMaxFP32Dataset))
+FIXTURE_DATA_TEST_CASE(RunSmallBroadcast, CLElementwiseMaxBroadcastFloatFixture<float>, framework::DatasetMode::ALL, combine(combine(datasets::SmallShapesBroadcast(),
+ ElementwiseMaxFP32Dataset),
+ EmptyActivationFunctionsDataset))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference, tolerance_fp32);
+}
+FIXTURE_DATA_TEST_CASE(RunWithActivationBroadcast, CLElementwiseMaxBroadcastFloatFixture<float>, framework::DatasetMode::ALL, combine(combine(datasets::TinyShapesBroadcast(),
+ ElementwiseMaxFP32Dataset),
+ ActivationFunctionsDataset))
{
// Validate output
validate(CLAccessor(_target), _reference, tolerance_fp32);
diff --git a/tests/validation/CL/ElementwiseMin.cpp b/tests/validation/CL/ElementwiseMin.cpp
index 3d6bde1c98..332fa80d72 100644
--- a/tests/validation/CL/ElementwiseMin.cpp
+++ b/tests/validation/CL/ElementwiseMin.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2019 ARM Limited.
+ * Copyright (c) 2018-2020 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -54,8 +54,8 @@ const auto ElementwiseMinQASYMM8Dataset = combine(combine(framework::dataset::ma
framework::dataset::make("DataType",
DataType::QASYMM8));
const auto ElementwiseMinQASYMM8SignedDataset = combine(combine(framework::dataset::make("DataType", DataType::QASYMM8_SIGNED), framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)),
- framework::dataset::make("DataType",
- DataType::QASYMM8_SIGNED));
+ framework::dataset::make("DataType",
+ DataType::QASYMM8_SIGNED));
const auto ElementwiseMinQSYMM16Dataset = combine(combine(framework::dataset::make("DataType", DataType::QSYMM16), framework::dataset::make("DataType", DataType::QSYMM16)),
framework::dataset::make("DataType",
DataType::QSYMM16));
@@ -65,6 +65,13 @@ const auto ElementwiseMinFP16Dataset = combine(combine(framework::dataset::make(
framework::dataset::make("DataType", DataType::F16));
const auto ElementwiseMinFP32Dataset = combine(combine(framework::dataset::make("DataType", DataType::F32), framework::dataset::make("DataType", DataType::F32)),
framework::dataset::make("DataType", DataType::F32));
+const auto EmptyActivationFunctionsDataset = framework::dataset::make("ActivationInfo",
+{ ActivationLayerInfo() });
+const auto ActivationFunctionsDataset = framework::dataset::make("ActivationInfo",
+{
+ ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 0.75f, 0.25f),
+ ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LOGISTIC, 0.75f, 0.25f)
+});
} // namespace
TEST_SUITE(CL)
@@ -229,10 +236,10 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, datasets::SmallShapes
}
FIXTURE_DATA_TEST_CASE(RunSmall, CLElementwiseMinQuantizedFixture<int8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(datasets::SmallShapes(),
- ElementwiseMinQASYMM8SignedDataset),
- framework::dataset::make("Src0QInfo", { QuantizationInfo(5.f / 255.f, 20) })),
- framework::dataset::make("Src1QInfo", { QuantizationInfo(2.f / 255.f, 10) })),
- framework::dataset::make("OutQInfo", { QuantizationInfo(1.f / 255.f, 5) })))
+ ElementwiseMinQASYMM8SignedDataset),
+ framework::dataset::make("Src0QInfo", { QuantizationInfo(5.f / 255.f, 20) })),
+ framework::dataset::make("Src1QInfo", { QuantizationInfo(2.f / 255.f, 10) })),
+ framework::dataset::make("OutQInfo", { QuantizationInfo(1.f / 255.f, 5) })))
{
// Validate output
validate(CLAccessor(_target), _reference);
@@ -274,9 +281,18 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLElementwiseMinQuantizedFixture<int16_t>, fram
TEST_SUITE_END()
TEST_SUITE_END()
+template <typename T>
+using CLElementwiseMinFloatFixture = ElementwiseMinValidationFloatFixture<CLTensor, CLAccessor, CLElementwiseMin, T>;
+
TEST_SUITE(Float)
TEST_SUITE(FP16)
-FIXTURE_DATA_TEST_CASE(RunSmall, CLElementwiseMinFixture<half>, framework::DatasetMode::ALL, combine(datasets::SmallShapes(), ElementwiseMinFP16Dataset))
+FIXTURE_DATA_TEST_CASE(RunSmall, CLElementwiseMinFloatFixture<half>, framework::DatasetMode::ALL, combine(combine(datasets::SmallShapes(), ElementwiseMinFP16Dataset), EmptyActivationFunctionsDataset))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference, tolerance_fp16, 0.01);
+}
+FIXTURE_DATA_TEST_CASE(RunWithActivation, CLElementwiseMinFloatFixture<half>, framework::DatasetMode::ALL, combine(combine(datasets::TinyShapes(), ElementwiseMinFP16Dataset),
+ ActivationFunctionsDataset))
{
// Validate output
validate(CLAccessor(_target), _reference, tolerance_fp16, 0.01);
@@ -307,16 +323,31 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, datasets::SmallShapes
validate(dst.info()->padding(), padding);
}
-FIXTURE_DATA_TEST_CASE(RunSmall, CLElementwiseMinFixture<float>, framework::DatasetMode::ALL, combine(datasets::SmallShapes(), ElementwiseMinFP32Dataset))
+FIXTURE_DATA_TEST_CASE(RunSmall, CLElementwiseMinFloatFixture<float>, framework::DatasetMode::ALL, combine(combine(datasets::SmallShapes(), ElementwiseMinFP32Dataset),
+ EmptyActivationFunctionsDataset))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference, tolerance_fp32);
+}
+FIXTURE_DATA_TEST_CASE(RunWithActivation, CLElementwiseMinFloatFixture<float>, framework::DatasetMode::ALL, combine(combine(datasets::TinyShapes(), ElementwiseMinFP32Dataset),
+ ActivationFunctionsDataset))
{
// Validate output
validate(CLAccessor(_target), _reference, tolerance_fp32);
}
template <typename T>
-using CLElementwiseMinBroadcastFixture = ElementwiseMinBroadcastValidationFixture<CLTensor, CLAccessor, CLElementwiseMin, T>;
+using CLElementwiseMinBroadcastFloatFixture = ElementwiseMinBroadcastValidationFloatFixture<CLTensor, CLAccessor, CLElementwiseMin, T>;
-FIXTURE_DATA_TEST_CASE(RunSmallBroadcast, CLElementwiseMinBroadcastFixture<float>, framework::DatasetMode::ALL, combine(datasets::SmallShapesBroadcast(),
- ElementwiseMinFP32Dataset))
+FIXTURE_DATA_TEST_CASE(RunSmallBroadcast, CLElementwiseMinBroadcastFloatFixture<float>, framework::DatasetMode::ALL, combine(combine(datasets::SmallShapesBroadcast(),
+ ElementwiseMinFP32Dataset),
+ EmptyActivationFunctionsDataset))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference, tolerance_fp32);
+}
+FIXTURE_DATA_TEST_CASE(RunWithActivationBroadcast, CLElementwiseMinBroadcastFloatFixture<float>, framework::DatasetMode::ALL, combine(combine(datasets::TinyShapesBroadcast(),
+ ElementwiseMinFP32Dataset),
+ ActivationFunctionsDataset))
{
// Validate output
validate(CLAccessor(_target), _reference, tolerance_fp32);
diff --git a/tests/validation/CL/ElementwisePower.cpp b/tests/validation/CL/ElementwisePower.cpp
index 46509a2870..ce4fc80bb0 100644
--- a/tests/validation/CL/ElementwisePower.cpp
+++ b/tests/validation/CL/ElementwisePower.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019 ARM Limited.
+ * Copyright (c) 2019-2020 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -50,6 +50,13 @@ const auto ElementwisePowerFP16Dataset = combine(combine(framework::dataset::mak
framework::dataset::make("DataType", DataType::F16));
const auto ElementwisePowerFP32Dataset = combine(combine(framework::dataset::make("DataType", DataType::F32), framework::dataset::make("DataType", DataType::F32)),
framework::dataset::make("DataType", DataType::F32));
+const auto EmptyActivationFunctionsDataset = framework::dataset::make("ActivationInfo",
+{ ActivationLayerInfo() });
+const auto ActivationFunctionsDataset = framework::dataset::make("ActivationInfo",
+{
+ ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 0.75f, 0.25f),
+ ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LOGISTIC, 0.75f, 0.25f)
+});
} // namespace
TEST_SUITE(CL)
@@ -85,21 +92,36 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
// *INDENT-ON*
template <typename T>
-using CLElementwisePowerFixture = ElementwisePowerValidationFixture<CLTensor, CLAccessor, CLElementwisePower, T>;
+using CLElementwisePowerFloatFixture = ElementwisePowerValidationFloatFixture<CLTensor, CLAccessor, CLElementwisePower, T>;
template <typename T>
-using CLElementwisePowerBroadcastFixture = ElementwisePowerBroadcastValidationFixture<CLTensor, CLAccessor, CLElementwisePower, T>;
+using CLElementwisePowerBroadcastFloatFixture = ElementwisePowerBroadcastValidationFloatFixture<CLTensor, CLAccessor, CLElementwisePower, T>;
TEST_SUITE(Float)
TEST_SUITE(FP16)
-FIXTURE_DATA_TEST_CASE(RunSmall, CLElementwisePowerFixture<half>, framework::DatasetMode::ALL, combine(datasets::SmallShapes(), ElementwisePowerFP16Dataset))
+FIXTURE_DATA_TEST_CASE(RunSmall, CLElementwisePowerFloatFixture<half>, framework::DatasetMode::ALL, combine(combine(datasets::SmallShapes(), ElementwisePowerFP16Dataset),
+ EmptyActivationFunctionsDataset))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference, tolerance_fp16, 0.01);
+}
+FIXTURE_DATA_TEST_CASE(RunWithActivation, CLElementwisePowerFloatFixture<half>, framework::DatasetMode::ALL, combine(combine(datasets::TinyShapes(), ElementwisePowerFP16Dataset),
+ ActivationFunctionsDataset))
{
// Validate output
validate(CLAccessor(_target), _reference, tolerance_fp16, 0.01);
}
-FIXTURE_DATA_TEST_CASE(RunSmallBroadcast, CLElementwisePowerBroadcastFixture<half>, framework::DatasetMode::ALL, combine(datasets::SmallShapesBroadcast(),
- ElementwisePowerFP16Dataset))
+FIXTURE_DATA_TEST_CASE(RunSmallBroadcast, CLElementwisePowerBroadcastFloatFixture<half>, framework::DatasetMode::ALL, combine(combine(datasets::SmallShapesBroadcast(),
+ ElementwisePowerFP16Dataset),
+ EmptyActivationFunctionsDataset))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference, tolerance_fp16, 0.01);
+}
+FIXTURE_DATA_TEST_CASE(RunWithActivationBroadcast, CLElementwisePowerBroadcastFloatFixture<half>, framework::DatasetMode::ALL, combine(combine(datasets::TinyShapesBroadcast(),
+ ElementwisePowerFP16Dataset),
+ ActivationFunctionsDataset))
{
// Validate output
validate(CLAccessor(_target), _reference, tolerance_fp16, 0.01);
@@ -107,14 +129,29 @@ FIXTURE_DATA_TEST_CASE(RunSmallBroadcast, CLElementwisePowerBroadcastFixture<hal
TEST_SUITE_END() //FP16
TEST_SUITE(FP32)
-FIXTURE_DATA_TEST_CASE(RunSmall, CLElementwisePowerFixture<float>, framework::DatasetMode::ALL, combine(datasets::SmallShapes(), ElementwisePowerFP32Dataset))
+FIXTURE_DATA_TEST_CASE(RunSmall, CLElementwisePowerFloatFixture<float>, framework::DatasetMode::ALL, combine(combine(datasets::SmallShapes(), ElementwisePowerFP32Dataset),
+ EmptyActivationFunctionsDataset))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference, tolerance_fp32);
+}
+FIXTURE_DATA_TEST_CASE(RunWithActivation, CLElementwisePowerFloatFixture<float>, framework::DatasetMode::ALL, combine(combine(datasets::TinyShapes(), ElementwisePowerFP32Dataset),
+ ActivationFunctionsDataset))
{
// Validate output
validate(CLAccessor(_target), _reference, tolerance_fp32);
}
-FIXTURE_DATA_TEST_CASE(RunSmallBroadcast, CLElementwisePowerBroadcastFixture<float>, framework::DatasetMode::ALL, combine(datasets::SmallShapesBroadcast(),
- ElementwisePowerFP32Dataset))
+FIXTURE_DATA_TEST_CASE(RunSmallBroadcast, CLElementwisePowerBroadcastFloatFixture<float>, framework::DatasetMode::ALL, combine(combine(datasets::SmallShapesBroadcast(),
+ ElementwisePowerFP32Dataset),
+ EmptyActivationFunctionsDataset))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference, tolerance_fp32);
+}
+FIXTURE_DATA_TEST_CASE(RunWithActivationBroadcast, CLElementwisePowerBroadcastFloatFixture<float>, framework::DatasetMode::ALL, combine(combine(datasets::TinyShapesBroadcast(),
+ ElementwisePowerFP32Dataset),
+ ActivationFunctionsDataset))
{
// Validate output
validate(CLAccessor(_target), _reference, tolerance_fp32);
diff --git a/tests/validation/CL/ElementwiseSquaredDiff.cpp b/tests/validation/CL/ElementwiseSquaredDiff.cpp
index edc150109e..86fdc21d6d 100644
--- a/tests/validation/CL/ElementwiseSquaredDiff.cpp
+++ b/tests/validation/CL/ElementwiseSquaredDiff.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2019 ARM Limited.
+ * Copyright (c) 2018-2020 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -64,6 +64,13 @@ const auto ElementwiseSquaredDiffFP16Dataset = combine(combine(framework::datase
framework::dataset::make("DataType", DataType::F16));
const auto ElementwiseSquaredDiffFP32Dataset = combine(combine(framework::dataset::make("DataType", DataType::F32), framework::dataset::make("DataType", DataType::F32)),
framework::dataset::make("DataType", DataType::F32));
+const auto EmptyActivationFunctionsDataset = framework::dataset::make("ActivationInfo",
+{ ActivationLayerInfo() });
+const auto ActivationFunctionsDataset = framework::dataset::make("ActivationInfo",
+{
+ ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 0.75f, 0.25f),
+ ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LOGISTIC, 0.75f, 0.25f)
+});
} // namespace
TEST_SUITE(CL)
@@ -239,9 +246,19 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLElementwiseSquaredDiffQuantizedFixture<int16_
TEST_SUITE_END()
TEST_SUITE_END()
+template <typename T>
+using CLElementwiseSquaredDiffFloatFixture = ElementwiseSquaredDiffValidationFloatFixture<CLTensor, CLAccessor, CLElementwiseSquaredDiff, T>;
+
TEST_SUITE(Float)
TEST_SUITE(FP16)
-FIXTURE_DATA_TEST_CASE(RunSmall, CLElementwiseSquaredDiffFixture<half>, framework::DatasetMode::ALL, combine(datasets::SmallShapes(), ElementwiseSquaredDiffFP16Dataset))
+FIXTURE_DATA_TEST_CASE(RunSmall, CLElementwiseSquaredDiffFloatFixture<half>, framework::DatasetMode::ALL, combine(combine(datasets::SmallShapes(), ElementwiseSquaredDiffFP16Dataset),
+ EmptyActivationFunctionsDataset))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference, tolerance_fp16, 0.01);
+}
+FIXTURE_DATA_TEST_CASE(RunWithActivation, CLElementwiseSquaredDiffFloatFixture<half>, framework::DatasetMode::ALL, combine(combine(datasets::TinyShapes(), ElementwiseSquaredDiffFP16Dataset),
+ ActivationFunctionsDataset))
{
// Validate output
validate(CLAccessor(_target), _reference, tolerance_fp16, 0.01);
@@ -272,16 +289,31 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, datasets::SmallShapes
validate(dst.info()->padding(), padding);
}
-FIXTURE_DATA_TEST_CASE(RunSmall, CLElementwiseSquaredDiffFixture<float>, framework::DatasetMode::ALL, combine(datasets::SmallShapes(), ElementwiseSquaredDiffFP32Dataset))
+FIXTURE_DATA_TEST_CASE(RunSmall, CLElementwiseSquaredDiffFloatFixture<float>, framework::DatasetMode::ALL, combine(combine(datasets::SmallShapes(), ElementwiseSquaredDiffFP32Dataset),
+ EmptyActivationFunctionsDataset))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference, tolerance_fp32);
+}
+FIXTURE_DATA_TEST_CASE(RunWithActivation, CLElementwiseSquaredDiffFloatFixture<float>, framework::DatasetMode::ALL, combine(combine(datasets::TinyShapes(), ElementwiseSquaredDiffFP32Dataset),
+ ActivationFunctionsDataset))
{
// Validate output
validate(CLAccessor(_target), _reference, tolerance_fp32);
}
template <typename T>
-using CLElementwiseSquaredDiffBroadcastFixture = ElementwiseSquaredDiffBroadcastValidationFixture<CLTensor, CLAccessor, CLElementwiseSquaredDiff, T>;
+using CLElementwiseSquaredDiffBroadcastFloatFixture = ElementwiseSquaredDiffBroadcastValidationFloatFixture<CLTensor, CLAccessor, CLElementwiseSquaredDiff, T>;
-FIXTURE_DATA_TEST_CASE(RunSmallBroadcast, CLElementwiseSquaredDiffBroadcastFixture<float>, framework::DatasetMode::ALL, combine(datasets::SmallShapesBroadcast(),
- ElementwiseSquaredDiffFP32Dataset))
+FIXTURE_DATA_TEST_CASE(RunSmallBroadcast, CLElementwiseSquaredDiffBroadcastFloatFixture<float>, framework::DatasetMode::ALL, combine(combine(datasets::SmallShapesBroadcast(),
+ ElementwiseSquaredDiffFP32Dataset),
+ EmptyActivationFunctionsDataset))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference, tolerance_fp32);
+}
+FIXTURE_DATA_TEST_CASE(RunWithActivationBroadcast, CLElementwiseSquaredDiffBroadcastFloatFixture<float>, framework::DatasetMode::ALL, combine(combine(datasets::TinyShapesBroadcast(),
+ ElementwiseSquaredDiffFP32Dataset),
+ ActivationFunctionsDataset))
{
// Validate output
validate(CLAccessor(_target), _reference, tolerance_fp32);
diff --git a/tests/validation/CL/PixelWiseMultiplication.cpp b/tests/validation/CL/PixelWiseMultiplication.cpp
index ff9101a997..310828c48d 100644
--- a/tests/validation/CL/PixelWiseMultiplication.cpp
+++ b/tests/validation/CL/PixelWiseMultiplication.cpp
@@ -43,20 +43,27 @@ namespace
const float scale_255 = 1.f / 255.f;
constexpr AbsoluteTolerance<float> tolerance_qasymm8(1); /**< Tolerance value for comparing reference's output against implementation's output for 8-bit quantized asymmetric data types */
constexpr AbsoluteTolerance<float> tolerance_qsymm16(1); /**< Tolerance value for comparing reference's output against implementation's output for 16-bit quantized symmetric data types */
+const auto EmptyActivationFunctionsDataset = framework::dataset::make("ActivationInfo",
+{ ActivationLayerInfo() });
+const auto ActivationFunctionsDataset = framework::dataset::make("ActivationInfo",
+{
+ ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 0.75f, 0.25f),
+ ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LOGISTIC, 0.75f, 0.25f)
+});
} //namespace
// *INDENT-OFF*
// clang-format off
#define VALIDATE(TYPE, TOLERANCE) validate(CLAccessor(_target), _reference, AbsoluteTolerance<TYPE>(TOLERANCE), 0.f);
-#define PIXEL_WISE_MULTIPLICATION_FIXTURE_DATA_TEST_CASE(TEST_NAME, FIXTURE, MODE, SHAPES, DT1, DT2, SCALE, RP, VALIDATE) \
+#define PIXEL_WISE_MULTIPLICATION_FIXTURE_DATA_TEST_CASE(TEST_NAME, FIXTURE, MODE, SHAPES, DT1, DT2, SCALE, RP, ACT, VALIDATE) \
FIXTURE_DATA_TEST_CASE(TEST_NAME, CLPixelWiseMultiplication##FIXTURE, framework::DatasetMode::MODE, \
- combine(combine(combine(combine(combine( \
+ combine(combine(combine(combine(combine(combine( \
datasets::SHAPES, \
framework::dataset::make("DataType1", DataType::DT1)), \
framework::dataset::make("DataType2", DataType::DT2)), \
framework::dataset::make("Scale", std::move(SCALE))), \
datasets::ConvertPolicies()), \
- framework::dataset::make("RoundingPolicy", RoundingPolicy::RP))) \
+ framework::dataset::make("RoundingPolicy", RoundingPolicy::RP)), ACT)) \
{ \
VALIDATE \
}
@@ -65,11 +72,11 @@ constexpr AbsoluteTolerance<float> tolerance_qsymm16(1); /**< Tolerance value fo
} // namespace
template <typename T>
-using CLPixelWiseMultiplicationToF16Fixture = PixelWiseMultiplicationValidationFixture<CLTensor, CLAccessor, CLPixelWiseMultiplication, T, half_float::half>;
+using CLPixelWiseMultiplicationToF16Fixture = PixelWiseMultiplicationValidationFloatFixture<CLTensor, CLAccessor, CLPixelWiseMultiplication, T, half_float::half>;
template <typename T>
-using CLPixelWiseMultiplicationToF32Fixture = PixelWiseMultiplicationValidationFixture<CLTensor, CLAccessor, CLPixelWiseMultiplication, T, float>;
+using CLPixelWiseMultiplicationToF32Fixture = PixelWiseMultiplicationValidationFloatFixture<CLTensor, CLAccessor, CLPixelWiseMultiplication, T, float>;
template <typename T>
-using CLPixelWiseMultiplicationBroadcastFixture = PixelWiseMultiplicationBroadcastValidationFixture<CLTensor, CLAccessor, CLPixelWiseMultiplication, T, float>;
+using CLPixelWiseMultiplicationBroadcastFixture = PixelWiseMultiplicationBroadcastValidationFloatFixture<CLTensor, CLAccessor, CLPixelWiseMultiplication, T, float>;
TEST_SUITE(CL)
TEST_SUITE(PixelWiseMultiplication)
@@ -110,17 +117,24 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(
TEST_SUITE(F16toF16)
TEST_SUITE(Scale255)
-PIXEL_WISE_MULTIPLICATION_FIXTURE_DATA_TEST_CASE(RunSmall, ToF16Fixture<half_float::half>, PRECOMMIT, SmallShapes(), F16, F16, scale_255, TO_NEAREST_UP, VALIDATE(float, 1.f))
+PIXEL_WISE_MULTIPLICATION_FIXTURE_DATA_TEST_CASE(RunSmall, ToF16Fixture<half_float::half>, PRECOMMIT, SmallShapes(), F16, F16, scale_255, TO_NEAREST_UP, EmptyActivationFunctionsDataset,
+ VALIDATE(float, 1.f))
+PIXEL_WISE_MULTIPLICATION_FIXTURE_DATA_TEST_CASE(RunWithActivation, ToF16Fixture<half_float::half>, ALL, TinyShapes(), F16, F16, scale_255, TO_NEAREST_UP, ActivationFunctionsDataset, VALIDATE(float,
+ 1.f))
TEST_SUITE_END() // Scale255
TEST_SUITE_END() // F16toF16
TEST_SUITE(F32toF32)
TEST_SUITE(Scale255)
-PIXEL_WISE_MULTIPLICATION_FIXTURE_DATA_TEST_CASE(RunSmall, ToF32Fixture<float>, PRECOMMIT, SmallShapes(), F32, F32, scale_255, TO_NEAREST_UP, VALIDATE(float, 1.f))
+PIXEL_WISE_MULTIPLICATION_FIXTURE_DATA_TEST_CASE(RunSmall, ToF32Fixture<float>, PRECOMMIT, SmallShapes(), F32, F32, scale_255, TO_NEAREST_UP, EmptyActivationFunctionsDataset, VALIDATE(float, 1.f))
+PIXEL_WISE_MULTIPLICATION_FIXTURE_DATA_TEST_CASE(RunWithActivation, ToF32Fixture<float>, ALL, TinyShapes(), F32, F32, scale_255, TO_NEAREST_UP, ActivationFunctionsDataset, VALIDATE(float, 1.f))
TEST_SUITE_END() // Scale255
TEST_SUITE_END() // F32toF32
-PIXEL_WISE_MULTIPLICATION_FIXTURE_DATA_TEST_CASE(RunSmallBroadcast, BroadcastFixture<float>, PRECOMMIT, SmallShapesBroadcast(), F32, F32, scale_255, TO_NEAREST_UP, VALIDATE(float, 1.f))
+PIXEL_WISE_MULTIPLICATION_FIXTURE_DATA_TEST_CASE(RunSmallBroadcast, BroadcastFixture<float>, PRECOMMIT, SmallShapesBroadcast(), F32, F32, scale_255, TO_NEAREST_UP, EmptyActivationFunctionsDataset,
+ VALIDATE(float, 1.f))
+PIXEL_WISE_MULTIPLICATION_FIXTURE_DATA_TEST_CASE(RunWithActivationSmallBroadcast, BroadcastFixture<float>, ALL, TinyShapesBroadcast(), F32, F32, scale_255, TO_NEAREST_UP, ActivationFunctionsDataset,
+ VALIDATE(float, 1.f))
template <typename T>
using CLPixelWiseMultiplicationQuantizedFixture = PixelWiseMultiplicationValidationQuantizedFixture<CLTensor, CLAccessor, CLPixelWiseMultiplication, T, T>;
diff --git a/tests/validation/NEON/ElementwiseDivision.cpp b/tests/validation/NEON/ElementwiseDivision.cpp
index e68ce19eed..f5e1f86dbc 100644
--- a/tests/validation/NEON/ElementwiseDivision.cpp
+++ b/tests/validation/NEON/ElementwiseDivision.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019 ARM Limited.
+ * Copyright (c) 2019-2020 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -57,7 +57,7 @@ TEST_SUITE(NEON)
TEST_SUITE(ElementwiseDivision)
template <typename T>
-using NEElementwiseDivisionFixture = ElementwiseDivisionValidationFixture<Tensor, Accessor, NEElementwiseDivision, T>;
+using NEElementwiseDivisionFixture = ArithmeticDivisionValidationFixture<Tensor, Accessor, NEElementwiseDivision, T>;
// *INDENT-OFF*
// clang-format off
@@ -124,7 +124,7 @@ FIXTURE_DATA_TEST_CASE(RunSmall, NEElementwiseDivisionFixture<float>, framework:
}
template <typename T>
-using NEElementwiseDivisionBroadcastFixture = ElementwiseDivisionBroadcastValidationFixture<Tensor, Accessor, NEElementwiseDivision, T>;
+using NEElementwiseDivisionBroadcastFixture = ArithmeticDivisionBroadcastValidationFixture<Tensor, Accessor, NEElementwiseDivision, T>;
FIXTURE_DATA_TEST_CASE(RunSmallBroadcast, NEElementwiseDivisionBroadcastFixture<float>, framework::DatasetMode::ALL, combine(datasets::SmallShapesBroadcast(),
ElementwiseDivisionFP32Dataset))
diff --git a/tests/validation/fixtures/ArithmeticOperationsFixture.h b/tests/validation/fixtures/ArithmeticOperationsFixture.h
index d495ab1049..4a6b0bd3f3 100644
--- a/tests/validation/fixtures/ArithmeticOperationsFixture.h
+++ b/tests/validation/fixtures/ArithmeticOperationsFixture.h
@@ -32,6 +32,7 @@
#include "tests/framework/Asserts.h"
#include "tests/framework/Fixture.h"
#include "tests/validation/Helpers.h"
+#include "tests/validation/reference/ActivationLayer.h"
#include "tests/validation/reference/ArithmeticOperations.h"
namespace arm_compute
@@ -47,9 +48,10 @@ public:
template <typename...>
void setup(reference::ArithmeticOperation op, const TensorShape &shape0, const TensorShape &shape1,
DataType data_type0, DataType data_type1, DataType output_data_type, ConvertPolicy convert_policy,
- QuantizationInfo qinfo0, QuantizationInfo qinfo1, QuantizationInfo qinfo_out)
+ QuantizationInfo qinfo0, QuantizationInfo qinfo1, QuantizationInfo qinfo_out, ActivationLayerInfo act_info)
{
_op = op;
+ _act_info = act_info;
_target = compute_target(shape0, shape1, data_type0, data_type1, output_data_type, convert_policy, qinfo0, qinfo1, qinfo_out);
_reference = compute_reference(shape0, shape1, data_type0, data_type1, output_data_type, convert_policy, qinfo0, qinfo1, qinfo_out);
}
@@ -71,7 +73,7 @@ protected:
// Create and configure function
FunctionType arith_op;
- arith_op.configure(&ref_src1, &ref_src2, &dst, convert_policy);
+ arith_op.configure(&ref_src1, &ref_src2, &dst, convert_policy, _act_info);
ARM_COMPUTE_EXPECT(ref_src1.info()->is_resizable(), framework::LogLevel::ERRORS);
ARM_COMPUTE_EXPECT(ref_src2.info()->is_resizable(), framework::LogLevel::ERRORS);
@@ -109,12 +111,14 @@ protected:
fill(ref_src1, 0);
fill(ref_src2, 1);
- return reference::arithmetic_operation<T>(_op, ref_src1, ref_src2, ref_dst, convert_policy);
+ auto result = reference::arithmetic_operation<T>(_op, ref_src1, ref_src2, ref_dst, convert_policy);
+ return _act_info.enabled() ? reference::activation_layer(result, _act_info, qinfo_out) : result;
}
TensorType _target{};
SimpleTensor<T> _reference{};
reference::ArithmeticOperation _op{ reference::ArithmeticOperation::ADD };
+ ActivationLayerInfo _act_info{};
};
template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
@@ -125,7 +129,7 @@ public:
void setup(const TensorShape &shape0, const TensorShape &shape1, DataType data_type0, DataType data_type1, DataType output_data_type, ConvertPolicy convert_policy)
{
ArithmeticOperationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(reference::ArithmeticOperation::ADD, shape0, shape1, data_type0, data_type1,
- output_data_type, convert_policy, QuantizationInfo(), QuantizationInfo(), QuantizationInfo());
+ output_data_type, convert_policy, QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), ActivationLayerInfo());
}
};
@@ -137,7 +141,31 @@ public:
void setup(const TensorShape &shape, DataType data_type0, DataType data_type1, DataType output_data_type, ConvertPolicy convert_policy)
{
ArithmeticOperationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(reference::ArithmeticOperation::ADD, shape, shape, data_type0, data_type1,
- output_data_type, convert_policy, QuantizationInfo(), QuantizationInfo(), QuantizationInfo());
+ output_data_type, convert_policy, QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), ActivationLayerInfo());
+ }
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class ArithmeticAdditionBroadcastValidationFloatFixture : public ArithmeticOperationGenericFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+ template <typename...>
+ void setup(const TensorShape &shape0, const TensorShape &shape1, DataType data_type0, DataType data_type1, DataType output_data_type, ConvertPolicy convert_policy, ActivationLayerInfo act_info)
+ {
+ ArithmeticOperationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(reference::ArithmeticOperation::ADD, shape0, shape1, data_type0, data_type1,
+ output_data_type, convert_policy, QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), act_info);
+ }
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class ArithmeticAdditionValidationFloatFixture : public ArithmeticOperationGenericFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+ template <typename...>
+ void setup(const TensorShape &shape, DataType data_type0, DataType data_type1, DataType output_data_type, ConvertPolicy convert_policy, ActivationLayerInfo act_info)
+ {
+ ArithmeticOperationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(reference::ArithmeticOperation::ADD, shape, shape, data_type0, data_type1,
+ output_data_type, convert_policy, QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), act_info);
}
};
@@ -151,7 +179,7 @@ public:
{
ArithmeticOperationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(reference::ArithmeticOperation::ADD, shape, shape, data_type0, data_type1,
- output_data_type, convert_policy, qinfo0, qinfo1, qinfo_out);
+ output_data_type, convert_policy, qinfo0, qinfo1, qinfo_out, ActivationLayerInfo());
}
};
@@ -164,7 +192,20 @@ public:
{
ArithmeticOperationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(reference::ArithmeticOperation::SUB, shape0, shape1,
data_type0, data_type1, output_data_type, convert_policy,
- QuantizationInfo(), QuantizationInfo(), QuantizationInfo());
+ QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), ActivationLayerInfo());
+ }
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class ArithmeticSubtractionBroadcastValidationFloatFixture : public ArithmeticOperationGenericFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+ template <typename...>
+ void setup(const TensorShape &shape0, const TensorShape &shape1, DataType data_type0, DataType data_type1, DataType output_data_type, ConvertPolicy convert_policy, ActivationLayerInfo act_info)
+ {
+ ArithmeticOperationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(reference::ArithmeticOperation::SUB, shape0, shape1,
+ data_type0, data_type1, output_data_type, convert_policy,
+ QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), act_info);
}
};
@@ -177,7 +218,20 @@ public:
{
ArithmeticOperationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(reference::ArithmeticOperation::SUB, shape, shape,
data_type0, data_type1, output_data_type, convert_policy,
- QuantizationInfo(), QuantizationInfo(), QuantizationInfo());
+ QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), ActivationLayerInfo());
+ }
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class ArithmeticSubtractionValidationFloatFixture : public ArithmeticOperationGenericFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+ template <typename...>
+ void setup(const TensorShape &shape, DataType data_type0, DataType data_type1, DataType output_data_type, ConvertPolicy convert_policy, ActivationLayerInfo act_info)
+ {
+ ArithmeticOperationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(reference::ArithmeticOperation::SUB, shape, shape,
+ data_type0, data_type1, output_data_type, convert_policy,
+ QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), act_info);
}
};
@@ -192,7 +246,7 @@ public:
{
ArithmeticOperationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(reference::ArithmeticOperation::SUB, shape, shape,
data_type0, data_type1, output_data_type,
- convert_policy, qinfo0, qinfo1, qinfo_out);
+ convert_policy, qinfo0, qinfo1, qinfo_out, ActivationLayerInfo());
}
};
} // namespace validation
diff --git a/tests/validation/fixtures/ElementwiseOperationsFixture.h b/tests/validation/fixtures/ElementwiseOperationsFixture.h
index de61c487e6..44c096c521 100644
--- a/tests/validation/fixtures/ElementwiseOperationsFixture.h
+++ b/tests/validation/fixtures/ElementwiseOperationsFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2019 ARM Limited.
+ * Copyright (c) 2018-2020 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -32,6 +32,7 @@
#include "tests/framework/Asserts.h"
#include "tests/framework/Fixture.h"
#include "tests/validation/Helpers.h"
+#include "tests/validation/reference/ActivationLayer.h"
#include "tests/validation/reference/ElementwiseOperations.h"
namespace arm_compute
@@ -127,6 +128,70 @@ protected:
ArithmeticOperation _op{ ArithmeticOperation::ADD };
};
+// Arithmetic operation fused with an activation function
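+// The target path forwards act_info to FunctionType::configure(), so the activation runs fused
+// inside the elementwise kernel; the reference path applies the activation as a separate pass in
+// compute_reference() below, keeping the two results directly comparable.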
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class ArithmeticOperationsFuseActivationFixture : public ArithmeticOperationsGenericFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+ template <typename...>
+ void setup(ArithmeticOperation op, const TensorShape &shape0, const TensorShape &shape1,
+ DataType data_type0, DataType data_type1, DataType output_data_type,
+ QuantizationInfo qinfo0, QuantizationInfo qinfo1, QuantizationInfo qinfo_out, ActivationLayerInfo act_info)
+ {
+ ArithmeticOperationsGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(op, shape0, shape1,
+ data_type0, data_type1, output_data_type,
+ qinfo0, qinfo1, qinfo_out);
+ _act_info = act_info;
+ }
+
+protected:
+ TensorType compute_target(const TensorShape &shape0, const TensorShape &shape1, DataType data_type0, DataType data_type1, DataType output_data_type,
+ QuantizationInfo qinfo0, QuantizationInfo qinfo1, QuantizationInfo qinfo_out)
+ {
+ // Create tensors
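+ // Note: despite the ref_ prefix, these are the target-side tensors that feed the function under
+ // test; the reference tensors live in the base fixture's compute_reference().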
+ TensorType ref_src1 = create_tensor<TensorType>(shape0, data_type0, 1, qinfo0);
+ TensorType ref_src2 = create_tensor<TensorType>(shape1, data_type1, 1, qinfo1);
+ TensorType dst = create_tensor<TensorType>(TensorShape::broadcast_shape(shape0, shape1), output_data_type, 1, qinfo_out);
+
+ // Create and configure function
+ FunctionType elem_op;
+ elem_op.configure(&ref_src1, &ref_src2, &dst, _act_info);
+
+ ARM_COMPUTE_EXPECT(ref_src1.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(ref_src2.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+
+ // Allocate tensors
+ ref_src1.allocator()->allocate();
+ ref_src2.allocator()->allocate();
+ dst.allocator()->allocate();
+
+ ARM_COMPUTE_EXPECT(!ref_src1.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(!ref_src2.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+
+ // Fill tensors
+ fill(AccessorType(ref_src1), 0);
+ fill(AccessorType(ref_src2), 1);
+
+ // Compute function
+ elem_op.run();
+
+ return dst;
+ }
+
+ SimpleTensor<T> compute_reference(const TensorShape &shape0, const TensorShape &shape1,
+ DataType data_type0, DataType data_type1, DataType output_data_type,
+ QuantizationInfo qinfo0, QuantizationInfo qinfo1, QuantizationInfo qinfo_out)
+ {
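+ // Compute the plain elementwise reference first, then, if requested, apply the reference
+ // activation on top using the output quantization info.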
+ auto result = ArithmeticOperationsGenericFixture<TensorType, AccessorType, FunctionType, T>::compute_reference(shape0, shape1, data_type0,
+ data_type1, output_data_type, qinfo0, qinfo1, qinfo_out);
+ return _act_info.enabled() ? reference::activation_layer(result, _act_info, qinfo_out) : result;
+ }
+
+ ActivationLayerInfo _act_info{};
+};
+
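+// Minimal usage sketch (test-suite side; the exact dataset wiring is an assumption, not part of
+// this header): a CL test would alias e.g.
+//   using CLDivFloatFixture = ArithmeticDivisionValidationFloatFixture<CLTensor, CLAccessor, CLArithmeticDivision, float>;
+// register it with FIXTURE_DATA_TEST_CASE over a shape dataset combined with an ActivationLayerInfo
+// dataset, and validate(CLAccessor(_target), _reference) as usual.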
template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
class ArithmeticDivisionBroadcastValidationFixture : public ArithmeticOperationsGenericFixture<TensorType, AccessorType, FunctionType, T>
{
@@ -154,6 +219,32 @@ public:
};
template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class ArithmeticDivisionBroadcastValidationFloatFixture : public ArithmeticOperationsFuseActivationFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+ template <typename...>
+ void setup(const TensorShape &shape0, const TensorShape &shape1, DataType data_type0, DataType data_type1, DataType output_data_type, ActivationLayerInfo act_info)
+ {
+ ArithmeticOperationsFuseActivationFixture<TensorType, AccessorType, FunctionType, T>::setup(ArithmeticOperation::DIV, shape0, shape1,
+ data_type0, data_type1, output_data_type,
+ QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), act_info);
+ }
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class ArithmeticDivisionValidationFloatFixture : public ArithmeticOperationsFuseActivationFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+ template <typename...>
+ void setup(const TensorShape &shape, DataType data_type0, DataType data_type1, DataType output_data_type, ActivationLayerInfo act_info)
+ {
+ ArithmeticOperationsFuseActivationFixture<TensorType, AccessorType, FunctionType, T>::setup(ArithmeticOperation::DIV, shape, shape,
+ data_type0, data_type1, output_data_type,
+ QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), act_info);
+ }
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
class ArithmeticDivisionValidationQuantizedFixture : public ArithmeticOperationsGenericFixture<TensorType, AccessorType, FunctionType, T>
{
public:
@@ -195,6 +286,32 @@ public:
};
template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class ElementwiseMaxBroadcastValidationFloatFixture : public ArithmeticOperationsFuseActivationFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+ template <typename...>
+ void setup(const TensorShape &shape0, const TensorShape &shape1, DataType data_type0, DataType data_type1, DataType output_data_type, ActivationLayerInfo act_info)
+ {
+ ArithmeticOperationsFuseActivationFixture<TensorType, AccessorType, FunctionType, T>::setup(ArithmeticOperation::MAX, shape0, shape1,
+ data_type0, data_type1, output_data_type,
+ QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), act_info);
+ }
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class ElementwiseMaxValidationFloatFixture : public ArithmeticOperationsFuseActivationFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+ template <typename...>
+ void setup(const TensorShape &shape, DataType data_type0, DataType data_type1, DataType output_data_type, ActivationLayerInfo act_info)
+ {
+ ArithmeticOperationsFuseActivationFixture<TensorType, AccessorType, FunctionType, T>::setup(ArithmeticOperation::MAX, shape, shape,
+ data_type0, data_type1, output_data_type,
+ QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), act_info);
+ }
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
class ElementwiseMaxValidationQuantizedFixture : public ArithmeticOperationsGenericFixture<TensorType, AccessorType, FunctionType, T>
{
public:
@@ -251,6 +368,32 @@ public:
};
template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class ElementwiseMinBroadcastValidationFloatFixture : public ArithmeticOperationsFuseActivationFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+ template <typename...>
+ void setup(const TensorShape &shape0, const TensorShape &shape1, DataType data_type0, DataType data_type1, DataType output_data_type, ActivationLayerInfo act_info)
+ {
+ ArithmeticOperationsFuseActivationFixture<TensorType, AccessorType, FunctionType, T>::setup(ArithmeticOperation::MIN, shape0, shape1,
+ data_type0, data_type1, output_data_type,
+ QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), act_info);
+ }
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class ElementwiseMinValidationFloatFixture : public ArithmeticOperationsFuseActivationFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+ template <typename...>
+ void setup(const TensorShape &shape, DataType data_type0, DataType data_type1, DataType output_data_type, ActivationLayerInfo act_info)
+ {
+ ArithmeticOperationsFuseActivationFixture<TensorType, AccessorType, FunctionType, T>::setup(ArithmeticOperation::MIN, shape, shape,
+ data_type0, data_type1, output_data_type,
+ QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), act_info);
+ }
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
class ElementwiseMinValidationQuantizedFixture : public ArithmeticOperationsGenericFixture<TensorType, AccessorType, FunctionType, T>
{
public:
@@ -307,6 +450,32 @@ public:
};
template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class ElementwiseSquaredDiffBroadcastValidationFloatFixture : public ArithmeticOperationsFuseActivationFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+ template <typename...>
+ void setup(const TensorShape &shape0, const TensorShape &shape1, DataType data_type0, DataType data_type1, DataType output_data_type, ActivationLayerInfo act_info)
+ {
+ ArithmeticOperationsFuseActivationFixture<TensorType, AccessorType, FunctionType, T>::setup(ArithmeticOperation::SQUARED_DIFF, shape0, shape1,
+ data_type0, data_type1, output_data_type,
+ QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), act_info);
+ }
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class ElementwiseSquaredDiffValidationFloatFixture : public ArithmeticOperationsFuseActivationFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+ template <typename...>
+ void setup(const TensorShape &shape, DataType data_type0, DataType data_type1, DataType output_data_type, ActivationLayerInfo act_info)
+ {
+ ArithmeticOperationsFuseActivationFixture<TensorType, AccessorType, FunctionType, T>::setup(ArithmeticOperation::SQUARED_DIFF, shape, shape,
+ data_type0, data_type1, output_data_type,
+ QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), act_info);
+ }
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
class ElementwiseSquaredDiffValidationQuantizedFixture : public ArithmeticOperationsGenericFixture<TensorType, AccessorType, FunctionType, T>
{
public:
@@ -393,84 +562,54 @@ public:
};
template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
-class ElementwiseDivisionBroadcastValidationFixture : public ArithmeticOperationsGenericFixture<TensorType, AccessorType, FunctionType, T>
+class ElementwisePowerBroadcastValidationFixture : public ArithmeticOperationsGenericFixture<TensorType, AccessorType, FunctionType, T>
{
public:
template <typename...>
void setup(const TensorShape &shape0, const TensorShape &shape1, DataType data_type0, DataType data_type1, DataType output_data_type)
{
- ArithmeticOperationsGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(ArithmeticOperation::DIV, shape0, shape1,
+ ArithmeticOperationsGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(ArithmeticOperation::POWER, shape0, shape1,
data_type0, data_type1, output_data_type,
QuantizationInfo(), QuantizationInfo(), QuantizationInfo());
}
};
template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
-class ElementwiseDivisionValidationFixture : public ArithmeticOperationsGenericFixture<TensorType, AccessorType, FunctionType, T>
+class ElementwisePowerValidationFixture : public ArithmeticOperationsGenericFixture<TensorType, AccessorType, FunctionType, T>
{
public:
template <typename...>
void setup(const TensorShape &shape, DataType data_type0, DataType data_type1, DataType output_data_type)
{
- ArithmeticOperationsGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(ArithmeticOperation::DIV, shape, shape,
+ ArithmeticOperationsGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(ArithmeticOperation::POWER, shape, shape,
data_type0, data_type1, output_data_type,
QuantizationInfo(), QuantizationInfo(), QuantizationInfo());
}
};
template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
-class ElementwiseDivisionValidationQuantizedFixture : public ArithmeticOperationsGenericFixture<TensorType, AccessorType, FunctionType, T>
-{
-public:
- template <typename...>
- void setup(const TensorShape &shape, DataType data_type0, DataType data_type1, DataType output_data_type,
- QuantizationInfo qinfo0, QuantizationInfo qinfo1, QuantizationInfo qinfo_out)
-
- {
- ArithmeticOperationsGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(ArithmeticOperation::DIV, shape, shape,
- data_type0, data_type1, output_data_type,
- qinfo0, qinfo1, qinfo_out);
- }
-};
-
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
-class ElementwiseDivisionQuantizedBroadcastValidationFixture : public ArithmeticOperationsGenericFixture<TensorType, AccessorType, FunctionType, T>
-{
-public:
- template <typename...>
- void setup(const TensorShape &shape0, const TensorShape &shape1, DataType data_type0, DataType data_type1, DataType output_data_type,
- QuantizationInfo qinfo0, QuantizationInfo qinfo1, QuantizationInfo qinfo_out)
-
- {
- ArithmeticOperationsGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(ArithmeticOperation::DIV, shape0, shape1,
- data_type0, data_type1, output_data_type,
- qinfo0, qinfo1, qinfo_out);
- }
-};
-
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
-class ElementwisePowerBroadcastValidationFixture : public ArithmeticOperationsGenericFixture<TensorType, AccessorType, FunctionType, T>
+class ElementwisePowerBroadcastValidationFloatFixture : public ArithmeticOperationsFuseActivationFixture<TensorType, AccessorType, FunctionType, T>
{
public:
template <typename...>
- void setup(const TensorShape &shape0, const TensorShape &shape1, DataType data_type0, DataType data_type1, DataType output_data_type)
+ void setup(const TensorShape &shape0, const TensorShape &shape1, DataType data_type0, DataType data_type1, DataType output_data_type, ActivationLayerInfo act_info)
{
- ArithmeticOperationsGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(ArithmeticOperation::POWER, shape0, shape1,
- data_type0, data_type1, output_data_type,
- QuantizationInfo(), QuantizationInfo(), QuantizationInfo());
+ ArithmeticOperationsFuseActivationFixture<TensorType, AccessorType, FunctionType, T>::setup(ArithmeticOperation::POWER, shape0, shape1,
+ data_type0, data_type1, output_data_type,
+ QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), act_info);
}
};
template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
-class ElementwisePowerValidationFixture : public ArithmeticOperationsGenericFixture<TensorType, AccessorType, FunctionType, T>
+class ElementwisePowerValidationFloatFixture : public ArithmeticOperationsFuseActivationFixture<TensorType, AccessorType, FunctionType, T>
{
public:
template <typename...>
- void setup(const TensorShape &shape, DataType data_type0, DataType data_type1, DataType output_data_type)
+ void setup(const TensorShape &shape, DataType data_type0, DataType data_type1, DataType output_data_type, ActivationLayerInfo act_info)
{
- ArithmeticOperationsGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(ArithmeticOperation::POWER, shape, shape,
- data_type0, data_type1, output_data_type,
- QuantizationInfo(), QuantizationInfo(), QuantizationInfo());
+ ArithmeticOperationsFuseActivationFixture<TensorType, AccessorType, FunctionType, T>::setup(ArithmeticOperation::POWER, shape, shape,
+ data_type0, data_type1, output_data_type,
+ QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), act_info);
}
};
diff --git a/tests/validation/fixtures/PixelWiseMultiplicationFixture.h b/tests/validation/fixtures/PixelWiseMultiplicationFixture.h
index 37359f421b..f561a37a71 100644
--- a/tests/validation/fixtures/PixelWiseMultiplicationFixture.h
+++ b/tests/validation/fixtures/PixelWiseMultiplicationFixture.h
@@ -31,6 +31,7 @@
#include "tests/IAccessor.h"
#include "tests/framework/Asserts.h"
#include "tests/framework/Fixture.h"
+#include "tests/validation/reference/ActivationLayer.h"
#include "tests/validation/reference/PixelWiseMultiplication.h"
namespace arm_compute
@@ -46,18 +47,19 @@ public:
template <typename...>
void setup(const TensorShape &shape0,
const TensorShape &shape1,
- DataType dt_in1,
- DataType dt_in2,
- DataType dt_out,
- float scale,
- ConvertPolicy convert_policy,
- RoundingPolicy rounding_policy,
- QuantizationInfo qinfo0,
- QuantizationInfo qinfo1,
- QuantizationInfo qinfo_out)
+ DataType dt_in1,
+ DataType dt_in2,
+ DataType dt_out,
+ float scale,
+ ConvertPolicy convert_policy,
+ RoundingPolicy rounding_policy,
+ QuantizationInfo qinfo0,
+ QuantizationInfo qinfo1,
+ QuantizationInfo qinfo_out,
+ ActivationLayerInfo act_info)
{
- _target = compute_target(shape0, shape1, dt_in1, dt_in2, dt_out, scale, convert_policy, rounding_policy, qinfo0, qinfo1, qinfo_out);
- _reference = compute_reference(shape0, shape1, dt_in1, dt_in2, dt_out, scale, convert_policy, rounding_policy, qinfo0, qinfo1, qinfo_out);
+ _target = compute_target(shape0, shape1, dt_in1, dt_in2, dt_out, scale, convert_policy, rounding_policy, qinfo0, qinfo1, qinfo_out, act_info);
+ _reference = compute_reference(shape0, shape1, dt_in1, dt_in2, dt_out, scale, convert_policy, rounding_policy, qinfo0, qinfo1, qinfo_out, act_info);
}
protected:
@@ -69,7 +71,7 @@ protected:
TensorType compute_target(const TensorShape &shape0, const TensorShape &shape1, DataType dt_in1, DataType dt_in2, DataType dt_out,
float scale, ConvertPolicy convert_policy, RoundingPolicy rounding_policy,
- QuantizationInfo qinfo0, QuantizationInfo qinfo1, QuantizationInfo qinfo_out)
+ QuantizationInfo qinfo0, QuantizationInfo qinfo1, QuantizationInfo qinfo_out, ActivationLayerInfo act_info)
{
// Create tensors
TensorType src1 = create_tensor<TensorType>(shape0, dt_in1, 1, qinfo0);
@@ -78,7 +80,7 @@ protected:
// Create and configure function
FunctionType multiply;
- multiply.configure(&src1, &src2, &dst, scale, convert_policy, rounding_policy);
+ multiply.configure(&src1, &src2, &dst, scale, convert_policy, rounding_policy, act_info);
ARM_COMPUTE_EXPECT(src1.info()->is_resizable(), framework::LogLevel::ERRORS);
ARM_COMPUTE_EXPECT(src2.info()->is_resizable(), framework::LogLevel::ERRORS);
@@ -105,7 +107,7 @@ protected:
SimpleTensor<T3> compute_reference(const TensorShape &shape0, const TensorShape &shape1, DataType dt_in1, DataType dt_in2, DataType dt_out,
float scale, ConvertPolicy convert_policy, RoundingPolicy rounding_policy,
- QuantizationInfo qinfo0, QuantizationInfo qinfo1, QuantizationInfo qinfo_out)
+ QuantizationInfo qinfo0, QuantizationInfo qinfo1, QuantizationInfo qinfo_out, ActivationLayerInfo act_info)
{
// Create reference
SimpleTensor<T1> src1{ shape0, dt_in1, 1, qinfo0 };
@@ -115,7 +117,8 @@ protected:
fill(src1, 0);
fill(src2, 1);
- return reference::pixel_wise_multiplication<T1, T2, T3>(src1, src2, scale, convert_policy, rounding_policy, dt_out, qinfo_out);
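+ // Same pattern as the elementwise fixtures: the target fuses the activation into the kernel via
+ // configure(), while the reference applies it as a post-step on the multiplication result.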
+ auto result = reference::pixel_wise_multiplication<T1, T2, T3>(src1, src2, scale, convert_policy, rounding_policy, dt_out, qinfo_out);
+ return act_info.enabled() ? reference::activation_layer(result, act_info, qinfo_out) : result;
}
TensorType _target{};
@@ -130,7 +133,7 @@ public:
void setup(const TensorShape &shape, DataType dt_in1, DataType dt_in2, float scale, ConvertPolicy convert_policy, RoundingPolicy rounding_policy)
{
PixelWiseMultiplicationGenericValidationFixture<TensorType, AccessorType, FunctionType, T1, T2>::setup(shape, shape, dt_in1, dt_in2, dt_in2, scale, convert_policy, rounding_policy,
- QuantizationInfo(), QuantizationInfo(), QuantizationInfo());
+ QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), ActivationLayerInfo());
}
};
@@ -142,7 +145,32 @@ public:
void setup(const TensorShape &shape0, const TensorShape &shape1, DataType dt_in1, DataType dt_in2, float scale, ConvertPolicy convert_policy, RoundingPolicy rounding_policy)
{
PixelWiseMultiplicationGenericValidationFixture<TensorType, AccessorType, FunctionType, T1, T2>::setup(shape0, shape1, dt_in1, dt_in2, dt_in2, scale, convert_policy, rounding_policy,
- QuantizationInfo(), QuantizationInfo(), QuantizationInfo());
+ QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), ActivationLayerInfo());
+ }
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T1, typename T2>
+class PixelWiseMultiplicationValidationFloatFixture : public PixelWiseMultiplicationGenericValidationFixture<TensorType, AccessorType, FunctionType, T1, T2>
+{
+public:
+ template <typename...>
+ void setup(const TensorShape &shape, DataType dt_in1, DataType dt_in2, float scale, ConvertPolicy convert_policy, RoundingPolicy rounding_policy, ActivationLayerInfo act_info)
+ {
+ PixelWiseMultiplicationGenericValidationFixture<TensorType, AccessorType, FunctionType, T1, T2>::setup(shape, shape, dt_in1, dt_in2, dt_in2, scale, convert_policy, rounding_policy,
+ QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), act_info);
+ }
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T1, typename T2>
+class PixelWiseMultiplicationBroadcastValidationFloatFixture : public PixelWiseMultiplicationGenericValidationFixture<TensorType, AccessorType, FunctionType, T1, T2>
+{
+public:
+ template <typename...>
+ void setup(const TensorShape &shape0, const TensorShape &shape1, DataType dt_in1, DataType dt_in2, float scale, ConvertPolicy convert_policy, RoundingPolicy rounding_policy,
+ ActivationLayerInfo act_info)
+ {
+ PixelWiseMultiplicationGenericValidationFixture<TensorType, AccessorType, FunctionType, T1, T2>::setup(shape0, shape1, dt_in1, dt_in2, dt_in2, scale, convert_policy, rounding_policy,
+ QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), act_info);
}
};
@@ -154,8 +182,8 @@ public:
void setup(const TensorShape &shape, DataType dt_in1, DataType dt_in2, DataType dt_out, float scale, ConvertPolicy convert_policy, RoundingPolicy rounding_policy,
QuantizationInfo qinfo0, QuantizationInfo qinfo1, QuantizationInfo qinfo_out)
{
- PixelWiseMultiplicationGenericValidationFixture<TensorType, AccessorType, FunctionType, T1, T2, T3>::setup(shape, shape, dt_in1, dt_in2, dt_out, scale, convert_policy,
- rounding_policy, qinfo0, qinfo1, qinfo_out);
+ PixelWiseMultiplicationGenericValidationFixture<TensorType, AccessorType, FunctionType, T1, T2, T3>::setup(shape, shape, dt_in1, dt_in2, dt_out, scale, convert_policy, rounding_policy,
+ qinfo0, qinfo1, qinfo_out, ActivationLayerInfo());
}
};
} // namespace validation
diff --git a/tests/validation/reference/ActivationLayer.cpp b/tests/validation/reference/ActivationLayer.cpp
index 7a699c5f86..4aa0f880da 100644
--- a/tests/validation/reference/ActivationLayer.cpp
+++ b/tests/validation/reference/ActivationLayer.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2019 ARM Limited.
+ * Copyright (c) 2017-2020 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -87,6 +87,7 @@ SimpleTensor<int16_t> activation_layer<int16_t>(const SimpleTensor<int16_t> &src
return dst;
}
+template SimpleTensor<int32_t> activation_layer(const SimpleTensor<int32_t> &src, ActivationLayerInfo info, const QuantizationInfo &oq_info);
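+// The int32_t instantiation is presumably needed because pixel-wise multiplication can produce
+// S32 results, which the fused-activation reference now routes through activation_layer.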
template SimpleTensor<float> activation_layer(const SimpleTensor<float> &src, ActivationLayerInfo info, const QuantizationInfo &oq_info);
template SimpleTensor<half> activation_layer(const SimpleTensor<half> &src, ActivationLayerInfo info, const QuantizationInfo &oq_info);
} // namespace reference
diff --git a/tests/validation/reference/ActivationLayer.h b/tests/validation/reference/ActivationLayer.h
index f41e87123e..4585a9db10 100644
--- a/tests/validation/reference/ActivationLayer.h
+++ b/tests/validation/reference/ActivationLayer.h
@@ -82,7 +82,7 @@ inline T activate_float(T x, T a, T b, ActivationLayerInfo::ActivationFunction a
ret = x;
break;
case ActivationLayerInfo::ActivationFunction::HARD_SWISH:
- ret = x * ((std::min(std::max((x + 3), 0.0f), 6.0f)) * 0.166666667f);
+ ret = x * ((std::min(std::max(static_cast<T>(x + 3), static_cast<T>(0.0f)), static_cast<T>(6.0f))) * 0.166666667f);
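+ // HARD_SWISH computes x * relu6(x + 3) / 6 (with 1/6 ~= 0.166666667f); the static_casts give
+ // std::min/std::max operands of a single type T, so the expression also compiles for T = half.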
break;
default:
ARM_COMPUTE_ERROR("Unsupported activation function");