From d1794ebfa10d05af7d2458c5d506152fd38068d3 Mon Sep 17 00:00:00 2001
From: Michalis Spyrou
Date: Fri, 15 Jun 2018 16:15:26 +0100
Subject: COMPMID-1226 Extend CLMeanStdDev to support FP32 / FP16

- Extend support for FP16 in CLReduction.
- For F16/F32 MeanStdDev we perform one reduction operation for mean and
  one for stddev and we calculate the final result in the host CPU.

Change-Id: Iad2099f26c0ba7969737d22f00c6c275634d875c
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/135870
Tested-by: Jenkins
Reviewed-by: Georgios Pinitas
---
 tests/validation/CL/ReductionOperation.cpp | 27 +++++++++++++++++++++------
 1 file changed, 21 insertions(+), 6 deletions(-)

(limited to 'tests/validation/CL/ReductionOperation.cpp')

diff --git a/tests/validation/CL/ReductionOperation.cpp b/tests/validation/CL/ReductionOperation.cpp
index a48e2f9d5f..ca0988f955 100644
--- a/tests/validation/CL/ReductionOperation.cpp
+++ b/tests/validation/CL/ReductionOperation.cpp
@@ -45,6 +45,7 @@ namespace
 {
 /** Tolerance for float operations */
 RelativeTolerance<float> tolerance_f32(0.00001f);
+RelativeTolerance<float> tolerance_f16(0.1f);
 } // namespace
 
 TEST_SUITE(CL)
@@ -55,7 +56,7 @@ TEST_SUITE(ReductionOperation)
 DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
     framework::dataset::make("InputInfo", { TensorInfo(TensorShape(128U, 64U), 1, DataType::F32), // Mismatching data type input/output
                                             TensorInfo(TensorShape(128U, 64U), 2, DataType::F32), // Number of Input channels != 1
-                                            TensorInfo(TensorShape(128U, 64U), 1, DataType::S16), // DataType != F32
+                                            TensorInfo(TensorShape(128U, 64U), 1, DataType::S16), // DataType != F16/F32
                                             TensorInfo(TensorShape(128U, 64U), 1, DataType::F32), // Axis >= num_max_dimensions
                                             TensorInfo(TensorShape(128U, 64U), 1, DataType::F32), // Axis > 0
                                             TensorInfo(TensorShape(128U, 64U), 1, DataType::F32)
@@ -84,9 +85,23 @@ template <typename T>
 using CLReductionOperationFixture = ReductionOperationValidationFixture<CLTensor, CLAccessor, CLReductionOperation, T>;
 
 TEST_SUITE(Float)
+TEST_SUITE(FP16)
+FIXTURE_DATA_TEST_CASE(RunSmall, CLReductionOperationFixture<half>, framework::DatasetMode::PRECOMMIT,
+                       combine(combine(combine(datasets::SmallShapes(), framework::dataset::make("DataType", DataType::F16)), framework::dataset::make("Axis", { 0 })), datasets::ReductionOperations()))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference, tolerance_f16);
+}
+FIXTURE_DATA_TEST_CASE(RunLarge, CLReductionOperationFixture<half>, framework::DatasetMode::NIGHTLY,
+                       combine(combine(combine(datasets::LargeShapes(), framework::dataset::make("DataType", DataType::F16)), framework::dataset::make("Axis", { 0 })), datasets::ReductionOperations()))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference, tolerance_f16);
+}
+TEST_SUITE_END() // F16
 TEST_SUITE(FP32)
 FIXTURE_DATA_TEST_CASE(RunSmall, CLReductionOperationFixture<float>, framework::DatasetMode::PRECOMMIT,
-                       combine(combine(combine(datasets::LargeShapes(), framework::dataset::make("DataType", DataType::F32)), framework::dataset::make("Axis", { 0 })), datasets::ReductionOperations()))
+                       combine(combine(combine(datasets::SmallShapes(), framework::dataset::make("DataType", DataType::F32)), framework::dataset::make("Axis", { 0 })), datasets::ReductionOperations()))
 {
     // Validate output
     validate(CLAccessor(_target), _reference, tolerance_f32);
@@ -97,11 +112,11 @@ FIXTURE_DATA_TEST_CASE(RunLarge, CLReductionOperationFixture<float>, framework::
     // Validate output
     validate(CLAccessor(_target), _reference, tolerance_f32);
 }
-TEST_SUITE_END()
-TEST_SUITE_END()
+TEST_SUITE_END() // F32
+TEST_SUITE_END() // Float
 
-TEST_SUITE_END()
-TEST_SUITE_END()
+TEST_SUITE_END() // Reduction
+TEST_SUITE_END() // CL
 } // namespace validation
 } // namespace test
 } // namespace arm_compute
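
Note on the commit message: it states that for F16/F32 MeanStdDev one reduction is run for the mean and one for the stddev, with the final result computed on the host CPU. As a rough sketch of that host-side step only (this is not the library's actual CLMeanStdDev code; the function, struct, and variable names below are hypothetical), once a sum and a sum-of-squares reduction are available the remaining work is a few scalar operations:

#include <cmath>
#include <cstddef>

// Hypothetical host-side finalisation: given the results of two device-side
// reductions (sum of elements and sum of squared elements), derive mean and
// standard deviation on the CPU using Var(x) = E[x^2] - E[x]^2.
struct MeanStdDev
{
    float mean;
    float stddev;
};

inline MeanStdDev finalize_mean_stddev(float sum, float sum_of_squares, std::size_t num_elements)
{
    const float n        = static_cast<float>(num_elements);
    const float mean     = sum / n;
    const float variance = sum_of_squares / n - mean * mean;
    return { mean, std::sqrt(variance) };
}

// Example: values {1, 2, 3, 4} give sum = 10 and sum_of_squares = 30,
// so mean = 2.5 and stddev = sqrt(7.5 - 2.5 * 2.5) ≈ 1.118.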