path: root/tests/validation/CL/Winograd.cpp
author    Vidhya Sudhan Loganathan <vidhyasudhan.loganathan@arm.com>  2018-11-16 11:33:12 +0000
committer Georgios Pinitas <georgios.pinitas@arm.com>  2018-11-16 17:37:40 +0000
commit    a25d16c86f0d870408bc8b941aa755093417b0f0 (patch)
tree      b62d145a4e5009d894262a7ffa66cdba8260bb03 /tests/validation/CL/Winograd.cpp
parent    a7b54f44e2bf133179f24a34007bc93237dd2265 (diff)
download  ComputeLibrary-a25d16c86f0d870408bc8b941aa755093417b0f0.tar.gz
COMPMID-1266: Add support for FP16 in CLWinogradConvolutionLayer: 5x5 kernels
Introduced F32 accumulation for the F16 Winograd GEMM and output transform. WinogradConvolution will be available for F16 only if the fast math flag is enabled.
Change-Id: I215593c205236a0f9669218437bb40b184ec6a4f
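The gist of the accumulation change, sketched below in plain C++ with hypothetical names (the real change lives in the library's OpenCL Winograd kernels and in the fixture's new accumulation-type template parameter, visible in the diff below): products of F16 operands are summed in an F32 accumulator and narrowed back to F16 only once, at the end, so rounding error does not compound on every partial sum.

    #include <cstddef>
    #include <vector>

    using half = _Float16; // assumes a compiler providing the _Float16 extension

    // Illustration only: accumulate half-precision products in a 32-bit
    // float accumulator instead of rounding to F16 at every step.
    half dot_f16_acc_f32(const std::vector<half> &a, const std::vector<half> &b)
    {
        float acc = 0.f;
        for(std::size_t i = 0; i < a.size(); ++i)
        {
            acc += static_cast<float>(a[i]) * static_cast<float>(b[i]);
        }
        return static_cast<half>(acc); // narrow once, at the end
    }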
Diffstat (limited to 'tests/validation/CL/Winograd.cpp')
-rw-r--r-- tests/validation/CL/Winograd.cpp | 21
1 file changed, 12 insertions(+), 9 deletions(-)
diff --git a/tests/validation/CL/Winograd.cpp b/tests/validation/CL/Winograd.cpp
index 930f7aa8ce..f7f06b7f79 100644
--- a/tests/validation/CL/Winograd.cpp
+++ b/tests/validation/CL/Winograd.cpp
@@ -58,6 +58,9 @@ constexpr AbsoluteTolerance<float> tolerance_f32(0.001f);
const AbsoluteTolerance<half> tolerance_f16(half(0.5f));
constexpr AbsoluteTolerance<float> tolerance_convolution_layer_f32(0.1f);
const AbsoluteTolerance<half> tolerance_convolution_layer_f16(half(0.4f));
+RelativeTolerance<half_float::half> rel_tolerance_f16(half(0.2)); /**< Tolerance value for comparing reference's output against implementation's output for FP16 data types */
+constexpr float tolerance_num = 0.05f; /**< Maximum allowed ratio of mismatched values for FP16 comparisons */
+constexpr float abs_tolerance_convolution_layer_f16 = 2.5f; /**< Absolute tolerance value for comparing reference's output against implementation's output for FP16 data types */
// Input transform
const auto SmallWinogradInputTransformDatasetNCHW =
@@ -834,10 +837,10 @@ FIXTURE_DATA_TEST_CASE(RunLarge, CLWinogradConvolutionLayerFastMathFixture, fram
TEST_SUITE_END() // Conv1x5
TEST_SUITE_END() // FP32
-#ifdef WINOGRAD_F16_SUPPORT //to be reintroduced after COMPMID-1266 is resolved
+
TEST_SUITE(FP16)
-using CLWinogradConvolutionLayerFastMathFixture16 = WinogradConvolutionLayerFastMathValidationFixture<CLTensor, CLAccessor, CLWinogradConvolutionLayer, half>;
+using CLWinogradConvolutionLayerFastMathFixture16 = WinogradConvolutionLayerFastMathValidationFixture<CLTensor, CLAccessor, CLWinogradConvolutionLayer, half, float>;
TEST_SUITE(Conv3x3)
FIXTURE_DATA_TEST_CASE(RunSmall, CLWinogradConvolutionLayerFastMathFixture16, framework::DatasetMode::PRECOMMIT,
combine(combine(combine(datasets::SmallWinogradConvolutionLayer3x3Dataset(),
@@ -856,7 +859,7 @@ FIXTURE_DATA_TEST_CASE(RunLarge, CLWinogradConvolutionLayerFastMathFixture16, fr
framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
{
// Validate output
- validate(CLAccessor(_target), _reference, tolerance_convolution_layer_f16);
+ validate(CLAccessor(_target), _reference, rel_tolerance_f16, tolerance_num, abs_tolerance_convolution_layer_f16);
}
TEST_SUITE_END() // Conv3x3
@@ -878,7 +881,7 @@ FIXTURE_DATA_TEST_CASE(RunLarge, CLWinogradConvolutionLayerFastMathFixture16, fr
framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
{
// Validate output
- validate(CLAccessor(_target), _reference, tolerance_convolution_layer_f16);
+ validate(CLAccessor(_target), _reference, rel_tolerance_f16, tolerance_num, abs_tolerance_convolution_layer_f16);
}
TEST_SUITE_END() // Conv3x1
@@ -900,7 +903,7 @@ FIXTURE_DATA_TEST_CASE(RunLarge, CLWinogradConvolutionLayerFastMathFixture16, fr
framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
{
// Validate output
- validate(CLAccessor(_target), _reference, tolerance_convolution_layer_f16);
+ validate(CLAccessor(_target), _reference, rel_tolerance_f16, tolerance_num, abs_tolerance_convolution_layer_f16);
}
TEST_SUITE_END() // Conv1x3
@@ -924,7 +927,7 @@ FIXTURE_DATA_TEST_CASE(RunLarge, CLWinogradConvolutionLayerFastMathFixture16, fr
{
// Validate output
- validate(CLAccessor(_target), _reference, tolerance_convolution_layer_f16);
+ validate(CLAccessor(_target), _reference, rel_tolerance_f16, tolerance_num, abs_tolerance_convolution_layer_f16);
}
TEST_SUITE_END() // Conv5x5
@@ -948,7 +951,7 @@ FIXTURE_DATA_TEST_CASE(RunLarge, CLWinogradConvolutionLayerFastMathFixture16, fr
{
// Validate output
- validate(CLAccessor(_target), _reference, tolerance_convolution_layer_f16);
+ validate(CLAccessor(_target), _reference, rel_tolerance_f16, tolerance_num, abs_tolerance_convolution_layer_f16);
}
TEST_SUITE_END() // Conv5x1
@@ -972,12 +975,12 @@ FIXTURE_DATA_TEST_CASE(RunLarge, CLWinogradConvolutionLayerFastMathFixture16, fr
{
// Validate output
- validate(CLAccessor(_target), _reference, tolerance_convolution_layer_f16);
+ validate(CLAccessor(_target), _reference, rel_tolerance_f16, tolerance_num, abs_tolerance_convolution_layer_f16);
}
TEST_SUITE_END() // Conv1x5
TEST_SUITE_END() // FP16
-#endif /*#ifdef WINOGRAD_F16_SUPPORT*/
+
TEST_SUITE_END() // ConvolutionLayer
TEST_SUITE_END() // Winograd
TEST_SUITE_END() // CL
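For context on the repeated validate() change above: the FP16 checks now pass a relative tolerance, a tolerance_num value, and an absolute tolerance instead of the single absolute F16 tolerance. Below is a rough sketch of how such a three-parameter acceptance rule can work, assuming tolerance_num is the allowed fraction of out-of-tolerance elements; the names are illustrative, not the framework's actual implementation.

    #include <cmath>
    #include <cstddef>
    #include <vector>

    // Illustration only: an element passes if it is within the absolute
    // tolerance OR the relative tolerance; the test passes if the fraction
    // of failing elements does not exceed tolerance_num.
    bool validate_sketch(const std::vector<float> &target, const std::vector<float> &reference,
                         float rel_tol, float tolerance_num, float abs_tol)
    {
        if(target.empty())
        {
            return true;
        }
        std::size_t mismatched = 0;
        for(std::size_t i = 0; i < target.size(); ++i)
        {
            const float diff   = std::fabs(target[i] - reference[i]);
            const bool  abs_ok = diff <= abs_tol;
            const bool  rel_ok = diff <= rel_tol * std::fabs(reference[i]);
            if(!abs_ok && !rel_ok)
            {
                ++mismatched;
            }
        }
        return static_cast<float>(mismatched) / static_cast<float>(target.size()) <= tolerance_num;
    }

Combining a relative bound with an absolute floor keeps near-zero outputs from failing the relative check, which matters for F16 results whose magnitudes vary widely.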