From 3e84bb662f9c6c3f77837640b44c41b7e3403ed4 Mon Sep 17 00:00:00 2001
From: Manuel Bottini
Date: Wed, 13 Nov 2019 17:24:43 +0000
Subject: COMPMID-2920: NEInstanceNormalization fails on NHWC validations

Improved TensorInfo to accept a DataLayout, which is useful for testing
the validate functions.

Removed the nightly tests.

Moved all vpadd instructions into add.h.

Change-Id: I96290a6f26272eae865dba48bbc3c6aee4bc0214
Signed-off-by: Manuel Bottini
Reviewed-on: https://review.mlplatform.org/c/2287
Comments-Addressed: Arm Jenkins
Tested-by: Arm Jenkins
Reviewed-by: Michele Di Giorgio
Reviewed-by: Georgios Pinitas
---
 tests/validation/CL/InstanceNormalizationLayer.cpp   | 28 ++++++----------------
 .../validation/NEON/InstanceNormalizationLayer.cpp   | 27 +++++----------------
 2 files changed, 13 insertions(+), 42 deletions(-)

diff --git a/tests/validation/CL/InstanceNormalizationLayer.cpp b/tests/validation/CL/InstanceNormalizationLayer.cpp
index 165ab1fa9c..06de9e5303 100644
--- a/tests/validation/CL/InstanceNormalizationLayer.cpp
+++ b/tests/validation/CL/InstanceNormalizationLayer.cpp
@@ -43,8 +43,8 @@ namespace validation
 namespace
 {
 /** Tolerance for float operations */
-AbsoluteTolerance<float> tolerance_f32(0.001f);
-AbsoluteTolerance<half>  tolerance_f16(2.f);
+AbsoluteTolerance<float> tolerance_f32(0.0015f);
+AbsoluteTolerance<half>  tolerance_f16(0.5f);
 } // namespace
 
 TEST_SUITE(CL)
@@ -57,6 +57,8 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(
     TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F32), // Mismatching shape input/output
     TensorInfo(TensorShape(128U, 64U, 32U, 4U), 2, DataType::F32), // Number of Input channels != 1
     TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::S16), // DataType != F32
+    TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F32, DataLayout::NCHW),
+    TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F32, DataLayout::NHWC),
     TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F32),
     TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F32),
     TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F32),
@@ -66,12 +68,14 @@
     TensorInfo(TensorShape(256U, 64U, 32U, 4U), 1, DataType::F32),
     TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F32),
     TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::S16),
+    TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F32, DataLayout::NCHW),
+    TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F32, DataLayout::NHWC),
     TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F32),
     TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F32),
     TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F32),
     TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F32) })),
-    framework::dataset::make("Expected", { false, false, false, false, true, true, true, true })),
+    framework::dataset::make("Expected", { false, false, false, false, true, true, true, true, true, true })),
     input_info, output_info, expected)
 {
     bool is_valid = bool(CLInstanceNormalizationLayer::validate(&input_info.clone()->set_is_resizable(false),
@@ -96,15 +100,6 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLInstanceNormalizationLayerFixture<float>, fra
     validate(CLAccessor(_target), _reference, tolerance_f32);
 }
 
-FIXTURE_DATA_TEST_CASE(RunLarge, CLInstanceNormalizationLayerFixture<float>, framework::DatasetMode::NIGHTLY,
-                       combine(combine(combine(datasets::Large4DShapes(),
-                                               framework::dataset::make("DataType", DataType::F32)),
-                                       framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
-                               framework::dataset::make("InPlace", { false, true })))
-{
-    // Validate output
-    validate(CLAccessor(_target), _reference, tolerance_f32);
-}
 TEST_SUITE_END() // FP32
 TEST_SUITE(FP16)
 FIXTURE_DATA_TEST_CASE(RunSmall, CLInstanceNormalizationLayerFixture<half>, framework::DatasetMode::PRECOMMIT,
@@ -117,15 +112,6 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLInstanceNormalizationLayerFixture<half>, fram
     validate(CLAccessor(_target), _reference, tolerance_f16);
 }
 
-FIXTURE_DATA_TEST_CASE(RunLarge, CLInstanceNormalizationLayerFixture<half>, framework::DatasetMode::NIGHTLY,
-                       combine(combine(combine(datasets::LargeShapes(),
-                                               framework::dataset::make("DataType", DataType::F16)),
-                                       framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
-                               framework::dataset::make("InPlace", { false, true })))
-{
-    // Validate output
-    validate(CLAccessor(_target), _reference, tolerance_f16);
-}
 TEST_SUITE_END() // FP16
 TEST_SUITE_END() // InstanceNormalizationLayer
 TEST_SUITE_END() // CL
diff --git a/tests/validation/NEON/InstanceNormalizationLayer.cpp b/tests/validation/NEON/InstanceNormalizationLayer.cpp
index b4be6ba109..d2a80f24f7 100644
--- a/tests/validation/NEON/InstanceNormalizationLayer.cpp
+++ b/tests/validation/NEON/InstanceNormalizationLayer.cpp
@@ -45,7 +45,7 @@ namespace
 /** Tolerance for float operations */
 AbsoluteTolerance<float> tolerance_f32(0.0015f);
 #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
-AbsoluteTolerance<half> tolerance_f16(0.2f);
+AbsoluteTolerance<half> tolerance_f16(0.5f);
 #endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
 } // namespace
 
@@ -59,6 +59,8 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(
     TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F32), // Mismatching shape input/output
     TensorInfo(TensorShape(128U, 64U, 32U, 4U), 2, DataType::F32), // Number of Input channels != 1
     TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::S16), // DataType != F32
+    TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F32, DataLayout::NCHW),
+    TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F32, DataLayout::NHWC),
     TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F32),
     TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F32),
     TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F32),
@@ -68,12 +70,14 @@
     TensorInfo(TensorShape(256U, 64U, 32U, 4U), 1, DataType::F32),
     TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F32),
     TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::S16),
+    TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F32, DataLayout::NCHW),
+    TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F32, DataLayout::NHWC),
     TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F32),
     TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F32),
     TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F32),
     TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F32) })),
-    framework::dataset::make("Expected", { false, false, false, false, true, true, true, true })),
+    framework::dataset::make("Expected", { false, false, false, false, true, true, true, true, true, true })),
     input_info, output_info, expected)
 {
     bool is_valid = bool(NEInstanceNormalizationLayer::validate(&input_info.clone()->set_is_resizable(false),
@@ -98,15 +102,6 @@ FIXTURE_DATA_TEST_CASE(RunSmall, NEInstanceNormalizationLayerFixture<float>, fra
     validate(Accessor(_target), _reference, tolerance_f32);
 }
 
-FIXTURE_DATA_TEST_CASE(RunLarge, NEInstanceNormalizationLayerFixture<float>, framework::DatasetMode::NIGHTLY,
-                       combine(combine(combine(datasets::Large4DShapes(),
-                                               framework::dataset::make("DataType", DataType::F32)),
-                                       framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
-                               framework::dataset::make("InPlace", { false, true })))
-{
-    // Validate output
-    validate(Accessor(_target), _reference, tolerance_f32);
-}
 TEST_SUITE_END() // FP32
 
 #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
@@ -120,16 +115,6 @@ FIXTURE_DATA_TEST_CASE(RunSmall, NEInstanceNormalizationLayerFixture<half>, fram
     // Validate output
     validate(Accessor(_target), _reference, tolerance_f16);
 }
-
-FIXTURE_DATA_TEST_CASE(RunLarge, NEInstanceNormalizationLayerFixture<half>, framework::DatasetMode::NIGHTLY,
-                       combine(combine(combine(datasets::LargeShapes(),
-                                               framework::dataset::make("DataType", DataType::F16)),
-                                       framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
-                               framework::dataset::make("InPlace", { false, true })))
-{
-    // Validate output
-    validate(Accessor(_target), _reference, tolerance_f16);
-}
 TEST_SUITE_END() // FP16
 #endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
 
-- 
cgit v1.2.1
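
Illustrative usage (not part of the patch): the new validate cases above
build layout-tagged TensorInfo objects and pass them straight to the
static validate() entry point, with no tensor allocation. A minimal
standalone sketch of that pattern follows. The include paths, the
variable names, and the assumption that the remaining validate()
parameters (gamma, beta, epsilon) are defaulted come from the library's
usual layout, not from this diff.

    #include "arm_compute/core/TensorInfo.h"
    #include "arm_compute/core/TensorShape.h"
    #include "arm_compute/core/Types.h"
    #include "arm_compute/runtime/NEON/functions/NEInstanceNormalizationLayer.h"

    using namespace arm_compute;

    int main()
    {
        // Same shape as the test vectors above: W=128, H=64, C=32, N=4.
        const TensorShape shape(128U, 64U, 32U, 4U);

        // The four-argument TensorInfo constructor (shape, channels,
        // data type, layout) is the improvement this commit relies on:
        // it lets a test request NHWC explicitly.
        const TensorInfo input(shape, 1, DataType::F32, DataLayout::NHWC);
        const TensorInfo output(shape, 1, DataType::F32, DataLayout::NHWC);

        // Static check only; nothing is configured or run. As in the
        // tests, Status converts to bool, where true means "valid".
        const Status status = NEInstanceNormalizationLayer::validate(&input, &output);
        return bool(status) ? 0 : 1;
    }

The same pattern drives the CL variant through
CLInstanceNormalizationLayer::validate(); only the function class and the
accessor differ.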