aboutsummaryrefslogtreecommitdiff
path: root/tests
diff options
context:
space:
mode:
authorManuel Bottini <manuel.bottini@arm.com>2019-11-13 17:24:43 +0000
committerSiCong Li <sicong.li@arm.com>2019-11-21 13:21:33 +0000
commit3e84bb662f9c6c3f77837640b44c41b7e3403ed4 (patch)
tree7096e2a6506baf62ddea78556413fa193ebec76f /tests
parent5f7dda6f415f8f065f86b9f52ee6c5c85bbaa5e4 (diff)
downloadComputeLibrary-3e84bb662f9c6c3f77837640b44c41b7e3403ed4.tar.gz
COMPMID-2920: NEInstanceNormalization fails on NHWC validations
Improved TensorInfo to accept DataLayout, useful to test the validate functions Removing nightlies tests Moving all vpadds instructions in add.h Change-Id: I96290a6f26272eae865dba48bbc3c6aee4bc0214 Signed-off-by: Manuel Bottini <manuel.bottini@arm.com> Reviewed-on: https://review.mlplatform.org/c/2287 Comments-Addressed: Arm Jenkins <bsgcomp@arm.com> Tested-by: Arm Jenkins <bsgcomp@arm.com> Reviewed-by: Michele Di Giorgio <michele.digiorgio@arm.com> Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
Diffstat (limited to 'tests')
-rw-r--r--tests/validation/CL/InstanceNormalizationLayer.cpp28
-rw-r--r--tests/validation/NEON/InstanceNormalizationLayer.cpp27
2 files changed, 13 insertions, 42 deletions
diff --git a/tests/validation/CL/InstanceNormalizationLayer.cpp b/tests/validation/CL/InstanceNormalizationLayer.cpp
index 165ab1fa9c..06de9e5303 100644
--- a/tests/validation/CL/InstanceNormalizationLayer.cpp
+++ b/tests/validation/CL/InstanceNormalizationLayer.cpp
@@ -43,8 +43,8 @@ namespace validation
namespace
{
/** Tolerance for float operations */
-AbsoluteTolerance<float> tolerance_f32(0.001f);
-AbsoluteTolerance<float> tolerance_f16(2.f);
+AbsoluteTolerance<float> tolerance_f32(0.0015f);
+AbsoluteTolerance<float> tolerance_f16(0.5f);
} // namespace
TEST_SUITE(CL)
@@ -57,6 +57,8 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(
TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F32), // Mismatching shape input/output
TensorInfo(TensorShape(128U, 64U, 32U, 4U), 2, DataType::F32), // Number of Input channels != 1
TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::S16), // DataType != F32
+ TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F32, DataLayout::NCHW),
+ TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F32, DataLayout::NHWC),
TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F32),
TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F32),
TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F32),
@@ -66,12 +68,14 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(
TensorInfo(TensorShape(256U, 64U, 32U, 4U), 1, DataType::F32),
TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F32),
TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::S16),
+ TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F32, DataLayout::NCHW),
+ TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F32, DataLayout::NHWC),
TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F32),
TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F32),
TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F32),
TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F32)
})),
- framework::dataset::make("Expected", { false, false, false, false, true, true, true, true })),
+ framework::dataset::make("Expected", { false, false, false, false, true, true, true, true, true, true })),
input_info, output_info, expected)
{
bool is_valid = bool(CLInstanceNormalizationLayer::validate(&input_info.clone()->set_is_resizable(false),
@@ -96,15 +100,6 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLInstanceNormalizationLayerFixture<float>, fra
validate(CLAccessor(_target), _reference, tolerance_f32);
}
-FIXTURE_DATA_TEST_CASE(RunLarge, CLInstanceNormalizationLayerFixture<float>, framework::DatasetMode::NIGHTLY,
- combine(combine(combine(datasets::Large4DShapes(),
- framework::dataset::make("DataType", DataType::F32)),
- framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
- framework::dataset::make("InPlace", { false, true })))
-{
- // Validate output
- validate(CLAccessor(_target), _reference, tolerance_f32);
-}
TEST_SUITE_END() // FP32
TEST_SUITE(FP16)
FIXTURE_DATA_TEST_CASE(RunSmall, CLInstanceNormalizationLayerFixture<half>, framework::DatasetMode::PRECOMMIT,
@@ -117,15 +112,6 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLInstanceNormalizationLayerFixture<half>, fram
validate(CLAccessor(_target), _reference, tolerance_f16);
}
-FIXTURE_DATA_TEST_CASE(RunLarge, CLInstanceNormalizationLayerFixture<half>, framework::DatasetMode::NIGHTLY,
- combine(combine(combine(datasets::LargeShapes(),
- framework::dataset::make("DataType", DataType::F16)),
- framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
- framework::dataset::make("InPlace", { false, true })))
-{
- // Validate output
- validate(CLAccessor(_target), _reference, tolerance_f16);
-}
TEST_SUITE_END() // FP16
TEST_SUITE_END() // InstanceNormalizationLayer
TEST_SUITE_END() // CL
diff --git a/tests/validation/NEON/InstanceNormalizationLayer.cpp b/tests/validation/NEON/InstanceNormalizationLayer.cpp
index b4be6ba109..d2a80f24f7 100644
--- a/tests/validation/NEON/InstanceNormalizationLayer.cpp
+++ b/tests/validation/NEON/InstanceNormalizationLayer.cpp
@@ -45,7 +45,7 @@ namespace
/** Tolerance for float operations */
AbsoluteTolerance<float> tolerance_f32(0.0015f);
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
-AbsoluteTolerance<float> tolerance_f16(0.2f);
+AbsoluteTolerance<float> tolerance_f16(0.5f);
#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
} // namespace
@@ -59,6 +59,8 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(
TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F32), // Mismatching shape input/output
TensorInfo(TensorShape(128U, 64U, 32U, 4U), 2, DataType::F32), // Number of Input channels != 1
TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::S16), // DataType != F32
+ TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F32, DataLayout::NCHW),
+ TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F32, DataLayout::NHWC),
TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F32),
TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F32),
TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F32),
@@ -68,12 +70,14 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(
TensorInfo(TensorShape(256U, 64U, 32U, 4U), 1, DataType::F32),
TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F32),
TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::S16),
+ TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F32, DataLayout::NCHW),
+ TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F32, DataLayout::NHWC),
TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F32),
TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F32),
TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F32),
TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F32)
})),
- framework::dataset::make("Expected", { false, false, false, false, true, true, true, true })),
+ framework::dataset::make("Expected", { false, false, false, false, true, true, true, true, true, true })),
input_info, output_info, expected)
{
bool is_valid = bool(NEInstanceNormalizationLayer::validate(&input_info.clone()->set_is_resizable(false),
@@ -98,15 +102,6 @@ FIXTURE_DATA_TEST_CASE(RunSmall, NEInstanceNormalizationLayerFixture<float>, fra
validate(Accessor(_target), _reference, tolerance_f32);
}
-FIXTURE_DATA_TEST_CASE(RunLarge, NEInstanceNormalizationLayerFixture<float>, framework::DatasetMode::NIGHTLY,
- combine(combine(combine(datasets::Large4DShapes(),
- framework::dataset::make("DataType", DataType::F32)),
- framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
- framework::dataset::make("InPlace", { false, true })))
-{
- // Validate output
- validate(Accessor(_target), _reference, tolerance_f32);
-}
TEST_SUITE_END() // FP32
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
@@ -120,16 +115,6 @@ FIXTURE_DATA_TEST_CASE(RunSmall, NEInstanceNormalizationLayerFixture<half>, fram
// Validate output
validate(Accessor(_target), _reference, tolerance_f16);
}
-
-FIXTURE_DATA_TEST_CASE(RunLarge, NEInstanceNormalizationLayerFixture<half>, framework::DatasetMode::NIGHTLY,
- combine(combine(combine(datasets::LargeShapes(),
- framework::dataset::make("DataType", DataType::F16)),
- framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
- framework::dataset::make("InPlace", { false, true })))
-{
- // Validate output
- validate(Accessor(_target), _reference, tolerance_f16);
-}
TEST_SUITE_END() // FP16
#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC