Diffstat (limited to 'tests/validation/NEON/InstanceNormalizationLayer.cpp')
 tests/validation/NEON/InstanceNormalizationLayer.cpp | 27 ++++++---------------------
 1 file changed, 6 insertions(+), 21 deletions(-)
diff --git a/tests/validation/NEON/InstanceNormalizationLayer.cpp b/tests/validation/NEON/InstanceNormalizationLayer.cpp
index b4be6ba109..d2a80f24f7 100644
--- a/tests/validation/NEON/InstanceNormalizationLayer.cpp
+++ b/tests/validation/NEON/InstanceNormalizationLayer.cpp
@@ -45,7 +45,7 @@ namespace
/** Tolerance for float operations */
AbsoluteTolerance<float> tolerance_f32(0.0015f);
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
-AbsoluteTolerance<float> tolerance_f16(0.2f);
+AbsoluteTolerance<float> tolerance_f16(0.5f);
#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
} // namespace
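
For context, a minimal self-contained sketch (not part of this patch or of the framework code) of what an absolute tolerance means for the comparison performed by validate(): an element passes when |target - reference| <= tolerance, so raising tolerance_f16 from 0.2f to 0.5f widens the accepted error band for FP16 results.

#include <cmath>

// Sketch only: the framework's validate() helper performs (roughly) this
// element-wise check when it is given an AbsoluteTolerance value.
bool within_absolute_tolerance(float target, float reference, float tolerance)
{
    return std::fabs(target - reference) <= tolerance;
}
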
@@ -59,6 +59,8 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(
TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F32), // Mismatching shape input/output
TensorInfo(TensorShape(128U, 64U, 32U, 4U), 2, DataType::F32), // Number of Input channels != 1
TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::S16), // DataType != F32
+ TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F32, DataLayout::NCHW),
+ TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F32, DataLayout::NHWC),
TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F32),
TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F32),
TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F32),
@@ -68,12 +70,14 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(
TensorInfo(TensorShape(256U, 64U, 32U, 4U), 1, DataType::F32),
TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F32),
TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::S16),
+ TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F32, DataLayout::NCHW),
+ TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F32, DataLayout::NHWC),
TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F32),
TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F32),
TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F32),
TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F32)
})),
- framework::dataset::make("Expected", { false, false, false, false, true, true, true, true })),
+ framework::dataset::make("Expected", { false, false, false, false, true, true, true, true, true, true })),
input_info, output_info, expected)
{
bool is_valid = bool(NEInstanceNormalizationLayer::validate(&input_info.clone()->set_is_resizable(false),
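
For illustration, a hypothetical stand-alone use of the same static check this test case drives (assumptions: the listed headers and the gamma/beta/epsilon parameters of NEInstanceNormalizationLayer::validate keeping their defaults, as they do in the test). The returned Status converts to false when a configuration is rejected, which is what the Expected = false rows assert.

#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/NEON/functions/NEInstanceNormalizationLayer.h"

// Hypothetical helper (not in the test): ask the validate() entry point
// whether a given data layout is supported for the shape and data type used
// by the TensorInfo rows in the dataset above.
bool is_instance_norm_config_valid(arm_compute::DataLayout layout)
{
    using namespace arm_compute;
    const TensorInfo input(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F32, layout);
    const TensorInfo output(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F32, layout);
    return bool(NEInstanceNormalizationLayer::validate(&input, &output));
}
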
@@ -98,15 +102,6 @@ FIXTURE_DATA_TEST_CASE(RunSmall, NEInstanceNormalizationLayerFixture<float>, fra
validate(Accessor(_target), _reference, tolerance_f32);
}
-FIXTURE_DATA_TEST_CASE(RunLarge, NEInstanceNormalizationLayerFixture<float>, framework::DatasetMode::NIGHTLY,
- combine(combine(combine(datasets::Large4DShapes(),
- framework::dataset::make("DataType", DataType::F32)),
- framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
- framework::dataset::make("InPlace", { false, true })))
-{
- // Validate output
- validate(Accessor(_target), _reference, tolerance_f32);
-}
TEST_SUITE_END() // FP32
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
@@ -120,16 +115,6 @@ FIXTURE_DATA_TEST_CASE(RunSmall, NEInstanceNormalizationLayerFixture<half>, fram
// Validate output
validate(Accessor(_target), _reference, tolerance_f16);
}
-
-FIXTURE_DATA_TEST_CASE(RunLarge, NEInstanceNormalizationLayerFixture<half>, framework::DatasetMode::NIGHTLY,
- combine(combine(combine(datasets::LargeShapes(),
- framework::dataset::make("DataType", DataType::F16)),
- framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
- framework::dataset::make("InPlace", { false, true })))
-{
- // Validate output
- validate(Accessor(_target), _reference, tolerance_f16);
-}
TEST_SUITE_END() // FP16
#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
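
As a reading aid for the removed nightly cases (illustration only, not code from this patch): in this test framework combine() forms the Cartesian product of its operands, so each deleted RunLarge configuration expanded to every large shape crossed with both data layouts and both in-place settings. The FP32 variant corresponded to the dataset sketched below.

// Mirrors the FP32 dataset deleted above:
// Large4DShapes x {F32} x {NCHW, NHWC} x {InPlace = false, true}.
const auto removed_fp32_nightly_dataset =
    combine(combine(combine(datasets::Large4DShapes(),
                            framework::dataset::make("DataType", DataType::F32)),
                    framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
            framework::dataset::make("InPlace", { false, true }));
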