author    Vidhya Sudhan Loganathan <vidhyasudhan.loganathan@arm.com> 2018-06-18 14:40:56 +0100
committer Anthony Barbier <anthony.barbier@arm.com> 2018-11-02 16:53:09 +0000
commit    0fc25454b6ce499b7f89792f91b81a61a42d3182 (patch)
tree      0c988cbbef3f5f1967cc2f77672d05a702191349 /tests/validation/NEON/BatchNormalizationLayer.cpp
parent    f5f0ecca39c905bc8c1f90e7469eed2221ca662f (diff)
COMPMID-970: Remove QS8 / QS16 support
Remove QS8 and QS16 validation and benchmark tests

Change-Id: I566f1474c1fafcb3903115ec2d3a003d73e4c93b
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/133762
Tested-by: Jenkins <bsgcomp@arm.com>
Reviewed-by: Pablo Tello <pablo.tello@arm.com>
Diffstat (limited to 'tests/validation/NEON/BatchNormalizationLayer.cpp')
-rw-r--r--  tests/validation/NEON/BatchNormalizationLayer.cpp | 60
1 file changed, 3 insertions(+), 57 deletions(-)
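For context, this patch leaves only the floating-point batch-normalization tests in place. Below is a minimal sketch of what a surviving F32 precommit case would look like, modelled on the removed QS8/QS16 fixtures in the diff and reusing only names visible in this patch (act_infos, tolerance_f32, NEBatchNormalizationLayerFixture, validate); the Float/FP32 suite names and the exact dataset chain in the current file are assumptions, not taken from this diff:

TEST_SUITE(Float)
TEST_SUITE(FP32)
FIXTURE_DATA_TEST_CASE(Random, NEBatchNormalizationLayerFixture<float>, framework::DatasetMode::PRECOMMIT,
                       combine(combine(combine(combine(combine(datasets::RandomBatchNormalizationLayerDataset(),
                                                               framework::dataset::make("UseBeta", { false, true })),
                                                       framework::dataset::make("UseGamma", { false, true })),
                                               act_infos),
                                       framework::dataset::make("DataType", DataType::F32)),
                               framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
{
    // Validate the NEON output against the reference within the F32 tolerance
    // (hypothetical sketch: mirrors the validate() call of the removed QS8/QS16 cases)
    validate(Accessor(_target), _reference, tolerance_f32, 0);
}
TEST_SUITE_END()
TEST_SUITE_END()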
diff --git a/tests/validation/NEON/BatchNormalizationLayer.cpp b/tests/validation/NEON/BatchNormalizationLayer.cpp
index 53fd0163ff..3a18a0a93b 100644
--- a/tests/validation/NEON/BatchNormalizationLayer.cpp
+++ b/tests/validation/NEON/BatchNormalizationLayer.cpp
@@ -48,9 +48,7 @@ constexpr AbsoluteTolerance<float> tolerance_f32(0.00001f); /**< Tolerance value for comparing reference's output against implementation's output for DataType::F32 */
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
constexpr AbsoluteTolerance<float> tolerance_f16(0.01f); /**< Tolerance value for comparing reference's output against implementation's output for DataType::F16 */
#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
-constexpr AbsoluteTolerance<float> tolerance_qs8(3.0f); /**< Tolerance value for comparing reference's output against implementation's output for DataType::QS8 */
-constexpr AbsoluteTolerance<float> tolerance_qs16(6.0f); /**< Tolerance value for comparing reference's output against implementation's output for DataType::QS16 */
-const auto act_infos = framework::dataset::make("ActivationInfo",
+const auto act_infos = framework::dataset::make("ActivationInfo",
{
ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.f),
@@ -66,7 +64,7 @@ using NEBatchNormalizationLayerFixture = BatchNormalizationLayerValidationFixture<Tensor, Accessor, NEBatchNormalizationLayer, T>;
DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(combine(datasets::RandomBatchNormalizationLayerDataset(),
combine(framework::dataset::make("UseBeta", { false, true }), framework::dataset::make("UseGamma", { false, true }))),
- framework::dataset::make("DataType", { DataType::QS8, DataType::QS16, DataType::F32 })),
+ framework::dataset::make("DataType", { DataType::F32 })),
framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
shape0, shape1, epsilon, use_beta, use_gamma, dt, data_layout)
{
@@ -106,46 +104,30 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(
TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), // Mismatching data types
TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), // Mismatching data types
TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), // Invalid mean/var/beta/gamma shape
- TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2), // Mismatching fixed point position
- TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2), // Fused activation with fixed point not supported
TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), // Fused activation's a < b
- TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2),
- TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2),
}),
framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32),
TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32),
TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32),
TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F16),
TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32),
- TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 3),
TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32),
- TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 3),
- TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2),
- TensorInfo(),
})),
framework::dataset::make("MVBGInfo",{ TensorInfo(TensorShape(2U), 1, DataType::F32),
TensorInfo(TensorShape(2U), 1, DataType::F32),
TensorInfo(TensorShape(2U), 1, DataType::F16),
TensorInfo(TensorShape(2U), 1, DataType::F32),
TensorInfo(TensorShape(5U), 1, DataType::F32),
- TensorInfo(TensorShape(2U), 1, DataType::QS8, 2),
- TensorInfo(TensorShape(2U), 1, DataType::QS8, 2),
TensorInfo(TensorShape(2U), 1, DataType::F32),
- TensorInfo(TensorShape(2U), 1, DataType::QS8, 2),
- TensorInfo(TensorShape(2U), 1, DataType::QS8, 2),
})),
framework::dataset::make("ActivationLayerInfo",{ ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.f),
ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.f),
ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 6.f),
- ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 6.f, 2.f),
- ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 6.f, 2.f),
ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 2.f, 6.f),
- ActivationLayerInfo(),
- ActivationLayerInfo(),
})),
- framework::dataset::make("Expected", { true, false, false, false, false, false, false, false, true, true})),
+ framework::dataset::make("Expected", { true, false, false, false, false, false})),
input_info, output_info, mvbg_info, act_info, expected)
{
const auto &mean_info = mvbg_info;
@@ -191,42 +173,6 @@ TEST_SUITE_END()
#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
TEST_SUITE_END()
-TEST_SUITE(Quantized)
-template <typename T>
-using NEBatchNormalizationLayerFixedPointFixture = BatchNormalizationLayerValidationFixedPointFixture<Tensor, Accessor, NEBatchNormalizationLayer, T>;
-
-TEST_SUITE(QS8)
-FIXTURE_DATA_TEST_CASE(Random, NEBatchNormalizationLayerFixedPointFixture<int8_t>, framework::DatasetMode::PRECOMMIT,
- combine(combine(combine(combine(combine(combine(datasets::RandomBatchNormalizationLayerDataset(),
- framework::dataset::make("UseBeta", false)),
- framework::dataset::make("UseGamma", false)),
- framework::dataset::make("ActivationInfo", ActivationLayerInfo())),
- framework::dataset::make("DataType", DataType::QS8)),
- framework::dataset::make("DataLayout", DataLayout::NCHW)),
- framework::dataset::make("FractionalBits", 1, 6)))
-{
- // Validate output
- validate(Accessor(_target), _reference, tolerance_qs8, 0);
-}
-TEST_SUITE_END()
-
-TEST_SUITE(QS16)
-FIXTURE_DATA_TEST_CASE(Random, NEBatchNormalizationLayerFixedPointFixture<int16_t>, framework::DatasetMode::PRECOMMIT,
- combine(combine(combine(combine(combine(combine(datasets::RandomBatchNormalizationLayerDataset(),
- framework::dataset::make("UseBeta", false)),
- framework::dataset::make("UseGamma", false)),
- framework::dataset::make("ActivationInfo", ActivationLayerInfo())),
- framework::dataset::make("DataType", DataType::QS16)),
- framework::dataset::make("DataLayout", DataLayout::NCHW)),
- framework::dataset::make("FractionalBits", 1, 14)))
-{
- // Validate output
- validate(Accessor(_target), _reference, tolerance_qs16, 0);
-}
-TEST_SUITE_END()
-
-TEST_SUITE_END()
-
TEST_SUITE_END()
TEST_SUITE_END()
} // namespace validation