Diffstat (limited to 'tests/validation')
-rw-r--r--  tests/validation/CL/BatchNormalizationLayer.cpp             41
-rw-r--r--  tests/validation/GLES_COMPUTE/BatchNormalizationLayer.cpp   19
-rw-r--r--  tests/validation/NEON/BatchNormalizationLayer.cpp           40
-rw-r--r--  tests/validation/fixtures/BatchNormalizationLayerFixture.h  54
-rw-r--r--  tests/validation/reference/BatchNormalizationLayer.cpp       1
5 files changed, 115 insertions(+), 40 deletions(-)
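Taken together, the patch makes the beta (offset) and gamma (scale) inputs of the batch normalization tests optional: a null beta acts as an all-zeros offset and a null gamma as an all-ones scale, matching the reference computation result = beta + gamma * (x - mean) / sqrt(var + epsilon). A minimal sketch of those semantics for a single channel (illustrative names, not the library's API):

#include <cmath>
#include <cstddef>

// Illustrative sketch, not the library's API: normalize one channel with
// optional beta/gamma, using the defaults the tests rely on
// (null beta -> 0.f, null gamma -> 1.f).
void batch_norm_channel(const float *src, float *dst, std::size_t n,
                        float mean, float var, float epsilon,
                        const float *beta,  // nullptr: no offset (0)
                        const float *gamma) // nullptr: no scale (1)
{
    const float b           = (beta != nullptr) ? *beta : 0.f;
    const float g           = (gamma != nullptr) ? *gamma : 1.f;
    const float denominator = std::sqrt(var + epsilon);
    for(std::size_t i = 0; i < n; ++i)
    {
        const float x_bar = (src[i] - mean) / denominator;
        dst[i]            = b + x_bar * g;
    }
}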
diff --git a/tests/validation/CL/BatchNormalizationLayer.cpp b/tests/validation/CL/BatchNormalizationLayer.cpp
index ef535153f2..8c143060cb 100644
--- a/tests/validation/CL/BatchNormalizationLayer.cpp
+++ b/tests/validation/CL/BatchNormalizationLayer.cpp
@@ -61,8 +61,11 @@ TEST_SUITE(BatchNormalizationLayer)
template <typename T>
using CLBatchNormalizationLayerFixture = BatchNormalizationLayerValidationFixture<CLTensor, CLAccessor, CLBatchNormalizationLayer, T>;
-DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(datasets::RandomBatchNormalizationLayerDataset(), framework::dataset::make("DataType", { DataType::QS8, DataType::QS16, DataType::F16, DataType::F32 })),
- shape0, shape1, epsilon, dt)
+DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(datasets::RandomBatchNormalizationLayerDataset(),
+ combine(framework::dataset::make("UseBeta", { false, true }),
+ framework::dataset::make("UseGamma", { false, true }))),
+ framework::dataset::make("DataType", { DataType::QS8, DataType::QS16, DataType::F16, DataType::F32 })),
+                   shape0, shape1, epsilon, use_beta, use_gamma, dt)
{
// Set a fixed point position if the data type is fixed point
const int fixed_point_position = (arm_compute::is_data_type_fixed_point(dt)) ? 3 : 0;
@@ -77,7 +80,9 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(datasets::Ran
// Create and Configure function
CLBatchNormalizationLayer norm;
- norm.configure(&src, &dst, &mean, &var, &beta, &gamma, epsilon);
+ CLTensor *beta_ptr = use_beta ? &beta : nullptr;
+ CLTensor *gamma_ptr = use_gamma ? &gamma : nullptr;
+ norm.configure(&src, &dst, &mean, &var, beta_ptr, gamma_ptr, epsilon);
// Validate valid region
const ValidRegion valid_region = shape_to_valid_region(shape0);
@@ -150,7 +155,9 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(
TEST_SUITE(Float)
TEST_SUITE(FP32)
-FIXTURE_DATA_TEST_CASE(Random, CLBatchNormalizationLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::RandomBatchNormalizationLayerDataset(),
+FIXTURE_DATA_TEST_CASE(Random, CLBatchNormalizationLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::RandomBatchNormalizationLayerDataset(),
+ combine(framework::dataset::make("UseBeta", { false, true }),
+ framework::dataset::make("UseGamma", { false, true }))),
act_infos),
framework::dataset::make("DataType", DataType::F32)))
{
@@ -160,7 +167,9 @@ FIXTURE_DATA_TEST_CASE(Random, CLBatchNormalizationLayerFixture<float>, framewor
TEST_SUITE_END()
TEST_SUITE(FP16)
-FIXTURE_DATA_TEST_CASE(Random, CLBatchNormalizationLayerFixture<half>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::RandomBatchNormalizationLayerDataset(),
+FIXTURE_DATA_TEST_CASE(Random, CLBatchNormalizationLayerFixture<half>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::RandomBatchNormalizationLayerDataset(),
+ combine(framework::dataset::make("UseBeta", { false, true }),
+ framework::dataset::make("UseGamma", { false, true }))),
framework::dataset::make("ActivationInfo", ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.f))),
framework::dataset::make("DataType", DataType::F16)))
{
@@ -175,10 +184,13 @@ template <typename T>
using CLBatchNormalizationLayerFixedPointFixture = BatchNormalizationLayerValidationFixedPointFixture<CLTensor, CLAccessor, CLBatchNormalizationLayer, T>;
TEST_SUITE(QS8)
-FIXTURE_DATA_TEST_CASE(Random, CLBatchNormalizationLayerFixedPointFixture<int8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::RandomBatchNormalizationLayerDataset(),
- framework::dataset::make("ActivationInfo", ActivationLayerInfo())),
- framework::dataset::make("DataType", DataType::QS8)),
- framework::dataset::make("FractionalBits", 1, 6)))
+FIXTURE_DATA_TEST_CASE(Random, CLBatchNormalizationLayerFixedPointFixture<int8_t>, framework::DatasetMode::PRECOMMIT,
+ combine(combine(combine(combine(combine(datasets::RandomBatchNormalizationLayerDataset(),
+ framework::dataset::make("UseBeta", false)),
+ framework::dataset::make("UseGamma", false)),
+ framework::dataset::make("ActivationInfo", ActivationLayerInfo())),
+ framework::dataset::make("DataType", DataType::QS8)),
+ framework::dataset::make("FractionalBits", 1, 6)))
{
// Validate output
validate(CLAccessor(_target), _reference, tolerance_qs8, 0);
@@ -186,10 +198,13 @@ FIXTURE_DATA_TEST_CASE(Random, CLBatchNormalizationLayerFixedPointFixture<int8_t
TEST_SUITE_END()
TEST_SUITE(QS16)
-FIXTURE_DATA_TEST_CASE(Random, CLBatchNormalizationLayerFixedPointFixture<int16_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::RandomBatchNormalizationLayerDataset(),
- framework::dataset::make("ActivationInfo", ActivationLayerInfo())),
- framework::dataset::make("DataType", DataType::QS16)),
- framework::dataset::make("FractionalBits", 1, 14)))
+FIXTURE_DATA_TEST_CASE(Random, CLBatchNormalizationLayerFixedPointFixture<int16_t>, framework::DatasetMode::PRECOMMIT,
+ combine(combine(combine(combine(combine(datasets::RandomBatchNormalizationLayerDataset(),
+ framework::dataset::make("UseBeta", false)),
+ framework::dataset::make("UseGamma", false)),
+ framework::dataset::make("ActivationInfo", ActivationLayerInfo())),
+ framework::dataset::make("DataType", DataType::QS16)),
+ framework::dataset::make("FractionalBits", 1, 14)))
{
// Validate output
validate(CLAccessor(_target), _reference, tolerance_qs16, 0);
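Before the same change repeats for the remaining backends, note how the nested combine(...) calls above expand: they form a cartesian product whose components bind positionally to the DATA_TEST_CASE parameter list, so the parameters must follow dataset order (shapes and epsilon, then UseBeta, then UseGamma, then DataType). A rough stand-alone sketch of that enumeration, using plain containers instead of the framework's dataset types:

#include <iostream>
#include <vector>

// Rough sketch of combine(combine(A, combine(B, C)), D): every
// (a, b, c, d) combination is visited, and each component binds
// positionally to the test case parameters.
int main()
{
    const std::vector<float> epsilons{ 0.1f, 0.001f }; // stand-in for the shape/epsilon dataset
    const std::vector<bool>  use_beta_set{ false, true };
    const std::vector<bool>  use_gamma_set{ false, true };

    for(float epsilon : epsilons)
    {
        for(bool use_beta : use_beta_set)
        {
            for(bool use_gamma : use_gamma_set)
            {
                std::cout << "epsilon=" << epsilon << " use_beta=" << use_beta
                          << " use_gamma=" << use_gamma << '\n';
            }
        }
    }
    return 0;
}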
diff --git a/tests/validation/GLES_COMPUTE/BatchNormalizationLayer.cpp b/tests/validation/GLES_COMPUTE/BatchNormalizationLayer.cpp
index d817fc0e67..2dbb0e0fbb 100644
--- a/tests/validation/GLES_COMPUTE/BatchNormalizationLayer.cpp
+++ b/tests/validation/GLES_COMPUTE/BatchNormalizationLayer.cpp
@@ -59,8 +59,11 @@ TEST_SUITE(BatchNormalizationLayer)
template <typename T>
using GCBatchNormalizationLayerFixture = BatchNormalizationLayerValidationFixture<GCTensor, GCAccessor, GCBatchNormalizationLayer, T>;
-DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(datasets::RandomBatchNormalizationLayerDataset(), framework::dataset::make("DataType", { DataType::F32 })),
- shape0, shape1, epsilon, dt)
+DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(datasets::RandomBatchNormalizationLayerDataset(),
+ combine(framework::dataset::make("UseBeta", { false, true }),
+ framework::dataset::make("UseGamma", { false, true }))),
+ framework::dataset::make("DataType", { DataType::F32 })),
+ shape0, shape1, epsilon, use_beta, use_gamma, dt)
{
// Set a fixed point position if the data type is fixed point
int fixed_point_position = (arm_compute::is_data_type_fixed_point(dt)) ? 3 : 0;
@@ -75,7 +78,9 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(datasets::Ran
// Create and Configure function
GCBatchNormalizationLayer norm;
- norm.configure(&src, &dst, &mean, &var, &beta, &gamma, epsilon);
+ GCTensor *beta_ptr = use_beta ? &beta : nullptr;
+ GCTensor *gamma_ptr = use_gamma ? &gamma : nullptr;
+ norm.configure(&src, &dst, &mean, &var, beta_ptr, gamma_ptr, epsilon);
// Validate valid region
const ValidRegion valid_region = shape_to_valid_region(shape0);
@@ -84,7 +89,9 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(datasets::Ran
TEST_SUITE(Float)
TEST_SUITE(FP16)
-FIXTURE_DATA_TEST_CASE(Random, GCBatchNormalizationLayerFixture<half>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::RandomBatchNormalizationLayerDataset(),
+FIXTURE_DATA_TEST_CASE(Random, GCBatchNormalizationLayerFixture<half>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::RandomBatchNormalizationLayerDataset(),
+ combine(framework::dataset::make("UseBeta", { false, true }),
+ framework::dataset::make("UseGamma", { false, true }))),
act_infos),
framework::dataset::make("DataType", DataType::F16)))
{
@@ -94,7 +101,9 @@ FIXTURE_DATA_TEST_CASE(Random, GCBatchNormalizationLayerFixture<half>, framework
TEST_SUITE_END()
TEST_SUITE(FP32)
-FIXTURE_DATA_TEST_CASE(Random, GCBatchNormalizationLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::RandomBatchNormalizationLayerDataset(),
+FIXTURE_DATA_TEST_CASE(Random, GCBatchNormalizationLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::RandomBatchNormalizationLayerDataset(),
+ combine(framework::dataset::make("UseBeta", { false, true }),
+ framework::dataset::make("UseGamma", { false, true }))),
act_infos),
framework::dataset::make("DataType", DataType::F32)))
{
diff --git a/tests/validation/NEON/BatchNormalizationLayer.cpp b/tests/validation/NEON/BatchNormalizationLayer.cpp
index 054ed278a2..7bf1f2633e 100644
--- a/tests/validation/NEON/BatchNormalizationLayer.cpp
+++ b/tests/validation/NEON/BatchNormalizationLayer.cpp
@@ -63,8 +63,10 @@ TEST_SUITE(BatchNormalizationLayer)
template <typename T>
using NEBatchNormalizationLayerFixture = BatchNormalizationLayerValidationFixture<Tensor, Accessor, NEBatchNormalizationLayer, T>;
-DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(datasets::RandomBatchNormalizationLayerDataset(), framework::dataset::make("DataType", { DataType::QS8, DataType::QS16, DataType::F32 })),
- shape0, shape1, epsilon, dt)
+DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(datasets::RandomBatchNormalizationLayerDataset(),
+ combine(framework::dataset::make("UseBeta", { false, true }), framework::dataset::make("UseGamma", { false, true }))),
+ framework::dataset::make("DataType", { DataType::QS8, DataType::QS16, DataType::F32 })),
+ shape0, shape1, epsilon, use_beta, use_gamma, dt)
{
// Set a fixed point position if the data type is fixed point
const int fixed_point_position = (arm_compute::is_data_type_fixed_point(dt)) ? 3 : 0;
@@ -79,7 +81,9 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(datasets::Ran
// Create and Configure function
NEBatchNormalizationLayer norm;
- norm.configure(&src, &dst, &mean, &var, &beta, &gamma, epsilon);
+ Tensor *beta_ptr = use_beta ? &beta : nullptr;
+ Tensor *gamma_ptr = use_gamma ? &gamma : nullptr;
+ norm.configure(&src, &dst, &mean, &var, beta_ptr, gamma_ptr, epsilon);
// Validate valid region
const ValidRegion valid_region = shape_to_valid_region(shape0);
@@ -150,7 +154,9 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(
// *INDENT-ON*
TEST_SUITE(Float)
-FIXTURE_DATA_TEST_CASE(Random, NEBatchNormalizationLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::RandomBatchNormalizationLayerDataset(),
+FIXTURE_DATA_TEST_CASE(Random, NEBatchNormalizationLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::RandomBatchNormalizationLayerDataset(),
+ combine(framework::dataset::make("UseBeta", { false, true }),
+ framework::dataset::make("UseGamma", { false, true }))),
act_infos),
framework::dataset::make("DataType", DataType::F32)))
{
@@ -161,7 +167,9 @@ TEST_SUITE_END()
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
TEST_SUITE(Float16)
-FIXTURE_DATA_TEST_CASE(Random, NEBatchNormalizationLayerFixture<half>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::RandomBatchNormalizationLayerDataset(),
+FIXTURE_DATA_TEST_CASE(Random, NEBatchNormalizationLayerFixture<half>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::RandomBatchNormalizationLayerDataset(),
+ combine(framework::dataset::make("UseBeta", { false, true }),
+ framework::dataset::make("UseGamma", { false, true }))),
framework::dataset::make("ActivationInfo", ActivationLayerInfo())),
framework::dataset::make("DataType", DataType::F16)))
{
@@ -176,10 +184,13 @@ template <typename T>
using NEBatchNormalizationLayerFixedPointFixture = BatchNormalizationLayerValidationFixedPointFixture<Tensor, Accessor, NEBatchNormalizationLayer, T>;
TEST_SUITE(QS8)
-FIXTURE_DATA_TEST_CASE(Random, NEBatchNormalizationLayerFixedPointFixture<int8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::RandomBatchNormalizationLayerDataset(),
- framework::dataset::make("ActivationInfo", ActivationLayerInfo())),
- framework::dataset::make("DataType", DataType::QS8)),
- framework::dataset::make("FractionalBits", 1, 6)))
+FIXTURE_DATA_TEST_CASE(Random, NEBatchNormalizationLayerFixedPointFixture<int8_t>, framework::DatasetMode::PRECOMMIT,
+ combine(combine(combine(combine(combine(datasets::RandomBatchNormalizationLayerDataset(),
+ framework::dataset::make("UseBeta", false)),
+ framework::dataset::make("UseGamma", false)),
+ framework::dataset::make("ActivationInfo", ActivationLayerInfo())),
+ framework::dataset::make("DataType", DataType::QS8)),
+ framework::dataset::make("FractionalBits", 1, 6)))
{
// Validate output
validate(Accessor(_target), _reference, tolerance_qs8, 0);
@@ -187,10 +198,13 @@ FIXTURE_DATA_TEST_CASE(Random, NEBatchNormalizationLayerFixedPointFixture<int8_t
TEST_SUITE_END()
TEST_SUITE(QS16)
-FIXTURE_DATA_TEST_CASE(Random, NEBatchNormalizationLayerFixedPointFixture<int16_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::RandomBatchNormalizationLayerDataset(),
- framework::dataset::make("ActivationInfo", ActivationLayerInfo())),
- framework::dataset::make("DataType", DataType::QS16)),
- framework::dataset::make("FractionalBits", 1, 14)))
+FIXTURE_DATA_TEST_CASE(Random, NEBatchNormalizationLayerFixedPointFixture<int16_t>, framework::DatasetMode::PRECOMMIT,
+ combine(combine(combine(combine(combine(datasets::RandomBatchNormalizationLayerDataset(),
+ framework::dataset::make("UseBeta", false)),
+ framework::dataset::make("UseGamma", false)),
+ framework::dataset::make("ActivationInfo", ActivationLayerInfo())),
+ framework::dataset::make("DataType", DataType::QS16)),
+ framework::dataset::make("FractionalBits", 1, 14)))
{
// Validate output
validate(Accessor(_target), _reference, tolerance_qs16, 0);
diff --git a/tests/validation/fixtures/BatchNormalizationLayerFixture.h b/tests/validation/fixtures/BatchNormalizationLayerFixture.h
index e02c619249..4a6ac1af7f 100644
--- a/tests/validation/fixtures/BatchNormalizationLayerFixture.h
+++ b/tests/validation/fixtures/BatchNormalizationLayerFixture.h
@@ -45,10 +45,12 @@ class BatchNormalizationLayerValidationFixedPointFixture : public framework::Fix
{
public:
template <typename...>
- void setup(TensorShape shape0, TensorShape shape1, float epsilon, ActivationLayerInfo act_info, DataType dt, int fractional_bits)
+ void setup(TensorShape shape0, TensorShape shape1, float epsilon, bool use_beta, bool use_gamma, ActivationLayerInfo act_info, DataType dt, int fractional_bits)
{
_fractional_bits = fractional_bits;
_data_type = dt;
+ _use_beta = use_beta;
+ _use_gamma = use_gamma;
_target = compute_target(shape0, shape1, epsilon, act_info, dt, fractional_bits);
_reference = compute_reference(shape0, shape1, epsilon, act_info, dt, fractional_bits);
}
@@ -67,8 +69,24 @@ protected:
library->fill(src_tensor, distribution, 0);
library->fill(mean_tensor, distribution, 1);
library->fill(var_tensor, distribution_var, 0);
- library->fill(beta_tensor, distribution, 3);
- library->fill(gamma_tensor, distribution, 4);
+ if(_use_beta)
+ {
+ library->fill(beta_tensor, distribution, 3);
+ }
+ else
+ {
+ // Fill with default value 0.f
+ library->fill_tensor_value(beta_tensor, 0.f);
+ }
+ if(_use_gamma)
+ {
+ library->fill(gamma_tensor, distribution, 4);
+ }
+ else
+ {
+ // Fill with default value 1.f
+ library->fill_tensor_value(gamma_tensor, 1.f);
+ }
}
else
{
@@ -80,8 +98,24 @@ protected:
library->fill(src_tensor, distribution, 0);
library->fill(mean_tensor, distribution, 1);
library->fill(var_tensor, distribution_var, 0);
- library->fill(beta_tensor, distribution, 3);
- library->fill(gamma_tensor, distribution, 4);
+ if(_use_beta)
+ {
+ library->fill(beta_tensor, distribution, 3);
+ }
+ else
+ {
+ // Fill with default value 0
+ library->fill_tensor_value(beta_tensor, static_cast<T>(0));
+ }
+ if(_use_gamma)
+ {
+ library->fill(gamma_tensor, distribution, 4);
+ }
+ else
+ {
+                          // Fill with default value 1 (1 << _fractional_bits is 1.0 in fixed-point format)
+ library->fill_tensor_value(gamma_tensor, static_cast<T>(1 << (_fractional_bits)));
+ }
}
}
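The fixed-point branch fills gamma with 1 << _fractional_bits because a Q-format value with n fractional bits stores a real number v as the integer v * 2^n, so 1.0 maps to 1 << n (for example, 64 when n = 6). A short worked example of that conversion:

#include <cstdint>
#include <iostream>

// Worked example of the Q-format convention behind the QS8/QS16 fills:
// a real value v is stored as the integer v * 2^n for n fractional bits.
int main()
{
    const int     fractional_bits = 6;                               // e.g. FractionalBits = 6
    const int16_t one_fixed       = int16_t{ 1 } << fractional_bits; // 1.0f -> 64
    const float   one_real        = static_cast<float>(one_fixed) / (1 << fractional_bits);

    std::cout << "1.0 in fixed point: " << one_fixed // prints 64
              << ", converted back: " << one_real << '\n';
    return 0;
}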
@@ -97,7 +131,9 @@ protected:
// Create and configure function
FunctionType norm;
- norm.configure(&src, &dst, &mean, &var, &beta, &gamma, epsilon, act_info);
+ TensorType *beta_ptr = _use_beta ? &beta : nullptr;
+ TensorType *gamma_ptr = _use_gamma ? &gamma : nullptr;
+ norm.configure(&src, &dst, &mean, &var, beta_ptr, gamma_ptr, epsilon, act_info);
ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
@@ -149,6 +185,8 @@ protected:
SimpleTensor<T> _reference{};
int _fractional_bits{};
DataType _data_type{};
+ bool _use_beta{};
+ bool _use_gamma{};
};
template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
@@ -156,9 +194,9 @@ class BatchNormalizationLayerValidationFixture : public BatchNormalizationLayerV
{
public:
template <typename...>
- void setup(TensorShape shape0, TensorShape shape1, float epsilon, ActivationLayerInfo act_info, DataType dt)
+ void setup(TensorShape shape0, TensorShape shape1, float epsilon, bool use_beta, bool use_gamma, ActivationLayerInfo act_info, DataType dt)
{
- BatchNormalizationLayerValidationFixedPointFixture<TensorType, AccessorType, FunctionType, T>::setup(shape0, shape1, epsilon, act_info, dt, 0);
+ BatchNormalizationLayerValidationFixedPointFixture<TensorType, AccessorType, FunctionType, T>::setup(shape0, shape1, epsilon, use_beta, use_gamma, act_info, dt, 0);
}
};
} // namespace validation
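The fixture thus pairs two strategies: the function under test receives nullptr for a disabled parameter, while the reference side keeps the tensor but fills it with the neutral element (0 for beta, 1 for gamma), so both paths must agree numerically. A scalar check of that invariant, assuming the standard batch normalization formula:

#include <cassert>
#include <cmath>

// Scalar check of the fixture's invariant: normalizing without beta/gamma
// equals normalizing with the neutral fills beta = 0 and gamma = 1.
float batch_norm(float x, float mean, float var, float epsilon, float beta, float gamma)
{
    return beta + gamma * (x - mean) / std::sqrt(var + epsilon);
}

int main()
{
    const float x = 2.f, mean = 1.f, var = 4.f, epsilon = 1e-5f;

    const float plain        = (x - mean) / std::sqrt(var + epsilon); // no beta/gamma
    const float neutral_fill = batch_norm(x, mean, var, epsilon, 0.f, 1.f);

    assert(std::fabs(plain - neutral_fill) < 1e-6f);
    return 0;
}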
diff --git a/tests/validation/reference/BatchNormalizationLayer.cpp b/tests/validation/reference/BatchNormalizationLayer.cpp
index a9d9f0320d..c8badacc79 100644
--- a/tests/validation/reference/BatchNormalizationLayer.cpp
+++ b/tests/validation/reference/BatchNormalizationLayer.cpp
@@ -106,7 +106,6 @@ SimpleTensor<T> batch_normalization_layer(const SimpleTensor<T> &src, const Simp
const float numerator = src[pos] - mean[i];
const float x_bar = numerator / denominator;
result[pos] = beta[i] + x_bar * gamma[i];
- ;
}
}
}