Diffstat (limited to 'tests')
-rw-r--r--  tests/AssetsLibrary.h                                        16
-rw-r--r--  tests/benchmark/CL/BatchNormalizationLayer.cpp               24
-rw-r--r--  tests/benchmark/GLES_COMPUTE/BatchNormalizationLayer.cpp     28
-rw-r--r--  tests/benchmark/NEON/BatchNormalizationLayer.cpp             24
-rw-r--r--  tests/benchmark/fixtures/BatchNormalizationLayerFixture.h     6
-rw-r--r--  tests/validation/CL/BatchNormalizationLayer.cpp              41
-rw-r--r--  tests/validation/GLES_COMPUTE/BatchNormalizationLayer.cpp    19
-rw-r--r--  tests/validation/NEON/BatchNormalizationLayer.cpp            40
-rw-r--r--  tests/validation/fixtures/BatchNormalizationLayerFixture.h   54
-rw-r--r--  tests/validation/reference/BatchNormalizationLayer.cpp        1
10 files changed, 191 insertions(+), 62 deletions(-)
diff --git a/tests/AssetsLibrary.h b/tests/AssetsLibrary.h
index ae88824298..4bbe4c56f6 100644
--- a/tests/AssetsLibrary.h
+++ b/tests/AssetsLibrary.h
@@ -352,6 +352,16 @@ public:
template <typename T>
void fill_layer_data(T &&tensor, std::string name) const;
+ /** Fill a tensor with a constant value
+ *
+ * @param[in, out] tensor Tensor to be filled.
+ * @param[in]      value  Value assigned to all elements of the tensor.
+ *
+ * @note @p value must have the same data type as @p tensor.
+ */
+ template <typename T, typename D>
+ void fill_tensor_value(T &&tensor, D value) const;
+
private:
// Function prototype to convert between image formats.
using Converter = void (*)(const RawTensor &src, RawTensor &dst);
@@ -774,6 +784,12 @@ void AssetsLibrary::fill_layer_data(T &&tensor, std::string name) const
});
}
}
+
+template <typename T, typename D>
+void AssetsLibrary::fill_tensor_value(T &&tensor, D value) const
+{
+ fill_tensor_uniform(tensor, 0, value, value);
+}
} // namespace test
} // namespace arm_compute
#endif /* __ARM_COMPUTE_TEST_TENSOR_LIBRARY_H__ */
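
The helper added above is deliberately thin: it delegates to fill_tensor_uniform with the degenerate range [value, value], so every element receives the same constant. A minimal usage sketch following the existing test conventions (the tensor shape and setup here are illustrative, not part of the patch):

    // Illustrative only: constant-fill a freshly allocated FP32 tensor.
    // `library` is the global AssetsLibrary instance used across the tests.
    Tensor gamma = create_tensor<Tensor>(TensorShape(64U), DataType::F32);
    gamma.allocator()->allocate();

    // Equivalent to fill_tensor_uniform(Accessor(gamma), 0, 1.f, 1.f):
    // a degenerate uniform distribution that assigns 1.f to every element.
    library->fill_tensor_value(Accessor(gamma), 1.f);
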
diff --git a/tests/benchmark/CL/BatchNormalizationLayer.cpp b/tests/benchmark/CL/BatchNormalizationLayer.cpp
index 82c780008b..3312319aac 100644
--- a/tests/benchmark/CL/BatchNormalizationLayer.cpp
+++ b/tests/benchmark/CL/BatchNormalizationLayer.cpp
@@ -51,19 +51,25 @@ using CLBatchNormalizationLayerFixture = BatchNormalizationLayerFixture<CLTensor
TEST_SUITE(CL)
REGISTER_FIXTURE_DATA_TEST_CASE(MobileNetBatchNormalizationLayer, CLBatchNormalizationLayerFixture, framework::DatasetMode::ALL,
- framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::MobileNetBatchNormalizationLayerDataset(),
+ framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::MobileNetBatchNormalizationLayerDataset(),
+ framework::dataset::combine(framework::dataset::make("UseGamma", { false, true }),
+ framework::dataset::make("UseBeta", { false, true }))),
framework::dataset::make("ActivationInfo", ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.f))),
data_types),
framework::dataset::make("Batches", 1)));
REGISTER_FIXTURE_DATA_TEST_CASE(YOLOV2BatchNormalizationLayer, CLBatchNormalizationLayerFixture, framework::DatasetMode::ALL,
- framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::YOLOV2BatchNormalizationLayerDataset(),
+ framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::YOLOV2BatchNormalizationLayerDataset(),
+ framework::dataset::combine(framework::dataset::make("UseGamma", { false, true }),
+ framework::dataset::make("UseBeta", { false, true }))),
framework::dataset::make("ActivationInfo", ActivationLayerInfo())),
data_types),
framework::dataset::make("Batches", 1)));
REGISTER_FIXTURE_DATA_TEST_CASE(GoogLeNetInceptionV4BatchNormalizationLayer, CLBatchNormalizationLayerFixture, framework::DatasetMode::ALL,
- framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::GoogLeNetInceptionV4BatchNormalizationLayerDataset(),
+ framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::GoogLeNetInceptionV4BatchNormalizationLayerDataset(),
+ framework::dataset::combine(framework::dataset::make("UseGamma", { false, true }),
+ framework::dataset::make("UseBeta", { false, true }))),
framework::dataset::make("ActivationInfo", ActivationLayerInfo())),
data_types),
framework::dataset::make("Batches", 1)));
@@ -71,19 +77,25 @@ REGISTER_FIXTURE_DATA_TEST_CASE(GoogLeNetInceptionV4BatchNormalizationLayer, CLB
TEST_SUITE(NIGHTLY)
REGISTER_FIXTURE_DATA_TEST_CASE(MobileNetBatchNormalizationLayer, CLBatchNormalizationLayerFixture, framework::DatasetMode::NIGHTLY,
- framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::MobileNetBatchNormalizationLayerDataset(),
+ framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::MobileNetBatchNormalizationLayerDataset(),
+ framework::dataset::combine(framework::dataset::make("UseGamma", { false, true }),
+ framework::dataset::make("UseBeta", { false, true }))),
framework::dataset::make("ActivationInfo", ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.f))),
data_types),
framework::dataset::make("Batches", { 4, 8 })));
REGISTER_FIXTURE_DATA_TEST_CASE(YOLOV2BatchNormalizationLayer, CLBatchNormalizationLayerFixture, framework::DatasetMode::NIGHTLY,
- framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::YOLOV2BatchNormalizationLayerDataset(),
+ framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::YOLOV2BatchNormalizationLayerDataset(),
+ framework::dataset::combine(framework::dataset::make("UseGamma", { false, true }),
+ framework::dataset::make("UseBeta", { false, true }))),
framework::dataset::make("ActivationInfo", ActivationLayerInfo())),
data_types),
framework::dataset::make("Batches", { 4, 8 })));
REGISTER_FIXTURE_DATA_TEST_CASE(GoogLeNetInceptionV4BatchNormalizationLayer, CLBatchNormalizationLayerFixture, framework::DatasetMode::NIGHTLY,
- framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::GoogLeNetInceptionV4BatchNormalizationLayerDataset(),
+ framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::GoogLeNetInceptionV4BatchNormalizationLayerDataset(),
+ framework::dataset::combine(framework::dataset::make("UseGamma", { false, true }),
+ framework::dataset::make("UseBeta", { false, true }))),
framework::dataset::make("ActivationInfo", ActivationLayerInfo())),
data_types),
framework::dataset::make("Batches", { 4, 8 })));
diff --git a/tests/benchmark/GLES_COMPUTE/BatchNormalizationLayer.cpp b/tests/benchmark/GLES_COMPUTE/BatchNormalizationLayer.cpp
index 6e5836ef9e..9a2950b88d 100644
--- a/tests/benchmark/GLES_COMPUTE/BatchNormalizationLayer.cpp
+++ b/tests/benchmark/GLES_COMPUTE/BatchNormalizationLayer.cpp
@@ -51,19 +51,25 @@ using GCBatchNormalizationLayerFixture = BatchNormalizationLayerFixture<GCTensor
TEST_SUITE(GC)
REGISTER_FIXTURE_DATA_TEST_CASE(MobileNetBatchNormalizationLayer, GCBatchNormalizationLayerFixture, framework::DatasetMode::ALL,
- framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::MobileNetBatchNormalizationLayerDataset(),
+ framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::MobileNetBatchNormalizationLayerDataset(),
+ framework::dataset::combine(framework::dataset::make("UseGamma", { false, true }),
+ framework::dataset::make("UseBeta", { false, true }))),
framework::dataset::make("ActivationInfo", ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.f))),
data_types),
framework::dataset::make("Batches", 1)));
REGISTER_FIXTURE_DATA_TEST_CASE(YOLOV2BatchNormalizationLayer, GCBatchNormalizationLayerFixture, framework::DatasetMode::ALL,
- framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::YOLOV2BatchNormalizationLayerDataset(), framework::dataset::make("ActivationInfo",
- ActivationLayerInfo())),
+ framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::YOLOV2BatchNormalizationLayerDataset(),
+ framework::dataset::combine(framework::dataset::make("UseGamma", { false, true }),
+ framework::dataset::make("UseBeta", { false, true }))),
+ framework::dataset::make("ActivationInfo", ActivationLayerInfo())),
data_types),
framework::dataset::make("Batches", 1)));
REGISTER_FIXTURE_DATA_TEST_CASE(GoogLeNetInceptionV4BatchNormalizationLayer, GCBatchNormalizationLayerFixture, framework::DatasetMode::ALL,
- framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::GoogLeNetInceptionV4BatchNormalizationLayerDataset(),
+ framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::GoogLeNetInceptionV4BatchNormalizationLayerDataset(),
+ framework::dataset::combine(framework::dataset::make("UseGamma", { false, true }),
+ framework::dataset::make("UseBeta", { false, true }))),
framework::dataset::make("ActivationInfo", ActivationLayerInfo())),
data_types),
framework::dataset::make("Batches", 1)));
@@ -71,19 +77,25 @@ REGISTER_FIXTURE_DATA_TEST_CASE(GoogLeNetInceptionV4BatchNormalizationLayer, GCB
TEST_SUITE(NIGHTLY)
REGISTER_FIXTURE_DATA_TEST_CASE(MobileNetBatchNormalizationLayer, GCBatchNormalizationLayerFixture, framework::DatasetMode::NIGHTLY,
- framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::MobileNetBatchNormalizationLayerDataset(),
+ framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::MobileNetBatchNormalizationLayerDataset(),
+ framework::dataset::combine(framework::dataset::make("UseGamma", { false, true }),
+ framework::dataset::make("UseBeta", { false, true }))),
framework::dataset::make("ActivationInfo", ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.f))),
data_types),
framework::dataset::make("Batches", { 4, 8 })));
REGISTER_FIXTURE_DATA_TEST_CASE(YOLOV2BatchNormalizationLayer, GCBatchNormalizationLayerFixture, framework::DatasetMode::NIGHTLY,
- framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::YOLOV2BatchNormalizationLayerDataset(), framework::dataset::make("ActivationInfo",
- ActivationLayerInfo())),
+ framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::YOLOV2BatchNormalizationLayerDataset(),
+ framework::dataset::combine(framework::dataset::make("UseGamma", { false, true }),
+ framework::dataset::make("UseBeta", { false, true }))),
+ framework::dataset::make("ActivationInfo", ActivationLayerInfo())),
data_types),
framework::dataset::make("Batches", { 4, 8 })));
REGISTER_FIXTURE_DATA_TEST_CASE(GoogLeNetInceptionV4BatchNormalizationLayer, GCBatchNormalizationLayerFixture, framework::DatasetMode::NIGHTLY,
- framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::GoogLeNetInceptionV4BatchNormalizationLayerDataset(),
+ framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::GoogLeNetInceptionV4BatchNormalizationLayerDataset(),
+ framework::dataset::combine(framework::dataset::make("UseGamma", { false, true }),
+ framework::dataset::make("UseBeta", { false, true }))),
framework::dataset::make("ActivationInfo", ActivationLayerInfo())),
data_types),
framework::dataset::make("Batches", { 4, 8 })));
diff --git a/tests/benchmark/NEON/BatchNormalizationLayer.cpp b/tests/benchmark/NEON/BatchNormalizationLayer.cpp
index 6d28318b2e..786a5b1701 100644
--- a/tests/benchmark/NEON/BatchNormalizationLayer.cpp
+++ b/tests/benchmark/NEON/BatchNormalizationLayer.cpp
@@ -56,36 +56,48 @@ using NEBatchNormalizationLayerFixture = BatchNormalizationLayerFixture<Tensor,
TEST_SUITE(NEON)
REGISTER_FIXTURE_DATA_TEST_CASE(MobileNetBatchNormalizationLayer, NEBatchNormalizationLayerFixture, framework::DatasetMode::ALL,
- framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::MobileNetBatchNormalizationLayerDataset(),
+ framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::MobileNetBatchNormalizationLayerDataset(),
+ framework::dataset::combine(framework::dataset::make("UseGamma", { false, true }),
+ framework::dataset::make("UseBeta", { false, true }))),
framework::dataset::make("ActivationInfo", ActivationLayerInfo())),
data_types),
framework::dataset::make("Batches", 1)));
REGISTER_FIXTURE_DATA_TEST_CASE(YOLOV2BatchNormalizationLayer, NEBatchNormalizationLayerFixture, framework::DatasetMode::ALL,
- framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::YOLOV2BatchNormalizationLayerDataset(),
+ framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::YOLOV2BatchNormalizationLayerDataset(),
+ framework::dataset::combine(framework::dataset::make("UseGamma", { false, true }),
+ framework::dataset::make("UseBeta", { false, true }))),
framework::dataset::make("ActivationInfo", ActivationLayerInfo())),
data_types),
framework::dataset::make("Batches", 1)));
REGISTER_FIXTURE_DATA_TEST_CASE(GoogLeNetInceptionV4BatchNormalizationLayer, NEBatchNormalizationLayerFixture, framework::DatasetMode::ALL,
- framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::GoogLeNetInceptionV4BatchNormalizationLayerDataset(),
+ framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::GoogLeNetInceptionV4BatchNormalizationLayerDataset(),
+ framework::dataset::combine(framework::dataset::make("UseGamma", { false, true }),
+ framework::dataset::make("UseBeta", { false, true }))),
framework::dataset::make("ActivationInfo", ActivationLayerInfo())),
data_types),
framework::dataset::make("Batches", 1)));
TEST_SUITE(NIGHTLY)
REGISTER_FIXTURE_DATA_TEST_CASE(MobileNetBatchNormalizationLayer, NEBatchNormalizationLayerFixture, framework::DatasetMode::NIGHTLY,
- framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::MobileNetBatchNormalizationLayerDataset(),
+ framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::MobileNetBatchNormalizationLayerDataset(),
+ framework::dataset::combine(framework::dataset::make("UseGamma", { false, true }),
+ framework::dataset::make("UseBeta", { false, true }))),
framework::dataset::make("ActivationInfo", ActivationLayerInfo())),
data_types),
framework::dataset::make("Batches", { 4, 8 })));
REGISTER_FIXTURE_DATA_TEST_CASE(YOLOV2BatchNormalizationLayer, NEBatchNormalizationLayerFixture, framework::DatasetMode::NIGHTLY,
- framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::YOLOV2BatchNormalizationLayerDataset(),
+ framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::YOLOV2BatchNormalizationLayerDataset(),
+ framework::dataset::combine(framework::dataset::make("UseGamma", { false, true }),
+ framework::dataset::make("UseBeta", { false, true }))),
framework::dataset::make("ActivationInfo", ActivationLayerInfo())),
data_types),
framework::dataset::make("Batches", { 4, 8 })));
REGISTER_FIXTURE_DATA_TEST_CASE(GoogLeNetInceptionV4BatchNormalizationLayer, NEBatchNormalizationLayerFixture, framework::DatasetMode::NIGHTLY,
- framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::GoogLeNetInceptionV4BatchNormalizationLayerDataset(),
+ framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::GoogLeNetInceptionV4BatchNormalizationLayerDataset(),
+ framework::dataset::combine(framework::dataset::make("UseGamma", { false, true }),
+ framework::dataset::make("UseBeta", { false, true }))),
framework::dataset::make("ActivationInfo", ActivationLayerInfo())),
data_types),
framework::dataset::make("Batches", { 4, 8 })));
diff --git a/tests/benchmark/fixtures/BatchNormalizationLayerFixture.h b/tests/benchmark/fixtures/BatchNormalizationLayerFixture.h
index fbb7700710..c55bb2acc9 100644
--- a/tests/benchmark/fixtures/BatchNormalizationLayerFixture.h
+++ b/tests/benchmark/fixtures/BatchNormalizationLayerFixture.h
@@ -42,7 +42,7 @@ class BatchNormalizationLayerFixture : public framework::Fixture
{
public:
template <typename...>
- void setup(TensorShape tensor_shape, TensorShape param_shape, float epsilon, ActivationLayerInfo act_info, DataType data_type, int batches)
+ void setup(TensorShape tensor_shape, TensorShape param_shape, float epsilon, bool use_gamma, bool use_beta, ActivationLayerInfo act_info, DataType data_type, int batches)
{
// Set batched in source and destination shapes
const unsigned int fixed_point_position = 4;
@@ -57,7 +57,9 @@ public:
gamma = create_tensor<TensorType>(param_shape, data_type, 1, fixed_point_position);
// Create and configure function
- batch_norm_layer.configure(&src, &dst, &mean, &variance, &beta, &gamma, epsilon, act_info);
+ TensorType *beta_ptr = use_beta ? &beta : nullptr;
+ TensorType *gamma_ptr = use_gamma ? &gamma : nullptr;
+ batch_norm_layer.configure(&src, &dst, &mean, &variance, beta_ptr, gamma_ptr, epsilon, act_info);
// Allocate tensors
src.allocator()->allocate();
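
The two-line pointer selection is the convention the whole patch hangs on: passing nullptr for beta or gamma asks configure() to substitute the neutral defaults (beta = 0, no shift; gamma = 1, no scale) instead of reading a parameter tensor. Reduced to its core (a sketch of the pattern, not additional patch code):

    // nullptr means "use the default" for the optional batch-norm parameters.
    TensorType *beta_ptr  = use_beta ? &beta : nullptr;   // default beta  = 0
    TensorType *gamma_ptr = use_gamma ? &gamma : nullptr; // default gamma = 1
    batch_norm_layer.configure(&src, &dst, &mean, &variance,
                               beta_ptr, gamma_ptr, epsilon, act_info);
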
diff --git a/tests/validation/CL/BatchNormalizationLayer.cpp b/tests/validation/CL/BatchNormalizationLayer.cpp
index ef535153f2..8c143060cb 100644
--- a/tests/validation/CL/BatchNormalizationLayer.cpp
+++ b/tests/validation/CL/BatchNormalizationLayer.cpp
@@ -61,8 +61,11 @@ TEST_SUITE(BatchNormalizationLayer)
template <typename T>
using CLBatchNormalizationLayerFixture = BatchNormalizationLayerValidationFixture<CLTensor, CLAccessor, CLBatchNormalizationLayer, T>;
-DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(datasets::RandomBatchNormalizationLayerDataset(), framework::dataset::make("DataType", { DataType::QS8, DataType::QS16, DataType::F16, DataType::F32 })),
- shape0, shape1, epsilon, dt)
+DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(datasets::RandomBatchNormalizationLayerDataset(),
+ combine(framework::dataset::make("UseBeta", { false, true }),
+ framework::dataset::make("UseGamma", { false, true }))),
+ framework::dataset::make("DataType", { DataType::QS8, DataType::QS16, DataType::F16, DataType::F32 })),
+               shape0, shape1, epsilon, use_beta, use_gamma, dt)
{
// Set fixed point position data type allowed
const int fixed_point_position = (arm_compute::is_data_type_fixed_point(dt)) ? 3 : 0;
@@ -77,7 +80,9 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(datasets::Ran
// Create and Configure function
CLBatchNormalizationLayer norm;
- norm.configure(&src, &dst, &mean, &var, &beta, &gamma, epsilon);
+ CLTensor *beta_ptr = use_beta ? &beta : nullptr;
+ CLTensor *gamma_ptr = use_gamma ? &gamma : nullptr;
+ norm.configure(&src, &dst, &mean, &var, beta_ptr, gamma_ptr, epsilon);
// Validate valid region
const ValidRegion valid_region = shape_to_valid_region(shape0);
@@ -150,7 +155,9 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(
TEST_SUITE(Float)
TEST_SUITE(FP32)
-FIXTURE_DATA_TEST_CASE(Random, CLBatchNormalizationLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::RandomBatchNormalizationLayerDataset(),
+FIXTURE_DATA_TEST_CASE(Random, CLBatchNormalizationLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::RandomBatchNormalizationLayerDataset(),
+ combine(framework::dataset::make("UseBeta", { false, true }),
+ framework::dataset::make("UseGamma", { false, true }))),
act_infos),
framework::dataset::make("DataType", DataType::F32)))
{
@@ -160,7 +167,9 @@ FIXTURE_DATA_TEST_CASE(Random, CLBatchNormalizationLayerFixture<float>, framewor
TEST_SUITE_END()
TEST_SUITE(FP16)
-FIXTURE_DATA_TEST_CASE(Random, CLBatchNormalizationLayerFixture<half>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::RandomBatchNormalizationLayerDataset(),
+FIXTURE_DATA_TEST_CASE(Random, CLBatchNormalizationLayerFixture<half>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::RandomBatchNormalizationLayerDataset(),
+ combine(framework::dataset::make("UseBeta", { false, true }),
+ framework::dataset::make("UseGamma", { false, true }))),
framework::dataset::make("ActivationInfo", ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.f))),
framework::dataset::make("DataType", DataType::F16)))
{
@@ -175,10 +184,13 @@ template <typename T>
using CLBatchNormalizationLayerFixedPointFixture = BatchNormalizationLayerValidationFixedPointFixture<CLTensor, CLAccessor, CLBatchNormalizationLayer, T>;
TEST_SUITE(QS8)
-FIXTURE_DATA_TEST_CASE(Random, CLBatchNormalizationLayerFixedPointFixture<int8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::RandomBatchNormalizationLayerDataset(),
- framework::dataset::make("ActivationInfo", ActivationLayerInfo())),
- framework::dataset::make("DataType", DataType::QS8)),
- framework::dataset::make("FractionalBits", 1, 6)))
+FIXTURE_DATA_TEST_CASE(Random, CLBatchNormalizationLayerFixedPointFixture<int8_t>, framework::DatasetMode::PRECOMMIT,
+ combine(combine(combine(combine(combine(datasets::RandomBatchNormalizationLayerDataset(),
+ framework::dataset::make("UseBeta", false)),
+ framework::dataset::make("UseGamma", false)),
+ framework::dataset::make("ActivationInfo", ActivationLayerInfo())),
+ framework::dataset::make("DataType", DataType::QS8)),
+ framework::dataset::make("FractionalBits", 1, 6)))
{
// Validate output
validate(CLAccessor(_target), _reference, tolerance_qs8, 0);
@@ -186,10 +198,13 @@ FIXTURE_DATA_TEST_CASE(Random, CLBatchNormalizationLayerFixedPointFixture<int8_t
TEST_SUITE_END()
TEST_SUITE(QS16)
-FIXTURE_DATA_TEST_CASE(Random, CLBatchNormalizationLayerFixedPointFixture<int16_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::RandomBatchNormalizationLayerDataset(),
- framework::dataset::make("ActivationInfo", ActivationLayerInfo())),
- framework::dataset::make("DataType", DataType::QS16)),
- framework::dataset::make("FractionalBits", 1, 14)))
+FIXTURE_DATA_TEST_CASE(Random, CLBatchNormalizationLayerFixedPointFixture<int16_t>, framework::DatasetMode::PRECOMMIT,
+ combine(combine(combine(combine(combine(datasets::RandomBatchNormalizationLayerDataset(),
+ framework::dataset::make("UseBeta", false)),
+ framework::dataset::make("UseGamma", false)),
+ framework::dataset::make("ActivationInfo", ActivationLayerInfo())),
+ framework::dataset::make("DataType", DataType::QS16)),
+ framework::dataset::make("FractionalBits", 1, 14)))
{
// Validate output
validate(CLAccessor(_target), _reference, tolerance_qs16, 0);
diff --git a/tests/validation/GLES_COMPUTE/BatchNormalizationLayer.cpp b/tests/validation/GLES_COMPUTE/BatchNormalizationLayer.cpp
index d817fc0e67..2dbb0e0fbb 100644
--- a/tests/validation/GLES_COMPUTE/BatchNormalizationLayer.cpp
+++ b/tests/validation/GLES_COMPUTE/BatchNormalizationLayer.cpp
@@ -59,8 +59,11 @@ TEST_SUITE(BatchNormalizationLayer)
template <typename T>
using GCBatchNormalizationLayerFixture = BatchNormalizationLayerValidationFixture<GCTensor, GCAccessor, GCBatchNormalizationLayer, T>;
-DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(datasets::RandomBatchNormalizationLayerDataset(), framework::dataset::make("DataType", { DataType::F32 })),
- shape0, shape1, epsilon, dt)
+DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(datasets::RandomBatchNormalizationLayerDataset(),
+ combine(framework::dataset::make("UseBeta", { false, true }),
+ framework::dataset::make("UseGamma", { false, true }))),
+ framework::dataset::make("DataType", { DataType::F32 })),
+ shape0, shape1, epsilon, use_beta, use_gamma, dt)
{
// Set fixed point position data type allowed
int fixed_point_position = (arm_compute::is_data_type_fixed_point(dt)) ? 3 : 0;
@@ -75,7 +78,9 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(datasets::Ran
// Create and Configure function
GCBatchNormalizationLayer norm;
- norm.configure(&src, &dst, &mean, &var, &beta, &gamma, epsilon);
+ GCTensor *beta_ptr = use_beta ? &beta : nullptr;
+ GCTensor *gamma_ptr = use_gamma ? &gamma : nullptr;
+ norm.configure(&src, &dst, &mean, &var, beta_ptr, gamma_ptr, epsilon);
// Validate valid region
const ValidRegion valid_region = shape_to_valid_region(shape0);
@@ -84,7 +89,9 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(datasets::Ran
TEST_SUITE(Float)
TEST_SUITE(FP16)
-FIXTURE_DATA_TEST_CASE(Random, GCBatchNormalizationLayerFixture<half>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::RandomBatchNormalizationLayerDataset(),
+FIXTURE_DATA_TEST_CASE(Random, GCBatchNormalizationLayerFixture<half>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::RandomBatchNormalizationLayerDataset(),
+ combine(framework::dataset::make("UseBeta", { false, true }),
+ framework::dataset::make("UseGamma", { false, true }))),
act_infos),
framework::dataset::make("DataType", DataType::F16)))
{
@@ -94,7 +101,9 @@ FIXTURE_DATA_TEST_CASE(Random, GCBatchNormalizationLayerFixture<half>, framework
TEST_SUITE_END()
TEST_SUITE(FP32)
-FIXTURE_DATA_TEST_CASE(Random, GCBatchNormalizationLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::RandomBatchNormalizationLayerDataset(),
+FIXTURE_DATA_TEST_CASE(Random, GCBatchNormalizationLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::RandomBatchNormalizationLayerDataset(),
+ combine(framework::dataset::make("UseBeta", { false, true }),
+ framework::dataset::make("UseGamma", { false, true }))),
act_infos),
framework::dataset::make("DataType", DataType::F32)))
{
diff --git a/tests/validation/NEON/BatchNormalizationLayer.cpp b/tests/validation/NEON/BatchNormalizationLayer.cpp
index 054ed278a2..7bf1f2633e 100644
--- a/tests/validation/NEON/BatchNormalizationLayer.cpp
+++ b/tests/validation/NEON/BatchNormalizationLayer.cpp
@@ -63,8 +63,10 @@ TEST_SUITE(BatchNormalizationLayer)
template <typename T>
using NEBatchNormalizationLayerFixture = BatchNormalizationLayerValidationFixture<Tensor, Accessor, NEBatchNormalizationLayer, T>;
-DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(datasets::RandomBatchNormalizationLayerDataset(), framework::dataset::make("DataType", { DataType::QS8, DataType::QS16, DataType::F32 })),
- shape0, shape1, epsilon, dt)
+DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(datasets::RandomBatchNormalizationLayerDataset(),
+ combine(framework::dataset::make("UseBeta", { false, true }), framework::dataset::make("UseGamma", { false, true }))),
+ framework::dataset::make("DataType", { DataType::QS8, DataType::QS16, DataType::F32 })),
+ shape0, shape1, epsilon, use_beta, use_gamma, dt)
{
// Set fixed point position data type allowed
const int fixed_point_position = (arm_compute::is_data_type_fixed_point(dt)) ? 3 : 0;
@@ -79,7 +81,9 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(datasets::Ran
// Create and Configure function
NEBatchNormalizationLayer norm;
- norm.configure(&src, &dst, &mean, &var, &beta, &gamma, epsilon);
+ Tensor *beta_ptr = use_beta ? &beta : nullptr;
+ Tensor *gamma_ptr = use_gamma ? &gamma : nullptr;
+ norm.configure(&src, &dst, &mean, &var, beta_ptr, gamma_ptr, epsilon);
// Validate valid region
const ValidRegion valid_region = shape_to_valid_region(shape0);
@@ -150,7 +154,9 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(
// *INDENT-ON*
TEST_SUITE(Float)
-FIXTURE_DATA_TEST_CASE(Random, NEBatchNormalizationLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::RandomBatchNormalizationLayerDataset(),
+FIXTURE_DATA_TEST_CASE(Random, NEBatchNormalizationLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::RandomBatchNormalizationLayerDataset(),
+ combine(framework::dataset::make("UseBeta", { false, true }),
+ framework::dataset::make("UseGamma", { false, true }))),
act_infos),
framework::dataset::make("DataType", DataType::F32)))
{
@@ -161,7 +167,9 @@ TEST_SUITE_END()
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
TEST_SUITE(Float16)
-FIXTURE_DATA_TEST_CASE(Random, NEBatchNormalizationLayerFixture<half>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::RandomBatchNormalizationLayerDataset(),
+FIXTURE_DATA_TEST_CASE(Random, NEBatchNormalizationLayerFixture<half>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::RandomBatchNormalizationLayerDataset(),
+ combine(framework::dataset::make("UseBeta", { false, true }),
+ framework::dataset::make("UseGamma", { false, true }))),
framework::dataset::make("ActivationInfo", ActivationLayerInfo())),
framework::dataset::make("DataType", DataType::F16)))
{
@@ -176,10 +184,13 @@ template <typename T>
using NEBatchNormalizationLayerFixedPointFixture = BatchNormalizationLayerValidationFixedPointFixture<Tensor, Accessor, NEBatchNormalizationLayer, T>;
TEST_SUITE(QS8)
-FIXTURE_DATA_TEST_CASE(Random, NEBatchNormalizationLayerFixedPointFixture<int8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::RandomBatchNormalizationLayerDataset(),
- framework::dataset::make("ActivationInfo", ActivationLayerInfo())),
- framework::dataset::make("DataType", DataType::QS8)),
- framework::dataset::make("FractionalBits", 1, 6)))
+FIXTURE_DATA_TEST_CASE(Random, NEBatchNormalizationLayerFixedPointFixture<int8_t>, framework::DatasetMode::PRECOMMIT,
+ combine(combine(combine(combine(combine(datasets::RandomBatchNormalizationLayerDataset(),
+ framework::dataset::make("UseBeta", false)),
+ framework::dataset::make("UseGamma", false)),
+ framework::dataset::make("ActivationInfo", ActivationLayerInfo())),
+ framework::dataset::make("DataType", DataType::QS8)),
+ framework::dataset::make("FractionalBits", 1, 6)))
{
// Validate output
validate(Accessor(_target), _reference, tolerance_qs8, 0);
@@ -187,10 +198,13 @@ FIXTURE_DATA_TEST_CASE(Random, NEBatchNormalizationLayerFixedPointFixture<int8_t
TEST_SUITE_END()
TEST_SUITE(QS16)
-FIXTURE_DATA_TEST_CASE(Random, NEBatchNormalizationLayerFixedPointFixture<int16_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::RandomBatchNormalizationLayerDataset(),
- framework::dataset::make("ActivationInfo", ActivationLayerInfo())),
- framework::dataset::make("DataType", DataType::QS16)),
- framework::dataset::make("FractionalBits", 1, 14)))
+FIXTURE_DATA_TEST_CASE(Random, NEBatchNormalizationLayerFixedPointFixture<int16_t>, framework::DatasetMode::PRECOMMIT,
+ combine(combine(combine(combine(combine(datasets::RandomBatchNormalizationLayerDataset(),
+ framework::dataset::make("UseBeta", false)),
+ framework::dataset::make("UseGamma", false)),
+ framework::dataset::make("ActivationInfo", ActivationLayerInfo())),
+ framework::dataset::make("DataType", DataType::QS16)),
+ framework::dataset::make("FractionalBits", 1, 14)))
{
// Validate output
validate(Accessor(_target), _reference, tolerance_qs16, 0);
diff --git a/tests/validation/fixtures/BatchNormalizationLayerFixture.h b/tests/validation/fixtures/BatchNormalizationLayerFixture.h
index e02c619249..4a6ac1af7f 100644
--- a/tests/validation/fixtures/BatchNormalizationLayerFixture.h
+++ b/tests/validation/fixtures/BatchNormalizationLayerFixture.h
@@ -45,10 +45,12 @@ class BatchNormalizationLayerValidationFixedPointFixture : public framework::Fix
{
public:
template <typename...>
- void setup(TensorShape shape0, TensorShape shape1, float epsilon, ActivationLayerInfo act_info, DataType dt, int fractional_bits)
+ void setup(TensorShape shape0, TensorShape shape1, float epsilon, bool use_beta, bool use_gamma, ActivationLayerInfo act_info, DataType dt, int fractional_bits)
{
_fractional_bits = fractional_bits;
_data_type = dt;
+ _use_beta = use_beta;
+ _use_gamma = use_gamma;
_target = compute_target(shape0, shape1, epsilon, act_info, dt, fractional_bits);
_reference = compute_reference(shape0, shape1, epsilon, act_info, dt, fractional_bits);
}
@@ -67,8 +69,24 @@ protected:
library->fill(src_tensor, distribution, 0);
library->fill(mean_tensor, distribution, 1);
library->fill(var_tensor, distribution_var, 0);
- library->fill(beta_tensor, distribution, 3);
- library->fill(gamma_tensor, distribution, 4);
+ if(_use_beta)
+ {
+ library->fill(beta_tensor, distribution, 3);
+ }
+ else
+ {
+ // Fill with default value 0.f
+ library->fill_tensor_value(beta_tensor, 0.f);
+ }
+ if(_use_gamma)
+ {
+ library->fill(gamma_tensor, distribution, 4);
+ }
+ else
+ {
+ // Fill with default value 1.f
+ library->fill_tensor_value(gamma_tensor, 1.f);
+ }
}
else
{
@@ -80,8 +98,24 @@ protected:
library->fill(src_tensor, distribution, 0);
library->fill(mean_tensor, distribution, 1);
library->fill(var_tensor, distribution_var, 0);
- library->fill(beta_tensor, distribution, 3);
- library->fill(gamma_tensor, distribution, 4);
+ if(_use_beta)
+ {
+ library->fill(beta_tensor, distribution, 3);
+ }
+ else
+ {
+ // Fill with default value 0
+ library->fill_tensor_value(beta_tensor, static_cast<T>(0));
+ }
+ if(_use_gamma)
+ {
+ library->fill(gamma_tensor, distribution, 4);
+ }
+ else
+ {
+                // Fill with default value 1 (fixed-point encoding of 1.0)
+                library->fill_tensor_value(gamma_tensor, static_cast<T>(1 << _fractional_bits));
+ }
}
}
@@ -97,7 +131,9 @@ protected:
// Create and configure function
FunctionType norm;
- norm.configure(&src, &dst, &mean, &var, &beta, &gamma, epsilon, act_info);
+ TensorType *beta_ptr = _use_beta ? &beta : nullptr;
+ TensorType *gamma_ptr = _use_gamma ? &gamma : nullptr;
+ norm.configure(&src, &dst, &mean, &var, beta_ptr, gamma_ptr, epsilon, act_info);
ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
@@ -149,6 +185,8 @@ protected:
SimpleTensor<T> _reference{};
int _fractional_bits{};
DataType _data_type{};
+ bool _use_beta{};
+ bool _use_gamma{};
};
template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
@@ -156,9 +194,9 @@ class BatchNormalizationLayerValidationFixture : public BatchNormalizationLayerV
{
public:
template <typename...>
- void setup(TensorShape shape0, TensorShape shape1, float epsilon, ActivationLayerInfo act_info, DataType dt)
+ void setup(TensorShape shape0, TensorShape shape1, float epsilon, bool use_beta, bool use_gamma, ActivationLayerInfo act_info, DataType dt)
{
- BatchNormalizationLayerValidationFixedPointFixture<TensorType, AccessorType, FunctionType, T>::setup(shape0, shape1, epsilon, act_info, dt, 0);
+ BatchNormalizationLayerValidationFixedPointFixture<TensorType, AccessorType, FunctionType, T>::setup(shape0, shape1, epsilon, use_beta, use_gamma, act_info, dt, 0);
}
};
} // namespace validation
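
In the fixed-point branch the neutral gamma is not the integer 1 but the fixed-point encoding of the real value 1.0: with f fractional bits, a stored integer n decodes to n / 2^f, so 1.0 is stored as 1 << f. A standalone worked example, independent of the test framework:

    #include <cstdint>
    #include <iostream>

    int main()
    {
        const int    f   = 5;               // e.g. QS8 with 5 fractional bits
        const int8_t one = int8_t(1) << f;  // stored integer: 32
        // Decode: stored / 2^f -> 32 / 32 = 1.0
        std::cout << int(one) << " decodes to " << one / float(1 << f) << "\n";
        return 0;
    }
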
diff --git a/tests/validation/reference/BatchNormalizationLayer.cpp b/tests/validation/reference/BatchNormalizationLayer.cpp
index a9d9f0320d..c8badacc79 100644
--- a/tests/validation/reference/BatchNormalizationLayer.cpp
+++ b/tests/validation/reference/BatchNormalizationLayer.cpp
@@ -106,7 +106,6 @@ SimpleTensor<T> batch_normalization_layer(const SimpleTensor<T> &src, const Simp
const float numerator = src[pos] - mean[i];
const float x_bar = numerator / denominator;
result[pos] = beta[i] + x_bar * gamma[i];
- ;
}
}
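
For reference, the loop body touched above computes y = beta + gamma * (x - mean) / sqrt(var + epsilon) per channel, so with the new defaults (use_beta == false gives beta = 0, use_gamma == false gives gamma = 1) the output collapses to the plain normalized value x_bar. A scalar sketch of that computation, mirroring the loop (assumed float form, not patch code):

    #include <cmath>

    float batch_norm_element(float x, float mean, float var,
                             float beta, float gamma, float epsilon)
    {
        const float denominator = std::sqrt(var + epsilon); // per-channel scale
        const float x_bar       = (x - mean) / denominator; // normalized value
        return beta + x_bar * gamma;                        // scale, then shift
    }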
}