author    Michele Di Giorgio <michele.digiorgio@arm.com>  2018-09-26 14:39:39 +0100
committer Anthony Barbier <anthony.barbier@arm.com>       2018-11-02 16:55:19 +0000
commit    3f6a57a39ca1d19e737d169fd43766243bde4a92 (patch)
tree      d9f55119291e53dc82597952be103594d497ab46 /tests/validation/fixtures/BatchNormalizationLayerFixture.h
parent    932491f44d51940d82514417a82e43cb11b06bd4 (diff)
download  ComputeLibrary-3f6a57a39ca1d19e737d169fd43766243bde4a92.tar.gz
COMPMID-1599: (Nightly) CL/NormalizePlanarYUVLayer/Quantized/QASYMM8 mismatches
Fix the bounds of the random values used by the Normalize Planar YUV tests when using QASYMM8. Furthermore, since 70d252d8b4 a QASYMM8 run of the Batch Normalization tests would have been filling its tensors with all 1s. This patch removes that path, as QASYMM8 Batch Normalization is not supported.

Change-Id: Ieab83ed36b2d7af760ceb19a07d1eedcc991957f
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/150492
Reviewed-by: Isabella Gottardi <isabella.gottardi@arm.com>
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
Tested-by: bsgcomp <bsgcomp@arm.com>
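As a minimal standalone sketch of the new fill bounds (C++ standard library only; the std::mt19937 seeding below is illustrative, since in the fixture seeding is delegated to library->fill):

    #include <iostream>
    #include <random>

    int main()
    {
        // After the patch, every data type is filled from fixed float bounds:
        // src/mean/beta/gamma in [-1, 1], variance in [0, 1] (non-negative).
        const float min_bound = -1.f;
        const float max_bound = 1.f;
        std::uniform_real_distribution<> distribution(min_bound, max_bound);
        std::uniform_real_distribution<> distribution_var(0, max_bound);

        std::mt19937 gen(12345); // illustrative fixed seed for reproducibility
        std::cout << "sample value:    " << distribution(gen) << '\n';
        std::cout << "sample variance: " << distribution_var(gen) << '\n';
    }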
Diffstat (limited to 'tests/validation/fixtures/BatchNormalizationLayerFixture.h')
-rw-r--r-- tests/validation/fixtures/BatchNormalizationLayerFixture.h | 73
1 file changed, 20 insertions(+), 53 deletions(-)
diff --git a/tests/validation/fixtures/BatchNormalizationLayerFixture.h b/tests/validation/fixtures/BatchNormalizationLayerFixture.h
index 65e89b92da..359752f14e 100644
--- a/tests/validation/fixtures/BatchNormalizationLayerFixture.h
+++ b/tests/validation/fixtures/BatchNormalizationLayerFixture.h
@@ -59,63 +59,30 @@ protected:
template <typename U>
void fill(U &&src_tensor, U &&mean_tensor, U &&var_tensor, U &&beta_tensor, U &&gamma_tensor)
{
- if(is_data_type_float(_data_type))
+ const float min_bound = -1.f;
+ const float max_bound = 1.f;
+ std::uniform_real_distribution<> distribution(min_bound, max_bound);
+ std::uniform_real_distribution<> distribution_var(0, max_bound);
+ library->fill(src_tensor, distribution, 0);
+ library->fill(mean_tensor, distribution, 1);
+ library->fill(var_tensor, distribution_var, 0);
+ if(_use_beta)
{
- float min_bound = 0.f;
- float max_bound = 0.f;
- std::tie(min_bound, max_bound) = get_batchnormalization_layer_test_bounds<T>();
- std::uniform_real_distribution<> distribution(min_bound, max_bound);
- std::uniform_real_distribution<> distribution_var(0, max_bound);
- library->fill(src_tensor, distribution, 0);
- library->fill(mean_tensor, distribution, 1);
- library->fill(var_tensor, distribution_var, 0);
- if(_use_beta)
- {
- library->fill(beta_tensor, distribution, 3);
- }
- else
- {
- // Fill with default value 0.f
- library->fill_tensor_value(beta_tensor, 0.f);
- }
- if(_use_gamma)
- {
- library->fill(gamma_tensor, distribution, 4);
- }
- else
- {
- // Fill with default value 1.f
- library->fill_tensor_value(gamma_tensor, 1.f);
- }
+ library->fill(beta_tensor, distribution, 3);
}
else
{
- int min_bound = 0;
- int max_bound = 0;
- std::tie(min_bound, max_bound) = get_batchnormalization_layer_test_bounds<T>();
- std::uniform_int_distribution<> distribution(min_bound, max_bound);
- std::uniform_int_distribution<> distribution_var(0, max_bound);
- library->fill(src_tensor, distribution, 0);
- library->fill(mean_tensor, distribution, 1);
- library->fill(var_tensor, distribution_var, 0);
- if(_use_beta)
- {
- library->fill(beta_tensor, distribution, 3);
- }
- else
- {
- // Fill with default value 0
- library->fill_tensor_value(beta_tensor, static_cast<T>(0));
- }
- if(_use_gamma)
- {
- library->fill(gamma_tensor, distribution, 4);
- }
- else
- {
- // Fill with default value 1
- library->fill_tensor_value(gamma_tensor, static_cast<T>(1));
- }
+ // Fill with default value 0.f
+ library->fill_tensor_value(beta_tensor, 0.f);
+ }
+ if(_use_gamma)
+ {
+ library->fill(gamma_tensor, distribution, 4);
+ }
+ else
+ {
+ // Fill with default value 1.f
+ library->fill_tensor_value(gamma_tensor, 1.f);
}
}
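For readability, the fill() helper after this patch, reconstructed from the hunk above (surrounding class context omitted), reads:

    template <typename U>
    void fill(U &&src_tensor, U &&mean_tensor, U &&var_tensor, U &&beta_tensor, U &&gamma_tensor)
    {
        const float min_bound = -1.f;
        const float max_bound = 1.f;
        std::uniform_real_distribution<> distribution(min_bound, max_bound);
        std::uniform_real_distribution<> distribution_var(0, max_bound);
        library->fill(src_tensor, distribution, 0);
        library->fill(mean_tensor, distribution, 1);
        library->fill(var_tensor, distribution_var, 0);
        if(_use_beta)
        {
            library->fill(beta_tensor, distribution, 3);
        }
        else
        {
            // Fill with default value 0.f
            library->fill_tensor_value(beta_tensor, 0.f);
        }
        if(_use_gamma)
        {
            library->fill(gamma_tensor, distribution, 4);
        }
        else
        {
            // Fill with default value 1.f
            library->fill_tensor_value(gamma_tensor, 1.f);
        }
    }

The per-type branch on is_data_type_float(_data_type) and the call to get_batchnormalization_layer_test_bounds<T>() are gone: with the integer path removed, all remaining data types are filled from the same fixed float bounds.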