Diffstat (limited to 'tests/validation/fixtures/BatchNormalizationLayerFusionFixture.h')
-rw-r--r--  tests/validation/fixtures/BatchNormalizationLayerFusionFixture.h  |  56
1 file changed, 29 insertions(+), 27 deletions(-)
diff --git a/tests/validation/fixtures/BatchNormalizationLayerFusionFixture.h b/tests/validation/fixtures/BatchNormalizationLayerFusionFixture.h
index 39c7d46114..161eeb0ef4 100644
--- a/tests/validation/fixtures/BatchNormalizationLayerFusionFixture.h
+++ b/tests/validation/fixtures/BatchNormalizationLayerFusionFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018 ARM Limited.
+ * Copyright (c) 2018-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -45,7 +45,6 @@ template <typename TensorType, typename AccessorType, typename ConvolutionFuncti
class BatchNormalizationLayerFusionValidationFixture : public framework::Fixture
{
public:
- template <typename...>
void setup(TensorShape src_shape, TensorShape w_shape, TensorShape b_shape, TensorShape dst_shape, PadStrideInfo info, Size2D dilation,
bool use_conv_b, bool use_beta, bool use_gamma, float epsilon, DataType dt, DataLayout data_layout)
{
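Dropping the bare template <typename...> from setup() is purely a simplification: the unused parameter pack deduced to an empty list at every call site, so the variadic declaration bought nothing over a plain member function. A small illustration with hypothetical fixtures:

struct OldFixture
{
    // The pack is never named, so it deduces to <> on every call.
    template <typename...>
    void setup(int /*a*/, float /*b*/) {}
};

struct NewFixture
{
    void setup(int /*a*/, float /*b*/) {} // identical behaviour, simpler signature
};

int main()
{
    OldFixture{}.setup(1, 2.f); // instantiates setup<>()
    NewFixture{}.setup(1, 2.f);
    return 0;
}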
@@ -65,16 +64,19 @@ protected:
template <typename U>
void fill(U &&src, U &&w_tensor, U &&b_tensor, U &&mean_tensor, U &&var_tensor, U &&beta_tensor, U &&gamma_tensor)
{
- std::uniform_real_distribution<> distribution(-1.f, 1.f);
- std::uniform_real_distribution<> distribution_gz(0, 1.f);
+ static_assert(std::is_floating_point<T>::value || std::is_same<T, half>::value, "Only floating point data types supported.");
+ using DistributionType = typename std::conditional<std::is_same<T, half>::value, arm_compute::utils::uniform_real_distribution_16bit<T>, std::uniform_real_distribution<T>>::type;
+
+ DistributionType distribution{ T(-1.f), T(1.f) };
+ DistributionType distribution_gz{ T(0.f), T(1.f) };
library->fill(src, distribution, 0);
library->fill(w_tensor, distribution, 1);
library->fill(mean_tensor, distribution, 2);
library->fill(var_tensor, distribution_gz, 3);
- _use_conv_b ? library->fill(b_tensor, distribution, 4) : library->fill_tensor_value(b_tensor, 0.f);
- _use_beta ? library->fill(beta_tensor, distribution, 5) : library->fill_tensor_value(beta_tensor, 0.f);
- _use_gamma ? library->fill(gamma_tensor, distribution, 6) : library->fill_tensor_value(gamma_tensor, 1.f);
+ _use_conv_b ? library->fill(b_tensor, distribution, 4) : library->fill_tensor_value(b_tensor, T(0.f));
+ _use_beta ? library->fill(beta_tensor, distribution, 5) : library->fill_tensor_value(beta_tensor, T(0.f));
+ _use_gamma ? library->fill(gamma_tensor, distribution, 6) : library->fill_tensor_value(gamma_tensor, T(1.f));
}
TensorType compute_target(TensorShape src_shape, TensorShape w_shape, TensorShape b_shape, TensorShape dst_shape, PadStrideInfo info, float epsilon)
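The new fill() selects its random-number distribution at compile time: std::uniform_real_distribution is only specified for float, double and long double, so instantiating it with the library's 16-bit half type would be invalid, and std::conditional swaps in uniform_real_distribution_16bit for that case. A minimal, self-contained sketch of the same pattern, using a hypothetical half16 stand-in for arm_compute::half:

#include <random>
#include <type_traits>

// Hypothetical stand-in for arm_compute::half.
struct half16
{
    explicit half16(float f) : v(f) {}
    float v;
};

// Hypothetical stand-in for arm_compute::utils::uniform_real_distribution_16bit:
// sample in float, then narrow, since the standard distribution does not
// support 16-bit floating-point types.
template <typename T>
class uniform_real_distribution_16bit
{
public:
    uniform_real_distribution_16bit(T lo, T hi) : _dist(lo.v, hi.v) {}
    template <typename G>
    T operator()(G &g) { return T(_dist(g)); }
private:
    std::uniform_real_distribution<float> _dist;
};

template <typename T>
void fill_like_the_fixture()
{
    // Same compile-time selection as the fixture's fill().
    using DistributionType = typename std::conditional<std::is_same<T, half16>::value,
                                                       uniform_real_distribution_16bit<T>,
                                                       std::uniform_real_distribution<T>>::type;
    DistributionType distribution{ T(-1.f), T(1.f) };
    std::mt19937     gen(0);
    (void)distribution(gen);
}

int main()
{
    fill_like_the_fixture<float>();  // picks std::uniform_real_distribution<float>
    fill_like_the_fixture<half16>(); // picks uniform_real_distribution_16bit<half16>
    return 0;
}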
@@ -107,16 +109,16 @@ protected:
fuse_fn.configure(&conv_w, &bn_mean, &bn_var, &fused_w, &fused_b, conv_b_ptr, beta_ptr, gamma_ptr, epsilon);
conv_fn.configure(&src, &fused_w, &fused_b, &dst, info);
- ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(conv_w.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(conv_b.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(bn_mean.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(bn_var.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(bn_beta.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(bn_gamma.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(fused_w.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(fused_b.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(conv_w.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(conv_b.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(bn_mean.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(bn_var.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(bn_beta.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(bn_gamma.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(fused_w.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(fused_b.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(dst.info()->is_resizable());
// Allocate tensors
src.allocator()->allocate();
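The switch from ARM_COMPUTE_EXPECT(..., framework::LogLevel::ERRORS) to ARM_COMPUTE_ASSERT drops the boilerplate log level and, presumably, stops the test on failure instead of recording the error and continuing; aborting is the safer behaviour here, since allocating and filling tensors after a failed precondition would only produce follow-on noise. A rough sketch of the distinction, with hypothetical macros rather than the framework's real definitions:

#include <cstdio>
#include <stdexcept>

// Hypothetical EXPECT: record the failure, keep running.
#define MY_EXPECT(cond)                                     \
    do                                                      \
    {                                                       \
        if(!(cond))                                         \
        {                                                   \
            std::printf("Expectation failed: %s\n", #cond); \
        }                                                   \
    } while(false)

// Hypothetical ASSERT: abort the test case on failure.
#define MY_ASSERT(cond)                       \
    do                                        \
    {                                         \
        if(!(cond))                           \
        {                                     \
            throw std::runtime_error(#cond);  \
        }                                     \
    } while(false)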
@@ -130,16 +132,16 @@ protected:
fused_b.allocator()->allocate();
dst.allocator()->allocate();
- ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!conv_w.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!conv_b.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!bn_mean.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!bn_var.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!bn_beta.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!bn_gamma.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!fused_w.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!fused_b.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!conv_w.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!conv_b.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!bn_mean.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!bn_var.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!bn_beta.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!bn_gamma.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!fused_w.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!fused_b.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());
// Fill tensors
fill(AccessorType(src),