Diffstat (limited to 'tests/validation/fixtures/FuseBatchNormalizationFixture.h')
-rw-r--r--  tests/validation/fixtures/FuseBatchNormalizationFixture.h | 35
1 file changed, 17 insertions(+), 18 deletions(-)
diff --git a/tests/validation/fixtures/FuseBatchNormalizationFixture.h b/tests/validation/fixtures/FuseBatchNormalizationFixture.h
index 780b4a0fb3..a05e4169a7 100644
--- a/tests/validation/fixtures/FuseBatchNormalizationFixture.h
+++ b/tests/validation/fixtures/FuseBatchNormalizationFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019 ARM Limited.
+ * Copyright (c) 2019-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -47,7 +47,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, int
class FuseBatchNormalizationFixture : public framework::Fixture
{
public:
- template <typename...>
void setup(TensorShape shape_w, DataType data_type, DataLayout data_layout, bool in_place, bool with_bias, bool with_gamma, bool with_beta)
{
std::tie(_target_w, _target_b) = compute_target(shape_w, data_type, data_layout, in_place, with_bias, with_gamma, with_beta);
@@ -96,14 +95,14 @@ protected:
FunctionType fuse_batch_normalization;
fuse_batch_normalization.configure(&w, &mean, &var, w_fused_to_use, b_fused_to_use, b_to_use, beta_to_use, gamma_to_use, _epsilon, fuse_bn_type);

- ARM_COMPUTE_EXPECT(w.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(b.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(mean.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(var.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(w_fused.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(b_fused.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(beta.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(gamma.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(w.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(b.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(mean.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(var.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(w_fused.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(b_fused.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(beta.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(gamma.info()->is_resizable());

// Allocate tensors
w.allocator()->allocate();
@@ -115,14 +114,14 @@ protected:
beta.allocator()->allocate();
gamma.allocator()->allocate();

- ARM_COMPUTE_EXPECT(!w.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!b.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!mean.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!var.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!w_fused.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!b_fused.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!beta.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!gamma.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_ASSERT(!w.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!b.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!mean.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!var.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!w_fused.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!b_fused.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!beta.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!gamma.info()->is_resizable());

// Fill tensors
fill(AccessorType(w), 0U, -1.0f, 1.0f);