author    Georgios Pinitas <georgios.pinitas@arm.com>    2017-11-16 14:37:08 +0000
committer Anthony Barbier <anthony.barbier@arm.com>      2018-11-02 16:35:24 +0000
commit    41caa625231c24533a514606bbf2683f7d4964ad (patch)
tree      4e897dfbcc77d57c996f15bab014b20e4cb4868d /tests/validation/fixtures/NormalizationLayerFixture.h
parent    181e65145d153210ec5587a42d2938e27e1d5b01 (diff)
download  ComputeLibrary-41caa625231c24533a514606bbf2683f7d4964ad.tar.gz
COMPMID-683: Normalization layer API clarification.
Adds an is_scaled parameter to NormalizationLayerInfo that flags whether the alpha parameter should be scaled by the normalization size or not. The unscaled form is the one used by [Krizhevsky 2012], which is the formulation used by Android NN and the TensorFlow LRN layer.

Change-Id: Iad2aa5e688cf4dcd6cc77a6e28c0663764f34ccb
Reviewed-on: http://mpd-gerrit.cambridge.arm.com/96102
Reviewed-by: Diego Lopez Recas <diego.lopezrecas@arm.com>
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
Tested-by: Kaizen <jeremy.johnson+kaizengerrit@arm.com>
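A minimal sketch of the two formulations, assuming the constructor argument order used in the fixture change below (type, norm_size, alpha, beta, kappa, is_scaled) and that NormalizationLayerInfo is declared in arm_compute/core/Types.h; the concrete values are illustrative only:

    #include "arm_compute/core/Types.h"

    using namespace arm_compute;

    // Effective coefficient applied to the accumulated squares (per the commit message):
    //   is_scaled == true  -> alpha is scaled by the normalization size
    //   is_scaled == false -> alpha is used as-is (Krizhevsky 2012 / Android NN / TensorFlow LRN)
    const NormalizationLayerInfo scaled_info(NormType::CROSS_MAP, 5 /* norm_size */, 0.0001f /* alpha */,
                                             0.75f /* beta */, 1.f /* kappa */, true /* is_scaled */);
    const NormalizationLayerInfo unscaled_info(NormType::CROSS_MAP, 5, 0.0001f, 0.75f, 1.f, false);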
Diffstat (limited to 'tests/validation/fixtures/NormalizationLayerFixture.h')
-rw-r--r--  tests/validation/fixtures/NormalizationLayerFixture.h | 8
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/tests/validation/fixtures/NormalizationLayerFixture.h b/tests/validation/fixtures/NormalizationLayerFixture.h
index 696d14fbbb..67881d0347 100644
--- a/tests/validation/fixtures/NormalizationLayerFixture.h
+++ b/tests/validation/fixtures/NormalizationLayerFixture.h
@@ -47,10 +47,10 @@ class NormalizationValidationFixedPointFixture : public framework::Fixture
{
public:
template <typename...>
- void setup(TensorShape shape, NormType norm_type, int norm_size, float beta, DataType data_type, int fractional_bits)
+ void setup(TensorShape shape, NormType norm_type, int norm_size, float beta, bool is_scaled, DataType data_type, int fractional_bits)
{
_fractional_bits = fractional_bits;
- NormalizationLayerInfo info(norm_type, norm_size, 5, beta);
+ NormalizationLayerInfo info(norm_type, norm_size, 5, beta, 1.f, is_scaled);
_target = compute_target(shape, info, data_type, fractional_bits);
_reference = compute_reference(shape, info, data_type, fractional_bits);
@@ -122,9 +122,9 @@ class NormalizationValidationFixture : public NormalizationValidationFixedPointF
{
public:
template <typename...>
- void setup(TensorShape shape, NormType norm_type, int norm_size, float beta, DataType data_type)
+ void setup(TensorShape shape, NormType norm_type, int norm_size, float beta, bool is_scaled, DataType data_type)
{
- NormalizationValidationFixedPointFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, norm_type, norm_size, beta, data_type, 0);
+ NormalizationValidationFixedPointFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, norm_type, norm_size, beta, is_scaled, data_type, 0);
}
};
} // namespace validation
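For context, a hypothetical direct invocation of the updated fixture with the extra is_scaled argument; the template arguments, tensor shape and values are placeholders (shown here for a NEON backend) and are not taken from the patch:

    // Hypothetical usage of the updated fixture; types and values are illustrative only.
    NormalizationValidationFixture<Tensor, Accessor, NENormalizationLayer, float> fixture;
    fixture.setup(TensorShape(64U, 32U, 8U), NormType::CROSS_MAP, 5 /* norm_size */,
                  0.5f /* beta */, false /* is_scaled */, DataType::F32);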