From 41caa625231c24533a514606bbf2683f7d4964ad Mon Sep 17 00:00:00 2001 From: Georgios Pinitas Date: Thu, 16 Nov 2017 14:37:08 +0000 Subject: COMPMID-683: Normalization layer API clarification. Adds an is_scaled parameter in the NormalizationLayerInfo that flags if the alpha parameter should be scaled by the normalization size or not. Unscaled parameter is used by [Krichevksy 2012] which is used in AndroidNN and TensorFlow LRN layer. Change-Id: Iad2aa5e688cf4dcd6cc77a6e28c0663764f34ccb Reviewed-on: http://mpd-gerrit.cambridge.arm.com/96102 Reviewed-by: Diego Lopez Recas Reviewed-by: Anthony Barbier Tested-by: Kaizen --- arm_compute/core/Types.h | 31 +++++++++++++++------- src/core/CL/kernels/CLNormalizationLayerKernel.cpp | 2 +- .../kernels/GCNormalizationLayerKernel.cpp | 2 +- .../NEON/kernels/NENormalizationLayerKernel.cpp | 2 +- tests/validation/CL/NormalizationLayer.cpp | 15 +++++++---- .../validation/GLES_COMPUTE/NormalizationLayer.cpp | 10 +++---- tests/validation/NEON/NormalizationLayer.cpp | 12 ++++++--- .../fixtures/NormalizationLayerFixture.h | 8 +++--- 8 files changed, 50 insertions(+), 32 deletions(-) diff --git a/arm_compute/core/Types.h b/arm_compute/core/Types.h index b0a284fe69..37f8508674 100644 --- a/arm_compute/core/Types.h +++ b/arm_compute/core/Types.h @@ -701,12 +701,14 @@ public: * * @param[in] type The normalization type. Can be @ref NormType::IN_MAP_1D, @ref NormType::IN_MAP_2D or @ref NORM_TYPE::CROSS_MAP * @param[in] norm_size The normalization size is the number of elements to normalize across. Defaults to 5. - * @param[in] alpha Alpha parameter used by normalization equation. Defaults to 0.0001. - * @param[in] beta Beta parameter used by normalization equation. Defaults to 0.5. - * @param[in] kappa Kappa parameter used by [Krichevksy 2012] Across Channel Local Brightness Normalization equation. + * @param[in] alpha (Optional) Alpha parameter used by normalization equation. Defaults to 0.0001. 
+ * @param[in] beta (Optional) Beta parameter used by normalization equation. Defaults to 0.5. + * @param[in] kappa (Optional) Kappa parameter used by [Krichevksy 2012] Across Channel Local Brightness Normalization equation. + * @param[in] is_scaled (Optional) Boolean that specifies if alpha will be scaled by the normalization size or not. + * Should be false to follow [Krichevksy 2012]. */ - NormalizationLayerInfo(NormType type, uint32_t norm_size = 5, float alpha = 0.0001f, float beta = 0.5f, float kappa = 1.f) - : _type(type), _norm_size(norm_size), _alpha(alpha), _beta(beta), _kappa(kappa) + NormalizationLayerInfo(NormType type, uint32_t norm_size = 5, float alpha = 0.0001f, float beta = 0.5f, float kappa = 1.f, bool is_scaled = true) + : _type(type), _norm_size(norm_size), _alpha(alpha), _beta(beta), _kappa(kappa), _is_scaled(is_scaled) { } NormType type() const @@ -729,17 +731,25 @@ public: { return _kappa; } - /** Return the scaling factor of the normalization function. If kappa is not - * 1 then [Krichevksy 2012] normalization scaling is specified. Scaling - * factor takes into account the total number of elements used for the - * normalization, so in case of 2 dimensions this is _norm_size^2. + bool is_cross_map() const + { + return _type == NormType::CROSS_MAP; + } + bool is_in_map() const + { + return !is_cross_map(); + } + /** Return the scaling factor of the normalization function. + * + * If is_scaled is set to false then [Krichevksy 2012] normalization scaling is performed, + * where alpha is returned plainly, else alpha is scaled by the total number of elements used for the normalization. * * @return The normalization scaling factor. */ float scale_coeff() const { const uint32_t size = (_type == NormType::IN_MAP_2D) ? _norm_size * _norm_size : _norm_size; - return (_kappa == 1.f) ? (_alpha / size) : _alpha; + return (_is_scaled) ? 
(_alpha / size) : _alpha; } private: @@ -748,6 +758,7 @@ private: float _alpha; float _beta; float _kappa; + bool _is_scaled; }; /** Convolution Layer Weights Information class. This class stores the necessary information to compute convolution layer when the weights are already reshaped */ diff --git a/src/core/CL/kernels/CLNormalizationLayerKernel.cpp b/src/core/CL/kernels/CLNormalizationLayerKernel.cpp index 1a7e38fab4..5d91065783 100644 --- a/src/core/CL/kernels/CLNormalizationLayerKernel.cpp +++ b/src/core/CL/kernels/CLNormalizationLayerKernel.cpp @@ -68,7 +68,7 @@ void CLNormalizationLayerKernel::configure(const ICLTensor *input, ICLTensor *ou _input = input; _output = output; - _is_in_map = (norm_info.type() != NormType::CROSS_MAP); + _is_in_map = norm_info.is_in_map(); const unsigned int border_width = _is_in_map ? std::min(norm_info.norm_size() / 2, 3U) : 0; _border_size = BorderSize(0, border_width); diff --git a/src/core/GLES_COMPUTE/kernels/GCNormalizationLayerKernel.cpp b/src/core/GLES_COMPUTE/kernels/GCNormalizationLayerKernel.cpp index c0c2445c6f..5dad767d3e 100644 --- a/src/core/GLES_COMPUTE/kernels/GCNormalizationLayerKernel.cpp +++ b/src/core/GLES_COMPUTE/kernels/GCNormalizationLayerKernel.cpp @@ -60,7 +60,7 @@ void GCNormalizationLayerKernel::configure(const IGCTensor *input, const IGCTens _squared_input = squared_input; _output = output; - const bool is_in_map = (norm_info.type() == NormType::IN_MAP_1D); + const bool is_in_map = norm_info.is_in_map(); const unsigned int border_width = is_in_map ? 
std::min(norm_info.norm_size() / 2, 3U) : 0; _border_size = BorderSize(0, border_width); diff --git a/src/core/NEON/kernels/NENormalizationLayerKernel.cpp b/src/core/NEON/kernels/NENormalizationLayerKernel.cpp index a409519114..b983609e49 100644 --- a/src/core/NEON/kernels/NENormalizationLayerKernel.cpp +++ b/src/core/NEON/kernels/NENormalizationLayerKernel.cpp @@ -61,7 +61,7 @@ void NENormalizationLayerKernel::configure(const ITensor *input, const ITensor * ARM_COMPUTE_ERROR_ON_VALUE_NOT_REPRESENTABLE_IN_FIXED_POINT(norm_info.scale_coeff(), input); } - const unsigned int border_width = (norm_info.type() == NormType::CROSS_MAP) ? 0 : std::min(norm_info.norm_size() / 2, 3U); + const unsigned int border_width = (norm_info.is_cross_map()) ? 0 : std::min(norm_info.norm_size() / 2, 3U); _input = input; _input_squared = input_squared; diff --git a/tests/validation/CL/NormalizationLayer.cpp b/tests/validation/CL/NormalizationLayer.cpp index 4fca6bf297..18f0c37ab6 100644 --- a/tests/validation/CL/NormalizationLayer.cpp +++ b/tests/validation/CL/NormalizationLayer.cpp @@ -52,9 +52,14 @@ constexpr AbsoluteTolerance tolerance_qs8(2); constexpr AbsoluteTolerance tolerance_qs16(3); /** Input data set. 
*/ -const auto NormalizationDataset = combine(combine(combine(datasets::SmallShapes(), framework::dataset::make("NormType", { NormType::IN_MAP_1D, NormType::CROSS_MAP })), - framework::dataset::make("NormalizationSize", 3, 9, 2)), - framework::dataset::make("Beta", { 0.5f, 1.f, 2.f })); +const auto NormalizationDataset = combine(combine(combine(combine(datasets::SmallShapes(), framework::dataset::make("NormType", { NormType::IN_MAP_1D, NormType::CROSS_MAP })), + framework::dataset::make("NormalizationSize", 3, 9, 2)), + framework::dataset::make("Beta", { 0.5f, 1.f, 2.f })), + framework::dataset::make("IsScaled", { true })); +const auto NormalizationDatasetFP32 = combine(combine(combine(combine(datasets::SmallShapes(), framework::dataset::make("NormType", { NormType::IN_MAP_1D, NormType::CROSS_MAP })), + framework::dataset::make("NormalizationSize", 3, 9, 2)), + framework::dataset::make("Beta", { 0.5f, 1.f, 2.f })), + framework::dataset::make("IsScaled", { true, false })); } // namespace TEST_SUITE(CL) @@ -80,12 +85,12 @@ FIXTURE_DATA_TEST_CASE(RunLarge, CLNormalizationLayerFixture, framework::D TEST_SUITE_END() TEST_SUITE(FP32) -FIXTURE_DATA_TEST_CASE(RunSmall, CLNormalizationLayerFixture, framework::DatasetMode::PRECOMMIT, combine(NormalizationDataset, framework::dataset::make("DataType", DataType::F32))) +FIXTURE_DATA_TEST_CASE(RunSmall, CLNormalizationLayerFixture, framework::DatasetMode::PRECOMMIT, combine(NormalizationDatasetFP32, framework::dataset::make("DataType", DataType::F32))) { // Validate output validate(CLAccessor(_target), _reference, tolerance_f32); } -FIXTURE_DATA_TEST_CASE(RunLarge, CLNormalizationLayerFixture, framework::DatasetMode::NIGHTLY, combine(NormalizationDataset, framework::dataset::make("DataType", DataType::F32))) +FIXTURE_DATA_TEST_CASE(RunLarge, CLNormalizationLayerFixture, framework::DatasetMode::NIGHTLY, combine(NormalizationDatasetFP32, framework::dataset::make("DataType", DataType::F32))) { // Validate output 
validate(CLAccessor(_target), _reference, tolerance_f32); diff --git a/tests/validation/GLES_COMPUTE/NormalizationLayer.cpp b/tests/validation/GLES_COMPUTE/NormalizationLayer.cpp index 4f6ae55677..4bd931e420 100644 --- a/tests/validation/GLES_COMPUTE/NormalizationLayer.cpp +++ b/tests/validation/GLES_COMPUTE/NormalizationLayer.cpp @@ -47,9 +47,10 @@ namespace constexpr AbsoluteTolerance tolerance_f32(0.00001f); /** Input data set. */ -const auto NormalizationDataset = combine(combine(combine(datasets::SmallShapes(), framework::dataset::make("NormType", { NormType::IN_MAP_1D, NormType::CROSS_MAP })), - framework::dataset::make("NormalizationSize", 3, 9, 2)), - framework::dataset::make("Beta", { 0.5f, 1.f, 2.f })); +const auto NormalizationDataset = combine(combine(combine(combine(datasets::SmallShapes(), framework::dataset::make("NormType", { NormType::IN_MAP_1D, NormType::CROSS_MAP })), + framework::dataset::make("NormalizationSize", 3, 9, 2)), + framework::dataset::make("Beta", { 0.5f, 1.f, 2.f })), + framework::dataset::make("IsScaled", { true, false })); } // namespace TEST_SUITE(GC) @@ -75,9 +76,6 @@ FIXTURE_DATA_TEST_CASE(RunLarge, GCNormalizationLayerFixture, framework:: TEST_SUITE_END() TEST_SUITE_END() -template -using GCNormalizationLayerFixedPointFixture = NormalizationValidationFixedPointFixture; - TEST_SUITE_END() TEST_SUITE_END() } // namespace validation diff --git a/tests/validation/NEON/NormalizationLayer.cpp b/tests/validation/NEON/NormalizationLayer.cpp index e22922cf8a..3afa52cb4c 100644 --- a/tests/validation/NEON/NormalizationLayer.cpp +++ b/tests/validation/NEON/NormalizationLayer.cpp @@ -53,8 +53,12 @@ constexpr AbsoluteTolerance tolerance_qs8(2); constexpr AbsoluteTolerance tolerance_qs16(4); /** Input data set. 
*/ -const auto NormalizationDataset = combine(combine(combine(datasets::SmallShapes(), datasets::NormalizationTypes()), framework::dataset::make("NormalizationSize", 3, 9, 2)), - framework::dataset::make("Beta", { 0.5f, 1.f, 2.f })); +const auto NormalizationDataset = combine(combine(combine(combine(datasets::SmallShapes(), datasets::NormalizationTypes()), framework::dataset::make("NormalizationSize", 3, 9, 2)), + framework::dataset::make("Beta", { 0.5f, 1.f, 2.f })), + framework::dataset::make("IsScaled", { true })); +const auto NormalizationDatasetFP32 = combine(combine(combine(combine(datasets::SmallShapes(), datasets::NormalizationTypes()), framework::dataset::make("NormalizationSize", 3, 9, 2)), + framework::dataset::make("Beta", { 0.5f, 1.f, 2.f })), + framework::dataset::make("IsScaled", { true, false })); } // namespace TEST_SUITE(NEON) @@ -82,12 +86,12 @@ TEST_SUITE_END() #endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */ TEST_SUITE(FP32) -FIXTURE_DATA_TEST_CASE(RunSmall, NENormalizationLayerFixture, framework::DatasetMode::PRECOMMIT, combine(NormalizationDataset, framework::dataset::make("DataType", DataType::F32))) +FIXTURE_DATA_TEST_CASE(RunSmall, NENormalizationLayerFixture, framework::DatasetMode::PRECOMMIT, combine(NormalizationDatasetFP32, framework::dataset::make("DataType", DataType::F32))) { // Validate output validate(Accessor(_target), _reference, tolerance_f32); } -FIXTURE_DATA_TEST_CASE(RunLarge, NENormalizationLayerFixture, framework::DatasetMode::NIGHTLY, combine(NormalizationDataset, framework::dataset::make("DataType", DataType::F32))) +FIXTURE_DATA_TEST_CASE(RunLarge, NENormalizationLayerFixture, framework::DatasetMode::NIGHTLY, combine(NormalizationDatasetFP32, framework::dataset::make("DataType", DataType::F32))) { // Validate output validate(Accessor(_target), _reference, tolerance_f32); diff --git a/tests/validation/fixtures/NormalizationLayerFixture.h b/tests/validation/fixtures/NormalizationLayerFixture.h index 
696d14fbbb..67881d0347 100644 --- a/tests/validation/fixtures/NormalizationLayerFixture.h +++ b/tests/validation/fixtures/NormalizationLayerFixture.h @@ -47,10 +47,10 @@ class NormalizationValidationFixedPointFixture : public framework::Fixture { public: template - void setup(TensorShape shape, NormType norm_type, int norm_size, float beta, DataType data_type, int fractional_bits) + void setup(TensorShape shape, NormType norm_type, int norm_size, float beta, bool is_scaled, DataType data_type, int fractional_bits) { _fractional_bits = fractional_bits; - NormalizationLayerInfo info(norm_type, norm_size, 5, beta); + NormalizationLayerInfo info(norm_type, norm_size, 5, beta, 1.f, is_scaled); _target = compute_target(shape, info, data_type, fractional_bits); _reference = compute_reference(shape, info, data_type, fractional_bits); @@ -122,9 +122,9 @@ class NormalizationValidationFixture : public NormalizationValidationFixedPointF { public: template - void setup(TensorShape shape, NormType norm_type, int norm_size, float beta, DataType data_type) + void setup(TensorShape shape, NormType norm_type, int norm_size, float beta, bool is_scaled, DataType data_type) { - NormalizationValidationFixedPointFixture::setup(shape, norm_type, norm_size, beta, data_type, 0); + NormalizationValidationFixedPointFixture::setup(shape, norm_type, norm_size, beta, is_scaled, data_type, 0); } }; } // namespace validation -- cgit v1.2.1