From 6db73ce5222d4b27b06c4e4aa9e466ceb9a09ba2 Mon Sep 17 00:00:00 2001 From: Moritz Pflanzer Date: Wed, 19 Jul 2017 10:18:42 +0100 Subject: COMPMID-415: Move NormalizationLayer to new validation Change-Id: Icf5781c920836fe87d2db27ca3f9cc4eb2bea554 Reviewed-on: http://mpd-gerrit.cambridge.arm.com/80999 Reviewed-by: Anthony Barbier Tested-by: Kaizen --- tests/TypePrinter.h | 13 +- tests/datasets_new/NormalizationTypesDataset.h | 49 ++++ tests/validation/NEON/NormalizationLayer.cpp | 177 ------------- tests/validation/Reference.cpp | 24 -- tests/validation/Reference.h | 10 - tests/validation/ReferenceCPP.cpp | 8 - tests/validation/ReferenceCPP.h | 7 - tests/validation/TensorOperations.h | 202 --------------- tests/validation/TensorVisitors.h | 20 -- tests/validation_new/CPP/NormalizationLayer.cpp | 274 +++++++++++++++++++++ tests/validation_new/CPP/NormalizationLayer.h | 47 ++++ tests/validation_new/NEON/NormalizationLayer.cpp | 125 ++++++++++ .../fixtures/NormalizationLayerFixture.h | 133 ++++++++++ 13 files changed, 638 insertions(+), 451 deletions(-) create mode 100644 tests/datasets_new/NormalizationTypesDataset.h delete mode 100644 tests/validation/NEON/NormalizationLayer.cpp create mode 100644 tests/validation_new/CPP/NormalizationLayer.cpp create mode 100644 tests/validation_new/CPP/NormalizationLayer.h create mode 100644 tests/validation_new/NEON/NormalizationLayer.cpp create mode 100644 tests/validation_new/fixtures/NormalizationLayerFixture.h diff --git a/tests/TypePrinter.h b/tests/TypePrinter.h index ed7933cacc..10d33882ce 100644 --- a/tests/TypePrinter.h +++ b/tests/TypePrinter.h @@ -240,7 +240,7 @@ inline ::std::ostream &operator<<(::std::ostream &os, const ActivationLayerInfo: return os; } -inline std::string to_string(const arm_compute::ActivationLayerInfo &info) +inline std::string to_string(const ActivationLayerInfo &info) { std::stringstream str; str << info.activation(); @@ -268,7 +268,14 @@ inline ::std::ostream &operator<<(::std::ostream &os, const NormType &norm_type) return os; } -inline std::string to_string(const arm_compute::NormalizationLayerInfo &info) +inline std::string to_string(const NormType &type) +{ + std::stringstream str; + str << type; + return str.str(); +} + +inline std::string to_string(const NormalizationLayerInfo &info) { std::stringstream str; str << info.type(); @@ -379,7 +386,7 @@ inline ::std::ostream &operator<<(::std::ostream &os, const DataType &data_type) return os; } -inline std::string to_string(const arm_compute::DataType &data_type) +inline std::string to_string(const DataType &data_type) { std::stringstream str; str << data_type; diff --git a/tests/datasets_new/NormalizationTypesDataset.h b/tests/datasets_new/NormalizationTypesDataset.h new file mode 100644 index 0000000000..4e087e9eff --- /dev/null +++ b/tests/datasets_new/NormalizationTypesDataset.h @@ -0,0 +1,49 @@ +/* + * Copyright (c) 2017 ARM Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#ifndef __ARM_COMPUTE_TEST_NORMALIZATION_TYPES_DATASET_H__ +#define __ARM_COMPUTE_TEST_NORMALIZATION_TYPES_DATASET_H__ + +#include "arm_compute/core/Types.h" + +namespace arm_compute +{ +namespace test +{ +namespace datasets +{ +class NormalizationTypes final : public framework::dataset::ContainerDataset> +{ +public: + NormalizationTypes() + : ContainerDataset("NormType", + { + NormType::IN_MAP_1D, NormType::IN_MAP_2D, NormType::CROSS_MAP + }) + { + } +}; +} // namespace datasets +} // namespace test +} // namespace arm_compute +#endif /* __ARM_COMPUTE_TEST_NORMALIZATION_TYPES_DATASET_H__ */ diff --git a/tests/validation/NEON/NormalizationLayer.cpp b/tests/validation/NEON/NormalizationLayer.cpp deleted file mode 100644 index 8a5db369d1..0000000000 --- a/tests/validation/NEON/NormalizationLayer.cpp +++ /dev/null @@ -1,177 +0,0 @@ -/* - * Copyright (c) 2017 ARM Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ -#include "NEON/Accessor.h" -#include "TypePrinter.h" -#include "tests/Globals.h" -#include "tests/Utils.h" -#include "validation/Datasets.h" -#include "validation/Reference.h" -#include "validation/Validation.h" - -#include "arm_compute/runtime/NEON/functions/NENormalizationLayer.h" - -#include - -using namespace arm_compute; -using namespace arm_compute::test; -using namespace arm_compute::test::validation; - -namespace -{ -/** Define tolerance of the normalization layer depending on values data type. - * - * @param[in] dt Data type of the tensors' values. - * - * @return Tolerance depending on the data type. - */ -float normalization_layer_tolerance(DataType dt) -{ - switch(dt) - { - case DataType::QS8: - return 2.0f; - case DataType::F16: - return 0.001f; - case DataType::F32: - return 1e-05; - default: - return 0.f; - } -} - -/** Compute Neon normalization layer function. - * - * @param[in] shape Shape of the input and output tensors. - * @param[in] dt Data type of input and output tensors. 
- * @param[in] norm_info Normalization Layer information. - * @param[in] fixed_point_position (Optional) Fixed point position that expresses the number of bits for the fractional part of the number when the tensor's data type is QS8 or QS16 (default = 0). - * - * @return Computed output tensor. - */ -Tensor compute_normalization_layer(const TensorShape &shape, DataType dt, NormalizationLayerInfo norm_info, int fixed_point_position = 0) -{ - // Create tensors - Tensor src = create_tensor(shape, dt, 1, fixed_point_position); - Tensor dst = create_tensor(shape, dt, 1, fixed_point_position); - - // Create and configure function - NENormalizationLayer norm; - norm.configure(&src, &dst, norm_info); - - // Allocate tensors - src.allocator()->allocate(); - dst.allocator()->allocate(); - - BOOST_TEST(!src.info()->is_resizable()); - BOOST_TEST(!dst.info()->is_resizable()); - - // Fill tensors - if(dt == DataType::QS8) - { - const int8_t one_fixed_point = 1 << fixed_point_position; - const int8_t minus_one_fixed_point = -one_fixed_point; - library->fill_tensor_uniform(Accessor(src), 0, minus_one_fixed_point, one_fixed_point); - } - else - { - library->fill_tensor_uniform(Accessor(src), 0); - } - - // Compute function - norm.run(); - - return dst; -} -} // namespace - -#ifndef DOXYGEN_SKIP_THIS -BOOST_AUTO_TEST_SUITE(NEON) -BOOST_AUTO_TEST_SUITE(NormalizationLayer) - -#ifdef ARM_COMPUTE_ENABLE_FP16 -BOOST_AUTO_TEST_SUITE(Float16) -BOOST_TEST_DECORATOR(*boost::unit_test::label("precommit")) -BOOST_DATA_TEST_CASE(RunSmall, - SmallShapes() * DataType::F16 *NormalizationTypes() * boost::unit_test::data::xrange(3, 9, 2) * boost::unit_test::data::make({ 0.5f, 1.0f, 2.0f }), - shape, dt, norm_type, norm_size, beta) -{ - // Provide normalization layer information - NormalizationLayerInfo norm_info(norm_type, norm_size, 5, beta); - - // Compute function - Tensor dst = compute_normalization_layer(shape, dt, norm_info); - - // Compute reference - RawTensor ref_dst = Reference::compute_reference_normalization_layer(shape, dt, norm_info); - - // Validate output - validate(Accessor(dst), ref_dst, normalization_layer_tolerance(DataType::F16)); -} - -BOOST_AUTO_TEST_SUITE_END() -#endif /* ARM_COMPUTE_ENABLE_FP16 */ - -BOOST_AUTO_TEST_SUITE(Float) -BOOST_TEST_DECORATOR(*boost::unit_test::label("precommit")) -BOOST_DATA_TEST_CASE(RunSmall, - SmallShapes() * DataType::F32 *NormalizationTypes() * boost::unit_test::data::xrange(3, 9, 2) * boost::unit_test::data::make({ 0.5f, 1.0f, 2.0f }), - shape, dt, norm_type, norm_size, beta) -{ - // Provide normalization layer information - NormalizationLayerInfo norm_info(norm_type, norm_size, 5, beta); - - // Compute function - Tensor dst = compute_normalization_layer(shape, dt, norm_info); - - // Compute reference - RawTensor ref_dst = Reference::compute_reference_normalization_layer(shape, dt, norm_info); - - // Validate output - validate(Accessor(dst), ref_dst, normalization_layer_tolerance(DataType::F32)); -} -BOOST_AUTO_TEST_SUITE_END() - -BOOST_AUTO_TEST_SUITE(Quantized) -BOOST_TEST_DECORATOR(*boost::unit_test::label("precommit")) -BOOST_DATA_TEST_CASE(RunSmall, - SmallShapes() * DataType::QS8 *NormalizationTypes() * boost::unit_test::data::xrange(3, 7, 2) * (boost::unit_test::data::xrange(1, 6) * boost::unit_test::data::make({ 0.5f, 1.0f, 2.0f })), - shape, dt, norm_type, norm_size, fixed_point_position, beta) -{ - // Provide normalization layer information - NormalizationLayerInfo norm_info(norm_type, norm_size, 5, beta, 1.f); - - // Compute function - Tensor dst = 
compute_normalization_layer(shape, dt, norm_info, fixed_point_position); - - // Compute reference - RawTensor ref_dst = Reference::compute_reference_normalization_layer(shape, dt, norm_info, fixed_point_position); - - // Validate output - validate(Accessor(dst), ref_dst, normalization_layer_tolerance(DataType::QS8)); -} -BOOST_AUTO_TEST_SUITE_END() - -BOOST_AUTO_TEST_SUITE_END() -BOOST_AUTO_TEST_SUITE_END() -#endif /* DOXYGEN_SKIP_THIS */ diff --git a/tests/validation/Reference.cpp b/tests/validation/Reference.cpp index 5c669903c8..b7553f3b7b 100644 --- a/tests/validation/Reference.cpp +++ b/tests/validation/Reference.cpp @@ -660,30 +660,6 @@ RawTensor Reference::compute_reference_fully_connected_layer(const TensorShape & return ref_dst; } -RawTensor Reference::compute_reference_normalization_layer(const TensorShape &shape, DataType dt, NormalizationLayerInfo norm_info, int fixed_point_position) -{ - // Create reference - RawTensor ref_src(shape, dt, 1, fixed_point_position); - RawTensor ref_dst(shape, dt, 1, fixed_point_position); - - // Fill reference - if(dt == DataType::QS8) - { - const int8_t one_fixed_point = 1 << fixed_point_position; - const int8_t minus_one_fixed_point = -one_fixed_point; - library->fill_tensor_uniform(ref_src, 0, minus_one_fixed_point, one_fixed_point); - } - else - { - library->fill_tensor_uniform(ref_src, 0); - } - - // Compute reference - ReferenceCPP::normalization_layer(ref_src, ref_dst, norm_info); - - return ref_dst; -} - RawTensor Reference::compute_reference_pooling_layer(const TensorShape &shape_in, const TensorShape &shape_out, DataType dt, PoolingLayerInfo pool_info, int fixed_point_position) { // Create reference diff --git a/tests/validation/Reference.h b/tests/validation/Reference.h index 034a308327..778e7b0b2b 100644 --- a/tests/validation/Reference.h +++ b/tests/validation/Reference.h @@ -353,16 +353,6 @@ public: */ static RawTensor compute_reference_fully_connected_layer(const TensorShape &input_shape, const TensorShape &weights_shape, const TensorShape &bias_shape, const TensorShape &output_shape, DataType dt, bool transpose_weights, int fixed_point_position); - /** Compute reference normalization layer. - * - * @param[in] shape Shape of the input and output tensors. - * @param[in] dt Data type of input and output tensors. - * @param[in] norm_info Normalization Layer information. - * @param[in] fixed_point_position (Optional) Fixed point position that expresses the number of bits for the fractional part of the number when the tensor's data type is QS8 or QS16 (default = 0). - * - * @return Computed raw tensor. - */ - static RawTensor compute_reference_normalization_layer(const TensorShape &shape, DataType dt, NormalizationLayerInfo norm_info, int fixed_point_position = 0); /** Compute reference pooling layer. * * @param[in] shape_in Shape of the input tensor. 
diff --git a/tests/validation/ReferenceCPP.cpp b/tests/validation/ReferenceCPP.cpp index dd2437195e..13f4b90a82 100644 --- a/tests/validation/ReferenceCPP.cpp +++ b/tests/validation/ReferenceCPP.cpp @@ -337,14 +337,6 @@ void ReferenceCPP::fully_connected_layer(const RawTensor &src, const RawTensor & boost::apply_visitor(tensor_visitors::fully_connected_layer_visitor(s, w, b), d); } -// Normalization Layer -void ReferenceCPP::normalization_layer(const RawTensor &src, RawTensor &dst, NormalizationLayerInfo norm_info) -{ - const TensorVariant s = TensorFactory::get_tensor(src); - TensorVariant d = TensorFactory::get_tensor(dst); - boost::apply_visitor(tensor_visitors::normalization_layer_visitor(s, norm_info), d); -} - // Pooling Layer void ReferenceCPP::pooling_layer(const RawTensor &src, RawTensor &dst, PoolingLayerInfo pool_info, int fixed_point_position) { diff --git a/tests/validation/ReferenceCPP.h b/tests/validation/ReferenceCPP.h index 6d4d243c95..3f5e4aeaf5 100644 --- a/tests/validation/ReferenceCPP.h +++ b/tests/validation/ReferenceCPP.h @@ -296,13 +296,6 @@ public: * @param[out] dst Result tensor. */ static void fully_connected_layer(const RawTensor &src, const RawTensor &weights, const RawTensor &bias, RawTensor &dst); - /** Normalization of @p src based on the information from @p norm_info. - * - * @param[in] src Input tensor. - * @param[out] dst Result tensor. - * @param[in] norm_info Normalization Layer information. - */ - static void normalization_layer(const RawTensor &src, RawTensor &dst, NormalizationLayerInfo norm_info); /** Pooling layer of @p src based on the information from @p pool_info. * * @param[in] src Input tensor. diff --git a/tests/validation/TensorOperations.h b/tests/validation/TensorOperations.h index 5018bfdb2b..a5039a4641 100644 --- a/tests/validation/TensorOperations.h +++ b/tests/validation/TensorOperations.h @@ -1207,208 +1207,6 @@ void fully_connected_layer(const Tensor &in, const Tensor &weights, const } } -// Normalization Layer for floating point type -template ::value, int>::type * = nullptr> -void normalization_layer(const Tensor &in, Tensor &out, NormalizationLayerInfo norm_info) -{ - const uint32_t norm_size = norm_info.norm_size(); - NormType type = norm_info.type(); - float beta = norm_info.beta(); - uint32_t kappa = norm_info.kappa(); - - const int cols = static_cast(in.shape()[0]); - const int rows = static_cast(in.shape()[1]); - const int depth = static_cast(in.shape()[2]); - int upper_dims = in.shape().total_size() / (cols * rows); - - float coeff = norm_info.scale_coeff(); - int radius_cols = norm_size / 2; - // IN_MAP_1D and CROSS_MAP normalize over a single axis only - int radius_rows = (NormType::IN_MAP_2D == type) ? 
norm_size / 2 : 0; - - if(type == NormType::CROSS_MAP) - { - // Remove also depth from upper dimensions since it is the axes we want - // to use for normalization - upper_dims /= depth; - for(int r = 0; r < upper_dims; ++r) - { - for(int i = 0; i < rows; ++i) - { - for(int k = 0; k < cols; ++k) - { - for(int l = 0; l < depth; ++l) - { - float accumulated_scale = 0.f; - for(int j = -radius_cols; j <= radius_cols; ++j) - { - const int z = l + j; - if(z >= 0 && z < depth) - { - const T value = in[k + i * cols + z * rows * cols + r * cols * rows * depth]; - accumulated_scale += value * value; - } - } - out[k + i * cols + l * rows * cols + r * cols * rows * depth] = kappa + accumulated_scale * coeff; - } - } - } - } - } - else - { - for(int r = 0; r < upper_dims; ++r) - { - for(int i = 0; i < rows; ++i) - { - for(int k = 0; k < cols; ++k) - { - float accumulated_scale = 0.f; - for(int j = -radius_rows; j <= radius_rows; ++j) - { - const int y = i + j; - for(int l = -radius_cols; l <= radius_cols; ++l) - { - const int x = k + l; - if((x >= 0 && y >= 0) && (x < cols && y < rows)) - { - const T value = in[x + y * cols + r * cols * rows]; - accumulated_scale += value * value; - } - } - } - out[k + i * cols + r * cols * rows] = kappa + accumulated_scale * coeff; - } - } - } - } - - if(beta == 1.f) - { - for(int i = 0; i < out.num_elements(); ++i) - { - out[i] = in[i] / out[i]; - } - } - else if(beta == 0.5f) - { - for(int i = 0; i < out.num_elements(); ++i) - { - out[i] = in[i] / std::sqrt(out[i]); - } - } - else - { - for(int i = 0; i < out.num_elements(); ++i) - { - out[i] = in[i] * std::exp(std::log(out[i]) * -beta); - } - } -} -// Normalization Layer for fixed-point types -template ::value, int>::type * = nullptr> -void normalization_layer(const Tensor &in, Tensor &out, NormalizationLayerInfo norm_info) -{ - using namespace fixed_point_arithmetic; - - const int fixed_point_position = in.fixed_point_position(); - - const uint32_t norm_size = norm_info.norm_size(); - NormType type = norm_info.type(); - fixed_point beta(norm_info.beta(), fixed_point_position); - fixed_point kappa(norm_info.kappa(), fixed_point_position); - - const int cols = static_cast(in.shape()[0]); - const int rows = static_cast(in.shape()[1]); - const int depth = static_cast(in.shape()[2]); - int upper_dims = in.shape().total_size() / (cols * rows); - - fixed_point coeff(norm_info.scale_coeff(), fixed_point_position); - int radius_cols = norm_size / 2; - // IN_MAP_1D and CROSS_MAP normalize over a single axis only - int radius_rows = (NormType::IN_MAP_2D == type) ? 
norm_size / 2 : 0; - - if(type == NormType::CROSS_MAP) - { - // Remove also depth from upper dimensions since it is the axes we want - // to use for normalization - upper_dims /= depth; - for(int r = 0; r < upper_dims; ++r) - { - for(int i = 0; i < rows; ++i) - { - for(int k = 0; k < cols; ++k) - { - for(int l = 0; l < depth; ++l) - { - fixed_point accumulated_scale(0.f, fixed_point_position); - for(int j = -radius_cols; j <= radius_cols; ++j) - { - const int z = l + j; - if(z >= 0 && z < depth) - { - const T value = in[k + i * cols + z * rows * cols + r * cols * rows * depth]; - const fixed_point fp_value(value, fixed_point_position, true); - accumulated_scale = add(accumulated_scale, mul(fp_value, fp_value)); - } - } - accumulated_scale = add(kappa, mul(accumulated_scale, coeff)); - out[k + i * cols + l * rows * cols + r * cols * rows * depth] = accumulated_scale.raw(); - } - } - } - } - } - else - { - for(int r = 0; r < upper_dims; ++r) - { - for(int i = 0; i < rows; ++i) - { - for(int k = 0; k < cols; ++k) - { - fixed_point accumulated_scale(0.f, fixed_point_position); - for(int j = -radius_rows; j <= radius_rows; ++j) - { - const int y = i + j; - for(int l = -radius_cols; l <= radius_cols; ++l) - { - const int x = k + l; - if((x >= 0 && y >= 0) && (x < cols && y < rows)) - { - const T value = in[x + y * cols + r * cols * rows]; - const fixed_point fp_value(value, fixed_point_position, true); - accumulated_scale = add(accumulated_scale, mul(fp_value, fp_value)); - } - } - } - accumulated_scale = add(kappa, mul(accumulated_scale, coeff)); - out[k + i * cols + r * cols * rows] = accumulated_scale.raw(); - } - } - } - } - - if(norm_info.beta() == 1.f) - { - for(int i = 0; i < out.num_elements(); ++i) - { - fixed_point res = div(fixed_point(in[i], fixed_point_position, true), fixed_point(out[i], fixed_point_position, true)); - out[i] = res.raw(); - } - } - else - { - const fixed_point beta(norm_info.beta(), fixed_point_position); - for(int i = 0; i < out.num_elements(); ++i) - { - fixed_point res = pow(fixed_point(out[i], fixed_point_position, true), beta); - res = div(fixed_point(in[i], fixed_point_position, true), res); - out[i] = res.raw(); - } - } -} - // Pooling layer template void pooling_layer(const Tensor &in, Tensor &out, PoolingLayerInfo pool_info, int fixed_point_position) diff --git a/tests/validation/TensorVisitors.h b/tests/validation/TensorVisitors.h index bccb70a1d3..fa9c3ecbb8 100644 --- a/tests/validation/TensorVisitors.h +++ b/tests/validation/TensorVisitors.h @@ -345,26 +345,6 @@ private: const TensorVariant &_bias; }; -// Normalization Layer visitor -struct normalization_layer_visitor : public boost::static_visitor<> -{ -public: - explicit normalization_layer_visitor(const TensorVariant &in, NormalizationLayerInfo norm_info) - : _in(in), _norm_info(norm_info) - { - } - - template - void operator()(Tensor &out) const - { - const Tensor &in = boost::get>(_in); - tensor_operations::normalization_layer(in, out, _norm_info); - } - -private: - const TensorVariant &_in; - NormalizationLayerInfo _norm_info; -}; // Pooling layer struct pooling_layer_visitor : public boost::static_visitor<> { diff --git a/tests/validation_new/CPP/NormalizationLayer.cpp b/tests/validation_new/CPP/NormalizationLayer.cpp new file mode 100644 index 0000000000..72f49007cc --- /dev/null +++ b/tests/validation_new/CPP/NormalizationLayer.cpp @@ -0,0 +1,274 @@ +/* + * Copyright (c) 2017 ARM Limited. 
+ * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#include "NormalizationLayer.h" + +#include "tests/validation_new/FixedPoint.h" +#include "tests/validation_new/half.h" + +namespace arm_compute +{ +namespace test +{ +namespace validation +{ +namespace reference +{ +template ::value, int>::type> +SimpleTensor normalization_layer(const SimpleTensor &src, NormalizationLayerInfo info) +{ + // Create reference + SimpleTensor dst{ src.shape(), src.data_type(), 1, src.fixed_point_position() }; + + // Compute reference + const uint32_t norm_size = info.norm_size(); + NormType type = info.type(); + float beta = info.beta(); + uint32_t kappa = info.kappa(); + + const int cols = src.shape()[0]; + const int rows = src.shape()[1]; + const int depth = src.shape()[2]; + int upper_dims = src.shape().total_size() / (cols * rows); + + float coeff = info.scale_coeff(); + int radius_cols = norm_size / 2; + + // IN_MAP_1D and CROSS_MAP normalize over a single axis only + int radius_rows = (NormType::IN_MAP_2D == type) ? 
norm_size / 2 : 0; + + if(type == NormType::CROSS_MAP) + { + // Remove also depth from upper dimensions since it is the dimension we + // want to use for normalization + upper_dims /= depth; + + for(int r = 0; r < upper_dims; ++r) + { + for(int i = 0; i < rows; ++i) + { + for(int k = 0; k < cols; ++k) + { + for(int l = 0; l < depth; ++l) + { + float accumulated_scale = 0.f; + + for(int j = -radius_cols; j <= radius_cols; ++j) + { + const int z = l + j; + + if(z >= 0 && z < depth) + { + const T value = src[k + i * cols + z * rows * cols + r * cols * rows * depth]; + accumulated_scale += value * value; + } + } + + dst[k + i * cols + l * rows * cols + r * cols * rows * depth] = kappa + accumulated_scale * coeff; + } + } + } + } + } + else + { + for(int r = 0; r < upper_dims; ++r) + { + for(int i = 0; i < rows; ++i) + { + for(int k = 0; k < cols; ++k) + { + float accumulated_scale = 0.f; + + for(int j = -radius_rows; j <= radius_rows; ++j) + { + const int y = i + j; + for(int l = -radius_cols; l <= radius_cols; ++l) + { + const int x = k + l; + + if((x >= 0 && y >= 0) && (x < cols && y < rows)) + { + const T value = src[x + y * cols + r * cols * rows]; + accumulated_scale += value * value; + } + } + } + + dst[k + i * cols + r * cols * rows] = kappa + accumulated_scale * coeff; + } + } + } + } + + if(beta == 1.f) + { + for(int i = 0; i < dst.num_elements(); ++i) + { + dst[i] = src[i] / dst[i]; + } + } + else if(beta == 0.5f) + { + for(int i = 0; i < dst.num_elements(); ++i) + { + dst[i] = src[i] / std::sqrt(dst[i]); + } + } + else + { + for(int i = 0; i < dst.num_elements(); ++i) + { + dst[i] = src[i] * std::exp(std::log(dst[i]) * -beta); + } + } + + return dst; +} + +template ::value, int>::type> +SimpleTensor normalization_layer(const SimpleTensor &src, NormalizationLayerInfo info) +{ + using namespace fixed_point_arithmetic; + + // Create reference + SimpleTensor dst{ src.shape(), src.data_type(), 1, src.fixed_point_position() }; + + // Compute reference + const int fixed_point_position = src.fixed_point_position(); + + const uint32_t norm_size = info.norm_size(); + NormType type = info.type(); + fixed_point beta(info.beta(), fixed_point_position); + fixed_point kappa(info.kappa(), fixed_point_position); + + const int cols = src.shape()[0]; + const int rows = src.shape()[1]; + const int depth = src.shape()[2]; + int upper_dims = src.shape().total_size() / (cols * rows); + + fixed_point coeff(info.scale_coeff(), fixed_point_position); + int radius_cols = norm_size / 2; + + // IN_MAP_1D and CROSS_MAP normalize over a single axis only + int radius_rows = (NormType::IN_MAP_2D == type) ? 
norm_size / 2 : 0; + + if(type == NormType::CROSS_MAP) + { + // Remove also depth from upper dimensions since it is the dimension we + // want to use for normalization + upper_dims /= depth; + + for(int r = 0; r < upper_dims; ++r) + { + for(int i = 0; i < rows; ++i) + { + for(int k = 0; k < cols; ++k) + { + for(int l = 0; l < depth; ++l) + { + fixed_point accumulated_scale(0.f, fixed_point_position); + + for(int j = -radius_cols; j <= radius_cols; ++j) + { + const int z = l + j; + + if(z >= 0 && z < depth) + { + const T value = src[k + i * cols + z * rows * cols + r * cols * rows * depth]; + const fixed_point fp_value(value, fixed_point_position, true); + accumulated_scale = add(accumulated_scale, mul(fp_value, fp_value)); + } + } + + accumulated_scale = add(kappa, mul(accumulated_scale, coeff)); + dst[k + i * cols + l * rows * cols + r * cols * rows * depth] = accumulated_scale.raw(); + } + } + } + } + } + else + { + for(int r = 0; r < upper_dims; ++r) + { + for(int i = 0; i < rows; ++i) + { + for(int k = 0; k < cols; ++k) + { + fixed_point accumulated_scale(0.f, fixed_point_position); + + for(int j = -radius_rows; j <= radius_rows; ++j) + { + const int y = i + j; + + for(int l = -radius_cols; l <= radius_cols; ++l) + { + const int x = k + l; + + if((x >= 0 && y >= 0) && (x < cols && y < rows)) + { + const T value = src[x + y * cols + r * cols * rows]; + const fixed_point fp_value(value, fixed_point_position, true); + accumulated_scale = add(accumulated_scale, mul(fp_value, fp_value)); + } + } + } + + accumulated_scale = add(kappa, mul(accumulated_scale, coeff)); + dst[k + i * cols + r * cols * rows] = accumulated_scale.raw(); + } + } + } + } + + if(info.beta() == 1.f) + { + for(int i = 0; i < dst.num_elements(); ++i) + { + fixed_point res = div(fixed_point(src[i], fixed_point_position, true), fixed_point(dst[i], fixed_point_position, true)); + dst[i] = res.raw(); + } + } + else + { + const fixed_point beta(info.beta(), fixed_point_position); + + for(int i = 0; i < dst.num_elements(); ++i) + { + fixed_point res = pow(fixed_point(dst[i], fixed_point_position, true), beta); + res = div(fixed_point(src[i], fixed_point_position, true), res); + dst[i] = res.raw(); + } + } + + return dst; +} + +template SimpleTensor normalization_layer(const SimpleTensor &src, NormalizationLayerInfo info); +template SimpleTensor normalization_layer(const SimpleTensor &src, NormalizationLayerInfo info); +template SimpleTensor normalization_layer(const SimpleTensor &src, NormalizationLayerInfo info); +} // namespace reference +} // namespace validation +} // namespace test +} // namespace arm_compute diff --git a/tests/validation_new/CPP/NormalizationLayer.h b/tests/validation_new/CPP/NormalizationLayer.h new file mode 100644 index 0000000000..54284b1d50 --- /dev/null +++ b/tests/validation_new/CPP/NormalizationLayer.h @@ -0,0 +1,47 @@ +/* + * Copyright (c) 2017 ARM Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#ifndef __ARM_COMPUTE_TEST_NORMALIZATION_LAYER_H__ +#define __ARM_COMPUTE_TEST_NORMALIZATION_LAYER_H__ + +#include "tests/validation_new/Helpers.h" +#include "tests/validation_new/SimpleTensor.h" + +namespace arm_compute +{ +namespace test +{ +namespace validation +{ +namespace reference +{ +template ::value, int>::type = 0> +SimpleTensor normalization_layer(const SimpleTensor &src, NormalizationLayerInfo info); + +template ::value, int>::type = 0> +SimpleTensor normalization_layer(const SimpleTensor &src, NormalizationLayerInfo info); +} // namespace reference +} // namespace validation +} // namespace test +} // namespace arm_compute +#endif /* __ARM_COMPUTE_TEST_NORMALIZATION_LAYER_H__ */ diff --git a/tests/validation_new/NEON/NormalizationLayer.cpp b/tests/validation_new/NEON/NormalizationLayer.cpp new file mode 100644 index 0000000000..f364975332 --- /dev/null +++ b/tests/validation_new/NEON/NormalizationLayer.cpp @@ -0,0 +1,125 @@ +/* + * Copyright (c) 2017 ARM Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +#include "arm_compute/core/Types.h" +#include "arm_compute/runtime/NEON/functions/NENormalizationLayer.h" +#include "arm_compute/runtime/Tensor.h" +#include "arm_compute/runtime/TensorAllocator.h" +#include "framework/Asserts.h" +#include "framework/Macros.h" +#include "framework/datasets/Datasets.h" +#include "tests/NEON/Accessor.h" +#include "tests/PaddingCalculator.h" +#include "tests/datasets_new/NormalizationTypesDataset.h" +#include "tests/datasets_new/ShapeDatasets.h" +#include "tests/validation_new/Validation.h" +#include "tests/validation_new/fixtures/NormalizationLayerFixture.h" +#include "tests/validation_new/half.h" + +namespace arm_compute +{ +namespace test +{ +namespace validation +{ +namespace +{ +/** Tolerance for float operations */ +#ifdef ARM_COMPUTE_ENABLE_FP16 +constexpr float tolerance_f16 = 0.001f; +#endif /* ARM_COMPUTE_ENABLE_FP16 */ +constexpr float tolerance_f32 = 0.00001f; +/** Tolerance for fixed point operations */ +constexpr int8_t tolerance_qs8 = 2; + +/** Input data set. */ +const auto NormalizationDataset = combine(combine(combine(datasets::SmallShapes(), datasets::NormalizationTypes()), framework::dataset::make("NormalizationSize", 3, 9, 2)), + framework::dataset::make("Beta", { 0.5f, 1.f, 2.f })); +} // namespace + +TEST_SUITE(NEON) +TEST_SUITE(NormalizationLayer) + +//TODO(COMPMID-415): Missing configuration? + +template +using NENormalizationLayerFixture = NormalizationValidationFixture; + +TEST_SUITE(Float) +#ifdef ARM_COMPUTE_ENABLE_FP16 +TEST_SUITE(FP16) +FIXTURE_DATA_TEST_CASE(RunSmall, NENormalizationLayerFixture, framework::DatasetMode::PRECOMMIT, combine(NormalizationDataset, framework::dataset::make("DataType", DataType::F16))) +{ + // Validate output + validate(Accessor(_target), _reference, tolerance_f16); +} +FIXTURE_DATA_TEST_CASE(RunLarge, NENormalizationLayerFixture, framework::DatasetMode::NIGHTLY, combine(NormalizationDataset, framework::dataset::make("DataType", DataType::F16))) +{ + // Validate output + validate(Accessor(_target), _reference, tolerance_f16); +} +TEST_SUITE_END() +#endif /* ARM_COMPUTE_ENABLE_FP16 */ + +TEST_SUITE(FP32) +FIXTURE_DATA_TEST_CASE(RunSmall, NENormalizationLayerFixture, framework::DatasetMode::PRECOMMIT, combine(NormalizationDataset, framework::dataset::make("DataType", DataType::F32))) +{ + // Validate output + validate(Accessor(_target), _reference, tolerance_f32); +} +FIXTURE_DATA_TEST_CASE(RunLarge, NENormalizationLayerFixture, framework::DatasetMode::NIGHTLY, combine(NormalizationDataset, framework::dataset::make("DataType", DataType::F32))) +{ + // Validate output + validate(Accessor(_target), _reference, tolerance_f32); +} +TEST_SUITE_END() +TEST_SUITE_END() + +template +using NENormalizationLayerFixedPointFixture = NormalizationValidationFixedPointFixture; + +TEST_SUITE(Quantized) +TEST_SUITE(QS8) +// Testing for fixed point position [1,6) as reciprocal limits the maximum fixed point position to 5 +FIXTURE_DATA_TEST_CASE(RunSmall, NENormalizationLayerFixedPointFixture, framework::DatasetMode::PRECOMMIT, combine(combine(NormalizationDataset, framework::dataset::make("DataType", + DataType::QS8)), + framework::dataset::make("FractionalBits", 1, 6))) +{ + // Validate output + validate(Accessor(_target), _reference, tolerance_qs8); +} +FIXTURE_DATA_TEST_CASE(RunLarge, NENormalizationLayerFixedPointFixture, framework::DatasetMode::NIGHTLY, combine(combine(NormalizationDataset, framework::dataset::make("DataType", + DataType::QS8)), + framework::dataset::make("FractionalBits", 1, 6))) +{ + // 
Validate output + validate(Accessor(_target), _reference, tolerance_qs8); +} +TEST_SUITE_END() +TEST_SUITE_END() + +TEST_SUITE_END() +TEST_SUITE_END() +} // namespace validation +} // namespace test +} // namespace arm_compute diff --git a/tests/validation_new/fixtures/NormalizationLayerFixture.h b/tests/validation_new/fixtures/NormalizationLayerFixture.h new file mode 100644 index 0000000000..044405473b --- /dev/null +++ b/tests/validation_new/fixtures/NormalizationLayerFixture.h @@ -0,0 +1,133 @@ +/* + * Copyright (c) 2017 ARM Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#ifndef ARM_COMPUTE_TEST_NORMALIZATION_LAYER_FIXTURE +#define ARM_COMPUTE_TEST_NORMALIZATION_LAYER_FIXTURE + +#include "arm_compute/core/TensorShape.h" +#include "arm_compute/core/Types.h" +#include "arm_compute/runtime/Tensor.h" +#include "framework/Asserts.h" +#include "framework/Fixture.h" +#include "tests/AssetsLibrary.h" +#include "tests/Globals.h" +#include "tests/IAccessor.h" +#include "tests/validation_new/CPP/NormalizationLayer.h" + +#include + +namespace arm_compute +{ +namespace test +{ +namespace validation +{ +template +class NormalizationValidationFixedPointFixture : public framework::Fixture +{ +public: + template + void setup(TensorShape shape, NormType norm_type, int norm_size, float beta, DataType data_type, int fractional_bits) + { + _fractional_bits = fractional_bits; + NormalizationLayerInfo info(norm_type, norm_size, 5, beta); + + _target = compute_target(shape, info, data_type, fractional_bits); + _reference = compute_reference(shape, info, data_type, fractional_bits); + } + +protected: + template + void fill(U &&tensor) + { + if(_fractional_bits == 0) + { + library->fill_tensor_uniform(tensor, 0); + } + else + { + const int one_fixed = 1 << _fractional_bits; + std::uniform_int_distribution<> distribution(-one_fixed, one_fixed); + library->fill(tensor, distribution, 0); + } + } + + TensorType compute_target(const TensorShape &shape, NormalizationLayerInfo info, DataType data_type, int fixed_point_position = 0) + { + // Create tensors + TensorType src = create_tensor(shape, data_type, 1, fixed_point_position); + TensorType dst = create_tensor(shape, data_type, 1, fixed_point_position); + + // Create and configure function + FunctionType norm_layer; + norm_layer.configure(&src, &dst, info); + + ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), 
framework::LogLevel::ERRORS); + + // Allocate tensors + src.allocator()->allocate(); + dst.allocator()->allocate(); + + ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS); + + // Fill tensors + fill(AccessorType(src)); + + // Compute function + norm_layer.run(); + + return dst; + } + + SimpleTensor compute_reference(const TensorShape &shape, NormalizationLayerInfo info, DataType data_type, int fixed_point_position = 0) + { + // Create reference + SimpleTensor src{ shape, data_type, 1, fixed_point_position }; + + // Fill reference + fill(src); + + return reference::normalization_layer(src, info); + } + + TensorType _target{}; + SimpleTensor _reference{}; + int _fractional_bits{}; +}; + +template +class NormalizationValidationFixture : public NormalizationValidationFixedPointFixture +{ +public: + template + void setup(TensorShape shape, NormType norm_type, int norm_size, float beta, DataType data_type) + { + NormalizationValidationFixedPointFixture::setup(shape, norm_type, norm_size, beta, data_type, 0); + } +}; +} // namespace validation +} // namespace test +} // namespace arm_compute +#endif /* ARM_COMPUTE_TEST_NORMALIZATION_LAYER_FIXTURE */ -- cgit v1.2.1
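
For reference, the new CPP/NormalizationLayer.cpp reference introduced above reduces to the usual local-response-normalization expression: each output is the input divided by (kappa + coeff * sum of squared neighbours) raised to the power beta, with dedicated branches for beta == 1 and beta == 0.5. The standalone sketch below distills the CROSS_MAP float path for a single spatial position; it is not part of the patch, and the window size, the coeff value (a stand-in for info.scale_coeff()) and the input values are illustrative assumptions only.

#include <cmath>
#include <cstdio>
#include <vector>

// Minimal illustration of the CROSS_MAP float path of the new reference:
// square-accumulate the neighbours of each element along the channel axis,
// scale by coeff, add kappa, raise to beta and divide the input by the result.
int main()
{
    const int   depth     = 4;               // number of feature maps at one spatial position
    const int   norm_size = 3;               // normalization window size
    const int   radius    = norm_size / 2;
    const float kappa     = 1.f;
    const float coeff     = 5.f / norm_size; // stand-in for info.scale_coeff(); illustrative value
    const float beta      = 0.5f;

    const std::vector<float> src = { 0.5f, -1.f, 0.25f, 2.f }; // one value per feature map
    std::vector<float>       dst(depth);

    for(int l = 0; l < depth; ++l)
    {
        float accumulated_scale = 0.f;

        for(int j = -radius; j <= radius; ++j)
        {
            const int z = l + j;

            if(z >= 0 && z < depth)
            {
                accumulated_scale += src[z] * src[z];
            }
        }

        dst[l] = src[l] / std::pow(kappa + accumulated_scale * coeff, beta);
    }

    for(int l = 0; l < depth; ++l)
    {
        std::printf("dst[%d] = %f\n", l, dst[l]);
    }

    return 0;
}

With these values the first output is 0.5 / sqrt(1 + 1.25 * 5/3), roughly 0.28; the patch's reference follows the same computation over full 3D shapes and additionally covers the IN_MAP_1D/IN_MAP_2D windows and the QS8 fixed-point arithmetic.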