diff options
author | Giorgio Arena <giorgio.arena@arm.com> | 2018-02-07 15:38:12 +0000 |
---|---|---|
committer | Anthony Barbier <anthony.barbier@arm.com> | 2018-11-02 16:47:18 +0000 |
commit | 1167487ea8e54a76d0a3625e0aa84e2ad9ffd317 (patch) | |
tree | 287dbc45e895c6b637fecc692c04bd4ae59580ae /tests/validation/reference/BatchNormalizationLayer.cpp | |
parent | 4e1e7dcd581adecd5ad9c0f9503fc3c43f8222ef (diff) | |
download | ComputeLibrary-1167487ea8e54a76d0a3625e0aa84e2ad9ffd317.tar.gz |
COMPMID-897 Merge batch normalization with bounded relu
Change-Id: I9a607fe620f795cdea1a99fdd3f5f8c2fc76f980
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/119234
Tested-by: Jenkins <bsgcomp@arm.com>
Reviewed-by: Gian Marco Iodice <gianmarco.iodice@arm.com>
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
Diffstat (limited to 'tests/validation/reference/BatchNormalizationLayer.cpp')
-rw-r--r-- | tests/validation/reference/BatchNormalizationLayer.cpp | 24 |
1 file changed, 17 insertions, 7 deletions
diff --git a/tests/validation/reference/BatchNormalizationLayer.cpp b/tests/validation/reference/BatchNormalizationLayer.cpp index e4446d1694..a9d9f0320d 100644 --- a/tests/validation/reference/BatchNormalizationLayer.cpp +++ b/tests/validation/reference/BatchNormalizationLayer.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017 ARM Limited. + * Copyright (c) 2017-2018 ARM Limited. * * SPDX-License-Identifier: MIT * @@ -23,6 +23,8 @@ */ #include "BatchNormalizationLayer.h" +#include "ActivationLayer.h" + #include "tests/validation/FixedPoint.h" #include "tests/validation/Helpers.h" @@ -37,8 +39,9 @@ namespace reference // Batch Normalization Layer for fixed point type template <typename T, typename std::enable_if<std::is_integral<T>::value, int>::type *> SimpleTensor<T> batch_normalization_layer(const SimpleTensor<T> &src, const SimpleTensor<T> &mean, const SimpleTensor<T> &var, const SimpleTensor<T> &beta, const SimpleTensor<T> &gamma, float epsilon, - int fixed_point_position) + ActivationLayerInfo act_info, int fixed_point_position) { + ARM_COMPUTE_UNUSED(act_info); SimpleTensor<T> result(src.shape(), src.data_type()); const auto cols = static_cast<int>(src.shape()[0]); @@ -79,7 +82,7 @@ SimpleTensor<T> batch_normalization_layer(const SimpleTensor<T> &src, const Simp // Batch Normalization Layer for floating point type template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type *> SimpleTensor<T> batch_normalization_layer(const SimpleTensor<T> &src, const SimpleTensor<T> &mean, const SimpleTensor<T> &var, const SimpleTensor<T> &beta, const SimpleTensor<T> &gamma, float epsilon, - int fixed_point_position) + ActivationLayerInfo act_info, int fixed_point_position) { ARM_COMPUTE_UNUSED(fixed_point_position); @@ -103,21 +106,28 @@ SimpleTensor<T> batch_normalization_layer(const SimpleTensor<T> &src, const Simp const float numerator = src[pos] - mean[i]; const float x_bar = numerator / denominator; result[pos] = beta[i] + x_bar * gamma[i]; + ; } } 
} } + + if(act_info.enabled()) + { + result = activation_layer(result, act_info); + } + return result; } template SimpleTensor<float> batch_normalization_layer(const SimpleTensor<float> &src, const SimpleTensor<float> &mean, const SimpleTensor<float> &var, const SimpleTensor<float> &beta, - const SimpleTensor<float> &gamma, float epsilon, int fixed_point_position); + const SimpleTensor<float> &gamma, float epsilon, ActivationLayerInfo act_info, int fixed_point_position); template SimpleTensor<int8_t> batch_normalization_layer(const SimpleTensor<int8_t> &src, const SimpleTensor<int8_t> &mean, const SimpleTensor<int8_t> &var, const SimpleTensor<int8_t> &beta, - const SimpleTensor<int8_t> &gamma, float epsilon, int fixed_point_position); + const SimpleTensor<int8_t> &gamma, float epsilon, ActivationLayerInfo act_info, int fixed_point_position); template SimpleTensor<int16_t> batch_normalization_layer(const SimpleTensor<int16_t> &src, const SimpleTensor<int16_t> &mean, const SimpleTensor<int16_t> &var, const SimpleTensor<int16_t> &beta, - const SimpleTensor<int16_t> &gamma, float epsilon, int fixed_point_position); + const SimpleTensor<int16_t> &gamma, float epsilon, ActivationLayerInfo act_info, int fixed_point_position); template SimpleTensor<half> batch_normalization_layer(const SimpleTensor<half> &src, const SimpleTensor<half> &mean, const SimpleTensor<half> &var, const SimpleTensor<half> &beta, - const SimpleTensor<half> &gamma, float epsilon, int fixed_point_position); + const SimpleTensor<half> &gamma, float epsilon, ActivationLayerInfo act_info, int fixed_point_position); } // namespace reference } // namespace validation |