aboutsummaryrefslogtreecommitdiff
path: root/tests/validation/reference/BatchNormalizationLayer.cpp
diff options
context:
space:
mode:
authorVidhya Sudhan Loganathan <vidhyasudhan.loganathan@arm.com>2018-07-02 09:13:49 +0100
committerAnthony Barbier <anthony.barbier@arm.com>2018-11-02 16:54:10 +0000
commit014333d73883c3872e458cedda5ccef586a7ccd4 (patch)
tree0f28bbc1ab769993af91b40e4584061f6ed6d3fa /tests/validation/reference/BatchNormalizationLayer.cpp
parentde01468bbfff3a7d8bcbba3bfdf5698fb2e3b267 (diff)
downloadComputeLibrary-014333d73883c3872e458cedda5ccef586a7ccd4.tar.gz
COMPMID-970 : Remove QS8 / QS16 support
Removed Fixed point position arguments from test sources Change-Id: I8343724723b71611fd501ed34de0866d3fb60e7e Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/136382 Tested-by: Jenkins <bsgcomp@arm.com> Reviewed-by: Michele DiGiorgio <michele.digiorgio@arm.com> Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
Diffstat (limited to 'tests/validation/reference/BatchNormalizationLayer.cpp')
-rw-r--r--tests/validation/reference/BatchNormalizationLayer.cpp55
1 file changed, 3 insertions, 52 deletions
diff --git a/tests/validation/reference/BatchNormalizationLayer.cpp b/tests/validation/reference/BatchNormalizationLayer.cpp
index c8badacc79..3d1a6ed7d7 100644
--- a/tests/validation/reference/BatchNormalizationLayer.cpp
+++ b/tests/validation/reference/BatchNormalizationLayer.cpp
@@ -36,56 +36,11 @@ namespace validation
{
namespace reference
{
-// Batch Normalization Layer for fixed point type
-template <typename T, typename std::enable_if<std::is_integral<T>::value, int>::type *>
-SimpleTensor<T> batch_normalization_layer(const SimpleTensor<T> &src, const SimpleTensor<T> &mean, const SimpleTensor<T> &var, const SimpleTensor<T> &beta, const SimpleTensor<T> &gamma, float epsilon,
- ActivationLayerInfo act_info, int fixed_point_position)
-{
- ARM_COMPUTE_UNUSED(act_info);
- SimpleTensor<T> result(src.shape(), src.data_type());
-
- const auto cols = static_cast<int>(src.shape()[0]);
- const auto rows = static_cast<int>(src.shape()[1]);
- const auto depth = static_cast<int>(src.shape()[2]);
- const int upper_dims = src.shape().total_size() / (cols * rows * depth);
-
- for(int r = 0; r < upper_dims; ++r)
- {
- for(int i = 0; i < depth; ++i)
- {
- for(int k = 0; k < rows; ++k)
- {
- for(int l = 0; l < cols; ++l)
- {
- const int pos = l + k * cols + i * rows * cols + r * cols * rows * depth;
-
- fixed_point_arithmetic::fixed_point<T> src_qs(src[pos], fixed_point_position, true);
- fixed_point_arithmetic::fixed_point<T> var_qs(var[i], fixed_point_position, true);
- fixed_point_arithmetic::fixed_point<T> mean_qs(mean[i], fixed_point_position, true);
- fixed_point_arithmetic::fixed_point<T> beta_qs(beta[i], fixed_point_position, true);
- fixed_point_arithmetic::fixed_point<T> gamma_qs(gamma[i], fixed_point_position, true);
- fixed_point_arithmetic::fixed_point<T> epsilon_qs(epsilon, fixed_point_position);
-
- auto denominator = fixed_point_arithmetic::inv_sqrt(var_qs + epsilon_qs);
- auto numerator = src_qs - mean_qs;
- auto x_bar = numerator * denominator;
- x_bar = beta_qs + x_bar * gamma_qs;
- result[pos] = x_bar.raw();
- }
- }
- }
- }
-
- return result;
-}
-
// Batch Normalization Layer for floating point type
template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type *>
SimpleTensor<T> batch_normalization_layer(const SimpleTensor<T> &src, const SimpleTensor<T> &mean, const SimpleTensor<T> &var, const SimpleTensor<T> &beta, const SimpleTensor<T> &gamma, float epsilon,
- ActivationLayerInfo act_info, int fixed_point_position)
+ ActivationLayerInfo act_info)
{
- ARM_COMPUTE_UNUSED(fixed_point_position);
-
SimpleTensor<T> result(src.shape(), src.data_type());
const auto cols = static_cast<int>(src.shape()[0]);
@@ -119,14 +74,10 @@ SimpleTensor<T> batch_normalization_layer(const SimpleTensor<T> &src, const Simp
return result;
}
template SimpleTensor<float> batch_normalization_layer(const SimpleTensor<float> &src, const SimpleTensor<float> &mean, const SimpleTensor<float> &var, const SimpleTensor<float> &beta,
- const SimpleTensor<float> &gamma, float epsilon, ActivationLayerInfo act_info, int fixed_point_position);
-template SimpleTensor<int8_t> batch_normalization_layer(const SimpleTensor<int8_t> &src, const SimpleTensor<int8_t> &mean, const SimpleTensor<int8_t> &var, const SimpleTensor<int8_t> &beta,
- const SimpleTensor<int8_t> &gamma, float epsilon, ActivationLayerInfo act_info, int fixed_point_position);
-template SimpleTensor<int16_t> batch_normalization_layer(const SimpleTensor<int16_t> &src, const SimpleTensor<int16_t> &mean, const SimpleTensor<int16_t> &var, const SimpleTensor<int16_t> &beta,
- const SimpleTensor<int16_t> &gamma, float epsilon, ActivationLayerInfo act_info, int fixed_point_position);
+ const SimpleTensor<float> &gamma, float epsilon, ActivationLayerInfo act_info);
template SimpleTensor<half> batch_normalization_layer(const SimpleTensor<half> &src, const SimpleTensor<half> &mean, const SimpleTensor<half> &var,
const SimpleTensor<half> &beta,
- const SimpleTensor<half> &gamma, float epsilon, ActivationLayerInfo act_info, int fixed_point_position);
+ const SimpleTensor<half> &gamma, float epsilon, ActivationLayerInfo act_info);
} // namespace reference
} // namespace validation