about summary refs log tree commit diff
path: root/tests/validation/fixtures/BatchNormalizationLayerFixture.h
diff options
context:
space:
mode:
authorVidhya Sudhan Loganathan <vidhyasudhan.loganathan@arm.com>2018-07-02 09:13:49 +0100
committerAnthony Barbier <anthony.barbier@arm.com>2018-11-02 16:54:10 +0000
commit014333d73883c3872e458cedda5ccef586a7ccd4 (patch)
tree0f28bbc1ab769993af91b40e4584061f6ed6d3fa /tests/validation/fixtures/BatchNormalizationLayerFixture.h
parentde01468bbfff3a7d8bcbba3bfdf5698fb2e3b267 (diff)
downloadComputeLibrary-014333d73883c3872e458cedda5ccef586a7ccd4.tar.gz
COMPMID-970 : Remove QS8 / QS16 support
Removed Fixed point position arguments from test sources Change-Id: I8343724723b71611fd501ed34de0866d3fb60e7e Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/136382 Tested-by: Jenkins <bsgcomp@arm.com> Reviewed-by: Michele DiGiorgio <michele.digiorgio@arm.com> Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
Diffstat (limited to 'tests/validation/fixtures/BatchNormalizationLayerFixture.h')
-rw-r--r--tests/validation/fixtures/BatchNormalizationLayerFixture.h48
1 file changed, 23 insertions, 25 deletions
diff --git a/tests/validation/fixtures/BatchNormalizationLayerFixture.h b/tests/validation/fixtures/BatchNormalizationLayerFixture.h
index b7e32a6f37..bc3b488a4a 100644
--- a/tests/validation/fixtures/BatchNormalizationLayerFixture.h
+++ b/tests/validation/fixtures/BatchNormalizationLayerFixture.h
@@ -45,15 +45,14 @@ class BatchNormalizationLayerValidationFixedPointFixture : public framework::Fix
{
public:
template <typename...>
- void setup(TensorShape shape0, TensorShape shape1, float epsilon, bool use_beta, bool use_gamma, ActivationLayerInfo act_info, DataType dt, DataLayout data_layout, int fractional_bits)
+ void setup(TensorShape shape0, TensorShape shape1, float epsilon, bool use_beta, bool use_gamma, ActivationLayerInfo act_info, DataType dt, DataLayout data_layout)
{
- _fractional_bits = fractional_bits;
- _data_type = dt;
- _use_beta = use_beta;
- _use_gamma = use_gamma;
+ _data_type = dt;
+ _use_beta = use_beta;
+ _use_gamma = use_gamma;
- _target = compute_target(shape0, shape1, epsilon, act_info, dt, data_layout, fractional_bits);
- _reference = compute_reference(shape0, shape1, epsilon, act_info, dt, fractional_bits);
+ _target = compute_target(shape0, shape1, epsilon, act_info, dt, data_layout);
+ _reference = compute_reference(shape0, shape1, epsilon, act_info, dt);
}
protected:
@@ -93,7 +92,7 @@ protected:
{
int min_bound = 0;
int max_bound = 0;
- std::tie(min_bound, max_bound) = get_batchnormalization_layer_test_bounds<T>(_fractional_bits);
+ std::tie(min_bound, max_bound) = get_batchnormalization_layer_test_bounds<T>();
std::uniform_int_distribution<> distribution(min_bound, max_bound);
std::uniform_int_distribution<> distribution_var(0, max_bound);
library->fill(src_tensor, distribution, 0);
@@ -115,12 +114,12 @@ protected:
else
{
// Fill with default value 1
- library->fill_tensor_value(gamma_tensor, static_cast<T>(1 << (_fractional_bits)));
+ library->fill_tensor_value(gamma_tensor, static_cast<T>(1));
}
}
}
- TensorType compute_target(TensorShape shape0, const TensorShape &shape1, float epsilon, ActivationLayerInfo act_info, DataType dt, DataLayout data_layout, int fixed_point_position)
+ TensorType compute_target(TensorShape shape0, const TensorShape &shape1, float epsilon, ActivationLayerInfo act_info, DataType dt, DataLayout data_layout)
{
if(data_layout == DataLayout::NHWC)
{
@@ -128,12 +127,12 @@ protected:
}
// Create tensors
- TensorType src = create_tensor<TensorType>(shape0, dt, 1, fixed_point_position, QuantizationInfo(), data_layout);
- TensorType dst = create_tensor<TensorType>(shape0, dt, 1, fixed_point_position, QuantizationInfo(), data_layout);
- TensorType mean = create_tensor<TensorType>(shape1, dt, 1, fixed_point_position);
- TensorType var = create_tensor<TensorType>(shape1, dt, 1, fixed_point_position);
- TensorType beta = create_tensor<TensorType>(shape1, dt, 1, fixed_point_position);
- TensorType gamma = create_tensor<TensorType>(shape1, dt, 1, fixed_point_position);
+ TensorType src = create_tensor<TensorType>(shape0, dt, 1, QuantizationInfo(), data_layout);
+ TensorType dst = create_tensor<TensorType>(shape0, dt, 1, QuantizationInfo(), data_layout);
+ TensorType mean = create_tensor<TensorType>(shape1, dt, 1);
+ TensorType var = create_tensor<TensorType>(shape1, dt, 1);
+ TensorType beta = create_tensor<TensorType>(shape1, dt, 1);
+ TensorType gamma = create_tensor<TensorType>(shape1, dt, 1);
// Create and configure function
FunctionType norm;
@@ -172,24 +171,23 @@ protected:
return dst;
}
- SimpleTensor<T> compute_reference(const TensorShape &shape0, const TensorShape &shape1, float epsilon, ActivationLayerInfo act_info, DataType dt, int fixed_point_position)
+ SimpleTensor<T> compute_reference(const TensorShape &shape0, const TensorShape &shape1, float epsilon, ActivationLayerInfo act_info, DataType dt)
{
// Create reference
- SimpleTensor<T> ref_src{ shape0, dt, 1, fixed_point_position };
- SimpleTensor<T> ref_mean{ shape1, dt, 1, fixed_point_position };
- SimpleTensor<T> ref_var{ shape1, dt, 1, fixed_point_position };
- SimpleTensor<T> ref_beta{ shape1, dt, 1, fixed_point_position };
- SimpleTensor<T> ref_gamma{ shape1, dt, 1, fixed_point_position };
+ SimpleTensor<T> ref_src{ shape0, dt, 1 };
+ SimpleTensor<T> ref_mean{ shape1, dt, 1 };
+ SimpleTensor<T> ref_var{ shape1, dt, 1 };
+ SimpleTensor<T> ref_beta{ shape1, dt, 1 };
+ SimpleTensor<T> ref_gamma{ shape1, dt, 1 };
// Fill reference
fill(ref_src, ref_mean, ref_var, ref_beta, ref_gamma);
- return reference::batch_normalization_layer(ref_src, ref_mean, ref_var, ref_beta, ref_gamma, epsilon, act_info, fixed_point_position);
+ return reference::batch_normalization_layer(ref_src, ref_mean, ref_var, ref_beta, ref_gamma, epsilon, act_info);
}
TensorType _target{};
SimpleTensor<T> _reference{};
- int _fractional_bits{};
DataType _data_type{};
bool _use_beta{};
bool _use_gamma{};
@@ -202,7 +200,7 @@ public:
template <typename...>
void setup(TensorShape shape0, TensorShape shape1, float epsilon, bool use_beta, bool use_gamma, ActivationLayerInfo act_info, DataType dt, DataLayout data_layout)
{
- BatchNormalizationLayerValidationFixedPointFixture<TensorType, AccessorType, FunctionType, T>::setup(shape0, shape1, epsilon, use_beta, use_gamma, act_info, dt, data_layout, 0);
+ BatchNormalizationLayerValidationFixedPointFixture<TensorType, AccessorType, FunctionType, T>::setup(shape0, shape1, epsilon, use_beta, use_gamma, act_info, dt, data_layout);
}
};
} // namespace validation