diff options
author | Vidhya Sudhan Loganathan <vidhyasudhan.loganathan@arm.com> | 2018-07-02 09:13:49 +0100 |
---|---|---|
committer | Anthony Barbier <anthony.barbier@arm.com> | 2018-11-02 16:54:10 +0000 |
commit | 014333d73883c3872e458cedda5ccef586a7ccd4 (patch) | |
tree | 0f28bbc1ab769993af91b40e4584061f6ed6d3fa /tests/validation/fixtures/ActivationLayerFixture.h | |
parent | de01468bbfff3a7d8bcbba3bfdf5698fb2e3b267 (diff) | |
download | ComputeLibrary-014333d73883c3872e458cedda5ccef586a7ccd4.tar.gz |
COMPMID-970 : Remove QS8 / QS16 support
Removed Fixed point position arguments from test sources
Change-Id: I8343724723b71611fd501ed34de0866d3fb60e7e
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/136382
Tested-by: Jenkins <bsgcomp@arm.com>
Reviewed-by: Michele DiGiorgio <michele.digiorgio@arm.com>
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
Diffstat (limited to 'tests/validation/fixtures/ActivationLayerFixture.h')
-rw-r--r-- | tests/validation/fixtures/ActivationLayerFixture.h | 37 |
1 file changed, 12 insertions(+), 25 deletions(-)
diff --git a/tests/validation/fixtures/ActivationLayerFixture.h b/tests/validation/fixtures/ActivationLayerFixture.h
index e212c7bd9b..d29d67c8e6 100644
--- a/tests/validation/fixtures/ActivationLayerFixture.h
+++ b/tests/validation/fixtures/ActivationLayerFixture.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -47,17 +47,16 @@ class ActivationValidationGenericFixture : public framework::Fixture
 {
 public:
     template <typename...>
-    void setup(TensorShape shape, bool in_place, ActivationLayerInfo::ActivationFunction function, float alpha_beta, DataType data_type, int fractional_bits, QuantizationInfo quantization_info)
+    void setup(TensorShape shape, bool in_place, ActivationLayerInfo::ActivationFunction function, float alpha_beta, DataType data_type, QuantizationInfo quantization_info)
     {
-        _fractional_bits   = fractional_bits;
         _quantization_info = quantization_info;
         _data_type         = data_type;
         _function          = function;
 
         ActivationLayerInfo info(function, alpha_beta, alpha_beta);
 
-        _target    = compute_target(shape, in_place, info, data_type, fractional_bits, quantization_info);
-        _reference = compute_reference(shape, info, data_type, fractional_bits, quantization_info);
+        _target    = compute_target(shape, in_place, info, data_type, quantization_info);
+        _reference = compute_reference(shape, info, data_type, quantization_info);
     }
 
 protected:
@@ -80,17 +79,17 @@ protected:
         {
             int min_bound = 0;
             int max_bound = 0;
-            std::tie(min_bound, max_bound) = get_activation_layer_test_bounds<T>(_function, _data_type, _fractional_bits);
+            std::tie(min_bound, max_bound) = get_activation_layer_test_bounds<T>(_function, _data_type);
             std::uniform_int_distribution<> distribution(min_bound, max_bound);
             library->fill(tensor, distribution, 0);
         }
     }
 
-    TensorType compute_target(const TensorShape &shape, bool in_place, ActivationLayerInfo info, DataType data_type, int fixed_point_position, QuantizationInfo quantization_info)
+    TensorType compute_target(const TensorShape &shape, bool in_place, ActivationLayerInfo info, DataType data_type, QuantizationInfo quantization_info)
     {
         // Create tensors
-        TensorType src = create_tensor<TensorType>(shape, data_type, 1, fixed_point_position, quantization_info);
-        TensorType dst = create_tensor<TensorType>(shape, data_type, 1, fixed_point_position, quantization_info);
+        TensorType src = create_tensor<TensorType>(shape, data_type, 1, quantization_info);
+        TensorType dst = create_tensor<TensorType>(shape, data_type, 1, quantization_info);
 
         // Create and configure function
         FunctionType act_layer;
@@ -128,10 +127,10 @@ protected:
         }
     }
 
-    SimpleTensor<T> compute_reference(const TensorShape &shape, ActivationLayerInfo info, DataType data_type, int fixed_point_position, QuantizationInfo quantization_info)
+    SimpleTensor<T> compute_reference(const TensorShape &shape, ActivationLayerInfo info, DataType data_type, QuantizationInfo quantization_info)
    {
         // Create reference
-        SimpleTensor<T> src{ shape, data_type, 1, fixed_point_position, quantization_info };
+        SimpleTensor<T> src{ shape, data_type, 1, quantization_info };
 
         // Fill reference
         fill(src);
@@ -141,7 +140,6 @@ protected:
     TensorType      _target{};
     SimpleTensor<T> _reference{};
-    int             _fractional_bits{};
     QuantizationInfo _quantization_info{};
     DataType        _data_type{};
     ActivationLayerInfo::ActivationFunction _function{};
@@ -154,18 +152,7 @@ public:
     template <typename...>
     void setup(TensorShape shape, bool in_place, ActivationLayerInfo::ActivationFunction function, float alpha_beta, DataType data_type)
     {
-        ActivationValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, in_place, function, alpha_beta, data_type, 0, QuantizationInfo());
-    }
-};
-
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
-class ActivationValidationFixedPointFixture : public ActivationValidationGenericFixture<TensorType, AccessorType, FunctionType, T>
-{
-public:
-    template <typename...>
-    void setup(TensorShape shape, bool in_place, ActivationLayerInfo::ActivationFunction function, float alpha_beta, DataType data_type, int fractional_bits)
-    {
-        ActivationValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, in_place, function, alpha_beta, data_type, fractional_bits, QuantizationInfo());
+        ActivationValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, in_place, function, alpha_beta, data_type, QuantizationInfo());
     }
 };
 
@@ -176,7 +163,7 @@ public:
     template <typename...>
     void setup(TensorShape shape, bool in_place, ActivationLayerInfo::ActivationFunction function, float alpha_beta, DataType data_type, QuantizationInfo quantization_info)
     {
-        ActivationValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, in_place, function, alpha_beta, data_type, 0, quantization_info);
+        ActivationValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, in_place, function, alpha_beta, data_type, quantization_info);
     }
 };