diff options
author | Vidhya Sudhan Loganathan <vidhyasudhan.loganathan@arm.com> | 2018-07-02 09:13:49 +0100 |
---|---|---|
committer | Anthony Barbier <anthony.barbier@arm.com> | 2018-11-02 16:54:10 +0000 |
commit | 014333d73883c3872e458cedda5ccef586a7ccd4 (patch) | |
tree | 0f28bbc1ab769993af91b40e4584061f6ed6d3fa /tests/validation/fixtures | |
parent | de01468bbfff3a7d8bcbba3bfdf5698fb2e3b267 (diff) | |
download | ComputeLibrary-014333d73883c3872e458cedda5ccef586a7ccd4.tar.gz |
COMPMID-970 : Remove QS8 / QS16 support
Removed fixed-point position arguments from test sources
Change-Id: I8343724723b71611fd501ed34de0866d3fb60e7e
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/136382
Tested-by: Jenkins <bsgcomp@arm.com>
Reviewed-by: Michele DiGiorgio <michele.digiorgio@arm.com>
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
Diffstat (limited to 'tests/validation/fixtures')
24 files changed, 275 insertions, 350 deletions
diff --git a/tests/validation/fixtures/ActivationLayerFixture.h b/tests/validation/fixtures/ActivationLayerFixture.h index e212c7bd9b..d29d67c8e6 100644 --- a/tests/validation/fixtures/ActivationLayerFixture.h +++ b/tests/validation/fixtures/ActivationLayerFixture.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017 ARM Limited. + * Copyright (c) 2017-2018 ARM Limited. * * SPDX-License-Identifier: MIT * @@ -47,17 +47,16 @@ class ActivationValidationGenericFixture : public framework::Fixture { public: template <typename...> - void setup(TensorShape shape, bool in_place, ActivationLayerInfo::ActivationFunction function, float alpha_beta, DataType data_type, int fractional_bits, QuantizationInfo quantization_info) + void setup(TensorShape shape, bool in_place, ActivationLayerInfo::ActivationFunction function, float alpha_beta, DataType data_type, QuantizationInfo quantization_info) { - _fractional_bits = fractional_bits; _quantization_info = quantization_info; _data_type = data_type; _function = function; ActivationLayerInfo info(function, alpha_beta, alpha_beta); - _target = compute_target(shape, in_place, info, data_type, fractional_bits, quantization_info); - _reference = compute_reference(shape, info, data_type, fractional_bits, quantization_info); + _target = compute_target(shape, in_place, info, data_type, quantization_info); + _reference = compute_reference(shape, info, data_type, quantization_info); } protected: @@ -80,17 +79,17 @@ protected: { int min_bound = 0; int max_bound = 0; - std::tie(min_bound, max_bound) = get_activation_layer_test_bounds<T>(_function, _data_type, _fractional_bits); + std::tie(min_bound, max_bound) = get_activation_layer_test_bounds<T>(_function, _data_type); std::uniform_int_distribution<> distribution(min_bound, max_bound); library->fill(tensor, distribution, 0); } } - TensorType compute_target(const TensorShape &shape, bool in_place, ActivationLayerInfo info, DataType data_type, int fixed_point_position, QuantizationInfo quantization_info) 
+ TensorType compute_target(const TensorShape &shape, bool in_place, ActivationLayerInfo info, DataType data_type, QuantizationInfo quantization_info) { // Create tensors - TensorType src = create_tensor<TensorType>(shape, data_type, 1, fixed_point_position, quantization_info); - TensorType dst = create_tensor<TensorType>(shape, data_type, 1, fixed_point_position, quantization_info); + TensorType src = create_tensor<TensorType>(shape, data_type, 1, quantization_info); + TensorType dst = create_tensor<TensorType>(shape, data_type, 1, quantization_info); // Create and configure function FunctionType act_layer; @@ -128,10 +127,10 @@ protected: } } - SimpleTensor<T> compute_reference(const TensorShape &shape, ActivationLayerInfo info, DataType data_type, int fixed_point_position, QuantizationInfo quantization_info) + SimpleTensor<T> compute_reference(const TensorShape &shape, ActivationLayerInfo info, DataType data_type, QuantizationInfo quantization_info) { // Create reference - SimpleTensor<T> src{ shape, data_type, 1, fixed_point_position, quantization_info }; + SimpleTensor<T> src{ shape, data_type, 1, quantization_info }; // Fill reference fill(src); @@ -141,7 +140,6 @@ protected: TensorType _target{}; SimpleTensor<T> _reference{}; - int _fractional_bits{}; QuantizationInfo _quantization_info{}; DataType _data_type{}; ActivationLayerInfo::ActivationFunction _function{}; @@ -154,18 +152,7 @@ public: template <typename...> void setup(TensorShape shape, bool in_place, ActivationLayerInfo::ActivationFunction function, float alpha_beta, DataType data_type) { - ActivationValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, in_place, function, alpha_beta, data_type, 0, QuantizationInfo()); - } -}; - -template <typename TensorType, typename AccessorType, typename FunctionType, typename T> -class ActivationValidationFixedPointFixture : public ActivationValidationGenericFixture<TensorType, AccessorType, FunctionType, T> -{ -public: - template 
<typename...> - void setup(TensorShape shape, bool in_place, ActivationLayerInfo::ActivationFunction function, float alpha_beta, DataType data_type, int fractional_bits) - { - ActivationValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, in_place, function, alpha_beta, data_type, fractional_bits, QuantizationInfo()); + ActivationValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, in_place, function, alpha_beta, data_type, QuantizationInfo()); } }; @@ -176,7 +163,7 @@ public: template <typename...> void setup(TensorShape shape, bool in_place, ActivationLayerInfo::ActivationFunction function, float alpha_beta, DataType data_type, QuantizationInfo quantization_info) { - ActivationValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, in_place, function, alpha_beta, data_type, 0, quantization_info); + ActivationValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, in_place, function, alpha_beta, data_type, quantization_info); } }; diff --git a/tests/validation/fixtures/ArithmeticAdditionFixture.h b/tests/validation/fixtures/ArithmeticAdditionFixture.h index 99a56777a2..6d529a843c 100644 --- a/tests/validation/fixtures/ArithmeticAdditionFixture.h +++ b/tests/validation/fixtures/ArithmeticAdditionFixture.h @@ -45,11 +45,11 @@ class ArithmeticAdditionGenericFixture : public framework::Fixture { public: template <typename...> - void setup(const TensorShape &shape0, const TensorShape &shape1, DataType data_type0, DataType data_type1, DataType output_data_type, ConvertPolicy convert_policy, int fractional_bits, + void setup(const TensorShape &shape0, const TensorShape &shape1, DataType data_type0, DataType data_type1, DataType output_data_type, ConvertPolicy convert_policy, QuantizationInfo quantization_info) { - _target = compute_target(shape0, shape1, data_type0, data_type1, output_data_type, convert_policy, fractional_bits, quantization_info); - _reference = 
compute_reference(shape0, shape1, data_type0, data_type1, output_data_type, convert_policy, fractional_bits, quantization_info); + _target = compute_target(shape0, shape1, data_type0, data_type1, output_data_type, convert_policy, quantization_info); + _reference = compute_reference(shape0, shape1, data_type0, data_type1, output_data_type, convert_policy, quantization_info); } protected: @@ -60,12 +60,12 @@ protected: } TensorType compute_target(const TensorShape &shape0, const TensorShape &shape1, DataType data_type0, DataType data_type1, DataType output_data_type, ConvertPolicy convert_policy, - int fixed_point_position, QuantizationInfo quantization_info) + QuantizationInfo quantization_info) { // Create tensors - TensorType ref_src1 = create_tensor<TensorType>(shape0, data_type0, 1, fixed_point_position, quantization_info); - TensorType ref_src2 = create_tensor<TensorType>(shape1, data_type1, 1, fixed_point_position, quantization_info); - TensorType dst = create_tensor<TensorType>(TensorShape::broadcast_shape(shape0, shape1), output_data_type, 1, fixed_point_position, quantization_info); + TensorType ref_src1 = create_tensor<TensorType>(shape0, data_type0, 1, quantization_info); + TensorType ref_src2 = create_tensor<TensorType>(shape1, data_type1, 1, quantization_info); + TensorType dst = create_tensor<TensorType>(TensorShape::broadcast_shape(shape0, shape1), output_data_type, 1, quantization_info); // Create and configure function FunctionType add; @@ -95,11 +95,11 @@ protected: } SimpleTensor<T> compute_reference(const TensorShape &shape0, const TensorShape &shape1, DataType data_type0, DataType data_type1, DataType output_data_type, ConvertPolicy convert_policy, - int fixed_point_position, QuantizationInfo quantization_info) + QuantizationInfo quantization_info) { // Create reference - SimpleTensor<T> ref_src1{ shape0, data_type0, 1, fixed_point_position, quantization_info }; - SimpleTensor<T> ref_src2{ shape1, data_type1, 1, fixed_point_position, 
quantization_info }; + SimpleTensor<T> ref_src1{ shape0, data_type0, 1, quantization_info }; + SimpleTensor<T> ref_src2{ shape1, data_type1, 1, quantization_info }; // Fill reference fill(ref_src1, 0); @@ -117,9 +117,9 @@ class ArithmeticAdditionBroadcastValidationFixedPointFixture : public Arithmetic { public: template <typename...> - void setup(const TensorShape &shape0, const TensorShape &shape1, DataType data_type0, DataType data_type1, DataType output_data_type, ConvertPolicy convert_policy, int fractional_bits) + void setup(const TensorShape &shape0, const TensorShape &shape1, DataType data_type0, DataType data_type1, DataType output_data_type, ConvertPolicy convert_policy) { - ArithmeticAdditionGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape0, shape1, data_type0, data_type1, output_data_type, convert_policy, 0, QuantizationInfo()); + ArithmeticAdditionGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape0, shape1, data_type0, data_type1, output_data_type, convert_policy, QuantizationInfo()); } }; @@ -130,7 +130,7 @@ public: template <typename...> void setup(const TensorShape &shape0, const TensorShape &shape1, DataType data_type0, DataType data_type1, DataType output_data_type, ConvertPolicy convert_policy) { - ArithmeticAdditionGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape0, shape1, data_type0, data_type1, output_data_type, convert_policy, 0, QuantizationInfo()); + ArithmeticAdditionGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape0, shape1, data_type0, data_type1, output_data_type, convert_policy, QuantizationInfo()); } }; @@ -139,9 +139,9 @@ class ArithmeticAdditionValidationFixedPointFixture : public ArithmeticAdditionG { public: template <typename...> - void setup(const TensorShape &shape, DataType data_type0, DataType data_type1, DataType output_data_type, ConvertPolicy convert_policy, int fractional_bits) + void setup(const TensorShape &shape, DataType data_type0, 
DataType data_type1, DataType output_data_type, ConvertPolicy convert_policy) { - ArithmeticAdditionGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, shape, data_type0, data_type1, output_data_type, convert_policy, fractional_bits, QuantizationInfo()); + ArithmeticAdditionGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, shape, data_type0, data_type1, output_data_type, convert_policy, QuantizationInfo()); } }; @@ -152,7 +152,7 @@ public: template <typename...> void setup(const TensorShape &shape, DataType data_type0, DataType data_type1, DataType output_data_type, ConvertPolicy convert_policy) { - ArithmeticAdditionValidationFixedPointFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, data_type0, data_type1, output_data_type, convert_policy, 0); + ArithmeticAdditionValidationFixedPointFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, data_type0, data_type1, output_data_type, convert_policy); } }; @@ -163,7 +163,7 @@ public: template <typename...> void setup(const TensorShape &shape, DataType data_type0, DataType data_type1, DataType output_data_type, ConvertPolicy convert_policy, QuantizationInfo quantization_info) { - ArithmeticAdditionGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, shape, data_type0, data_type1, output_data_type, convert_policy, 0, quantization_info); + ArithmeticAdditionGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, shape, data_type0, data_type1, output_data_type, convert_policy, quantization_info); } }; } // namespace validation diff --git a/tests/validation/fixtures/ArithmeticSubtractionFixture.h b/tests/validation/fixtures/ArithmeticSubtractionFixture.h index ba0dd14414..04bb53a184 100644 --- a/tests/validation/fixtures/ArithmeticSubtractionFixture.h +++ b/tests/validation/fixtures/ArithmeticSubtractionFixture.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017 ARM Limited. + * Copyright (c) 2017-2018 ARM Limited. 
* * SPDX-License-Identifier: MIT * @@ -45,11 +45,10 @@ class ArithmeticSubtractionValidationFixedPointFixture : public framework::Fixtu { public: template <typename...> - void setup(TensorShape shape, DataType data_type0, DataType data_type1, DataType output_data_type, ConvertPolicy convert_policy, int fractional_bits) + void setup(TensorShape shape, DataType data_type0, DataType data_type1, DataType output_data_type, ConvertPolicy convert_policy) { - _fractional_bits = fractional_bits; - _target = compute_target(shape, data_type0, data_type1, output_data_type, convert_policy, fractional_bits); - _reference = compute_reference(shape, data_type0, data_type1, output_data_type, convert_policy, fractional_bits); + _target = compute_target(shape, data_type0, data_type1, output_data_type, convert_policy); + _reference = compute_reference(shape, data_type0, data_type1, output_data_type, convert_policy); } protected: @@ -59,12 +58,12 @@ protected: library->fill_tensor_uniform(tensor, i); } - TensorType compute_target(const TensorShape &shape, DataType data_type0, DataType data_type1, DataType output_data_type, ConvertPolicy convert_policy, int fixed_point_position) + TensorType compute_target(const TensorShape &shape, DataType data_type0, DataType data_type1, DataType output_data_type, ConvertPolicy convert_policy) { // Create tensors - TensorType ref_src1 = create_tensor<TensorType>(shape, data_type0, 1, fixed_point_position); - TensorType ref_src2 = create_tensor<TensorType>(shape, data_type1, 1, fixed_point_position); - TensorType dst = create_tensor<TensorType>(shape, output_data_type, 1, fixed_point_position); + TensorType ref_src1 = create_tensor<TensorType>(shape, data_type0, 1); + TensorType ref_src2 = create_tensor<TensorType>(shape, data_type1, 1); + TensorType dst = create_tensor<TensorType>(shape, output_data_type, 1); // Create and configure function FunctionType sub; @@ -93,11 +92,11 @@ protected: return dst; } - SimpleTensor<T3> compute_reference(const 
TensorShape &shape, DataType data_type0, DataType data_type1, DataType output_data_type, ConvertPolicy convert_policy, int fixed_point_position) + SimpleTensor<T3> compute_reference(const TensorShape &shape, DataType data_type0, DataType data_type1, DataType output_data_type, ConvertPolicy convert_policy) { // Create reference - SimpleTensor<T1> ref_src1{ shape, data_type0, 1, fixed_point_position }; - SimpleTensor<T2> ref_src2{ shape, data_type1, 1, fixed_point_position }; + SimpleTensor<T1> ref_src1{ shape, data_type0, 1 }; + SimpleTensor<T2> ref_src2{ shape, data_type1, 1 }; // Fill reference fill(ref_src1, 0); @@ -108,7 +107,6 @@ protected: TensorType _target{}; SimpleTensor<T3> _reference{}; - int _fractional_bits{}; }; template <typename TensorType, typename AccessorType, typename FunctionType, typename T1, typename T2 = T1, typename T3 = T1> class ArithmeticSubtractionValidationFixture : public ArithmeticSubtractionValidationFixedPointFixture<TensorType, AccessorType, FunctionType, T1, T2, T3> @@ -117,7 +115,7 @@ public: template <typename...> void setup(TensorShape shape, DataType data_type0, DataType data_type1, DataType output_data_type, ConvertPolicy convert_policy) { - ArithmeticSubtractionValidationFixedPointFixture<TensorType, AccessorType, FunctionType, T1, T2, T3>::setup(shape, data_type0, data_type1, output_data_type, convert_policy, 0); + ArithmeticSubtractionValidationFixedPointFixture<TensorType, AccessorType, FunctionType, T1, T2, T3>::setup(shape, data_type0, data_type1, output_data_type, convert_policy); } }; } // namespace validation diff --git a/tests/validation/fixtures/BatchNormalizationLayerFixture.h b/tests/validation/fixtures/BatchNormalizationLayerFixture.h index b7e32a6f37..bc3b488a4a 100644 --- a/tests/validation/fixtures/BatchNormalizationLayerFixture.h +++ b/tests/validation/fixtures/BatchNormalizationLayerFixture.h @@ -45,15 +45,14 @@ class BatchNormalizationLayerValidationFixedPointFixture : public framework::Fix { public: 
template <typename...> - void setup(TensorShape shape0, TensorShape shape1, float epsilon, bool use_beta, bool use_gamma, ActivationLayerInfo act_info, DataType dt, DataLayout data_layout, int fractional_bits) + void setup(TensorShape shape0, TensorShape shape1, float epsilon, bool use_beta, bool use_gamma, ActivationLayerInfo act_info, DataType dt, DataLayout data_layout) { - _fractional_bits = fractional_bits; - _data_type = dt; - _use_beta = use_beta; - _use_gamma = use_gamma; + _data_type = dt; + _use_beta = use_beta; + _use_gamma = use_gamma; - _target = compute_target(shape0, shape1, epsilon, act_info, dt, data_layout, fractional_bits); - _reference = compute_reference(shape0, shape1, epsilon, act_info, dt, fractional_bits); + _target = compute_target(shape0, shape1, epsilon, act_info, dt, data_layout); + _reference = compute_reference(shape0, shape1, epsilon, act_info, dt); } protected: @@ -93,7 +92,7 @@ protected: { int min_bound = 0; int max_bound = 0; - std::tie(min_bound, max_bound) = get_batchnormalization_layer_test_bounds<T>(_fractional_bits); + std::tie(min_bound, max_bound) = get_batchnormalization_layer_test_bounds<T>(); std::uniform_int_distribution<> distribution(min_bound, max_bound); std::uniform_int_distribution<> distribution_var(0, max_bound); library->fill(src_tensor, distribution, 0); @@ -115,12 +114,12 @@ protected: else { // Fill with default value 1 - library->fill_tensor_value(gamma_tensor, static_cast<T>(1 << (_fractional_bits))); + library->fill_tensor_value(gamma_tensor, static_cast<T>(1)); } } } - TensorType compute_target(TensorShape shape0, const TensorShape &shape1, float epsilon, ActivationLayerInfo act_info, DataType dt, DataLayout data_layout, int fixed_point_position) + TensorType compute_target(TensorShape shape0, const TensorShape &shape1, float epsilon, ActivationLayerInfo act_info, DataType dt, DataLayout data_layout) { if(data_layout == DataLayout::NHWC) { @@ -128,12 +127,12 @@ protected: } // Create tensors - 
TensorType src = create_tensor<TensorType>(shape0, dt, 1, fixed_point_position, QuantizationInfo(), data_layout); - TensorType dst = create_tensor<TensorType>(shape0, dt, 1, fixed_point_position, QuantizationInfo(), data_layout); - TensorType mean = create_tensor<TensorType>(shape1, dt, 1, fixed_point_position); - TensorType var = create_tensor<TensorType>(shape1, dt, 1, fixed_point_position); - TensorType beta = create_tensor<TensorType>(shape1, dt, 1, fixed_point_position); - TensorType gamma = create_tensor<TensorType>(shape1, dt, 1, fixed_point_position); + TensorType src = create_tensor<TensorType>(shape0, dt, 1, QuantizationInfo(), data_layout); + TensorType dst = create_tensor<TensorType>(shape0, dt, 1, QuantizationInfo(), data_layout); + TensorType mean = create_tensor<TensorType>(shape1, dt, 1); + TensorType var = create_tensor<TensorType>(shape1, dt, 1); + TensorType beta = create_tensor<TensorType>(shape1, dt, 1); + TensorType gamma = create_tensor<TensorType>(shape1, dt, 1); // Create and configure function FunctionType norm; @@ -172,24 +171,23 @@ protected: return dst; } - SimpleTensor<T> compute_reference(const TensorShape &shape0, const TensorShape &shape1, float epsilon, ActivationLayerInfo act_info, DataType dt, int fixed_point_position) + SimpleTensor<T> compute_reference(const TensorShape &shape0, const TensorShape &shape1, float epsilon, ActivationLayerInfo act_info, DataType dt) { // Create reference - SimpleTensor<T> ref_src{ shape0, dt, 1, fixed_point_position }; - SimpleTensor<T> ref_mean{ shape1, dt, 1, fixed_point_position }; - SimpleTensor<T> ref_var{ shape1, dt, 1, fixed_point_position }; - SimpleTensor<T> ref_beta{ shape1, dt, 1, fixed_point_position }; - SimpleTensor<T> ref_gamma{ shape1, dt, 1, fixed_point_position }; + SimpleTensor<T> ref_src{ shape0, dt, 1 }; + SimpleTensor<T> ref_mean{ shape1, dt, 1 }; + SimpleTensor<T> ref_var{ shape1, dt, 1 }; + SimpleTensor<T> ref_beta{ shape1, dt, 1 }; + SimpleTensor<T> ref_gamma{ shape1, dt, 1 
}; // Fill reference fill(ref_src, ref_mean, ref_var, ref_beta, ref_gamma); - return reference::batch_normalization_layer(ref_src, ref_mean, ref_var, ref_beta, ref_gamma, epsilon, act_info, fixed_point_position); + return reference::batch_normalization_layer(ref_src, ref_mean, ref_var, ref_beta, ref_gamma, epsilon, act_info); } TensorType _target{}; SimpleTensor<T> _reference{}; - int _fractional_bits{}; DataType _data_type{}; bool _use_beta{}; bool _use_gamma{}; @@ -202,7 +200,7 @@ public: template <typename...> void setup(TensorShape shape0, TensorShape shape1, float epsilon, bool use_beta, bool use_gamma, ActivationLayerInfo act_info, DataType dt, DataLayout data_layout) { - BatchNormalizationLayerValidationFixedPointFixture<TensorType, AccessorType, FunctionType, T>::setup(shape0, shape1, epsilon, use_beta, use_gamma, act_info, dt, data_layout, 0); + BatchNormalizationLayerValidationFixedPointFixture<TensorType, AccessorType, FunctionType, T>::setup(shape0, shape1, epsilon, use_beta, use_gamma, act_info, dt, data_layout); } }; } // namespace validation diff --git a/tests/validation/fixtures/ConvolutionLayerFixture.h b/tests/validation/fixtures/ConvolutionLayerFixture.h index 00ca0778f5..7ba2583bbe 100644 --- a/tests/validation/fixtures/ConvolutionLayerFixture.h +++ b/tests/validation/fixtures/ConvolutionLayerFixture.h @@ -57,12 +57,11 @@ public: public: template <typename...> void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation, bool reshape_weights, - DataType data_type, DataLayout data_layout, int fractional_bits, QuantizationInfo quantization_info, ActivationLayerInfo act_info) + DataType data_type, DataLayout data_layout, QuantizationInfo quantization_info, ActivationLayerInfo act_info) { _data_type = data_type; _is_quantized = is_data_type_quantized_asymmetric(data_type); _bias_data_type = _is_quantized ? 
DataType::S32 : data_type; - _fractional_bits = fractional_bits; _quantization_info = quantization_info; _data_layout = data_layout; @@ -117,10 +116,10 @@ protected: TensorShape reshaped_weights_shape(weights_shape); // Create tensors - TensorType src = create_tensor<TensorType>(input_shape, _data_type, 1, _fractional_bits, _quantization_info, _data_layout); - TensorType weights = create_tensor<TensorType>(reshaped_weights_shape, _data_type, 1, _fractional_bits, _quantization_info, _data_layout); - TensorType bias = create_tensor<TensorType>(bias_shape, _bias_data_type, 1, _fractional_bits, _quantization_info, _data_layout); - TensorType dst = create_tensor<TensorType>(output_shape, _data_type, 1, _fractional_bits, _quantization_info, _data_layout); + TensorType src = create_tensor<TensorType>(input_shape, _data_type, 1, _quantization_info, _data_layout); + TensorType weights = create_tensor<TensorType>(reshaped_weights_shape, _data_type, 1, _quantization_info, _data_layout); + TensorType bias = create_tensor<TensorType>(bias_shape, _bias_data_type, 1, _quantization_info, _data_layout); + TensorType dst = create_tensor<TensorType>(output_shape, _data_type, 1, _quantization_info, _data_layout); // Create and configure function FunctionType conv; @@ -157,9 +156,9 @@ protected: const Size2D &dilation, const ActivationLayerInfo act_info) { // Create reference - SimpleTensor<T> src{ input_shape, _data_type, 1, _fractional_bits, _quantization_info }; - SimpleTensor<T> weights{ weights_shape, _data_type, 1, _fractional_bits, _quantization_info }; - SimpleTensor<TBias> bias{ bias_shape, _bias_data_type, 1, _fractional_bits, _quantization_info }; + SimpleTensor<T> src{ input_shape, _data_type, 1, _quantization_info }; + SimpleTensor<T> weights{ weights_shape, _data_type, 1, _quantization_info }; + SimpleTensor<TBias> bias{ bias_shape, _bias_data_type, 1, _quantization_info }; // Fill reference fill(src, 0); @@ -176,7 +175,6 @@ protected: DataType _data_type{}; DataType 
_bias_data_type{}; DataLayout _data_layout{}; - int _fractional_bits{}; QuantizationInfo _quantization_info{}; bool _is_quantized = false; }; @@ -189,7 +187,7 @@ public: void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation, bool reshape_weights, DataType data_type, DataLayout data_layout, ActivationLayerInfo act_info) { - ConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, dilation, reshape_weights, data_type, data_layout, 0, + ConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, dilation, reshape_weights, data_type, data_layout, QuantizationInfo(), act_info); } }; @@ -200,11 +198,11 @@ class ConvolutionValidationFixedPointFixture : public ConvolutionValidationGener public: template <typename...> void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation, bool reshape_weights, DataType data_type, - int fractional_bits, ActivationLayerInfo act_info) + ActivationLayerInfo act_info) { ConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, dilation, reshape_weights, data_type, DataLayout::NCHW, - fractional_bits, QuantizationInfo(), act_info); + QuantizationInfo(), act_info); } }; @@ -217,7 +215,7 @@ public: DataLayout data_layout, QuantizationInfo quantization_info, ActivationLayerInfo act_info) { ConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, dilation, reshape_weights, - data_type, data_layout, 0, quantization_info, act_info); + data_type, data_layout, quantization_info, act_info); } }; } // namespace validation 
diff --git a/tests/validation/fixtures/DeconvolutionLayerFixture.h b/tests/validation/fixtures/DeconvolutionLayerFixture.h index 137068a4b9..12ce9cefc7 100644 --- a/tests/validation/fixtures/DeconvolutionLayerFixture.h +++ b/tests/validation/fixtures/DeconvolutionLayerFixture.h @@ -45,13 +45,12 @@ class DeconvolutionLayerFixtureBase : public framework::Fixture public: template <typename...> void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, - const std::pair<unsigned int, unsigned int> &inner_border, DataType data_type, int fractional_bits) + const std::pair<unsigned int, unsigned int> &inner_border, DataType data_type) { - _fractional_bits = fractional_bits; - _data_type = data_type; + _data_type = data_type; - _target = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, inner_border, data_type, fractional_bits); - _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, inner_border, data_type, fractional_bits); + _target = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, inner_border, data_type); + _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, inner_border, data_type); } protected: @@ -70,13 +69,13 @@ protected: } TensorType compute_target(const TensorShape &input_shape, const TensorShape &weights_shape, const TensorShape &bias_shape, const TensorShape &output_shape, - const PadStrideInfo &info, const std::pair<unsigned int, unsigned int> &inner_border, DataType data_type, int fixed_point_position) + const PadStrideInfo &info, const std::pair<unsigned int, unsigned int> &inner_border, DataType data_type) { // Create tensors - TensorType src = create_tensor<TensorType>(input_shape, data_type, 1, fixed_point_position); - TensorType weights = create_tensor<TensorType>(weights_shape, data_type, 1, fixed_point_position); - TensorType bias = 
create_tensor<TensorType>(bias_shape, data_type, 1, fixed_point_position); - TensorType dst = create_tensor<TensorType>(output_shape, data_type, 1, fixed_point_position); + TensorType src = create_tensor<TensorType>(input_shape, data_type, 1); + TensorType weights = create_tensor<TensorType>(weights_shape, data_type, 1); + TensorType bias = create_tensor<TensorType>(bias_shape, data_type, 1); + TensorType dst = create_tensor<TensorType>(output_shape, data_type, 1); // Create and configure function FunctionType conv; @@ -110,12 +109,12 @@ protected: } SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &weights_shape, const TensorShape &bias_shape, const TensorShape &output_shape, - const PadStrideInfo &info, const std::pair<unsigned int, unsigned int> inner_border, DataType data_type, int fixed_point_position) + const PadStrideInfo &info, const std::pair<unsigned int, unsigned int> inner_border, DataType data_type) { // Create reference - SimpleTensor<T> src{ input_shape, data_type, 1, fixed_point_position }; - SimpleTensor<T> weights{ weights_shape, data_type, 1, fixed_point_position }; - SimpleTensor<T> bias{ bias_shape, data_type, 1, fixed_point_position }; + SimpleTensor<T> src{ input_shape, data_type, 1 }; + SimpleTensor<T> weights{ weights_shape, data_type, 1 }; + SimpleTensor<T> bias{ bias_shape, data_type, 1 }; // Fill reference fill(src, 0); @@ -127,7 +126,6 @@ protected: TensorType _target{}; SimpleTensor<T> _reference{}; - int _fractional_bits{}; DataType _data_type{}; }; @@ -146,7 +144,7 @@ public: const std::pair<unsigned int, unsigned int> inner_border(inner_border_right, inner_border_top); auto out_dim = deconvolution_output_dimensions(input_shape.x(), input_shape.y(), kernel_size_x, kernel_size_y, padx, pady, inner_border.first, inner_border.second, sx, sy); TensorShape output_shape = deconvolution_output_shape(out_dim, input_shape, weights_shape); - DeconvolutionLayerFixtureBase<TensorType, AccessorType, 
FunctionType, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, inner_border, data_type, 0); + DeconvolutionLayerFixtureBase<TensorType, AccessorType, FunctionType, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, inner_border, data_type); } }; diff --git a/tests/validation/fixtures/DepthConcatenateLayerFixture.h b/tests/validation/fixtures/DepthConcatenateLayerFixture.h index 6e112c7359..76b56ad26e 100644 --- a/tests/validation/fixtures/DepthConcatenateLayerFixture.h +++ b/tests/validation/fixtures/DepthConcatenateLayerFixture.h @@ -102,12 +102,12 @@ protected: for(const auto &shape : shapes) { - srcs.emplace_back(create_tensor<TensorType>(shape, data_type, 1, _fractional_bits)); + srcs.emplace_back(create_tensor<TensorType>(shape, data_type, 1)); src_ptrs.emplace_back(&srcs.back()); } TensorShape dst_shape = calculate_depth_concatenate_shape(shapes); - TensorType dst = create_tensor<TensorType>(dst_shape, data_type, 1, _fractional_bits); + TensorType dst = create_tensor<TensorType>(dst_shape, data_type, 1); // Create and configure function FunctionType depth_concat; @@ -151,7 +151,7 @@ protected: int i = 0; for(const auto &shape : shapes) { - srcs.emplace_back(shape, data_type, 1, _fractional_bits); + srcs.emplace_back(shape, data_type, 1); fill(srcs.back(), i++); } @@ -160,9 +160,6 @@ protected: TensorType _target{}; SimpleTensor<T> _reference{}; - -private: - int _fractional_bits{ 1 }; }; } // namespace validation } // namespace test diff --git a/tests/validation/fixtures/DepthConvertLayerFixture.h b/tests/validation/fixtures/DepthConvertLayerFixture.h index 4b4e959273..eb1c083667 100644 --- a/tests/validation/fixtures/DepthConvertLayerFixture.h +++ b/tests/validation/fixtures/DepthConvertLayerFixture.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017 ARM Limited. + * Copyright (c) 2017-2018 ARM Limited. 
* * SPDX-License-Identifier: MIT * @@ -45,12 +45,11 @@ class DepthConvertLayerValidationFixedPointFixture : public framework::Fixture { public: template <typename...> - void setup(TensorShape shape, DataType dt_in, DataType dt_out, ConvertPolicy policy, uint32_t shift, uint32_t fractional_bits) + void setup(TensorShape shape, DataType dt_in, DataType dt_out, ConvertPolicy policy, uint32_t shift) { - _shift = shift; - _fractional_bits = fractional_bits; - _target = compute_target(shape, dt_in, dt_out, policy, shift, fractional_bits); - _reference = compute_reference(shape, dt_in, dt_out, policy, shift, fractional_bits); + _shift = shift; + _target = compute_target(shape, dt_in, dt_out, policy, shift); + _reference = compute_reference(shape, dt_in, dt_out, policy, shift); } protected: @@ -60,11 +59,11 @@ protected: library->fill_tensor_uniform(tensor, i); } - TensorType compute_target(const TensorShape &shape, DataType dt_in, DataType dt_out, ConvertPolicy policy, uint32_t shift, uint32_t fixed_point_position) + TensorType compute_target(const TensorShape &shape, DataType dt_in, DataType dt_out, ConvertPolicy policy, uint32_t shift) { // Create tensors - TensorType src = create_tensor<TensorType>(shape, dt_in, 1, static_cast<int>(fixed_point_position)); - TensorType dst = create_tensor<TensorType>(shape, dt_out, 1, static_cast<int>(fixed_point_position)); + TensorType src = create_tensor<TensorType>(shape, dt_in, 1); + TensorType dst = create_tensor<TensorType>(shape, dt_out, 1); // Create and configure function FunctionType depth_convert; @@ -89,10 +88,10 @@ protected: return dst; } - SimpleTensor<T2> compute_reference(const TensorShape &shape, DataType dt_in, DataType dt_out, ConvertPolicy policy, uint32_t shift, uint32_t fixed_point_position) + SimpleTensor<T2> compute_reference(const TensorShape &shape, DataType dt_in, DataType dt_out, ConvertPolicy policy, uint32_t shift) { // Create reference - SimpleTensor<T1> src{ shape, dt_in, 1, 
static_cast<int>(fixed_point_position) }; + SimpleTensor<T1> src{ shape, dt_in, 1 }; // Fill reference fill(src, 0); @@ -102,7 +101,6 @@ protected: TensorType _target{}; SimpleTensor<T2> _reference{}; - int _fractional_bits{}; int _shift{}; }; template <typename TensorType, typename AccessorType, typename FunctionType, typename T1, typename T2> @@ -112,7 +110,7 @@ public: template <typename...> void setup(TensorShape shape, DataType dt_in, DataType dt_out, ConvertPolicy policy, uint32_t shift) { - DepthConvertLayerValidationFixedPointFixture<TensorType, AccessorType, FunctionType, T1, T2>::setup(shape, dt_in, dt_out, policy, shift, 0); + DepthConvertLayerValidationFixedPointFixture<TensorType, AccessorType, FunctionType, T1, T2>::setup(shape, dt_in, dt_out, policy, shift); } }; template <typename TensorType, typename AccessorType, typename FunctionType, typename T1, typename T2> @@ -120,9 +118,9 @@ class DepthConvertLayerValidationFractionalBitsFixture : public DepthConvertLaye { public: template <typename...> - void setup(TensorShape shape, DataType dt_in, DataType dt_out, ConvertPolicy policy, uint32_t fractional_bits) + void setup(TensorShape shape, DataType dt_in, DataType dt_out, ConvertPolicy policy) { - DepthConvertLayerValidationFixedPointFixture<TensorType, AccessorType, FunctionType, T1, T2>::setup(shape, dt_in, dt_out, policy, 0, fractional_bits); + DepthConvertLayerValidationFixedPointFixture<TensorType, AccessorType, FunctionType, T1, T2>::setup(shape, dt_in, dt_out, policy, 0); } }; } // namespace validation diff --git a/tests/validation/fixtures/DepthwiseConvolutionLayerFixture.h b/tests/validation/fixtures/DepthwiseConvolutionLayerFixture.h index 2f01f43d8b..5428154a2b 100644 --- a/tests/validation/fixtures/DepthwiseConvolutionLayerFixture.h +++ b/tests/validation/fixtures/DepthwiseConvolutionLayerFixture.h @@ -115,10 +115,10 @@ protected: } // Create tensors - TensorType src = create_tensor<TensorType>(input_shape, data_type, 1, 0, 
quantization_info, data_layout); - TensorType weights = create_tensor<TensorType>(weights_shape, data_type, 1, 0, quantization_info, data_layout); - TensorType biases = create_tensor<TensorType>(biases_shape, bias_data_type, 1, 0, quantization_info, data_layout); - TensorType dst = create_tensor<TensorType>(output_shape, data_type, 1, 0, quantization_info, data_layout); + TensorType src = create_tensor<TensorType>(input_shape, data_type, 1, quantization_info, data_layout); + TensorType weights = create_tensor<TensorType>(weights_shape, data_type, 1, quantization_info, data_layout); + TensorType biases = create_tensor<TensorType>(biases_shape, bias_data_type, 1, quantization_info, data_layout); + TensorType dst = create_tensor<TensorType>(output_shape, data_type, 1, quantization_info, data_layout); // Create Depthwise Convolution configure function FunctionType dwc; @@ -155,9 +155,9 @@ protected: unsigned int depth_multiplier, const DataType data_type, const DataType bias_data_type, const QuantizationInfo quantization_info) { - SimpleTensor<T> src{ in_shape, data_type, 1, 0, quantization_info }; - SimpleTensor<T> weights{ weights_shape, data_type, 1, 0, quantization_info }; - SimpleTensor<TBias> biases{ biases_shape, bias_data_type, 1, 0, quantization_info }; + SimpleTensor<T> src{ in_shape, data_type, 1, quantization_info }; + SimpleTensor<T> weights{ weights_shape, data_type, 1, quantization_info }; + SimpleTensor<TBias> biases{ biases_shape, bias_data_type, 1, quantization_info }; fill(src, 0); fill(weights, 1); diff --git a/tests/validation/fixtures/DirectConvolutionLayerFixture.h b/tests/validation/fixtures/DirectConvolutionLayerFixture.h index 38ddf33e24..9a58167605 100644 --- a/tests/validation/fixtures/DirectConvolutionLayerFixture.h +++ b/tests/validation/fixtures/DirectConvolutionLayerFixture.h @@ -54,11 +54,10 @@ public: public: template <typename...> void setup(TensorShape input_shape, int stride_x, int stride_y, int pad_x, int pad_y, unsigned int 
kernel_size, unsigned int num_kernels, - DataType data_type, int fractional_bits, QuantizationInfo quantization_info, ActivationLayerInfo act_info, DataLayout data_layout) + DataType data_type, QuantizationInfo quantization_info, ActivationLayerInfo act_info, DataLayout data_layout) { ARM_COMPUTE_ERROR_ON(data_layout == DataLayout::UNKNOWN); - _fractional_bits = fractional_bits; _quantization_info = quantization_info; _data_type = data_type; @@ -67,30 +66,29 @@ public: const PadStrideInfo info(stride_x, stride_y, pad_x, pad_y, DimensionRoundingType::FLOOR); const DataType bias_data_type = is_data_type_quantized_asymmetric(data_type) ? DataType::S32 : data_type; - TensorInfo input_info = TensorInfo(input_shape, 1, data_type, _fractional_bits); - TensorInfo weights_info = TensorInfo(weights_shape, 1, data_type, _fractional_bits); + TensorInfo input_info = TensorInfo(input_shape, 1, data_type); + TensorInfo weights_info = TensorInfo(weights_shape, 1, data_type); const TensorShape output_shape = compute_deep_convolution_shape(input_info, weights_info, info); - _target = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, data_type, bias_data_type, fractional_bits, quantization_info, act_info, data_layout); - _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, data_type, bias_data_type, fractional_bits, quantization_info, act_info); + _target = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, data_type, bias_data_type, quantization_info, act_info, data_layout); + _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, data_type, bias_data_type, quantization_info, act_info); } template <typename...> void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation, - DataType data_type, int fractional_bits, QuantizationInfo quantization_info, ActivationLayerInfo 
act_info, DataLayout data_layout) + DataType data_type, QuantizationInfo quantization_info, ActivationLayerInfo act_info, DataLayout data_layout) { ARM_COMPUTE_ERROR_ON(data_layout == DataLayout::UNKNOWN); ARM_COMPUTE_UNUSED(dilation); - _fractional_bits = fractional_bits; _quantization_info = quantization_info; _data_type = data_type; const DataType bias_data_type = is_data_type_quantized_asymmetric(data_type) ? DataType::S32 : data_type; - _target = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, data_type, bias_data_type, fractional_bits, quantization_info, act_info, data_layout); - _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, data_type, bias_data_type, fractional_bits, quantization_info, act_info); + _target = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, data_type, bias_data_type, quantization_info, act_info, data_layout); + _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, data_type, bias_data_type, quantization_info, act_info); } protected: @@ -124,7 +122,7 @@ protected: } TensorType compute_target(TensorShape input_shape, TensorShape weights_shape, const TensorShape &bias_shape, TensorShape output_shape, const PadStrideInfo &info, - DataType data_type, DataType bias_data_type, int fixed_point_position, QuantizationInfo quantization_info, ActivationLayerInfo act_info, const DataLayout &data_layout) + DataType data_type, DataType bias_data_type, QuantizationInfo quantization_info, ActivationLayerInfo act_info, const DataLayout &data_layout) { if(data_layout == DataLayout::NHWC) { @@ -134,10 +132,10 @@ protected: } // Create tensors - TensorType src = create_tensor<TensorType>(input_shape, data_type, 1, fixed_point_position, quantization_info, data_layout); - TensorType weights = create_tensor<TensorType>(weights_shape, data_type, 1, fixed_point_position, quantization_info, data_layout); - TensorType bias = 
create_tensor<TensorType>(bias_shape, bias_data_type, 1, fixed_point_position, quantization_info); - TensorType dst = create_tensor<TensorType>(output_shape, data_type, 1, fixed_point_position, quantization_info, data_layout); + TensorType src = create_tensor<TensorType>(input_shape, data_type, 1, quantization_info, data_layout); + TensorType weights = create_tensor<TensorType>(weights_shape, data_type, 1, quantization_info, data_layout); + TensorType bias = create_tensor<TensorType>(bias_shape, bias_data_type, 1, quantization_info); + TensorType dst = create_tensor<TensorType>(output_shape, data_type, 1, quantization_info, data_layout); // Create and configure function FunctionType conv; @@ -171,12 +169,12 @@ protected: } SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &weights_shape, const TensorShape &bias_shape, const TensorShape &output_shape, const PadStrideInfo &info, - DataType data_type, DataType bias_data_type, int fixed_point_position, QuantizationInfo quantization_info, ActivationLayerInfo act_info) + DataType data_type, DataType bias_data_type, QuantizationInfo quantization_info, ActivationLayerInfo act_info) { // Create reference - SimpleTensor<T> src{ input_shape, data_type, 1, fixed_point_position, quantization_info }; - SimpleTensor<T> weights{ weights_shape, data_type, 1, fixed_point_position, quantization_info }; - SimpleTensor<TBias> bias{ bias_shape, bias_data_type, 1, fixed_point_position, quantization_info }; + SimpleTensor<T> src{ input_shape, data_type, 1, quantization_info }; + SimpleTensor<T> weights{ weights_shape, data_type, 1, quantization_info }; + SimpleTensor<TBias> bias{ bias_shape, bias_data_type, 1, quantization_info }; // Fill reference fill(src, 0); @@ -190,7 +188,6 @@ protected: TensorType _target{}; SimpleTensor<T> _reference{}; - int _fractional_bits{}; QuantizationInfo _quantization_info{}; DataType _data_type{}; }; @@ -203,7 +200,7 @@ public: void setup(TensorShape input_shape, int 
stride_x, int stride_y, int pad_x, int pad_y, unsigned int kernel_size, unsigned int num_kernels, DataType data_type, ActivationLayerInfo act_info, DataLayout data_layout) { - DirectConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, stride_x, stride_y, pad_x, pad_y, kernel_size, num_kernels, data_type, 0, QuantizationInfo(), + DirectConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, stride_x, stride_y, pad_x, pad_y, kernel_size, num_kernels, data_type, QuantizationInfo(), act_info, data_layout); } }; @@ -213,10 +210,10 @@ class DirectConvolutionValidationFixedPointFixture : public DirectConvolutionVal { public: template <typename...> - void setup(TensorShape input_shape, int stride_x, int stride_y, int pad_x, int pad_y, unsigned int kernel_size, unsigned int num_kernels, DataType data_type, int fractional_bits, + void setup(TensorShape input_shape, int stride_x, int stride_y, int pad_x, int pad_y, unsigned int kernel_size, unsigned int num_kernels, DataType data_type, ActivationLayerInfo act_info) { - DirectConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, stride_x, stride_y, pad_x, pad_y, kernel_size, num_kernels, data_type, fractional_bits, + DirectConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, stride_x, stride_y, pad_x, pad_y, kernel_size, num_kernels, data_type, QuantizationInfo(), act_info, DataLayout::NCHW); } }; @@ -229,7 +226,7 @@ public: void setup(TensorShape input_shape, int stride_x, int stride_y, int pad_x, int pad_y, unsigned int kernel_size, unsigned int num_kernels, DataType data_type, QuantizationInfo quantization_info, ActivationLayerInfo act_info) { - DirectConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, stride_x, stride_y, pad_x, pad_y, kernel_size, num_kernels, data_type, 0, quantization_info, + 
DirectConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, stride_x, stride_y, pad_x, pad_y, kernel_size, num_kernels, data_type, quantization_info, act_info, DataLayout::NCHW); } }; @@ -242,7 +239,7 @@ public: void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation, DataType data_type, QuantizationInfo quantization_info, ActivationLayerInfo act_info) { - DirectConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, dilation, data_type, 0, quantization_info, + DirectConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, dilation, data_type, quantization_info, act_info, DataLayout::NCHW); } }; @@ -255,7 +252,7 @@ public: void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation, DataType data_type, ActivationLayerInfo act_info) { - DirectConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, dilation, data_type, 0, QuantizationInfo(), + DirectConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, dilation, data_type, QuantizationInfo(), act_info, DataLayout::NCHW); } }; diff --git a/tests/validation/fixtures/DirectConvolutionLayerTensorShiftFixture.h b/tests/validation/fixtures/DirectConvolutionLayerTensorShiftFixture.h index 09b6d830b4..144c7b7d0d 100644 --- a/tests/validation/fixtures/DirectConvolutionLayerTensorShiftFixture.h +++ b/tests/validation/fixtures/DirectConvolutionLayerTensorShiftFixture.h @@ -50,9 +50,8 @@ public: public: template <typename...> void setup(TensorShape 
input_shape, int stride_x, int stride_y, int pad_x, int pad_y, unsigned int kernel_size, unsigned int num_kernels, - DataType data_type, int fractional_bits, QuantizationInfo quantization_info) + DataType data_type, QuantizationInfo quantization_info) { - _fractional_bits = fractional_bits; _quantization_info = quantization_info; _data_type = data_type; @@ -62,24 +61,23 @@ public: const TensorShape output_shape = get_output_shape(input_shape, weights_shape, info); const DataType bias_data_type = is_data_type_quantized_asymmetric(data_type) ? DataType::S32 : data_type; - _target = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, data_type, bias_data_type, fractional_bits, quantization_info); - _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, data_type, bias_data_type, fractional_bits, quantization_info); + _target = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, data_type, bias_data_type, quantization_info); + _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, data_type, bias_data_type, quantization_info); } template <typename...> void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, unsigned int dilation_x, unsigned int dilation_y, - DataType data_type, int fractional_bits, QuantizationInfo quantization_info) + DataType data_type, QuantizationInfo quantization_info) { ARM_COMPUTE_UNUSED(dilation_x, dilation_y); - _fractional_bits = fractional_bits; _quantization_info = quantization_info; _data_type = data_type; const DataType bias_data_type = is_data_type_quantized_asymmetric(data_type) ? 
DataType::S32 : data_type; - _target = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, data_type, bias_data_type, fractional_bits, quantization_info); - _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, data_type, bias_data_type, fractional_bits, quantization_info); + _target = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, data_type, bias_data_type, quantization_info); + _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, data_type, bias_data_type, quantization_info); } protected: @@ -113,16 +111,16 @@ protected: } TensorType compute_target(const TensorShape &input_shape, const TensorShape &weights_shape, const TensorShape &bias_shape, const TensorShape &output_shape, const PadStrideInfo &info, - DataType data_type, DataType bias_data_type, int fixed_point_position, QuantizationInfo quantization_info) + DataType data_type, DataType bias_data_type, QuantizationInfo quantization_info) { // Create tensors - TensorType src = create_tensor<TensorType>(input_shape, data_type, 1, fixed_point_position, quantization_info); - TensorType weights = create_tensor<TensorType>(weights_shape, data_type, 1, fixed_point_position, quantization_info); - TensorType bias = create_tensor<TensorType>(bias_shape, bias_data_type, 1, fixed_point_position, quantization_info); - TensorType dst = create_tensor<TensorType>(output_shape, data_type, 1, fixed_point_position, quantization_info); + TensorType src = create_tensor<TensorType>(input_shape, data_type, 1, quantization_info); + TensorType weights = create_tensor<TensorType>(weights_shape, data_type, 1, quantization_info); + TensorType bias = create_tensor<TensorType>(bias_shape, bias_data_type, 1, quantization_info); + TensorType dst = create_tensor<TensorType>(output_shape, data_type, 1, quantization_info); TensorShape output_shape1 = get_output_shape(output_shape, weights_shape, info); - TensorType dst1 
= create_tensor<TensorType>(output_shape1, data_type, 1, fixed_point_position, quantization_info); + TensorType dst1 = create_tensor<TensorType>(output_shape1, data_type, 1, quantization_info); // Create and configure function FunctionType conv; @@ -164,14 +162,14 @@ protected: } SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &weights_shape, const TensorShape &bias_shape, const TensorShape &output_shape, const PadStrideInfo &info, - DataType data_type, DataType bias_data_type, int fixed_point_position, QuantizationInfo quantization_info) + DataType data_type, DataType bias_data_type, QuantizationInfo quantization_info) { // Create reference - SimpleTensor<T> src{ input_shape, data_type, 1, fixed_point_position, quantization_info }; - SimpleTensor<T> weights{ weights_shape, data_type, 1, fixed_point_position, quantization_info }; - SimpleTensor<TBias> bias{ bias_shape, bias_data_type, 1, fixed_point_position, quantization_info }; + SimpleTensor<T> src{ input_shape, data_type, 1, quantization_info }; + SimpleTensor<T> weights{ weights_shape, data_type, 1, quantization_info }; + SimpleTensor<TBias> bias{ bias_shape, bias_data_type, 1, quantization_info }; - SimpleTensor<T> dst{ output_shape, data_type, 1, fixed_point_position, quantization_info }; + SimpleTensor<T> dst{ output_shape, data_type, 1, quantization_info }; TensorShape output_shape1 = get_output_shape(output_shape, weights_shape, info); // Fill reference @@ -185,7 +183,6 @@ protected: TensorType _target{}; SimpleTensor<T> _reference{}; - int _fractional_bits{}; QuantizationInfo _quantization_info{}; DataType _data_type{}; @@ -212,7 +209,7 @@ public: template <typename...> void setup(TensorShape input_shape, int stride_x, int stride_y, int pad_x, int pad_y, unsigned int kernel_size, unsigned int num_kernels, DataType data_type) { - DirectConvolutionValidationGenericTensorShiftFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, stride_x, stride_y, pad_x, 
pad_y, kernel_size, num_kernels, data_type, 0, + DirectConvolutionValidationGenericTensorShiftFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, stride_x, stride_y, pad_x, pad_y, kernel_size, num_kernels, data_type, QuantizationInfo()); } }; @@ -222,10 +219,9 @@ class DirectConvolutionValidationFixedPointTensorShiftFixture : public DirectCon { public: template <typename...> - void setup(TensorShape input_shape, int stride_x, int stride_y, int pad_x, int pad_y, unsigned int kernel_size, unsigned int num_kernels, DataType data_type, int fractional_bits) + void setup(TensorShape input_shape, int stride_x, int stride_y, int pad_x, int pad_y, unsigned int kernel_size, unsigned int num_kernels, DataType data_type) { DirectConvolutionValidationGenericTensorShiftFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, stride_x, stride_y, pad_x, pad_y, kernel_size, num_kernels, data_type, - fractional_bits, QuantizationInfo()); } }; @@ -237,7 +233,7 @@ public: template <typename...> void setup(TensorShape input_shape, int stride_x, int stride_y, int pad_x, int pad_y, unsigned int kernel_size, unsigned int num_kernels, DataType data_type, QuantizationInfo quantization_info) { - DirectConvolutionValidationGenericTensorShiftFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, stride_x, stride_y, pad_x, pad_y, kernel_size, num_kernels, data_type, 0, + DirectConvolutionValidationGenericTensorShiftFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, stride_x, stride_y, pad_x, pad_y, kernel_size, num_kernels, data_type, quantization_info); } }; @@ -250,7 +246,7 @@ public: void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, unsigned int dilation_x, unsigned int dilation_y, DataType data_type, QuantizationInfo quantization_info) { - DirectConvolutionValidationGenericTensorShiftFixture<TensorType, AccessorType, FunctionType, 
T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, dilation_x, dilation_y, data_type, 0, + DirectConvolutionValidationGenericTensorShiftFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, dilation_x, dilation_y, data_type, quantization_info); } }; @@ -263,7 +259,7 @@ public: void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, unsigned int dilation_x, unsigned int dilation_y, DataType data_type) { - DirectConvolutionValidationGenericTensorShiftFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, dilation_x, dilation_y, data_type, 0, + DirectConvolutionValidationGenericTensorShiftFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, dilation_x, dilation_y, data_type, QuantizationInfo()); } }; diff --git a/tests/validation/fixtures/DropoutLayerFixture.h b/tests/validation/fixtures/DropoutLayerFixture.h index 3a077dbbea..771de30917 100644 --- a/tests/validation/fixtures/DropoutLayerFixture.h +++ b/tests/validation/fixtures/DropoutLayerFixture.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017 ARM Limited. + * Copyright (c) 2017-2018 ARM Limited. * * SPDX-License-Identifier: MIT * @@ -97,7 +97,6 @@ protected: TensorType _target{}; SimpleTensor<T> _reference{}; - int _fractional_bits{}; }; } // namespace validation } // namespace test diff --git a/tests/validation/fixtures/FlattenLayerFixture.h b/tests/validation/fixtures/FlattenLayerFixture.h index ef94ea83b0..f273e9315c 100644 --- a/tests/validation/fixtures/FlattenLayerFixture.h +++ b/tests/validation/fixtures/FlattenLayerFixture.h @@ -53,10 +53,8 @@ public: template <typename...> void setup(TensorShape shape, DataType data_type) { - _fractional_bits = is_data_type_fixed_point(data_type) ? 
4 : 0; - TensorShape shape_flatten; - TensorInfo input_info(shape, 1, data_type, _fractional_bits); + TensorInfo input_info(shape, 1, data_type); shape_flatten = compute_im2col_flatten_shape(&input_info); _target = compute_target(shape, shape_flatten, data_type); @@ -68,24 +66,15 @@ protected: template <typename U> void fill(U &&tensor) { - if(_fractional_bits == 0) - { - std::uniform_real_distribution<> distribution(-1.f, 1.f); - library->fill(tensor, distribution, 0); - } - else - { - const int one_fixed = 1 << _fractional_bits; - std::uniform_int_distribution<> distribution(-one_fixed, one_fixed); - library->fill(tensor, distribution, 0); - } + std::uniform_real_distribution<> distribution(-1.f, 1.f); + library->fill(tensor, distribution, 0); } TensorType compute_target(const TensorShape &shape, const TensorShape &shape_flatten, DataType data_type) { // Create tensors - TensorType src = create_tensor<TensorType>(shape, data_type, 1, _fractional_bits); - TensorType dst = create_tensor<TensorType>(shape_flatten, data_type, 1, _fractional_bits); + TensorType src = create_tensor<TensorType>(shape, data_type, 1); + TensorType dst = create_tensor<TensorType>(shape_flatten, data_type, 1); // Create and configure function FunctionType flatten_layer; @@ -113,7 +102,7 @@ protected: SimpleTensor<T> compute_reference(const TensorShape &shape, const TensorShape &shape_flatten, DataType data_type) { // Create reference - SimpleTensor<T> src{ shape, data_type, 1, _fractional_bits }; + SimpleTensor<T> src{ shape, data_type, 1 }; // Fill reference fill(src); @@ -123,7 +112,6 @@ protected: TensorType _target{}; SimpleTensor<T> _reference{}; - int _fractional_bits{}; }; } // namespace validation } // namespace test diff --git a/tests/validation/fixtures/FullyConnectedLayerFixture.h b/tests/validation/fixtures/FullyConnectedLayerFixture.h index f23fc207a8..895e43b735 100644 --- a/tests/validation/fixtures/FullyConnectedLayerFixture.h +++ 
b/tests/validation/fixtures/FullyConnectedLayerFixture.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017 ARM Limited. + * Copyright (c) 2017-2018 ARM Limited. * * SPDX-License-Identifier: MIT * @@ -54,14 +54,13 @@ public: public: template <typename...> void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, bool transpose_weights, bool reshape_weights, - DataType data_type, int fractional_bits, QuantizationInfo quantization_info) + DataType data_type, QuantizationInfo quantization_info) { ARM_COMPUTE_UNUSED(weights_shape); ARM_COMPUTE_UNUSED(bias_shape); _data_type = data_type; _bias_data_type = is_data_type_quantized_asymmetric(data_type) ? DataType::S32 : data_type; - _fractional_bits = fractional_bits; _quantization_info = quantization_info; _target = compute_target(input_shape, weights_shape, bias_shape, output_shape, transpose_weights, reshape_weights); @@ -126,10 +125,10 @@ protected: } // Create tensors - TensorType src = create_tensor<TensorType>(input_shape, _data_type, 1, _fractional_bits, _quantization_info); - TensorType weights = create_tensor<TensorType>(reshaped_weights_shape, _data_type, 1, _fractional_bits, _quantization_info); - TensorType bias = create_tensor<TensorType>(bias_shape, _bias_data_type, 1, _fractional_bits, _quantization_info); - TensorType dst = create_tensor<TensorType>(output_shape, _data_type, 1, _fractional_bits, _quantization_info); + TensorType src = create_tensor<TensorType>(input_shape, _data_type, 1, _quantization_info); + TensorType weights = create_tensor<TensorType>(reshaped_weights_shape, _data_type, 1, _quantization_info); + TensorType bias = create_tensor<TensorType>(bias_shape, _bias_data_type, 1, _quantization_info); + TensorType dst = create_tensor<TensorType>(output_shape, _data_type, 1, _quantization_info); // Create and configure function. 
FunctionType fc; @@ -158,7 +157,7 @@ protected: if(!reshape_weights || !transpose_weights) { TensorShape tmp_shape(weights_shape); - RawTensor tmp(tmp_shape, _data_type, 1, _fractional_bits); + RawTensor tmp(tmp_shape, _data_type, 1); // Fill with original shape fill(tmp, 1); @@ -199,9 +198,9 @@ protected: bool reshape_weights) { // Create reference - SimpleTensor<T> src{ input_shape, _data_type, 1, _fractional_bits, _quantization_info }; - SimpleTensor<T> weights{ weights_shape, _data_type, 1, _fractional_bits, _quantization_info }; - SimpleTensor<TBias> bias{ bias_shape, _bias_data_type, 1, _fractional_bits, _quantization_info }; + SimpleTensor<T> src{ input_shape, _data_type, 1, _quantization_info }; + SimpleTensor<T> weights{ weights_shape, _data_type, 1, _quantization_info }; + SimpleTensor<TBias> bias{ bias_shape, _bias_data_type, 1, _quantization_info }; // Fill reference fill(src, 0); @@ -215,7 +214,6 @@ protected: SimpleTensor<T> _reference{}; DataType _data_type{}; DataType _bias_data_type{}; - int _fractional_bits{}; QuantizationInfo _quantization_info{}; }; @@ -228,7 +226,7 @@ public: { FullyConnectedLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T, run_interleave>::setup(input_shape, weights_shape, bias_shape, output_shape, transpose_weights, reshape_weights, data_type, - 0, QuantizationInfo()); + QuantizationInfo()); } }; @@ -237,11 +235,11 @@ class FullyConnectedLayerValidationFixedPointFixture : public FullyConnectedLaye { public: template <typename...> - void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, bool transpose_weights, bool reshape_weights, DataType data_type, int fractional_bits) + void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, bool transpose_weights, bool reshape_weights, DataType data_type) { FullyConnectedLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T, 
run_interleave>::setup(input_shape, weights_shape, bias_shape, output_shape, transpose_weights, reshape_weights, data_type, - fractional_bits, QuantizationInfo()); + QuantizationInfo()); } }; @@ -255,7 +253,7 @@ public: { FullyConnectedLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T, run_interleave>::setup(input_shape, weights_shape, bias_shape, output_shape, transpose_weights, reshape_weights, data_type, - 0, quantization_info); + quantization_info); } }; } // namespace validation diff --git a/tests/validation/fixtures/GEMMFixture.h b/tests/validation/fixtures/GEMMFixture.h index 8dd2998377..e4762cc5be 100644 --- a/tests/validation/fixtures/GEMMFixture.h +++ b/tests/validation/fixtures/GEMMFixture.h @@ -47,13 +47,12 @@ class GEMMValidationFixedPointFixture : public framework::Fixture { public: template <typename...> - void setup(TensorShape shape_a, TensorShape shape_b, TensorShape shape_c, TensorShape output_shape, float alpha, float beta, DataType data_type, int fractional_bits) + void setup(TensorShape shape_a, TensorShape shape_b, TensorShape shape_c, TensorShape output_shape, float alpha, float beta, DataType data_type) { - _fractional_bits = fractional_bits; - _data_type = data_type; + _data_type = data_type; - _target = compute_target(shape_a, shape_b, shape_c, output_shape, alpha, beta, data_type, fractional_bits); - _reference = compute_reference(shape_a, shape_b, shape_c, output_shape, alpha, beta, data_type, fractional_bits); + _target = compute_target(shape_a, shape_b, shape_c, output_shape, alpha, beta, data_type); + _reference = compute_reference(shape_a, shape_b, shape_c, output_shape, alpha, beta, data_type); } protected: @@ -75,13 +74,13 @@ protected: } TensorType compute_target(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_c, const TensorShape &output_shape, float alpha, float beta, - DataType data_type, int fixed_point_position) + DataType data_type) { // Create tensors - TensorType a = 
create_tensor<TensorType>(shape_a, data_type, 1, fixed_point_position); - TensorType b = create_tensor<TensorType>(shape_b, data_type, 1, fixed_point_position); - TensorType c = create_tensor<TensorType>(shape_c, data_type, 1, fixed_point_position); - TensorType dst = create_tensor<TensorType>(output_shape, data_type, 1, fixed_point_position); + TensorType a = create_tensor<TensorType>(shape_a, data_type, 1); + TensorType b = create_tensor<TensorType>(shape_b, data_type, 1); + TensorType c = create_tensor<TensorType>(shape_c, data_type, 1); + TensorType dst = create_tensor<TensorType>(output_shape, data_type, 1); // Create and configure function FunctionType gemm; @@ -120,12 +119,12 @@ protected: } SimpleTensor<T> compute_reference(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_c, const TensorShape &output_shape, float alpha, float beta, - DataType data_type, int fixed_point_position) + DataType data_type) { // Create reference - SimpleTensor<T> a{ shape_a, data_type, 1, fixed_point_position }; - SimpleTensor<T> b{ shape_b, data_type, 1, fixed_point_position }; - SimpleTensor<T> c{ shape_c, data_type, 1, fixed_point_position }; + SimpleTensor<T> a{ shape_a, data_type, 1 }; + SimpleTensor<T> b{ shape_b, data_type, 1 }; + SimpleTensor<T> c{ shape_c, data_type, 1 }; // Fill reference fill(a, 0); @@ -137,7 +136,6 @@ protected: TensorType _target{}; SimpleTensor<T> _reference{}; - int _fractional_bits{}; DataType _data_type{}; }; @@ -148,7 +146,7 @@ public: template <typename...> void setup(TensorShape shape_a, TensorShape shape_b, TensorShape shape_c, TensorShape output_shape, float alpha, float beta, DataType data_type) { - GEMMValidationFixedPointFixture<TensorType, AccessorType, FunctionType, T>::setup(shape_a, shape_b, shape_c, output_shape, alpha, beta, data_type, 0); + GEMMValidationFixedPointFixture<TensorType, AccessorType, FunctionType, T>::setup(shape_a, shape_b, shape_c, output_shape, alpha, beta, data_type); } }; } // 
namespace validation diff --git a/tests/validation/fixtures/GEMMInterleave4x4Fixture.h b/tests/validation/fixtures/GEMMInterleave4x4Fixture.h index 1f0a7429f1..9ad730c8e2 100644 --- a/tests/validation/fixtures/GEMMInterleave4x4Fixture.h +++ b/tests/validation/fixtures/GEMMInterleave4x4Fixture.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017 ARM Limited. + * Copyright (c) 2017-2018 ARM Limited. * * SPDX-License-Identifier: MIT * @@ -47,14 +47,13 @@ class GEMMInterleave4x4ValidationFixedPointFixture : public framework::Fixture { public: template <typename...> - void setup(size_t x, size_t y, DataType data_type, int fractional_bits) + void setup(size_t x, size_t y, DataType data_type) { - _fractional_bits = fractional_bits; - _data_type = data_type; + _data_type = data_type; const TensorShape shape_a(x, y); const TensorShape shape_b(static_cast<size_t>(x * 4.f), static_cast<size_t>(std::ceil(y / 4.f))); - _target = compute_target(shape_a, shape_b, data_type, fractional_bits); - _reference = compute_reference(shape_a, shape_b, data_type, fractional_bits); + _target = compute_target(shape_a, shape_b, data_type); + _reference = compute_reference(shape_a, shape_b, data_type); } protected: @@ -76,11 +75,11 @@ protected: } } - TensorType compute_target(const TensorShape &shape_a, const TensorShape &shape_b, DataType data_type, int fixed_point_position) + TensorType compute_target(const TensorShape &shape_a, const TensorShape &shape_b, DataType data_type) { // Create tensors - TensorType a = create_tensor<TensorType>(shape_a, data_type, 1, fixed_point_position); - TensorType b = create_tensor<TensorType>(shape_b, data_type, 1, fixed_point_position); + TensorType a = create_tensor<TensorType>(shape_a, data_type, 1); + TensorType b = create_tensor<TensorType>(shape_b, data_type, 1); // Create and configure function FunctionType f; @@ -105,11 +104,11 @@ protected: return b; } - SimpleTensor<T> compute_reference(const TensorShape &shape_a, const TensorShape &shape_b, DataType 
data_type, int fixed_point_position) + SimpleTensor<T> compute_reference(const TensorShape &shape_a, const TensorShape &shape_b, DataType data_type) { // Create reference - SimpleTensor<T> a{ shape_a, data_type, 1, fixed_point_position }; - SimpleTensor<T> b{ shape_b, data_type, 1, fixed_point_position }; + SimpleTensor<T> a{ shape_a, data_type, 1 }; + SimpleTensor<T> b{ shape_b, data_type, 1 }; // Fill reference fill(a, 0); @@ -120,7 +119,6 @@ protected: TensorType _target{}; SimpleTensor<T> _reference{}; - int _fractional_bits{}; DataType _data_type{}; }; @@ -131,7 +129,7 @@ public: template <typename...> void setup(size_t x, size_t y, DataType data_type) { - GEMMInterleave4x4ValidationFixedPointFixture<TensorType, AccessorType, FunctionType, T>::setup(x, y, data_type, 0); + GEMMInterleave4x4ValidationFixedPointFixture<TensorType, AccessorType, FunctionType, T>::setup(x, y, data_type); } }; diff --git a/tests/validation/fixtures/GEMMTranspose1xWFixture.h b/tests/validation/fixtures/GEMMTranspose1xWFixture.h index d83d5e9c06..48fa55e8cc 100644 --- a/tests/validation/fixtures/GEMMTranspose1xWFixture.h +++ b/tests/validation/fixtures/GEMMTranspose1xWFixture.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, 2018 ARM Limited. + * Copyright (c) 2017-2018 ARM Limited. 
* * SPDX-License-Identifier: MIT * @@ -47,15 +47,14 @@ class GEMMTranspose1xWValidationFixedPointFixture : public framework::Fixture { public: template <typename...> - void setup(size_t x, size_t y, DataType data_type, int fractional_bits) + void setup(size_t x, size_t y, DataType data_type) { - _fractional_bits = fractional_bits; - _data_type = data_type; + _data_type = data_type; const TensorShape shape_a(x, y); const unsigned int transpose_w = 16 / data_size_from_type(data_type); const TensorShape shape_b(static_cast<size_t>(y * transpose_w), static_cast<size_t>(std::ceil(x / static_cast<float>(transpose_w)))); - _target = compute_target(shape_a, shape_b, data_type, fractional_bits); - _reference = compute_reference(shape_a, shape_b, data_type, fractional_bits); + _target = compute_target(shape_a, shape_b, data_type); + _reference = compute_reference(shape_a, shape_b, data_type); } protected: @@ -77,11 +76,11 @@ protected: } } - TensorType compute_target(const TensorShape &shape_a, const TensorShape &shape_b, DataType data_type, int fixed_point_position) + TensorType compute_target(const TensorShape &shape_a, const TensorShape &shape_b, DataType data_type) { // Create tensors - TensorType a = create_tensor<TensorType>(shape_a, data_type, 1, fixed_point_position); - TensorType b = create_tensor<TensorType>(shape_b, data_type, 1, fixed_point_position); + TensorType a = create_tensor<TensorType>(shape_a, data_type, 1); + TensorType b = create_tensor<TensorType>(shape_b, data_type, 1); // Create and configure function FunctionType f; @@ -107,10 +106,10 @@ protected: return b; } - SimpleTensor<T> compute_reference(const TensorShape &shape_a, const TensorShape &shape_b, DataType data_type, int fixed_point_position) + SimpleTensor<T> compute_reference(const TensorShape &shape_a, const TensorShape &shape_b, DataType data_type) { // Create reference - SimpleTensor<T> a{ shape_a, data_type, 1, fixed_point_position }; + SimpleTensor<T> a{ shape_a, data_type, 1 }; // Fill 
reference fill(a, 0); @@ -120,7 +119,6 @@ protected: TensorType _target{}; SimpleTensor<T> _reference{}; - int _fractional_bits{}; DataType _data_type{}; }; @@ -131,7 +129,7 @@ public: template <typename...> void setup(size_t x, size_t y, DataType data_type) { - GEMMTranspose1xWValidationFixedPointFixture<TensorType, AccessorType, FunctionType, T>::setup(x, y, data_type, 0); + GEMMTranspose1xWValidationFixedPointFixture<TensorType, AccessorType, FunctionType, T>::setup(x, y, data_type); } }; diff --git a/tests/validation/fixtures/Im2ColFixture.h b/tests/validation/fixtures/Im2ColFixture.h index 6e532e7803..6abea27102 100644 --- a/tests/validation/fixtures/Im2ColFixture.h +++ b/tests/validation/fixtures/Im2ColFixture.h @@ -81,8 +81,8 @@ protected: TensorType compute_target(const TensorShape &input_shape, const TensorShape &output_shape, DataType data_type) { // Create tensors - TensorType src = create_tensor<TensorType>(input_shape, data_type, 1, 0, _quant_info, _data_layout); - TensorType dst = create_tensor<TensorType>(output_shape, data_type, 1, 0, _quant_info); + TensorType src = create_tensor<TensorType>(input_shape, data_type, 1, _quant_info, _data_layout); + TensorType dst = create_tensor<TensorType>(output_shape, data_type, 1, _quant_info); // Create and configure function FunctionType im2col_func; @@ -110,8 +110,8 @@ protected: void compute_reference(const TensorShape &input_shape, const TensorShape &output_shape, DataType data_type) { // Create reference - SimpleTensor<T> src{ input_shape, data_type, 1, 0, _quant_info, _data_layout }; - _reference = SimpleTensor<T>(output_shape, data_type, 1, 0, _quant_info, DataLayout::NCHW); + SimpleTensor<T> src{ input_shape, data_type, 1, _quant_info, _data_layout }; + _reference = SimpleTensor<T>(output_shape, data_type, 1, _quant_info, DataLayout::NCHW); // Fill reference fill(src); reference::im2col<T>(src, _reference, _kernel_dims, _conv_info, _has_bias); diff --git 
a/tests/validation/fixtures/NormalizationLayerFixture.h b/tests/validation/fixtures/NormalizationLayerFixture.h index e7d83c7735..f4f9c64944 100644 --- a/tests/validation/fixtures/NormalizationLayerFixture.h +++ b/tests/validation/fixtures/NormalizationLayerFixture.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017 ARM Limited. + * Copyright (c) 2017-2018 ARM Limited. * * SPDX-License-Identifier: MIT * @@ -43,40 +43,30 @@ namespace test namespace validation { template <typename TensorType, typename AccessorType, typename FunctionType, typename T> -class NormalizationValidationFixedPointFixture : public framework::Fixture +class NormalizationValidationGenericFixture : public framework::Fixture { public: template <typename...> - void setup(TensorShape shape, NormType norm_type, int norm_size, float beta, bool is_scaled, DataType data_type, int fractional_bits) + void setup(TensorShape shape, NormType norm_type, int norm_size, float beta, bool is_scaled, DataType data_type) { - _fractional_bits = fractional_bits; NormalizationLayerInfo info(norm_type, norm_size, 5, beta, 1.f, is_scaled); - _target = compute_target(shape, info, data_type, fractional_bits); - _reference = compute_reference(shape, info, data_type, fractional_bits); + _target = compute_target(shape, info, data_type); + _reference = compute_reference(shape, info, data_type); } protected: template <typename U> void fill(U &&tensor) { - if(_fractional_bits == 0) - { - library->fill_tensor_uniform(tensor, 0); - } - else - { - const int one_fixed = 1 << _fractional_bits; - std::uniform_int_distribution<> distribution(-one_fixed, one_fixed); - library->fill(tensor, distribution, 0); - } + library->fill_tensor_uniform(tensor, 0); } - TensorType compute_target(const TensorShape &shape, NormalizationLayerInfo info, DataType data_type, int fixed_point_position = 0) + TensorType compute_target(const TensorShape &shape, NormalizationLayerInfo info, DataType data_type) { // Create tensors - TensorType src = 
create_tensor<TensorType>(shape, data_type, 1, fixed_point_position); - TensorType dst = create_tensor<TensorType>(shape, data_type, 1, fixed_point_position); + TensorType src = create_tensor<TensorType>(shape, data_type, 1); + TensorType dst = create_tensor<TensorType>(shape, data_type, 1); // Create and configure function FunctionType norm_layer; @@ -101,10 +91,10 @@ protected: return dst; } - SimpleTensor<T> compute_reference(const TensorShape &shape, NormalizationLayerInfo info, DataType data_type, int fixed_point_position = 0) + SimpleTensor<T> compute_reference(const TensorShape &shape, NormalizationLayerInfo info, DataType data_type) { // Create reference - SimpleTensor<T> src{ shape, data_type, 1, fixed_point_position }; + SimpleTensor<T> src{ shape, data_type, 1 }; // Fill reference fill(src); @@ -114,17 +104,16 @@ protected: TensorType _target{}; SimpleTensor<T> _reference{}; - int _fractional_bits{}; }; template <typename TensorType, typename AccessorType, typename FunctionType, typename T> -class NormalizationValidationFixture : public NormalizationValidationFixedPointFixture<TensorType, AccessorType, FunctionType, T> +class NormalizationValidationFixture : public NormalizationValidationGenericFixture<TensorType, AccessorType, FunctionType, T> { public: template <typename...> void setup(TensorShape shape, NormType norm_type, int norm_size, float beta, bool is_scaled, DataType data_type) { - NormalizationValidationFixedPointFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, norm_type, norm_size, beta, is_scaled, data_type, 0); + NormalizationValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, norm_type, norm_size, beta, is_scaled, data_type); } }; } // namespace validation diff --git a/tests/validation/fixtures/PoolingLayerFixture.h b/tests/validation/fixtures/PoolingLayerFixture.h index 27b033a06c..24539545ca 100644 --- a/tests/validation/fixtures/PoolingLayerFixture.h +++ 
b/tests/validation/fixtures/PoolingLayerFixture.h @@ -47,14 +47,13 @@ class PoolingLayerValidationGenericFixture : public framework::Fixture { public: template <typename...> - void setup(TensorShape shape, PoolingLayerInfo pool_info, DataType data_type, DataLayout data_layout, int fractional_bits, QuantizationInfo quantization_info) + void setup(TensorShape shape, PoolingLayerInfo pool_info, DataType data_type, DataLayout data_layout, QuantizationInfo quantization_info) { - _fractional_bits = fractional_bits; _quantization_info = quantization_info; _pool_info = pool_info; - _target = compute_target(shape, pool_info, data_type, data_layout, fractional_bits, quantization_info); - _reference = compute_reference(shape, pool_info, data_type, fractional_bits, quantization_info); + _target = compute_target(shape, pool_info, data_type, data_layout, quantization_info); + _reference = compute_reference(shape, pool_info, data_type, quantization_info); } protected: @@ -72,14 +71,14 @@ protected: } else { - const int one_fixed = 1 << _fractional_bits; + const int one_fixed = 1; std::uniform_int_distribution<> distribution(-one_fixed, one_fixed); library->fill(tensor, distribution, 0); } } TensorType compute_target(TensorShape shape, PoolingLayerInfo info, - DataType data_type, DataLayout data_layout, int fixed_point_position, QuantizationInfo quantization_info) + DataType data_type, DataLayout data_layout, QuantizationInfo quantization_info) { // Change shape in case of NHWC. 
if(data_layout == DataLayout::NHWC) @@ -88,7 +87,7 @@ protected: } // Create tensors - TensorType src = create_tensor<TensorType>(shape, data_type, 1, fixed_point_position, quantization_info, data_layout); + TensorType src = create_tensor<TensorType>(shape, data_type, 1, quantization_info, data_layout); TensorType dst; // Create and configure function @@ -115,10 +114,10 @@ protected: } SimpleTensor<T> compute_reference(const TensorShape &shape, PoolingLayerInfo info, - DataType data_type, int fixed_point_position, QuantizationInfo quantization_info) + DataType data_type, QuantizationInfo quantization_info) { // Create reference - SimpleTensor<T> src{ shape, data_type, 1, fixed_point_position, quantization_info }; + SimpleTensor<T> src{ shape, data_type, 1, quantization_info }; // Fill reference fill(src); @@ -128,7 +127,6 @@ protected: TensorType _target{}; SimpleTensor<T> _reference{}; - int _fractional_bits{}; QuantizationInfo _quantization_info{}; PoolingLayerInfo _pool_info{}; }; @@ -141,7 +139,7 @@ public: void setup(TensorShape shape, PoolingType pool_type, Size2D pool_size, PadStrideInfo pad_stride_info, bool exclude_padding, DataType data_type, DataLayout data_layout) { PoolingLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, PoolingLayerInfo(pool_type, pool_size, pad_stride_info, exclude_padding), - data_type, data_layout, 0, QuantizationInfo()); + data_type, data_layout, QuantizationInfo()); } }; @@ -150,10 +148,10 @@ class PoolingLayerValidationFixedPointFixture : public PoolingLayerValidationGen { public: template <typename...> - void setup(TensorShape shape, PoolingType pool_type, Size2D pool_size, PadStrideInfo pad_stride_info, bool exclude_padding, DataType data_type, int fractional_bits) + void setup(TensorShape shape, PoolingType pool_type, Size2D pool_size, PadStrideInfo pad_stride_info, bool exclude_padding, DataType data_type) { PoolingLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, 
T>::setup(shape, PoolingLayerInfo(pool_type, pool_size, pad_stride_info, exclude_padding), - data_type, DataLayout::NCHW, fractional_bits, QuantizationInfo()); + data_type, DataLayout::NCHW, QuantizationInfo()); } }; @@ -166,7 +164,7 @@ public: QuantizationInfo quantization_info, DataLayout data_layout = DataLayout::NCHW) { PoolingLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, PoolingLayerInfo(pool_type, pool_size, pad_stride_info, exclude_padding), - data_type, data_layout, 0, quantization_info); + data_type, data_layout, quantization_info); } }; @@ -177,7 +175,7 @@ public: template <typename...> void setup(TensorShape src_shape, PoolingLayerInfo pool_info, DataType data_type) { - PoolingLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(src_shape, pool_info, data_type, DataLayout::NCHW, 0, QuantizationInfo()); + PoolingLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(src_shape, pool_info, data_type, DataLayout::NCHW, QuantizationInfo()); } }; @@ -188,7 +186,7 @@ public: template <typename...> void setup(TensorShape shape, PoolingType pool_type, DataType data_type, DataLayout data_layout = DataLayout::NCHW) { - PoolingLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, PoolingLayerInfo(pool_type), data_type, DataLayout::NCHW, 0, QuantizationInfo()); + PoolingLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, PoolingLayerInfo(pool_type), data_type, DataLayout::NCHW, QuantizationInfo()); } }; diff --git a/tests/validation/fixtures/ScaleFixture.h b/tests/validation/fixtures/ScaleFixture.h index 05e1bf5ffe..5413147699 100644 --- a/tests/validation/fixtures/ScaleFixture.h +++ b/tests/validation/fixtures/ScaleFixture.h @@ -100,7 +100,7 @@ protected: } // Create tensors - TensorType src = create_tensor<TensorType>(shape, _data_type, 1, 0, QuantizationInfo(), data_layout); + TensorType src = 
create_tensor<TensorType>(shape, _data_type, 1, QuantizationInfo(), data_layout); const int idx_width = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH); const int idx_height = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT); @@ -108,7 +108,7 @@ protected: TensorShape shape_scaled(shape); shape_scaled.set(idx_width, shape[idx_width] * scale_x); shape_scaled.set(idx_height, shape[idx_height] * scale_y); - TensorType dst = create_tensor<TensorType>(shape_scaled, _data_type, 1, 0, QuantizationInfo(), data_layout); + TensorType dst = create_tensor<TensorType>(shape_scaled, _data_type, 1, QuantizationInfo(), data_layout); // Create and configure function FunctionType scale; @@ -137,7 +137,7 @@ protected: InterpolationPolicy policy, BorderMode border_mode, T constant_border_value, SamplingPolicy sampling_policy) { // Create reference - SimpleTensor<T> src{ shape, _data_type, 1, 0, QuantizationInfo() }; + SimpleTensor<T> src{ shape, _data_type, 1, QuantizationInfo() }; // Fill reference fill(src); diff --git a/tests/validation/fixtures/SoftmaxLayerFixture.h b/tests/validation/fixtures/SoftmaxLayerFixture.h index c2ab2e2ef6..59ce5192ff 100644 --- a/tests/validation/fixtures/SoftmaxLayerFixture.h +++ b/tests/validation/fixtures/SoftmaxLayerFixture.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017 ARM Limited. + * Copyright (c) 2017-2018 ARM Limited. 
* * SPDX-License-Identifier: MIT * @@ -47,13 +47,12 @@ class SoftmaxValidationGenericFixture : public framework::Fixture { public: template <typename...> - void setup(TensorShape shape, DataType data_type, int fractional_bits, QuantizationInfo quantization_info, float beta) + void setup(TensorShape shape, DataType data_type, QuantizationInfo quantization_info, float beta) { - _fractional_bits = fractional_bits; _quantization_info = quantization_info; - _target = compute_target(shape, data_type, fractional_bits, quantization_info, beta); - _reference = compute_reference(shape, data_type, fractional_bits, quantization_info, beta); + _target = compute_target(shape, data_type, quantization_info, beta); + _reference = compute_reference(shape, data_type, quantization_info, beta); } protected: @@ -72,18 +71,18 @@ protected: } else { - const int one_fixed = 1 << _fractional_bits; + const int one_fixed = 1; std::uniform_int_distribution<> distribution(-one_fixed, one_fixed); library->fill(tensor, distribution, 0); } } - TensorType compute_target(const TensorShape &shape, DataType data_type, int fixed_point_position, + TensorType compute_target(const TensorShape &shape, DataType data_type, QuantizationInfo quantization_info, float beta) { // Create tensors - TensorType src = create_tensor<TensorType>(shape, data_type, 1, fixed_point_position, quantization_info); - TensorType dst = create_tensor<TensorType>(shape, data_type, 1, fixed_point_position, QuantizationInfo(1.f / 256, 0)); + TensorType src = create_tensor<TensorType>(shape, data_type, 1, quantization_info); + TensorType dst = create_tensor<TensorType>(shape, data_type, 1, QuantizationInfo(1.f / 256, 0)); // Create and configure function FunctionType smx_layer; @@ -108,11 +107,11 @@ protected: return dst; } - SimpleTensor<T> compute_reference(const TensorShape &shape, DataType data_type, int fixed_point_position, + SimpleTensor<T> compute_reference(const TensorShape &shape, DataType data_type, QuantizationInfo 
quantization_info, float beta) { // Create reference - SimpleTensor<T> src{ shape, data_type, 1, fixed_point_position, quantization_info }; + SimpleTensor<T> src{ shape, data_type, 1, quantization_info }; // Fill reference fill(src); @@ -122,7 +121,6 @@ protected: TensorType _target{}; SimpleTensor<T> _reference{}; - int _fractional_bits{}; QuantizationInfo _quantization_info{}; }; @@ -135,7 +133,6 @@ public: { SoftmaxValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, data_type, - 0, QuantizationInfo(), beta); } @@ -146,11 +143,10 @@ class SoftmaxValidationFixedPointFixture : public SoftmaxValidationGenericFixtur { public: template <typename...> - void setup(TensorShape shape, DataType data_type, int fixed_point_position) + void setup(TensorShape shape, DataType data_type) { SoftmaxValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, data_type, - fixed_point_position, QuantizationInfo(), 1.0f); } @@ -165,7 +161,6 @@ public: { SoftmaxValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, data_type, - 0, quantization_info, beta); } diff --git a/tests/validation/fixtures/WidthConcatenateLayerFixture.h b/tests/validation/fixtures/WidthConcatenateLayerFixture.h index cf9b12eab6..caad0feee0 100644 --- a/tests/validation/fixtures/WidthConcatenateLayerFixture.h +++ b/tests/validation/fixtures/WidthConcatenateLayerFixture.h @@ -92,12 +92,12 @@ protected: for(const auto &shape : shapes) { - srcs.emplace_back(create_tensor<TensorType>(shape, data_type, 1, _fractional_bits)); + srcs.emplace_back(create_tensor<TensorType>(shape, data_type, 1)); src_ptrs.emplace_back(&srcs.back()); } TensorShape dst_shape = misc::shape_calculator::calculate_width_concatenate_shape(src_ptrs); - TensorType dst = create_tensor<TensorType>(dst_shape, data_type, 1, _fractional_bits); + TensorType dst = create_tensor<TensorType>(dst_shape, data_type, 1); // Create and configure function FunctionType width_concat; @@ 
-141,7 +141,7 @@ protected: int i = 0; for(const auto &shape : shapes) { - srcs.emplace_back(shape, data_type, 1, _fractional_bits); + srcs.emplace_back(shape, data_type, 1); fill(srcs.back(), i++); } @@ -150,9 +150,6 @@ protected: TensorType _target{}; SimpleTensor<T> _reference{}; - -private: - int _fractional_bits{ 1 }; }; } // namespace validation } // namespace test diff --git a/tests/validation/fixtures/WinogradConvolutionLayerFixture.h b/tests/validation/fixtures/WinogradConvolutionLayerFixture.h index ac168ebe3c..f1660e6e90 100644 --- a/tests/validation/fixtures/WinogradConvolutionLayerFixture.h +++ b/tests/validation/fixtures/WinogradConvolutionLayerFixture.h @@ -201,10 +201,10 @@ protected: } // Create tensors - TensorType src = create_tensor<TensorType>(input_shape, data_type, 1, 0, QuantizationInfo(), data_layout); - TensorType weights = create_tensor<TensorType>(weights_shape, data_type, 1, 0, QuantizationInfo(), data_layout); - TensorType bias = create_tensor<TensorType>(bias_shape, data_type, 1, 0, QuantizationInfo(), data_layout); - TensorType dst = create_tensor<TensorType>(output_shape, data_type, 1, 0, QuantizationInfo(), data_layout); + TensorType src = create_tensor<TensorType>(input_shape, data_type, 1, QuantizationInfo(), data_layout); + TensorType weights = create_tensor<TensorType>(weights_shape, data_type, 1, QuantizationInfo(), data_layout); + TensorType bias = create_tensor<TensorType>(bias_shape, data_type, 1, QuantizationInfo(), data_layout); + TensorType dst = create_tensor<TensorType>(output_shape, data_type, 1, QuantizationInfo(), data_layout); // Create and configure function FunctionType conv; @@ -340,8 +340,8 @@ protected: permute(input_shape, PermutationVector(2U, 0U, 1U)); } - TensorType src = create_tensor<TensorType>(input_shape, data_type, 1, 0, QuantizationInfo(), data_layout); - TensorType dst = create_tensor<TensorType>(output_shape, data_type, 1, 0, QuantizationInfo()); + TensorType src = 
create_tensor<TensorType>(input_shape, data_type, 1, QuantizationInfo(), data_layout); + TensorType dst = create_tensor<TensorType>(output_shape, data_type, 1, QuantizationInfo()); // Create and configure function FunctionType transf; @@ -369,7 +369,7 @@ protected: SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &output_shape, const WinogradInfo &winograd_info, DataLayout data_layout, DataType data_type) { // Create reference - SimpleTensor<T> src{ input_shape, data_type, 1, 0, QuantizationInfo() }; + SimpleTensor<T> src{ input_shape, data_type, 1, QuantizationInfo() }; // Fill reference fill(src, 0, -1.f, 1.f); @@ -424,8 +424,8 @@ protected: } // Create tensors - TensorType src = create_tensor<TensorType>(input_shape, data_type, 1, 0, QuantizationInfo(), data_layout); - TensorType dst = create_tensor<TensorType>(output_shape, data_type, 1, 0, QuantizationInfo()); + TensorType src = create_tensor<TensorType>(input_shape, data_type, 1, QuantizationInfo(), data_layout); + TensorType dst = create_tensor<TensorType>(output_shape, data_type, 1, QuantizationInfo()); // Create and configure function FunctionType filter_transform; @@ -452,7 +452,7 @@ protected: SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &output_shape, const WinogradInfo &winograd_info, DataLayout data_layout, DataType data_type) { // Create reference - SimpleTensor<T> src{ input_shape, data_type, 1, 0, QuantizationInfo() }; + SimpleTensor<T> src{ input_shape, data_type, 1, QuantizationInfo() }; // Fill reference fill(src, 0, -1.f, 1.f); @@ -502,7 +502,7 @@ protected: // Create tensors TensorType src = create_tensor<TensorType>(input_shape, data_type); - TensorType dst = create_tensor<TensorType>(output_shape, data_type, 1, 0, QuantizationInfo(), winograd_info.output_data_layout); + TensorType dst = create_tensor<TensorType>(output_shape, data_type, 1, QuantizationInfo(), winograd_info.output_data_layout); // Create and configure 
function FunctionType output_transform; |