From 6bff195a51915fd88c1aa1904cf269dbd1a04f50 Mon Sep 17 00:00:00 2001 From: Michalis Spyrou Date: Wed, 2 Oct 2019 17:22:11 +0100 Subject: COMPMID-2486: Remove disabled compiler warnings Removed -Wno-unused-parameter and -Wno-deprecated-declarations compilation flags. Plus, 3RDPARTY_UPDATE. Change-Id: I43098c7af527d5651aad3c597b508a56f8813dda Signed-off-by: Michalis Spyrou Reviewed-on: https://review.mlplatform.org/c/2041 Comments-Addressed: Arm Jenkins Reviewed-by: Georgios Pinitas Tested-by: Arm Jenkins --- tests/validation/fixtures/DropoutLayerFixture.h | 3 +- .../fixtures/FullyConnectedLayerFixture.h | 7 ++- .../fixtures/FuseBatchNormalizationFixture.h | 4 +- tests/validation/fixtures/GEMMFixture.h | 53 ++++++++++++---------- .../validation/fixtures/GEMMTranspose1xWFixture.h | 6 +-- .../fixtures/InstanceNormalizationLayerFixture.h | 4 +- tests/validation/fixtures/PermuteFixture.h | 8 ++-- tests/validation/fixtures/PoolingLayerFixture.h | 2 +- .../fixtures/UNIT/DynamicTensorFixture.h | 1 + tests/validation/fixtures/WarpPerspectiveFixture.h | 12 ++--- .../fixtures/WinogradConvolutionLayerFixture.h | 22 +++------ 11 files changed, 58 insertions(+), 64 deletions(-) (limited to 'tests/validation/fixtures') diff --git a/tests/validation/fixtures/DropoutLayerFixture.h b/tests/validation/fixtures/DropoutLayerFixture.h index 771de30917..be25802650 100644 --- a/tests/validation/fixtures/DropoutLayerFixture.h +++ b/tests/validation/fixtures/DropoutLayerFixture.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2018 ARM Limited. + * Copyright (c) 2017-2019 ARM Limited. * * SPDX-License-Identifier: MIT * @@ -93,6 +93,7 @@ protected: SimpleTensor compute_reference(const TensorShape &shape, DataType data_type) { + ARM_COMPUTE_UNUSED(shape, data_type); } TensorType _target{}; diff --git a/tests/validation/fixtures/FullyConnectedLayerFixture.h b/tests/validation/fixtures/FullyConnectedLayerFixture.h index 1e4a74445f..0449d80de8 100644 --- a/tests/validation/fixtures/FullyConnectedLayerFixture.h +++ b/tests/validation/fixtures/FullyConnectedLayerFixture.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2018 ARM Limited. + * Copyright (c) 2017-2019 ARM Limited. 
* * SPDX-License-Identifier: MIT * @@ -64,7 +64,7 @@ public: _quantization_info = quantization_info; _target = compute_target(input_shape, weights_shape, bias_shape, output_shape, transpose_weights, reshape_weights); - _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, transpose_weights, reshape_weights); + _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape); } protected: @@ -181,8 +181,7 @@ protected: return dst; } - SimpleTensor compute_reference(const TensorShape &input_shape, const TensorShape &weights_shape, const TensorShape &bias_shape, const TensorShape &output_shape, bool transpose_weights, - bool reshape_weights) + SimpleTensor compute_reference(const TensorShape &input_shape, const TensorShape &weights_shape, const TensorShape &bias_shape, const TensorShape &output_shape) { // Create reference SimpleTensor src{ input_shape, _data_type, 1, _quantization_info }; diff --git a/tests/validation/fixtures/FuseBatchNormalizationFixture.h b/tests/validation/fixtures/FuseBatchNormalizationFixture.h index 4a81fb0823..780b4a0fb3 100644 --- a/tests/validation/fixtures/FuseBatchNormalizationFixture.h +++ b/tests/validation/fixtures/FuseBatchNormalizationFixture.h @@ -51,7 +51,7 @@ public: void setup(TensorShape shape_w, DataType data_type, DataLayout data_layout, bool in_place, bool with_bias, bool with_gamma, bool with_beta) { std::tie(_target_w, _target_b) = compute_target(shape_w, data_type, data_layout, in_place, with_bias, with_gamma, with_beta); - std::tie(_reference_w, _reference_b) = compute_reference(shape_w, data_type, data_layout, with_bias, with_gamma, with_beta); + std::tie(_reference_w, _reference_b) = compute_reference(shape_w, data_type, with_bias, with_gamma, with_beta); } protected: @@ -138,7 +138,7 @@ protected: return std::make_pair(std::move(in_place_w ? w : w_fused), std::move(in_place_b ? 
b : b_fused)); } - std::pair, SimpleTensor> compute_reference(TensorShape shape_w, DataType data_type, DataLayout data_layout, bool with_bias, bool with_gamma, bool with_beta) + std::pair, SimpleTensor> compute_reference(TensorShape shape_w, DataType data_type, bool with_bias, bool with_gamma, bool with_beta) { const TensorShape shape_v(shape_w[dims_weights - 1]); diff --git a/tests/validation/fixtures/GEMMFixture.h b/tests/validation/fixtures/GEMMFixture.h index bf919c9b09..efe7567075 100644 --- a/tests/validation/fixtures/GEMMFixture.h +++ b/tests/validation/fixtures/GEMMFixture.h @@ -51,8 +51,9 @@ public: template void setup(TensorShape shape_a, TensorShape shape_b, TensorShape shape_c, TensorShape output_shape, float alpha, float beta, bool pretranspose, DataType data_type) { - _target = compute_target(shape_a, shape_b, shape_c, output_shape, alpha, beta, pretranspose, data_type); - _reference = compute_reference(shape_a, shape_b, shape_c, output_shape, alpha, beta, data_type); + ARM_COMPUTE_UNUSED(pretranspose); + _target = compute_target(shape_a, shape_b, shape_c, output_shape, alpha, beta, data_type); + _reference = compute_reference(shape_a, shape_b, output_shape, alpha, beta, data_type); } protected: @@ -74,7 +75,7 @@ protected: } TensorType compute_target(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_c, const TensorShape &output_shape, float alpha, float beta, - bool pretranspose, DataType data_type) + DataType data_type) { // Create tensors TensorType a = create_tensor(shape_a, data_type, 1); @@ -124,7 +125,7 @@ protected: return dst; } - SimpleTensor compute_reference(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_c, const TensorShape &output_shape, float alpha, float beta, + SimpleTensor compute_reference(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &output_shape, float alpha, float beta, DataType data_type) { TensorShape shape_a_to_use = shape_a; @@ -183,7 +184,7 @@ public: broadcast_bias ? 
1 : batch_size); _target = compute_target(lhs_shape, rhs_shape, bias_shape, data_type, alpha, beta, broadcast_bias, fp16_mixed_precision, act_info, gpu_arch); - _reference = compute_reference(lhs_shape, rhs_shape, bias_shape, data_type, alpha, beta, broadcast_bias, act_info); + _reference = compute_reference(lhs_shape, rhs_shape, data_type, alpha, beta, broadcast_bias, act_info); } protected: @@ -244,7 +245,7 @@ protected: return dst; } - SimpleTensor compute_reference(const TensorShape &lhs_shape, const TensorShape &rhs_shape, const TensorShape &bias_shape, DataType data_type, float alpha, float beta, bool broadcast_bias, + SimpleTensor compute_reference(const TensorShape &lhs_shape, const TensorShape &rhs_shape, DataType data_type, float alpha, float beta, bool broadcast_bias, const ActivationLayerInfo &act_info) { TensorShape dst_shape = lhs_shape; @@ -289,6 +290,8 @@ public: void setup(unsigned int m_w, unsigned int m_h, unsigned int n, unsigned int k, unsigned int batch_size, float alpha, float beta, bool broadcast_bias, bool fp16_mixed_precision, const ActivationLayerInfo &act_info, DataType data_type, GPUTarget gpu_arch) { + ARM_COMPUTE_UNUSED(broadcast_bias); + // In case of GEMM3D, m is the product between m_w and m_h const unsigned int m = m_w * m_h; @@ -298,7 +301,7 @@ public: const TensorShape bias_shape(n, 1, 1); _target = compute_target(lhs_shape, rhs_shape, bias_shape, data_type, alpha, beta, m_h, fp16_mixed_precision, act_info, gpu_arch); - _reference = compute_reference(lhs_shape, rhs_shape, bias_shape, data_type, alpha, beta, m_h, act_info); + _reference = compute_reference(lhs_shape, rhs_shape, data_type, alpha, beta, m_h, act_info); } protected: @@ -355,7 +358,7 @@ protected: return dst; } - SimpleTensor compute_reference(const TensorShape &lhs_shape, const TensorShape &rhs_shape, const TensorShape &bias_shape, DataType data_type, float alpha, float beta, unsigned int m_h, + SimpleTensor compute_reference(const TensorShape &lhs_shape, const TensorShape &rhs_shape, DataType data_type, float alpha, float beta, unsigned int m_h, const ActivationLayerInfo &act_info) { TensorShape dst_shape = lhs_shape; @@ -421,7 +424,7 @@ public: broadcast_bias ? 
1 : batch_size); _target = compute_target(lhs_shape, rhs_shape, bias_shape, lhs_info, rhs_info, data_type, alpha, beta, broadcast_bias, fp16_mixed_precision, act_info, gpu_arch); - _reference = compute_reference(lhs_shape, rhs_shape, bias_shape, data_type, alpha, beta, broadcast_bias, act_info); + _reference = compute_reference(lhs_shape, rhs_shape, data_type, alpha, beta, broadcast_bias, act_info); } protected: @@ -494,7 +497,7 @@ protected: return dst; } - SimpleTensor compute_reference(const TensorShape &lhs_shape, const TensorShape &rhs_shape, const TensorShape &bias_shape, DataType data_type, float alpha, float beta, bool broadcast_bias, + SimpleTensor compute_reference(const TensorShape &lhs_shape, const TensorShape &rhs_shape, DataType data_type, float alpha, float beta, bool broadcast_bias, const ActivationLayerInfo &act_info) { TensorShape dst_shape = lhs_shape; @@ -539,6 +542,8 @@ public: void setup(unsigned int m_w, unsigned int m_h, unsigned int n, unsigned int k, unsigned int batch_size, float alpha, float beta, unsigned int v0, unsigned int h0, bool broadcast_bias, bool fp16_mixed_precision, const ActivationLayerInfo &act_info, DataType data_type, GPUTarget gpu_arch) { + ARM_COMPUTE_UNUSED(broadcast_bias); + GEMMLHSMatrixInfo lhs_info; lhs_info.m0 = 4; lhs_info.k0 = 4; @@ -562,7 +567,7 @@ public: const TensorShape bias_shape(n, 1, 1); _target = compute_target(lhs_shape, rhs_shape, bias_shape, lhs_info, rhs_info, data_type, alpha, beta, m_h, fp16_mixed_precision, act_info, gpu_arch); - _reference = compute_reference(lhs_shape, rhs_shape, bias_shape, data_type, alpha, beta, m_h, act_info); + _reference = compute_reference(lhs_shape, rhs_shape, data_type, alpha, beta, m_h, act_info); } protected: @@ -631,7 +636,7 @@ protected: return dst; } - SimpleTensor compute_reference(const TensorShape &lhs_shape, const TensorShape &rhs_shape, const TensorShape &bias_shape, DataType data_type, float alpha, float beta, unsigned int m_h, + SimpleTensor compute_reference(const TensorShape &lhs_shape, const TensorShape &rhs_shape, DataType data_type, float alpha, float beta, unsigned int m_h, const ActivationLayerInfo &act_info) { TensorShape dst_shape = lhs_shape; @@ -697,7 +702,7 @@ public: broadcast_bias ? 
1 : batch_size); _target = compute_target(lhs_shape, rhs_shape, bias_shape, lhs_info, rhs_info, data_type, alpha, beta, broadcast_bias, act_info); - _reference = compute_reference(lhs_shape, rhs_shape, bias_shape, data_type, alpha, beta, broadcast_bias, act_info); + _reference = compute_reference(lhs_shape, rhs_shape, data_type, alpha, beta, broadcast_bias, act_info); } protected: @@ -778,7 +783,7 @@ protected: return dst; } - SimpleTensor compute_reference(const TensorShape &lhs_shape, const TensorShape &rhs_shape, const TensorShape &bias_shape, DataType data_type, float alpha, float beta, bool broadcast_bias, + SimpleTensor compute_reference(const TensorShape &lhs_shape, const TensorShape &rhs_shape, DataType data_type, float alpha, float beta, bool broadcast_bias, const ActivationLayerInfo &act_info) { TensorShape dst_shape = lhs_shape; @@ -854,7 +859,7 @@ public: const TensorShape bias_shape(n, 1, 1); _target = compute_target(lhs_shape, rhs_shape, bias_shape, lhs_info, rhs_info, data_type, alpha, beta, m_h, act_info); - _reference = compute_reference(lhs_shape, rhs_shape, bias_shape, data_type, alpha, beta, m_h, act_info); + _reference = compute_reference(lhs_shape, rhs_shape, data_type, alpha, beta, m_h, act_info); } protected: @@ -931,7 +936,7 @@ protected: return dst; } - SimpleTensor compute_reference(const TensorShape &lhs_shape, const TensorShape &rhs_shape, const TensorShape &bias_shape, DataType data_type, float alpha, float beta, unsigned int m_h, + SimpleTensor compute_reference(const TensorShape &lhs_shape, const TensorShape &rhs_shape, DataType data_type, float alpha, float beta, unsigned int m_h, const ActivationLayerInfo &act_info) { TensorShape dst_shape = lhs_shape; @@ -1001,7 +1006,7 @@ public: broadcast_bias ? 1 : batch_size); _target = compute_target(lhs_shape, rhs_shape, bias_shape, lhs_info, rhs_info, data_type, alpha, beta, broadcast_bias, act_info); - _reference = compute_reference(lhs_shape, rhs_shape, bias_shape, data_type, alpha, beta, broadcast_bias, act_info); + _reference = compute_reference(lhs_shape, rhs_shape, data_type, alpha, beta, broadcast_bias, act_info); } protected: @@ -1075,7 +1080,7 @@ protected: return dst; } - SimpleTensor compute_reference(const TensorShape &lhs_shape, const TensorShape &rhs_shape, const TensorShape &bias_shape, DataType data_type, float alpha, float beta, bool broadcast_bias, + SimpleTensor compute_reference(const TensorShape &lhs_shape, const TensorShape &rhs_shape, DataType data_type, float alpha, float beta, bool broadcast_bias, const ActivationLayerInfo &act_info) { TensorShape dst_shape = lhs_shape; @@ -1140,7 +1145,7 @@ public: const TensorShape bias_shape(n, 1, 1); _target = compute_target(lhs_shape, rhs_shape, bias_shape, lhs_info, rhs_info, data_type, alpha, beta, m_h, act_info); - _reference = compute_reference(lhs_shape, rhs_shape, bias_shape, data_type, alpha, beta, m_h, act_info); + _reference = compute_reference(lhs_shape, rhs_shape, data_type, alpha, beta, m_h, act_info); } protected: @@ -1211,7 +1216,7 @@ protected: return dst; } - SimpleTensor compute_reference(const TensorShape &lhs_shape, const TensorShape &rhs_shape, const TensorShape &bias_shape, DataType data_type, float alpha, float beta, unsigned int m_h, + SimpleTensor compute_reference(const TensorShape &lhs_shape, const TensorShape &rhs_shape, DataType data_type, float alpha, float beta, unsigned int m_h, const ActivationLayerInfo &act_info) { TensorShape dst_shape = lhs_shape; @@ -1271,7 +1276,7 @@ public: broadcast_bias ? 
1 : batch_size); _target = compute_target(lhs_shape, rhs_shape, bias_shape, lhs_info, rhs_info, data_type, alpha, beta, broadcast_bias, act_info); - _reference = compute_reference(lhs_shape, rhs_shape, bias_shape, data_type, alpha, beta, broadcast_bias, act_info); + _reference = compute_reference(lhs_shape, rhs_shape, data_type, alpha, beta, broadcast_bias, act_info); } protected: @@ -1337,7 +1342,7 @@ protected: return dst; } - SimpleTensor compute_reference(const TensorShape &lhs_shape, const TensorShape &rhs_shape, const TensorShape &bias_shape, DataType data_type, float alpha, float beta, bool broadcast_bias, + SimpleTensor compute_reference(const TensorShape &lhs_shape, const TensorShape &rhs_shape, DataType data_type, float alpha, float beta, bool broadcast_bias, const ActivationLayerInfo &act_info) { TensorShape dst_shape = lhs_shape; @@ -1399,7 +1404,7 @@ public: const TensorShape bias_shape(n, 1, 1); _target = compute_target(lhs_shape, rhs_shape, bias_shape, lhs_info, rhs_info, data_type, alpha, beta, m_h, act_info); - _reference = compute_reference(lhs_shape, rhs_shape, bias_shape, data_type, alpha, beta, m_h, act_info); + _reference = compute_reference(lhs_shape, rhs_shape, data_type, alpha, beta, m_h, act_info); } protected: @@ -1463,7 +1468,7 @@ protected: return dst; } - SimpleTensor compute_reference(const TensorShape &lhs_shape, const TensorShape &rhs_shape, const TensorShape &bias_shape, DataType data_type, float alpha, float beta, unsigned int m_h, + SimpleTensor compute_reference(const TensorShape &lhs_shape, const TensorShape &rhs_shape, DataType data_type, float alpha, float beta, unsigned int m_h, const ActivationLayerInfo &act_info) { TensorShape dst_shape = lhs_shape; diff --git a/tests/validation/fixtures/GEMMTranspose1xWFixture.h b/tests/validation/fixtures/GEMMTranspose1xWFixture.h index af2a3b278d..89d2238344 100644 --- a/tests/validation/fixtures/GEMMTranspose1xWFixture.h +++ b/tests/validation/fixtures/GEMMTranspose1xWFixture.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2018 ARM Limited. + * Copyright (c) 2017-2019 ARM Limited. 
* * SPDX-License-Identifier: MIT * @@ -54,7 +54,7 @@ public: const unsigned int transpose_w = 16 / data_size_from_type(data_type); const TensorShape shape_b(static_cast(y * transpose_w), static_cast(std::ceil(x / static_cast(transpose_w)))); _target = compute_target(shape_a, shape_b, data_type); - _reference = compute_reference(shape_a, shape_b, data_type); + _reference = compute_reference(shape_a, data_type); } protected: @@ -106,7 +106,7 @@ protected: return b; } - SimpleTensor compute_reference(const TensorShape &shape_a, const TensorShape &shape_b, DataType data_type) + SimpleTensor compute_reference(const TensorShape &shape_a, DataType data_type) { // Create reference SimpleTensor a{ shape_a, data_type, 1 }; diff --git a/tests/validation/fixtures/InstanceNormalizationLayerFixture.h b/tests/validation/fixtures/InstanceNormalizationLayerFixture.h index 175ef2fb90..5e230d4430 100644 --- a/tests/validation/fixtures/InstanceNormalizationLayerFixture.h +++ b/tests/validation/fixtures/InstanceNormalizationLayerFixture.h @@ -48,7 +48,7 @@ public: void setup(TensorShape shape, DataType data_type, DataLayout data_layout, bool in_place) { _target = compute_target(shape, data_type, data_layout, in_place); - _reference = compute_reference(shape, data_type, data_layout); + _reference = compute_reference(shape, data_type); } protected: @@ -118,7 +118,7 @@ protected: } } - SimpleTensor compute_reference(const TensorShape &shape, DataType data_type, DataLayout data_layout) + SimpleTensor compute_reference(const TensorShape &shape, DataType data_type) { std::mt19937 gen(library->seed()); std::uniform_real_distribution dist_gamma(1.f, 2.f); diff --git a/tests/validation/fixtures/PermuteFixture.h b/tests/validation/fixtures/PermuteFixture.h index 92d01a5654..76351734d5 100644 --- a/tests/validation/fixtures/PermuteFixture.h +++ b/tests/validation/fixtures/PermuteFixture.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2018 ARM Limited. + * Copyright (c) 2017-2019 ARM Limited. 
* * SPDX-License-Identifier: MIT * @@ -46,9 +46,9 @@ class PermuteValidationFixture : public framework::Fixture { public: template - void setup(TensorShape input_shape, DataLayout input_layout, PermutationVector perm, DataType data_type) + void setup(TensorShape input_shape, PermutationVector perm, DataType data_type) { - _target = compute_target(input_shape, input_layout, data_type, perm); + _target = compute_target(input_shape, data_type, perm); _reference = compute_reference(input_shape, data_type, perm); } @@ -59,7 +59,7 @@ protected: library->fill_tensor_uniform(tensor, 0); } - TensorType compute_target(const TensorShape &input_shape, DataLayout input_layout, DataType data_type, PermutationVector perm) + TensorType compute_target(const TensorShape &input_shape, DataType data_type, PermutationVector perm) { // Permute shapes TensorShape output_shape = input_shape; diff --git a/tests/validation/fixtures/PoolingLayerFixture.h b/tests/validation/fixtures/PoolingLayerFixture.h index cdc2cae584..18577edc66 100644 --- a/tests/validation/fixtures/PoolingLayerFixture.h +++ b/tests/validation/fixtures/PoolingLayerFixture.h @@ -182,7 +182,7 @@ public: template void setup(TensorShape shape, PoolingType pool_type, DataType data_type, DataLayout data_layout = DataLayout::NCHW) { - PoolingLayerValidationGenericFixture::setup(shape, PoolingLayerInfo(pool_type), data_type, DataLayout::NCHW); + PoolingLayerValidationGenericFixture::setup(shape, PoolingLayerInfo(pool_type), data_type, data_layout); } }; diff --git a/tests/validation/fixtures/UNIT/DynamicTensorFixture.h b/tests/validation/fixtures/UNIT/DynamicTensorFixture.h index b2600f13f0..08b90c5b52 100644 --- a/tests/validation/fixtures/UNIT/DynamicTensorFixture.h +++ b/tests/validation/fixtures/UNIT/DynamicTensorFixture.h @@ -104,6 +104,7 @@ public: } void configure(ITensorType *src, ITensorType *dst) { + ARM_COMPUTE_UNUSED(src, dst); } void run() { diff --git a/tests/validation/fixtures/WarpPerspectiveFixture.h b/tests/validation/fixtures/WarpPerspectiveFixture.h index 0eba97c47c..aa84946e94 100644 --- a/tests/validation/fixtures/WarpPerspectiveFixture.h +++ b/tests/validation/fixtures/WarpPerspectiveFixture.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2018 ARM Limited. + * Copyright (c) 2017-2019 ARM Limited. 
* * SPDX-License-Identifier: MIT * @@ -58,14 +58,12 @@ public: constant_border_value = distribution_u8(gen); } - const TensorShape vmask_shape(input_shape); - // Create the matrix std::array matrix = { { 0 } }; fill_warp_matrix<9>(matrix); - _target = compute_target(input_shape, vmask_shape, matrix, policy, border_mode, constant_border_value, data_type); - _reference = compute_reference(input_shape, vmask_shape, matrix, policy, border_mode, constant_border_value, data_type); + _target = compute_target(input_shape, matrix, policy, border_mode, constant_border_value, data_type); + _reference = compute_reference(input_shape, matrix, policy, border_mode, constant_border_value, data_type); } protected: @@ -75,7 +73,7 @@ protected: library->fill_tensor_uniform(tensor, 0); } - TensorType compute_target(const TensorShape &shape, const TensorShape &vmask_shape, const std::array &matrix, InterpolationPolicy policy, BorderMode border_mode, + TensorType compute_target(const TensorShape &shape, const std::array &matrix, InterpolationPolicy policy, BorderMode border_mode, uint8_t constant_border_value, DataType data_type) { @@ -106,7 +104,7 @@ protected: return dst; } - SimpleTensor compute_reference(const TensorShape &shape, const TensorShape &vmask_shape, const std::array &matrix, InterpolationPolicy policy, BorderMode border_mode, + SimpleTensor compute_reference(const TensorShape &shape, const std::array &matrix, InterpolationPolicy policy, BorderMode border_mode, uint8_t constant_border_value, DataType data_type) { diff --git a/tests/validation/fixtures/WinogradConvolutionLayerFixture.h b/tests/validation/fixtures/WinogradConvolutionLayerFixture.h index c0ba57a828..9c2df9ef4b 100644 --- a/tests/validation/fixtures/WinogradConvolutionLayerFixture.h +++ b/tests/validation/fixtures/WinogradConvolutionLayerFixture.h @@ -81,8 +81,6 @@ protected: default: { ARM_COMPUTE_ERROR("Not supported"); - library->fill_tensor_uniform(tensor, i); - break; } } } @@ -168,7 +166,7 @@ public: { ARM_COMPUTE_UNUSED(dilation); _target = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, data_type, act_info, data_layout); - _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, data_type, act_info); + _reference = compute_reference(input_shape, weights_shape, bias_shape, info, data_type, act_info); } protected: @@ -192,8 +190,6 @@ protected: default: { ARM_COMPUTE_ERROR("Not supported"); - library->fill_tensor_uniform(tensor, i); - break; } } } @@ -247,7 +243,7 @@ protected: return dst; } - SimpleTensor compute_reference(const TensorShape &input_shape, const TensorShape &weights_shape, const TensorShape &bias_shape, const TensorShape &output_shape, const PadStrideInfo &info, + SimpleTensor compute_reference(const TensorShape &input_shape, const TensorShape &weights_shape, const TensorShape &bias_shape, const PadStrideInfo &info, DataType data_type, ActivationLayerInfo act_info) { // Create reference @@ -332,7 +328,7 @@ public: TensorShape output_shape = compute_winograd_input_transform_shape(TensorInfo(input_shape, 1, data_type), winograd_info); _target = compute_target(input_shape, output_shape, winograd_info, data_layout, data_type); - _reference = compute_reference(input_shape, output_shape, winograd_info, data_layout, data_type); + _reference = compute_reference(input_shape, output_shape, winograd_info, data_type); } protected: @@ -351,8 +347,6 @@ protected: default: { ARM_COMPUTE_ERROR("Not supported"); - library->fill_tensor_uniform(tensor, i); - break; } } } 
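The fill() hunks in this file all make the same cleanup: ARM_COMPUTE_ERROR("Not supported") does not return to the caller, so the fill_tensor_uniform() call and break that used to follow it in the default case were unreachable and are simply deleted. A standalone sketch of the resulting switch shape is below; the tensor type, the data-type enum and the error macro are simplified stand-ins for illustration, not the library's own definitions.

    #include <random>
    #include <stdexcept>
    #include <vector>

    // Simplified stand-ins so the sketch builds on its own.
    enum class DataType { F16, F32, QASYMM8 };
    #define EXAMPLE_ERROR(msg) throw std::runtime_error(msg) // stand-in for ARM_COMPUTE_ERROR

    struct ExampleTensor
    {
        DataType data_type() const { return type; }
        DataType type{ DataType::F32 };
        std::vector<float> data = std::vector<float>(16);
    };

    void fill(ExampleTensor &tensor, int seed)
    {
        std::mt19937 gen(static_cast<unsigned int>(seed));
        switch(tensor.data_type())
        {
            case DataType::F32:
            {
                std::uniform_real_distribution<float> distribution(-1.f, 1.f);
                for(auto &value : tensor.data)
                {
                    value = distribution(gen);
                }
                break;
            }
            default:
            {
                // The error macro throws, so no fallback fill or break is
                // needed (or reachable) after it.
                EXAMPLE_ERROR("Not supported");
            }
        }
    }

    int main()
    {
        ExampleTensor tensor;
        fill(tensor, 0);
        return 0;
    }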
@@ -390,7 +384,7 @@ protected: return dst; } - SimpleTensor compute_reference(const TensorShape &input_shape, const TensorShape &output_shape, const WinogradInfo &winograd_info, DataLayout data_layout, DataType data_type) + SimpleTensor compute_reference(const TensorShape &input_shape, const TensorShape &output_shape, const WinogradInfo &winograd_info, DataType data_type) { // Create reference SimpleTensor src{ input_shape, data_type, 1, QuantizationInfo() }; @@ -416,7 +410,7 @@ public: TensorShape output_shape = compute_winograd_filter_transform_shape(TensorInfo(input_shape, 1, data_type), winograd_info); _target = compute_target(input_shape, output_shape, winograd_info, data_layout, data_type); - _reference = compute_reference(input_shape, output_shape, winograd_info, data_layout, data_type); + _reference = compute_reference(input_shape, output_shape, winograd_info, data_type); } protected: @@ -435,8 +429,6 @@ protected: default: { ARM_COMPUTE_ERROR("Not supported"); - library->fill_tensor_uniform(tensor, i); - break; } } } @@ -474,7 +466,7 @@ protected: return dst; } - SimpleTensor compute_reference(const TensorShape &input_shape, const TensorShape &output_shape, const WinogradInfo &winograd_info, DataLayout data_layout, DataType data_type) + SimpleTensor compute_reference(const TensorShape &input_shape, const TensorShape &output_shape, const WinogradInfo &winograd_info, DataType data_type) { // Create reference SimpleTensor src{ input_shape, data_type, 1, QuantizationInfo() }; @@ -516,8 +508,6 @@ protected: default: { ARM_COMPUTE_ERROR("Not supported"); - library->fill_tensor_uniform(tensor, i); - break; } } } -- cgit v1.2.1
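Across the fixtures above, the same two-part recipe lets the tests build cleanly once -Wno-unused-parameter is dropped: internal helpers such as compute_reference() and compute_target() simply lose the parameters they never read, while parameters that have to remain in a setup() or configure() signature are consumed explicitly with ARM_COMPUTE_UNUSED. The standalone sketch below illustrates that recipe; the fixture class, its behaviour, and the local stand-in definition of ARM_COMPUTE_UNUSED are illustrative assumptions, not code from this patch.

    // Stand-in for the library's ARM_COMPUTE_UNUSED macro, defined here only so
    // the sketch builds on its own; the real definition lives in the Compute
    // Library headers.
    template <typename... Ts>
    inline void ARM_COMPUTE_UNUSED(Ts &&...)
    {
    }

    enum class DataLayout // simplified stand-in for arm_compute::DataLayout
    {
        NCHW,
        NHWC
    };

    class ExampleFixture // hypothetical fixture, for illustration only
    {
    public:
        // data_layout stays in the public signature so callers and datasets are
        // untouched, but this particular path never reads it.
        void setup(int num_elements, DataLayout data_layout)
        {
            ARM_COMPUTE_UNUSED(data_layout); // silences -Wunused-parameter without -Wno-unused-parameter
            _reference = compute_reference(num_elements);
        }

    private:
        // Internal helper: the unused parameter is removed outright, mirroring
        // the compute_reference() signature changes in the patch above.
        static int compute_reference(int num_elements)
        {
            return num_elements * 2;
        }

        int _reference{};
    };

    int main()
    {
        ExampleFixture fixture;
        fixture.setup(8, DataLayout::NHWC);
        return 0;
    }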