diff options
Diffstat (limited to 'tests/validation/fixtures/FFTFixture.h')
-rw-r--r-- | tests/validation/fixtures/FFTFixture.h | 100 |
1 file changed, 72 insertions, 28 deletions
diff --git a/tests/validation/fixtures/FFTFixture.h b/tests/validation/fixtures/FFTFixture.h index dad774ce51..024227b22a 100644 --- a/tests/validation/fixtures/FFTFixture.h +++ b/tests/validation/fixtures/FFTFixture.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019 Arm Limited. + * Copyright (c) 2019-2023 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -47,7 +47,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class FFTValidationFixture : public framework::Fixture { public: - template <typename...> void setup(TensorShape shape, DataType data_type) { _target = compute_target(shape, data_type); @@ -59,8 +58,23 @@ protected: template <typename U> void fill(U &&tensor) { - std::uniform_real_distribution<float> distribution(-5.f, 5.f); - library->fill(tensor, distribution, 0); + switch(tensor.data_type()) + { + case DataType::F16: + { + arm_compute::utils::uniform_real_distribution_16bit<half> distribution{ -5.0f, 5.0f }; + library->fill(tensor, distribution, 0); + break; + } + case DataType::F32: + { + std::uniform_real_distribution<float> distribution(-5.0f, 5.0f); + library->fill(tensor, distribution, 0); + break; + } + default: + library->fill_tensor_uniform(tensor, 0); + } } TensorType compute_target(const TensorShape &shape, DataType data_type) @@ -73,15 +87,17 @@ protected: FunctionType fft; fft.configure(&src, &dst, InfoType()); - ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(src.info()->is_resizable()); + ARM_COMPUTE_ASSERT(dst.info()->is_resizable()); + + add_padding_x({ &src, &dst }); // Allocate tensors src.allocator()->allocate(); dst.allocator()->allocate(); - ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(!src.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!dst.info()->is_resizable()); // Fill tensors fill(AccessorType(src)); @@ -117,26 +133,46 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class FFTConvolutionValidationGenericFixture : public framework::Fixture { public: - template <typename...> void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation, - DataType data_type, DataLayout data_layout, ActivationLayerInfo act_info) + DataType data_type, DataLayout data_layout, ActivationLayerInfo act_info, bool mixed_layout = false) { - _data_type = data_type; - _data_layout = data_layout; + _mixed_layout = mixed_layout; + _data_type = data_type; + _data_layout = data_layout; _target = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, dilation, act_info); _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, dilation, act_info); } protected: + void mix_layout(FunctionType &layer, TensorType &src, TensorType &dst) + { + // Test Multi DataLayout graph cases, when the data layout changes after configure + src.info()->set_data_layout(_data_layout == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW); + dst.info()->set_data_layout(_data_layout == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW); + + // Compute Convolution function + layer.run(); + + // Reinstating original data layout for the test suite to properly check the values + src.info()->set_data_layout(_data_layout); + dst.info()->set_data_layout(_data_layout); + } + template <typename U> void fill(U &&tensor, int i) { switch(tensor.data_type()) { + case DataType::F16: + { + arm_compute::utils::uniform_real_distribution_16bit<half> distribution{ -1.0f, 1.0f }; + library->fill(tensor, distribution, i); + break; + } case DataType::F32: { - std::uniform_real_distribution<> distribution(-1.0f, 1.0f); + std::uniform_real_distribution<float> distribution(-1.0f, 1.0f); library->fill(tensor, distribution, i); break; } @@ -164,14 +200,16 @@ protected: TensorType bias = create_tensor<TensorType>(bias_shape, _data_type, 1, QuantizationInfo(), _data_layout); TensorType dst = create_tensor<TensorType>(output_shape, _data_type, 1, QuantizationInfo(), _data_layout); + add_padding_x({ &src, &weights, &bias, &dst }, _data_layout); + // Create and configure function FunctionType conv; - conv.configure(&src, &weights, &bias, &dst, info, act_info); + conv.configure(&src, &weights, &bias, &dst, info, act_info, _data_type == DataType::F16); - ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(weights.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(src.info()->is_resizable()); + ARM_COMPUTE_ASSERT(weights.info()->is_resizable()); + ARM_COMPUTE_ASSERT(bias.info()->is_resizable()); + ARM_COMPUTE_ASSERT(dst.info()->is_resizable()); // Allocate tensors src.allocator()->allocate(); @@ -179,19 +217,25 @@ protected: bias.allocator()->allocate(); dst.allocator()->allocate(); - ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!weights.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!bias.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(!src.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!weights.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!bias.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!dst.info()->is_resizable()); // Fill tensors fill(AccessorType(src), 0); fill(AccessorType(weights), 1); fill(AccessorType(bias), 2); - // Compute convolution function - conv.run(); - + if(_mixed_layout) + { + mix_layout(conv, src, dst); + } + else + { + // Compute Convolution function + conv.run(); + } return dst; } @@ -218,18 +262,18 @@ protected: SimpleTensor<T> _reference{}; DataType _data_type{}; DataLayout _data_layout{}; + bool _mixed_layout{ false }; }; -template <typename TensorType, typename AccessorType, typename FunctionType, typename T> +template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool mixed_layout = false> class FFTConvolutionValidationFixture : public FFTConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T> { public: - template <typename...> void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation, DataType data_type, DataLayout data_layout, ActivationLayerInfo act_info) { FFTConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, dilation, - data_type, data_layout, act_info); + data_type, data_layout, act_info, mixed_layout); } }; } // namespace validation |