From 8be9148814b88e5b0cabd5a4d2b1f4ff470a8c1c Mon Sep 17 00:00:00 2001 From: Georgios Pinitas Date: Tue, 26 Mar 2019 17:23:28 +0000 Subject: COMPMID-1959: Implements 2D FFT on OpenCL Change-Id: I73cf3984a5463acc854c8a59dc2bd9a5234cd99c Signed-off-by: Georgios Pinitas Reviewed-on: https://review.mlplatform.org/c/936 Comments-Addressed: Arm Jenkins Tested-by: Arm Jenkins Reviewed-by: Gian Marco Iodice --- tests/benchmark/CL/ConvolutionLayer.cpp | 18 ++- tests/benchmark/CL/FFT.cpp | 7 +- .../fixtures/FFTConvolutionLayerFixture.h | 100 +++++++++++++++ tests/benchmark/fixtures/FFTFixture.h | 6 +- tests/datasets/SmallConvolutionLayerDataset.h | 13 +- .../resnet12/ResNet12ConvolutionLayerDataset.h | 51 ++++++++ tests/validation/CL/ConvolutionLayer.cpp | 2 +- tests/validation/CL/FFT.cpp | 119 ++++++++++++++++-- tests/validation/CL/ReductionOperation.cpp | 2 +- .../validation/fixtures/ConvolutionLayerFixture.h | 2 +- tests/validation/fixtures/FFTFixture.h | 138 ++++++++++++++++++++- 11 files changed, 433 insertions(+), 25 deletions(-) create mode 100644 tests/benchmark/fixtures/FFTConvolutionLayerFixture.h create mode 100644 tests/datasets/system_tests/resnet12/ResNet12ConvolutionLayerDataset.h (limited to 'tests') diff --git a/tests/benchmark/CL/ConvolutionLayer.cpp b/tests/benchmark/CL/ConvolutionLayer.cpp index 5eb33658ff..20828b7717 100644 --- a/tests/benchmark/CL/ConvolutionLayer.cpp +++ b/tests/benchmark/CL/ConvolutionLayer.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2018 ARM Limited. + * Copyright (c) 2017-2019 ARM Limited. * * SPDX-License-Identifier: MIT * @@ -29,6 +29,7 @@ #include "arm_compute/runtime/CL/functions/CLWinogradConvolutionLayer.h" #include "tests/CL/CLAccessor.h" #include "tests/benchmark/fixtures/ConvolutionLayerFixture.h" +#include "tests/benchmark/fixtures/FFTConvolutionLayerFixture.h" #include "tests/benchmark/fixtures/WinogradConvolutionLayerFixture.h" #include "tests/datasets/system_tests/alexnet/AlexNetConvolutionLayerDataset.h" #include "tests/datasets/system_tests/googlenet/inceptionv1/GoogLeNetInceptionV1ConvolutionLayerDataset.h" @@ -41,6 +42,9 @@ #include "tests/framework/Macros.h" #include "tests/framework/datasets/Datasets.h" #include "utils/TypePrinter.h" +#include +#include +#include namespace arm_compute { @@ -53,11 +57,17 @@ namespace const auto data_types = framework::dataset::make("DataType", { DataType::F16, DataType::F32, DataType::QASYMM8 }); } // namespace -using CLGEMMConvolutionLayerFixture = ConvolutionLayerFixture; - TEST_SUITE(CL) -using CLWinogradLayerFixture = WinogradConvolutionLayerFixture; +using CLGEMMConvolutionLayerFixture = ConvolutionLayerFixture; +using CLWinogradLayerFixture = WinogradConvolutionLayerFixture; +using CLFFTConvolutionLayerFixture = FFTConvolutionLayerFixture; + +REGISTER_FIXTURE_DATA_TEST_CASE(ResNet12FFTLayer, CLFFTConvolutionLayerFixture, framework::DatasetMode::ALL, + framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::ResNet12FFTConvolutionLayerDataset(), + framework::dataset::make("ActivationInfo", ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))), + framework::dataset::make("DataType", { DataType::F32 })), + framework::dataset::make("Batches", 1))); REGISTER_FIXTURE_DATA_TEST_CASE(AlexNetWinogradLayer, CLWinogradLayerFixture, framework::DatasetMode::ALL, framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::AlexNetWinogradLayerDataset(), diff --git a/tests/benchmark/CL/FFT.cpp b/tests/benchmark/CL/FFT.cpp index 
b345d58eaf..7f1ae63708 100644 --- a/tests/benchmark/CL/FFT.cpp +++ b/tests/benchmark/CL/FFT.cpp @@ -24,6 +24,7 @@ #include "arm_compute/core/Types.h" #include "arm_compute/runtime/CL/CLTensor.h" #include "arm_compute/runtime/CL/functions/CLFFT1D.h" +#include "arm_compute/runtime/CL/functions/CLFFT2D.h" #include "tests/CL/CLAccessor.h" #include "tests/benchmark/fixtures/FFTFixture.h" #include "tests/framework/Macros.h" @@ -42,13 +43,17 @@ const auto data_types = framework::dataset::make("DataType", { DataType::F32 }); const auto shapes = framework::dataset::make("Shapes", { TensorShape(192U, 128U, 64U), TensorShape(224U, 224U) }); } // namespace -using CLFFT1DFixture = FFT1DFixture; +using CLFFT1DFixture = FFTFixture; +using CLFFT2DFixture = FFTFixture; TEST_SUITE(CL) REGISTER_FIXTURE_DATA_TEST_CASE(FFT1D, CLFFT1DFixture, framework::DatasetMode::ALL, framework::dataset::combine(shapes, data_types)); +REGISTER_FIXTURE_DATA_TEST_CASE(FFT2D, CLFFT2DFixture, framework::DatasetMode::ALL, + framework::dataset::combine(shapes, data_types)); + TEST_SUITE_END() // CL } // namespace benchmark } // namespace test diff --git a/tests/benchmark/fixtures/FFTConvolutionLayerFixture.h b/tests/benchmark/fixtures/FFTConvolutionLayerFixture.h new file mode 100644 index 0000000000..2c53e3ad9b --- /dev/null +++ b/tests/benchmark/fixtures/FFTConvolutionLayerFixture.h @@ -0,0 +1,100 @@ +/* + * Copyright (c) 2019 ARM Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +#ifndef ARM_COMPUTE_TEST_FFT_CONVOLUTION_LAYER_FIXTURE +#define ARM_COMPUTE_TEST_FFT_CONVOLUTION_LAYER_FIXTURE + +#include "arm_compute/core/TensorShape.h" +#include "arm_compute/core/Types.h" +#include "tests/Globals.h" +#include "tests/Utils.h" +#include "tests/framework/Fixture.h" + +namespace arm_compute +{ +namespace test +{ +namespace benchmark +{ +/** Fixture that can be used for NEON and CL */ +template +class FFTConvolutionLayerFixture : public framework::Fixture +{ +public: + template + void setup(TensorShape src_shape, TensorShape weights_shape, TensorShape biases_shape, TensorShape dst_shape, PadStrideInfo info, Size2D dilation, ActivationLayerInfo act_info, DataType data_type, + int batches) + { + ARM_COMPUTE_UNUSED(dilation); + + // Set batched in source and destination shapes + + src_shape.set(3 /* batch */, batches); + dst_shape.set(3 /* batch */, batches); + + // Create tensors + src = create_tensor(src_shape, data_type, 1); + weights = create_tensor(weights_shape, data_type, 1); + biases = create_tensor(biases_shape, data_type, 1); + dst = create_tensor(dst_shape, data_type, 1); + + // Create and configure function + conv_layer.configure(&src, &weights, &biases, &dst, info, act_info); + + // Allocate tensors + src.allocator()->allocate(); + weights.allocator()->allocate(); + biases.allocator()->allocate(); + dst.allocator()->allocate(); + } + + void run() + { + conv_layer.run(); + } + + void sync() + { + sync_if_necessary(); + sync_tensor_if_necessary(dst); + } + + void teardown() + { + src.allocator()->free(); + weights.allocator()->free(); + biases.allocator()->free(); + dst.allocator()->free(); + } + +private: + TensorType src{}; + TensorType weights{}; + TensorType biases{}; + TensorType dst{}; + Function conv_layer{}; +}; +} // namespace benchmark +} // namespace test +} // namespace arm_compute +#endif /* ARM_COMPUTE_TEST_FFT_CONVOLUTION_LAYER_FIXTURE */ diff --git a/tests/benchmark/fixtures/FFTFixture.h b/tests/benchmark/fixtures/FFTFixture.h index c9c4e3a88e..53897b1b14 100644 --- a/tests/benchmark/fixtures/FFTFixture.h +++ b/tests/benchmark/fixtures/FFTFixture.h @@ -36,8 +36,8 @@ namespace test { namespace benchmark { -template -class FFT1DFixture : public framework::Fixture +template +class FFTFixture : public framework::Fixture { public: template @@ -48,7 +48,7 @@ public: dst = create_tensor(shape, data_type, 2); // Create and configure function - fft_func.configure(&src, &dst, FFT1DInfo()); + fft_func.configure(&src, &dst, FFTInfo()); // Allocate tensors src.allocator()->allocate(); diff --git a/tests/datasets/SmallConvolutionLayerDataset.h b/tests/datasets/SmallConvolutionLayerDataset.h index 73f1554c49..22d0bc582a 100644 --- a/tests/datasets/SmallConvolutionLayerDataset.h +++ b/tests/datasets/SmallConvolutionLayerDataset.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2018 ARM Limited. + * Copyright (c) 2017-2019 ARM Limited. 
* * SPDX-License-Identifier: MIT * @@ -132,6 +132,17 @@ public: } }; +class SmallFFTConvolutionLayerDataset final : public ConvolutionLayerDataset +{ +public: + SmallFFTConvolutionLayerDataset() + { + add_config(TensorShape(8U, 7U, 3U), TensorShape(3U, 3U, 3U, 2U), TensorShape(2U), TensorShape(8U, 7U, 2U), PadStrideInfo(1, 1, 1, 1)); + add_config(TensorShape(64U, 32U, 5U), TensorShape(5U, 5U, 5U, 10U), TensorShape(10U), TensorShape(64U, 32U, 10U), PadStrideInfo(1, 1, 2, 2)); + add_config(TensorShape(192U, 128U, 8U), TensorShape(9U, 9U, 8U, 3U), TensorShape(3U), TensorShape(192U, 128U, 3U), PadStrideInfo(1, 1, 4, 4)); + } +}; + class SmallConvolutionLayerDataset final : public ConvolutionLayerDataset { public: diff --git a/tests/datasets/system_tests/resnet12/ResNet12ConvolutionLayerDataset.h b/tests/datasets/system_tests/resnet12/ResNet12ConvolutionLayerDataset.h new file mode 100644 index 0000000000..b960dceafd --- /dev/null +++ b/tests/datasets/system_tests/resnet12/ResNet12ConvolutionLayerDataset.h @@ -0,0 +1,51 @@ +/* + * Copyright (c) 2019 ARM Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +#ifndef ARM_COMPUTE_TEST_RESNET12_CONVOLUTION_LAYER_DATASET +#define ARM_COMPUTE_TEST_RESNET12_CONVOLUTION_LAYER_DATASET + +#include "tests/datasets/ConvolutionLayerDataset.h" + +#include "utils/TypePrinter.h" + +#include "arm_compute/core/TensorShape.h" +#include "arm_compute/core/Types.h" + +namespace arm_compute +{ +namespace test +{ +namespace datasets +{ +class ResNet12FFTConvolutionLayerDataset final : public ConvolutionLayerDataset +{ +public: + ResNet12FFTConvolutionLayerDataset() + { + add_config(TensorShape(192U, 128U, 64U), TensorShape(9U, 9U, 64U, 3U), TensorShape(3U), TensorShape(192U, 128U, 3U), PadStrideInfo(1, 1, 4, 4)); + } +}; +} // namespace datasets +} // namespace test +} // namespace arm_compute +#endif /* ARM_COMPUTE_TEST_RESNET12_CONVOLUTION_LAYER_DATASET */ diff --git a/tests/validation/CL/ConvolutionLayer.cpp b/tests/validation/CL/ConvolutionLayer.cpp index 41d2b7bb5e..f1f9b59330 100644 --- a/tests/validation/CL/ConvolutionLayer.cpp +++ b/tests/validation/CL/ConvolutionLayer.cpp @@ -46,7 +46,7 @@ namespace validation namespace { constexpr AbsoluteTolerance absolute_tolerance_float(0.0001f); /**< Absolute Tolerance value for comparing reference's output against implementation's output for DataType::F32 */ -RelativeTolerance tolerance_f32(0.05f); /**< Tolerance value for comparing reference's output against implementation's output for DataType::F32 */ +RelativeTolerance tolerance_f32(0.1f); /**< Tolerance value for comparing reference's output against implementation's output for DataType::F32 */ RelativeTolerance tolerance_f16(half_float::half(0.2)); /**< Tolerance value for comparing reference's output against implementation's output for DataType::F16 */ constexpr AbsoluteTolerance tolerance_qasymm8(1); /**< Tolerance value for comparing reference's output against implementation's output for quantized data types */ constexpr float tolerance_num = 0.07f; /**< Tolerance number */ diff --git a/tests/validation/CL/FFT.cpp b/tests/validation/CL/FFT.cpp index 0d29532c29..9fdd85b604 100644 --- a/tests/validation/CL/FFT.cpp +++ b/tests/validation/CL/FFT.cpp @@ -24,7 +24,10 @@ #include "arm_compute/core/Types.h" #include "arm_compute/runtime/CL/CLTensor.h" #include "arm_compute/runtime/CL/functions/CLFFT1D.h" +#include "arm_compute/runtime/CL/functions/CLFFT2D.h" +#include "arm_compute/runtime/CL/functions/CLFFTConvolutionLayer.h" #include "tests/CL/CLAccessor.h" +#include "tests/datasets/SmallConvolutionLayerDataset.h" #include "tests/framework/Asserts.h" #include "tests/framework/Macros.h" #include "tests/framework/datasets/Datasets.h" @@ -40,7 +43,7 @@ namespace validation namespace { const auto data_types = framework::dataset::make("DataType", { DataType::F32 }); -const auto shapes = framework::dataset::make("TensorShape", { TensorShape(2U, 2U, 3U), TensorShape(3U, 2U, 3U), +const auto shapes_1d = framework::dataset::make("TensorShape", { TensorShape(2U, 2U, 3U), TensorShape(3U, 2U, 3U), TensorShape(4U, 2U, 3U), TensorShape(5U, 2U, 3U), TensorShape(7U, 2U, 3U), TensorShape(8U, 2U, 3U), TensorShape(9U, 2U, 3U), TensorShape(25U, 2U, 3U), @@ -48,11 +51,27 @@ const auto shapes = framework::dataset::make("TensorShape", { TensorShape(2U TensorShape(16U, 2U, 3U), TensorShape(32U, 2U, 3U), TensorShape(96U, 2U, 2U) }); +const auto shapes_2d = framework::dataset::make("TensorShape", { TensorShape(2U, 2U, 3U), TensorShape(3U, 6U, 3U), + TensorShape(4U, 5U, 3U), TensorShape(5U, 7U, 3U), + TensorShape(7U, 25U, 3U), TensorShape(8U, 2U, 3U), + TensorShape(9U, 16U, 3U), 
TensorShape(25U, 32U, 3U), + TensorShape(192U, 128U, 2U) + }); + +const auto ActivationFunctionsSmallDataset = framework::dataset::make("ActivationInfo", +{ + ActivationLayerInfo(), + ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 0.5f) +}); + +RelativeTolerance tolerance_f32(0.1f); /**< Relative tolerance value for FP32 */ +constexpr float tolerance_num = 0.07f; /**< Tolerance number */ + } // namespace TEST_SUITE(CL) TEST_SUITE(FFT1D) -DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(shapes, data_types), +DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(shapes_1d, data_types), shape, data_type) { // Create tensors @@ -81,19 +100,19 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(shapes, data_ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip( framework::dataset::make("InputInfo", { TensorInfo(TensorShape(32U, 13U, 2U), 2, DataType::F32), // Mismatching data types TensorInfo(TensorShape(32U, 13U, 2U), 2, DataType::F32), // Mismatching shapes - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), // Invalid channels + TensorInfo(TensorShape(32U, 13U, 2U), 3, DataType::F32), // Invalid channels TensorInfo(TensorShape(32U, 13U, 2U), 2, DataType::F32), // Unsupported axis TensorInfo(TensorShape(11U, 13U, 2U), 2, DataType::F32), // Undecomposable FFT TensorInfo(TensorShape(25U, 13U, 2U), 2, DataType::F32), }), framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(32U, 13U, 2U), 2, DataType::F16), TensorInfo(TensorShape(16U, 13U, 2U), 2, DataType::F32), - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), + TensorInfo(TensorShape(32U, 13U, 2U), 2, DataType::F32), TensorInfo(TensorShape(32U, 13U, 2U), 2, DataType::F32), TensorInfo(TensorShape(11U, 13U, 2U), 2, DataType::F32), TensorInfo(TensorShape(25U, 13U, 2U), 2, DataType::F32), })), - framework::dataset::make("Axis", { 0, 0, 0, 1, 0, 0 })), + framework::dataset::make("Axis", { 0, 0, 0, 2, 0, 0 })), framework::dataset::make("Expected", { false, false, false, false, false, true })), input_info, output_info, axis, expected) { @@ -106,19 +125,103 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip( // *INDENT-ON* template -using CLFFT1DFixture = FFTValidationFixture; +using CLFFT1DFixture = FFTValidationFixture; TEST_SUITE(Float) TEST_SUITE(FP32) -FIXTURE_DATA_TEST_CASE(RunSmall, CLFFT1DFixture, framework::DatasetMode::ALL, combine(shapes, framework::dataset::make("DataType", DataType::F32))) +FIXTURE_DATA_TEST_CASE(RunSmall, CLFFT1DFixture, framework::DatasetMode::ALL, combine(shapes_1d, framework::dataset::make("DataType", DataType::F32))) { // Validate output - validate(CLAccessor(_target), _reference, RelativeTolerance(0.1f), 0.05f); + validate(CLAccessor(_target), _reference, tolerance_f32, tolerance_num); } TEST_SUITE_END() // FP32 TEST_SUITE_END() // Float TEST_SUITE_END() // FFT1D + +TEST_SUITE(FFT2D) + +DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(shapes_2d, data_types), + shape, data_type) +{ + // Create tensors + CLTensor src = create_tensor(shape, data_type, 2); + CLTensor dst = create_tensor(shape, data_type, 2); + + ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS); + + // Create and configure function + CLFFT2D fft2d; + fft2d.configure(&src, &dst, FFT2DInfo()); + + // Validate valid region + const ValidRegion valid_region = shape_to_valid_region(shape); + 
validate(src.info()->valid_region(), valid_region); + validate(dst.info()->valid_region(), valid_region); + + // Validate padding + validate(src.info()->padding(), PaddingSize()); + validate(dst.info()->padding(), PaddingSize()); +} + +// *INDENT-OFF* +// clang-format off +DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip( + framework::dataset::make("InputInfo", { TensorInfo(TensorShape(32U, 25U, 2U), 2, DataType::F32), // Mismatching data types + TensorInfo(TensorShape(32U, 25U, 2U), 2, DataType::F32), // Mismatching shapes + TensorInfo(TensorShape(32U, 25U, 2U), 3, DataType::F32), // Invalid channels + TensorInfo(TensorShape(32U, 13U, 2U), 2, DataType::F32), // Undecomposable FFT + TensorInfo(TensorShape(32U, 25U, 2U), 2, DataType::F32), + }), + framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(32U, 25U, 2U), 2, DataType::F16), + TensorInfo(TensorShape(16U, 25U, 2U), 2, DataType::F32), + TensorInfo(TensorShape(32U, 25U, 2U), 1, DataType::F32), + TensorInfo(TensorShape(32U, 13U, 2U), 2, DataType::F32), + TensorInfo(TensorShape(32U, 25U, 2U), 2, DataType::F32), + })), + framework::dataset::make("Expected", { false, false, false, false, true })), + input_info, output_info, expected) +{ + const Status s = CLFFT2D::validate(&input_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false), FFT2DInfo()); + ARM_COMPUTE_EXPECT(bool(s) == expected, framework::LogLevel::ERRORS); +} +// clang-format on +// *INDENT-ON* + +template +using CLFFT2DFixture = FFTValidationFixture; + +TEST_SUITE(Float) +TEST_SUITE(FP32) +FIXTURE_DATA_TEST_CASE(RunSmall, CLFFT2DFixture, framework::DatasetMode::ALL, combine(shapes_2d, framework::dataset::make("DataType", DataType::F32))) +{ + // Validate output + validate(CLAccessor(_target), _reference, tolerance_f32, tolerance_num); +} +TEST_SUITE_END() // FP32 +TEST_SUITE_END() // Float +TEST_SUITE_END() // FFT2D + +TEST_SUITE(FFTConvolutionLayer) + +template +using CLFFTConvolutionLayerFixture = FFTConvolutionValidationFixture; + +TEST_SUITE(Float) +TEST_SUITE(FP32) +FIXTURE_DATA_TEST_CASE(RunSmall, CLFFTConvolutionLayerFixture, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallFFTConvolutionLayerDataset(), + framework::dataset::make("DataType", DataType::F32)), + framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })), + ActivationFunctionsSmallDataset)) +{ + // Validate output + validate(CLAccessor(_target), _reference, tolerance_f32, tolerance_num); +} +TEST_SUITE_END() // FP32 +TEST_SUITE_END() // Float +TEST_SUITE_END() // FFTConvolutionLayer + TEST_SUITE_END() // CL } // namespace validation } // namespace test diff --git a/tests/validation/CL/ReductionOperation.cpp b/tests/validation/CL/ReductionOperation.cpp index c8474e97e6..79308c8229 100644 --- a/tests/validation/CL/ReductionOperation.cpp +++ b/tests/validation/CL/ReductionOperation.cpp @@ -63,7 +63,7 @@ TEST_SUITE(ReductionOperation) // clang-format off DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip( framework::dataset::make("InputInfo", { TensorInfo(TensorShape(128U, 64U), 1, DataType::F32), // Mismatching data type input/output - TensorInfo(TensorShape(128U, 64U), 2, DataType::F32), // Number of Input channels != 1 + TensorInfo(TensorShape(128U, 64U), 3, DataType::F32), // Number of Input channels != 1 TensorInfo(TensorShape(128U, 64U), 1, DataType::S16), // DataType != QASYMM8/F16/F32 TensorInfo(TensorShape(128U, 64U), 1, DataType::F32), // Axis >= num_max_dimensions 
TensorInfo(TensorShape(128U, 64U), 1, DataType::QASYMM8), // Axis == 0 and SUM_SQUARE and QASYMM8 diff --git a/tests/validation/fixtures/ConvolutionLayerFixture.h b/tests/validation/fixtures/ConvolutionLayerFixture.h index 795b9de6cd..52fa8da60b 100644 --- a/tests/validation/fixtures/ConvolutionLayerFixture.h +++ b/tests/validation/fixtures/ConvolutionLayerFixture.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2018 ARM Limited. + * Copyright (c) 2017-2019 ARM Limited. * * SPDX-License-Identifier: MIT * diff --git a/tests/validation/fixtures/FFTFixture.h b/tests/validation/fixtures/FFTFixture.h index 8e3c01eaff..1aaa5965b2 100644 --- a/tests/validation/fixtures/FFTFixture.h +++ b/tests/validation/fixtures/FFTFixture.h @@ -31,6 +31,8 @@ #include "tests/IAccessor.h" #include "tests/framework/Asserts.h" #include "tests/framework/Fixture.h" +#include "tests/validation/reference/ActivationLayer.h" +#include "tests/validation/reference/ConvolutionLayer.h" #include "tests/validation/reference/DFT.h" #include @@ -41,7 +43,7 @@ namespace test { namespace validation { -template +template class FFTValidationFixture : public framework::Fixture { public: @@ -68,8 +70,8 @@ protected: TensorType dst = create_tensor(shape, data_type, 2); // Create and configure function - FunctionType fft1d; - fft1d.configure(&src, &dst, FFT1DInfo()); + FunctionType fft; + fft.configure(&src, &dst, InfoType()); ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS); ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS); @@ -85,7 +87,7 @@ protected: fill(AccessorType(src)); // Compute function - fft1d.run(); + fft.run(); return dst; } @@ -97,12 +99,138 @@ protected: // Fill reference fill(src); + if(std::is_same::value) + { + return reference::dft_1d(src, reference::FFTDirection::Forward); + } + else + { + return reference::dft_2d(src, reference::FFTDirection::Forward); + } + } + + TensorType _target{}; + SimpleTensor _reference{}; +}; + +template +class FFTConvolutionValidationGenericFixture : public framework::Fixture +{ +public: + template + void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation, + DataType data_type, DataLayout data_layout, ActivationLayerInfo act_info) + { + _data_type = data_type; + _data_layout = data_layout; + + _target = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, dilation, act_info); + _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, dilation, act_info); + } + +protected: + template + void fill(U &&tensor, int i) + { + switch(tensor.data_type()) + { + case DataType::F32: + { + std::uniform_real_distribution<> distribution(-1.0f, 1.0f); + library->fill(tensor, distribution, i); + break; + } + default: + library->fill_tensor_uniform(tensor, i); + } + } + + TensorType compute_target(TensorShape input_shape, TensorShape weights_shape, const TensorShape &bias_shape, TensorShape output_shape, const PadStrideInfo &info, + const Size2D &dilation, const ActivationLayerInfo act_info) + { + ARM_COMPUTE_UNUSED(dilation); + ARM_COMPUTE_ERROR_ON((input_shape[2] % weights_shape[2]) != 0); + + if(_data_layout == DataLayout::NHWC) + { + permute(input_shape, PermutationVector(2U, 0U, 1U)); + permute(weights_shape, PermutationVector(2U, 0U, 1U)); + permute(output_shape, PermutationVector(2U, 0U, 1U)); + } + + // Create tensors + TensorType src = create_tensor(input_shape, _data_type, 1, QuantizationInfo(), 
_data_layout); + TensorType weights = create_tensor(weights_shape, _data_type, 1, QuantizationInfo(), _data_layout); + TensorType bias = create_tensor(bias_shape, _data_type, 1, QuantizationInfo(), _data_layout); + TensorType dst = create_tensor(output_shape, _data_type, 1, QuantizationInfo(), _data_layout); + + // Create and configure function + FunctionType conv; + conv.configure(&src, &weights, &bias, &dst, info, act_info); + + ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_EXPECT(weights.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS); + + // Allocate tensors + src.allocator()->allocate(); + weights.allocator()->allocate(); + bias.allocator()->allocate(); + dst.allocator()->allocate(); + + ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_EXPECT(!weights.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_EXPECT(!bias.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS); + + // Fill tensors + fill(AccessorType(src), 0); + fill(AccessorType(weights), 1); + fill(AccessorType(bias), 2); + + // Compute convolution function + conv.run(); + + return dst; + } - return reference::dft_1d(src, reference::FFTDirection::Forward); + SimpleTensor compute_reference(const TensorShape &input_shape, const TensorShape &weights_shape, const TensorShape &bias_shape, const TensorShape &output_shape, const PadStrideInfo &info, + const Size2D &dilation, const ActivationLayerInfo act_info) + { + ARM_COMPUTE_ERROR_ON((input_shape[2] % weights_shape[2]) != 0); + + // Create reference + SimpleTensor src{ input_shape, _data_type, 1 }; + SimpleTensor weights{ weights_shape, _data_type, 1 }; + SimpleTensor bias{ bias_shape, _data_type, 1 }; + + // Fill reference + fill(src, 0); + fill(weights, 1); + fill(bias, 2); + + return (act_info.enabled()) ? reference::activation_layer(reference::convolution_layer(src, weights, bias, output_shape, info, dilation), act_info) : reference::convolution_layer(src, + weights, bias, output_shape, info, dilation); } TensorType _target{}; SimpleTensor _reference{}; + DataType _data_type{}; + DataLayout _data_layout{}; +}; + +template +class FFTConvolutionValidationFixture : public FFTConvolutionValidationGenericFixture +{ +public: + template + void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation, + DataType data_type, DataLayout data_layout, ActivationLayerInfo act_info) + { + FFTConvolutionValidationGenericFixture::setup(input_shape, weights_shape, bias_shape, output_shape, info, dilation, + data_type, data_layout, act_info); + } }; } // namespace validation } // namespace test -- cgit v1.2.1
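
The FFT2D benchmark and validation suites above only exercise CLFFT2D through the test fixtures. As a minimal standalone sketch of the same call sequence — assuming the usual CL runtime boilerplate (CLScheduler, CLTensor allocators) that the fixtures hide, and default-constructing FFT2DInfo exactly as the Configuration data test does — the function can be driven as follows; the 192x128 shape and the 2-channel (real/imaginary) F32 layout mirror the shapes_2d dataset:

#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "arm_compute/runtime/CL/CLTensor.h"
#include "arm_compute/runtime/CL/functions/CLFFT2D.h"

using namespace arm_compute;

int main()
{
    // Create the CL context/queue and load the kernel library.
    CLScheduler::get().default_init();

    // Complex tensors are represented as 2-channel F32 tensors (real, imaginary),
    // matching the create_tensor(shape, data_type, 2) calls in the tests above.
    CLTensor src;
    CLTensor dst;
    src.allocator()->init(TensorInfo(TensorShape(192U, 128U), 2, DataType::F32));
    dst.allocator()->init(TensorInfo(TensorShape(192U, 128U), 2, DataType::F32));

    // Default-constructed FFT2DInfo (forward transform), as in the Configuration test.
    CLFFT2D fft2d;
    fft2d.configure(&src, &dst, FFT2DInfo());

    src.allocator()->allocate();
    dst.allocator()->allocate();

    // ... map src, write the real/imaginary planes, unmap ...

    fft2d.run();
    CLScheduler::get().sync();

    return 0;
}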
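
The Validate data test above goes through the static CLFFT2D::validate() with non-resizable clones of each TensorInfo pair; the same entry point can be called ahead of configure() to check support without allocating anything. A short sketch reusing the one input/output pair the table expects to pass (scheduler initialisation assumed, as in the sketch above):

#include <iostream>

#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "arm_compute/runtime/CL/functions/CLFFT2D.h"

using namespace arm_compute;

int main()
{
    CLScheduler::get().default_init();

    // 2-channel (complex) F32 tensors with FFT-decomposable dimensions.
    const TensorInfo input(TensorShape(32U, 25U, 2U), 2, DataType::F32);
    const TensorInfo output(TensorShape(32U, 25U, 2U), 2, DataType::F32);

    // Same call the Validate test makes; a false Status carries the reason.
    const Status status = CLFFT2D::validate(&input, &output, FFT2DInfo());
    if(!bool(status))
    {
        std::cerr << "CLFFT2D not supported: " << status.error_description() << std::endl;
        return 1;
    }
    return 0;
}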
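
The FFT convolution fixtures map straight onto CLFFTConvolutionLayer::configure(input, weights, biases, output, conv_info, act_info), the call made in FFTConvolutionLayerFixture.h above. Outside the test framework that looks roughly like the sketch below, using the single ResNet12 configuration added in ResNet12ConvolutionLayerDataset.h (NCHW, FP32, 9x9 kernel, stride 1, padding 4); the allocator/scheduler boilerplate is assumed as before:

#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "arm_compute/runtime/CL/CLTensor.h"
#include "arm_compute/runtime/CL/functions/CLFFTConvolutionLayer.h"

using namespace arm_compute;

int main()
{
    CLScheduler::get().default_init();

    // Shapes from the ResNet12 FFT convolution dataset: 192x128x64 input,
    // 9x9x64x3 weights, 3 biases, 192x128x3 output, stride 1, padding 4.
    CLTensor src, weights, biases, dst;
    src.allocator()->init(TensorInfo(TensorShape(192U, 128U, 64U), 1, DataType::F32));
    weights.allocator()->init(TensorInfo(TensorShape(9U, 9U, 64U, 3U), 1, DataType::F32));
    biases.allocator()->init(TensorInfo(TensorShape(3U), 1, DataType::F32));
    dst.allocator()->init(TensorInfo(TensorShape(192U, 128U, 3U), 1, DataType::F32));

    // Fused activation, as exercised by the ResNet12FFTLayer benchmark case.
    CLFFTConvolutionLayer conv;
    conv.configure(&src, &weights, &biases, &dst,
                   PadStrideInfo(1, 1, 4, 4),
                   ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));

    src.allocator()->allocate();
    weights.allocator()->allocate();
    biases.allocator()->allocate();
    dst.allocator()->allocate();

    // ... fill src, weights and biases ...

    conv.run();
    CLScheduler::get().sync();

    return 0;
}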