author     Georgios Pinitas <georgios.pinitas@arm.com>   2019-03-26 17:23:28 +0000
committer  Gian Marco Iodice <gianmarco.iodice@arm.com>  2019-04-11 09:34:26 +0000
commit     8be9148814b88e5b0cabd5a4d2b1f4ff470a8c1c (patch)
tree       760658b8c7b8917379467bd3fc119a5502faa850 /tests
parent     a50e702289af66944e860eafc7f3b32f6c5f30be (diff)
COMPMID-1959: Implements 2D FFT on OpenCL
Change-Id: I73cf3984a5463acc854c8a59dc2bd9a5234cd99c
Signed-off-by: Georgios Pinitas <georgios.pinitas@arm.com>
Reviewed-on: https://review.mlplatform.org/c/936
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Gian Marco Iodice <gianmarco.iodice@arm.com>
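For reference, a minimal standalone use of the new 2D FFT function might look like the following sketch. It mirrors the configure/run pattern exercised by the validation tests added below (tensors with 2 channels holding interleaved real/imaginary parts); the CLScheduler::default_init() setup, the TensorInfo-based allocator initialisation and the 192x128 shape are illustrative assumptions rather than part of this patch.

#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "arm_compute/runtime/CL/CLTensor.h"
#include "arm_compute/runtime/CL/functions/CLFFT2D.h"

using namespace arm_compute;

int main()
{
    // Assumed setup: default OpenCL context/queue, as in the library examples
    CLScheduler::get().default_init();

    // 2 channels per element: interleaved real/imaginary parts, as in the new tests
    CLTensor src, dst;
    src.allocator()->init(TensorInfo(TensorShape(192U, 128U), 2, DataType::F32));
    dst.allocator()->init(TensorInfo(TensorShape(192U, 128U), 2, DataType::F32));

    // Forward 2D FFT with the default FFT2DInfo, as in the Configuration test
    CLFFT2D fft2d;
    fft2d.configure(&src, &dst, FFT2DInfo());

    src.allocator()->allocate();
    dst.allocator()->allocate();
    // ... fill src with complex input ...

    fft2d.run();
    CLScheduler::get().sync();
    return 0;
}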
Diffstat (limited to 'tests')
-rw-r--r--  tests/benchmark/CL/ConvolutionLayer.cpp                                 |  18
-rw-r--r--  tests/benchmark/CL/FFT.cpp                                              |   7
-rw-r--r--  tests/benchmark/fixtures/FFTConvolutionLayerFixture.h                   | 100
-rw-r--r--  tests/benchmark/fixtures/FFTFixture.h                                   |   6
-rw-r--r--  tests/datasets/SmallConvolutionLayerDataset.h                           |  13
-rw-r--r--  tests/datasets/system_tests/resnet12/ResNet12ConvolutionLayerDataset.h  |  51
-rw-r--r--  tests/validation/CL/ConvolutionLayer.cpp                                |   2
-rw-r--r--  tests/validation/CL/FFT.cpp                                             | 119
-rw-r--r--  tests/validation/CL/ReductionOperation.cpp                              |   2
-rw-r--r--  tests/validation/fixtures/ConvolutionLayerFixture.h                     |   2
-rw-r--r--  tests/validation/fixtures/FFTFixture.h                                  | 138
11 files changed, 433 insertions, 25 deletions
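The FFT-based convolution entry point covered here, CLFFTConvolutionLayer, is driven by the fixtures below through configure(&src, &weights, &biases, &dst, info, act_info). A minimal sketch using the ResNet12 configuration from the new dataset might look as follows; the scheduler setup and TensorInfo-based allocations are again assumed for illustration, while the shapes, padding and fused ReLU come from the dataset and benchmark added in this patch.

#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "arm_compute/runtime/CL/CLTensor.h"
#include "arm_compute/runtime/CL/functions/CLFFTConvolutionLayer.h"

using namespace arm_compute;

int main()
{
    CLScheduler::get().default_init();

    // Shapes from the ResNet12 FFT convolution config: 192x128x64 input,
    // 9x9x64x3 weights, stride 1 with 4x4 padding, 192x128x3 output
    CLTensor src, weights, biases, dst;
    src.allocator()->init(TensorInfo(TensorShape(192U, 128U, 64U), 1, DataType::F32));
    weights.allocator()->init(TensorInfo(TensorShape(9U, 9U, 64U, 3U), 1, DataType::F32));
    biases.allocator()->init(TensorInfo(TensorShape(3U), 1, DataType::F32));
    dst.allocator()->init(TensorInfo(TensorShape(192U, 128U, 3U), 1, DataType::F32));

    // Fused ReLU activation, as in the ResNet12FFTLayer benchmark case
    CLFFTConvolutionLayer conv;
    conv.configure(&src, &weights, &biases, &dst, PadStrideInfo(1, 1, 4, 4),
                   ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));

    src.allocator()->allocate();
    weights.allocator()->allocate();
    biases.allocator()->allocate();
    dst.allocator()->allocate();
    // ... fill src, weights and biases ...

    conv.run();
    CLScheduler::get().sync();
    return 0;
}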
diff --git a/tests/benchmark/CL/ConvolutionLayer.cpp b/tests/benchmark/CL/ConvolutionLayer.cpp
index 5eb33658ff..20828b7717 100644
--- a/tests/benchmark/CL/ConvolutionLayer.cpp
+++ b/tests/benchmark/CL/ConvolutionLayer.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018 ARM Limited.
+ * Copyright (c) 2017-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -29,6 +29,7 @@
#include "arm_compute/runtime/CL/functions/CLWinogradConvolutionLayer.h"
#include "tests/CL/CLAccessor.h"
#include "tests/benchmark/fixtures/ConvolutionLayerFixture.h"
+#include "tests/benchmark/fixtures/FFTConvolutionLayerFixture.h"
#include "tests/benchmark/fixtures/WinogradConvolutionLayerFixture.h"
#include "tests/datasets/system_tests/alexnet/AlexNetConvolutionLayerDataset.h"
#include "tests/datasets/system_tests/googlenet/inceptionv1/GoogLeNetInceptionV1ConvolutionLayerDataset.h"
@@ -41,6 +42,9 @@
#include "tests/framework/Macros.h"
#include "tests/framework/datasets/Datasets.h"
#include "utils/TypePrinter.h"
+#include <arm_compute/runtime/CL/functions/CLFFTConvolutionLayer.h>
+#include <tests/datasets/SmallConvolutionLayerDataset.h>
+#include <tests/datasets/system_tests/resnet12/ResNet12ConvolutionLayerDataset.h>
namespace arm_compute
{
@@ -53,11 +57,17 @@ namespace
const auto data_types = framework::dataset::make("DataType", { DataType::F16, DataType::F32, DataType::QASYMM8 });
} // namespace
-using CLGEMMConvolutionLayerFixture = ConvolutionLayerFixture<CLTensor, CLGEMMConvolutionLayer, CLAccessor>;
-
TEST_SUITE(CL)
-using CLWinogradLayerFixture = WinogradConvolutionLayerFixture<CLTensor, CLWinogradConvolutionLayer, CLAccessor>;
+using CLGEMMConvolutionLayerFixture = ConvolutionLayerFixture<CLTensor, CLGEMMConvolutionLayer, CLAccessor>;
+using CLWinogradLayerFixture = WinogradConvolutionLayerFixture<CLTensor, CLWinogradConvolutionLayer, CLAccessor>;
+using CLFFTConvolutionLayerFixture = FFTConvolutionLayerFixture<CLTensor, CLFFTConvolutionLayer, CLAccessor>;
+
+REGISTER_FIXTURE_DATA_TEST_CASE(ResNet12FFTLayer, CLFFTConvolutionLayerFixture, framework::DatasetMode::ALL,
+ framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::ResNet12FFTConvolutionLayerDataset(),
+ framework::dataset::make("ActivationInfo", ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))),
+ framework::dataset::make("DataType", { DataType::F32 })),
+ framework::dataset::make("Batches", 1)));
REGISTER_FIXTURE_DATA_TEST_CASE(AlexNetWinogradLayer, CLWinogradLayerFixture, framework::DatasetMode::ALL,
framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::AlexNetWinogradLayerDataset(),
diff --git a/tests/benchmark/CL/FFT.cpp b/tests/benchmark/CL/FFT.cpp
index b345d58eaf..7f1ae63708 100644
--- a/tests/benchmark/CL/FFT.cpp
+++ b/tests/benchmark/CL/FFT.cpp
@@ -24,6 +24,7 @@
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/CL/CLTensor.h"
#include "arm_compute/runtime/CL/functions/CLFFT1D.h"
+#include "arm_compute/runtime/CL/functions/CLFFT2D.h"
#include "tests/CL/CLAccessor.h"
#include "tests/benchmark/fixtures/FFTFixture.h"
#include "tests/framework/Macros.h"
@@ -42,13 +43,17 @@ const auto data_types = framework::dataset::make("DataType", { DataType::F32 });
const auto shapes = framework::dataset::make("Shapes", { TensorShape(192U, 128U, 64U), TensorShape(224U, 224U) });
} // namespace
-using CLFFT1DFixture = FFT1DFixture<CLTensor, CLFFT1D, CLAccessor>;
+using CLFFT1DFixture = FFTFixture<CLTensor, CLFFT1D, FFT1DInfo, CLAccessor>;
+using CLFFT2DFixture = FFTFixture<CLTensor, CLFFT2D, FFT2DInfo, CLAccessor>;
TEST_SUITE(CL)
REGISTER_FIXTURE_DATA_TEST_CASE(FFT1D, CLFFT1DFixture, framework::DatasetMode::ALL,
framework::dataset::combine(shapes, data_types));
+REGISTER_FIXTURE_DATA_TEST_CASE(FFT2D, CLFFT2DFixture, framework::DatasetMode::ALL,
+ framework::dataset::combine(shapes, data_types));
+
TEST_SUITE_END() // CL
} // namespace benchmark
} // namespace test
diff --git a/tests/benchmark/fixtures/FFTConvolutionLayerFixture.h b/tests/benchmark/fixtures/FFTConvolutionLayerFixture.h
new file mode 100644
index 0000000000..2c53e3ad9b
--- /dev/null
+++ b/tests/benchmark/fixtures/FFTConvolutionLayerFixture.h
@@ -0,0 +1,100 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_TEST_FFT_CONVOLUTION_LAYER_FIXTURE
+#define ARM_COMPUTE_TEST_FFT_CONVOLUTION_LAYER_FIXTURE
+
+#include "arm_compute/core/TensorShape.h"
+#include "arm_compute/core/Types.h"
+#include "tests/Globals.h"
+#include "tests/Utils.h"
+#include "tests/framework/Fixture.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace benchmark
+{
+/** Fixture that can be used for NEON and CL */
+template <typename TensorType, typename Function, typename Accessor>
+class FFTConvolutionLayerFixture : public framework::Fixture
+{
+public:
+ template <typename...>
+ void setup(TensorShape src_shape, TensorShape weights_shape, TensorShape biases_shape, TensorShape dst_shape, PadStrideInfo info, Size2D dilation, ActivationLayerInfo act_info, DataType data_type,
+ int batches)
+ {
+ ARM_COMPUTE_UNUSED(dilation);
+
+ // Set batched in source and destination shapes
+
+ src_shape.set(3 /* batch */, batches);
+ dst_shape.set(3 /* batch */, batches);
+
+ // Create tensors
+ src = create_tensor<TensorType>(src_shape, data_type, 1);
+ weights = create_tensor<TensorType>(weights_shape, data_type, 1);
+ biases = create_tensor<TensorType>(biases_shape, data_type, 1);
+ dst = create_tensor<TensorType>(dst_shape, data_type, 1);
+
+ // Create and configure function
+ conv_layer.configure(&src, &weights, &biases, &dst, info, act_info);
+
+ // Allocate tensors
+ src.allocator()->allocate();
+ weights.allocator()->allocate();
+ biases.allocator()->allocate();
+ dst.allocator()->allocate();
+ }
+
+ void run()
+ {
+ conv_layer.run();
+ }
+
+ void sync()
+ {
+ sync_if_necessary<TensorType>();
+ sync_tensor_if_necessary<TensorType>(dst);
+ }
+
+ void teardown()
+ {
+ src.allocator()->free();
+ weights.allocator()->free();
+ biases.allocator()->free();
+ dst.allocator()->free();
+ }
+
+private:
+ TensorType src{};
+ TensorType weights{};
+ TensorType biases{};
+ TensorType dst{};
+ Function conv_layer{};
+};
+} // namespace benchmark
+} // namespace test
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_TEST_FFT_CONVOLUTION_LAYER_FIXTURE */
diff --git a/tests/benchmark/fixtures/FFTFixture.h b/tests/benchmark/fixtures/FFTFixture.h
index c9c4e3a88e..53897b1b14 100644
--- a/tests/benchmark/fixtures/FFTFixture.h
+++ b/tests/benchmark/fixtures/FFTFixture.h
@@ -36,8 +36,8 @@ namespace test
{
namespace benchmark
{
-template <typename TensorType, typename Function, typename Accessor>
-class FFT1DFixture : public framework::Fixture
+template <typename TensorType, typename Function, typename FFTInfo, typename Accessor>
+class FFTFixture : public framework::Fixture
{
public:
template <typename...>
@@ -48,7 +48,7 @@ public:
dst = create_tensor<TensorType>(shape, data_type, 2);
// Create and configure function
- fft_func.configure(&src, &dst, FFT1DInfo());
+ fft_func.configure(&src, &dst, FFTInfo());
// Allocate tensors
src.allocator()->allocate();
diff --git a/tests/datasets/SmallConvolutionLayerDataset.h b/tests/datasets/SmallConvolutionLayerDataset.h
index 73f1554c49..22d0bc582a 100644
--- a/tests/datasets/SmallConvolutionLayerDataset.h
+++ b/tests/datasets/SmallConvolutionLayerDataset.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018 ARM Limited.
+ * Copyright (c) 2017-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -132,6 +132,17 @@ public:
}
};
+class SmallFFTConvolutionLayerDataset final : public ConvolutionLayerDataset
+{
+public:
+ SmallFFTConvolutionLayerDataset()
+ {
+ add_config(TensorShape(8U, 7U, 3U), TensorShape(3U, 3U, 3U, 2U), TensorShape(2U), TensorShape(8U, 7U, 2U), PadStrideInfo(1, 1, 1, 1));
+ add_config(TensorShape(64U, 32U, 5U), TensorShape(5U, 5U, 5U, 10U), TensorShape(10U), TensorShape(64U, 32U, 10U), PadStrideInfo(1, 1, 2, 2));
+ add_config(TensorShape(192U, 128U, 8U), TensorShape(9U, 9U, 8U, 3U), TensorShape(3U), TensorShape(192U, 128U, 3U), PadStrideInfo(1, 1, 4, 4));
+ }
+};
+
class SmallConvolutionLayerDataset final : public ConvolutionLayerDataset
{
public:
diff --git a/tests/datasets/system_tests/resnet12/ResNet12ConvolutionLayerDataset.h b/tests/datasets/system_tests/resnet12/ResNet12ConvolutionLayerDataset.h
new file mode 100644
index 0000000000..b960dceafd
--- /dev/null
+++ b/tests/datasets/system_tests/resnet12/ResNet12ConvolutionLayerDataset.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_TEST_RESNET12_CONVOLUTION_LAYER_DATASET
+#define ARM_COMPUTE_TEST_RESNET12_CONVOLUTION_LAYER_DATASET
+
+#include "tests/datasets/ConvolutionLayerDataset.h"
+
+#include "utils/TypePrinter.h"
+
+#include "arm_compute/core/TensorShape.h"
+#include "arm_compute/core/Types.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace datasets
+{
+class ResNet12FFTConvolutionLayerDataset final : public ConvolutionLayerDataset
+{
+public:
+ ResNet12FFTConvolutionLayerDataset()
+ {
+ add_config(TensorShape(192U, 128U, 64U), TensorShape(9U, 9U, 64U, 3U), TensorShape(3U), TensorShape(192U, 128U, 3U), PadStrideInfo(1, 1, 4, 4));
+ }
+};
+} // namespace datasets
+} // namespace test
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_TEST_RESNET12_CONVOLUTION_LAYER_DATASET */
diff --git a/tests/validation/CL/ConvolutionLayer.cpp b/tests/validation/CL/ConvolutionLayer.cpp
index 41d2b7bb5e..f1f9b59330 100644
--- a/tests/validation/CL/ConvolutionLayer.cpp
+++ b/tests/validation/CL/ConvolutionLayer.cpp
@@ -46,7 +46,7 @@ namespace validation
namespace
{
constexpr AbsoluteTolerance<float> absolute_tolerance_float(0.0001f); /**< Absolute Tolerance value for comparing reference's output against implementation's output for DataType::F32 */
-RelativeTolerance<float> tolerance_f32(0.05f); /**< Tolerance value for comparing reference's output against implementation's output for DataType::F32 */
+RelativeTolerance<float> tolerance_f32(0.1f); /**< Tolerance value for comparing reference's output against implementation's output for DataType::F32 */
RelativeTolerance<half_float::half> tolerance_f16(half_float::half(0.2)); /**< Tolerance value for comparing reference's output against implementation's output for DataType::F16 */
constexpr AbsoluteTolerance<float> tolerance_qasymm8(1); /**< Tolerance value for comparing reference's output against implementation's output for quantized data types */
constexpr float tolerance_num = 0.07f; /**< Tolerance number */
diff --git a/tests/validation/CL/FFT.cpp b/tests/validation/CL/FFT.cpp
index 0d29532c29..9fdd85b604 100644
--- a/tests/validation/CL/FFT.cpp
+++ b/tests/validation/CL/FFT.cpp
@@ -24,7 +24,10 @@
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/CL/CLTensor.h"
#include "arm_compute/runtime/CL/functions/CLFFT1D.h"
+#include "arm_compute/runtime/CL/functions/CLFFT2D.h"
+#include "arm_compute/runtime/CL/functions/CLFFTConvolutionLayer.h"
#include "tests/CL/CLAccessor.h"
+#include "tests/datasets/SmallConvolutionLayerDataset.h"
#include "tests/framework/Asserts.h"
#include "tests/framework/Macros.h"
#include "tests/framework/datasets/Datasets.h"
@@ -40,7 +43,7 @@ namespace validation
namespace
{
const auto data_types = framework::dataset::make("DataType", { DataType::F32 });
-const auto shapes = framework::dataset::make("TensorShape", { TensorShape(2U, 2U, 3U), TensorShape(3U, 2U, 3U),
+const auto shapes_1d = framework::dataset::make("TensorShape", { TensorShape(2U, 2U, 3U), TensorShape(3U, 2U, 3U),
TensorShape(4U, 2U, 3U), TensorShape(5U, 2U, 3U),
TensorShape(7U, 2U, 3U), TensorShape(8U, 2U, 3U),
TensorShape(9U, 2U, 3U), TensorShape(25U, 2U, 3U),
@@ -48,11 +51,27 @@ const auto shapes = framework::dataset::make("TensorShape", { TensorShape(2U
TensorShape(16U, 2U, 3U), TensorShape(32U, 2U, 3U),
TensorShape(96U, 2U, 2U)
});
+const auto shapes_2d = framework::dataset::make("TensorShape", { TensorShape(2U, 2U, 3U), TensorShape(3U, 6U, 3U),
+ TensorShape(4U, 5U, 3U), TensorShape(5U, 7U, 3U),
+ TensorShape(7U, 25U, 3U), TensorShape(8U, 2U, 3U),
+ TensorShape(9U, 16U, 3U), TensorShape(25U, 32U, 3U),
+ TensorShape(192U, 128U, 2U)
+ });
+
+const auto ActivationFunctionsSmallDataset = framework::dataset::make("ActivationInfo",
+{
+ ActivationLayerInfo(),
+ ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 0.5f)
+});
+
+RelativeTolerance<float> tolerance_f32(0.1f); /**< Relative tolerance value for FP32 */
+constexpr float tolerance_num = 0.07f; /**< Tolerance number */
+
} // namespace
TEST_SUITE(CL)
TEST_SUITE(FFT1D)
-DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(shapes, data_types),
+DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(shapes_1d, data_types),
shape, data_type)
{
// Create tensors
@@ -81,19 +100,19 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(shapes, data_
DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
framework::dataset::make("InputInfo", { TensorInfo(TensorShape(32U, 13U, 2U), 2, DataType::F32), // Mismatching data types
TensorInfo(TensorShape(32U, 13U, 2U), 2, DataType::F32), // Mismatching shapes
- TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), // Invalid channels
+ TensorInfo(TensorShape(32U, 13U, 2U), 3, DataType::F32), // Invalid channels
TensorInfo(TensorShape(32U, 13U, 2U), 2, DataType::F32), // Unsupported axis
TensorInfo(TensorShape(11U, 13U, 2U), 2, DataType::F32), // Undecomposable FFT
TensorInfo(TensorShape(25U, 13U, 2U), 2, DataType::F32),
}),
framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(32U, 13U, 2U), 2, DataType::F16),
TensorInfo(TensorShape(16U, 13U, 2U), 2, DataType::F32),
- TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32),
+ TensorInfo(TensorShape(32U, 13U, 2U), 2, DataType::F32),
TensorInfo(TensorShape(32U, 13U, 2U), 2, DataType::F32),
TensorInfo(TensorShape(11U, 13U, 2U), 2, DataType::F32),
TensorInfo(TensorShape(25U, 13U, 2U), 2, DataType::F32),
})),
- framework::dataset::make("Axis", { 0, 0, 0, 1, 0, 0 })),
+ framework::dataset::make("Axis", { 0, 0, 0, 2, 0, 0 })),
framework::dataset::make("Expected", { false, false, false, false, false, true })),
input_info, output_info, axis, expected)
{
@@ -106,19 +125,103 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
// *INDENT-ON*
template <typename T>
-using CLFFT1DFixture = FFTValidationFixture<CLTensor, CLAccessor, CLFFT1D, T>;
+using CLFFT1DFixture = FFTValidationFixture<CLTensor, CLAccessor, CLFFT1D, FFT1DInfo, T>;
TEST_SUITE(Float)
TEST_SUITE(FP32)
-FIXTURE_DATA_TEST_CASE(RunSmall, CLFFT1DFixture<float>, framework::DatasetMode::ALL, combine(shapes, framework::dataset::make("DataType", DataType::F32)))
+FIXTURE_DATA_TEST_CASE(RunSmall, CLFFT1DFixture<float>, framework::DatasetMode::ALL, combine(shapes_1d, framework::dataset::make("DataType", DataType::F32)))
{
// Validate output
- validate(CLAccessor(_target), _reference, RelativeTolerance<float>(0.1f), 0.05f);
+ validate(CLAccessor(_target), _reference, tolerance_f32, tolerance_num);
}
TEST_SUITE_END() // FP32
TEST_SUITE_END() // Float
TEST_SUITE_END() // FFT1D
+
+TEST_SUITE(FFT2D)
+
+DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(shapes_2d, data_types),
+ shape, data_type)
+{
+ // Create tensors
+ CLTensor src = create_tensor<CLTensor>(shape, data_type, 2);
+ CLTensor dst = create_tensor<CLTensor>(shape, data_type, 2);
+
+ ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+
+ // Create and configure function
+ CLFFT2D fft2d;
+ fft2d.configure(&src, &dst, FFT2DInfo());
+
+ // Validate valid region
+ const ValidRegion valid_region = shape_to_valid_region(shape);
+ validate(src.info()->valid_region(), valid_region);
+ validate(dst.info()->valid_region(), valid_region);
+
+ // Validate padding
+ validate(src.info()->padding(), PaddingSize());
+ validate(dst.info()->padding(), PaddingSize());
+}
+
+// *INDENT-OFF*
+// clang-format off
+DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(
+ framework::dataset::make("InputInfo", { TensorInfo(TensorShape(32U, 25U, 2U), 2, DataType::F32), // Mismatching data types
+ TensorInfo(TensorShape(32U, 25U, 2U), 2, DataType::F32), // Mismatching shapes
+ TensorInfo(TensorShape(32U, 25U, 2U), 3, DataType::F32), // Invalid channels
+ TensorInfo(TensorShape(32U, 13U, 2U), 2, DataType::F32), // Undecomposable FFT
+ TensorInfo(TensorShape(32U, 25U, 2U), 2, DataType::F32),
+ }),
+ framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(32U, 25U, 2U), 2, DataType::F16),
+ TensorInfo(TensorShape(16U, 25U, 2U), 2, DataType::F32),
+ TensorInfo(TensorShape(32U, 25U, 2U), 1, DataType::F32),
+ TensorInfo(TensorShape(32U, 13U, 2U), 2, DataType::F32),
+ TensorInfo(TensorShape(32U, 25U, 2U), 2, DataType::F32),
+ })),
+ framework::dataset::make("Expected", { false, false, false, false, true })),
+ input_info, output_info, expected)
+{
+ const Status s = CLFFT2D::validate(&input_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false), FFT2DInfo());
+ ARM_COMPUTE_EXPECT(bool(s) == expected, framework::LogLevel::ERRORS);
+}
+// clang-format on
+// *INDENT-ON*
+
+template <typename T>
+using CLFFT2DFixture = FFTValidationFixture<CLTensor, CLAccessor, CLFFT2D, FFT2DInfo, T>;
+
+TEST_SUITE(Float)
+TEST_SUITE(FP32)
+FIXTURE_DATA_TEST_CASE(RunSmall, CLFFT2DFixture<float>, framework::DatasetMode::ALL, combine(shapes_2d, framework::dataset::make("DataType", DataType::F32)))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference, tolerance_f32, tolerance_num);
+}
+TEST_SUITE_END() // FP32
+TEST_SUITE_END() // Float
+TEST_SUITE_END() // FFT2D
+
+TEST_SUITE(FFTConvolutionLayer)
+
+template <typename T>
+using CLFFTConvolutionLayerFixture = FFTConvolutionValidationFixture<CLTensor, CLAccessor, CLFFTConvolutionLayer, T>;
+
+TEST_SUITE(Float)
+TEST_SUITE(FP32)
+FIXTURE_DATA_TEST_CASE(RunSmall, CLFFTConvolutionLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallFFTConvolutionLayerDataset(),
+ framework::dataset::make("DataType", DataType::F32)),
+ framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
+ ActivationFunctionsSmallDataset))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference, tolerance_f32, tolerance_num);
+}
+TEST_SUITE_END() // FP32
+TEST_SUITE_END() // Float
+TEST_SUITE_END() // FFTConvolutionLayer
+
TEST_SUITE_END() // CL
} // namespace validation
} // namespace test
diff --git a/tests/validation/CL/ReductionOperation.cpp b/tests/validation/CL/ReductionOperation.cpp
index c8474e97e6..79308c8229 100644
--- a/tests/validation/CL/ReductionOperation.cpp
+++ b/tests/validation/CL/ReductionOperation.cpp
@@ -63,7 +63,7 @@ TEST_SUITE(ReductionOperation)
// clang-format off
DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
framework::dataset::make("InputInfo", { TensorInfo(TensorShape(128U, 64U), 1, DataType::F32), // Mismatching data type input/output
- TensorInfo(TensorShape(128U, 64U), 2, DataType::F32), // Number of Input channels != 1
+ TensorInfo(TensorShape(128U, 64U), 3, DataType::F32), // Number of Input channels != 1
TensorInfo(TensorShape(128U, 64U), 1, DataType::S16), // DataType != QASYMM8/F16/F32
TensorInfo(TensorShape(128U, 64U), 1, DataType::F32), // Axis >= num_max_dimensions
TensorInfo(TensorShape(128U, 64U), 1, DataType::QASYMM8), // Axis == 0 and SUM_SQUARE and QASYMM8
diff --git a/tests/validation/fixtures/ConvolutionLayerFixture.h b/tests/validation/fixtures/ConvolutionLayerFixture.h
index 795b9de6cd..52fa8da60b 100644
--- a/tests/validation/fixtures/ConvolutionLayerFixture.h
+++ b/tests/validation/fixtures/ConvolutionLayerFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018 ARM Limited.
+ * Copyright (c) 2017-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
diff --git a/tests/validation/fixtures/FFTFixture.h b/tests/validation/fixtures/FFTFixture.h
index 8e3c01eaff..1aaa5965b2 100644
--- a/tests/validation/fixtures/FFTFixture.h
+++ b/tests/validation/fixtures/FFTFixture.h
@@ -31,6 +31,8 @@
#include "tests/IAccessor.h"
#include "tests/framework/Asserts.h"
#include "tests/framework/Fixture.h"
+#include "tests/validation/reference/ActivationLayer.h"
+#include "tests/validation/reference/ConvolutionLayer.h"
#include "tests/validation/reference/DFT.h"
#include <random>
@@ -41,7 +43,7 @@ namespace test
{
namespace validation
{
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+template <typename TensorType, typename AccessorType, typename FunctionType, typename InfoType, typename T>
class FFTValidationFixture : public framework::Fixture
{
public:
@@ -68,8 +70,8 @@ protected:
TensorType dst = create_tensor<TensorType>(shape, data_type, 2);
// Create and configure function
- FunctionType fft1d;
- fft1d.configure(&src, &dst, FFT1DInfo());
+ FunctionType fft;
+ fft.configure(&src, &dst, InfoType());
ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
@@ -85,7 +87,7 @@ protected:
fill(AccessorType(src));
// Compute function
- fft1d.run();
+ fft.run();
return dst;
}
@@ -97,12 +99,138 @@ protected:
// Fill reference
fill(src);
+ if(std::is_same<InfoType, FFT1DInfo>::value)
+ {
+ return reference::dft_1d(src, reference::FFTDirection::Forward);
+ }
+ else
+ {
+ return reference::dft_2d(src, reference::FFTDirection::Forward);
+ }
+ }
+
+ TensorType _target{};
+ SimpleTensor<T> _reference{};
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class FFTConvolutionValidationGenericFixture : public framework::Fixture
+{
+public:
+ template <typename...>
+ void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation,
+ DataType data_type, DataLayout data_layout, ActivationLayerInfo act_info)
+ {
+ _data_type = data_type;
+ _data_layout = data_layout;
+
+ _target = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, dilation, act_info);
+ _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, dilation, act_info);
+ }
+
+protected:
+ template <typename U>
+ void fill(U &&tensor, int i)
+ {
+ switch(tensor.data_type())
+ {
+ case DataType::F32:
+ {
+ std::uniform_real_distribution<> distribution(-1.0f, 1.0f);
+ library->fill(tensor, distribution, i);
+ break;
+ }
+ default:
+ library->fill_tensor_uniform(tensor, i);
+ }
+ }
+
+ TensorType compute_target(TensorShape input_shape, TensorShape weights_shape, const TensorShape &bias_shape, TensorShape output_shape, const PadStrideInfo &info,
+ const Size2D &dilation, const ActivationLayerInfo act_info)
+ {
+ ARM_COMPUTE_UNUSED(dilation);
+ ARM_COMPUTE_ERROR_ON((input_shape[2] % weights_shape[2]) != 0);
+
+ if(_data_layout == DataLayout::NHWC)
+ {
+ permute(input_shape, PermutationVector(2U, 0U, 1U));
+ permute(weights_shape, PermutationVector(2U, 0U, 1U));
+ permute(output_shape, PermutationVector(2U, 0U, 1U));
+ }
+
+ // Create tensors
+ TensorType src = create_tensor<TensorType>(input_shape, _data_type, 1, QuantizationInfo(), _data_layout);
+ TensorType weights = create_tensor<TensorType>(weights_shape, _data_type, 1, QuantizationInfo(), _data_layout);
+ TensorType bias = create_tensor<TensorType>(bias_shape, _data_type, 1, QuantizationInfo(), _data_layout);
+ TensorType dst = create_tensor<TensorType>(output_shape, _data_type, 1, QuantizationInfo(), _data_layout);
+
+ // Create and configure function
+ FunctionType conv;
+ conv.configure(&src, &weights, &bias, &dst, info, act_info);
+
+ ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(weights.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+
+ // Allocate tensors
+ src.allocator()->allocate();
+ weights.allocator()->allocate();
+ bias.allocator()->allocate();
+ dst.allocator()->allocate();
+
+ ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(!weights.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(!bias.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+
+ // Fill tensors
+ fill(AccessorType(src), 0);
+ fill(AccessorType(weights), 1);
+ fill(AccessorType(bias), 2);
+
+ // Compute convolution function
+ conv.run();
+
+ return dst;
+ }
- return reference::dft_1d(src, reference::FFTDirection::Forward);
+ SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &weights_shape, const TensorShape &bias_shape, const TensorShape &output_shape, const PadStrideInfo &info,
+ const Size2D &dilation, const ActivationLayerInfo act_info)
+ {
+ ARM_COMPUTE_ERROR_ON((input_shape[2] % weights_shape[2]) != 0);
+
+ // Create reference
+ SimpleTensor<T> src{ input_shape, _data_type, 1 };
+ SimpleTensor<T> weights{ weights_shape, _data_type, 1 };
+ SimpleTensor<T> bias{ bias_shape, _data_type, 1 };
+
+ // Fill reference
+ fill(src, 0);
+ fill(weights, 1);
+ fill(bias, 2);
+
+ return (act_info.enabled()) ? reference::activation_layer<T>(reference::convolution_layer<T>(src, weights, bias, output_shape, info, dilation), act_info) : reference::convolution_layer<T>(src,
+ weights, bias, output_shape, info, dilation);
}
TensorType _target{};
SimpleTensor<T> _reference{};
+ DataType _data_type{};
+ DataLayout _data_layout{};
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class FFTConvolutionValidationFixture : public FFTConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+ template <typename...>
+ void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation,
+ DataType data_type, DataLayout data_layout, ActivationLayerInfo act_info)
+ {
+ FFTConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, dilation,
+ data_type, data_layout, act_info);
+ }
};
} // namespace validation
} // namespace test