diff options
author | Pablo Tello <pablo.tello@arm.com> | 2018-06-14 15:35:49 +0100 |
---|---|---|
committer | Anthony Barbier <anthony.barbier@arm.com> | 2018-11-02 16:53:09 +0000 |
commit | 7282d562d459066dff3e27fd5299f71e0809990d (patch) | |
tree | ed13231d9d4a2eecbc5d1a6227bff0effbd10c95 /tests | |
parent | 542e92d95536f2ab7fc6f1cc1aa1bd4f1d471212 (diff) | |
download | ComputeLibrary-7282d562d459066dff3e27fd5299f71e0809990d.tar.gz |
COMPMID-1287: Extending NEWinogradLayer test suite
Added the NHWC data layout to the datasets of the validation tests
Fixed a problem in the output transform which made the Activation fail
because of the way/ordering in which the output transform wrote the data to the output tensor.
Change-Id: I9609f86605dbfef70b47a0fb043287bf0e5d675b
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/136015
Tested-by: Jenkins <bsgcomp@arm.com>
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
Diffstat (limited to 'tests')
-rw-r--r-- | tests/validation/CL/Winograd.cpp | 30 | ||||
-rw-r--r-- | tests/validation/NEON/ActivationLayer.cpp | 5 | ||||
-rw-r--r-- | tests/validation/NEON/ConvolutionLayer.cpp | 21 | ||||
-rw-r--r-- | tests/validation/fixtures/WinogradConvolutionLayerFixture.h | 26 |
4 files changed, 51 insertions, 31 deletions
diff --git a/tests/validation/CL/Winograd.cpp b/tests/validation/CL/Winograd.cpp index 59fe25db73..e0b4b5f795 100644 --- a/tests/validation/CL/Winograd.cpp +++ b/tests/validation/CL/Winograd.cpp @@ -379,18 +379,20 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip( using CLWinogradConvolutionLayerFastMathFixture = WinogradConvolutionLayerFastMathValidationFixture<CLTensor, CLAccessor, CLWinogradConvolutionLayer, float>; TEST_SUITE(Conv3x3) FIXTURE_DATA_TEST_CASE(RunSmall, CLWinogradConvolutionLayerFastMathFixture, framework::DatasetMode::PRECOMMIT, - combine(combine(datasets::SmallWinogradConvolutionLayer3x3Dataset(), - framework::dataset::make("DataType", { DataType::F32 })), - framework::dataset::make("ActivationLayerInfo", { ActivationLayerInfo() }))) + combine(combine(combine(datasets::SmallWinogradConvolutionLayer3x3Dataset(), + framework::dataset::make("DataType", { DataType::F32 })), + framework::dataset::make("ActivationLayerInfo", { ActivationLayerInfo() })), + framework::dataset::make("DataLayout", { DataLayout::NCHW }))) { // Validate output validate(CLAccessor(_target), _reference, tolerance_convolution_layer_f32); } FIXTURE_DATA_TEST_CASE(RunLarge, CLWinogradConvolutionLayerFastMathFixture, framework::DatasetMode::NIGHTLY, - combine(combine(datasets::LargeWinogradConvolutionLayer3x3Dataset(), - framework::dataset::make("DataType", { DataType::F32 })), - framework::dataset::make("ActivationLayerInfo", { ActivationLayerInfo() }))) + combine(combine(combine(datasets::LargeWinogradConvolutionLayer3x3Dataset(), + framework::dataset::make("DataType", { DataType::F32 })), + framework::dataset::make("ActivationLayerInfo", { ActivationLayerInfo() })), + framework::dataset::make("DataLayout", { DataLayout::NCHW }))) { // Validate output validate(CLAccessor(_target), _reference, tolerance_convolution_layer_f32); @@ -399,18 +401,22 @@ TEST_SUITE_END() // Conv3x3 TEST_SUITE(Conv5x5) FIXTURE_DATA_TEST_CASE(RunSmall, 
CLWinogradConvolutionLayerFastMathFixture, framework::DatasetMode::PRECOMMIT, - combine(combine(datasets::SmallWinogradConvolutionLayer5x5Dataset(), - framework::dataset::make("DataType", { DataType::F32 })), - framework::dataset::make("ActivationLayerInfo", { ActivationLayerInfo() }))) + combine(combine(combine(datasets::SmallWinogradConvolutionLayer5x5Dataset(), + framework::dataset::make("DataType", { DataType::F32 })), + framework::dataset::make("ActivationLayerInfo", { ActivationLayerInfo() })), + framework::dataset::make("DataLayout", { DataLayout::NCHW }))) + { // Validate output validate(CLAccessor(_target), _reference, tolerance_convolution_layer_f32); } FIXTURE_DATA_TEST_CASE(RunLarge, CLWinogradConvolutionLayerFastMathFixture, framework::DatasetMode::NIGHTLY, - combine(combine(datasets::LargeWinogradConvolutionLayer5x5Dataset(), - framework::dataset::make("DataType", { DataType::F32 })), - framework::dataset::make("ActivationLayerInfo", { ActivationLayerInfo() }))) + combine(combine(combine(datasets::LargeWinogradConvolutionLayer5x5Dataset(), + framework::dataset::make("DataType", { DataType::F32 })), + framework::dataset::make("ActivationLayerInfo", { ActivationLayerInfo() })), + framework::dataset::make("DataLayout", { DataLayout::NCHW }))) + { // Validate output validate(CLAccessor(_target), _reference, tolerance_convolution_layer_f32); diff --git a/tests/validation/NEON/ActivationLayer.cpp b/tests/validation/NEON/ActivationLayer.cpp index e77fe5a295..868683259c 100644 --- a/tests/validation/NEON/ActivationLayer.cpp +++ b/tests/validation/NEON/ActivationLayer.cpp @@ -193,12 +193,13 @@ TEST_SUITE_END() TEST_SUITE(FP32) FIXTURE_DATA_TEST_CASE(RunSmall, NEActivationLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallShapes(), ActivationDataset), framework::dataset::make("DataType", DataType::F32))) + { // Validate output validate(Accessor(_target), _reference, tolerance(_data_type, _function)); } 
-FIXTURE_DATA_TEST_CASE(RunLarge, NEActivationLayerFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::LargeShapes(), ActivationDataset), framework::dataset::make("DataType", - DataType::F32))) +FIXTURE_DATA_TEST_CASE(RunLarge, NEActivationLayerFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::LargeShapes(), ActivationDataset), + framework::dataset::make("DataType", DataType::F32))) { // Validate output validate(Accessor(_target), _reference, tolerance(_data_type, _function)); diff --git a/tests/validation/NEON/ConvolutionLayer.cpp b/tests/validation/NEON/ConvolutionLayer.cpp index b286eb61b6..2c17487107 100644 --- a/tests/validation/NEON/ConvolutionLayer.cpp +++ b/tests/validation/NEON/ConvolutionLayer.cpp @@ -121,21 +121,26 @@ template <typename T> using NEWinogradConvolutionLayerNoBiasFixture = WinogradConvolutionLayerFastMathValidationFixture<Tensor, Accessor, NEWinogradConvolutionLayer, T, false>; TEST_SUITE(FP32) + FIXTURE_DATA_TEST_CASE(RunSmall, NEWinogradConvolutionLayerFixture<float>, framework::DatasetMode::PRECOMMIT, - combine(combine(framework::dataset::concat(datasets::SmallWinogradConvolutionLayer3x3Dataset(), - datasets::SmallWinogradConvolutionLayer5x5Dataset()), - framework::dataset::make("DataType", { DataType::F32 })), - ActivationFunctionsDataset)) + combine(combine(combine(framework::dataset::concat(datasets::SmallWinogradConvolutionLayer3x3Dataset(), + datasets::SmallWinogradConvolutionLayer5x5Dataset()), + framework::dataset::make("DataType", { DataType::F32 })), + ActivationFunctionsDataset), + framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC }))) + { // Validate output validate(Accessor(_target), _reference, abs_tolerance_f32); } FIXTURE_DATA_TEST_CASE(RunSmallNoBias, NEWinogradConvolutionLayerNoBiasFixture<float>, framework::DatasetMode::PRECOMMIT, - combine(combine(framework::dataset::concat(datasets::SmallWinogradConvolutionLayer3x3Dataset(), - 
datasets::SmallWinogradConvolutionLayer5x5Dataset()), - framework::dataset::make("DataType", { DataType::F32 })), - ActivationFunctionsDataset)) + combine(combine(combine(framework::dataset::concat(datasets::SmallWinogradConvolutionLayer3x3Dataset(), + datasets::SmallWinogradConvolutionLayer5x5Dataset()), + framework::dataset::make("DataType", { DataType::F32 })), + ActivationFunctionsDataset), + + framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC }))) { // Validate output validate(Accessor(_target), _reference, abs_tolerance_f32); diff --git a/tests/validation/fixtures/WinogradConvolutionLayerFixture.h b/tests/validation/fixtures/WinogradConvolutionLayerFixture.h index 07795c2361..aca24f13ae 100644 --- a/tests/validation/fixtures/WinogradConvolutionLayerFixture.h +++ b/tests/validation/fixtures/WinogradConvolutionLayerFixture.h @@ -160,11 +160,12 @@ class WinogradConvolutionLayerFastMathValidationFixture : public framework::Fixt { public: template <typename...> - void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation, DataType data_type, ActivationLayerInfo act_info) + void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation, + DataType data_type, ActivationLayerInfo act_info, const DataLayout &data_layout) + { ARM_COMPUTE_UNUSED(dilation); - - _target = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, data_type, act_info); + _target = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, data_type, act_info, data_layout); _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, data_type, act_info); } @@ -189,14 +190,21 @@ protected: } } - TensorType compute_target(const TensorShape &input_shape, const TensorShape &weights_shape, const TensorShape &bias_shape, const 
TensorShape &output_shape, const PadStrideInfo &info, - DataType data_type, ActivationLayerInfo act_info) + TensorType compute_target(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, const PadStrideInfo &info, + DataType data_type, ActivationLayerInfo act_info, const DataLayout data_layout) { + if(data_layout == DataLayout::NHWC) + { + permute(input_shape, PermutationVector(2U, 0U, 1U)); + permute(weights_shape, PermutationVector(2U, 0U, 1U)); + permute(output_shape, PermutationVector(2U, 0U, 1U)); + } + // Create tensors - TensorType src = create_tensor<TensorType>(input_shape, data_type, 1); - TensorType weights = create_tensor<TensorType>(weights_shape, data_type, 1); - TensorType bias = create_tensor<TensorType>(bias_shape, data_type, 1); - TensorType dst = create_tensor<TensorType>(output_shape, data_type, 1); + TensorType src = create_tensor<TensorType>(input_shape, data_type, 1, 0, QuantizationInfo(), data_layout); + TensorType weights = create_tensor<TensorType>(weights_shape, data_type, 1, 0, QuantizationInfo(), data_layout); + TensorType bias = create_tensor<TensorType>(bias_shape, data_type, 1, 0, QuantizationInfo(), data_layout); + TensorType dst = create_tensor<TensorType>(output_shape, data_type, 1, 0, QuantizationInfo(), data_layout); // Create and configure function FunctionType conv; |