author     Gian Marco Iodice <gianmarco.iodice@arm.com>    2018-03-22 11:24:56 +0000
committer  Anthony Barbier <anthony.barbier@arm.com>       2018-11-02 16:49:16 +0000
commit     247f52cfe337f7b2542b900e3d8cf122e9d4f11c (patch)
tree       bcbabb7f1eea588a5d37566829763506d328e7a9 /tests/validation/CL/Winograd.cpp
parent     eb8a399ba655b85c6854676832eb11b0af4108fe (diff)
COMPMID-1013 - Create WinogradInfo data structure
COMPMID-1014 - Refactoring Winograd's dataset

Change-Id: I6abdcbf9a90d663f4db666cd410afece9f1d034d
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/125899
Tested-by: Jenkins <bsgcomp@arm.com>
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
Diffstat (limited to 'tests/validation/CL/Winograd.cpp')
-rw-r--r--  tests/validation/CL/Winograd.cpp  181
1 file changed, 80 insertions, 101 deletions
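
For reference, a minimal sketch of how the refactored interface is called after this change. The shapes, padding values and the helper function name are illustrative assumptions; WinogradInfo, CLWinogradInputTransform::validate() and configure() are taken from the hunks below.

#include "arm_compute/core/Error.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/CL/CLTensor.h"
#include "arm_compute/runtime/CL/functions/CLWinogradInputTransform.h"

using namespace arm_compute;

// Hypothetical helper: configures the input transform for two pre-allocated tensors.
void configure_winograd_input_transform(CLTensor &in, CLTensor &out)
{
    // All Winograd parameters now travel in a single WinogradInfo object:
    // output tile, kernel size, input spatial dims, convolution info, data layout.
    const WinogradInfo winograd_info(Size2D(2U, 2U),            // output tile size
                                     Size2D(3U, 3U),            // kernel size
                                     Size2D(53U, 21U),          // input width x height (illustrative)
                                     PadStrideInfo(1, 1, 1, 1), // conv_info (illustrative)
                                     DataLayout::NCHW);

    // validate() and configure() take the aggregated structure instead of the
    // former (conv_info, kernel_dims) pair of arguments.
    ARM_COMPUTE_ERROR_THROW_ON(CLWinogradInputTransform::validate(in.info(), out.info(), winograd_info));

    CLWinogradInputTransform winograd_input_transform;
    winograd_input_transform.configure(&in, &out, winograd_info);
}
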
diff --git a/tests/validation/CL/Winograd.cpp b/tests/validation/CL/Winograd.cpp
index 9aba8f776c..8fa5826470 100644
--- a/tests/validation/CL/Winograd.cpp
+++ b/tests/validation/CL/Winograd.cpp
@@ -35,7 +35,6 @@
#include "tests/datasets/LargeConvolutionLayerDataset.h"
#include "tests/datasets/ShapeDatasets.h"
#include "tests/datasets/SmallConvolutionLayerDataset.h"
-#include "tests/datasets/WinogradFilterTransformDataset.h"
#include "tests/datasets/WinogradInputTransformDataset.h"
#include "tests/datasets/WinogradOutputTransformDataset.h"
#include "tests/framework/Asserts.h"
@@ -64,7 +63,7 @@ TEST_SUITE(InputTransform)
// *INDENT-OFF*
// clang-format off
-DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(
+DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
framework::dataset::make("InputInfo",{
TensorInfo(TensorShape(53U, 21U, 5U, 3U), 1, DataType::F16), // F16 not supported
TensorInfo(TensorShape(53U, 21U, 5U, 3U), 1, DataType::QASYMM8), // QASYMM8 not supported
@@ -83,44 +82,34 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(
TensorInfo(TensorShape(7U, 320U, 16U, 3U), 1, DataType::F32),
TensorInfo(TensorShape(37U, 304U, 16U), 1, DataType::F32)
})),
- framework::dataset::make("PadStrideInfo", {
- PadStrideInfo(1, 1, 1, 0),
- PadStrideInfo(1, 1, 0, 0),
- PadStrideInfo(1, 1, 1, 1),
- PadStrideInfo(2, 1, 1, 1),
- PadStrideInfo(1, 1, 0, 1),
- PadStrideInfo(1, 1, 0, 0),
- PadStrideInfo(1, 1, 1, 1)
- })),
- framework::dataset::make("KernelDims", {
- Size2D(3U, 3U),
- Size2D(3U, 3U),
- Size2D(5U, 5U),
- Size2D(3U, 3U),
- Size2D(3U, 3U),
- Size2D(3U, 3U),
- Size2D(3U, 3U)
+ framework::dataset::make("WinogradInfo", {
+ WinogradInfo(Size2D(2, 2), Size2D(3, 3), Size2D(53U, 21U), PadStrideInfo(1, 1, 1, 0), DataLayout::NCHW),
+ WinogradInfo(Size2D(2, 2), Size2D(3, 3), Size2D(53U, 21U), PadStrideInfo(1, 1, 0, 0), DataLayout::NCHW),
+ WinogradInfo(Size2D(2, 2), Size2D(3, 3), Size2D(53U, 21U), PadStrideInfo(1, 1, 1, 1), DataLayout::NCHW),
+ WinogradInfo(Size2D(2, 2), Size2D(3, 3), Size2D(53U, 21U), PadStrideInfo(2, 1, 1, 1), DataLayout::NCHW),
+ WinogradInfo(Size2D(2, 2), Size2D(3, 3), Size2D(53U, 33U), PadStrideInfo(1, 1, 0, 1), DataLayout::NCHW),
+ WinogradInfo(Size2D(2, 2), Size2D(3, 3), Size2D(34U, 42U), PadStrideInfo(1, 1, 0, 0), DataLayout::NCHW),
+ WinogradInfo(Size2D(2, 2), Size2D(3, 3), Size2D(31U, 37U), PadStrideInfo(1, 1, 1, 1), DataLayout::NCHW)
})),
framework::dataset::make("Expected", { false, false, false, false, false, false, false })),
- input_info, output_info, conv_info, kernel_dims, expected)
+ input_info, output_info, winograd_info, expected)
{
- ARM_COMPUTE_EXPECT(bool(CLWinogradInputTransform::validate(&input_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false), conv_info, kernel_dims)) == expected, framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(bool(CLWinogradInputTransform::validate(&input_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false), winograd_info)) == expected, framework::LogLevel::ERRORS);
}
// clang-format on
// *INDENT-ON*
using CLWinogradInputTransformFixture = WinogradInputTransformValidationFixture<CLTensor, CLAccessor, CLWinogradInputTransform, float>;
-DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(framework::dataset::concat(datasets::SmallWinogradInputTransformDataset(), datasets::LargeWinogradInputTransformDataset()),
+DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(framework::dataset::concat(datasets::SmallWinogradInputTransformDataset(), datasets::LargeWinogradInputTransformDataset()),
+ framework::dataset::make("DataLayout", { DataLayout::NCHW })),
framework::dataset::make("DataType", { DataType::F32 })),
- shape_in, conv_info, kernel_dims, is_nchw_format, data_type)
+ shape_in, winograd_info, data_layout, data_type)
{
- ARM_COMPUTE_UNUSED(is_nchw_format);
-
- TensorShape shape_out = compute_winograd_input_transform_shape(TensorInfo(shape_in, 1, data_type), conv_info, kernel_dims);
+ TensorShape shape_out = compute_winograd_input_transform_shape(TensorInfo(shape_in, 1, data_type), winograd_info);
// Create tensors
- CLTensor in = create_tensor<CLTensor>(shape_in, data_type);
+ CLTensor in = create_tensor<CLTensor>(shape_in, data_type, 1, 0, QuantizationInfo(), data_layout);
CLTensor out = create_tensor<CLTensor>(shape_out, data_type);
ARM_COMPUTE_EXPECT(in.info()->is_resizable(), framework::LogLevel::ERRORS);
@@ -130,15 +119,19 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(framework::da
CLWinogradInputTransform winograd_input_transform;
// Configure the function
- winograd_input_transform.configure(&in, &out, conv_info, kernel_dims);
+ winograd_input_transform.configure(&in, &out, winograd_info);
}
-FIXTURE_DATA_TEST_CASE(RunSmall, CLWinogradInputTransformFixture, framework::DatasetMode::PRECOMMIT, combine(datasets::SmallWinogradInputTransformDataset(), framework::dataset::make("DataType", { DataType::F32 })))
+FIXTURE_DATA_TEST_CASE(RunSmall, CLWinogradInputTransformFixture, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallWinogradInputTransformDataset(),
+ framework::dataset::make("DataLayout", { DataLayout::NCHW })),
+ framework::dataset::make("DataType", { DataType::F32 })))
{
validate(CLAccessor(_target), _reference);
}
-FIXTURE_DATA_TEST_CASE(RunLarge, CLWinogradInputTransformFixture, framework::DatasetMode::NIGHTLY, combine(datasets::LargeWinogradInputTransformDataset(), framework::dataset::make("DataType", { DataType::F32 })))
+FIXTURE_DATA_TEST_CASE(RunLarge, CLWinogradInputTransformFixture, framework::DatasetMode::NIGHTLY, combine(combine(datasets::LargeWinogradInputTransformDataset(),
+ framework::dataset::make("DataLayout", { DataLayout::NCHW })),
+ framework::dataset::make("DataType", { DataType::F32 })))
{
validate(CLAccessor(_target), _reference);
}
@@ -166,19 +159,19 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
TensorInfo(TensorShape(2U, 37U, 16U), 1, DataType::F32),
TensorInfo(TensorShape(22U, 37U, 36U), 1, DataType::F32)
})),
- framework::dataset::make("OutputTile", {
- Size2D(2U, 2U),
- Size2D(2U, 2U),
- Size2D(2U, 2U),
- Size2D(3U, 3U),
- Size2D(2U, 2U),
- Size2D(2U, 2U),
- Size2D(4U, 4U)
- })),
+ framework::dataset::make("WinogradInfo", {
+ WinogradInfo(Size2D(2U, 2U), Size2D(3U, 3U), Size2D() /* Not needed */, PadStrideInfo() /* Not needed */, DataLayout::NCHW /* Not needed */ ),
+ WinogradInfo(Size2D(2U, 2U), Size2D(3U, 3U), Size2D() /* Not needed */, PadStrideInfo() /* Not needed */, DataLayout::NCHW /* Not needed */ ),
+ WinogradInfo(Size2D(2U, 2U), Size2D(3U, 3U), Size2D() /* Not needed */, PadStrideInfo() /* Not needed */, DataLayout::NCHW /* Not needed */ ),
+ WinogradInfo(Size2D(3U, 3U), Size2D(3U, 3U), Size2D() /* Not needed */, PadStrideInfo() /* Not needed */, DataLayout::NCHW /* Not needed */ ),
+ WinogradInfo(Size2D(2U, 2U), Size2D(3U, 3U), Size2D() /* Not needed */, PadStrideInfo() /* Not needed */, DataLayout::NCHW /* Not needed */ ),
+ WinogradInfo(Size2D(2U, 2U), Size2D(3U, 3U), Size2D() /* Not needed */, PadStrideInfo() /* Not needed */, DataLayout::NCHW /* Not needed */ ),
+ WinogradInfo(Size2D(4U, 4U), Size2D(3U, 3U), Size2D() /* Not needed */, PadStrideInfo() /* Not needed */, DataLayout::NCHW /* Not needed */ )
+ })),
framework::dataset::make("Expected", { false, false, false, false, true, true, true })),
- input_info, output_info, output_tile, expected)
+ input_info, output_info, winograd_info, expected)
{
- ARM_COMPUTE_EXPECT(bool(CLWinogradFilterTransformKernel::validate(&input_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false), output_tile)) == expected, framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(bool(CLWinogradFilterTransformKernel::validate(&input_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false), winograd_info)) == expected, framework::LogLevel::ERRORS);
}
// clang-format on
// *INDENT-ON*
@@ -186,36 +179,40 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
using CLWinogradFilterTransform = CLSynthetizeFunctionWithZeroConstantBorder<CLWinogradFilterTransformKernel, 0>;
using CLWinogradFilterTransformFixture = WinogradFilterTransformValidationFixture<CLTensor, CLAccessor, CLWinogradFilterTransform, float>;
-DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(framework::dataset::concat(datasets::SmallWinogradFilterTransformDataset(), datasets::LargeWinogradFilterTransformDataset()),
- framework::dataset::make("OutputTile", { Size2D(2U, 2U), Size2D(4U, 4U) })),
+DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(combine(framework::dataset::concat(datasets::Small3x3Shapes(), datasets::Large3x3Shapes()),
+ framework::dataset::make("OutputTile", { Size2D(2U, 2U), Size2D(4U, 4U) })),
+ framework::dataset::make("DataLayout", { DataLayout::NCHW })),
framework::dataset::make("DataType", { DataType::F32 })),
- shape_a, is_nchw_format, output_tile, data_type)
+ shape_a, output_tile, data_layout, data_type)
{
- ARM_COMPUTE_UNUSED(is_nchw_format);
+ WinogradInfo winograd_info(output_tile, Size2D(shape_a[0], shape_a[1]), Size2D() /* Not needed */, PadStrideInfo() /* Not needed */, DataLayout::NCHW /* Not needed */);
- TensorShape shape_b = compute_winograd_filter_transform_shape(TensorInfo(shape_a, 1, data_type), output_tile);
+ TensorShape shape_b = compute_winograd_filter_transform_shape(TensorInfo(shape_a, 1, data_type), winograd_info);
// Create tensors
- CLTensor a = create_tensor<CLTensor>(shape_a, data_type);
- CLTensor b = create_tensor<CLTensor>(shape_b, data_type);
+ CLTensor a = create_tensor<CLTensor>(shape_a, data_type, 1, 0, QuantizationInfo(), data_layout);
+ CLTensor b = create_tensor<CLTensor>(shape_b, data_type, 1, 0, QuantizationInfo(), data_layout);
ARM_COMPUTE_EXPECT(a.info()->is_resizable(), framework::LogLevel::ERRORS);
ARM_COMPUTE_EXPECT(b.info()->is_resizable(), framework::LogLevel::ERRORS);
// Create and configure function
CLWinogradFilterTransform winograd_filter_transform;
- winograd_filter_transform.configure(&a, &b, output_tile);
+ winograd_filter_transform.configure(&a, &b, winograd_info);
}
-FIXTURE_DATA_TEST_CASE(RunSmall, CLWinogradFilterTransformFixture, framework::DatasetMode::ALL, combine(combine(datasets::SmallWinogradFilterTransformDataset(), framework::dataset::make("OutputTile", { Size2D(2U, 2U), Size2D(4U, 4U) })),
+FIXTURE_DATA_TEST_CASE(RunSmall, CLWinogradFilterTransformFixture, framework::DatasetMode::ALL, combine(combine(combine(datasets::Small3x3Shapes(),
+ framework::dataset::make("OutputTile", { Size2D(2U, 2U), Size2D(4U, 4U) })),
+ framework::dataset::make("DataLayout", { DataLayout::NCHW })),
framework::dataset::make("DataType", { DataType::F32 })))
{
// Validate output
validate(CLAccessor(_target), _reference, tolerance_f32);
}
-FIXTURE_DATA_TEST_CASE(RunLarge, CLWinogradFilterTransformFixture, framework::DatasetMode::NIGHTLY, combine(combine(datasets::LargeWinogradFilterTransformDataset(),
+FIXTURE_DATA_TEST_CASE(RunLarge, CLWinogradFilterTransformFixture, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::Large3x3Shapes(),
framework::dataset::make("OutputTile", { Size2D(2U, 2U), Size2D(4U, 4U) })),
+ framework::dataset::make("DataLayout", { DataLayout::NCHW })),
framework::dataset::make("DataType", { DataType::F32 })))
{
// Validate output
@@ -227,65 +224,47 @@ TEST_SUITE_END() // FilterTransform
TEST_SUITE(OutputTransform)
// *INDENT-OFF*
// clang-format off
-DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(zip(
+DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(
framework::dataset::make("InputInfo",{
- TensorInfo(TensorShape(24U, 49U, 16U, 5U), 1, DataType::F16), // F16 not supported
- TensorInfo(TensorShape(128U, 3136U, 16U, 5U), 1, DataType::QASYMM8), // QASYMM8 not supported
- TensorInfo(TensorShape(256U, 784U, 16U, 5U), 1, DataType::F32), // Kernel size not supported
- TensorInfo(TensorShape(512U, 169U, 16U, 5U), 1, DataType::F32), // Valid
- TensorInfo(TensorShape(13U, 6U, 16U, 4U), 1, DataType::F32), // Padding needed
- TensorInfo(TensorShape(7U, 16U, 16U, 7U), 1, DataType::F32), // Valid
- TensorInfo(TensorShape(1U, 442U, 16U, 37U), 1, DataType::F32) // Wrong number of tiles
+ TensorInfo(TensorShape(512U, 49U, 16U, 5U), 1, DataType::F16), // F16 not supported
+ TensorInfo(TensorShape(512U, 49U, 16U, 5U), 1, DataType::QASYMM8), // QASYMM8 not supported
+ TensorInfo(TensorShape(512U, 49U, 16U, 5U), 1, DataType::F32), // Kernel size not supported
+ TensorInfo(TensorShape(512U, 49U, 16U, 5U), 1, DataType::F32), // Valid
+ TensorInfo(TensorShape(13U, 108U, 16U, 4U), 1, DataType::F32), // Padding needed
+ TensorInfo(TensorShape(7U, 20U, 16U, 7U), 1, DataType::F32), // Valid
+ TensorInfo(TensorShape(7U, 20U, 16U, 7U), 1, DataType::F32) // Wrong WinogradInfo
}),
framework::dataset::make("BiasInfo", {
- TensorInfo(TensorShape(24U), 1, DataType::F16),
- TensorInfo(TensorShape(128U), 1, DataType::QASYMM8),
- TensorInfo(TensorShape(256U), 1, DataType::F32),
+ TensorInfo(TensorShape(512U), 1, DataType::F16),
+ TensorInfo(TensorShape(512U), 1, DataType::QASYMM8),
+ TensorInfo(TensorShape(512U), 1, DataType::F32),
TensorInfo(TensorShape(512U), 1, DataType::F32),
TensorInfo(TensorShape(13U), 1, DataType::F32),
TensorInfo(TensorShape(7U), 1, DataType::F32),
- TensorInfo(TensorShape(1U), 1, DataType::F32)
+ TensorInfo(TensorShape(7U), 1, DataType::F32)
})),
framework::dataset::make("OutputInfo", {
- TensorInfo(TensorShape(14U, 14U, 24U, 5U), 1, DataType::F16),
- TensorInfo(TensorShape(112U, 112U, 128U, 5U), 1, DataType::QASYMM8),
- TensorInfo(TensorShape(55U, 55U, 256U, 5U), 1, DataType::F32),
- TensorInfo(TensorShape(26U, 26U, 512U, 5U), 1, DataType::F32),
- TensorInfo(TensorShape(5U, 4U, 13U, 4U), 1, DataType::F32),
- TensorInfo(TensorShape(8U, 8U, 7U, 7U), 1, DataType::F32),
- TensorInfo(TensorShape(51U, 33U, 1U, 37U), 1, DataType::F32)
- })),
- framework::dataset::make("KernelDims", {
- Size2D(3U, 3U),
- Size2D(3U, 3U),
- Size2D(5U, 5U),
- Size2D(3U, 3U),
- Size2D(3U, 3U),
- Size2D(3U, 3U),
- Size2D(3U, 3U)
- })),
- framework::dataset::make("OutputDims", {
- Size2D(14U, 14U),
- Size2D(112U, 112U),
- Size2D(55U, 55U),
- Size2D(26U, 26U),
- Size2D(5U, 4U),
- Size2D(8U, 8U),
- Size2D(51U, 33U)
+ TensorInfo(TensorShape(14U, 14U, 512U, 5U), 1, DataType::F16),
+ TensorInfo(TensorShape(14U, 14U, 512U, 5U), 1, DataType::QASYMM8),
+ TensorInfo(TensorShape(14U, 14U, 512U, 5U), 1, DataType::F32),
+ TensorInfo(TensorShape(14U, 14U, 512U, 5U), 1, DataType::F32),
+ TensorInfo(TensorShape(17U, 23U, 13U, 4U), 1, DataType::F32),
+ TensorInfo(TensorShape(8U, 10U, 7U, 7U), 1, DataType::F32),
+ TensorInfo(TensorShape(7U, 9U, 7U, 7U), 1, DataType::F32)
})),
- framework::dataset::make("NumTiles", {
- Size2D(7U, 7U),
- Size2D(56U, 56U),
- Size2D(28U, 28U),
- Size2D(13U, 13U),
- Size2D(3U, 2U),
- Size2D(4U, 4U),
- Size2D(26U, 16U)
+ framework::dataset::make("WinogradInfo", {
+ WinogradInfo(Size2D(2U, 2U), Size2D(3U, 3U), Size2D(14U, 14U), PadStrideInfo(1, 1, 1, 1), DataLayout::NCHW),
+ WinogradInfo(Size2D(2U, 2U), Size2D(3U, 3U), Size2D(14U, 14U), PadStrideInfo(1, 1, 1, 1), DataLayout::NCHW),
+ WinogradInfo(Size2D(2U, 2U), Size2D(5U, 5U), Size2D(14U, 14U), PadStrideInfo(1, 1, 1, 1), DataLayout::NCHW),
+ WinogradInfo(Size2D(2U, 2U), Size2D(3U, 3U), Size2D(14U, 14U), PadStrideInfo(1, 1, 1, 1), DataLayout::NCHW),
+ WinogradInfo(Size2D(2U, 2U), Size2D(3U, 3U), Size2D(17U, 23U), PadStrideInfo(1, 1, 1, 1), DataLayout::NCHW),
+ WinogradInfo(Size2D(2U, 2U), Size2D(3U, 3U), Size2D(8U, 10U), PadStrideInfo(1, 1, 1, 1), DataLayout::NCHW),
+ WinogradInfo(Size2D(2U, 3U), Size2D(3U, 3U), Size2D(8U, 10U), PadStrideInfo(1, 1, 0, 0), DataLayout::NCHW),
})),
framework::dataset::make("Expected", { false, false, false, true, false, true, false })),
- input_info, bias_info, output_info, kernel_dims, output_dims, num_tiles, expected)
+ input_info, bias_info, output_info, winograd_info, expected)
{
- ARM_COMPUTE_EXPECT(bool(CLWinogradOutputTransformKernel::validate(&input_info.clone()->set_is_resizable(false), &bias_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false), kernel_dims, output_dims, num_tiles)) == expected, framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(bool(CLWinogradOutputTransformKernel::validate(&input_info.clone()->set_is_resizable(false), &bias_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false), winograd_info)) == expected, framework::LogLevel::ERRORS);
}
// clang-format on
// *INDENT-ON*
@@ -295,9 +274,9 @@ using CLWinogradOutputTransformFixture = WinogradOutputTransformValidationFixtur
DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(framework::dataset::concat(datasets::SmallWinogradOutputTransformDataset(), datasets::LargeWinogradOutputTransformDataset()),
framework::dataset::make("DataType", { DataType::F32 })),
- shape_a, kernel_dims, output_convolved_dims, num_tiles, data_layout, data_type)
+ shape_a, winograd_info, data_type)
{
- TensorShape shape_b = compute_winograd_output_transform_shape(TensorInfo(shape_a, 1, data_type), output_convolved_dims, data_layout);
+ TensorShape shape_b = compute_winograd_output_transform_shape(TensorInfo(shape_a, 1, data_type), winograd_info);
// Create tensors
CLTensor a = create_tensor<CLTensor>(shape_a, data_type);
@@ -308,7 +287,7 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(framework::da
// Create and configure function
CLWinogradOutputTransform winograd_output_transform;
- winograd_output_transform.configure(&a, nullptr, &b, kernel_dims, output_convolved_dims, num_tiles);
+ winograd_output_transform.configure(&a, nullptr, &b, winograd_info);
}
FIXTURE_DATA_TEST_CASE(RunSmall, CLWinogradOutputTransformFixture, framework::DatasetMode::ALL, combine(datasets::SmallWinogradOutputTransformDataset(), framework::dataset::make("DataType", { DataType::F32 })))