aboutsummaryrefslogtreecommitdiff
path: root/tests/validation
diff options
context:
space:
mode:
authorGian Marco Iodice <gianmarco.iodice@arm.com>2018-03-22 11:24:56 +0000
committerAnthony Barbier <anthony.barbier@arm.com>2018-11-02 16:49:16 +0000
commit247f52cfe337f7b2542b900e3d8cf122e9d4f11c (patch)
treebcbabb7f1eea588a5d37566829763506d328e7a9 /tests/validation
parenteb8a399ba655b85c6854676832eb11b0af4108fe (diff)
downloadComputeLibrary-247f52cfe337f7b2542b900e3d8cf122e9d4f11c.tar.gz
COMPMID-1013 - Create WinogradInfo data structure
COMPMID-1014 - Refactoring Winograd's dataset Change-Id: I6abdcbf9a90d663f4db666cd410afece9f1d034d Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/125899 Tested-by: Jenkins <bsgcomp@arm.com> Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
Diffstat (limited to 'tests/validation')
-rw-r--r--tests/validation/CL/Winograd.cpp181
-rw-r--r--tests/validation/fixtures/WinogradLayerFixture.h85
-rw-r--r--tests/validation/reference/Winograd.cpp504
-rw-r--r--tests/validation/reference/Winograd.h14
4 files changed, 400 insertions, 384 deletions
diff --git a/tests/validation/CL/Winograd.cpp b/tests/validation/CL/Winograd.cpp
index 9aba8f776c..8fa5826470 100644
--- a/tests/validation/CL/Winograd.cpp
+++ b/tests/validation/CL/Winograd.cpp
@@ -35,7 +35,6 @@
#include "tests/datasets/LargeConvolutionLayerDataset.h"
#include "tests/datasets/ShapeDatasets.h"
#include "tests/datasets/SmallConvolutionLayerDataset.h"
-#include "tests/datasets/WinogradFilterTransformDataset.h"
#include "tests/datasets/WinogradInputTransformDataset.h"
#include "tests/datasets/WinogradOutputTransformDataset.h"
#include "tests/framework/Asserts.h"
@@ -64,7 +63,7 @@ TEST_SUITE(InputTransform)
// *INDENT-OFF*
// clang-format off
-DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(
+DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
framework::dataset::make("InputInfo",{
TensorInfo(TensorShape(53U, 21U, 5U, 3U), 1, DataType::F16), // F16 not supported
TensorInfo(TensorShape(53U, 21U, 5U, 3U), 1, DataType::QASYMM8), // QASYMM8 not supported
@@ -83,44 +82,34 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(
TensorInfo(TensorShape(7U, 320U, 16U, 3U), 1, DataType::F32),
TensorInfo(TensorShape(37U, 304U, 16U), 1, DataType::F32)
})),
- framework::dataset::make("PadStrideInfo", {
- PadStrideInfo(1, 1, 1, 0),
- PadStrideInfo(1, 1, 0, 0),
- PadStrideInfo(1, 1, 1, 1),
- PadStrideInfo(2, 1, 1, 1),
- PadStrideInfo(1, 1, 0, 1),
- PadStrideInfo(1, 1, 0, 0),
- PadStrideInfo(1, 1, 1, 1)
- })),
- framework::dataset::make("KernelDims", {
- Size2D(3U, 3U),
- Size2D(3U, 3U),
- Size2D(5U, 5U),
- Size2D(3U, 3U),
- Size2D(3U, 3U),
- Size2D(3U, 3U),
- Size2D(3U, 3U)
+ framework::dataset::make("WinogradInfo", {
+ WinogradInfo(Size2D(2, 2), Size2D(3, 3), Size2D(53U, 21U), PadStrideInfo(1, 1, 1, 0), DataLayout::NCHW),
+ WinogradInfo(Size2D(2, 2), Size2D(3, 3), Size2D(53U, 21U), PadStrideInfo(1, 1, 0, 0), DataLayout::NCHW),
+ WinogradInfo(Size2D(2, 2), Size2D(3, 3), Size2D(53U, 21U), PadStrideInfo(1, 1, 1, 1), DataLayout::NCHW),
+ WinogradInfo(Size2D(2, 2), Size2D(3, 3), Size2D(53U, 21U), PadStrideInfo(2, 1, 1, 1), DataLayout::NCHW),
+ WinogradInfo(Size2D(2, 2), Size2D(3, 3), Size2D(53U, 33U), PadStrideInfo(1, 1, 0, 1), DataLayout::NCHW),
+ WinogradInfo(Size2D(2, 2), Size2D(3, 3), Size2D(34U, 42U), PadStrideInfo(1, 1, 0, 0), DataLayout::NCHW),
+ WinogradInfo(Size2D(2, 2), Size2D(3, 3), Size2D(31U, 37U), PadStrideInfo(1, 1, 1, 1), DataLayout::NCHW)
})),
framework::dataset::make("Expected", { false, false, false, false, false, false, false })),
- input_info, output_info, conv_info, kernel_dims, expected)
+ input_info, output_info, winograd_info, expected)
{
- ARM_COMPUTE_EXPECT(bool(CLWinogradInputTransform::validate(&input_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false), conv_info, kernel_dims)) == expected, framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(bool(CLWinogradInputTransform::validate(&input_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false), winograd_info)) == expected, framework::LogLevel::ERRORS);
}
// clang-format on
// *INDENT-ON*
using CLWinogradInputTransformFixture = WinogradInputTransformValidationFixture<CLTensor, CLAccessor, CLWinogradInputTransform, float>;
-DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(framework::dataset::concat(datasets::SmallWinogradInputTransformDataset(), datasets::LargeWinogradInputTransformDataset()),
+DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(framework::dataset::concat(datasets::SmallWinogradInputTransformDataset(), datasets::LargeWinogradInputTransformDataset()),
+ framework::dataset::make("DataLayout", { DataLayout::NCHW })),
framework::dataset::make("DataType", { DataType::F32 })),
- shape_in, conv_info, kernel_dims, is_nchw_format, data_type)
+ shape_in, winograd_info, data_layout, data_type)
{
- ARM_COMPUTE_UNUSED(is_nchw_format);
-
- TensorShape shape_out = compute_winograd_input_transform_shape(TensorInfo(shape_in, 1, data_type), conv_info, kernel_dims);
+ TensorShape shape_out = compute_winograd_input_transform_shape(TensorInfo(shape_in, 1, data_type), winograd_info);
// Create tensors
- CLTensor in = create_tensor<CLTensor>(shape_in, data_type);
+ CLTensor in = create_tensor<CLTensor>(shape_in, data_type, 1, 0, QuantizationInfo(), data_layout);
CLTensor out = create_tensor<CLTensor>(shape_out, data_type);
ARM_COMPUTE_EXPECT(in.info()->is_resizable(), framework::LogLevel::ERRORS);
@@ -130,15 +119,19 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(framework::da
CLWinogradInputTransform winograd_input_transform;
// Configure the function
- winograd_input_transform.configure(&in, &out, conv_info, kernel_dims);
+ winograd_input_transform.configure(&in, &out, winograd_info);
}
-FIXTURE_DATA_TEST_CASE(RunSmall, CLWinogradInputTransformFixture, framework::DatasetMode::PRECOMMIT, combine(datasets::SmallWinogradInputTransformDataset(), framework::dataset::make("DataType", { DataType::F32 })))
+FIXTURE_DATA_TEST_CASE(RunSmall, CLWinogradInputTransformFixture, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallWinogradInputTransformDataset(),
+ framework::dataset::make("DataLayout", { DataLayout::NCHW })),
+ framework::dataset::make("DataType", { DataType::F32 })))
{
validate(CLAccessor(_target), _reference);
}
-FIXTURE_DATA_TEST_CASE(RunLarge, CLWinogradInputTransformFixture, framework::DatasetMode::NIGHTLY, combine(datasets::LargeWinogradInputTransformDataset(), framework::dataset::make("DataType", { DataType::F32 })))
+FIXTURE_DATA_TEST_CASE(RunLarge, CLWinogradInputTransformFixture, framework::DatasetMode::NIGHTLY, combine(combine(datasets::LargeWinogradInputTransformDataset(),
+ framework::dataset::make("DataLayout", { DataLayout::NCHW })),
+ framework::dataset::make("DataType", { DataType::F32 })))
{
validate(CLAccessor(_target), _reference);
}
@@ -166,19 +159,19 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
TensorInfo(TensorShape(2U, 37U, 16U), 1, DataType::F32),
TensorInfo(TensorShape(22U, 37U, 36U), 1, DataType::F32)
})),
- framework::dataset::make("OutputTile", {
- Size2D(2U, 2U),
- Size2D(2U, 2U),
- Size2D(2U, 2U),
- Size2D(3U, 3U),
- Size2D(2U, 2U),
- Size2D(2U, 2U),
- Size2D(4U, 4U)
- })),
+ framework::dataset::make("WinogradInfo", {
+ WinogradInfo(Size2D(2U, 2U), Size2D(3U, 3U), Size2D() /* Not needed */, PadStrideInfo() /* Not needed */, DataLayout::NCHW /* Not needed */ ),
+ WinogradInfo(Size2D(2U, 2U), Size2D(3U, 3U), Size2D() /* Not needed */, PadStrideInfo() /* Not needed */, DataLayout::NCHW /* Not needed */ ),
+ WinogradInfo(Size2D(2U, 2U), Size2D(3U, 3U), Size2D() /* Not needed */, PadStrideInfo() /* Not needed */, DataLayout::NCHW /* Not needed */ ),
+ WinogradInfo(Size2D(3U, 3U), Size2D(3U, 3U), Size2D() /* Not needed */, PadStrideInfo() /* Not needed */, DataLayout::NCHW /* Not needed */ ),
+ WinogradInfo(Size2D(2U, 2U), Size2D(3U, 3U), Size2D() /* Not needed */, PadStrideInfo() /* Not needed */, DataLayout::NCHW /* Not needed */ ),
+ WinogradInfo(Size2D(2U, 2U), Size2D(3U, 3U), Size2D() /* Not needed */, PadStrideInfo() /* Not needed */, DataLayout::NCHW /* Not needed */ ),
+ WinogradInfo(Size2D(4U, 4U), Size2D(3U, 3U), Size2D() /* Not needed */, PadStrideInfo() /* Not needed */, DataLayout::NCHW /* Not needed */ )
+ })),
framework::dataset::make("Expected", { false, false, false, false, true, true, true })),
- input_info, output_info, output_tile, expected)
+ input_info, output_info, winograd_info, expected)
{
- ARM_COMPUTE_EXPECT(bool(CLWinogradFilterTransformKernel::validate(&input_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false), output_tile)) == expected, framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(bool(CLWinogradFilterTransformKernel::validate(&input_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false), winograd_info)) == expected, framework::LogLevel::ERRORS);
}
// clang-format on
// *INDENT-ON*
@@ -186,36 +179,40 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
using CLWinogradFilterTransform = CLSynthetizeFunctionWithZeroConstantBorder<CLWinogradFilterTransformKernel, 0>;
using CLWinogradFilterTransformFixture = WinogradFilterTransformValidationFixture<CLTensor, CLAccessor, CLWinogradFilterTransform, float>;
-DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(framework::dataset::concat(datasets::SmallWinogradFilterTransformDataset(), datasets::LargeWinogradFilterTransformDataset()),
- framework::dataset::make("OutputTile", { Size2D(2U, 2U), Size2D(4U, 4U) })),
+DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(combine(framework::dataset::concat(datasets::Small3x3Shapes(), datasets::Large3x3Shapes()),
+ framework::dataset::make("OutputTile", { Size2D(2U, 2U), Size2D(4U, 4U) })),
+ framework::dataset::make("DataLayout", { DataLayout::NCHW })),
framework::dataset::make("DataType", { DataType::F32 })),
- shape_a, is_nchw_format, output_tile, data_type)
+ shape_a, output_tile, data_layout, data_type)
{
- ARM_COMPUTE_UNUSED(is_nchw_format);
+ WinogradInfo winograd_info(output_tile, Size2D(shape_a[0], shape_a[1]), Size2D() /* Not needed */, PadStrideInfo() /* Not needed */, DataLayout::NCHW /* Not needed */);
- TensorShape shape_b = compute_winograd_filter_transform_shape(TensorInfo(shape_a, 1, data_type), output_tile);
+ TensorShape shape_b = compute_winograd_filter_transform_shape(TensorInfo(shape_a, 1, data_type), winograd_info);
// Create tensors
- CLTensor a = create_tensor<CLTensor>(shape_a, data_type);
- CLTensor b = create_tensor<CLTensor>(shape_b, data_type);
+ CLTensor a = create_tensor<CLTensor>(shape_a, data_type, 1, 0, QuantizationInfo(), data_layout);
+ CLTensor b = create_tensor<CLTensor>(shape_b, data_type, 1, 0, QuantizationInfo(), data_layout);
ARM_COMPUTE_EXPECT(a.info()->is_resizable(), framework::LogLevel::ERRORS);
ARM_COMPUTE_EXPECT(b.info()->is_resizable(), framework::LogLevel::ERRORS);
// Create and configure function
CLWinogradFilterTransform winograd_filter_transform;
- winograd_filter_transform.configure(&a, &b, output_tile);
+ winograd_filter_transform.configure(&a, &b, winograd_info);
}
-FIXTURE_DATA_TEST_CASE(RunSmall, CLWinogradFilterTransformFixture, framework::DatasetMode::ALL, combine(combine(datasets::SmallWinogradFilterTransformDataset(), framework::dataset::make("OutputTile", { Size2D(2U, 2U), Size2D(4U, 4U) })),
+FIXTURE_DATA_TEST_CASE(RunSmall, CLWinogradFilterTransformFixture, framework::DatasetMode::ALL, combine(combine(combine(datasets::Small3x3Shapes(),
+ framework::dataset::make("OutputTile", { Size2D(2U, 2U), Size2D(4U, 4U) })),
+ framework::dataset::make("DataLayout", { DataLayout::NCHW })),
framework::dataset::make("DataType", { DataType::F32 })))
{
// Validate output
validate(CLAccessor(_target), _reference, tolerance_f32);
}
-FIXTURE_DATA_TEST_CASE(RunLarge, CLWinogradFilterTransformFixture, framework::DatasetMode::NIGHTLY, combine(combine(datasets::LargeWinogradFilterTransformDataset(),
+FIXTURE_DATA_TEST_CASE(RunLarge, CLWinogradFilterTransformFixture, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::Large3x3Shapes(),
framework::dataset::make("OutputTile", { Size2D(2U, 2U), Size2D(4U, 4U) })),
+ framework::dataset::make("DataLayout", { DataLayout::NCHW })),
framework::dataset::make("DataType", { DataType::F32 })))
{
// Validate output
@@ -227,65 +224,47 @@ TEST_SUITE_END() // FilterTransform
TEST_SUITE(OutputTransform)
// *INDENT-OFF*
// clang-format off
-DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(zip(
+DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(
framework::dataset::make("InputInfo",{
- TensorInfo(TensorShape(24U, 49U, 16U, 5U), 1, DataType::F16), // F16 not supported
- TensorInfo(TensorShape(128U, 3136U, 16U, 5U), 1, DataType::QASYMM8), // QASYMM8 not supported
- TensorInfo(TensorShape(256U, 784U, 16U, 5U), 1, DataType::F32), // Kernel size not supported
- TensorInfo(TensorShape(512U, 169U, 16U, 5U), 1, DataType::F32), // Valid
- TensorInfo(TensorShape(13U, 6U, 16U, 4U), 1, DataType::F32), // Padding needed
- TensorInfo(TensorShape(7U, 16U, 16U, 7U), 1, DataType::F32), // Valid
- TensorInfo(TensorShape(1U, 442U, 16U, 37U), 1, DataType::F32) // Wrong number of tiles
+ TensorInfo(TensorShape(512U, 49U, 16U, 5U), 1, DataType::F16), // F16 not supported
+ TensorInfo(TensorShape(512U, 49U, 16U, 5U), 1, DataType::QASYMM8), // QASYMM8 not supported
+ TensorInfo(TensorShape(512U, 49U, 16U, 5U), 1, DataType::F32), // Kernel size not supported
+ TensorInfo(TensorShape(512U, 49U, 16U, 5U), 1, DataType::F32), // Valid
+ TensorInfo(TensorShape(13U, 108U, 16U, 4U), 1, DataType::F32), // Padding needed
+ TensorInfo(TensorShape(7U, 20U, 16U, 7U), 1, DataType::F32), // Valid
+ TensorInfo(TensorShape(7U, 20U, 16U, 7U), 1, DataType::F32) // Wrong WinogradInfo
}),
framework::dataset::make("BiasInfo", {
- TensorInfo(TensorShape(24U), 1, DataType::F16),
- TensorInfo(TensorShape(128U), 1, DataType::QASYMM8),
- TensorInfo(TensorShape(256U), 1, DataType::F32),
+ TensorInfo(TensorShape(512U), 1, DataType::F16),
+ TensorInfo(TensorShape(512U), 1, DataType::QASYMM8),
+ TensorInfo(TensorShape(512U), 1, DataType::F32),
TensorInfo(TensorShape(512U), 1, DataType::F32),
TensorInfo(TensorShape(13U), 1, DataType::F32),
TensorInfo(TensorShape(7U), 1, DataType::F32),
- TensorInfo(TensorShape(1U), 1, DataType::F32)
+ TensorInfo(TensorShape(7U), 1, DataType::F32)
})),
framework::dataset::make("OutputInfo", {
- TensorInfo(TensorShape(14U, 14U, 24U, 5U), 1, DataType::F16),
- TensorInfo(TensorShape(112U, 112U, 128U, 5U), 1, DataType::QASYMM8),
- TensorInfo(TensorShape(55U, 55U, 256U, 5U), 1, DataType::F32),
- TensorInfo(TensorShape(26U, 26U, 512U, 5U), 1, DataType::F32),
- TensorInfo(TensorShape(5U, 4U, 13U, 4U), 1, DataType::F32),
- TensorInfo(TensorShape(8U, 8U, 7U, 7U), 1, DataType::F32),
- TensorInfo(TensorShape(51U, 33U, 1U, 37U), 1, DataType::F32)
- })),
- framework::dataset::make("KernelDims", {
- Size2D(3U, 3U),
- Size2D(3U, 3U),
- Size2D(5U, 5U),
- Size2D(3U, 3U),
- Size2D(3U, 3U),
- Size2D(3U, 3U),
- Size2D(3U, 3U)
- })),
- framework::dataset::make("OutputDims", {
- Size2D(14U, 14U),
- Size2D(112U, 112U),
- Size2D(55U, 55U),
- Size2D(26U, 26U),
- Size2D(5U, 4U),
- Size2D(8U, 8U),
- Size2D(51U, 33U)
+ TensorInfo(TensorShape(14U, 14U, 512U, 5U), 1, DataType::F16),
+ TensorInfo(TensorShape(14U, 14U, 512U, 5U), 1, DataType::QASYMM8),
+ TensorInfo(TensorShape(14U, 14U, 512U, 5U), 1, DataType::F32),
+ TensorInfo(TensorShape(14U, 14U, 512U, 5U), 1, DataType::F32),
+ TensorInfo(TensorShape(17U, 23U, 13U, 4U), 1, DataType::F32),
+ TensorInfo(TensorShape(8U, 10U, 7U, 7U), 1, DataType::F32),
+ TensorInfo(TensorShape(7U, 9U, 7U, 7U), 1, DataType::F32)
})),
- framework::dataset::make("NumTiles", {
- Size2D(7U, 7U),
- Size2D(56U, 56U),
- Size2D(28U, 28U),
- Size2D(13U, 13U),
- Size2D(3U, 2U),
- Size2D(4U, 4U),
- Size2D(26U, 16U)
+ framework::dataset::make("WinogradInfo", {
+ WinogradInfo(Size2D(2U, 2U), Size2D(3U, 3U), Size2D(14U, 14U), PadStrideInfo(1, 1, 1, 1), DataLayout::NCHW),
+ WinogradInfo(Size2D(2U, 2U), Size2D(3U, 3U), Size2D(14U, 14U), PadStrideInfo(1, 1, 1, 1), DataLayout::NCHW),
+ WinogradInfo(Size2D(2U, 2U), Size2D(5U, 5U), Size2D(14U, 14U), PadStrideInfo(1, 1, 1, 1), DataLayout::NCHW),
+ WinogradInfo(Size2D(2U, 2U), Size2D(3U, 3U), Size2D(14U, 14U), PadStrideInfo(1, 1, 1, 1), DataLayout::NCHW),
+ WinogradInfo(Size2D(2U, 2U), Size2D(3U, 3U), Size2D(17U, 23U), PadStrideInfo(1, 1, 1, 1), DataLayout::NCHW),
+ WinogradInfo(Size2D(2U, 2U), Size2D(3U, 3U), Size2D(8U, 10U), PadStrideInfo(1, 1, 1, 1), DataLayout::NCHW),
+ WinogradInfo(Size2D(2U, 3U), Size2D(3U, 3U), Size2D(8U, 10U), PadStrideInfo(1, 1, 0, 0), DataLayout::NCHW),
})),
framework::dataset::make("Expected", { false, false, false, true, false, true, false })),
- input_info, bias_info, output_info, kernel_dims, output_dims, num_tiles, expected)
+ input_info, bias_info, output_info, winograd_info, expected)
{
- ARM_COMPUTE_EXPECT(bool(CLWinogradOutputTransformKernel::validate(&input_info.clone()->set_is_resizable(false), &bias_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false), kernel_dims, output_dims, num_tiles)) == expected, framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(bool(CLWinogradOutputTransformKernel::validate(&input_info.clone()->set_is_resizable(false), &bias_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false), winograd_info)) == expected, framework::LogLevel::ERRORS);
}
// clang-format on
// *INDENT-ON*
@@ -295,9 +274,9 @@ using CLWinogradOutputTransformFixture = WinogradOutputTransformValidationFixtur
DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(framework::dataset::concat(datasets::SmallWinogradOutputTransformDataset(), datasets::LargeWinogradOutputTransformDataset()),
framework::dataset::make("DataType", { DataType::F32 })),
- shape_a, kernel_dims, output_convolved_dims, num_tiles, data_layout, data_type)
+ shape_a, winograd_info, data_type)
{
- TensorShape shape_b = compute_winograd_output_transform_shape(TensorInfo(shape_a, 1, data_type), output_convolved_dims, data_layout);
+ TensorShape shape_b = compute_winograd_output_transform_shape(TensorInfo(shape_a, 1, data_type), winograd_info);
// Create tensors
CLTensor a = create_tensor<CLTensor>(shape_a, data_type);
@@ -308,7 +287,7 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(framework::da
// Create and configure function
CLWinogradOutputTransform winograd_output_transform;
- winograd_output_transform.configure(&a, nullptr, &b, kernel_dims, output_convolved_dims, num_tiles);
+ winograd_output_transform.configure(&a, nullptr, &b, winograd_info);
}
FIXTURE_DATA_TEST_CASE(RunSmall, CLWinogradOutputTransformFixture, framework::DatasetMode::ALL, combine(datasets::SmallWinogradOutputTransformDataset(), framework::dataset::make("DataType", { DataType::F32 })))
diff --git a/tests/validation/fixtures/WinogradLayerFixture.h b/tests/validation/fixtures/WinogradLayerFixture.h
index 481eb93e80..17229cac25 100644
--- a/tests/validation/fixtures/WinogradLayerFixture.h
+++ b/tests/validation/fixtures/WinogradLayerFixture.h
@@ -142,8 +142,9 @@ protected:
fill(bias, 2, 0.f, 0.f);
}
- return (act_info.enabled()) ? reference::activation_layer<T>(reference::convolution_layer<T>(src, weights, bias, output_shape, info), act_info) : reference::convolution_layer<T>(src, weights, bias,
- output_shape, info);
+ SimpleTensor<T> conv_out = reference::convolution_layer<T>(src, weights, bias, output_shape, info);
+
+ return (act_info.enabled()) ? reference::activation_layer<T>(conv_out, act_info) : conv_out;
}
TensorType _target{};
@@ -155,12 +156,12 @@ class WinogradInputTransformValidationFixture : public framework::Fixture
{
public:
template <typename...>
- void setup(TensorShape input_shape, PadStrideInfo conv_info, Size2D kernel_dims, bool is_nchw_format, DataType data_type)
+ void setup(TensorShape input_shape, WinogradInfo winograd_info, DataLayout data_layout, DataType data_type)
{
- TensorShape output_shape = compute_winograd_input_transform_shape(TensorInfo(input_shape, 1, data_type), conv_info, kernel_dims);
+ TensorShape output_shape = compute_winograd_input_transform_shape(TensorInfo(input_shape, 1, data_type), winograd_info);
- _target = compute_target(input_shape, output_shape, conv_info, kernel_dims, is_nchw_format, data_type);
- _reference = compute_reference(input_shape, output_shape, conv_info, kernel_dims, is_nchw_format, data_type);
+ _target = compute_target(input_shape, output_shape, winograd_info, data_layout, data_type);
+ _reference = compute_reference(input_shape, output_shape, winograd_info, data_layout, data_type);
}
protected:
@@ -184,16 +185,14 @@ protected:
}
}
- TensorType compute_target(const TensorShape &input_shape, const TensorShape &output_shape, const PadStrideInfo &conv_info, const Size2D &kernel_dims, bool is_nchw_format, DataType data_type)
+ TensorType compute_target(const TensorShape &input_shape, const TensorShape &output_shape, const WinogradInfo &winograd_info, DataLayout data_layout, DataType data_type)
{
- ARM_COMPUTE_UNUSED(is_nchw_format);
-
- TensorType src = create_tensor<TensorType>(input_shape, data_type);
- TensorType dst = create_tensor<TensorType>(output_shape, data_type);
+ TensorType src = create_tensor<TensorType>(input_shape, data_type, 1, 0, QuantizationInfo(), data_layout);
+ TensorType dst = create_tensor<TensorType>(output_shape, data_type, 1, 0, QuantizationInfo(), data_layout);
// Create and configure function
FunctionType transf;
- transf.configure(&src, &dst, conv_info, kernel_dims);
+ transf.configure(&src, &dst, winograd_info);
ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
@@ -208,23 +207,21 @@ protected:
// Fill tensors
fill(AccessorType(src), 0, -1.f, 1.f);
- // Compute CLWinogradInputTransform function
+ // Compute Winograd input transform function
transf.run();
return dst;
}
- SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &output_shape, const PadStrideInfo &conv_info, const Size2D &kernel_dims, bool is_nchw_format, DataType data_type)
+ SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &output_shape, const WinogradInfo &winograd_info, DataLayout data_layout, DataType data_type)
{
- ARM_COMPUTE_UNUSED(is_nchw_format);
-
// Create reference
- SimpleTensor<T> src{ input_shape, data_type };
+ SimpleTensor<T> src{ input_shape, data_type, 1, 0, QuantizationInfo(), data_layout };
// Fill reference
fill(src, 0, -1.f, 1.f);
- return reference::winograd_input_transform<T>(src, output_shape, conv_info, kernel_dims);
+ return reference::winograd_input_transform<T>(src, output_shape, winograd_info);
}
TensorType _target{};
@@ -236,12 +233,13 @@ class WinogradFilterTransformValidationFixture : public framework::Fixture
{
public:
template <typename...>
- void setup(TensorShape input_shape, bool is_nchw_format, Size2D output_tile, DataType data_type)
+ void setup(TensorShape input_shape, Size2D output_tile, DataLayout data_layout, DataType data_type)
{
- TensorShape output_shape = compute_winograd_filter_transform_shape(TensorInfo(input_shape, 1, data_type), output_tile);
+ WinogradInfo winograd_info(output_tile, Size2D(input_shape[0], input_shape[1]), Size2D() /* Not needed */, PadStrideInfo() /* Not needed */, DataLayout::NCHW /* Not needed */);
+ TensorShape output_shape = compute_winograd_filter_transform_shape(TensorInfo(input_shape, 1, data_type), winograd_info);
- _target = compute_target(input_shape, output_shape, is_nchw_format, output_tile, data_type);
- _reference = compute_reference(input_shape, output_shape, is_nchw_format, output_tile, data_type);
+ _target = compute_target(input_shape, output_shape, winograd_info, data_layout, data_type);
+ _reference = compute_reference(input_shape, output_shape, winograd_info, data_layout, data_type);
}
protected:
@@ -265,17 +263,15 @@ protected:
}
}
- TensorType compute_target(const TensorShape &input_shape, const TensorShape &output_shape, bool is_nchw_format, const Size2D &output_tile, DataType data_type)
+ TensorType compute_target(const TensorShape &input_shape, const TensorShape &output_shape, const WinogradInfo &winograd_info, DataLayout data_layout, DataType data_type)
{
- ARM_COMPUTE_UNUSED(is_nchw_format);
-
// Create tensors
- TensorType src = create_tensor<TensorType>(input_shape, data_type, 1);
- TensorType dst = create_tensor<TensorType>(output_shape, data_type, 1);
+ TensorType src = create_tensor<TensorType>(input_shape, data_type, 1, 0, QuantizationInfo(), data_layout);
+ TensorType dst = create_tensor<TensorType>(output_shape, data_type, 1, 0, QuantizationInfo(), data_layout);
// Create and configure function
FunctionType filter_transform;
- filter_transform.configure(&src, &dst, output_tile);
+ filter_transform.configure(&src, &dst, winograd_info);
ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
@@ -295,17 +291,15 @@ protected:
return dst;
}
- SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &output_shape, bool is_nchw_format, const Size2D &output_tile, DataType data_type)
+ SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &output_shape, const WinogradInfo &winograd_info, DataLayout data_layout, DataType data_type)
{
- ARM_COMPUTE_UNUSED(is_nchw_format);
-
// Create reference
- SimpleTensor<T> src{ input_shape, data_type, 1 };
+ SimpleTensor<T> src{ input_shape, data_type, 1, 0, QuantizationInfo(), data_layout };
// Fill reference
fill(src, 0, -1.f, 1.f);
- return reference::winograd_filter_transform<T>(src, output_shape, output_tile);
+ return reference::winograd_filter_transform<T>(src, output_shape, winograd_info);
}
TensorType _target{};
@@ -317,12 +311,12 @@ class WinogradOutputTransformValidationFixture : public framework::Fixture
{
public:
template <typename...>
- void setup(TensorShape input_shape, Size2D kernel_dims, Size2D output_convolved_dims, Size2D num_tiles, DataLayout data_layout, DataType data_type)
+ void setup(TensorShape input_shape, WinogradInfo winograd_info, DataType data_type)
{
- TensorShape output_shape = compute_winograd_output_transform_shape(TensorInfo(input_shape, 1, data_type), output_convolved_dims, data_layout);
+ TensorShape output_shape = compute_winograd_output_transform_shape(TensorInfo(input_shape, 1, data_type), winograd_info);
- _target = compute_target(input_shape, output_shape, kernel_dims, output_convolved_dims, num_tiles, data_layout, data_type);
- _reference = compute_reference(input_shape, output_shape, kernel_dims, output_convolved_dims, num_tiles, data_layout, data_type);
+ _target = compute_target(input_shape, output_shape, winograd_info, data_type);
+ _reference = compute_reference(input_shape, output_shape, winograd_info, data_type);
}
protected:
@@ -346,16 +340,15 @@ protected:
}
}
- TensorType compute_target(const TensorShape &input_shape, const TensorShape &output_shape, const Size2D &kernel_dims, const Size2D &output_convolved_dims, Size2D &num_tiles, DataLayout data_layout,
- DataType data_type)
+ TensorType compute_target(const TensorShape &input_shape, const TensorShape &output_shape, const WinogradInfo &winograd_info, DataType data_type)
{
// Create tensors
- TensorType src = create_tensor<TensorType>(input_shape, data_type, 1, 0, QuantizationInfo(), data_layout);
- TensorType dst = create_tensor<TensorType>(output_shape, data_type, 1, 0, QuantizationInfo(), data_layout);
+ TensorType src = create_tensor<TensorType>(input_shape, data_type);
+ TensorType dst = create_tensor<TensorType>(output_shape, data_type, 1, 0, QuantizationInfo(), winograd_info.output_data_layout);
// Create and configure function
FunctionType output_transform;
- output_transform.configure(&src, nullptr, &dst, kernel_dims, output_convolved_dims, num_tiles);
+ output_transform.configure(&src, nullptr, &dst, winograd_info);
ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
@@ -375,17 +368,15 @@ protected:
return dst;
}
- SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &output_shape, const Size2D &kernel_dims, const Size2D &output_convolved_dims, Size2D &num_tiles,
- DataLayout data_layout,
- DataType data_type)
+ SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &output_shape, const WinogradInfo &winograd_info, DataType data_type)
{
// Create reference
- SimpleTensor<T> src{ input_shape, data_type, 1, 0, QuantizationInfo(), data_layout };
+ SimpleTensor<T> src{ input_shape, data_type };
// Fill reference
fill(src, 0, -1.f, 1.f);
- return reference::winograd_output_transform<T>(src, output_shape, kernel_dims, num_tiles);
+ return reference::winograd_output_transform<T>(src, output_shape, winograd_info);
}
TensorType _target{};
diff --git a/tests/validation/reference/Winograd.cpp b/tests/validation/reference/Winograd.cpp
index ad0dcbd958..604e25214b 100644
--- a/tests/validation/reference/Winograd.cpp
+++ b/tests/validation/reference/Winograd.cpp
@@ -28,6 +28,8 @@
#include "arm_compute/core/Types.h"
+#include <algorithm>
+
namespace arm_compute
{
namespace test
@@ -39,153 +41,155 @@ namespace reference
namespace
{
template <typename T>
-void winograd_filter_transform3x3(const SimpleTensor<T> &in, SimpleTensor<T> &out, const Size2D &output_tile)
+void initialize_matrix_transform(SimpleTensor<T> &src, const Size2D &output_tile_size, const Size2D &kernel_size, WinogradTransformType winograd_transform_type)
{
- const bool is_2x2 = (output_tile.width == 2);
- const unsigned int transf_side = is_2x2 ? 4u : 6u;
+ ARM_COMPUTE_ERROR_ON((output_tile_size != Size2D(2U, 2U)) && (output_tile_size != Size2D(4U, 4U)));
+ ARM_COMPUTE_ERROR_ON(kernel_size != Size2D(3U, 3U));
- // Simple tensor for the 3x3 input tile
- SimpleTensor<T> input_tile{ TensorShape(3u, 3u), in.data_type(), 1 };
+ // Winograd input transform matrices
+ static const float imatrix2x2_3x3[] =
+ {
+ 1.0f, 0.0f, -1.0f, 0.0f,
+ 0.0f, 1.0f, 1.0f, 0.0f,
+ 0.0f, -1.0f, 1.0f, 0.0f,
+ 0.0f, 1.0f, 0.0f, -1.0f
+ };
- // Simple tensor for the transformation matrix
- SimpleTensor<T> trans_matrix{ TensorShape(3u, transf_side), in.data_type(), 1 };
+ static const float imatrix4x4_3x3[] =
+ {
+ 4.0f, 0.0f, -5.0f, 0.0f, 1.0f, 0.0f,
+ 0.0f, -4.0f, -4.0f, 1.0f, 1.0f, 0.0f,
+ 0.0f, 4.0f, -4.0f, -1.0f, 1.0f, 0.0f,
+ 0.0f, -2.0f, -1.0f, 2.0f, 1.0f, 0.0f,
+ 0.0f, 2.0f, -1.0f, -2.0f, 1.0f, 0.0f,
+ 0.0f, 4.0f, 0.0f, -5.0f, 0.0f, 1.0f,
+ };
+
+ // ------------------------------------------
+
+ // Winograd filter transform matrices
+ static const float fmatrix2x2_3x3[] =
+ {
+ 1.0f, 0.0f, 0.0f,
+ 0.5f, 0.5f, 0.5f,
+ 0.5f, -0.5f, 0.5f,
+ 0.0f, 0.0f, 1.0f
+ };
- // Simple tensor for the transformation matrix transpose
- SimpleTensor<T> trans_matrix_transposed{ TensorShape(transf_side, 3u), in.data_type(), 1 };
+ static const float fmatrix4x4_3x3[] =
+ {
+ 0.25f, 0.0f, 0.0f,
+ -1.0f / 6.0f, -1.0f / 6.0f, -1.0f / 6.0f,
+ -1.0f / 6.0f, 1.0f / 6.0f, -1.0f / 6.0f,
+ 1.0f / 24.0f, 1.0f / 12.0f, 1.0f / 6.0f,
+ 1.0f / 24.0f, -1.0f / 12.0f, 1.0f / 6.0f,
+ 0.0f, 0.0f, 1.0f
+ };
+
+ // ------------------------------------------
+
+ // Winograd output transform matrices
+ static const float omatrix2x2_3x3[] =
+ {
+ 1.0f, 1.0f, 1.0f, 0.0f,
+ 0.0f, 1.0f, -1.0f, -1.0f
+ };
+
+ static const float omatrix4x4_3x3[] =
+ {
+ 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f,
+ 0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 0.0f,
+ 0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 0.0f,
+ 0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 1.0f
+ };
- // Simple tensor for the 3xSide temporary tile
- SimpleTensor<T> tmp_tile{ TensorShape(3u, transf_side), in.data_type(), 1 };
+ // ------------------------------------------
- // Simple tensor for the SidexSide output tile
- SimpleTensor<T> transf_tile{ TensorShape(transf_side, transf_side), in.data_type(), 1 };
+ using WinogradKey = std::tuple<std::pair<int, int>, std::pair<int, int>, WinogradTransformType>;
- if(is_2x2)
+ // Key = (Output tile size, Kernel size, Winograd transform type)
+ static std::map<WinogradKey, const float *> matrix_map =
{
- // Initialize 3x4 transformation matrix
- // 1 | 0 | 0
- // 0.5 | 0.5 | 0.5
- // 0.5 |-0.5 | 0.5
- // 0 | 0 | 1
- trans_matrix[0 + 0 * 3] = 1.0f;
- trans_matrix[1 + 0 * 3] = 0.0f;
- trans_matrix[2 + 0 * 3] = 0.0f;
- trans_matrix[0 + 1 * 3] = 0.5f;
- trans_matrix[1 + 1 * 3] = 0.5f;
- trans_matrix[2 + 1 * 3] = 0.5f;
- trans_matrix[0 + 2 * 3] = 0.5f;
- trans_matrix[1 + 2 * 3] = -0.5f;
- trans_matrix[2 + 2 * 3] = 0.5f;
- trans_matrix[0 + 3 * 3] = 0.0f;
- trans_matrix[1 + 3 * 3] = 0.0f;
- trans_matrix[2 + 3 * 3] = 1.0f;
+ { WinogradKey(std::pair<int, int>(2, 2), std::pair<int, int>(3, 3), WinogradTransformType::INPUT), imatrix2x2_3x3 },
+ { WinogradKey(std::pair<int, int>(4, 4), std::pair<int, int>(3, 3), WinogradTransformType::INPUT), imatrix4x4_3x3 },
+ { WinogradKey(std::pair<int, int>(2, 2), std::pair<int, int>(3, 3), WinogradTransformType::FILTER), fmatrix2x2_3x3 },
+ { WinogradKey(std::pair<int, int>(4, 4), std::pair<int, int>(3, 3), WinogradTransformType::FILTER), fmatrix4x4_3x3 },
+ { WinogradKey(std::pair<int, int>(2, 2), std::pair<int, int>(3, 3), WinogradTransformType::OUTPUT), omatrix2x2_3x3 },
+ { WinogradKey(std::pair<int, int>(4, 4), std::pair<int, int>(3, 3), WinogradTransformType::OUTPUT), omatrix4x4_3x3 },
+ };
+
+ // Look up the transform matrix for the requested (tile size, kernel size, transform type) configuration
+ std::map<WinogradKey, const float *>::iterator it;
+
+ it = matrix_map.find(WinogradKey(std::pair<int, int>(output_tile_size.width, output_tile_size.height),
+ std::pair<int, int>(kernel_size.width, kernel_size.height),
+ winograd_transform_type));
+
+ float const *matrix_values = nullptr;
+ if(it != matrix_map.end())
+ {
+ // Get matrix pointer
+ matrix_values = it->second;
}
else
{
- // Initialize 3x6 transformation matrix
- // 1/4 | 0 | 0
- // -1/6 | -1/6 | -1/6
- // -1/6 | 1/6 | -1/6
- // 1/24 | 1/12 | 1/6
- // 1/24 | -1/12 | 1/6
- // 0 | 0 | 1
- trans_matrix[0 + 0 * 3] = 1.0f / 4.0f;
- trans_matrix[1 + 0 * 3] = 0.0f;
- trans_matrix[2 + 0 * 3] = 0.0f;
- trans_matrix[0 + 1 * 3] = -1.0f / 6.0f;
- trans_matrix[1 + 1 * 3] = -1.0f / 6.0f;
- trans_matrix[2 + 1 * 3] = -1.0f / 6.0f;
- trans_matrix[0 + 2 * 3] = -1.0f / 6.0f;
- trans_matrix[1 + 2 * 3] = 1.0f / 6.0f;
- trans_matrix[2 + 2 * 3] = -1.0f / 6.0f;
- trans_matrix[0 + 3 * 3] = 1.0f / 24.0f;
- trans_matrix[1 + 3 * 3] = 1.0f / 12.0f;
- trans_matrix[2 + 3 * 3] = 1.0f / 6.0f;
- trans_matrix[0 + 4 * 3] = 1.0f / 24.0f;
- trans_matrix[1 + 4 * 3] = -1.0f / 12.0f;
- trans_matrix[2 + 4 * 3] = 1.0f / 6.0f;
- trans_matrix[0 + 5 * 3] = 0.0f;
- trans_matrix[1 + 5 * 3] = 0.0f;
- trans_matrix[2 + 5 * 3] = 1.0f;
+ ARM_COMPUTE_ERROR("Winograd configuration not supported");
}
- // Transpose the transformation matrix
- transpose_matrix(trans_matrix, trans_matrix_transposed);
+ // Copy values
+ std::copy(&matrix_values[0], &matrix_values[0] + src.num_elements(), &src[0]);
+}
+} // namespace
- const int num_channels = in.shape()[2];
- const int num_filters = in.shape()[3];
- const int num_batches = in.shape().total_size() / (9 * num_channels * num_filters);
+template <typename T>
+SimpleTensor<T> winograd_input_transform(const SimpleTensor<T> &in, const TensorShape &output_shape, const WinogradInfo &winograd_info)
+{
+ ARM_COMPUTE_ERROR_ON(in.data_layout() != DataLayout::NCHW);
- for(int n = 0; n < num_batches; ++n)
- {
- for(int w = 0; w < num_filters; ++w)
- {
- for(int z = 0; z < num_channels; ++z)
- {
- // Load the 3x3 tile from the input tensor
- get_tile(in, input_tile, Coordinates(0, 0, z, w, n));
+ const PadStrideInfo conv_info = winograd_info.convolution_info;
+ const Size2D output_tile_size = winograd_info.output_tile_size;
+ const Size2D kernel_size = winograd_info.kernel_size;
- // First transformation
- matrix_multiply(trans_matrix, input_tile, tmp_tile);
+ SimpleTensor<T> out{ output_shape, in.data_type() };
- // Second transformation
- matrix_multiply(tmp_tile, trans_matrix_transposed, transf_tile);
+ // Calculate dimensions for the tile
+ const unsigned int tile_w = output_tile_size.width + kernel_size.width - 1;
+ const unsigned int tile_h = output_tile_size.height + kernel_size.height - 1;
- // Store the 4x4 output tile across the 16 channels
- const int output_offset = w + z * num_filters;
+ TensorShape tile_dims(tile_w, tile_h);
- for(unsigned int out_h = 0, out_pos = 0; out_h < transf_side; ++out_h)
- {
- for(unsigned int out_w = 0; out_w < transf_side; ++out_w, ++out_pos)
- {
- out[output_offset + out_pos * num_filters * num_channels] = transf_tile[out_w + out_h * transf_side];
- }
- }
- }
- }
- }
-}
+ // Simple tensor for the input tile
+ SimpleTensor<T> src_tile{ tile_dims, in.data_type() };
-template <typename T>
-void winograd_input_transform3x3(const SimpleTensor<T> &src, SimpleTensor<T> &dst, const PadStrideInfo &conv_info)
-{
- TensorShape shape4x4(4u, 4u);
-
- // Simple tensor for the 4x4 input tile
- SimpleTensor<T> src_tile{ shape4x4, src.data_type() };
+ // Simple tensor for the temporary tile
+ SimpleTensor<T> tmp_tile{ tile_dims, in.data_type() };
- // Simple tensor for the 4x4 temporary tile
- SimpleTensor<T> tmp_tile{ shape4x4, src.data_type() };
-
- // Simple tensor for the 4x4 output tile
- SimpleTensor<T> dst_tile{ shape4x4, src.data_type() };
+ // Simple tensor for the output tile
+ SimpleTensor<T> dst_tile{ tile_dims, in.data_type() };
// Simple tensor for the transformation matrix
- SimpleTensor<T> matrix{ shape4x4, src.data_type() };
+ SimpleTensor<T> matrix{ tile_dims, in.data_type() };
// Simple tensor for the transformation matrix transposed
- SimpleTensor<T> matrix_transposed{ shape4x4, src.data_type() };
-
- const float matrix_values[] = { 1.f, 0.f, -1.f, 0.f,
- 0.f, 1.f, 1.f, 0.f,
- 0.f, -1.f, 1.f, 0.f,
- 0.f, 1.f, 0.f, -1.f
- };
+ SimpleTensor<T> matrix_transposed{ tile_dims, in.data_type() };
- for(int i = 0; i < matrix.num_elements(); ++i)
- {
- matrix[i] = matrix_values[i];
- }
+ // Initialize matrix for the input transform
+ initialize_matrix_transform(matrix, output_tile_size, kernel_size, WinogradTransformType::INPUT);
+ // Transpose matrix
transpose_matrix(matrix, matrix_transposed);
- const int in_w = src.shape().x();
- const int in_h = src.shape().y();
- const int in_d = src.shape().z();
- const int num_batches = src.shape().total_size() / (in_w * in_h * in_d);
- const int num_tiles_x = std::ceil((in_w - 2 + conv_info.pad_left() + conv_info.pad_right()) / 2.0f);
- const int num_tiles_y = std::ceil((in_h - 2 + conv_info.pad_top() + conv_info.pad_bottom()) / 2.0f);
+ const int in_w = in.shape().x();
+ const int in_h = in.shape().y();
+ const int in_d = in.shape().z();
+ const int out_d = out.shape().z();
+ const int num_batches = in.shape().total_size() / (in_w * in_h * in_d);
+ const int num_tiles_x = std::ceil((in_w - (kernel_size.width - 1) + conv_info.pad_left() + conv_info.pad_right()) / static_cast<float>(output_tile_size.width));
+ const int num_tiles_y = std::ceil((in_h - (kernel_size.height - 1) + conv_info.pad_top() + conv_info.pad_bottom()) / static_cast<float>(output_tile_size.height));
+ const int step_x = output_tile_size.width;
+ const int step_y = output_tile_size.height;
- ARM_COMPUTE_ERROR_ON((num_tiles_x * num_tiles_y) != static_cast<int>(dst.shape().y()));
+ ARM_COMPUTE_ERROR_ON((num_tiles_x * num_tiles_y) != static_cast<int>(out.shape().y()));
for(int b = 0; b < num_batches; ++b)
{
@@ -195,61 +199,154 @@ void winograd_input_transform3x3(const SimpleTensor<T> &src, SimpleTensor<T> &ds
{
for(int x = 0; x < num_tiles_x; ++x)
{
- int xi = x * 2 - conv_info.pad_left();
- int yi = y * 2 - conv_info.pad_top();
+ int xi = x * step_x - conv_info.pad_left();
+ int yi = y * step_y - conv_info.pad_top();
- // Get the 4x4 tile from the input tensor
- get_tile(src, src_tile, Coordinates(xi, yi, z, b));
+ // Get the tile from the input tensor
+ get_tile(in, src_tile, Coordinates(xi, yi, z, b));
// Compute the transformation
matrix_multiply(matrix, src_tile, tmp_tile);
matrix_multiply(tmp_tile, matrix_transposed, dst_tile);
- // Store the 4x4 output tile across the 16 channels
- for(int i = 0; i < 16; ++i)
+ // Store the output tile across the channels
+ for(int i = 0; i < out_d; ++i)
{
int xo = z;
int yo = x + y * num_tiles_x;
- dst[coords2index(dst.shape(), Coordinates(xo, yo, i, b))] = dst_tile[i];
+ out[coords2index(out.shape(), Coordinates(xo, yo, i, b))] = dst_tile[i];
}
}
}
}
}
+
+ return out;
}
template <typename T>
-void winograd_output_transform3x3(const SimpleTensor<T> &in, SimpleTensor<T> &out, int num_tiles_x)
+SimpleTensor<T> winograd_filter_transform(const SimpleTensor<T> &in, const TensorShape &output_shape, const WinogradInfo &winograd_info)
{
- ARM_COMPUTE_ERROR_ON(in.shape()[2] != 16);
+ ARM_COMPUTE_ERROR_ON_MSG(in.data_layout() != DataLayout::NCHW, "Only supported NCHW data format");
+
+ // Create reference
+ SimpleTensor<T> out{ output_shape, in.data_type(), 1 };
+
+ const Size2D output_tile_size = winograd_info.output_tile_size;
+ const Size2D kernel_size = winograd_info.kernel_size;
+
+ TensorShape kernel_tile_dims(kernel_size.width, kernel_size.height);
+
+ // Calculate dimensions for the tile
+ const unsigned int input_tile_w = output_tile_size.width + kernel_size.width - 1;
+ const unsigned int input_tile_h = output_tile_size.height + kernel_size.height - 1;
+ const unsigned int input_tile_area = input_tile_w * input_tile_h;
+
+ // Simple tensor for the input tile
+ SimpleTensor<T> input_tile{ kernel_tile_dims, in.data_type(), 1 };
+
+ // Simple tensor for the transformation matrix
+ SimpleTensor<T> trans_matrix{ TensorShape(kernel_tile_dims[0], input_tile_w), in.data_type(), 1 };
+
+ // Simple tensor for the transformation matrix transpose
+ SimpleTensor<T> trans_matrix_transposed{ TensorShape(input_tile_w, kernel_tile_dims[0]), in.data_type(), 1 };
+
+ // Simple tensor for the temporary tile
+ SimpleTensor<T> tmp_tile{ TensorShape(kernel_tile_dims[0], input_tile_w), in.data_type(), 1 };
+
+ // Simple tensor for the output tile
+ SimpleTensor<T> transf_tile{ TensorShape(input_tile_w, input_tile_w), in.data_type(), 1 };
+
+ // Initialize matrix for the filter transform
+ initialize_matrix_transform(trans_matrix, output_tile_size, kernel_size, WinogradTransformType::FILTER);
+
+ // Transpose the transformation matrix
+ transpose_matrix(trans_matrix, trans_matrix_transposed);
+
+ const int num_channels = in.shape()[2];
+ const int num_filters = in.shape()[3];
+ const int num_batches = in.shape().total_size() / (kernel_size.area() * num_channels * num_filters);
+
+ for(int n = 0; n < num_batches; ++n)
+ {
+ for(int w = 0; w < num_filters; ++w)
+ {
+ for(int z = 0; z < num_channels; ++z)
+ {
+ // Load the tile from the input tensor
+ get_tile(in, input_tile, Coordinates(0, 0, z, w, n));
+
+ // First transformation
+ matrix_multiply(trans_matrix, input_tile, tmp_tile);
+
+ // Second transformation
+ matrix_multiply(tmp_tile, trans_matrix_transposed, transf_tile);
+
+ // Compute the output offset for this (filter, channel) pair
+ const int output_offset = w + z * num_filters;
+
+ // Store the values across the channels
+ for(unsigned int i = 0; i < input_tile_area; ++i)
+ {
+ out[output_offset + i * num_filters * num_channels] = transf_tile[i];
+ }
+ }
+ }
+ }
+
+ return out;
+}
+
+template <typename T>
+SimpleTensor<T> winograd_output_transform(const SimpleTensor<T> &in, const TensorShape &output_shape, const WinogradInfo &winograd_info)
+{
+ ARM_COMPUTE_ERROR_ON_MSG(winograd_info.output_data_layout != DataLayout::NCHW, "Only supported NCHW data format");
+
+ const PadStrideInfo conv_info = winograd_info.convolution_info;
+ const Size2D input_dimensions = winograd_info.input_dimensions;
+ const Size2D output_tile_size = winograd_info.output_tile_size;
+ const Size2D kernel_size = winograd_info.kernel_size;
+
+ // Create reference
+ SimpleTensor<T> out{ output_shape, in.data_type(), 1 };
+
+ // Calculate dimensions for the tiles
+ const unsigned int in_tile_w = output_tile_size.width + kernel_size.width - 1;
+ const unsigned int in_tile_h = output_tile_size.height + kernel_size.height - 1;
+ const unsigned int out_tile_w = output_tile_size.width;
+ const unsigned int out_tile_h = output_tile_size.height;
+
+ ARM_COMPUTE_ERROR_ON(in.shape()[2] != (in_tile_w * in_tile_h));
ARM_COMPUTE_ERROR_ON(in.shape()[0] != out.shape()[2]);
- // Simple tensor for the 3x3 input tile
- SimpleTensor<T> input_tile{ TensorShape(4u, 4u), in.data_type(), 1 };
+ // Compute tile dimensions
+ // Input tile dimensions
+ TensorShape in_tile_dims(in_tile_w, in_tile_h);
+
+ // Output tile dimensions
+ TensorShape out_tile_dims(output_tile_size.width, output_tile_size.height);
+
+ // Transformation matrix dimensions
+ TensorShape tr_tile_dims(in_tile_w, output_tile_size.width);
+
+ // Create tensors
+ // Simple tensor for the input tile
+ SimpleTensor<T> input_tile{ in_tile_dims, in.data_type(), 1 };
// Simple tensor for the transformation matrix
- SimpleTensor<T> trans_matrix{ TensorShape(4u, 2u), in.data_type(), 1 };
+ SimpleTensor<T> trans_matrix{ tr_tile_dims, in.data_type(), 1 };
// Simple tensor for the transformation matrix transpose
- SimpleTensor<T> trans_matrix_transposed{ TensorShape(2u, 4u), in.data_type(), 1 };
-
- // Simple tensor for the 4x3 temporary tile
- SimpleTensor<T> tmp_tile{ TensorShape(4u, 2u), in.data_type(), 1 };
-
- // Simple tensor for the 4x4 output tile
- SimpleTensor<T> output_tile{ TensorShape(2u, 2u), in.data_type(), 1 };
-
- // Initialize transformation matrix
- // 1 | 1 | 1 | 1
- // 0 | 1 | -1 | -1
- trans_matrix[0 + 0 * 4] = 1.0f;
- trans_matrix[1 + 0 * 4] = 1.0f;
- trans_matrix[2 + 0 * 4] = 1.0f;
- trans_matrix[3 + 0 * 4] = 0.0f;
- trans_matrix[0 + 1 * 4] = 0.0f;
- trans_matrix[1 + 1 * 4] = 1.0f;
- trans_matrix[2 + 1 * 4] = -1.0f;
- trans_matrix[3 + 1 * 4] = -1.0f;
+ SimpleTensor<T> trans_matrix_transposed{ TensorShape(tr_tile_dims[1], tr_tile_dims[0]), in.data_type(), 1 };
+
+ // Simple tensor for the temporary tile
+ SimpleTensor<T> tmp_tile{ tr_tile_dims, in.data_type(), 1 };
+
+ // Simple tensor for the output tile
+ SimpleTensor<T> output_tile{ out_tile_dims, in.data_type(), 1 };
+
+ // Initialize matrix for the output transform
+ initialize_matrix_transform(trans_matrix, output_tile_size, kernel_size, WinogradTransformType::OUTPUT);
// Transpose the transformation matrix
transpose_matrix(trans_matrix, trans_matrix_transposed);
@@ -272,13 +369,22 @@ void winograd_output_transform3x3(const SimpleTensor<T> &in, SimpleTensor<T> &ou
const int stridez_out = stridey_out * h_out;
const int stridew_out = stridez_out * c_out;
+ // Compute number of elements to process in the X and Y direction
+ const int num_elements_x = input_dimensions.width - (kernel_size.width - 1) + conv_info.pad_left() + conv_info.pad_right();
+ const int num_elements_y = input_dimensions.height - (kernel_size.height - 1) + conv_info.pad_top() + conv_info.pad_bottom();
+ const int num_tiles_x = std::ceil(num_elements_x / static_cast<float>(output_tile_size.width));
+ const int num_tiles_y = std::ceil(num_elements_y / static_cast<float>(output_tile_size.height));
+
+ ARM_COMPUTE_UNUSED(num_tiles_y);
+ ARM_COMPUTE_ERROR_ON(in.shape()[1] != static_cast<unsigned int>(num_tiles_x * num_tiles_y));
+
for(int n = 0; n < num_batches; ++n)
{
for(int y = 0; y < h_in; ++y)
{
for(int x = 0; x < w_in; ++x)
{
- // Load the 4x4 tile across the 16 channels of the input tensor
+ // Load the input tile across the channels of the input tensor
for(int z = 0; z < c_in; ++z)
{
input_tile[z] = in[x + (y * stridey_in) + (z * stridez_in) + (n * stridew_in)];
@@ -290,102 +396,34 @@ void winograd_output_transform3x3(const SimpleTensor<T> &in, SimpleTensor<T> &ou
// Second transformation
matrix_multiply(tmp_tile, trans_matrix_transposed, output_tile);
- // Store the 2x2 output tile
- const int xo = (y % num_tiles_x) * 2;
- const int yo = (y / num_tiles_x) * 2;
+ // Store the output tile
+ const int xo = (y % num_tiles_x) * out_tile_w;
+ const int yo = (y / num_tiles_x) * out_tile_h;
const int zo = x;
- const int output_offset = xo + (yo * stridey_out) + (zo * stridez_out) + (n * stridew_out);
- out[output_offset + 0 * stridey_out + 0] = output_tile[0 + 0 * 2];
-
- // Check out-of-bound writes
- if(xo + 1 < w_out)
- {
- out[output_offset + 0 * stridey_out + 1] = output_tile[1 + 0 * 2];
- }
-
- if(yo + 1 < h_out)
- {
- out[output_offset + 1 * stridey_out + 0] = output_tile[0 + 1 * 2];
- }
+ const int output_offset = xo + (yo * stridey_out) + (zo * stridez_out) + (n * stridew_out);
- if((yo + 1 < h_out) && (xo + 1 < w_out))
+ for(int yi = 0; yi < static_cast<int>(out_tile_h); ++yi)
{
- out[output_offset + 1 * stridey_out + 1] = output_tile[1 + 1 * 2];
+ for(int xi = 0; xi < static_cast<int>(out_tile_w); ++xi)
+ {
+ // Check out-of-bound writes
+ if((xo + xi < w_out) && (yo + yi < h_out))
+ {
+ out[output_offset + yi * stridey_out + xi] = output_tile[xi + yi * out_tile_w];
+ }
+ }
}
}
}
}
-}
-} // namespace
-
-template <typename T>
-SimpleTensor<T> winograd_input_transform(const SimpleTensor<T> &src, const TensorShape &dst_shape, const PadStrideInfo &conv_info, const Size2D &kernel_dims)
-{
- ARM_COMPUTE_ERROR_ON(kernel_dims.width != kernel_dims.height);
- ARM_COMPUTE_ERROR_ON(src.data_layout() != DataLayout::NCHW);
-
- SimpleTensor<T> dst{ dst_shape, src.data_type() };
-
- switch(kernel_dims.width)
- {
- case 3:
- winograd_input_transform3x3(src, dst, conv_info);
- break;
- default:
- ARM_COMPUTE_ERROR("Only 3x3 kernels are supported");
- }
-
- return dst;
-}
-
-template <typename T>
-SimpleTensor<T> winograd_filter_transform(const SimpleTensor<T> &in, const TensorShape &output_shape, const Size2D &output_tile)
-{
- ARM_COMPUTE_ERROR_ON_MSG(in.data_layout() != DataLayout::NCHW, "Only supported NCHW data format");
-
- // Create reference
- SimpleTensor<T> out{ output_shape, in.data_type(), 1 };
-
- switch(in.shape()[0])
- {
- case 3:
- winograd_filter_transform3x3(in, out, output_tile);
- break;
- default:
- ARM_COMPUTE_ERROR("Only supported 3x3 kernel");
- break;
- }
-
- return out;
-}
-
-template <typename T>
-SimpleTensor<T> winograd_output_transform(const SimpleTensor<T> &in, const TensorShape &output_shape, const Size2D &kernel_dims, const Size2D &num_tiles)
-{
- ARM_COMPUTE_ERROR_ON_MSG(in.data_layout() != DataLayout::NCHW, "Only supported NCHW data format");
- ARM_COMPUTE_ERROR_ON(kernel_dims.width != kernel_dims.height);
- ARM_COMPUTE_ERROR_ON(in.shape()[1] != num_tiles.area());
-
- // Create reference
- SimpleTensor<T> out{ output_shape, in.data_type(), 1 };
-
- switch(kernel_dims.width)
- {
- case 3:
- winograd_output_transform3x3(in, out, num_tiles.width);
- break;
- default:
- ARM_COMPUTE_ERROR("Only supported 3x3 kernel");
- break;
- }
return out;
}
-template SimpleTensor<float> winograd_input_transform(const SimpleTensor<float> &src, const TensorShape &dst_shape, const PadStrideInfo &conv_info, const Size2D &kernel_dims);
-template SimpleTensor<float> winograd_filter_transform(const SimpleTensor<float> &in, const TensorShape &output_shape, const Size2D &output_tile);
-template SimpleTensor<float> winograd_output_transform(const SimpleTensor<float> &in, const TensorShape &output_shape, const Size2D &kernel_dims, const Size2D &num_tiles);
+template SimpleTensor<float> winograd_filter_transform(const SimpleTensor<float> &in, const TensorShape &output_shape, const WinogradInfo &winograd_info);
+template SimpleTensor<float> winograd_input_transform(const SimpleTensor<float> &in, const TensorShape &output_shape, const WinogradInfo &winograd_info);
+template SimpleTensor<float> winograd_output_transform(const SimpleTensor<float> &in, const TensorShape &output_shape, const WinogradInfo &winograd_info);
} // namespace reference
} // namespace validation
} // namespace test
diff --git a/tests/validation/reference/Winograd.h b/tests/validation/reference/Winograd.h
index 62e136b09d..29181f1142 100644
--- a/tests/validation/reference/Winograd.h
+++ b/tests/validation/reference/Winograd.h
@@ -36,14 +36,22 @@ namespace validation
{
namespace reference
{
+/** Winograd transform type */
+enum class WinogradTransformType
+{
+ INPUT, /**< Winograd input transform */
+ FILTER, /**< Winograd filter transform */
+ OUTPUT /**< Winograd output transform */
+};
+
template <typename T>
-SimpleTensor<T> winograd_input_transform(const SimpleTensor<T> &src, const TensorShape &dst_shape, const PadStrideInfo &conv_info, const Size2D &kernel_dims);
+SimpleTensor<T> winograd_input_transform(const SimpleTensor<T> &in, const TensorShape &output_shape, const WinogradInfo &winograd_info);
template <typename T>
-SimpleTensor<T> winograd_filter_transform(const SimpleTensor<T> &in, const TensorShape &output_shape, const Size2D &output_tile);
+SimpleTensor<T> winograd_filter_transform(const SimpleTensor<T> &in, const TensorShape &output_shape, const WinogradInfo &winograd_info);
template <typename T>
-SimpleTensor<T> winograd_output_transform(const SimpleTensor<T> &in, const TensorShape &output_shape, const Size2D &kernel_dims, const Size2D &num_tiles);
+SimpleTensor<T> winograd_output_transform(const SimpleTensor<T> &in, const TensorShape &output_shape, const WinogradInfo &winograd_info);
} // namespace reference
} // namespace validation
} // namespace test