author    Gian Marco Iodice <gianmarco.iodice@arm.com>  2018-02-22 16:17:20 +0000
committer Anthony Barbier <anthony.barbier@arm.com>     2018-11-02 16:49:16 +0000
commit    7e4b23953e885e58d655a7d9f35a1afcc38365e4 (patch)
tree      4f5a3f6535aae10a36482bd4f996d3427ac77080
parent    66c656a1d10831d8311f7797b285faa2c30bcb3f (diff)
COMPMID-935 - Implementing Convolution with Winograd on OpenCL (part 2)
Implemented Winograd Filter Transform 3x3 on OpenCL

Change-Id: I8f2b2dd938c5c000ef7ce392a37fb7b8b4202a4e
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/122708
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
Tested-by: Jenkins <bsgcomp@arm.com>
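For reference, the transform implemented by this patch is the F(2x2, 3x3) Winograd weight transform: each 3x3 filter plane g is mapped to a 4x4 tile using the matrix G below, which is exactly what the OpenCL kernel rows and the test reference's trans_matrix compute.

```latex
G =
\begin{bmatrix}
 1            & 0             & 0            \\
 \tfrac{1}{2} & \tfrac{1}{2}  & \tfrac{1}{2} \\
 \tfrac{1}{2} & -\tfrac{1}{2} & \tfrac{1}{2} \\
 0            & 0             & 1
\end{bmatrix},
\qquad
\tilde{g} = G \, g \, G^{\top} \in \mathbb{R}^{4 \times 4}
```

The 16 coefficients of each tile end up along the Z dimension of the [OFM, IFM, 16] output tensor produced by the new kernel.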
-rw-r--r--  arm_compute/core/CL/CLKernels.h                                |   1
-rw-r--r--  arm_compute/core/CL/kernels/CLWinogradFilterTransformKernel.h  |  74
-rw-r--r--  arm_compute/core/utils/misc/ShapeCalculator.h                  |  25
-rw-r--r--  src/core/CL/CLKernelLibrary.cpp                                |   1
-rw-r--r--  src/core/CL/cl_kernels/winograd.cl                             |  97
-rw-r--r--  src/core/CL/kernels/CLWinogradFilterTransformKernel.cpp        | 139
-rw-r--r--  tests/datasets/ShapeDatasets.h                                 |  32
-rw-r--r--  tests/datasets/WinogradFilterTransformDataset.h                | 128
-rw-r--r--  tests/validation/CL/Winograd.cpp                               |  85
-rw-r--r-- [-rwxr-xr-x]  tests/validation/Helpers.h                        |   0
-rw-r--r--  tests/validation/fixtures/WinogradLayerFixture.h               |  84
-rw-r--r--  tests/validation/reference/Winograd.cpp                        | 105
-rw-r--r--  tests/validation/reference/Winograd.h                          |   5
13 files changed, 767 insertions(+), 9 deletions(-)
diff --git a/arm_compute/core/CL/CLKernels.h b/arm_compute/core/CL/CLKernels.h
index ca2cb0411a..ef629c2e81 100644
--- a/arm_compute/core/CL/CLKernels.h
+++ b/arm_compute/core/CL/CLKernels.h
@@ -109,6 +109,7 @@
#include "arm_compute/core/CL/kernels/CLWarpAffineKernel.h"
#include "arm_compute/core/CL/kernels/CLWarpPerspectiveKernel.h"
#include "arm_compute/core/CL/kernels/CLWeightsReshapeKernel.h"
+#include "arm_compute/core/CL/kernels/CLWinogradFilterTransformKernel.h"
#include "arm_compute/core/CL/kernels/CLWinogradInputTransformKernel.h"
#endif /* __ARM_COMPUTE_CLKERNELS_H__ */
diff --git a/arm_compute/core/CL/kernels/CLWinogradFilterTransformKernel.h b/arm_compute/core/CL/kernels/CLWinogradFilterTransformKernel.h
new file mode 100644
index 0000000000..ec5e51482a
--- /dev/null
+++ b/arm_compute/core/CL/kernels/CLWinogradFilterTransformKernel.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_CLWINOGRADFILTERTRANSFORMKERNEL_H__
+#define __ARM_COMPUTE_CLWINOGRADFILTERTRANSFORMKERNEL_H__
+
+#include "arm_compute/core/CL/ICLKernel.h"
+
+namespace arm_compute
+{
+class ICLTensor;
+
+/** Interface for the Winograd filter transform kernel. */
+class CLWinogradFilterTransformKernel : public ICLKernel
+{
+public:
+ /** Default constructor */
+ CLWinogradFilterTransformKernel();
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
+ CLWinogradFilterTransformKernel(const CLWinogradFilterTransformKernel &) = delete;
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
+ CLWinogradFilterTransformKernel &operator=(const CLWinogradFilterTransformKernel &) = delete;
+ /** Allow instances of this class to be moved */
+ CLWinogradFilterTransformKernel(CLWinogradFilterTransformKernel &&) = default;
+ /** Allow instances of this class to be moved */
+ CLWinogradFilterTransformKernel &operator=(CLWinogradFilterTransformKernel &&) = default;
+ /** Default destructor */
+ ~CLWinogradFilterTransformKernel() = default;
+ /** Set the input and output tensor.
+ *
+ * @param[in] input Source tensor. The input is a 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM] (NCHW data layout).
+ * kernel_x must be 3 and equal to kernel_y. Data types supported: F32.
+ * @param[out] output Destination tensor. The output is a 3D tensor with dimensions [OFM, IFM, 16]. Data type supported: same as @p input
+ */
+ void configure(const ICLTensor *input, ICLTensor *output);
+ /** Static function to check if given info will lead to a valid configuration of @ref CLWinogradFilterTransformKernel
+ *
+ * @param[in] input Source tensor info. The input is a 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM] (NCHW data layout).
+ * kernel_x must be 3 and equal to kernel_y. Data types supported: F32.
+ * @param[in] output Destination tensor info. The output is a 3D tensor with dimensions [OFM, IFM, 16]. Data type supported: same as @p input
+ *
+ * @return a status
+ */
+ static Status validate(const ITensorInfo *input, const ITensorInfo *output);
+
+ // Inherited methods overridden:
+ void run(const Window &window, cl::CommandQueue &queue) override;
+
+private:
+ const ICLTensor *_input;
+ ICLTensor *_output;
+};
+} // namespace arm_compute
+#endif /*__ARM_COMPUTE_CLWINOGRADFILTERTRANSFORMKERNEL_H__ */
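A minimal host-side usage sketch of the new kernel (not part of the patch; it assumes a CL context set up via CLScheduler::get().default_init() and illustrative tensor sizes):

```cpp
#include "arm_compute/core/CL/kernels/CLWinogradFilterTransformKernel.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "arm_compute/runtime/CL/CLTensor.h"

using namespace arm_compute;

int main()
{
    CLScheduler::get().default_init();

    // 3x3 weights with 16 input and 32 output feature maps (NCHW)
    CLTensor weights;
    weights.allocator()->init(TensorInfo(TensorShape(3U, 3U, 16U, 32U), 1, DataType::F32));

    // Left uninitialised: configure() auto-initialises it to [OFM, IFM, 16] = [32, 16, 16]
    CLTensor transformed;

    CLWinogradFilterTransformKernel transform;
    transform.configure(&weights, &transformed);

    weights.allocator()->allocate();
    transformed.allocator()->allocate();
    // ... fill the weights (e.g. by mapping the tensor) ...

    CLScheduler::get().enqueue(transform);
    CLScheduler::get().sync();
    return 0;
}
```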
diff --git a/arm_compute/core/utils/misc/ShapeCalculator.h b/arm_compute/core/utils/misc/ShapeCalculator.h
index 354f60d016..9cb8023463 100644
--- a/arm_compute/core/utils/misc/ShapeCalculator.h
+++ b/arm_compute/core/utils/misc/ShapeCalculator.h
@@ -184,7 +184,7 @@ inline TensorShape compute_fully_connected_reshaped_weights_shape(const ITensorI
output_shape = compute_transposed_shape(*input);
}
- // If the we run multiple batches we need 1xW transpose, too.
+ // If we run multiple batches we need 1xW transpose, too.
if(is_batched_fc_layer)
{
output_shape = compute_transposed_shape(input->clone()->set_tensor_shape(output_shape));
@@ -193,6 +193,29 @@ inline TensorShape compute_fully_connected_reshaped_weights_shape(const ITensorI
return output_shape;
}
+
+inline TensorShape compute_winograd_filter_transform_shape(const ITensorInfo &input)
+{
+ // COMPMID-984 (giaiod01)
+ TensorShape tensor_shape{ input.tensor_shape() };
+
+ if(input.data_layout() == DataLayout::NCHW)
+ {
+ tensor_shape.remove_dimension(0);
+ tensor_shape.set(Window::DimX, input.dimension(3));
+ tensor_shape.set(Window::DimY, input.dimension(2));
+ tensor_shape.set(Window::DimZ, 16);
+ }
+ else
+ {
+ tensor_shape.remove_dimension(1);
+ tensor_shape.set(Window::DimY, input.dimension(2));
+ tensor_shape.set(Window::DimZ, 16);
+ }
+
+ return tensor_shape;
+}
+
inline TensorShape compute_winograd_input_transform_shape(const ITensorInfo &input, const PadStrideInfo &conv_info, const Size2D &kernel_size)
{
// Compute height
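A quick worked example of the compute_winograd_filter_transform_shape() helper added above (values are illustrative, not part of the patch): a 3x3 NCHW weight tensor with 64 IFMs and 128 OFMs maps to 16 planes of size OFM x IFM.

```cpp
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"

using namespace arm_compute;
using namespace arm_compute::misc::shape_calculator;

// [kernel_x, kernel_y, IFM, OFM] = [3, 3, 64, 128], NCHW layout
const TensorInfo  weights_info(TensorShape(3U, 3U, 64U, 128U), 1, DataType::F32);
const TensorShape transformed = compute_winograd_filter_transform_shape(weights_info);
// transformed == [128, 64, 16]: x = OFM, y = IFM, z = 16 transform coefficients
```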
diff --git a/src/core/CL/CLKernelLibrary.cpp b/src/core/CL/CLKernelLibrary.cpp
index 40aceb702a..4b7fa8a3b3 100644
--- a/src/core/CL/CLKernelLibrary.cpp
+++ b/src/core/CL/CLKernelLibrary.cpp
@@ -351,6 +351,7 @@ const std::map<std::string, std::string> CLKernelLibrary::_kernel_program_map =
{ "warp_affine_bilinear", "warp_affine.cl" },
{ "warp_perspective_nearest_neighbour", "warp_perspective.cl" },
{ "warp_perspective_bilinear", "warp_perspective.cl" },
+ { "winograd_filter_transform_2x2_3x3_nchw", "winograd.cl" },
{ "winograd_input_transform_2x2_3x3_stepz1_nchw", "winograd.cl" },
{ "winograd_input_transform_2x2_3x3_stepz2_nchw", "winograd.cl" },
{ "YUYV422_to_IYUV_bt709", "color_convert.cl" },
diff --git a/src/core/CL/cl_kernels/winograd.cl b/src/core/CL/cl_kernels/winograd.cl
index fa06601c50..238e21a18a 100644
--- a/src/core/CL/cl_kernels/winograd.cl
+++ b/src/core/CL/cl_kernels/winograd.cl
@@ -205,4 +205,99 @@ __kernel void winograd_input_transform_2x2_3x3_stepz2_nchw(
vstore2(out32, 0, (__global float *)(dst_addr + 14 * dst_stride_z));
vstore2(out33, 0, (__global float *)(dst_addr + 15 * dst_stride_z));
}
-#endif //defined(NUM_TILES_X)
\ No newline at end of file
+#endif //defined(NUM_TILES_X)
+
+#if defined(NUM_CHANNELS)
+
+/** This OpenCL kernel performs the Winograd filter transform 3x3 when the data layout is NCHW and the output tile is 2x2
+ *
+ * @note The number of channels must be passed at compile time using -DNUM_CHANNELS: e.g. -DNUM_CHANNELS=64
+ *
+ * @param[in] src_ptr Pointer to the source tensor. Supported data types: F32
+ * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
+ * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
+ * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes)
+ * @param[in] src_step_w src_stride_w * number of elements along W processed per workitem(in bytes)
+ * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
+ * @param[out] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
+ * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
+ * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ */
+__kernel void winograd_filter_transform_2x2_3x3_nchw(
+ TENSOR4D_DECLARATION(src),
+ TENSOR3D_DECLARATION(dst))
+{
+ Tensor4D src = CONVERT_TO_TENSOR4D_STRUCT(src, NUM_CHANNELS);
+
+ const __global uchar *src_addr = tensor4D_offset(&src, 0, 0, 0, 0);
+
+ // Load the values from the input tensor
+ float3 w0 = vload3(0, (__global float *)(src_addr + 0 * src_stride_y));
+ float3 w1 = vload3(0, (__global float *)(src_addr + 1 * src_stride_y));
+ float3 w2 = vload3(0, (__global float *)(src_addr + 2 * src_stride_y));
+
+ // Transform the 3x3 tile into a 4x4 tile
+ float4 out0 = 0.0f;
+ float4 out1 = 0.0f;
+ float4 out2 = 0.0f;
+ float4 out3 = 0.0f;
+
+ // Row 0
+ out0.s0 = (w0.s0);
+ out0.s1 = (w0.s0 + w0.s1 + w0.s2) * 0.5f;
+ out0.s2 = (w0.s0 + w0.s2 - w0.s1) * 0.5f;
+ out0.s3 = (w0.s2);
+
+ // Row 1
+ out1.s0 = (w0.s0 + w1.s0 + w2.s0) * 0.5f;
+ out1.s1 = (w0.s0 + w1.s0 + w2.s0 + w0.s1 + w1.s1 + w2.s1 + w0.s2 + w1.s2 + w2.s2) * 0.25f;
+ out1.s2 = (w0.s0 + w1.s0 + w2.s0 + w0.s2 + w1.s2 + w2.s2 - w0.s1 - w1.s1 - w2.s1) * 0.25f;
+ out1.s3 = (w0.s2 + w1.s2 + w2.s2) * 0.5f;
+
+ // Row 2
+ out2.s0 = (w0.s0 + w2.s0 - w1.s0) * 0.5f;
+ out2.s1 = (w0.s0 + w2.s0 + w0.s1 + w2.s1 + w0.s2 + w2.s2 - w1.s0 - w1.s1 - w1.s2) * 0.25f;
+ out2.s2 = (w0.s0 + w2.s0 + w1.s1 + w0.s2 + w2.s2 - w1.s0 - w0.s1 - w2.s1 - w1.s2) * 0.25f;
+ out2.s3 = (w0.s2 + w2.s2 - w1.s2) * 0.5f;
+
+ // Row 3
+ out3.s0 = (w2.s0);
+ out3.s1 = (w2.s0 + w2.s1 + w2.s2) * 0.5f;
+ out3.s2 = (w2.s0 + w2.s2 - w2.s1) * 0.5f;
+ out3.s3 = (w2.s2);
+
+ int z = get_global_id(2);
+ int x0 = z / NUM_CHANNELS; // idx filter
+ int y0 = z % NUM_CHANNELS; // idx channel
+
+ // Get output address
+ __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + x0 * dst_stride_x + y0 * dst_stride_y;
+
+ // Store the 16 values across the 16 channels
+ *(__global float *)(dst_addr + 0 * dst_stride_z) = out0.s0;
+ *(__global float *)(dst_addr + 1 * dst_stride_z) = out0.s1;
+ *(__global float *)(dst_addr + 2 * dst_stride_z) = out0.s2;
+ *(__global float *)(dst_addr + 3 * dst_stride_z) = out0.s3;
+ *(__global float *)(dst_addr + 4 * dst_stride_z) = out1.s0;
+ *(__global float *)(dst_addr + 5 * dst_stride_z) = out1.s1;
+ *(__global float *)(dst_addr + 6 * dst_stride_z) = out1.s2;
+ *(__global float *)(dst_addr + 7 * dst_stride_z) = out1.s3;
+ *(__global float *)(dst_addr + 8 * dst_stride_z) = out2.s0;
+ *(__global float *)(dst_addr + 9 * dst_stride_z) = out2.s1;
+ *(__global float *)(dst_addr + 10 * dst_stride_z) = out2.s2;
+ *(__global float *)(dst_addr + 11 * dst_stride_z) = out2.s3;
+ *(__global float *)(dst_addr + 12 * dst_stride_z) = out3.s0;
+ *(__global float *)(dst_addr + 13 * dst_stride_z) = out3.s1;
+ *(__global float *)(dst_addr + 14 * dst_stride_z) = out3.s2;
+ *(__global float *)(dst_addr + 15 * dst_stride_z) = out3.s3;
+}
+#endif // defined(NUM_CHANNELS)
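Note on the work-item mapping in the kernel above: the launch window is collapsed so that get_global_id(2) enumerates every (OFM, IFM) filter plane, and the 16 coefficients of each transformed tile are written along Z of the [OFM, IFM, 16] destination. A small illustrative helper (hypothetical, not part of the patch) spelling out the same index decomposition:

```cpp
#include <utility>

// Decomposes a collapsed work-item index into the (ofm, ifm) coordinates used when
// storing the transformed tile: x0 = z / NUM_CHANNELS, y0 = z % NUM_CHANNELS above.
std::pair<int, int> filter_plane_coords(int global_id_z, int num_channels)
{
    const int ofm = global_id_z / num_channels; // X coordinate of the destination
    const int ifm = global_id_z % num_channels; // Y coordinate of the destination
    return { ofm, ifm };
}
// Coefficient k (0..15) of the 4x4 tile for this plane is then written at (ofm, ifm, k).
```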
diff --git a/src/core/CL/kernels/CLWinogradFilterTransformKernel.cpp b/src/core/CL/kernels/CLWinogradFilterTransformKernel.cpp
new file mode 100644
index 0000000000..3dbbe157b2
--- /dev/null
+++ b/src/core/CL/kernels/CLWinogradFilterTransformKernel.cpp
@@ -0,0 +1,139 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/core/CL/kernels/CLWinogradFilterTransformKernel.h"
+
+#include "arm_compute/core/AccessWindowStatic.h"
+#include "arm_compute/core/CL/CLHelpers.h"
+#include "arm_compute/core/CL/CLHelpers.h"
+#include "arm_compute/core/CL/CLKernelLibrary.h"
+#include "arm_compute/core/CL/ICLTensor.h"
+#include "arm_compute/core/Helpers.h"
+#include "arm_compute/core/IAccessWindow.h"
+#include "arm_compute/core/TensorInfo.h"
+#include "arm_compute/core/Types.h"
+#include "arm_compute/core/Utils.h"
+#include "arm_compute/core/Validate.h"
+#include "arm_compute/core/Window.h"
+#include "arm_compute/core/utils/misc/ShapeCalculator.h"
+
+#include "support/ToolchainSupport.h"
+
+using namespace arm_compute;
+using namespace arm_compute::misc::shape_calculator;
+
+namespace
+{
+Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output)
+{
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F32);
+ ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(0) != 3);
+ ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(0) != input->dimension(1));
+ ARM_COMPUTE_RETURN_ERROR_ON(input->num_dimensions() > 4);
+
+ // Checks performed when output is configured
+ if(output->total_size() != 0)
+ {
+ const TensorInfo tensor_info_output = input->clone()->set_tensor_shape(compute_winograd_filter_transform_shape(*input));
+
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(output, &tensor_info_output);
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
+ }
+
+ return Status{};
+}
+
+std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, ITensorInfo *output)
+{
+ ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
+
+ constexpr unsigned int num_elems_processed_per_iteration_x = 3;
+ constexpr unsigned int num_elems_processed_per_iteration_y = 3;
+
+ Window win = calculate_max_window(*input, Steps(num_elems_processed_per_iteration_x, num_elems_processed_per_iteration_y));
+ bool window_changed = false;
+
+ AccessWindowRectangle input_access(input, 0, 0, num_elems_processed_per_iteration_x, num_elems_processed_per_iteration_y);
+ AccessWindowStatic output_access(output, 0, 0, output->dimension(0), output->dimension(1));
+ window_changed = update_window_and_padding(win, input_access, output_access);
+ output_access.set_valid_region(win, input->valid_region());
+
+ Window win_collapsed = win.collapse(win, Window::DimZ);
+
+ Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
+ return std::make_pair(err, win_collapsed);
+}
+} // namespace
+
+CLWinogradFilterTransformKernel::CLWinogradFilterTransformKernel()
+ : _input(nullptr), _output(nullptr)
+{
+}
+
+void CLWinogradFilterTransformKernel::configure(const ICLTensor *input, ICLTensor *output)
+{
+ ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
+
+ // Output tensor auto-initialization if not yet initialized
+ auto_init_if_empty(*output->info(), input->info()->clone()->set_tensor_shape(compute_winograd_filter_transform_shape(*input->info())));
+
+ ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info()));
+
+ // Set build options
+ CLBuildOptions build_opts;
+ build_opts.add_option("-DNUM_CHANNELS=" + support::cpp11::to_string(input->info()->dimension(2)));
+
+ // Create kernel
+ _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("winograd_filter_transform_2x2_3x3_nchw", build_opts.options()));
+
+ _input = input;
+ _output = output;
+
+ // Configure kernel window
+ auto win_config = validate_and_configure_window(input->info(), output->info());
+ ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
+ ICLKernel::configure(win_config.second);
+}
+
+Status CLWinogradFilterTransformKernel::validate(const ITensorInfo *input, const ITensorInfo *output)
+{
+ ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output));
+ ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(input->clone().get(), output->clone().get()).first);
+
+ return Status{};
+}
+
+void CLWinogradFilterTransformKernel::run(const Window &window, cl::CommandQueue &queue)
+{
+ ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
+ ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window);
+
+ // Setup output window
+ Window window_out;
+ window_out.use_tensor_dimensions(_output->info()->tensor_shape(), 0);
+
+ unsigned int idx = 0;
+ add_4D_tensor_argument(idx, _input, window);
+ add_3D_tensor_argument(idx, _output, window_out);
+ enqueue(queue, *this, window);
+}
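A short sketch of the static validate() pre-check added here (shapes are illustrative; not part of the patch): a 3x3 F32 weight tensor is accepted, while a 5x5 kernel is rejected, matching the checks in validate_arguments().

```cpp
#include "arm_compute/core/CL/kernels/CLWinogradFilterTransformKernel.h"
#include "arm_compute/core/TensorInfo.h"

using namespace arm_compute;

void check_support()
{
    const TensorInfo src(TensorShape(3U, 3U, 16U, 32U), 1, DataType::F32); // [kx, ky, IFM, OFM]
    const TensorInfo dst(TensorShape(32U, 16U, 16U), 1, DataType::F32);    // [OFM, IFM, 16]
    const bool ok = bool(CLWinogradFilterTransformKernel::validate(&src, &dst));      // expected: true

    const TensorInfo src5x5(TensorShape(5U, 5U, 16U, 32U), 1, DataType::F32);
    const bool bad = bool(CLWinogradFilterTransformKernel::validate(&src5x5, &dst));  // expected: false
    (void)ok;
    (void)bad;
}
```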
diff --git a/tests/datasets/ShapeDatasets.h b/tests/datasets/ShapeDatasets.h
index 4b563708e1..e939a6f5a7 100644
--- a/tests/datasets/ShapeDatasets.h
+++ b/tests/datasets/ShapeDatasets.h
@@ -238,6 +238,38 @@ public:
}
};
+/** Data set containing medium 3D tensor shapes. */
+class Medium3DShapes final : public ShapeDataset
+{
+public:
+ Medium3DShapes()
+ : ShapeDataset("Shape",
+ {
+ TensorShape{ 42U, 37U, 8U },
+ TensorShape{ 57U, 60U, 13U },
+ TensorShape{ 128U, 64U, 21U },
+ TensorShape{ 83U, 72U, 14U }
+ })
+ {
+ }
+};
+
+/** Data set containing medium 4D tensor shapes. */
+class Medium4DShapes final : public ShapeDataset
+{
+public:
+ Medium4DShapes()
+ : ShapeDataset("Shape",
+ {
+ TensorShape{ 42U, 37U, 8U, 15U },
+ TensorShape{ 57U, 60U, 13U, 8U },
+ TensorShape{ 128U, 64U, 21U, 13U },
+ TensorShape{ 83U, 72U, 14U, 5U }
+ })
+ {
+ }
+};
+
/** Data set containing large tensor shapes. */
class LargeShapes final : public ShapeDataset
{
diff --git a/tests/datasets/WinogradFilterTransformDataset.h b/tests/datasets/WinogradFilterTransformDataset.h
new file mode 100644
index 0000000000..07d0283b55
--- /dev/null
+++ b/tests/datasets/WinogradFilterTransformDataset.h
@@ -0,0 +1,128 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_TEST_WINOGRAD_FILTER_TRANSFORM_DATASET
+#define ARM_COMPUTE_TEST_WINOGRAD_FILTER_TRANSFORM_DATASET
+
+#include "utils/TypePrinter.h"
+
+#include "arm_compute/core/TensorShape.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace datasets
+{
+class WinogradFilterTransformDataset
+{
+public:
+ using type = std::tuple<TensorShape, bool>;
+
+ struct iterator
+ {
+ iterator(std::vector<TensorShape>::const_iterator a_it,
+ std::vector<bool>::const_iterator is_nchw_it)
+ : _a_it{ std::move(a_it) },
+ _is_nchw_it{ std::move(is_nchw_it) }
+ {
+ }
+
+ std::string description() const
+ {
+ std::stringstream description;
+ description << "Input=" << *_a_it << ":";
+ description << "IsNCHW=" << *_is_nchw_it << ":";
+ return description.str();
+ }
+
+ WinogradFilterTransformDataset::type operator*() const
+ {
+ return std::make_tuple(*_a_it, *_is_nchw_it);
+ }
+
+ iterator &operator++()
+ {
+ ++_a_it;
+ ++_is_nchw_it;
+
+ return *this;
+ }
+
+ private:
+ std::vector<TensorShape>::const_iterator _a_it;
+ std::vector<bool>::const_iterator _is_nchw_it;
+ };
+
+ iterator begin() const
+ {
+ return iterator(_a_shapes.begin(), _is_nchw.begin());
+ }
+
+ int size() const
+ {
+ return std::min(_a_shapes.size(), _is_nchw.size());
+ }
+
+ void add_config(TensorShape a, bool is_nchw)
+ {
+ _a_shapes.emplace_back(std::move(a));
+ _is_nchw.emplace_back(std::move(is_nchw));
+ }
+
+protected:
+ WinogradFilterTransformDataset() = default;
+ WinogradFilterTransformDataset(WinogradFilterTransformDataset &&) = default;
+
+private:
+ std::vector<TensorShape> _a_shapes{};
+ std::vector<bool> _is_nchw{};
+};
+
+class SmallWinogradFilterTransformDataset final : public WinogradFilterTransformDataset
+{
+public:
+ SmallWinogradFilterTransformDataset()
+ {
+ add_config(TensorShape(3U, 3U, 7U, 4U), true);
+ add_config(TensorShape(3U, 3U, 4U, 13U), true);
+ add_config(TensorShape(3U, 3U, 9U, 2U), true);
+ add_config(TensorShape(3U, 3U, 3U, 5U), true);
+ }
+};
+
+class LargeWinogradFilterTransformDataset final : public WinogradFilterTransformDataset
+{
+public:
+ LargeWinogradFilterTransformDataset()
+ {
+ add_config(TensorShape(3U, 3U, 32U, 64U), true);
+ add_config(TensorShape(3U, 3U, 51U, 13U), true);
+ add_config(TensorShape(3U, 3U, 53U, 47U), true);
+ add_config(TensorShape(3U, 3U, 128U, 384U), true);
+ }
+};
+} // namespace datasets
+} // namespace test
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_TEST_WINOGRAD_FILTER_TRANSFORM_DATASET */
diff --git a/tests/validation/CL/Winograd.cpp b/tests/validation/CL/Winograd.cpp
index 664b3f4ef8..0b21ed2577 100644
--- a/tests/validation/CL/Winograd.cpp
+++ b/tests/validation/CL/Winograd.cpp
@@ -18,15 +18,20 @@
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONCLCTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
+#include "arm_compute/core/CL/kernels/CLWinogradFilterTransformKernel.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "arm_compute/runtime/CL/CLTensor.h"
#include "arm_compute/runtime/CL/CLTensorAllocator.h"
#include "arm_compute/runtime/CL/functions/CLWinogradInputTransform.h"
#include "tests/CL/CLAccessor.h"
+#include "tests/CL/Helper.h"
+#include "tests/PaddingCalculator.h"
+#include "tests/datasets/ShapeDatasets.h"
+#include "tests/datasets/WinogradFilterTransformDataset.h"
#include "tests/datasets/WinogradInputTransformDataset.h"
#include "tests/framework/Asserts.h"
#include "tests/framework/Macros.h"
@@ -40,6 +45,13 @@ namespace test
{
namespace validation
{
+namespace
+{
+constexpr AbsoluteTolerance<float> tolerance_f32(0.0001f);
+} // namespace
+
+using namespace arm_compute::misc::shape_calculator;
+
TEST_SUITE(CL)
TEST_SUITE(Winograd)
@@ -125,11 +137,76 @@ FIXTURE_DATA_TEST_CASE(RunLarge, CLWinogradInputTransformFixture, framework::Dat
{
validate(CLAccessor(_target), _reference);
}
+TEST_SUITE_END() // InputTransform
+
+TEST_SUITE(FilterTransform)
+// *INDENT-OFF*
+// clang-format off
+DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(
+ framework::dataset::make("InputInfo",{
+ TensorInfo(TensorShape(3U, 3U, 5U, 3U), 1, DataType::F16), // F16 not supported
+ TensorInfo(TensorShape(3U, 3U, 5U, 3U), 1, DataType::QASYMM8), // QASYMM8 not supported
+ TensorInfo(TensorShape(5U, 5U, 5U, 3U), 1, DataType::F32), // Kernel size not supported
+ TensorInfo(TensorShape(3U, 3U), 1, DataType::F32), // valid
+ TensorInfo(TensorShape(3U, 3U, 5U, 3U), 1, DataType::F32), // valid
+ TensorInfo(TensorShape(3U, 3U, 37U, 2U), 1, DataType::F32), // valid
+ TensorInfo(TensorShape(3U, 3U, 37U, 22U), 1, DataType::F32) // valid
+ }),
+ framework::dataset::make("OutputInfo", {
+ TensorInfo(TensorShape(3U, 5U, 16U), 1, DataType::F16),
+ TensorInfo(TensorShape(3U, 5U, 16U), 1, DataType::QASYMM8),
+ TensorInfo(TensorShape(3U, 5U, 16U), 1, DataType::F32),
+ TensorInfo(TensorShape(1U, 1U, 16U), 1, DataType::F32),
+ TensorInfo(TensorShape(3U, 5U, 16U), 1, DataType::F32),
+ TensorInfo(TensorShape(2U, 37U, 16U), 1, DataType::F32),
+ TensorInfo(TensorShape(22U, 37U, 16U), 1, DataType::F32)
+ })),
+ framework::dataset::make("Expected", { false, false, false, true, true, true, true })),
+ input_info, output_info, expected)
+{
+ ARM_COMPUTE_EXPECT(bool(CLWinogradFilterTransformKernel::validate(&input_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false))) == expected, framework::LogLevel::ERRORS);
+}
+// clang-format on
+// *INDENT-ON*
-TEST_SUITE_END()
+using CLWinogradFilterTransform = CLSynthetizeFunctionWithZeroConstantBorder<CLWinogradFilterTransformKernel, 0>;
+using CLWinogradFilterTransformFixture = WinogradFilterTransformValidationFixture<CLTensor, CLAccessor, CLWinogradFilterTransform, float>;
+
+DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(framework::dataset::concat(datasets::SmallWinogradFilterTransformDataset(), datasets::LargeWinogradFilterTransformDataset()),
+ framework::dataset::make("DataType", { DataType::F32 })),
+ shape_a, is_nchw_format, data_type)
+{
+ ARM_COMPUTE_UNUSED(is_nchw_format);
+
+ TensorShape shape_b = compute_winograd_filter_transform_shape(TensorInfo(shape_a, 1, data_type));
+
+ // Create tensors
+ CLTensor a = create_tensor<CLTensor>(shape_a, data_type);
+ CLTensor b = create_tensor<CLTensor>(shape_b, data_type);
+
+ ARM_COMPUTE_EXPECT(a.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(b.info()->is_resizable(), framework::LogLevel::ERRORS);
+
+ // Create and configure function
+ CLWinogradFilterTransform winograd_filter_transform;
+ winograd_filter_transform.configure(&a, &b);
+}
+
+FIXTURE_DATA_TEST_CASE(RunSmall, CLWinogradFilterTransformFixture, framework::DatasetMode::ALL, combine(datasets::SmallWinogradFilterTransformDataset(), framework::dataset::make("DataType", { DataType::F32 })))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference, tolerance_f32);
+}
+
+FIXTURE_DATA_TEST_CASE(RunLarge, CLWinogradFilterTransformFixture, framework::DatasetMode::NIGHTLY, combine(datasets::LargeWinogradFilterTransformDataset(), framework::dataset::make("DataType", { DataType::F32 })))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference, tolerance_f32);
+}
+TEST_SUITE_END() // FilterTransform
-TEST_SUITE_END()
-TEST_SUITE_END()
+TEST_SUITE_END() // Winograd
+TEST_SUITE_END() // CL
} // namespace validation
} // namespace test
} // namespace arm_compute
diff --git a/tests/validation/Helpers.h b/tests/validation/Helpers.h
old mode 100755
new mode 100644
index b192f317b4..b192f317b4
--- a/tests/validation/Helpers.h
+++ b/tests/validation/Helpers.h
diff --git a/tests/validation/fixtures/WinogradLayerFixture.h b/tests/validation/fixtures/WinogradLayerFixture.h
index 95e331560d..bfe1efce3b 100644
--- a/tests/validation/fixtures/WinogradLayerFixture.h
+++ b/tests/validation/fixtures/WinogradLayerFixture.h
@@ -27,7 +27,6 @@
#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
-#include "arm_compute/runtime/NEON/NEScheduler.h"
#include "tests/AssetsLibrary.h"
#include "tests/Globals.h"
#include "tests/IAccessor.h"
@@ -42,8 +41,6 @@
namespace arm_compute
{
-class NEWinogradLayer;
-
namespace test
{
namespace validation
@@ -224,6 +221,87 @@ protected:
TensorType _target{};
SimpleTensor<T> _reference{};
};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class WinogradFilterTransformValidationFixture : public framework::Fixture
+{
+public:
+ template <typename...>
+ void setup(TensorShape input_shape, bool is_nchw_format, DataType data_type)
+ {
+ TensorShape output_shape = compute_winograd_filter_transform_shape(TensorInfo(input_shape, 1, data_type));
+
+ _target = compute_target(input_shape, output_shape, is_nchw_format, data_type);
+ _reference = compute_reference(input_shape, output_shape, is_nchw_format, data_type);
+ }
+
+protected:
+ template <typename U>
+ void fill(U &&tensor, int i, float min, float max)
+ {
+ switch(tensor.data_type())
+ {
+ case DataType::F32:
+ {
+ std::uniform_real_distribution<> distribution(min, max);
+ library->fill(tensor, distribution, i);
+ break;
+ }
+ default:
+ {
+ ARM_COMPUTE_ERROR("Not supported");
+ library->fill_tensor_uniform(tensor, i);
+ break;
+ }
+ }
+ }
+
+ TensorType compute_target(const TensorShape &input_shape, const TensorShape &output_shape, bool is_nchw_format, DataType data_type)
+ {
+ ARM_COMPUTE_UNUSED(is_nchw_format);
+
+ // Create tensors
+ TensorType src = create_tensor<TensorType>(input_shape, data_type);
+ TensorType dst = create_tensor<TensorType>(output_shape, data_type);
+
+ // Create and configure function
+ FunctionType filter_transform;
+ filter_transform.configure(&src, &dst);
+
+ ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+
+ // Allocate tensors
+ src.allocator()->allocate();
+ dst.allocator()->allocate();
+
+ ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+
+ // Fill tensors
+ fill(AccessorType(src), 0, -1.f, 1.f);
+
+ filter_transform.run();
+
+ return dst;
+ }
+
+ SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &output_shape, bool is_nchw_format, DataType data_type)
+ {
+ ARM_COMPUTE_ERROR_ON(!is_nchw_format);
+
+ // Create reference
+ SimpleTensor<T> src{ input_shape, data_type, 1 };
+
+ // Fill reference
+ fill(src, 0, -1.f, 1.f);
+
+ return reference::winograd_filter_transform<T>(src, output_shape);
+ }
+
+ TensorType _target{};
+ SimpleTensor<T> _reference{};
+};
} // namespace validation
} // namespace test
} // namespace arm_compute
diff --git a/tests/validation/reference/Winograd.cpp b/tests/validation/reference/Winograd.cpp
index 371bb6348e..3ed55fb9fc 100644
--- a/tests/validation/reference/Winograd.cpp
+++ b/tests/validation/reference/Winograd.cpp
@@ -26,6 +26,8 @@
#include "tests/validation/Helpers.h"
#include "tests/validation/reference/Utils.h"
+#include "arm_compute/core/Types.h"
+
namespace arm_compute
{
namespace test
@@ -108,6 +110,87 @@ void winograd_input_transform3x3(const SimpleTensor<T> &src, SimpleTensor<T> &ds
}
}
}
+
+template <typename T>
+void winograd_filter_transform3x3(const SimpleTensor<T> &in, SimpleTensor<T> &out)
+{
+ // Simple tensor for the 3x3 input tile
+ SimpleTensor<T> input_tile{ TensorShape(3u, 3u), in.data_type(), 1 };
+
+ // Simple tensor for the transformation matrix
+ SimpleTensor<T> trans_matrix{ TensorShape(3u, 4u), in.data_type(), 1 };
+
+ // Simple tensor for the transformation matrix transpose
+ SimpleTensor<T> trans_matrix_transposed{ TensorShape(4u, 3u), in.data_type(), 1 };
+
+ // Simple tensor for the 4x3 temporary tile
+ SimpleTensor<T> tmp_tile{ TensorShape(3u, 4u), in.data_type(), 1 };
+
+ // Simple tensor for the 4x4 output tile
+ SimpleTensor<T> output_tile{ TensorShape(4u, 4u), in.data_type(), 1 };
+
+ // Initialize transformation matrix
+ // 1 | 0 | 0
+ // 0.5 | 0.5 | 0.5
+ // 0.5 |-0.5 | 0.5
+ // 0 | 0 | 1
+ trans_matrix[0 + 0 * 3] = 1.0f;
+ trans_matrix[1 + 0 * 3] = 0.0f;
+ trans_matrix[2 + 0 * 3] = 0.0f;
+ trans_matrix[0 + 1 * 3] = 0.5f;
+ trans_matrix[1 + 1 * 3] = 0.5f;
+ trans_matrix[2 + 1 * 3] = 0.5f;
+ trans_matrix[0 + 2 * 3] = 0.5f;
+ trans_matrix[1 + 2 * 3] = -0.5f;
+ trans_matrix[2 + 2 * 3] = 0.5f;
+ trans_matrix[0 + 3 * 3] = 0.0f;
+ trans_matrix[1 + 3 * 3] = 0.0f;
+ trans_matrix[2 + 3 * 3] = 1.0f;
+
+ // Transpose the transformation matrix
+ transpose_matrix(trans_matrix, trans_matrix_transposed);
+
+ const int num_channels = in.shape()[2];
+ const int num_filters = in.shape()[3];
+ const int num_batches = in.shape().total_size() / (9 * num_channels * num_filters);
+
+ for(int n = 0; n < num_batches; ++n)
+ {
+ for(int w = 0; w < num_filters; ++w)
+ {
+ for(int z = 0; z < num_channels; ++z)
+ {
+ // Load the 3x3 tile from the input tensor
+ get_tile(in, input_tile, Coordinates(0, 0, z, w, n));
+
+ // First transformation
+ matrix_multiply(trans_matrix, input_tile, tmp_tile);
+
+ // Second transformation
+ matrix_multiply(tmp_tile, trans_matrix_transposed, output_tile);
+
+ // Store the 4x4 output tile across the 16 channels
+ const int output_offset = w + z * num_filters;
+ out[output_offset + 0 * num_filters * num_channels] = output_tile[0 + 0 * 4];
+ out[output_offset + 1 * num_filters * num_channels] = output_tile[1 + 0 * 4];
+ out[output_offset + 2 * num_filters * num_channels] = output_tile[2 + 0 * 4];
+ out[output_offset + 3 * num_filters * num_channels] = output_tile[3 + 0 * 4];
+ out[output_offset + 4 * num_filters * num_channels] = output_tile[0 + 1 * 4];
+ out[output_offset + 5 * num_filters * num_channels] = output_tile[1 + 1 * 4];
+ out[output_offset + 6 * num_filters * num_channels] = output_tile[2 + 1 * 4];
+ out[output_offset + 7 * num_filters * num_channels] = output_tile[3 + 1 * 4];
+ out[output_offset + 8 * num_filters * num_channels] = output_tile[0 + 2 * 4];
+ out[output_offset + 9 * num_filters * num_channels] = output_tile[1 + 2 * 4];
+ out[output_offset + 10 * num_filters * num_channels] = output_tile[2 + 2 * 4];
+ out[output_offset + 11 * num_filters * num_channels] = output_tile[3 + 2 * 4];
+ out[output_offset + 12 * num_filters * num_channels] = output_tile[0 + 3 * 4];
+ out[output_offset + 13 * num_filters * num_channels] = output_tile[1 + 3 * 4];
+ out[output_offset + 14 * num_filters * num_channels] = output_tile[2 + 3 * 4];
+ out[output_offset + 15 * num_filters * num_channels] = output_tile[3 + 3 * 4];
+ }
+ }
+ }
+}
} // namespace
template <typename T>
@@ -130,7 +213,29 @@ SimpleTensor<T> winograd_input_transform(const SimpleTensor<T> &src, const Tenso
return dst;
}
+template <typename T>
+SimpleTensor<T> winograd_filter_transform(const SimpleTensor<T> &in, const TensorShape &output_shape)
+{
+ ARM_COMPUTE_ERROR_ON_MSG(in.data_layout() != DataLayout::NCHW, "Only the NCHW data layout is supported");
+
+ // Create reference
+ SimpleTensor<T> out{ output_shape, in.data_type(), 1 };
+
+ switch(in.shape()[0])
+ {
+ case 3:
+ winograd_filter_transform3x3(in, out);
+ break;
+ default:
+ ARM_COMPUTE_ERROR("Only 3x3 kernels are supported");
+ break;
+ }
+
+ return out;
+}
+
template SimpleTensor<float> winograd_input_transform(const SimpleTensor<float> &src, const TensorShape &dst_shape, const PadStrideInfo &conv_info, const Size2D &kernel_dims);
+template SimpleTensor<float> winograd_filter_transform(const SimpleTensor<float> &in, const TensorShape &output_shape);
} // namespace reference
} // namespace validation
} // namespace test
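The reference implementation above writes coefficient k of the tile belonging to filter w and channel z at the linear offset below, which is the row-major layout of the [OFM, IFM, 16] destination produced by the CL kernel and by compute_winograd_filter_transform_shape():

```latex
\mathrm{offset}(w, z, k) \;=\; w \;+\; z \cdot \mathrm{OFM} \;+\; k \cdot \mathrm{OFM} \cdot \mathrm{IFM},
\qquad w \in [0, \mathrm{OFM}),\; z \in [0, \mathrm{IFM}),\; k \in [0, 16)
```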
diff --git a/tests/validation/reference/Winograd.h b/tests/validation/reference/Winograd.h
index ed95239db3..ba8e5c1cb6 100644
--- a/tests/validation/reference/Winograd.h
+++ b/tests/validation/reference/Winograd.h
@@ -24,6 +24,8 @@
#ifndef __ARM_COMPUTE_TEST_WINOGRAD_H__
#define __ARM_COMPUTE_TEST_WINOGRAD_H__
+#include "arm_compute/core/TensorShape.h"
+
#include "tests/SimpleTensor.h"
namespace arm_compute
@@ -36,6 +38,9 @@ namespace reference
{
template <typename T>
SimpleTensor<T> winograd_input_transform(const SimpleTensor<T> &src, const TensorShape &dst_shape, const PadStrideInfo &conv_info, const Size2D &kernel_dims);
+
+template <typename T>
+SimpleTensor<T> winograd_filter_transform(const SimpleTensor<T> &in, const TensorShape &output_shape);
} // namespace reference
} // namespace validation
} // namespace test