authorPablo Tello <pablo.tello@arm.com>2018-09-03 16:59:20 +0100
committerAnthony Barbier <anthony.barbier@arm.com>2018-11-02 16:54:54 +0000
commit000d33a0e4bfc129a8f2968d4e5ee0793df70a1e (patch)
tree6b850cb3ffad4be4970b9ab6b6c1ef74a1be69af
parente70ebc17658840f2099facca72e2194ae593f820 (diff)
downloadComputeLibrary-000d33a0e4bfc129a8f2968d4e5ee0793df70a1e.tar.gz
COMPMID-1552: support kernel sizes 1x7, 7x1, 1x5, 5x1 in NEWinograd

Refactored the validate method to make it easier to maintain in the
future when adding support for new kernel sizes.

Change-Id: I12d9fe7af15ceb0e655cef61ca94407558fb29e8
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/146713
Tested-by: Jenkins <bsgcomp@arm.com>
Reviewed-by: Michalis Spyrou <michalis.spyrou@arm.com>
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
-rw-r--r--  arm_compute/core/NEON/kernels/convolution/winograd/transforms/output.hpp    |   2
-rw-r--r--  src/core/NEON/kernels/NEWinogradConvolutionLayerKernel.cpp                  |  20
-rw-r--r--  src/core/NEON/kernels/convolution/winograd/transforms/output_2_7_fp32.cpp   | 170
-rw-r--r--  src/core/NEON/kernels/convolution/winograd/transforms/output_4_5_fp32.cpp   | 178
-rw-r--r--  src/core/NEON/kernels/convolution/winograd/transforms/weights_2_7_fp32.cpp  | 124
-rw-r--r--  src/core/NEON/kernels/convolution/winograd/transforms/weights_4_5_fp32.cpp  | 124
-rw-r--r--  src/core/NEON/kernels/convolution/winograd/winograd_gemm.cpp                |   9
-rw-r--r--  src/runtime/NEON/functions/NEWinogradConvolutionLayer.cpp                   | 329
-rw-r--r--  tests/validation/NEON/ConvolutionLayer.cpp                                   |  44
9 files changed, 875 insertions(+), 125 deletions(-)
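
All four new kernel shapes map onto 8-point inner tiles, which is why they can reuse the existing one-dimensional transform machinery. Below is a minimal standalone sketch, not part of the commit, of the Winograd F(m, r) relation behind that: an output tile of m points computed from an r-tap kernel needs an inner (Winograd-domain) tile of m + r - 1 points.

#include <cassert>

// For Winograd F(m, r), the inner-tile size is m + r - 1.
constexpr int winograd_inner_tile(int output_tile, int kernel_size)
{
    return output_tile + kernel_size - 1;
}

int main()
{
    assert(winograd_inner_tile(2, 7) == 8); // new 1x7 / 7x1 support: F(2,7)
    assert(winograd_inner_tile(4, 5) == 8); // new 1x5 / 5x1 support: F(4,5)
    assert(winograd_inner_tile(6, 3) == 8); // existing 1x3 / 3x1 support: F(6,3)
    return 0;
}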
diff --git a/arm_compute/core/NEON/kernels/convolution/winograd/transforms/output.hpp b/arm_compute/core/NEON/kernels/convolution/winograd/transforms/output.hpp
index 6ed146bf85..00327f5102 100644
--- a/arm_compute/core/NEON/kernels/convolution/winograd/transforms/output.hpp
+++ b/arm_compute/core/NEON/kernels/convolution/winograd/transforms/output.hpp
@@ -217,7 +217,7 @@ namespace winograd
_matrix_base + start_channel,
_matrix_stride,
_matrix_row_stride,
- (_biases)?(_biases + start_channel):(nullptr),
+ (_biases != nullptr) ? _biases + start_channel : nullptr,
_outptr + start_channel
);
}
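
The hunk above is purely stylistic: the old conditional expression and the new explicit nullptr comparison are equivalent; the rewrite only makes the pointer test explicit. A small standalone check, assuming nothing beyond standard C++:

#include <cassert>

int main()
{
    const float data[4] = { 0.f, 1.f, 2.f, 3.f };
    const float *biases = data;
    const int start_channel = 2;
    const float *old_form = (biases) ? (biases + start_channel) : (nullptr);
    const float *new_form = (biases != nullptr) ? biases + start_channel : nullptr;
    assert(old_form == new_form); // both select biases + 2
    biases = nullptr;
    assert(((biases != nullptr) ? biases + start_channel : nullptr) == nullptr);
    return 0;
}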
diff --git a/src/core/NEON/kernels/NEWinogradConvolutionLayerKernel.cpp b/src/core/NEON/kernels/NEWinogradConvolutionLayerKernel.cpp
index 8f990712e8..f5609b6f5c 100644
--- a/src/core/NEON/kernels/NEWinogradConvolutionLayerKernel.cpp
+++ b/src/core/NEON/kernels/NEWinogradConvolutionLayerKernel.cpp
@@ -42,7 +42,7 @@ namespace
{
inline bool is_kernel_size_supported(Size2D size)
{
- const std::array<Size2D, 4> supported_input_sizes = { { Size2D(1, 3), Size2D(3, 1), Size2D(5, 5), Size2D(3, 3) } };
+ const std::array<Size2D, 8> supported_input_sizes = { { Size2D(1, 3), Size2D(3, 1), Size2D(5, 5), Size2D(3, 3), Size2D(1, 5), Size2D(5, 1), Size2D(7, 1), Size2D(1, 7) } };
return std::end(supported_input_sizes) != std::find(std::begin(supported_input_sizes), std::end(supported_input_sizes), size);
}
@@ -56,10 +56,10 @@ Status validate_arguments_winograd_weight_trans(const ITensorInfo *input, const
const size_t idx_height = get_data_layout_dimension_index(input->data_layout(), DataLayoutDimension::HEIGHT);
const auto input_width = input->dimension(idx_width);
const auto input_height = input->dimension(idx_height);
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(!is_kernel_size_supported(Size2D(input_width, input_height)), "Only 1x3, 3x1, 3x3 and 5x5 kernels are supported");
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(!is_kernel_size_supported(Size2D(input_width, input_height)), "Only 1x3, 3x1, 1x5, 5x1, 7x1, 1x7, 3x3 and 5x5 kernels are supported");
ARM_COMPUTE_RETURN_ERROR_ON(input->num_dimensions() > 4);
const Size2D &output_tile = winograd_info.output_tile_size;
- const std::array<Size2D, 4> supported_tile_sizes = { { Size2D(2U, 2U), Size2D(4U, 4U), Size2D(1U, 6U), Size2D(6U, 1U) } };
+ const std::array<Size2D, 8> supported_tile_sizes = { { Size2D(2U, 2U), Size2D(4U, 4U), Size2D(1U, 6U), Size2D(6U, 1U), Size2D(4, 1), Size2D(1, 4), Size2D(2, 1), Size2D(1, 2) } };
ARM_COMPUTE_RETURN_ERROR_ON(std::end(supported_tile_sizes) == std::find(std::begin(supported_tile_sizes), std::end(supported_tile_sizes), output_tile));
// Checks performed when output is configured
@@ -305,6 +305,10 @@ template class NEWinogradLayerTransformWeightsKernel<float, 2, 2, 5, 5>;
template class NEWinogradLayerTransformWeightsKernel<float, 1, 6, 1, 3>;
template class NEWinogradLayerTransformWeightsKernel<float, 6, 1, 3, 1>;
+template class NEWinogradLayerTransformWeightsKernel<float, 1, 4, 1, 5>;
+template class NEWinogradLayerTransformWeightsKernel<float, 4, 1, 5, 1>;
+template class NEWinogradLayerTransformWeightsKernel<float, 1, 2, 1, 7>;
+template class NEWinogradLayerTransformWeightsKernel<float, 2, 1, 7, 1>;
// Input transform
template <typename T, int OutputTileRows, int OutputTileCols, int KernelRows, int KernelCols>
@@ -401,6 +405,11 @@ template class NEWinogradLayerTransformInputKernel<float, 2, 2, 5, 5>;
template class NEWinogradLayerTransformInputKernel<float, 1, 6, 1, 3>;
template class NEWinogradLayerTransformInputKernel<float, 6, 1, 3, 1>;
+template class NEWinogradLayerTransformInputKernel<float, 1, 4, 1, 5>;
+template class NEWinogradLayerTransformInputKernel<float, 4, 1, 5, 1>;
+template class NEWinogradLayerTransformInputKernel<float, 1, 2, 1, 7>;
+template class NEWinogradLayerTransformInputKernel<float, 2, 1, 7, 1>;
+
// Output transform
template <typename T, int OutputTileRows, int OutputTileCols, int KernelRows, int KernelCols>
@@ -513,4 +522,9 @@ template class NEWinogradLayerTransformOutputKernel<float, 2, 2, 5, 5>;
template class NEWinogradLayerTransformOutputKernel<float, 1, 6, 1, 3>;
template class NEWinogradLayerTransformOutputKernel<float, 6, 1, 3, 1>;
+template class NEWinogradLayerTransformOutputKernel<float, 1, 4, 1, 5>;
+template class NEWinogradLayerTransformOutputKernel<float, 4, 1, 5, 1>;
+template class NEWinogradLayerTransformOutputKernel<float, 1, 2, 1, 7>;
+template class NEWinogradLayerTransformOutputKernel<float, 2, 1, 7, 1>;
+
} // namespace arm_compute
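
The "template class" lines this file appends are explicit instantiations: the transform kernels are templates whose member definitions live in this .cpp file, so every <OutputTileRows, OutputTileCols, KernelRows, KernelCols> combination used elsewhere must be instantiated here or the build fails to link. A minimal sketch with hypothetical names, not the library's code:

#include <iostream>

// Pretend the member definitions live in a .cpp, out of sight of other
// translation units; only explicitly instantiated combinations can be linked.
template <int KernelRows, int KernelCols>
struct TransformKernel
{
    static void run() { std::cout << KernelRows << "x" << KernelCols << " kernel\n"; }
};

template struct TransformKernel<1, 7>; // analogous to the lines added above
template struct TransformKernel<7, 1>;

int main()
{
    TransformKernel<1, 7>::run();
    return 0;
}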
diff --git a/src/core/NEON/kernels/convolution/winograd/transforms/output_2_7_fp32.cpp b/src/core/NEON/kernels/convolution/winograd/transforms/output_2_7_fp32.cpp
new file mode 100644
index 0000000000..cfd2029f11
--- /dev/null
+++ b/src/core/NEON/kernels/convolution/winograd/transforms/output_2_7_fp32.cpp
@@ -0,0 +1,170 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "arm_compute/core/NEON/kernels/convolution/winograd/transforms/output.hpp"
+#include "arm_compute/core/NEON/kernels/convolution/winograd/winograd_gemm.hpp"
+#include "arm_compute/core/NEON/kernels/convolution/common/arm.hpp"
+
+namespace winograd
+{
+
+using Transform = WinogradGEMM<1, 2, 1, 7>::OutputTransform<float>;
+using TransformTransposed = WinogradGEMM<2, 1, 7, 1>::OutputTransform<float>;
+
+template <>
+template <>
+int Transform::ops_performed(const Tensor4DShape &shape)
+{
+ (void) shape;
+ return 0; // TODO
+}
+
+template <>
+template <>
+template <int pad_bottom, int pad_right>
+void Transform::process_tile(
+ const int n_channels,
+ const float* const matrix_base,
+ const int matrix_stride,
+ const float* const biases,
+ float* const output,
+ const int output_row_stride,
+ const int output_col_stride
+)
+{
+ (void) output_row_stride;
+ constexpr int cells_j = output_tile_cols - pad_right;
+
+ // Construct a map to the output cells
+ float *outptrs[cells_j];
+ for (int j = 0; j < cells_j; j++)
+ {
+ outptrs[j] = output + j*output_col_stride;
+ }
+ const float *inptr = matrix_base;
+ const float *bptr = biases;
+
+ // For each channel of the output
+ int channels_remaining = n_channels;
+#ifdef __arm_any__
+ for (; channels_remaining >= 4; channels_remaining -= 4)
+ {
+ // Matrices used and computed during this transform
+ float32x4_t F[inner_tile_cols], f[output_tile_cols], b = vdupq_n_f32(0.0f);
+
+ // Read a 1x8 tile in the Winograd domain
+ for (int j = 0; j < inner_tile_cols; j++)
+ {
+ F[j] = vld1q_f32(inptr + j*matrix_stride);
+ }
+ inptr += 4;
+
+ f[0] = vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmulq_n_f32(F[6], 1), F[5], 1), F[4], 1), F[3], 1), F[2], 1), F[1], 1), F[0], 1);
+ f[1] = vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmulq_n_f32(F[7], 1), F[2], 1), F[6], 3), F[4], 2), F[3], -2), F[5], -3), F[1], -1);
+
+ // Write out the output tile
+ if (bptr != 0)
+ {
+ b = vld1q_f32(bptr);
+ bptr += 4;
+ }
+ for (int j = 0; j < cells_j; j++)
+ {
+ vst1q_f32(outptrs[j], f[j] + b);
+ outptrs[j] += 4;
+ }
+ }
+ for (; channels_remaining >= 2; channels_remaining -= 2)
+ {
+ // Matrices used and computed during this transform
+ float32x2_t F[inner_tile_cols], f[output_tile_cols], b = vdup_n_f32(0.0f);
+
+ // Read a 1x8 tile in the Winograd domain
+ for (int j = 0; j < inner_tile_cols; j++)
+ {
+ F[j] = vld1_f32(inptr + j*matrix_stride);
+ }
+ inptr += 2;
+
+ f[0] = vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmul_n_f32(F[6], 1), F[5], 1), F[4], 1), F[3], 1), F[2], 1), F[1], 1), F[0], 1);
+ f[1] = vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmul_n_f32(F[7], 1), F[2], 1), F[6], 3), F[4], 2), F[3], -2), F[5], -3), F[1], -1);
+
+ // Write out the output tile
+ if (bptr != 0)
+ {
+ b = vld1_f32(bptr);
+ bptr += 2;
+ }
+ for (int j = 0; j < cells_j; j++)
+ {
+ vst1_f32(outptrs[j], f[j] + b);
+ outptrs[j] += 2;
+ }
+ }
+#endif // __arm_any__
+ for (; channels_remaining; channels_remaining--)
+ {
+ // Matrices used and computed during this transform
+ float F[inner_tile_cols], f[output_tile_cols], b = 0.0f;
+
+ // Read a 1x8 tile in the Winograd domain
+ for (int j = 0; j < inner_tile_cols; j++)
+ {
+ F[j] = *(inptr + j*matrix_stride);
+ }
+ inptr++;
+
+ f[0] = F[0]*1 + F[1]*1 + F[2]*1 + F[3]*1 + F[4]*1 + F[5]*1 + F[6]*1;
+ f[1] = F[1]*-1 + F[5]*-3 + F[3]*-2 + F[4]*2 + F[6]*3 + F[2]*1 + F[7]*1;
+
+ // Write out the output tile
+ if (bptr != 0)
+ {
+ b = *(bptr++);
+ }
+ for (int j = 0; j < cells_j; j++)
+ {
+ *(outptrs[j]++) = f[j] + b;
+ }
+ }
+}
+
+template <>
+template <>
+const Transform::TileFn Transform::tile_fns[max_pad_bottom][max_pad_right] =
+{
+ {
+ Transform::template process_tile<0, 0>,
+ Transform::template process_tile<0, 1>,
+ },
+};
+
+
+template <>
+template <>
+const TransformTransposed::TileFn TransformTransposed::tile_fns[max_pad_bottom][max_pad_right] = {};
+
+template struct WinogradGEMM<1, 2, 1, 7>::OutputTransform<float>;
+template struct WinogradGEMM<2, 1, 7, 1>::OutputTransform<float>;
+} // namespace winograd
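
The scalar tail of process_tile spells out the two output rows; read as a matrix-vector product they form the F(2,7) output transform built on the interpolation points {0, -1, 1, -2, 2, -3, 3} plus the point at infinity. A standalone scalar reference, a sketch rather than library code:

#include <cstdio>

int main()
{
    // Rows of A^T for F(2,7); the last column is the point at infinity.
    const float AT[2][8] = {
        { 1.f,  1.f, 1.f,  1.f, 1.f,  1.f, 1.f, 0.f },
        { 0.f, -1.f, 1.f, -2.f, 2.f, -3.f, 3.f, 1.f },
    };
    const float F[8] = { 1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f, 8.f }; // arbitrary samples
    for (int i = 0; i < 2; i++)
    {
        float f = 0.f;
        for (int j = 0; j < 8; j++)
        {
            f += AT[i][j] * F[j]; // same accumulation as f[0] and f[1] above
        }
        std::printf("f[%d] = %g\n", i, f);
    }
    return 0;
}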
diff --git a/src/core/NEON/kernels/convolution/winograd/transforms/output_4_5_fp32.cpp b/src/core/NEON/kernels/convolution/winograd/transforms/output_4_5_fp32.cpp
new file mode 100644
index 0000000000..2417f527bf
--- /dev/null
+++ b/src/core/NEON/kernels/convolution/winograd/transforms/output_4_5_fp32.cpp
@@ -0,0 +1,178 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "arm_compute/core/NEON/kernels/convolution/winograd/transforms/output.hpp"
+#include "arm_compute/core/NEON/kernels/convolution/winograd/winograd_gemm.hpp"
+#include "arm_compute/core/NEON/kernels/convolution/common/arm.hpp"
+
+namespace winograd
+{
+
+using Transform = WinogradGEMM<1, 4, 1, 5>::OutputTransform<float>;
+using TransformTransposed = WinogradGEMM<4, 1, 5, 1>::OutputTransform<float>;
+
+template <>
+template <>
+int Transform::ops_performed(const Tensor4DShape &shape)
+{
+ (void) shape;
+ return 0; // TODO
+}
+
+template <>
+template <>
+template <int pad_bottom, int pad_right>
+void Transform::process_tile(
+ const int n_channels,
+ const float* const matrix_base,
+ const int matrix_stride,
+ const float* const biases,
+ float* const output,
+ const int output_row_stride,
+ const int output_col_stride
+)
+{
+ (void) output_row_stride;
+ constexpr int cells_j = output_tile_cols - pad_right;
+
+ // Construct a map to the output cells
+ float *outptrs[cells_j];
+ for (int j = 0; j < cells_j; j++)
+ {
+ outptrs[j] = output + j*output_col_stride;
+ }
+ const float *inptr = matrix_base;
+ const float *bptr = biases;
+
+ // For each channel of the output
+ int channels_remaining = n_channels;
+#ifdef __arm_any__
+ for (; channels_remaining >= 4; channels_remaining -= 4)
+ {
+ // Matrices used and computed during this transform
+ float32x4_t F[inner_tile_cols], f[output_tile_cols], b = vdupq_n_f32(0.0f);
+
+ // Read a 1x8 tile in the Winograd domain
+ for (int j = 0; j < inner_tile_cols; j++)
+ {
+ F[j] = vld1q_f32(inptr + j*matrix_stride);
+ }
+ inptr += 4;
+
+ f[0] = vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmulq_n_f32(F[6], 1), F[5], 1), F[4], 1), F[3], 1), F[2], 1), F[1], 1), F[0], 1);
+ f[1] = vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmulq_n_f32(F[2], 1), F[6], 3), F[4], 2), F[3], -2), F[5], -3), F[1], -1);
+ f[2] = vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmulq_n_f32(F[2], 1), F[1], 1), F[6], 9), F[5], 9), F[4], 4), F[3], 4);
+ f[3] = vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmulq_n_f32(F[7], 1), F[2], 1), F[6], 27), F[4], 8), F[3], -8), F[5], -27), F[1], -1);
+
+ // Write out the output tile
+ if (bptr != 0)
+ {
+ b = vld1q_f32(bptr);
+ bptr += 4;
+ }
+ for (int j = 0; j < cells_j; j++)
+ {
+ vst1q_f32(outptrs[j], f[j] + b);
+ outptrs[j] += 4;
+ }
+ }
+ for (; channels_remaining >= 2; channels_remaining -= 2)
+ {
+ // Matrices used and computed during this transform
+ float32x2_t F[inner_tile_cols], f[output_tile_cols], b = vdup_n_f32(0.0f);
+
+ // Read a 1x8 tile in the Winograd domain
+ for (int j = 0; j < inner_tile_cols; j++)
+ {
+ F[j] = vld1_f32(inptr + j*matrix_stride);
+ }
+ inptr += 2;
+
+ f[0] = vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmul_n_f32(F[6], 1), F[5], 1), F[4], 1), F[3], 1), F[2], 1), F[1], 1), F[0], 1);
+ f[1] = vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmul_n_f32(F[2], 1), F[6], 3), F[4], 2), F[3], -2), F[5], -3), F[1], -1);
+ f[2] = vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmul_n_f32(F[2], 1), F[1], 1), F[6], 9), F[5], 9), F[4], 4), F[3], 4);
+ f[3] = vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmul_n_f32(F[7], 1), F[2], 1), F[6], 27), F[4], 8), F[3], -8), F[5], -27), F[1], -1);
+
+ // Write out the output tile
+ if (bptr != 0)
+ {
+ b = vld1_f32(bptr);
+ bptr += 2;
+ }
+ for (int j = 0; j < cells_j; j++)
+ {
+ vst1_f32(outptrs[j], f[j] + b);
+ outptrs[j] += 2;
+ }
+ }
+#endif // __arm_any__
+ for (; channels_remaining; channels_remaining--)
+ {
+ // Matrices used and computed during this transform
+ float F[inner_tile_cols], f[output_tile_cols], b = 0.0f;
+
+ // Read a 1x8 tile in the Winograd domain
+ for (int j = 0; j < inner_tile_cols; j++)
+ {
+ F[j] = *(inptr + j*matrix_stride);
+ }
+ inptr++;
+
+ f[0] = F[0]*1 + F[1]*1 + F[2]*1 + F[3]*1 + F[4]*1 + F[5]*1 + F[6]*1;
+ f[1] = F[1]*-1 + F[5]*-3 + F[3]*-2 + F[4]*2 + F[6]*3 + F[2]*1;
+ f[2] = F[3]*4 + F[4]*4 + F[5]*9 + F[6]*9 + F[1]*1 + F[2]*1;
+ f[3] = F[1]*-1 + F[5]*-27 + F[3]*-8 + F[4]*8 + F[6]*27 + F[2]*1 + F[7]*1;
+
+ // Write out the output tile
+ if (bptr != 0)
+ {
+ b = *(bptr++);
+ }
+ for (int j = 0; j < cells_j; j++)
+ {
+ *(outptrs[j]++) = f[j] + b;
+ }
+ }
+}
+
+template <>
+template <>
+const Transform::TileFn Transform::tile_fns[max_pad_bottom][max_pad_right] =
+{
+ {
+ Transform::template process_tile<0, 0>,
+ Transform::template process_tile<0, 1>,
+ Transform::template process_tile<0, 2>,
+ Transform::template process_tile<0, 3>,
+ },
+};
+
+template <>
+template <>
+const TransformTransposed::TileFn TransformTransposed::tile_fns[max_pad_bottom][max_pad_right] = {};
+
+
+template struct WinogradGEMM<1, 4, 1, 5>::OutputTransform<float>;
+template struct WinogradGEMM<4, 1, 5, 1>::OutputTransform<float>;
+} // namespace winograd
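
The F(4,5) rows above follow the same pattern with two extra rows: row i holds the i-th power of each interpolation point, and the infinity column contributes only to the highest row. A sketch, not library code, that regenerates the coefficients used in the scalar loop:

#include <cmath>
#include <cstdio>

int main()
{
    const float points[7] = { 0.f, -1.f, 1.f, -2.f, 2.f, -3.f, 3.f };
    float AT[4][8] = {}; // 4 output rows, 8 Winograd-domain samples
    for (int i = 0; i < 4; i++)
    {
        for (int j = 0; j < 7; j++)
        {
            AT[i][j] = std::pow(points[j], static_cast<float>(i));
        }
    }
    AT[3][7] = 1.f; // the point at infinity reaches only the last row
    for (int i = 0; i < 4; i++)
    {
        for (int j = 0; j < 8; j++)
        {
            std::printf("% 4.0f", AT[i][j]);
        }
        std::printf("\n"); // matches the coefficients of f[0]..f[3] above
    }
    return 0;
}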
diff --git a/src/core/NEON/kernels/convolution/winograd/transforms/weights_2_7_fp32.cpp b/src/core/NEON/kernels/convolution/winograd/transforms/weights_2_7_fp32.cpp
new file mode 100644
index 0000000000..85cf418656
--- /dev/null
+++ b/src/core/NEON/kernels/convolution/winograd/transforms/weights_2_7_fp32.cpp
@@ -0,0 +1,124 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "arm_compute/core/NEON/kernels/convolution/common/arm.hpp"
+#include "arm_compute/core/NEON/kernels/convolution/winograd/winograd_gemm.hpp"
+#include "arm_compute/core/NEON/kernels/convolution/winograd/transforms/kernel.hpp"
+
+namespace winograd
+{
+ template <>
+ template <>
+ void WinogradGEMM<1, 2, 1, 7>::WeightsTransform<float>::execute(
+ const int n_output_channels,
+ const int n_input_channels,
+ const float* const input, // NOTE: Data in HWIO order
+ float* const output,
+ const int matrix_stride,
+ const int matrix_row_stride
+ )
+ {
+ // Get pointers to each cell of the weight tensor
+ const auto weight_col_stride = n_input_channels * n_output_channels;
+ const float *inptrs[kernel_cols];
+ for (int j = 0; j < kernel_cols; j++)
+ {
+ inptrs[j] = input + j*weight_col_stride;
+ }
+
+ // For each input channel
+ for (int ic = 0; ic < n_input_channels; ic++)
+ {
+ float *outptr = output + ic * matrix_row_stride;
+
+ // For each output channel
+ int channels_remaining = n_output_channels;
+ for (; channels_remaining; channels_remaining--)
+ {
+ // Matrices used and computed in this kernel
+ float w[kernel_cols], V[inner_tile_cols];
+
+ // Read weights
+ for (int j = 0; j < kernel_cols; j++)
+ {
+ w[j] = *(inptrs[j]++);
+ }
+
+ // Compute V = w WT
+ V[0] = (w[0]*-1) / 36.0f;
+ V[1] = (w[1]*-1 + w[3]*-1 + w[5]*-1 + w[0]*1 + w[2]*1 + w[4]*1 + w[6]*1) / 48.0f;
+ V[2] = (w[0]*1 + w[1]*1 + w[2]*1 + w[3]*1 + w[4]*1 + w[5]*1 + w[6]*1) / 48.0f;
+ V[3] = (w[0]*-1 + w[6]*-64 + w[4]*-16 + w[2]*-4 + w[1]*2 + w[3]*8 + w[5]*32) / 120.0f;
+ V[4] = (w[0]*-1 + w[6]*-64 + w[5]*-32 + w[4]*-16 + w[3]*-8 + w[2]*-4 + w[1]*-2) / 120.0f;
+ V[5] = (w[5]*-243 + w[3]*-27 + w[1]*-3 + w[2]*9 + w[4]*81 + w[6]*729 + w[0]*1) / 720.0f;
+ V[6] = (w[1]*3 + w[2]*9 + w[3]*27 + w[4]*81 + w[5]*243 + w[6]*729 + w[0]*1) / 720.0f;
+ V[7] = (w[6]*1) / 1.0f;
+
+ // Store the transformed weights
+ for (int j = 0; j < inner_tile_cols; j++)
+ {
+ *(outptr + j*matrix_stride) = V[j];
+ }
+ outptr++;
+ }
+ }
+ }
+
+ template <>
+ template <>
+ int WinogradGEMM<1, 2, 1, 7>::WeightsTransform<float>::ops_performed(const KernelShape &shape)
+ {
+ (void) shape;
+ return 0; // TODO
+ }
+
+ template <>
+ template <>
+ void WinogradGEMM<2, 1, 7, 1>::WeightsTransform<float>::execute(
+ const int n_output_channels,
+ const int n_input_channels,
+ const float* const input, // NOTE: Data in HWIO order
+ float* const output,
+ const int matrix_stride,
+ const int matrix_row_stride
+ )
+ {
+ // Redirect to the 1xN implementation
+ WinogradGEMM<1, 2, 1, 7>::template WeightsTransform<float>::execute(
+ n_output_channels, n_input_channels, input, output, matrix_stride,
+ matrix_row_stride
+ );
+ }
+
+ template <>
+ template <>
+ int WinogradGEMM<2, 1, 7, 1>::WeightsTransform<float>::ops_performed(const KernelShape &shape)
+ {
+ (void) shape;
+ return 0; // TODO
+ }
+
+ template struct WinogradGEMM<1, 2, 1, 7>::WeightsTransform<float>;
+ template struct WinogradGEMM<2, 1, 7, 1>::WeightsTransform<float>;
+}
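
The NOTE about HWIO order is worth unpacking: each kernel tap is a contiguous n_input_channels x n_output_channels block, and execute() walks output channels fastest, then input channels, reading one value per tap at each step. A standalone check of that indexing, with made-up sizes rather than library values:

#include <cassert>
#include <vector>

int main()
{
    const int kernel_cols = 7, n_in = 2, n_out = 3;
    const int weight_col_stride = n_in * n_out;
    std::vector<float> weights(kernel_cols * weight_col_stride);
    // Encode (tap, input channel, output channel) in each value.
    for (int j = 0; j < kernel_cols; j++)
        for (int ic = 0; ic < n_in; ic++)
            for (int oc = 0; oc < n_out; oc++)
                weights[j * weight_col_stride + ic * n_out + oc] = float(j * 100 + ic * 10 + oc);
    // Walk the taps the way execute() does and recover the same ordering.
    const float *inptrs[kernel_cols];
    for (int j = 0; j < kernel_cols; j++)
        inptrs[j] = weights.data() + j * weight_col_stride;
    for (int ic = 0; ic < n_in; ic++)
        for (int oc = 0; oc < n_out; oc++)
            for (int j = 0; j < kernel_cols; j++)
                assert(*(inptrs[j]++) == float(j * 100 + ic * 10 + oc));
    return 0;
}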
diff --git a/src/core/NEON/kernels/convolution/winograd/transforms/weights_4_5_fp32.cpp b/src/core/NEON/kernels/convolution/winograd/transforms/weights_4_5_fp32.cpp
new file mode 100644
index 0000000000..2f14e20142
--- /dev/null
+++ b/src/core/NEON/kernels/convolution/winograd/transforms/weights_4_5_fp32.cpp
@@ -0,0 +1,124 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "arm_compute/core/NEON/kernels/convolution/common/arm.hpp"
+#include "arm_compute/core/NEON/kernels/convolution/winograd/winograd_gemm.hpp"
+#include "arm_compute/core/NEON/kernels/convolution/winograd/transforms/kernel.hpp"
+
+namespace winograd
+{
+ template <>
+ template <>
+ void WinogradGEMM<1, 4, 1, 5>::WeightsTransform<float>::execute(
+ const int n_output_channels,
+ const int n_input_channels,
+ const float* const input, // NOTE: Data in HWIO order
+ float* const output,
+ const int matrix_stride,
+ const int matrix_row_stride
+ )
+ {
+ // Get pointers to each cell of the weight tensor
+ const auto weight_col_stride = n_input_channels * n_output_channels;
+ const float *inptrs[kernel_cols];
+ for (int j = 0; j < kernel_cols; j++)
+ {
+ inptrs[j] = input + j*weight_col_stride;
+ }
+
+ // For each input channel
+ for (int ic = 0; ic < n_input_channels; ic++)
+ {
+ float *outptr = output + ic * matrix_row_stride;
+
+ // For each output channel
+ int channels_remaining = n_output_channels;
+ for (; channels_remaining; channels_remaining--)
+ {
+ // Matrices used and computed in this kernel
+ float w[kernel_cols], V[inner_tile_cols];
+
+ // Read weights
+ for (int j = 0; j < kernel_cols; j++)
+ {
+ w[j] = *(inptrs[j]++);
+ }
+
+ // Compute V = w WT
+ V[0] = (w[0]*-1) / 36;
+ V[1] = (w[1]*-1 + w[3]*-1 + w[0]*1 + w[2]*1 + w[4]*1) / 48;
+ V[2] = (w[0]*1 + w[1]*1 + w[2]*1 + w[3]*1 + w[4]*1) / 48;
+ V[3] = (w[0]*-1 + w[4]*-16 + w[2]*-4 + w[1]*2 + w[3]*8) / 120;
+ V[4] = (w[0]*-1 + w[4]*-16 + w[3]*-8 + w[2]*-4 + w[1]*-2) / 120;
+ V[5] = (w[3]*-27 + w[1]*-3 + w[2]*9 + w[4]*81 + w[0]*1) / 720;
+ V[6] = (w[1]*3 + w[2]*9 + w[3]*27 + w[4]*81 + w[0]*1) / 720;
+ V[7] = (w[4]*1) / 1;
+
+ // Store the transformed weights
+ for (int j = 0; j < inner_tile_cols; j++)
+ {
+ *(outptr + j*matrix_stride) = V[j];
+ }
+ outptr++;
+ }
+ }
+ }
+
+ template <>
+ template <>
+ int WinogradGEMM<1, 4, 1, 5>::WeightsTransform<float>::ops_performed(const KernelShape &shape)
+ {
+ (void) shape;
+ return 0; // TODO
+ }
+
+ template <>
+ template <>
+ void WinogradGEMM<4, 1, 5, 1>::WeightsTransform<float>::execute(
+ const int n_output_channels,
+ const int n_input_channels,
+ const float* const input, // NOTE: Data in HWIO order
+ float* const output,
+ const int matrix_stride,
+ const int matrix_row_stride
+ )
+ {
+ // Redirect to the 1xN implementation
+ WinogradGEMM<1, 4, 1, 5>::template WeightsTransform<float>::execute(
+ n_output_channels, n_input_channels, input, output, matrix_stride,
+ matrix_row_stride
+ );
+ }
+
+ template <>
+ template <>
+ int WinogradGEMM<4, 1, 5, 1>::WeightsTransform<float>::ops_performed(const KernelShape &shape)
+ {
+ (void) shape;
+ return 0; // TODO
+ }
+
+ template struct WinogradGEMM<1, 4, 1, 5>::WeightsTransform<float>;
+ template struct WinogradGEMM<4, 1, 5, 1>::WeightsTransform<float>;
+}
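
Unlike the 2_7 variant, this file writes the divisors as int literals (36, 48, 120, 720). That is benign: the numerator is float, so the int is converted and the division happens in float either way. A two-line demonstration, assuming nothing beyond standard C++:

#include <cstdio>

int main()
{
    float w = 2.0f;
    // The int literal converts to float because the left operand is float.
    std::printf("%g %g\n", (w * -1) / 36, (w * -1.0f) / 36.0f); // prints the same value twice
    return 0;
}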
diff --git a/src/core/NEON/kernels/convolution/winograd/winograd_gemm.cpp b/src/core/NEON/kernels/convolution/winograd/winograd_gemm.cpp
index d544fd5710..a7de2fd3e5 100644
--- a/src/core/NEON/kernels/convolution/winograd/winograd_gemm.cpp
+++ b/src/core/NEON/kernels/convolution/winograd/winograd_gemm.cpp
@@ -229,3 +229,12 @@ template class WinogradGEMM<1, 6, 1, 3>::Convolution<float, float>;
template class WinogradGEMM<6, 1, 3, 1>::Convolution<float, float>;
template class WinogradGEMM<2, 2, 5, 5>::Convolution<float, float>;
+
+template class WinogradGEMM<1, 4, 1, 5>::Convolution<float, float>;
+template class WinogradGEMM<4, 1, 5, 1>::Convolution<float, float>;
+
+template class WinogradGEMM<1, 2, 1, 7>::Convolution<float, float>;
+template class WinogradGEMM<2, 1, 7, 1>::Convolution<float, float>;
+
+
+
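
A reading aid for these instantiations: the parameter order is <OutputTileRows, OutputTileCols, KernelRows, KernelCols> (see the kernel template declarations earlier in the diff), so WinogradGEMM<1, 4, 1, 5> is the F(4,5) configuration for a horizontal 1x5 kernel producing 1x4 output tiles. A sketch with a hypothetical helper, not library code:

#include <cstdio>

template <int OutputTileRows, int OutputTileCols, int KernelRows, int KernelCols>
struct TileNames
{
    static void print()
    {
        std::printf("kernel %dx%d -> output tile %dx%d (inner tile %dx%d)\n",
                    KernelRows, KernelCols, OutputTileRows, OutputTileCols,
                    OutputTileRows + KernelRows - 1, OutputTileCols + KernelCols - 1);
    }
};

int main()
{
    TileNames<1, 4, 1, 5>::print(); // 1x5 kernel, 1x4 tile, 1x8 inner tile
    TileNames<2, 1, 7, 1>::print(); // 7x1 kernel, 2x1 tile, 8x1 inner tile
    return 0;
}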
diff --git a/src/runtime/NEON/functions/NEWinogradConvolutionLayer.cpp b/src/runtime/NEON/functions/NEWinogradConvolutionLayer.cpp
index ff7934e23a..e41b0be860 100644
--- a/src/runtime/NEON/functions/NEWinogradConvolutionLayer.cpp
+++ b/src/runtime/NEON/functions/NEWinogradConvolutionLayer.cpp
@@ -39,6 +39,121 @@ namespace arm_compute
{
namespace
{
+inline Status validate_kernel_3x3(const Size2D input_dims, const ITensorInfo *input, const TensorInfo *input0, const TensorInfo *input1, const TensorInfo *batched_mm_output,
+ const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const WinogradInfo &winograd_info, const ActivationLayerInfo &act_info)
+{
+ if(input_dims.width > 4 && input_dims.height > 4)
+ {
+ ARM_COMPUTE_RETURN_ON_ERROR((NEWinogradLayerTransformInputKernel<float, 4, 4, 3, 3>::validate(input, input0, winograd_info)));
+ ARM_COMPUTE_RETURN_ON_ERROR((NEWinogradLayerTransformWeightsKernel<float, 4, 4, 3, 3>::validate(weights, input1, winograd_info)));
+ ARM_COMPUTE_RETURN_ON_ERROR((NEWinogradLayerTransformOutputKernel<float, 4, 4, 3, 3>::validate(batched_mm_output, biases, output, winograd_info)));
+ }
+ else
+ {
+ ARM_COMPUTE_RETURN_ON_ERROR((NEWinogradLayerTransformInputKernel<float, 2, 2, 3, 3>::validate(input, input0, winograd_info)));
+ ARM_COMPUTE_RETURN_ON_ERROR((NEWinogradLayerTransformWeightsKernel<float, 2, 2, 3, 3>::validate(weights, input1, winograd_info)));
+ ARM_COMPUTE_RETURN_ON_ERROR((NEWinogradLayerTransformOutputKernel<float, 2, 2, 3, 3>::validate(batched_mm_output, biases, output, winograd_info)));
+ }
+
+ if(act_info.enabled())
+ {
+ NEActivationLayer::validate(output, nullptr, act_info);
+ }
+ return Status{};
+}
+
+inline Status validate_kernel_5x5(const ITensorInfo *input, const TensorInfo *input0, const TensorInfo *input1, const TensorInfo *batched_mm_output,
+ const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const WinogradInfo &winograd_info, const ActivationLayerInfo &act_info)
+{
+ ARM_COMPUTE_RETURN_ON_ERROR((NEWinogradLayerTransformInputKernel<float, 2, 2, 5, 5>::validate(input, input0, winograd_info)));
+ ARM_COMPUTE_RETURN_ON_ERROR((NEWinogradLayerTransformWeightsKernel<float, 2, 2, 5, 5>::validate(weights, input1, winograd_info)));
+ ARM_COMPUTE_RETURN_ON_ERROR((NEWinogradLayerTransformOutputKernel<float, 2, 2, 5, 5>::validate(batched_mm_output, biases, output, winograd_info)));
+ if(act_info.enabled())
+ {
+ NEActivationLayer::validate(output, nullptr, act_info);
+ }
+ return Status{};
+}
+
+inline Status validate_kernel_3x1(const ITensorInfo *input, const TensorInfo *input0, const TensorInfo *input1, const TensorInfo *batched_mm_output,
+ const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const WinogradInfo &winograd_info, const ActivationLayerInfo &act_info)
+{
+ ARM_COMPUTE_RETURN_ON_ERROR((NEWinogradLayerTransformInputKernel<float, 1, 6, 1, 3>::validate(input, input0, winograd_info)));
+ ARM_COMPUTE_RETURN_ON_ERROR((NEWinogradLayerTransformWeightsKernel<float, 1, 6, 1, 3>::validate(weights, input1, winograd_info)));
+ ARM_COMPUTE_RETURN_ON_ERROR((NEWinogradLayerTransformOutputKernel<float, 1, 6, 1, 3>::validate(batched_mm_output, biases, output, winograd_info)));
+ if(act_info.enabled())
+ {
+ NEActivationLayer::validate(output, nullptr, act_info);
+ }
+ return Status{};
+}
+
+inline Status validate_kernel_1x3(const ITensorInfo *input, const TensorInfo *input0, const TensorInfo *input1, const TensorInfo *batched_mm_output,
+ const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const WinogradInfo &winograd_info, const ActivationLayerInfo &act_info)
+{
+ ARM_COMPUTE_RETURN_ON_ERROR((NEWinogradLayerTransformInputKernel<float, 6, 1, 3, 1>::validate(input, input0, winograd_info)));
+ ARM_COMPUTE_RETURN_ON_ERROR((NEWinogradLayerTransformWeightsKernel<float, 6, 1, 3, 1>::validate(weights, input1, winograd_info)));
+ ARM_COMPUTE_RETURN_ON_ERROR((NEWinogradLayerTransformOutputKernel<float, 6, 1, 3, 1>::validate(batched_mm_output, biases, output, winograd_info)));
+
+ if(act_info.enabled())
+ {
+ NEActivationLayer::validate(output, nullptr, act_info);
+ }
+ return Status{};
+}
+
+inline Status validate_kernel_5x1(const ITensorInfo *input, const TensorInfo *input0, const TensorInfo *input1, const TensorInfo *batched_mm_output,
+ const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const WinogradInfo &winograd_info, const ActivationLayerInfo &act_info)
+{
+ ARM_COMPUTE_RETURN_ON_ERROR((NEWinogradLayerTransformInputKernel<float, 1, 4, 1, 5>::validate(input, input0, winograd_info)));
+ ARM_COMPUTE_RETURN_ON_ERROR((NEWinogradLayerTransformWeightsKernel<float, 1, 4, 1, 5>::validate(weights, input1, winograd_info)));
+ ARM_COMPUTE_RETURN_ON_ERROR((NEWinogradLayerTransformOutputKernel<float, 1, 4, 1, 5>::validate(batched_mm_output, biases, output, winograd_info)));
+ if(act_info.enabled())
+ {
+ NEActivationLayer::validate(output, nullptr, act_info);
+ }
+ return Status{};
+}
+inline Status validate_kernel_1x5(const ITensorInfo *input, const TensorInfo *input0, const TensorInfo *input1, const TensorInfo *batched_mm_output,
+ const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const WinogradInfo &winograd_info, const ActivationLayerInfo &act_info)
+{
+ ARM_COMPUTE_RETURN_ON_ERROR((NEWinogradLayerTransformInputKernel<float, 4, 1, 5, 1>::validate(input, input0, winograd_info)));
+ ARM_COMPUTE_RETURN_ON_ERROR((NEWinogradLayerTransformWeightsKernel<float, 4, 1, 5, 1>::validate(weights, input1, winograd_info)));
+ ARM_COMPUTE_RETURN_ON_ERROR((NEWinogradLayerTransformOutputKernel<float, 4, 1, 5, 1>::validate(batched_mm_output, biases, output, winograd_info)));
+ if(act_info.enabled())
+ {
+ NEActivationLayer::validate(output, nullptr, act_info);
+ }
+ return Status{};
+}
+
+inline Status validate_kernel_7x1(const ITensorInfo *input, const TensorInfo *input0, const TensorInfo *input1, const TensorInfo *batched_mm_output,
+ const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const WinogradInfo &winograd_info, const ActivationLayerInfo &act_info)
+{
+ ARM_COMPUTE_RETURN_ON_ERROR((NEWinogradLayerTransformInputKernel<float, 1, 2, 1, 7>::validate(input, input0, winograd_info)));
+ ARM_COMPUTE_RETURN_ON_ERROR((NEWinogradLayerTransformWeightsKernel<float, 1, 2, 1, 7>::validate(weights, input1, winograd_info)));
+ ARM_COMPUTE_RETURN_ON_ERROR((NEWinogradLayerTransformOutputKernel<float, 1, 2, 1, 7>::validate(batched_mm_output, biases, output, winograd_info)));
+ if(act_info.enabled())
+ {
+ NEActivationLayer::validate(output, nullptr, act_info);
+ }
+ return Status{};
+}
+
+inline Status validate_kernel_1x7(const ITensorInfo *input, const TensorInfo *input0, const TensorInfo *input1, const TensorInfo *batched_mm_output,
+ const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const WinogradInfo &winograd_info, const ActivationLayerInfo &act_info)
+{
+ ARM_COMPUTE_RETURN_ON_ERROR((NEWinogradLayerTransformInputKernel<float, 2, 1, 7, 1>::validate(input, input0, winograd_info)));
+ ARM_COMPUTE_RETURN_ON_ERROR((NEWinogradLayerTransformWeightsKernel<float, 2, 1, 7, 1>::validate(weights, input1, winograd_info)));
+ ARM_COMPUTE_RETURN_ON_ERROR((NEWinogradLayerTransformOutputKernel<float, 2, 1, 7, 1>::validate(batched_mm_output, biases, output, winograd_info)));
+
+ if(act_info.enabled())
+ {
+ NEActivationLayer::validate(output, nullptr, act_info);
+ }
+ return Status{};
+}
+
inline Tensor4DShape internal_get_input_shape(const arm_compute::ITensor *input)
{
const DataLayout data_layout = input->info()->data_layout();
@@ -65,7 +180,6 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *weights,
Size2D winograd_output_tile(const Size2D &input_dims, const Size2D &kernel_dims)
{
Size2D output_tile = Size2D{};
-
if(kernel_dims == Size2D(3U, 3U))
{
output_tile = (input_dims.width <= 4 && input_dims.height <= 4) ? Size2D(2U, 2U) : Size2D(4U, 4U);
@@ -82,6 +196,22 @@ Size2D winograd_output_tile(const Size2D &input_dims, const Size2D &kernel_dims)
{
output_tile = Size2D(6U, 1U);
}
+ else if(kernel_dims == Size2D(1U, 5U))
+ {
+ output_tile = Size2D(1U, 4U);
+ }
+ else if(kernel_dims == Size2D(5U, 1U))
+ {
+ output_tile = Size2D(4U, 1U);
+ }
+ else if(kernel_dims == Size2D(7U, 1U))
+ {
+ output_tile = Size2D(2U, 1U);
+ }
+ else if(kernel_dims == Size2D(1U, 7U))
+ {
+ output_tile = Size2D(1U, 2U);
+ }
return output_tile;
}
@@ -210,6 +340,42 @@ void NEWinogradConvolutionLayer::configure(const ITensor *input, const ITensor *
n_gemms = config::WinogradBase::N_GEMMS;
N_BLOCK = config::WinogradConv::N_BLOCK;
}
+ else if(kernel_size == Size2D(1, 5))
+ {
+ using config = NEWinogradLayerConfiguration<float, float, 4, 1, 5, 1>;
+ transform_input_kernel = support::cpp14::make_unique<config::TransformInputKernel>();
+ transform_weights_kernel = support::cpp14::make_unique<config::TransformWeightsKernel>();
+ transform_output_kernel = support::cpp14::make_unique<config::TransformOutputKernel>();
+ n_gemms = config::WinogradBase::N_GEMMS;
+ N_BLOCK = config::WinogradConv::N_BLOCK;
+ }
+ else if(kernel_size == Size2D(5, 1))
+ {
+ using config = NEWinogradLayerConfiguration<float, float, 1, 4, 1, 5>;
+ transform_input_kernel = support::cpp14::make_unique<config::TransformInputKernel>();
+ transform_weights_kernel = support::cpp14::make_unique<config::TransformWeightsKernel>();
+ transform_output_kernel = support::cpp14::make_unique<config::TransformOutputKernel>();
+ n_gemms = config::WinogradBase::N_GEMMS;
+ N_BLOCK = config::WinogradConv::N_BLOCK;
+ }
+ else if(kernel_size == Size2D(1, 7))
+ {
+ using config = NEWinogradLayerConfiguration<float, float, 2, 1, 7, 1>;
+ transform_input_kernel = support::cpp14::make_unique<config::TransformInputKernel>();
+ transform_weights_kernel = support::cpp14::make_unique<config::TransformWeightsKernel>();
+ transform_output_kernel = support::cpp14::make_unique<config::TransformOutputKernel>();
+ n_gemms = config::WinogradBase::N_GEMMS;
+ N_BLOCK = config::WinogradConv::N_BLOCK;
+ }
+ else if(kernel_size == Size2D(7, 1))
+ {
+ using config = NEWinogradLayerConfiguration<float, float, 1, 2, 1, 7>;
+ transform_input_kernel = support::cpp14::make_unique<config::TransformInputKernel>();
+ transform_weights_kernel = support::cpp14::make_unique<config::TransformWeightsKernel>();
+ transform_output_kernel = support::cpp14::make_unique<config::TransformOutputKernel>();
+ n_gemms = config::WinogradBase::N_GEMMS;
+ N_BLOCK = config::WinogradConv::N_BLOCK;
+ }
else
{
ARM_COMPUTE_ERROR("Not supported.");
@@ -417,10 +583,9 @@ Status NEWinogradConvolutionLayer::validate(const ITensorInfo *input, const ITen
const size_t idx_height = get_data_layout_dimension_index(input->data_layout(), DataLayoutDimension::HEIGHT);
// Input shape, kernel size and output tile
- const Size2D input_dims = Size2D(input->dimension(idx_width), input->dimension(idx_height));
- const Size2D kernel_size = Size2D(weights->dimension(idx_width), weights->dimension(idx_height));
- const Size2D output_tile = winograd_output_tile(input_dims, kernel_size);
- const bool square_kernel = kernel_size.width == kernel_size.height;
+ const Size2D input_dims = Size2D(input->dimension(idx_width), input->dimension(idx_height));
+ const Size2D kernel_size = Size2D(weights->dimension(idx_width), weights->dimension(idx_height));
+ const Size2D output_tile = winograd_output_tile(input_dims, kernel_size);
// Check if the Winograd configuration requires fast math
if(!enable_fast_math)
@@ -437,127 +602,49 @@ Status NEWinogradConvolutionLayer::validate(const ITensorInfo *input, const ITen
// Validate input transform
const TensorShape input0_shape = misc::shape_calculator::compute_winograd_input_transform_shape(*input, winograd_info);
const TensorInfo input0 = input->clone()->set_tensor_shape(input0_shape);
-
- if(square_kernel)
+ // Validate filter transform
+ const TensorShape input1_shape = misc::shape_calculator::compute_winograd_filter_transform_shape(*weights, winograd_info);
+ const TensorInfo input1 = weights->clone()->set_tensor_shape(input1_shape);
+ // Validate batched matrix multiply
+ TensorShape batched_mm_output_shape = input0.tensor_shape();
+ batched_mm_output_shape[0] = input1.tensor_shape()[0];
+ const TensorInfo batched_mm_output = input0.clone()->set_tensor_shape(batched_mm_output_shape);
+
+ if(kernel_size == Size2D(3, 3))
{
- switch(weights->dimension(idx_width))
- {
- case 3:
- {
- if(input_dims.width > 4 && input_dims.height > 4)
- {
- ARM_COMPUTE_RETURN_ON_ERROR((NEWinogradLayerTransformInputKernel<float, 4, 4, 3, 3>::validate(input, &input0, winograd_info)));
- }
- else
- {
- ARM_COMPUTE_RETURN_ON_ERROR((NEWinogradLayerTransformInputKernel<float, 2, 2, 3, 3>::validate(input, &input0, winograd_info)));
- }
- break;
- }
- case 5:
- {
- ARM_COMPUTE_RETURN_ON_ERROR((NEWinogradLayerTransformInputKernel<float, 2, 2, 5, 5>::validate(input, &input0, winograd_info)));
- break;
- }
- default:
- {
- ARM_COMPUTE_RETURN_ERROR_MSG("Only 3x3 and 5x5 kernels supported.");
- break;
- }
- }
- // Validate filter transform
- const TensorShape input1_shape = misc::shape_calculator::compute_winograd_filter_transform_shape(*weights, winograd_info);
- const TensorInfo input1 = weights->clone()->set_tensor_shape(input1_shape);
-
- switch(weights->dimension(idx_width))
- {
- case 3:
- {
- if(input_dims.width > 4 && input_dims.height > 4)
- {
- ARM_COMPUTE_RETURN_ON_ERROR((NEWinogradLayerTransformWeightsKernel<float, 4, 4, 3, 3>::validate(weights, &input1, winograd_info)));
- }
- else
- {
- ARM_COMPUTE_RETURN_ON_ERROR((NEWinogradLayerTransformWeightsKernel<float, 2, 2, 3, 3>::validate(weights, &input1, winograd_info)));
- }
- break;
- }
- case 5:
- {
- ARM_COMPUTE_RETURN_ON_ERROR((NEWinogradLayerTransformWeightsKernel<float, 2, 2, 5, 5>::validate(weights, &input1, winograd_info)));
- break;
- }
- default:
- {
- ARM_COMPUTE_RETURN_ERROR_MSG("Only 3x3 and 5x5 kernels supported.");
- break;
- }
- }
- // Validate batched matrix multiply
- TensorShape batched_mm_output_shape = input0.tensor_shape();
- batched_mm_output_shape[0] = input1.tensor_shape()[0];
- const TensorInfo batched_mm_output = input0.clone()->set_tensor_shape(batched_mm_output_shape);
- switch(weights->dimension(idx_width))
- {
- case 3:
- {
- if(input_dims.width > 4 && input_dims.height > 4)
- {
- // Validate output transform
- ARM_COMPUTE_RETURN_ON_ERROR((NEWinogradLayerTransformOutputKernel<float, 4, 4, 3, 3>::validate(&batched_mm_output, biases, output, winograd_info)));
- }
- else
- {
- // Validate output transform
- ARM_COMPUTE_RETURN_ON_ERROR((NEWinogradLayerTransformOutputKernel<float, 2, 2, 3, 3>::validate(&batched_mm_output, biases, output, winograd_info)));
- }
- break;
- }
- case 5:
- {
- // Validate output transform
- ARM_COMPUTE_RETURN_ON_ERROR((NEWinogradLayerTransformOutputKernel<float, 2, 2, 5, 5>::validate(&batched_mm_output, biases, output, winograd_info)));
- break;
- }
- default:
- {
- ARM_COMPUTE_RETURN_ERROR_MSG("Only 3x3 and 5x5 kernels supported.");
- break;
- }
- }
+ return validate_kernel_3x3(input_dims, input, &input0, &input1, &batched_mm_output, weights, biases, output, winograd_info, act_info);
}
- else
+ else if(kernel_size == Size2D(5, 5))
{
- const TensorShape input1_shape = misc::shape_calculator::compute_winograd_filter_transform_shape(*weights, winograd_info);
- const TensorInfo input1 = weights->clone()->set_tensor_shape(input1_shape);
- TensorShape batched_mm_output_shape = input0.tensor_shape();
- batched_mm_output_shape[0] = input1.tensor_shape()[0];
- const TensorInfo batched_mm_output = input0.clone()->set_tensor_shape(batched_mm_output_shape);
-
- if(kernel_size == Size2D(3, 1))
- {
- ARM_COMPUTE_RETURN_ON_ERROR((NEWinogradLayerTransformInputKernel<float, 1, 6, 1, 3>::validate(input, &input0, winograd_info)));
- // Validate filter transform
- ARM_COMPUTE_RETURN_ON_ERROR((NEWinogradLayerTransformWeightsKernel<float, 1, 6, 1, 3>::validate(weights, &input1, winograd_info)));
- ARM_COMPUTE_RETURN_ON_ERROR((NEWinogradLayerTransformOutputKernel<float, 1, 6, 1, 3>::validate(&batched_mm_output, biases, output, winograd_info)));
- }
- else if(kernel_size == Size2D(1, 3))
- {
- ARM_COMPUTE_RETURN_ON_ERROR((NEWinogradLayerTransformInputKernel<float, 6, 1, 3, 1>::validate(input, &input0, winograd_info)));
- // Validate filter transform
- ARM_COMPUTE_RETURN_ON_ERROR((NEWinogradLayerTransformWeightsKernel<float, 6, 1, 3, 1>::validate(weights, &input1, winograd_info)));
- ARM_COMPUTE_RETURN_ON_ERROR((NEWinogradLayerTransformOutputKernel<float, 6, 1, 3, 1>::validate(&batched_mm_output, biases, output, winograd_info)));
- }
- else
- {
- ARM_COMPUTE_RETURN_ERROR_MSG("Kernel shape not supported");
- }
+ return validate_kernel_5x5(input, &input0, &input1, &batched_mm_output, weights, biases, output, winograd_info, act_info);
}
- // Validate Activation Layer
- if(act_info.enabled())
+ if(kernel_size == Size2D(3, 1))
{
- NEActivationLayer::validate(output, nullptr, act_info);
+ return validate_kernel_3x1(input, &input0, &input1, &batched_mm_output, weights, biases, output, winograd_info, act_info);
+ }
+ else if(kernel_size == Size2D(1, 3))
+ {
+ return validate_kernel_1x3(input, &input0, &input1, &batched_mm_output, weights, biases, output, winograd_info, act_info);
+ }
+ else if(kernel_size == Size2D(5, 1))
+ {
+ return validate_kernel_5x1(input, &input0, &input1, &batched_mm_output, weights, biases, output, winograd_info, act_info);
+ }
+ else if(kernel_size == Size2D(1, 5))
+ {
+ return validate_kernel_1x5(input, &input0, &input1, &batched_mm_output, weights, biases, output, winograd_info, act_info);
+ }
+ else if(kernel_size == Size2D(7, 1))
+ {
+ return validate_kernel_7x1(input, &input0, &input1, &batched_mm_output, weights, biases, output, winograd_info, act_info);
+ }
+ else if(kernel_size == Size2D(1, 7))
+ {
+ return validate_kernel_1x7(input, &input0, &input1, &batched_mm_output, weights, biases, output, winograd_info, act_info);
+ }
+ else
+ {
+ ARM_COMPUTE_RETURN_ERROR_MSG("Kernel shape not supported");
}
return Status{};
}
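
The refactor replaces the nested switch/if validation with one validate_kernel_* helper per shape, all sharing a signature, so the top-level validate() collapses to a flat chain over kernel_size. A condensed sketch of that dispatch shape, with stand-in types rather than the library's API:

#include <cstdio>

struct Size2D
{
    unsigned w, h;
    bool operator==(const Size2D &o) const { return w == o.w && h == o.h; }
};

using Status = int; // stand-in for arm_compute::Status

static Status validate_kernel_1x7() { std::puts("validating 1x7 path"); return 0; }
static Status validate_kernel_7x1() { std::puts("validating 7x1 path"); return 0; }

// Each branch forwards to a per-shape helper and returns immediately, so
// supporting a new kernel size is one helper plus one branch.
static Status validate(const Size2D &kernel_size)
{
    if (kernel_size == Size2D{ 1, 7 })
    {
        return validate_kernel_1x7();
    }
    else if (kernel_size == Size2D{ 7, 1 })
    {
        return validate_kernel_7x1();
    }
    std::puts("Kernel shape not supported");
    return 1;
}

int main()
{
    return validate(Size2D{ 1, 7 });
}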
diff --git a/tests/validation/NEON/ConvolutionLayer.cpp b/tests/validation/NEON/ConvolutionLayer.cpp
index 591af7b731..bb073b08ee 100644
--- a/tests/validation/NEON/ConvolutionLayer.cpp
+++ b/tests/validation/NEON/ConvolutionLayer.cpp
@@ -165,6 +165,50 @@ FIXTURE_DATA_TEST_CASE(RunLarge, NEWinogradConvolutionLayerFixture<float>, frame
TEST_SUITE_END() // Conv3x1
+TEST_SUITE(Conv1x5)
+FIXTURE_DATA_TEST_CASE(RunSmall, NEWinogradConvolutionLayerFixture<float>, framework::DatasetMode::PRECOMMIT,
+ combine(combine(combine(datasets::SmallWinogradConvolutionLayer1x5Dataset(),
+ framework::dataset::make("DataType", { DataType::F32 })),
+ ActivationFunctionsDataset),
+ framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
+{
+ // Validate output
+ validate(Accessor(_target), _reference, abs_tolerance_f32);
+}
+FIXTURE_DATA_TEST_CASE(RunLarge, NEWinogradConvolutionLayerFixture<float>, framework::DatasetMode::NIGHTLY,
+ combine(combine(combine(datasets::LargeWinogradConvolutionLayer1x5Dataset(),
+ framework::dataset::make("DataType", { DataType::F32 })),
+ ActivationFunctionsDataset),
+ framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
+{
+ // Validate output
+ validate(Accessor(_target), _reference, abs_tolerance_f32);
+}
+
+TEST_SUITE_END() // Conv1x5
+
+TEST_SUITE(Conv5x1)
+FIXTURE_DATA_TEST_CASE(RunSmall, NEWinogradConvolutionLayerFixture<float>, framework::DatasetMode::PRECOMMIT,
+ combine(combine(combine(datasets::SmallWinogradConvolutionLayer5x1Dataset(),
+ framework::dataset::make("DataType", { DataType::F32 })),
+ ActivationFunctionsDataset),
+ framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
+{
+ // Validate output
+ validate(Accessor(_target), _reference, abs_tolerance_f32);
+}
+FIXTURE_DATA_TEST_CASE(RunLarge, NEWinogradConvolutionLayerFixture<float>, framework::DatasetMode::NIGHTLY,
+ combine(combine(combine(datasets::LargeWinogradConvolutionLayer5x1Dataset(),
+ framework::dataset::make("DataType", { DataType::F32 })),
+ ActivationFunctionsDataset),
+ framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
+{
+ // Validate output
+ validate(Accessor(_target), _reference, abs_tolerance_f32);
+}
+
+TEST_SUITE_END() // Conv5x1
+
TEST_SUITE(Conv3x3)
FIXTURE_DATA_TEST_CASE(RunSmall, NEWinogradConvolutionLayerFixture<float>, framework::DatasetMode::PRECOMMIT,
combine(combine(combine(datasets::SmallWinogradConvolutionLayer3x3Dataset(),