author      Pablo Tello <pablo.tello@arm.com>          2018-09-03 16:59:20 +0100
committer   Anthony Barbier <anthony.barbier@arm.com>  2018-11-02 16:54:54 +0000
commit      000d33a0e4bfc129a8f2968d4e5ee0793df70a1e (patch)
tree        6b850cb3ffad4be4970b9ab6b6c1ef74a1be69af /src/core/NEON/kernels/convolution
parent      e70ebc17658840f2099facca72e2194ae593f820 (diff)
download    ComputeLibrary-000d33a0e4bfc129a8f2968d4e5ee0793df70a1e.tar.gz
COMPMID-1552: support kernel sizes 1x7, 7x1, 1x5, 5x1 in NEWinograd
Refactored the validate method to make it easier to maintain in the future when adding support for new kernel sizes.

Change-Id: I12d9fe7af15ceb0e655cef61ca94407558fb29e8
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/146713
Tested-by: Jenkins <bsgcomp@arm.com>
Reviewed-by: Michalis Spyrou <michalis.spyrou@arm.com>
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
Diffstat (limited to 'src/core/NEON/kernels/convolution')
-rw-r--r--  src/core/NEON/kernels/convolution/winograd/transforms/output_2_7_fp32.cpp   170
-rw-r--r--  src/core/NEON/kernels/convolution/winograd/transforms/output_4_5_fp32.cpp   178
-rw-r--r--  src/core/NEON/kernels/convolution/winograd/transforms/weights_2_7_fp32.cpp  124
-rw-r--r--  src/core/NEON/kernels/convolution/winograd/transforms/weights_4_5_fp32.cpp  124
-rw-r--r--  src/core/NEON/kernels/convolution/winograd/winograd_gemm.cpp                  9
5 files changed, 605 insertions, 0 deletions
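
A note on the new transforms below: judging from the instantiations in this patch, WinogradGEMM<a, b, c, d> reads as <output tile rows, output tile cols, kernel rows, kernel cols>, and the Nx1 weight transforms redirect to the corresponding 1xN implementations. The Winograd inner tile is output tile + kernel - 1 samples per dimension, which is why both the F(2,7) and F(4,5) families read "1x8" tiles. A minimal standalone sketch of that relation (illustrative only, not part of the patch):

#include <cstdio>

// Length of the 1-D Winograd-domain (inner) tile for a given output tile and kernel.
constexpr int inner_tile(int output_tile, int kernel)
{
    return output_tile + kernel - 1;
}

int main()
{
    static_assert(inner_tile(2, 7) == 8, "F(2,7) uses a 1x8 inner tile");
    static_assert(inner_tile(4, 5) == 8, "F(4,5) uses a 1x8 inner tile");
    std::printf("F(2,7) inner tile: %d, F(4,5) inner tile: %d\n",
                inner_tile(2, 7), inner_tile(4, 5));
    return 0;
}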
diff --git a/src/core/NEON/kernels/convolution/winograd/transforms/output_2_7_fp32.cpp b/src/core/NEON/kernels/convolution/winograd/transforms/output_2_7_fp32.cpp
new file mode 100644
index 0000000000..cfd2029f11
--- /dev/null
+++ b/src/core/NEON/kernels/convolution/winograd/transforms/output_2_7_fp32.cpp
@@ -0,0 +1,170 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "arm_compute/core/NEON/kernels/convolution/winograd/transforms/output.hpp"
+#include "arm_compute/core/NEON/kernels/convolution/winograd/winograd_gemm.hpp"
+#include "arm_compute/core/NEON/kernels/convolution/common/arm.hpp"
+
+namespace winograd
+{
+
+using Transform = WinogradGEMM<1, 2, 1, 7>::OutputTransform<float>;
+using TransformTransposed = WinogradGEMM<2, 1, 7, 1>::OutputTransform<float>;
+
+template <>
+template <>
+int Transform::ops_performed(const Tensor4DShape &shape)
+{
+ (void) shape;
+ return 0; // TODO
+}
+
+template <>
+template <>
+template <int pad_bottom, int pad_right>
+void Transform::process_tile(
+ const int n_channels,
+ const float* const matrix_base,
+ const int matrix_stride,
+ const float* const biases,
+ float* const output,
+ const int output_row_stride,
+ const int output_col_stride
+)
+{
+ (void) output_row_stride;
+ constexpr int cells_j = output_tile_cols - pad_right;
+
+ // Construct a map to the output cells
+ float *outptrs[cells_j];
+ for (int j = 0; j < cells_j; j++)
+ {
+ outptrs[j] = output + j*output_col_stride;
+ }
+ const float *inptr = matrix_base;
+ const float *bptr = biases;
+
+ // For each channel of the output
+ int channels_remaining = n_channels;
+#ifdef __arm_any__
+ for (; channels_remaining >= 4; channels_remaining -= 4)
+ {
+ // Matrices used and computed during this transform
+ float32x4_t F[inner_tile_cols], f[output_tile_cols], b = vdupq_n_f32(0.0f);
+
+ // Read a 1x8 tile in the Winograd domain
+ for (int j = 0; j < inner_tile_cols; j++)
+ {
+ F[j] = vld1q_f32(inptr + j*matrix_stride);
+ }
+ inptr += 4;
+
+ f[0] = vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmulq_n_f32(F[6], 1), F[5], 1), F[4], 1), F[3], 1), F[2], 1), F[1], 1), F[0], 1);
+ f[1] = vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmulq_n_f32(F[7], 1), F[2], 1), F[6], 3), F[4], 2), F[3], -2), F[5], -3), F[1], -1);
+
+ // Write out the output tile
+ if (bptr != 0)
+ {
+ b = vld1q_f32(bptr);
+ bptr += 4;
+ }
+ for (int j = 0; j < cells_j; j++)
+ {
+ vst1q_f32(outptrs[j], f[j] + b);
+ outptrs[j] += 4;
+ }
+ }
+ for (; channels_remaining >= 2; channels_remaining -= 2)
+ {
+ // Matrices used and computed during this transform
+ float32x2_t F[inner_tile_cols], f[output_tile_cols], b = vdup_n_f32(0.0f);
+
+ // Read a 1x8 tile in the Winograd domain
+ for (int j = 0; j < inner_tile_cols; j++)
+ {
+ F[j] = vld1_f32(inptr + j*matrix_stride);
+ }
+ inptr += 2;
+
+ f[0] = vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmul_n_f32(F[6], 1), F[5], 1), F[4], 1), F[3], 1), F[2], 1), F[1], 1), F[0], 1);
+ f[1] = vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmul_n_f32(F[7], 1), F[2], 1), F[6], 3), F[4], 2), F[3], -2), F[5], -3), F[1], -1);
+
+ // Write out the output tile
+ if (bptr != 0)
+ {
+ b = vld1_f32(bptr);
+ bptr += 2;
+ }
+ for (int j = 0; j < cells_j; j++)
+ {
+ vst1_f32(outptrs[j], f[j] + b);
+ outptrs[j] += 2;
+ }
+ }
+#endif // __arm_any__
+ for (; channels_remaining; channels_remaining--)
+ {
+ // Matrices used and computed during this transform
+ float F[inner_tile_cols], f[output_tile_cols], b = 0.0f;
+
+ // Read a 1x8 tile in the Winograd domain
+ for (int j = 0; j < inner_tile_cols; j++)
+ {
+ F[j] = *(inptr + j*matrix_stride);
+ }
+ inptr++;
+
+ f[0] = F[0]*1 + F[1]*1 + F[2]*1 + F[3]*1 + F[4]*1 + F[5]*1 + F[6]*1;
+ f[1] = F[1]*-1 + F[5]*-3 + F[3]*-2 + F[4]*2 + F[6]*3 + F[2]*1 + F[7]*1;
+
+ // Write out the output tile
+ if (bptr != 0)
+ {
+ b = *(bptr++);
+ }
+ for (int j = 0; j < cells_j; j++)
+ {
+ *(outptrs[j]++) = f[j] + b;
+ }
+ }
+}
+
+template <>
+template <>
+const Transform::TileFn Transform::tile_fns[max_pad_bottom][max_pad_right] =
+{
+ {
+ Transform::template process_tile<0, 0>,
+ Transform::template process_tile<0, 1>,
+ },
+};
+
+
+template <>
+template <>
+const TransformTransposed::TileFn TransformTransposed::tile_fns[max_pad_bottom][max_pad_right] = {};
+
+template struct WinogradGEMM<1, 2, 1, 7>::OutputTransform<float>;
+template struct WinogradGEMM<2, 1, 7, 1>::OutputTransform<float>;
+} // namespace winograd
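
Read off the scalar fallback of process_tile above, the 1x7-kernel / 1x2-output transform computes f = A^T F; the coefficients below are taken directly from the code, not from an external reference:

\[
\mathbf{f} = A^{\mathsf T}\mathbf{F}, \qquad
A^{\mathsf T} =
\begin{bmatrix}
1 & 1 & 1 & 1 & 1 & 1 & 1 & 0\\
0 & -1 & 1 & -2 & 2 & -3 & 3 & 1
\end{bmatrix}
\]

The NEON paths compute the same two dot products four (or two) channels at a time with vmlaq_n_f32 / vmla_n_f32.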
diff --git a/src/core/NEON/kernels/convolution/winograd/transforms/output_4_5_fp32.cpp b/src/core/NEON/kernels/convolution/winograd/transforms/output_4_5_fp32.cpp
new file mode 100644
index 0000000000..2417f527bf
--- /dev/null
+++ b/src/core/NEON/kernels/convolution/winograd/transforms/output_4_5_fp32.cpp
@@ -0,0 +1,178 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "arm_compute/core/NEON/kernels/convolution/winograd/transforms/output.hpp"
+#include "arm_compute/core/NEON/kernels/convolution/winograd/winograd_gemm.hpp"
+#include "arm_compute/core/NEON/kernels/convolution/common/arm.hpp"
+
+namespace winograd
+{
+
+using Transform = WinogradGEMM<1, 4, 1, 5>::OutputTransform<float>;
+using TransformTransposed = WinogradGEMM<4, 1, 5, 1>::OutputTransform<float>;
+
+template <>
+template <>
+int Transform::ops_performed(const Tensor4DShape &shape)
+{
+ (void) shape;
+ return 0; // TODO
+}
+
+template <>
+template <>
+template <int pad_bottom, int pad_right>
+void Transform::process_tile(
+ const int n_channels,
+ const float* const matrix_base,
+ const int matrix_stride,
+ const float* const biases,
+ float* const output,
+ const int output_row_stride,
+ const int output_col_stride
+)
+{
+ (void) output_row_stride;
+ constexpr int cells_j = output_tile_cols - pad_right;
+
+ // Construct a map to the output cells
+ float *outptrs[cells_j];
+ for (int j = 0; j < cells_j; j++)
+ {
+ outptrs[j] = output + j*output_col_stride;
+ }
+ const float *inptr = matrix_base;
+ const float *bptr = biases;
+
+ // For each channel of the output
+ int channels_remaining = n_channels;
+#ifdef __arm_any__
+ for (; channels_remaining >= 4; channels_remaining -= 4)
+ {
+ // Matrices used and computed during this transform
+ float32x4_t F[inner_tile_cols], f[output_tile_cols], b = vdupq_n_f32(0.0f);
+
+ // Read a 1x8 tile in the Winograd domain
+ for (int j = 0; j < inner_tile_cols; j++)
+ {
+ F[j] = vld1q_f32(inptr + j*matrix_stride);
+ }
+ inptr += 4;
+
+ f[0] = vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmulq_n_f32(F[6], 1), F[5], 1), F[4], 1), F[3], 1), F[2], 1), F[1], 1), F[0], 1);
+ f[1] = vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmulq_n_f32(F[2], 1), F[6], 3), F[4], 2), F[3], -2), F[5], -3), F[1], -1);
+ f[2] = vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmulq_n_f32(F[2], 1), F[1], 1), F[6], 9), F[5], 9), F[4], 4), F[3], 4);
+ f[3] = vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmlaq_n_f32(vmulq_n_f32(F[7], 1), F[2], 1), F[6], 27), F[4], 8), F[3], -8), F[5], -27), F[1], -1);
+
+ // Write out the output tile
+ if (bptr != 0)
+ {
+ b = vld1q_f32(bptr);
+ bptr += 4;
+ }
+ for (int j = 0; j < cells_j; j++)
+ {
+ vst1q_f32(outptrs[j], f[j] + b);
+ outptrs[j] += 4;
+ }
+ }
+ for (; channels_remaining >= 2; channels_remaining -= 2)
+ {
+ // Matrices used and computed during this transform
+ float32x2_t F[inner_tile_cols], f[output_tile_cols], b = vdup_n_f32(0.0f);
+
+ // Read a 1x8 tile in the Winograd domain
+ for (int j = 0; j < inner_tile_cols; j++)
+ {
+ F[j] = vld1_f32(inptr + j*matrix_stride);
+ }
+ inptr += 2;
+
+ f[0] = vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmul_n_f32(F[6], 1), F[5], 1), F[4], 1), F[3], 1), F[2], 1), F[1], 1), F[0], 1);
+ f[1] = vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmul_n_f32(F[2], 1), F[6], 3), F[4], 2), F[3], -2), F[5], -3), F[1], -1);
+ f[2] = vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmul_n_f32(F[2], 1), F[1], 1), F[6], 9), F[5], 9), F[4], 4), F[3], 4);
+ f[3] = vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmla_n_f32(vmul_n_f32(F[7], 1), F[2], 1), F[6], 27), F[4], 8), F[3], -8), F[5], -27), F[1], -1);
+
+ // Write out the output tile
+ if (bptr != 0)
+ {
+ b = vld1_f32(bptr);
+ bptr += 2;
+ }
+ for (int j = 0; j < cells_j; j++)
+ {
+ vst1_f32(outptrs[j], f[j] + b);
+ outptrs[j] += 2;
+ }
+ }
+#endif // __arm_any__
+ for (; channels_remaining; channels_remaining--)
+ {
+ // Matrices used and computed during this transform
+ float F[inner_tile_cols], f[output_tile_cols], b = 0.0f;
+
+ // Read a 1x8 tile in the Winograd domain
+ for (int j = 0; j < inner_tile_cols; j++)
+ {
+ F[j] = *(inptr + j*matrix_stride);
+ }
+ inptr++;
+
+ f[0] = F[0]*1 + F[1]*1 + F[2]*1 + F[3]*1 + F[4]*1 + F[5]*1 + F[6]*1;
+ f[1] = F[1]*-1 + F[5]*-3 + F[3]*-2 + F[4]*2 + F[6]*3 + F[2]*1;
+ f[2] = F[3]*4 + F[4]*4 + F[5]*9 + F[6]*9 + F[1]*1 + F[2]*1;
+ f[3] = F[1]*-1 + F[5]*-27 + F[3]*-8 + F[4]*8 + F[6]*27 + F[2]*1 + F[7]*1;
+
+ // Write out the output tile
+ if (bptr != 0)
+ {
+ b = *(bptr++);
+ }
+ for (int j = 0; j < cells_j; j++)
+ {
+ *(outptrs[j]++) = f[j] + b;
+ }
+ }
+}
+
+template <>
+template <>
+const Transform::TileFn Transform::tile_fns[max_pad_bottom][max_pad_right] =
+{
+ {
+ Transform::template process_tile<0, 0>,
+ Transform::template process_tile<0, 1>,
+ Transform::template process_tile<0, 2>,
+ Transform::template process_tile<0, 3>,
+ },
+};
+
+template <>
+template <>
+const TransformTransposed::TileFn TransformTransposed::tile_fns[max_pad_bottom][max_pad_right] = {};
+
+
+template struct WinogradGEMM<1, 4, 1, 5>::OutputTransform<float>;
+template struct WinogradGEMM<4, 1, 5, 1>::OutputTransform<float>;
+} // namespace winograd
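
Likewise, the scalar path of the 1x5-kernel / 1x4-output transform above amounts to f = A^T F with a 4x8 matrix, again read directly off the code:

\[
A^{\mathsf T} =
\begin{bmatrix}
1 & 1 & 1 & 1 & 1 & 1 & 1 & 0\\
0 & -1 & 1 & -2 & 2 & -3 & 3 & 0\\
0 & 1 & 1 & 4 & 4 & 9 & 9 & 0\\
0 & -1 & 1 & -8 & 8 & -27 & 27 & 1
\end{bmatrix}
\]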
diff --git a/src/core/NEON/kernels/convolution/winograd/transforms/weights_2_7_fp32.cpp b/src/core/NEON/kernels/convolution/winograd/transforms/weights_2_7_fp32.cpp
new file mode 100644
index 0000000000..85cf418656
--- /dev/null
+++ b/src/core/NEON/kernels/convolution/winograd/transforms/weights_2_7_fp32.cpp
@@ -0,0 +1,124 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "arm_compute/core/NEON/kernels/convolution/common/arm.hpp"
+#include "arm_compute/core/NEON/kernels/convolution/winograd/winograd_gemm.hpp"
+#include "arm_compute/core/NEON/kernels/convolution/winograd/transforms/kernel.hpp"
+
+namespace winograd
+{
+ template <>
+ template <>
+ void WinogradGEMM<1, 2, 1, 7>::WeightsTransform<float>::execute(
+ const int n_output_channels,
+ const int n_input_channels,
+ const float* const input, // NOTE: Data in HWIO order
+ float* const output,
+ const int matrix_stride,
+ const int matrix_row_stride
+ )
+ {
+ // Get pointers to each cell of the weight tensor
+ const auto weight_col_stride = n_input_channels * n_output_channels;
+ const float *inptrs[kernel_cols];
+ for (int j = 0; j < kernel_cols; j++)
+ {
+ inptrs[j] = input + j*weight_col_stride;
+ }
+
+ // For each input channel
+ for (int ic = 0; ic < n_input_channels; ic++)
+ {
+ float *outptr = output + ic * matrix_row_stride;
+
+ // For each output channel
+ int channels_remaining = n_output_channels;
+ for (; channels_remaining; channels_remaining--)
+ {
+ // Matrices used and computed in this kernel
+ float w[kernel_cols], V[inner_tile_cols];
+
+ // Read weights
+ for (int j = 0; j < kernel_cols; j++)
+ {
+ w[j] = *(inptrs[j]++);
+ }
+
+ // Compute V = w WT
+ V[0] = (w[0]*-1) / 36.0f;
+ V[1] = (w[1]*-1 + w[3]*-1 + w[5]*-1 + w[0]*1 + w[2]*1 + w[4]*1 + w[6]*1) / 48.0f;
+ V[2] = (w[0]*1 + w[1]*1 + w[2]*1 + w[3]*1 + w[4]*1 + w[5]*1 + w[6]*1) / 48.0f;
+ V[3] = (w[0]*-1 + w[6]*-64 + w[4]*-16 + w[2]*-4 + w[1]*2 + w[3]*8 + w[5]*32) / 120.0f;
+ V[4] = (w[0]*-1 + w[6]*-64 + w[5]*-32 + w[4]*-16 + w[3]*-8 + w[2]*-4 + w[1]*-2) / 120.0f;
+ V[5] = (w[5]*-243 + w[3]*-27 + w[1]*-3 + w[2]*9 + w[4]*81 + w[6]*729 + w[0]*1) / 720.0f;
+ V[6] = (w[1]*3 + w[2]*9 + w[3]*27 + w[4]*81 + w[5]*243 + w[6]*729 + w[0]*1) / 720.0f;
+ V[7] = (w[6]*1) / 1.0f;
+
+ // Store the transformed weights
+ for (int j = 0; j < inner_tile_cols; j++)
+ {
+ *(outptr + j*matrix_stride) = V[j];
+ }
+ outptr++;
+ }
+ }
+ }
+
+ template <>
+ template <>
+ int WinogradGEMM<1, 2, 1, 7>::WeightsTransform<float>::ops_performed(const KernelShape &shape)
+ {
+ (void) shape;
+ return 0; // TODO
+ }
+
+ template <>
+ template <>
+ void WinogradGEMM<2, 1, 7, 1>::WeightsTransform<float>::execute(
+ const int n_output_channels,
+ const int n_input_channels,
+ const float* const input, // NOTE: Data in HWIO order
+ float* const output,
+ const int matrix_stride,
+ const int matrix_row_stride
+ )
+ {
+ // Redirect to the 1xN implementation
+ WinogradGEMM<1, 2, 1, 7>::template WeightsTransform<float>::execute(
+ n_output_channels, n_input_channels, input, output, matrix_stride,
+ matrix_row_stride
+ );
+ }
+
+ template <>
+ template <>
+ int WinogradGEMM<2, 1, 7, 1>::WeightsTransform<float>::ops_performed(const KernelShape &shape)
+ {
+ (void) shape;
+ return 0; // TODO
+ }
+
+ template struct WinogradGEMM<1, 2, 1, 7>::WeightsTransform<float>;
+ template struct WinogradGEMM<2, 1, 7, 1>::WeightsTransform<float>;
+}
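
The "Compute V = w WT" step above maps the 7 kernel taps to 8 Winograd-domain values; collecting the coefficients and the /36, /48, /120, /720 divisors from the code, it is equivalent to:

\[
V = \operatorname{diag}\!\left(\tfrac{1}{36},\tfrac{1}{48},\tfrac{1}{48},\tfrac{1}{120},\tfrac{1}{120},\tfrac{1}{720},\tfrac{1}{720},1\right)
\begin{bmatrix}
-1 & 0 & 0 & 0 & 0 & 0 & 0\\
 1 & -1 & 1 & -1 & 1 & -1 & 1\\
 1 & 1 & 1 & 1 & 1 & 1 & 1\\
-1 & 2 & -4 & 8 & -16 & 32 & -64\\
-1 & -2 & -4 & -8 & -16 & -32 & -64\\
 1 & -3 & 9 & -27 & 81 & -243 & 729\\
 1 & 3 & 9 & 27 & 81 & 243 & 729\\
 0 & 0 & 0 & 0 & 0 & 0 & 1
\end{bmatrix} w
\]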
diff --git a/src/core/NEON/kernels/convolution/winograd/transforms/weights_4_5_fp32.cpp b/src/core/NEON/kernels/convolution/winograd/transforms/weights_4_5_fp32.cpp
new file mode 100644
index 0000000000..2f14e20142
--- /dev/null
+++ b/src/core/NEON/kernels/convolution/winograd/transforms/weights_4_5_fp32.cpp
@@ -0,0 +1,124 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "arm_compute/core/NEON/kernels/convolution/common/arm.hpp"
+#include "arm_compute/core/NEON/kernels/convolution/winograd/winograd_gemm.hpp"
+#include "arm_compute/core/NEON/kernels/convolution/winograd/transforms/kernel.hpp"
+
+namespace winograd
+{
+ template <>
+ template <>
+ void WinogradGEMM<1, 4, 1, 5>::WeightsTransform<float>::execute(
+ const int n_output_channels,
+ const int n_input_channels,
+ const float* const input, // NOTE: Data in HWIO order
+ float* const output,
+ const int matrix_stride,
+ const int matrix_row_stride
+ )
+ {
+ // Get pointers to each cell of the weight tensor
+ const auto weight_col_stride = n_input_channels * n_output_channels;
+ const float *inptrs[kernel_cols];
+ for (int j = 0; j < kernel_cols; j++)
+ {
+ inptrs[j] = input + j*weight_col_stride;
+ }
+
+ // For each input channel
+ for (int ic = 0; ic < n_input_channels; ic++)
+ {
+ float *outptr = output + ic * matrix_row_stride;
+
+ // For each output channel
+ int channels_remaining = n_output_channels;
+ for (; channels_remaining; channels_remaining--)
+ {
+ // Matrices used and computed in this kernel
+ float w[kernel_cols], V[inner_tile_cols];
+
+ // Read weights
+ for (int j = 0; j < kernel_cols; j++)
+ {
+ w[j] = *(inptrs[j]++);
+ }
+
+ // Compute V = w WT
+ V[0] = (w[0]*-1) / 36;
+ V[1] = (w[1]*-1 + w[3]*-1 + w[0]*1 + w[2]*1 + w[4]*1) / 48;
+ V[2] = (w[0]*1 + w[1]*1 + w[2]*1 + w[3]*1 + w[4]*1) / 48;
+ V[3] = (w[0]*-1 + w[4]*-16 + w[2]*-4 + w[1]*2 + w[3]*8) / 120;
+ V[4] = (w[0]*-1 + w[4]*-16 + w[3]*-8 + w[2]*-4 + w[1]*-2) / 120;
+ V[5] = (w[3]*-27 + w[1]*-3 + w[2]*9 + w[4]*81 + w[0]*1) / 720;
+ V[6] = (w[1]*3 + w[2]*9 + w[3]*27 + w[4]*81 + w[0]*1) / 720;
+ V[7] = (w[4]*1) / 1;
+
+ // Store the transformed weights
+ for (int j = 0; j < inner_tile_cols; j++)
+ {
+ *(outptr + j*matrix_stride) = V[j];
+ }
+ outptr++;
+ }
+ }
+ }
+
+ template <>
+ template <>
+ int WinogradGEMM<1, 4, 1, 5>::WeightsTransform<float>::ops_performed(const KernelShape &shape)
+ {
+ (void) shape;
+ return 0; // TODO
+ }
+
+ template <>
+ template <>
+ void WinogradGEMM<4, 1, 5, 1>::WeightsTransform<float>::execute(
+ const int n_output_channels,
+ const int n_input_channels,
+ const float* const input, // NOTE: Data in HWIO order
+ float* const output,
+ const int matrix_stride,
+ const int matrix_row_stride
+ )
+ {
+ // Redirect to the 1xN implementation
+ WinogradGEMM<1, 4, 1, 5>::template WeightsTransform<float>::execute(
+ n_output_channels, n_input_channels, input, output, matrix_stride,
+ matrix_row_stride
+ );
+ }
+
+ template <>
+ template <>
+ int WinogradGEMM<4, 1, 5, 1>::WeightsTransform<float>::ops_performed(const KernelShape &shape)
+ {
+ (void) shape;
+ return 0; // TODO
+ }
+
+ template struct WinogradGEMM<1, 4, 1, 5>::WeightsTransform<float>;
+ template struct WinogradGEMM<4, 1, 5, 1>::WeightsTransform<float>;
+}
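
The 1x5 weight transform is the same construction restricted to five taps; from the code above:

\[
V = \operatorname{diag}\!\left(\tfrac{1}{36},\tfrac{1}{48},\tfrac{1}{48},\tfrac{1}{120},\tfrac{1}{120},\tfrac{1}{720},\tfrac{1}{720},1\right)
\begin{bmatrix}
-1 & 0 & 0 & 0 & 0\\
 1 & -1 & 1 & -1 & 1\\
 1 & 1 & 1 & 1 & 1\\
-1 & 2 & -4 & 8 & -16\\
-1 & -2 & -4 & -8 & -16\\
 1 & -3 & 9 & -27 & 81\\
 1 & 3 & 9 & 27 & 81\\
 0 & 0 & 0 & 0 & 1
\end{bmatrix} w
\]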
diff --git a/src/core/NEON/kernels/convolution/winograd/winograd_gemm.cpp b/src/core/NEON/kernels/convolution/winograd/winograd_gemm.cpp
index d544fd5710..a7de2fd3e5 100644
--- a/src/core/NEON/kernels/convolution/winograd/winograd_gemm.cpp
+++ b/src/core/NEON/kernels/convolution/winograd/winograd_gemm.cpp
@@ -229,3 +229,12 @@ template class WinogradGEMM<1, 6, 1, 3>::Convolution<float, float>;
template class WinogradGEMM<6, 1, 3, 1>::Convolution<float, float>;
template class WinogradGEMM<2, 2, 5, 5>::Convolution<float, float>;
+
+template class WinogradGEMM<1, 4, 1, 5>::Convolution<float, float>;
+template class WinogradGEMM<4, 1, 5, 1>::Convolution<float, float>;
+
+template class WinogradGEMM<1, 2, 1, 7>::Convolution<float, float>;
+template class WinogradGEMM<2, 1, 7, 1>::Convolution<float, float>;
+
+
+
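
The instantiations added to winograd_gemm.cpp pair each new kernel shape with its output tile (1x7 with 1x2, 7x1 with 2x1, 1x5 with 1x4, 5x1 with 4x1). A small hypothetical sketch of that mapping, illustrative only and not ACL API (the refactored validate mentioned in the commit message is not part of this diff):

#include <array>
#include <cstdio>

struct WinogradPair
{
    int kernel_rows, kernel_cols;  // filter shape
    int output_rows, output_cols;  // output tile produced per Winograd tile
};

// The (kernel, output tile) pairs instantiated by this patch.
constexpr std::array<WinogradPair, 4> added_pairs{{
    {1, 7, 1, 2},
    {7, 1, 2, 1},
    {1, 5, 1, 4},
    {5, 1, 4, 1},
}};

int main()
{
    for (const auto &p : added_pairs)
    {
        std::printf("kernel %dx%d -> output tile %dx%d\n",
                    p.kernel_rows, p.kernel_cols, p.output_rows, p.output_cols);
    }
    return 0;
}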