author    Pablo Tello <pablo.tello@arm.com>    2018-10-05 10:59:48 +0100
committer Anthony Barbier <anthony.barbier@arm.com>    2018-11-02 16:55:45 +0000
commit    d3d97d27645efe90505a62cd48079ad06a7cf283 (patch)
tree      2305b5f0b10b93f355413dd8070771537645492b /arm_compute
parent    709d27bcb451f4425e688024c629830e04b61cb0 (diff)
download  ComputeLibrary-d3d97d27645efe90505a62cd48079ad06a7cf283.tar.gz
COMPMID-1623: NEWinograd reduce the number of output tiles.
Change-Id: I4d9240924fe483d2dd127ad6a4ae6f8066f61bd1
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/151893
Tested-by: bsgcomp <bsgcomp@arm.com>
Reviewed-by: Andrew Mundy <andrew.mundy@arm.com>
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
Diffstat (limited to 'arm_compute')
-rw-r--r--  arm_compute/core/NEON/kernels/convolution/winograd/transforms/output.hpp           186
-rw-r--r--  arm_compute/core/NEON/kernels/convolution/winograd/winograd_gemm.hpp                 95
-rw-r--r--  arm_compute/core/NEON/kernels/convolution/winograd/winograd_output_transform.hpp    232
3 files changed, 359 insertions, 154 deletions
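
The main change in transforms/output.hpp below is that the dense [pad_bottom][pad_right] table of template-specialised tile functions is dropped; instead a small set of tile processors (unpadded, bottom-padded, right-padded, and a generic fallback) is selected at run time via get_tile_specialization(). The following self-contained sketch illustrates that dispatch pattern only; the function names and bodies are illustrative stand-ins, not the library's own.

#include <cstdio>

// Illustrative stand-ins for the specialised tile processors.
typedef void (*TileFn)(int pad_bottom, int pad_right);

static void tile_unpadded(int, int)         { std::puts("unpadded fast path"); }
static void tile_bottom_padded(int pb, int) { std::printf("bottom padding %d\n", pb); }
static void tile_right_padded(int, int pr)  { std::printf("right padding %d\n", pr); }
static void tile_generic(int pb, int pr)    { std::printf("generic tile %d/%d\n", pb, pr); }

// Runtime selection in the spirit of get_tile_specialization(): only a handful
// of function pointers are kept instead of one specialisation per
// (pad_bottom, pad_right) combination.
static TileFn get_tile_specialization(int pad_bottom, int pad_right)
{
  if (!(pad_bottom || pad_right)) return tile_unpadded;
  if (pad_bottom && !pad_right)   return tile_bottom_padded;
  if (!pad_bottom && pad_right)   return tile_right_padded;
  return tile_generic;
}

int main()
{
  get_tile_specialization(0, 0)(0, 0);  // interior tile
  get_tile_specialization(1, 2)(1, 2);  // bottom-right corner tile
  return 0;
}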
diff --git a/arm_compute/core/NEON/kernels/convolution/winograd/transforms/output.hpp b/arm_compute/core/NEON/kernels/convolution/winograd/transforms/output.hpp
index 00327f5102..77cd9de513 100644
--- a/arm_compute/core/NEON/kernels/convolution/winograd/transforms/output.hpp
+++ b/arm_compute/core/NEON/kernels/convolution/winograd/transforms/output.hpp
@@ -27,10 +27,10 @@
namespace winograd
{
- template <int output_tile_rows, int output_tile_cols,
- int kernel_rows, int kernel_cols>
- template <typename T>
- void WinogradGEMM<output_tile_rows, output_tile_cols, kernel_rows, kernel_cols>::OutputTransform<T>::execute(
+/***************************************************************************/
+ /* Instance-less API */
+ template <int KernelRows, int KernelCols, int InnerTileRows, int InnerTileCols, typename T>
+ void OutputTransformImpl<KernelRows, KernelCols, InnerTileRows, InnerTileCols, T>::execute(
const int n_batches,
const int output_batch_stride,
const int n_rows,
@@ -45,28 +45,12 @@ namespace winograd
T* const output
)
{
- // If an Nx1 kernel then transpose and redirect to the 1xN implementation.
- if (kernel_cols == 1)
- {
- WinogradGEMM<output_tile_cols, output_tile_rows, kernel_cols, kernel_rows>::
- template OutputTransform<T>::execute(
- n_batches,
- output_batch_stride,
- n_cols, output_col_stride,
- n_rows, output_row_stride,
- n_channels,
- matrix_base, matrix_stride, matrix_row_stride,
- biases, output
- );
- return;
- }
-
// Compute the number of tiles and hence the padding required on the bottom
// and right of the image.
- const int tile_M = iceildiv(n_rows, output_tile_rows);
- const int tile_N = iceildiv(n_cols, output_tile_cols);
- const int pad_bottom = output_tile_rows*tile_M - n_rows;
- const int pad_right = output_tile_cols*tile_N - n_cols;
+ const int tile_M = iceildiv(n_rows, OutputTileRows);
+ const int tile_N = iceildiv(n_cols, OutputTileCols);
+ const int pad_bottom = OutputTileRows*tile_M - n_rows;
+ const int pad_right = OutputTileCols*tile_N - n_cols;
const int matrix_tile_row_stride = tile_N * matrix_row_stride;
const int matrix_batch_stride = tile_M * matrix_tile_row_stride;
@@ -84,7 +68,7 @@ namespace winograd
// Compute properties of this row of output tiles
const int row_pad_bottom = (tile_i < tile_M - 1) ? 0: pad_bottom;
const T* const matrix_tile_row = matrix_batch + tile_i * matrix_tile_row_stride;
- T* const outptr_row = outptr_batch + output_tile_rows*tile_i*output_row_stride;
+ T* const outptr_row = outptr_batch + OutputTileRows*tile_i*output_row_stride;
// Process the row
process_tile_row(
@@ -97,10 +81,36 @@ namespace winograd
}
}
- template <int output_tile_rows, int output_tile_cols,
- int kernel_rows, int kernel_cols>
- template <typename T>
- void WinogradGEMM<output_tile_rows, output_tile_cols, kernel_rows, kernel_cols>::OutputTransform<T>::process_tile_row(
+template <int KernelRows, int InnerTileRows, typename T>
+ void OutputTransformImpl<KernelRows, 1, InnerTileRows, 1, T>::execute(
+ const int n_batches,
+ const int output_batch_stride,
+ const int n_rows,
+ const int output_row_stride,
+ const int n_cols,
+ const int output_col_stride,
+ const int n_channels,
+ const T* const matrix_base,
+ const int matrix_stride,
+ const int matrix_row_stride,
+ const T* const biases,
+ T* const output
+ )
+ {
+ // If an Nx1 kernel then transpose and redirect to the 1xN implementation.
+ OutputTransformImpl<1, KernelRows, 1, InnerTileRows, T>::execute(
+ n_batches,
+ output_batch_stride,
+ n_cols, output_col_stride,
+ n_rows, output_row_stride,
+ n_channels,
+ matrix_base, matrix_stride, matrix_row_stride,
+ biases, output
+ );
+ }
+
+ template <int KernelRows, int KernelCols, int InnerTileRows, int InnerTileCols, typename T>
+ void OutputTransformImpl<KernelRows, KernelCols, InnerTileRows, InnerTileCols, T>::process_tile_row(
const int tile_N,
const int n_channels,
const T* const matrix_base,
@@ -114,48 +124,27 @@ namespace winograd
const int row_pad_right
)
{
- if (kernel_cols == 1)
- {
- // If an Nx1 implementation then this should never be reached.
- return;
- }
-
// Loop over columns of tiles
for (int tile_j = 0; tile_j < tile_N; tile_j++)
{
// Properties of this tile
const int tile_pad_right = (tile_j < tile_N - 1) ? 0 : row_pad_right;
const T* const matrix_row = matrix_base + tile_j * matrix_row_stride;
- T* const outptr = output + output_tile_cols*tile_j*output_col_stride;
+ T* const outptr = output + OutputTileCols *tile_j*output_col_stride;
// Perform the output transformation
- tile_fns[row_pad_bottom][tile_pad_right](
+ const typename Tiles::TileFn tilefn = Tiles::get_tile_specialization(row_pad_bottom, tile_pad_right);
+ tilefn(
n_channels, matrix_row, matrix_stride, biases,
- outptr, output_row_stride, output_col_stride
+ outptr, output_row_stride, output_col_stride,
+ row_pad_bottom, tile_pad_right
);
}
}
- template <int output_tile_rows, int output_tile_cols, int kr, int kc>
- template <typename T>
- size_t WinogradGEMM<output_tile_rows, output_tile_cols, kr, kc>::OutputTransform<T>::bytes_read(const Tensor4DShape &shape)
- {
- const int M = iceildiv(shape.n_rows, output_tile_rows) *
- iceildiv(shape.n_cols, output_tile_cols);
- const int N = shape.n_channels;
- return inner_tile_rows * inner_tile_cols * M * N * sizeof(T);
- }
-
- template <int otr, int otc, int kr, int kc>
- template <typename T>
- size_t WinogradGEMM<otr, otc, kr, kc>::OutputTransform<T>::bytes_written(const Tensor4DShape &shape)
- {
- return shape.size() * sizeof(T);
- }
-
- template <int output_tile_rows, int output_tile_cols, int kr, int kc>
- template <typename T>
- WinogradGEMM<output_tile_rows, output_tile_cols, kr, kc>::OutputTransform<T>::OutputTransform(
+/***************************************************************************/
+ template <int KernelRows, int KernelCols, int InnerTileRows, int InnerTileCols, typename T>
+ OutputTransform<KernelRows, KernelCols, InnerTileRows, InnerTileCols, T>::OutputTransform(
const T* const matrix_base,
const int matrix_stride,
const int matrix_row_stride,
@@ -171,26 +160,24 @@ namespace winograd
) : _matrix_base(matrix_base), _biases(biases),
_matrix_stride(matrix_stride), _matrix_row_stride(matrix_row_stride),
_outptr(output), _n_batches(n_batches), _n_rows(n_rows), _n_cols(n_cols),
- _n_channels(n_channels), _tile_M(iceildiv(n_rows, output_tile_rows)),
- _tile_N(iceildiv(n_cols, output_tile_cols)),
+ _n_channels(n_channels), _tile_M(iceildiv(n_rows, OutputTileRows)),
+ _tile_N(iceildiv(n_cols, OutputTileCols)),
_out_col_stride(out_col_stride ? out_col_stride : n_channels),
_out_row_stride(out_row_stride ? out_row_stride : n_cols * _out_col_stride),
_out_batch_stride(out_batch_stride ? out_batch_stride : n_rows * _out_row_stride)
{
}
- template <int otr, int otc, int kr, int kc>
- template <typename T>
- unsigned int WinogradGEMM<otr, otc, kr, kc>::OutputTransform<T>::get_window() const
+ template <int KernelRows, int KernelCols, int InnerTileRows, int InnerTileCols, typename T>
+ unsigned int OutputTransform<KernelRows, KernelCols, InnerTileRows, InnerTileCols, T>::get_window() const
{
// The final window includes the tail, all other windows will be a multiple
// of the window block in size.
return iceildiv(_n_channels, WINDOW_BLOCK);
}
- template <int otr, int otc, int kr, int kc>
- template <typename T>
- void WinogradGEMM<otr, otc, kr, kc>::OutputTransform<T>::run(
+template <int KernelRows, int KernelCols, int InnerTileRows, int InnerTileCols, typename T>
+ void OutputTransform<KernelRows, KernelCols, InnerTileRows, InnerTileCols, T>::run(
const unsigned int start, const unsigned int stop
)
{
@@ -221,4 +208,71 @@ namespace winograd
_outptr + start_channel
);
}
+
+ template <int KernelRows, int KernelCols, int InnerTileRows, int InnerTileCols, typename T>
+ void OutputTransform<KernelRows, KernelCols, InnerTileRows, InnerTileCols, T>::execute(
+ const int n_batches,
+ const int out_batch_stride,
+ const int n_rows,
+ const int out_row_stride,
+ const int n_cols,
+ const int out_col_stride,
+ const int n_channels,
+ const T* const matrix_base,
+ const int matrix_stride,
+ const int matrix_row_stride,
+ const T* const biases,
+ T* const output
+ )
+ {
+ Transform::execute(
+ n_batches, out_batch_stride,
+ n_rows, out_row_stride,
+ n_cols, out_col_stride, n_channels,
+ matrix_base, matrix_stride, matrix_row_stride,
+ biases, output
+ );
+ }
+
+ template <int KernelCols, int InnerTileCols, typename T>
+ typename OutputTransformImplTiles<1, KernelCols, 1, InnerTileCols, T>::TileFn
+ OutputTransformImplTiles<1, KernelCols, 1, InnerTileCols, T>::
+ get_tile_specialization(const int pad_bottom, const int pad_right)
+ {
+ (void) pad_bottom;
+
+ if (!pad_right)
+ {
+ // No padding, return unpadded specialisation
+ return tilefn_unpadded;
+ }
+ else
+ {
+ return tilefn_right_padded[pad_right - 1];
+ }
+ }
+
+ template <int KernelRows, int KernelCols, int InnerTileRows, int InnerTileCols, typename T>
+ typename OutputTransformImplTiles<KernelRows, KernelCols, InnerTileRows, InnerTileCols, T>::TileFn
+ OutputTransformImplTiles<KernelRows, KernelCols, InnerTileRows, InnerTileCols, T>::
+ get_tile_specialization(const int pad_bottom, const int pad_right)
+ {
+ if (!(pad_bottom || pad_right))
+ {
+ // No padding, return unpadded specialisation
+ return tilefn_unpadded;
+ }
+ else if (pad_bottom && !pad_right)
+ {
+ return tilefn_bottom_padded[pad_bottom - 1];
+ }
+ else if (!pad_bottom && pad_right)
+ {
+ return tilefn_right_padded[pad_right - 1];
+ }
+ else
+ {
+ return tilefn_generic;
+ }
+ }
} // namespace winograd
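
The handling of Nx1 kernels also changes shape in the hunks above: the old code tested kernel_cols == 1 at run time inside execute() and bailed out of process_tile_row(), whereas the new code gives the Nx1 case its own partial specialisation that swaps rows and columns and forwards to the 1xN implementation. A small sketch of that forwarding pattern, using hypothetical names rather than the library's classes, is:

#include <cstdio>

// Primary template: stands in for the genuine 1xN (or general) implementation.
template <int KernelRows, int KernelCols>
struct TransformSketch
{
  static void execute(int n_rows, int n_cols)
  {
    std::printf("%dx%d kernel on a %dx%d output\n",
                KernelRows, KernelCols, n_rows, n_cols);
  }
};

// Partial specialisation for Nx1 kernels: transpose the problem and reuse the
// 1xN code instead of carrying a duplicate implementation.
template <int KernelRows>
struct TransformSketch<KernelRows, 1>
{
  static void execute(int n_rows, int n_cols)
  {
    TransformSketch<1, KernelRows>::execute(n_cols, n_rows);
  }
};

int main()
{
  TransformSketch<1, 3>::execute(4, 56);  // 1x3 kernel, handled directly
  TransformSketch<3, 1>::execute(56, 4);  // 3x1 kernel, forwarded as its transpose
  return 0;
}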
diff --git a/arm_compute/core/NEON/kernels/convolution/winograd/winograd_gemm.hpp b/arm_compute/core/NEON/kernels/convolution/winograd/winograd_gemm.hpp
index 31aee35fab..71b5fd516f 100644
--- a/arm_compute/core/NEON/kernels/convolution/winograd/winograd_gemm.hpp
+++ b/arm_compute/core/NEON/kernels/convolution/winograd/winograd_gemm.hpp
@@ -31,6 +31,7 @@
#include "arm_compute/core/NEON/kernels/convolution/common/tensor.hpp"
#include "arm_compute/core/NEON/kernels/convolution/common/utils.hpp"
#include "winograd_input_transform.hpp"
+#include "winograd_output_transform.hpp"
#include <thread>
#include <utility>
@@ -124,95 +125,13 @@ class WinogradGEMM
/** Transform output feature maps from the Winograd to the spatial domain.
*/
template <typename T>
- struct OutputTransform
- {
- /** Get the bytes read during the transform. */
- static size_t bytes_read(const Tensor4DShape &shape);
-
- /** Get the bytes written during the transform. */
- static size_t bytes_written(const Tensor4DShape &shape);
-
- /** Get the count of operations performed by the transform. */
- static int ops_performed(const Tensor4DShape &shape);
-
- /** Apply the transform to create a tensor. */
- static void execute(
- const int n_batches,
- const int out_batch_stride,
- const int n_rows,
- const int out_row_stride,
- const int n_cols,
- const int out_col_stride,
- const int n_channels,
- const T* const matrix_base,
- const int matrix_stride,
- const int matrix_row_stride,
- const T* const biases,
- T* const output
- );
-
- /***********************************************************************/
- /** Create an OutputTransform operator fixed on a given problem and set
- * of pointers.
- */
- OutputTransform(
- const T* const matrix_base, /** Pointer to base of matrices. */
- const int matrix_stride, /** Stride between matrices. */
- const int matrix_row_stride, /** Stride within a matrix. */
- const T* const biases, /** Pointer to biases vector. */
- T* const output, /** Pointer to output tensor. */
- const int n_batches, /** Number of batches in output tensor. */
- const int n_rows, /** Number of rows in output tensor. */
- const int n_cols, /** Number of columns in output tensor. */
- const int n_channels, /** Number of channels in output tensor. */
- const int out_batch_stride=0, /** Output batch stride. */
- const int out_row_stride=0, /** Output row stride. */
- const int out_col_stride=0 /** Output column stride. */
- );
-
- /** Get the window of work a given operator can perform. */
- unsigned int get_window() const;
- static constexpr unsigned int WINDOW_BLOCK = 16; // Base size of window
-
- /** Perform work upon a window of the input. */
- void run(const unsigned int start, const unsigned int stop);
- /***********************************************************************/
-
- private:
- static void process_tile_row(
- const int tile_N,
- const int n_channels,
- const T* const matrix_base,
- const int matrix_stride,
- const int matrix_row_stride,
- const T* const biases,
- T* const output,
- const int output_row_stride,
- const int output_col_stride,
- const int row_pad_bottom,
- const int row_pad_right
- );
-
- // Limits on the amount of anti-padding to be applied
- static constexpr int max_pad_bottom = output_tile_rows;
- static constexpr int max_pad_right = output_tile_cols;
-
- /** Prepare a single tile of the output tensor. */
- template <int pad_bottom, int pad_right>
- static void process_tile(int, const T*, int, const T*, T*, int, int);
-
- // Array of methods to produce tiles of output tensor.
- typedef void (*TileFn)(int, const T*, int, const T*, T*, int, int);
- static const TileFn tile_fns[max_pad_bottom][max_pad_right];
+ using OutputTransform = OutputTransform<
+ KernelRows, KernelCols,
+ (OutputTileRows + KernelRows - 1),
+ (OutputTileCols + KernelCols - 1),
+ T
+ >;
- /** Member constants for instances of the transform. */
- const T* const _matrix_base;
- const T* const _biases;
- const int _matrix_stride, _matrix_row_stride;
- T* const _outptr;
- const int _n_batches, _n_rows, _n_cols, _n_channels, _tile_M, _tile_N;
- const int _out_col_stride, _out_row_stride, _out_batch_stride;
- };
/** Perform a convolution.
*/
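
In winograd_gemm.hpp the nested OutputTransform struct is replaced by a template alias onto the free-standing winograd::OutputTransform, with the inner tile size derived as output tile size plus kernel size minus one. A compile-time sketch of that mapping, under assumed stand-in names, is:

#include <type_traits>

// Stand-in for the free-standing transform, parameterised on kernel and inner tile.
template <int KernelRows, int KernelCols, int InnerTileRows, int InnerTileCols, typename T>
class FreeOutputTransform {};

// Stand-in for WinogradGEMM: re-export the free-standing class under the old
// nested name, mapping (output tile, kernel) to (kernel, inner tile).
template <int OutputTileRows, int OutputTileCols, int KernelRows, int KernelCols>
class GemmSketch
{
 public:
  template <typename T>
  using OutputTransform = FreeOutputTransform<
      KernelRows, KernelCols,
      OutputTileRows + KernelRows - 1,
      OutputTileCols + KernelCols - 1,
      T>;
};

// A 2x2 output tile with a 3x3 kernel therefore works on a 4x4 inner tile.
static_assert(
    std::is_same<GemmSketch<2, 2, 3, 3>::OutputTransform<float>,
                 FreeOutputTransform<3, 3, 4, 4, float>>::value,
    "inner tile = output tile + kernel - 1");

int main() { return 0; }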
diff --git a/arm_compute/core/NEON/kernels/convolution/winograd/winograd_output_transform.hpp b/arm_compute/core/NEON/kernels/convolution/winograd/winograd_output_transform.hpp
new file mode 100644
index 0000000000..07a0b8666a
--- /dev/null
+++ b/arm_compute/core/NEON/kernels/convolution/winograd/winograd_output_transform.hpp
@@ -0,0 +1,232 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#pragma once
+
+namespace winograd
+{
+
+
+namespace
+{
+
+template <int KernelRows, int KernelCols, int InnerTileRows, int InnerTileCols, typename T>
+class OutputTransformImplTiles
+{
+ public:
+ typedef void (*TileFn)(
+ const int n_channels, /** @param[in] Number of channels in output tensor */
+ const T* const matrix_base, /** @param[in] Base pointer to Winograd output matrices. */
+ const int matrix_stride, /** @param[in] Stride between matrices in the output space. */
+ const T* const biases, /** @param[in] Pointer to bias vector (may be nullptr). */
+ T* const output, /** @param[out] Pointer to output tensor. */
+ const int output_row_stride, /** @param[in] Stride across rows of the output tensor. */
+ const int output_col_stride, /** @param[in] Stride between columns of the output tensor. */
+ const int _pad_bottom, /** @param[in] Bottom padding for unspecialised tiles. */
+ const int _pad_right /** @param[in] Right padding for unspecialised tiles. */
+ );
+
+ static TileFn get_tile_specialization(
+ const int pad_bottom,
+ const int pad_right
+ );
+
+ static constexpr unsigned int OutputTileRows = InnerTileRows - KernelRows + 1;
+ static constexpr unsigned int OutputTileCols = InnerTileCols - KernelCols + 1;
+
+ private:
+ static constexpr unsigned int n_pad_bottom = OutputTileRows - 1;
+ static constexpr unsigned int n_pad_right = OutputTileCols - 1;
+
+ static const TileFn tilefn_generic; /** Generic tile processing function. */
+ static const TileFn tilefn_unpadded; /** Tile processor for unpadded tiles. */
+ static const TileFn tilefn_bottom_padded[n_pad_bottom]; /** Bottom padding only. */
+ static const TileFn tilefn_right_padded[n_pad_right]; /** Right padding only. */
+};
+
+template <int KernelCols, int InnerTileCols, typename T>
+class OutputTransformImplTiles<1, KernelCols, 1, InnerTileCols, T>
+{
+ public:
+ typedef void (*TileFn)(
+ const int n_channels, /** @param[in] Number of channels in output tensor */
+ const T* const matrix_base, /** @param[in] Base pointer to Winograd output matrices. */
+ const int matrix_stride, /** @param[in] Stride between matrices in the output space. */
+ const T* const biases, /** @param[in] Pointer to bias vector (may be nullptr). */
+ T* const output, /** @param[out] Pointer to output tensor. */
+ const int output_row_stride, /** @param[in] Stride across rows of the output tensor. */
+ const int output_col_stride, /** @param[in] Stride between columns of the output tensor. */
+ const int _pad_bottom, /** @param[in] Bottom padding for unspecialised tiles. */
+ const int _pad_right /** @param[in] Right padding for unspecialised tiles. */
+ );
+
+ static TileFn get_tile_specialization(
+ const int pad_bottom,
+ const int pad_right
+ );
+
+ static constexpr unsigned int OutputTileRows = 1;
+ static constexpr unsigned int OutputTileCols = InnerTileCols - KernelCols + 1;
+
+ private:
+ static constexpr unsigned int n_pad_right = OutputTileCols - 1;
+
+ static const TileFn tilefn_unpadded; /** Tile processor for unpadded tiles. */
+ static const TileFn tilefn_right_padded[n_pad_right]; /** Right padding only. */
+};
+
+template <int KernelRows, int KernelCols, int InnerTileRows, int InnerTileCols, typename T>
+class OutputTransformImpl
+{
+ private:
+ static void process_tile_row(
+ const int tile_N,
+ const int n_channels,
+ const T* const matrix_base,
+ const int matrix_stride,
+ const int matrix_row_stride,
+ const T* const biases,
+ T* const output,
+ const int output_row_stride,
+ const int output_col_stride,
+ const int row_pad_bottom,
+ const int row_pad_right
+ );
+
+ using Tiles = OutputTransformImplTiles<
+ KernelRows, KernelCols, InnerTileRows, InnerTileCols, T
+ >;
+
+ public:
+ /** Apply the output transform to a tensor. */
+ static void execute(
+ const int n_batches,
+ const int out_batch_stride,
+ const int n_rows,
+ const int out_row_stride,
+ const int n_cols,
+ const int out_col_stride,
+ const int n_channels,
+ const T* const matrix_base,
+ const int matrix_stride,
+ const int matrix_row_stride,
+ const T* const biases,
+ T* const output
+ );
+
+ static constexpr unsigned int OutputTileRows = Tiles::OutputTileRows;
+ static constexpr unsigned int OutputTileCols = Tiles::OutputTileCols;
+};
+
+template <int KernelRows, int InnerTileRows, typename T>
+class OutputTransformImpl<KernelRows, 1, InnerTileRows, 1, T>
+{
+ public:
+ /** Apply the output transform to a tensor. */
+ static void execute(
+ const int n_batches,
+ const int out_batch_stride,
+ const int n_rows,
+ const int out_row_stride,
+ const int n_cols,
+ const int out_col_stride,
+ const int n_channels,
+ const T* const matrix_base,
+ const int matrix_stride,
+ const int matrix_row_stride,
+ const T* const biases,
+ T* const output
+ );
+
+ static constexpr unsigned int OutputTileRows = InnerTileRows - KernelRows + 1;
+ static constexpr unsigned int OutputTileCols = 1;
+};
+
+} // namespace (anonymous)
+
+template <int KernelRows, int KernelCols, int InnerTileRows, int InnerTileCols, typename T>
+class OutputTransform
+{
+ public:
+ /***********************************************************************/
+ /** Create an OutputTransform operator fixed on a given problem and set
+ * of pointers.
+ */
+ OutputTransform(
+ const T* const matrix_base, /** Pointer to base of matrices. */
+ const int matrix_stride, /** Stride between matrices. */
+ const int matrix_row_stride, /** Stride within a matrix. */
+ const T* const biases, /** Pointer to biases vector. */
+ T* const output, /** Pointer to output tensor. */
+ const int n_batches, /** Number of batches in output tensor. */
+ const int n_rows, /** Number of rows in output tensor. */
+ const int n_cols, /** Number of columns in output tensor. */
+ const int n_channels, /** Number of channels in output tensor. */
+ const int out_batch_stride=0, /** Output batch stride. */
+ const int out_row_stride=0, /** Output row stride. */
+ const int out_col_stride=0 /** Output column stride. */
+ );
+
+ /** Get the window of work a given operator can perform. */
+ unsigned int get_window() const;
+ static constexpr unsigned int WINDOW_BLOCK = 16; // Base size of window
+
+ /** Perform work upon a window of the input. */
+ void run(const unsigned int start, const unsigned int stop);
+
+ /** Apply the transform to create a tensor. */
+ static void execute(
+ const int n_batches,
+ const int out_batch_stride,
+ const int n_rows,
+ const int out_row_stride,
+ const int n_cols,
+ const int out_col_stride,
+ const int n_channels,
+ const T* const matrix_base,
+ const int matrix_stride,
+ const int matrix_row_stride,
+ const T* const biases,
+ T* const output
+ );
+
+ private:
+ using Transform = OutputTransformImpl<
+ KernelRows, KernelCols, InnerTileRows, InnerTileCols, T
+ >;
+
+ static constexpr unsigned int OutputTileRows = Transform::OutputTileRows;
+ static constexpr unsigned int OutputTileCols = Transform::OutputTileCols;
+
+ /** Member constants for instances of the transform. */
+ const T* const _matrix_base;
+ const T* const _biases;
+ const int _matrix_stride, _matrix_row_stride;
+ T* const _outptr;
+ const int _n_batches, _n_rows, _n_cols, _n_channels, _tile_M, _tile_N;
+ const int _out_col_stride, _out_row_stride, _out_batch_stride;
+};
+
+} // namespace winograd
+
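
As a worked check of the tile arithmetic used in execute() above (tile_M = iceildiv(n_rows, OutputTileRows) and pad_bottom = OutputTileRows*tile_M - n_rows), the snippet below evaluates two concrete shapes; iceildiv is re-declared locally for illustration only and is not the library's helper.

#include <cstdio>

// Local stand-in for the library's iceildiv() helper: ceiling division.
constexpr int iceildiv(int a, int b) { return (a + b - 1) / b; }

int main()
{
  // 2x2 output tiles (3x3 kernel, 4x4 inner tile) over a 56x56 output:
  // 28x28 tiles and no anti-padding is needed.
  constexpr int tile_M = iceildiv(56, 2);
  constexpr int pad_bottom = 2 * tile_M - 56;
  static_assert(tile_M == 28 && pad_bottom == 0, "56 is a multiple of the tile size");

  // The same tiles over a 7x7 output: 4x4 tiles with one row of anti-padding.
  constexpr int tile_M7 = iceildiv(7, 2);
  constexpr int pad_bottom7 = 2 * tile_M7 - 7;
  static_assert(tile_M7 == 4 && pad_bottom7 == 1, "odd sizes need one row of padding");

  std::printf("56x56 -> %dx%d tiles, pad %d; 7x7 -> %dx%d tiles, pad %d\n",
              tile_M, tile_M, pad_bottom, tile_M7, tile_M7, pad_bottom7);
  return 0;
}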