author    Anthony Barbier <anthony.barbier@arm.com>    2018-07-23 16:42:59 +0100
committer Anthony Barbier <anthony.barbier@arm.com>    2018-11-02 16:54:54 +0000
commit    3d677ccee046cd384abf2142f323f8e9e7a4834f (patch)
tree      2e0d86a1b2438cb94386c55d1bc89b3e1061214c /arm_compute/core
parent    597a85666a84c9a9414264966651551564b79299 (diff)
download  ComputeLibrary-3d677ccee046cd384abf2142f323f8e9e7a4834f.tar.gz
COMPMID-1406: Refactor gemm_interleaved to use our own types and scheduler

- Ported PrepareB kernel from gemm_interleave
- Ported TransformA feature from gemm_interleave
- Allocate reshaped a and b buffers
- Added memory_manager / memory_group
- MatrixMultiply kernel
- Interleave kernels execution
- Fixed a few bugs: all nightly Convolution tests passing for threads=1 and threads=4
- Added Doxygen documentation and comments in the code
- Added support for all supported data types

Change-Id: Iffa1c09fda0bb9c61213bb83524d5a48e7ecb03c
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/141281
Tested-by: Jenkins <bsgcomp@arm.com>
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
Diffstat (limited to 'arm_compute/core')
-rw-r--r--  arm_compute/core/NEON/kernels/assembly/Helpers.h                                  101
-rw-r--r--  arm_compute/core/NEON/kernels/assembly/INEGEMMWrapperKernel.h                      10
-rw-r--r--  arm_compute/core/NEON/kernels/assembly/NEGEMMInterleavedMatrixMultiplyWrapper.h   132
-rw-r--r--  arm_compute/core/NEON/kernels/assembly/NEGEMMInterleavedPrepareBWrapperKernel.h   129
-rw-r--r--  arm_compute/core/NEON/kernels/assembly/NEGEMMInterleavedTransformAWrapper.h       110
-rw-r--r--  arm_compute/core/WindowIterator.h                                                   18
6 files changed, 495 insertions, 5 deletions
diff --git a/arm_compute/core/NEON/kernels/assembly/Helpers.h b/arm_compute/core/NEON/kernels/assembly/Helpers.h
new file mode 100644
index 0000000000..0dcba88a95
--- /dev/null
+++ b/arm_compute/core/NEON/kernels/assembly/Helpers.h
@@ -0,0 +1,101 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_ASSEMBLY_HELPERS_H__
+#define __ARM_COMPUTE_ASSEMBLY_HELPERS_H__
+
+#include "arm_compute/core/CPP/CPPTypes.h"
+#include "arm_compute/core/Utils.h"
+
+namespace arm_compute
+{
+/** Block sizes to use to break up the M, N and K dimensions */
+struct BlockSizes
+{
+ unsigned int k_block{ 0 }; /**< Block size along the K dimension */
+ unsigned int x_block{ 0 }; /**< Block size along the N (x) dimension */
+ unsigned int m_round{ 0 }; /**< Block size along the M dimension (Must be a multiple of strategy_out_height) */
+ unsigned int strategy_out_height{ 0 }; /**< Number of rows (M) processed by the selected strategy */
+};
+
+/** Calculate the recommended block sizes to use based on the CPU cache sizes and the strategy which will be used
+ *
+ * @param[in] ci CPU information
+ * @param[in] M M dimension.
+ * @param[in] N N dimension.
+ * @param[in] K K dimension.
+ *
+ * @return Recommended block sizes to use for the given M, N, K dimensions.
+ */
+template <typename strategy>
+BlockSizes calculate_block_sizes(const CPUInfo &ci, unsigned int M, unsigned int N, unsigned int K)
+{
+ BlockSizes bs;
+
+ using Toi = typename strategy::operand_type;
+
+ const unsigned int L1_size = ci.get_L1_cache_size();
+ const unsigned int L2_size = ci.get_L2_cache_size();
+
+ // Work out blocking parameters
+
+ // k_block: Find out how much of the larger array can be loaded into half the cache.
+ // This should account for associative caches.
+ bs.k_block = (L1_size / 2) / (sizeof(Toi) * (std::max(strategy::out_width(), strategy::out_height())));
+
+ // Needs to be (at least a single) multiple of the K unroll level.
+ bs.k_block /= strategy::k_unroll();
+ bs.k_block = std::max(bs.k_block, 1U) * strategy::k_unroll();
+
+ // Now tune to presented problem size; this is how many blocks we need.
+ int num_k_blocks = DIV_CEIL(K, bs.k_block);
+
+ // So divide the space equally into that many blocks.
+ bs.k_block = DIV_CEIL(K, num_k_blocks);
+
+ // And round UP to the K unroll level required.
+ bs.k_block = ceil_to_multiple(bs.k_block, strategy::k_unroll());
+
+ // x_block: Work out how many rows (of length k_block) will fit in the L2
+ // Don't allocate more than 90% of the L2 to allow for overheads, and subtract off the L1 contents.
+ bs.x_block = (((L2_size * 9) / 10) - (bs.k_block * sizeof(Toi) * (strategy::out_width() + strategy::out_height()))) / (sizeof(Toi) * bs.k_block);
+
+ // Needs to be (at least a single) multiple of the kernel output width.
+ bs.x_block /= strategy::out_width();
+ bs.x_block = std::max(bs.x_block, 1U) * strategy::out_width();
+
+ // And tune to the presented problem size.
+ int num_x_blocks = DIV_CEIL(N, bs.x_block);
+ bs.x_block = DIV_CEIL(N, num_x_blocks);
+
+ bs.x_block = ceil_to_multiple(bs.x_block, strategy::out_width());
+
+ // Work out the rounded size of M - needed for some buffers.
+ bs.m_round = ceil_to_multiple(M, strategy::out_height());
+ bs.strategy_out_height = strategy::out_height();
+
+ return bs;
+}
+
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_ASSEMBLY_HELPERS_H__ */
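
To make the blocking arithmetic above concrete, here is a minimal, self-contained sketch of the same computation. MockStrategy, the cache sizes and the problem sizes are illustrative assumptions only; the real template reads the kernel geometry from the selected strategy and the cache sizes from CPUInfo.

    #include <algorithm>
    #include <cstdio>

    static unsigned int div_ceil(unsigned int a, unsigned int b) { return (a + b - 1) / b; }
    static unsigned int ceil_to_multiple(unsigned int a, unsigned int m) { return div_ceil(a, m) * m; }

    struct MockStrategy // stand-in for e.g. an 8x12 float micro-kernel
    {
        using operand_type = float;
        static unsigned int out_width()  { return 12; } // columns (N) per kernel pass
        static unsigned int out_height() { return 8; }  // rows (M) per kernel pass
        static unsigned int k_unroll()   { return 1; }  // K unroll level
    };

    int main()
    {
        const unsigned int L1 = 32 * 1024, L2 = 512 * 1024; // assumed cache sizes
        const unsigned int M = 1000, N = 1000, K = 500;     // assumed problem size
        using S   = MockStrategy;
        using Toi = S::operand_type;

        // k_block: fit one strip of A and one of B into half the L1, then even out the blocks.
        unsigned int k_block = (L1 / 2) / (sizeof(Toi) * std::max(S::out_width(), S::out_height()));
        k_block = std::max(k_block / S::k_unroll(), 1U) * S::k_unroll();
        k_block = ceil_to_multiple(div_ceil(K, div_ceil(K, k_block)), S::k_unroll());

        // x_block: rows of length k_block that fit in 90% of the L2, minus the L1 contents.
        unsigned int x_block = ((L2 * 9) / 10 - k_block * sizeof(Toi) * (S::out_width() + S::out_height())) / (sizeof(Toi) * k_block);
        x_block = std::max(x_block / S::out_width(), 1U) * S::out_width();
        x_block = ceil_to_multiple(div_ceil(N, div_ceil(N, x_block)), S::out_width());

        std::printf("k_block=%u x_block=%u m_round=%u\n", k_block, x_block, ceil_to_multiple(M, S::out_height()));
        return 0;
    }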
diff --git a/arm_compute/core/NEON/kernels/assembly/INEGEMMWrapperKernel.h b/arm_compute/core/NEON/kernels/assembly/INEGEMMWrapperKernel.h
index 02e5b58c9d..63178a738a 100644
--- a/arm_compute/core/NEON/kernels/assembly/INEGEMMWrapperKernel.h
+++ b/arm_compute/core/NEON/kernels/assembly/INEGEMMWrapperKernel.h
@@ -38,11 +38,11 @@ public:
/** Parameters defining the dimensions of the matrices being multiplied */
struct Params
{
- unsigned int M; /**< Rows in output matrix C (and input matrix A). */
- unsigned int N; /**< Columns in output matrix C (and input matrix B). */
- unsigned int K; /**< Columns of input matrix A (= rows of input matrix B). */
- unsigned int batches; /**< Number of "batched" GEMMs (unique A and C, shared B). */
- unsigned int multis; /**< Number of "multi" GEMMs (unique A, B and C). */
+ unsigned int M{ 0 }; /**< Rows in output matrix C (and input matrix A). */
+ unsigned int N{ 0 }; /**< Columns in output matrix C (and input matrix B). */
+ unsigned int K{ 0 }; /**< Columns of input matrix A (= rows of input matrix B). */
+ unsigned int batches{ 0 }; /**< Number of "batched" GEMMs (unique A and C, shared B). */
+ unsigned int multis{ 0 }; /**< Number of "multi" GEMMs (unique A, B and C). */
};
static Params extract_parameters(const ITensor *a, const ITensor *b, const ITensor *c);
diff --git a/arm_compute/core/NEON/kernels/assembly/NEGEMMInterleavedMatrixMultiplyWrapper.h b/arm_compute/core/NEON/kernels/assembly/NEGEMMInterleavedMatrixMultiplyWrapper.h
new file mode 100644
index 0000000000..46a05abcdb
--- /dev/null
+++ b/arm_compute/core/NEON/kernels/assembly/NEGEMMInterleavedMatrixMultiplyWrapper.h
@@ -0,0 +1,132 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_NEGEMMINTERLEAVEDMATRIXMULTIPLYWRAPPER_H__
+#define __ARM_COMPUTE_NEGEMMINTERLEAVEDMATRIXMULTIPLYWRAPPER_H__
+
+#include "arm_compute/core/NEON/kernels/assembly/Helpers.h"
+
+#include "arm_compute/core/NEON/kernels/assembly/INEGEMMWrapperKernel.h"
+#include "arm_compute/core/Window.h"
+
+namespace arm_compute
+{
+class ITensor;
+
+/** Unit of work for @ref NEGEMMInterleavedMatrixMultiplyWrapper to process */
+struct MatrixMultiplyWorkload
+{
+ /** Constructor
+ *
+ * @param[in] offset_transformed_b Offset from the start of transformed_b's allocation.
+ * @param[in] x0 First value to process along the X dimension (N).
+ * @param[in] xmax Last value to process along the X dimension (N).
+ * @param[in] k0 First value to process along the K dimension.
+ * @param[in] kmax Last value to process along the K dimension.
+ * @param[in] multi Multi index.
+ * @param[in] kern_k Number of elements along K actually processed by the kernel.
+ * @param[in] bblocks Number of x_blocks processed by the kernel.
+ */
+ MatrixMultiplyWorkload(unsigned int offset_transformed_b, unsigned int x0, unsigned int xmax, unsigned int k0, unsigned int kmax, unsigned int multi, int kern_k, int bblocks)
+ : _offset_transformed_b(offset_transformed_b), _x0(x0), _xmax(xmax), _k0(k0), _kmax(kmax), _multi(multi), _kern_k(kern_k), _bblocks(bblocks)
+ {
+ }
+ unsigned int _offset_transformed_b; /**< Offset from the start of transformed_b's allocation.*/
+ unsigned int _x0; /**< First value to process along the X dimension (N). */
+ unsigned int _xmax; /**< Last value to process along the X dimension (N). */
+ unsigned int _k0; /**< First value to process along the K dimension. */
+ unsigned int _kmax; /**< Last value to process along the K dimension. */
+ unsigned int _multi; /**< Multi index. */
+ int _kern_k; /**< Number of elements along K actually processed by the kernel. */
+ int _bblocks; /**< Number of x_blocks processed by the kernel. */
+};
+
+/** Common interface for the templated wrappers around the matrix multiply NEON assembly implementations */
+class NEGEMMInterleavedMatrixMultiplyWrapper
+{
+public:
+ /** Transform the block at the given coordinates
+ *
+ * @param[in] wl Workload to process.
+ * @param[in] info Information about the current thread.
+ * @param[in] batch_window Window containing iteration information for the M and batch dimensions.
+ * @param[in] start_offset Offset relative to the beginning of batch_window to start the processing from.
+ * @param[in] end_offset Offset relative to the beginning of batch_window to stop the processing.
+ */
+ virtual void transform(const MatrixMultiplyWorkload &wl, const ThreadInfo &info, const Window &batch_window, const Coordinates &start_offset, const Coordinates &end_offset) = 0;
+ /** Generate an array of workloads
+ *
+ * @param[out] workloads Container to store the generated workloads.
+ */
+ virtual void create_workloads(std::vector<MatrixMultiplyWorkload> &workloads) = 0;
+ /** Default destructor */
+ virtual ~NEGEMMInterleavedMatrixMultiplyWrapper() = default;
+};
+
+/** Equivalent to arm_gemm::GemmInterleaved's strategy::kernel() but using Compute Library types. */
+template <typename To, typename Tr, bool use_dot = false>
+class NEGEMMInterleavedMatrixMultiplyWrapperTemplate : public NEGEMMInterleavedMatrixMultiplyWrapper
+{
+public:
+ /** Configure the matrix multiplication: C = alpha * A * B + beta * C
+ *
+ * @param[in] prepared_a Already reshaped matrix A.
+ * @param[in] transformed_b Already reshaped matrix B.
+ * @param[out] tmp_c Temporary buffer to be used to store intermediate results.
+ * @param[in,out] c Result matrix C.
+ * @param[in] batch_window Window containing iteration information for the M and batch dimensions.
+ * @param[in] block_sizes Block sizes to use for the matrix multiplication (A & B must have been reshaped using these same block sizes).
+ * @param[in] params M, N, K sizes.
+ * @param[in] b_is_pretransposed Is B also pretransposed?
+ * @param[in] alpha Alpha value
+ * @param[in] beta Beta value
+ * @param[in] max_num_threads Maximum number of threads that might be used for the calculations.
+ */
+ void configure(const ITensor *prepared_a, const ITensor *transformed_b, ITensor *tmp_c, ITensor *c, const Window &batch_window, const BlockSizes &block_sizes,
+ const INEGEMMWrapperKernel::Params &params, bool b_is_pretransposed, float alpha, float beta, unsigned int max_num_threads);
+
+ // Inherited methods overridden:
+ void transform(const MatrixMultiplyWorkload &wl, const ThreadInfo &info, const Window &batch_window, const Coordinates &start_offset, const Coordinates &end_offset) override;
+ void create_workloads(std::vector<MatrixMultiplyWorkload> &workloads) override;
+
+private:
+ const ITensor *_prepared_a
+ {
+ nullptr
+ };
+ const ITensor *_transformed_b{ nullptr };
+ ITensor *_tmp_c{ nullptr };
+ ITensor *_c{ nullptr };
+ unsigned int _Nsize{ 0 };
+ unsigned int _Ksize{ 0 };
+ bool _transpose_b{ false };
+ BlockSizes _block_sizes{};
+ INEGEMMWrapperKernel::Params _params{};
+ Window _block_walker{};
+ bool _b_is_pretransposed{ false };
+ Tr _alpha{};
+ Tr _beta{};
+};
+
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_NEGEMMINTERLEAVEDMATRIXMULTIPLYWRAPPER_H__ */
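
Rather than being scheduled over an arbitrary window, these wrappers generate an explicit array of workloads that a scheduler can distribute. Below is a toy, library-free sketch of that pattern; Workload, process() and the static split policy are illustrative assumptions, not the library's API.

    #include <algorithm>
    #include <cstddef>
    #include <cstdio>
    #include <thread>
    #include <vector>

    struct Workload { unsigned int x0, xmax, k0, kmax; }; // toy analogue of MatrixMultiplyWorkload

    static void process(const Workload &wl, unsigned int thread_id)
    {
        std::printf("thread %u: x=[%u,%u) k=[%u,%u)\n", thread_id, wl.x0, wl.xmax, wl.k0, wl.kmax);
    }

    int main()
    {
        // create_workloads() analogue: one entry per (x_block, k_block) pair.
        std::vector<Workload> workloads;
        for (unsigned int k0 = 0; k0 < 500; k0 += 250)
            for (unsigned int x0 = 0; x0 < 1000; x0 += 336)
                workloads.push_back({ x0, std::min(x0 + 336U, 1000U), k0, std::min(k0 + 250U, 500U) });

        // Static split of the workload array across threads.
        const unsigned int num_threads = 4;
        std::vector<std::thread> pool;
        for (unsigned int t = 0; t < num_threads; ++t)
            pool.emplace_back([&workloads, t, num_threads]() {
                for (std::size_t i = t; i < workloads.size(); i += num_threads)
                    process(workloads[i], t);
            });
        for (auto &th : pool)
            th.join();
        return 0;
    }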
diff --git a/arm_compute/core/NEON/kernels/assembly/NEGEMMInterleavedPrepareBWrapperKernel.h b/arm_compute/core/NEON/kernels/assembly/NEGEMMInterleavedPrepareBWrapperKernel.h
new file mode 100644
index 0000000000..e46c33018b
--- /dev/null
+++ b/arm_compute/core/NEON/kernels/assembly/NEGEMMInterleavedPrepareBWrapperKernel.h
@@ -0,0 +1,129 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_NEGEMMINTERLEAVEDPREPAREBWRAPPERKERNEL_H__
+#define __ARM_COMPUTE_NEGEMMINTERLEAVEDPREPAREBWRAPPERKERNEL_H__
+
+#include "arm_compute/core/NEON/INEKernel.h"
+#include "arm_compute/core/NEON/kernels/assembly/Helpers.h"
+#include "arm_compute/core/NEON/kernels/assembly/INEGEMMWrapperKernel.h"
+
+namespace arm_compute
+{
+class ITensor;
+
+/** Unit of work for @ref NEGEMMInterleavedPrepareBWrapperKernel to process */
+struct PrepareBWorkload
+{
+ /** Constructor
+ *
+ * @param[in] offset_b Offset from the start of b's allocation
+ * @param[in] offset_transformed_b Offset from the start of transformed_b's allocation.
+ * @param[in] x0 First value to process along the X dimension (N).
+ * @param[in] xmax Last value to process along the X dimension (N).
+ * @param[in] k0 First value to process along the K dimension.
+ * @param[in] kmax Last value to process along the K dimension.
+ */
+ PrepareBWorkload(unsigned int offset_b, unsigned int offset_transformed_b, unsigned int x0, unsigned int xmax, unsigned int k0, unsigned int kmax)
+ : _offset_b(offset_b), _offset_transformed_b(offset_transformed_b), _x0(x0), _xmax(xmax), _k0(k0), _kmax(kmax)
+ {
+ }
+ unsigned int _offset_b; /**< Offset from the start of b's allocation.*/
+ unsigned int _offset_transformed_b; /**< Offset from the start of transformed_b's allocation.*/
+ unsigned int _x0; /**< First value to process along the X dimension (N). */
+ unsigned int _xmax; /**< Last value to process along the X dimension (N). */
+ unsigned int _k0; /**< First value to process along the K dimension. */
+ unsigned int _kmax; /**< Last value to process along the K dimension. */
+};
+
+/** Common interface for the templated wrappers around the B reshape NEON assembly implementations */
+class NEGEMMInterleavedPrepareBWrapperKernel : public INEKernel
+{
+public:
+ /** Transform the block at the given coordinates
+ *
+ * @param[in] wl Workload to process.
+ * @param[in] info Information about the current thread.
+ */
+ virtual void transform(const PrepareBWorkload &wl, const ThreadInfo &info) = 0;
+ /** Generate an array of workloads
+ *
+ * @param[out] workloads Container to store the generated workloads.
+ */
+ virtual void create_workloads(std::vector<PrepareBWorkload> &workloads) = 0;
+ /** Return the block sizes used to reshape B.
+ *
+ * The same block sizes must be used to reshape A and for the matrix multiplication.
+ *
+ * @return The block sizes used to reshape B.
+ */
+ virtual BlockSizes block_sizes() const = 0;
+
+ // Inherited methods overridden:
+ const char *name() const override
+ {
+ return "NEGEMMInterleavedPrepareBWrapperKernel";
+ }
+
+ bool is_parallelisable() const override
+ {
+ return false; // Can't run on arbitrary windows but can be parallelised using an array of workloads
+ }
+};
+
+/** Equivalent to arm_gemm::GemmInterleaved's strategy::transforms::PrepareB() but using Compute Library types.
+ */
+template <typename To, bool use_dot = false>
+class NEGEMMInterleavedPrepareBWrapperKernelTemplate : public NEGEMMInterleavedPrepareBWrapperKernel
+{
+public:
+ /** Configure the reshape B routine.
+ *
+ * @param[in] b Input matrix B.
+ * @param[out] transformed_b Reshaped matrix B.
+ * @param[in] transpose_b Also transpose B?
+ * @param[in] ci CPU information
+ * @param[in] params M, N, K sizes.
+ */
+ void configure(const ITensor *b, ITensor *transformed_b, bool transpose_b, const CPUInfo &ci, const INEGEMMWrapperKernel::Params &params);
+
+ // Inherited methods overridden:
+ void transform(const PrepareBWorkload &wl, const ThreadInfo &info) override;
+ void create_workloads(std::vector<PrepareBWorkload> &workloads) override;
+ void run(const Window &window, const ThreadInfo &info) override;
+ BlockSizes block_sizes() const override;
+
+private:
+ const ITensor *_b
+ {
+ nullptr
+ };
+ ITensor *_transformed_b{ nullptr };
+ unsigned int _Nsize{ 0 };
+ unsigned int _Ksize{ 0 };
+ bool _transpose_b{ false };
+ BlockSizes _block_sizes{};
+};
+
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_NEGEMMINTERLEAVEDPREPAREBWRAPPERKERNEL_H__ */
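
What "preparing" B amounts to can be illustrated without the library: copy each x_block-wide, K-deep panel of B into a contiguous buffer so the multiply kernel can stream it linearly. This is a conceptual sketch only; the real kernels additionally interleave elements within each panel to match the micro-kernel's register layout, and pack_b is a hypothetical helper, not the library's.

    #include <algorithm>
    #include <cassert>
    #include <cstddef>
    #include <vector>

    // Pack a row-major K x N matrix into contiguous panels of x_block columns.
    static std::vector<float> pack_b(const std::vector<float> &b, unsigned int K, unsigned int N, unsigned int x_block)
    {
        std::vector<float> packed;
        packed.reserve(static_cast<std::size_t>(K) * N);
        for (unsigned int x0 = 0; x0 < N; x0 += x_block)
        {
            const unsigned int xmax = std::min(x0 + x_block, N);
            for (unsigned int k = 0; k < K; ++k)          // K-major inside the panel
                for (unsigned int x = x0; x < xmax; ++x)
                    packed.push_back(b[k * N + x]);
        }
        return packed;
    }

    int main()
    {
        const unsigned int K = 4, N = 6;
        std::vector<float> b(K * N);
        for (unsigned int i = 0; i < K * N; ++i)
            b[i] = static_cast<float>(i);
        const std::vector<float> packed = pack_b(b, K, N, /* x_block */ 4);
        assert(packed.size() == b.size()); // same elements, panel-friendly order
        return 0;
    }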
diff --git a/arm_compute/core/NEON/kernels/assembly/NEGEMMInterleavedTransformAWrapper.h b/arm_compute/core/NEON/kernels/assembly/NEGEMMInterleavedTransformAWrapper.h
new file mode 100644
index 0000000000..b6831e3ca9
--- /dev/null
+++ b/arm_compute/core/NEON/kernels/assembly/NEGEMMInterleavedTransformAWrapper.h
@@ -0,0 +1,110 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_NEGEMMINTERLEAVEDTRANSFORMAWRAPPER_H__
+#define __ARM_COMPUTE_NEGEMMINTERLEAVEDTRANSFORMAWRAPPER_H__
+
+#include "arm_compute/core/CPP/CPPTypes.h"
+#include "arm_compute/core/NEON/kernels/assembly/INEGEMMWrapperKernel.h"
+#include "arm_compute/core/Window.h"
+
+namespace arm_compute
+{
+class ITensor;
+
+/** Unit of work for @ref NEGEMMInterleavedTransformAWrapper to process */
+struct TransformAWorkload
+{
+ /** Constructor
+ *
+ * @param[in] k0 First value to process along the K dimension.
+ * @param[in] kmax Last value to process along the K dimension.
+ * @param[in] multi Multi index.
+ */
+ TransformAWorkload(unsigned int k0, unsigned int kmax, unsigned int multi)
+ : _k0(k0), _kmax(kmax), _multi(multi)
+ {
+ }
+ unsigned int _k0; /**< First value to process along the K dimension. */
+ unsigned int _kmax; /**< Last value to process along the K dimension. */
+ unsigned int _multi; /**< Multi index. */
+};
+
+/** Equivalent to arm_gemm::GemmInterleaved's Transform<strategy::A_interleave, strategy::A_block> but using Compute Library types.
+ *
+ * Note: Each workload converts a different slice of a and writes it to transformed_a (which can store only one slice at a time). The workloads' execution should therefore be interleaved with other workloads that make use of their result.
+ */
+class NEGEMMInterleavedTransformAWrapper
+{
+public:
+ /** Transform the block at the given coordinates
+ *
+ * @param[in] wl Workload to process.
+ * @param[in] info Information about the current thread.
+ * @param[in] batch_window Window containing iteration information for the M and batch dimensions.
+ * @param[in] start_offset Offset relative to the beginning of batch_window to start the processing from.
+ * @param[in] end_offset Offset relative to the beginning of batch_window to stop the processing.
+ */
+ virtual void transform(const TransformAWorkload &wl, const ThreadInfo &info, const Window &batch_window, const Coordinates &start_offset, const Coordinates &end_offset) = 0;
+ /** Generate an array of workloads
+ *
+ * @param[out] workloads Container to store the generated workloads.
+ */
+ virtual void create_workloads(std::vector<TransformAWorkload> &workloads) = 0;
+ /** Default destructor */
+ virtual ~NEGEMMInterleavedTransformAWrapper() = default;
+};
+
+/** Type specialisations of @ref NEGEMMInterleavedTransformAWrapper */
+template <typename To, bool use_dot = false>
+class NEGEMMInterleavedTransformAWrapperTemplate : public NEGEMMInterleavedTransformAWrapper
+{
+public:
+ /** Configure the reshape A routine.
+ *
+ * @param[in] a Input matrix A.
+ * @param[out] transformed_a Reshaped matrix A.
+ * @param[in] transpose_a Also transpose A?
+ * @param[in] block_walker Window representing the layout of the matrix's blocks
+ * @param[in] params M, N, K sizes.
+ */
+ void configure(const ITensor *a, ITensor *transformed_a, bool transpose_a, const Window &block_walker, const INEGEMMWrapperKernel::Params &params);
+
+ // Inherited methods overridden:
+ void transform(const TransformAWorkload &wl, const ThreadInfo &info, const Window &batch_window, const Coordinates &start_offset, const Coordinates &end_offset) override;
+ void create_workloads(std::vector<TransformAWorkload> &workloads) override;
+
+private:
+ const ITensor *_a
+ {
+ nullptr
+ };
+ ITensor *_transformed_a{ nullptr };
+ unsigned int _Msize{ 0 };
+ unsigned int _Ksize{ 0 };
+ bool _transpose_a{ false };
+ Window _k_multi_window{};
+};
+
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_NEGEMMINTERLEAVEDTRANSFORMAWRAPPER_H__ */
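
The note above imposes an ordering constraint: transformed_a holds a single (k0, kmax, multi) slice, so every matrix-multiply workload that reads a slice must run before the next TransformA workload overwrites it. A library-free sketch of that interleaving follows, with illustrative names and sizes only.

    #include <cstdio>
    #include <vector>

    struct ASlice { unsigned int k0, kmax, multi; }; // toy analogue of TransformAWorkload
    struct MMWork { unsigned int x0, xmax; };        // toy analogue of MatrixMultiplyWorkload

    int main()
    {
        const std::vector<ASlice> a_slices     = { { 0, 250, 0 }, { 250, 500, 0 } };
        const std::vector<MMWork> mm_per_slice = { { 0, 336 }, { 336, 672 }, { 672, 1000 } };

        for (const auto &a : a_slices)
        {
            std::printf("TransformA:       k=[%u,%u) multi=%u\n", a.k0, a.kmax, a.multi);
            // Consume the slice fully before the next TransformA overwrites it.
            for (const auto &mm : mm_per_slice)
                std::printf("  MatrixMultiply: x=[%u,%u)\n", mm.x0, mm.xmax);
        }
        return 0;
    }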
diff --git a/arm_compute/core/WindowIterator.h b/arm_compute/core/WindowIterator.h
index 8e58d0ce1c..32d6293a5a 100644
--- a/arm_compute/core/WindowIterator.h
+++ b/arm_compute/core/WindowIterator.h
@@ -110,6 +110,24 @@ public:
return get_ptr(x, y, z, w);
}
+ /** Returns a pointer to the first element of the tensor
+ *
+ * @return Pointer to the first element.
+ */
+ inline T *first_element()
+ {
+ return reinterpret_cast<T *>(_first);
+ }
+
+ /** Returns a pointer to the first element of the tensor
+ *
+ * @return Pointer to the first element.
+ */
+ inline T *operator()()
+ {
+ return first_element();
+ }
+
private:
uint8_t *_first; /**< Pointer to the first element of the tensor.*/
Strides _strides; /**< Strides in bytes of the tensor */
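
A simplified, standalone analogue of the accessor addition above: the base pointer is stored as uint8_t* (the strides are in bytes), and first_element() / operator() hand back a typed pointer to the start of the tensor. MiniAccessor is illustrative, not the library class.

    #include <cassert>
    #include <cstdint>

    template <typename T>
    class MiniAccessor
    {
    public:
        explicit MiniAccessor(uint8_t *first) : _first(first) {}
        T *first_element() { return reinterpret_cast<T *>(_first); } // typed view of the base pointer
        T *operator()()    { return first_element(); }               // shorthand, as in the patch
    private:
        uint8_t *_first; // byte-addressed base, matching the byte strides
    };

    int main()
    {
        float data[4] = { 1.f, 2.f, 3.f, 4.f };
        MiniAccessor<float> acc(reinterpret_cast<uint8_t *>(data));
        assert(acc() == data && *acc.first_element() == 1.f);
        return 0;
    }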