authorAnthony Barbier <anthony.barbier@arm.com>2018-07-23 16:42:59 +0100
committerAnthony Barbier <anthony.barbier@arm.com>2018-11-02 16:54:54 +0000
commit3d677ccee046cd384abf2142f323f8e9e7a4834f (patch)
tree2e0d86a1b2438cb94386c55d1bc89b3e1061214c
parent597a85666a84c9a9414264966651551564b79299 (diff)
downloadComputeLibrary-3d677ccee046cd384abf2142f323f8e9e7a4834f.tar.gz
COMPMID-1406: Refactor gemm_interleaved to use our own types and scheduler
- Ported the PrepareB kernel from gemm_interleaved
- Ported the TransformA feature from gemm_interleaved
- Allocate reshaped A and B buffers
- Added memory_manager / memory_group
- MatrixMultiply kernel
- Interleaved the kernels' execution
- Fixed a few bugs: all nightly Convolution tests pass for threads=1 and threads=4
- Added Doxygen documentation and comments in the code
- Added support for all the supported data types
Change-Id: Iffa1c09fda0bb9c61213bb83524d5a48e7ecb03c
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/141281
Tested-by: Jenkins <bsgcomp@arm.com>
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
-rw-r--r--  arm_compute/core/NEON/kernels/assembly/Helpers.h | 101
-rw-r--r--  arm_compute/core/NEON/kernels/assembly/INEGEMMWrapperKernel.h | 10
-rw-r--r--  arm_compute/core/NEON/kernels/assembly/NEGEMMInterleavedMatrixMultiplyWrapper.h | 132
-rw-r--r--  arm_compute/core/NEON/kernels/assembly/NEGEMMInterleavedPrepareBWrapperKernel.h | 129
-rw-r--r--  arm_compute/core/NEON/kernels/assembly/NEGEMMInterleavedTransformAWrapper.h | 110
-rw-r--r--  arm_compute/core/WindowIterator.h | 18
-rw-r--r--  arm_compute/runtime/NEON/functions/NEGEMMAssemblyDispatch.h | 5
-rw-r--r--  arm_compute/runtime/NEON/functions/assembly/NEGEMMInterleavedWrapper.h | 99
-rw-r--r--  src/core/NEON/kernels/assembly/NEGEMMInterleavedMatrixMultiplyWrapper.cpp | 142
-rw-r--r--  src/core/NEON/kernels/assembly/NEGEMMInterleavedPrepareBWrapperKernel.cpp | 170
-rw-r--r--  src/core/NEON/kernels/assembly/NEGEMMInterleavedStrategies.h | 95
-rw-r--r--  src/core/NEON/kernels/assembly/NEGEMMInterleavedTransformAWrapper.cpp | 118
-rw-r--r--  src/core/NEON/kernels/assembly/NEGEMMNativeWrapperKernel.cpp | 4
-rw-r--r--  src/runtime/NEON/functions/NEGEMMAssemblyDispatch.cpp | 111
-rw-r--r--  src/runtime/NEON/functions/assembly/NEGEMMInterleavedWrapper.cpp | 260
15 files changed, 1479 insertions(+), 25 deletions(-)
diff --git a/arm_compute/core/NEON/kernels/assembly/Helpers.h b/arm_compute/core/NEON/kernels/assembly/Helpers.h
new file mode 100644
index 0000000000..0dcba88a95
--- /dev/null
+++ b/arm_compute/core/NEON/kernels/assembly/Helpers.h
@@ -0,0 +1,101 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_ASSEMBLY_HELPERS_H__
+#define __ARM_COMPUTE_ASSEMBLY_HELPERS_H__
+
+#include "arm_compute/core/CPP/CPPTypes.h"
+#include "arm_compute/core/Utils.h"
+
+namespace arm_compute
+{
+/** Block sizes to use to break the M, N, K dimension */
+struct BlockSizes
+{
+ unsigned int k_block{ 0 }; /**< Block size along the K dimension */
+ unsigned int x_block{ 0 }; /**< Block size along the N (x) dimension */
+ unsigned int m_round{ 0 }; /**< Block size along the M dimension (Must be a multiple of strategy_out_height) */
+ unsigned int strategy_out_height{ 0 }; /**< Number of rows (M) processed by the selected strategy */
+};
+
+/** Calculate the recommended block sizes to use based on the CPU cache sizes and the strategy which will be used
+ *
+ * @param[in] ci CPU information
+ * @param[in] M M dimension.
+ * @param[in] N N dimension.
+ * @param[in] K K dimension.
+ *
+ * @return Recommended block sizes to use for the given M, N, K dimensions.
+ */
+template <typename strategy>
+BlockSizes calculate_block_sizes(const CPUInfo &ci, unsigned int M, unsigned int N, unsigned int K)
+{
+ BlockSizes bs;
+
+ using Toi = typename strategy::operand_type;
+
+ const unsigned int L1_size = ci.get_L1_cache_size();
+ const unsigned int L2_size = ci.get_L2_cache_size();
+
+ // Work out blocking parameters
+
+ // k_block: Find out how much of the larger array can be loaded into half the cache.
+ // This should account for associative caches.
+ bs.k_block = (L1_size / 2) / (sizeof(Toi) * (std::max(strategy::out_width(), strategy::out_height())));
+
+ // Needs to be (at least a single) multiple of the K unroll level.
+ bs.k_block /= strategy::k_unroll();
+ bs.k_block = std::max(bs.k_block, 1U) * strategy::k_unroll();
+
+ // Now tune to presented problem size; this is how many blocks we need.
+ int num_k_blocks = DIV_CEIL(K, bs.k_block);
+
+ // So divide the space equally into that many blocks.
+ bs.k_block = DIV_CEIL(K, num_k_blocks);
+
+ // And round UP to the K unroll level required.
+ bs.k_block = ceil_to_multiple(bs.k_block, strategy::k_unroll());
+
+ // x_block: Work out how many rows (of length k_block) will fit in the L2
+ // Don't allocate more than 90% of the L2 to allow for overheads, and subtract off the L1 contents.
+ bs.x_block = (((L2_size * 9) / 10) - (bs.k_block * sizeof(Toi) * (strategy::out_width() + strategy::out_height()))) / (sizeof(Toi) * bs.k_block);
+
+ // Needs to be (at least a single) multiple of the kernel output width.
+ bs.x_block /= strategy::out_width();
+ bs.x_block = std::max(bs.x_block, 1U) * strategy::out_width();
+
+ // And tune to the presented problem size.
+ int num_x_blocks = DIV_CEIL(N, bs.x_block);
+ bs.x_block = DIV_CEIL(N, num_x_blocks);
+
+ bs.x_block = ceil_to_multiple(bs.x_block, strategy::out_width());
+
+ // Work out the rounded size of M - needed for some buffers.
+ bs.m_round = ceil_to_multiple(M, strategy::out_height());
+ bs.strategy_out_height = strategy::out_height();
+
+ return bs;
+}
+
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_ASSEMBLY_HELPERS_H__ */
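A minimal sketch of how this helper is expected to be consumed. The concrete strategy (arm_gemm::sgemm_12x8), the relative arm_gemm include path and the use of NEScheduler for the CPUInfo are assumptions made for illustration; in the patch itself the strategy comes from the Kernel<To, use_dot> traits in NEGEMMInterleavedStrategies.h.

    // Hypothetical usage of calculate_block_sizes() (illustration only, not part of this patch).
    #include "arm_compute/core/NEON/kernels/assembly/Helpers.h"
    #include "arm_compute/runtime/NEON/NEScheduler.h"
    #include "../arm_gemm/kernels/a64_sgemm_12x8.hpp" // assumed AArch64 F32 strategy

    arm_compute::BlockSizes example_block_sizes(unsigned int M, unsigned int N, unsigned int K)
    {
        // The wrappers resolve this type via Kernel<To, use_dot>; it is hard-coded here for brevity.
        using strategy = arm_gemm::sgemm_12x8;

        const arm_compute::CPUInfo &ci = arm_compute::NEScheduler::get().cpu_info();
        return arm_compute::calculate_block_sizes<strategy>(ci, M, N, K);
    }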
diff --git a/arm_compute/core/NEON/kernels/assembly/INEGEMMWrapperKernel.h b/arm_compute/core/NEON/kernels/assembly/INEGEMMWrapperKernel.h
index 02e5b58c9d..63178a738a 100644
--- a/arm_compute/core/NEON/kernels/assembly/INEGEMMWrapperKernel.h
+++ b/arm_compute/core/NEON/kernels/assembly/INEGEMMWrapperKernel.h
@@ -38,11 +38,11 @@ public:
/** Parameters defining the dimensions of the matrices being multiplied */
struct Params
{
- unsigned int M; /**< Rows in output matrix C (and input matrix A). */
- unsigned int N; /**< Columns in output matrix C (and input matrix B). */
- unsigned int K; /**< Columns of input matrix A (= rows of input matrix B). */
- unsigned int batches; /**< Number of "batched" GEMMs (unique A and C, shared B). */
- unsigned int multis; /**< Number of "multi" GEMMs (unique A, B and C). */
+ unsigned int M{ 0 }; /**< Rows in output matrix C (and input matrix A). */
+ unsigned int N{ 0 }; /**< Columns in output matrix C (and input matrix B). */
+ unsigned int K{ 0 }; /**< Columns of input matrix A (= rows of input matrix B). */
+ unsigned int batches{ 0 }; /**< Number of "batched" GEMMs (unique A and C, shared B). */
+ unsigned int multis{ 0 }; /**< Number of "multi" GEMMs (unique A, B and C). */
};
static Params extract_parameters(const ITensor *a, const ITensor *b, const ITensor *c);
diff --git a/arm_compute/core/NEON/kernels/assembly/NEGEMMInterleavedMatrixMultiplyWrapper.h b/arm_compute/core/NEON/kernels/assembly/NEGEMMInterleavedMatrixMultiplyWrapper.h
new file mode 100644
index 0000000000..46a05abcdb
--- /dev/null
+++ b/arm_compute/core/NEON/kernels/assembly/NEGEMMInterleavedMatrixMultiplyWrapper.h
@@ -0,0 +1,132 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_NEGEMMINTERLEAVEDMATRIXMULTIPLYWRAPPER_H__
+#define __ARM_COMPUTE_NEGEMMINTERLEAVEDMATRIXMULTIPLYWRAPPER_H__
+
+#include "arm_compute/core/NEON/kernels/assembly/Helpers.h"
+
+#include "arm_compute/core/NEON/kernels/assembly/INEGEMMWrapperKernel.h"
+#include "arm_compute/core/Window.h"
+
+namespace arm_compute
+{
+class ITensor;
+
+/** Unit of work for @ref NEGEMMInterleavedMatrixMultiplyWrapper to process */
+struct MatrixMultiplyWorkload
+{
+ /** Constructor
+ *
+ * @param[in] offset_transformed_b Offset from the start of transformed_b's allocation.
+ * @param[in] x0 First value to process along the X dimension (N).
+ * @param[in] xmax Last value to process along the X dimension (N).
+ * @param[in] k0 First value to process along the K dimension.
+ * @param[in] kmax Last value to process along the K dimension.
+ * @param[in] multi Multi index.
+ * @param[in] kern_k Number of elements along K actually processed by the kernel.
+ * @param[in] bblocks Number of x_blocks processed by the kernel.
+ */
+ MatrixMultiplyWorkload(unsigned int offset_transformed_b, unsigned int x0, unsigned int xmax, unsigned int k0, unsigned int kmax, unsigned int multi, int kern_k, int bblocks)
+ : _offset_transformed_b(offset_transformed_b), _x0(x0), _xmax(xmax), _k0(k0), _kmax(kmax), _multi(multi), _kern_k(kern_k), _bblocks(bblocks)
+ {
+ }
+ unsigned int _offset_transformed_b; /**< Offset from the start of transformed_b's allocation.*/
+ unsigned int _x0; /**< First value to process along the X dimension (N). */
+ unsigned int _xmax; /**< Last value to process along the X dimension (N). */
+ unsigned int _k0; /**< First value to process along the K dimension. */
+ unsigned int _kmax; /**< Last value to process along the K dimension. */
+ unsigned int _multi; /**< Multi index. */
+ int _kern_k; /**< Number of elements along K actually processed by the kernel. */
+ int _bblocks; /**< Number of x_blocks processed by the kernel. */
+};
+
+/** Common interface for the templated wrappers around the matrix multiply NEON assembly implementations */
+class NEGEMMInterleavedMatrixMultiplyWrapper
+{
+public:
+ /** Transform the block at the given coordinates
+ *
+ * @param[in] wl Workload to process.
+ * @param[in] info Information about the current thread.
+ * @param[in] batch_window Window containing iteration information for the M and batch dimensions.
+ * @param[in] start_offset Offset relative to the beginning of batch_window to start the processing from.
+ * @param[in] end_offset Offset relative to the beginning of batch_window to stop the processing.
+ */
+ virtual void transform(const MatrixMultiplyWorkload &wl, const ThreadInfo &info, const Window &batch_window, const Coordinates &start_offset, const Coordinates &end_offset) = 0;
+ /** Generate an array of workloads
+ *
+ * @param[out] workloads Container to store the generated workloads.
+ */
+ virtual void create_workloads(std::vector<MatrixMultiplyWorkload> &workloads) = 0;
+ /** Default destructor */
+ virtual ~NEGEMMInterleavedMatrixMultiplyWrapper() = default;
+};
+
+/** Equivalent to arm_gemm::GemmInterleaved's strategy::kernel() but using Compute Library types. */
+template <typename To, typename Tr, bool use_dot = false>
+class NEGEMMInterleavedMatrixMultiplyWrapperTemplate : public NEGEMMInterleavedMatrixMultiplyWrapper
+{
+public:
+ /** Configure the matrix multiplication: C = alpha * A * B + beta * C
+ *
+ * @param[in] prepared_a Already reshaped matrix A.
+ * @param[in] transformed_b Already reshaped matrix B.
+ * @param[out] tmp_c Temporary buffer to be used to store intermediate results.
+ * @param[in,out] c Result matrix C.
+ * @param[in] batch_window Window containing iteration information for the M and batch dimensions.
+ * @param[in] block_sizes Block sizes to use for the matrix multiplication (A & B must have been reshaped using these same block sizes).
+ * @param[in] params M, N, K sizes.
+ * @param[in] b_is_pretransposed Whether B is also pretransposed.
+ * @param[in] alpha Alpha value
+ * @param[in] beta Beta value
+ * @param[in] max_num_threads Maximum number of threads that might be used for the calculations.
+ */
+ void configure(const ITensor *prepared_a, const ITensor *transformed_b, ITensor *tmp_c, ITensor *c, const Window &batch_window, const BlockSizes &block_sizes,
+ const INEGEMMWrapperKernel::Params &params, bool b_is_pretransposed, float alpha, float beta, unsigned int max_num_threads);
+
+ // Inherited methods overridden:
+ void transform(const MatrixMultiplyWorkload &wl, const ThreadInfo &info, const Window &batch_window, const Coordinates &start_offset, const Coordinates &end_offset) override;
+ void create_workloads(std::vector<MatrixMultiplyWorkload> &workloads) override;
+
+private:
+ const ITensor *_prepared_a
+ {
+ nullptr
+ };
+ const ITensor *_transformed_b{ nullptr };
+ ITensor *_tmp_c{ nullptr };
+ ITensor *_c{ nullptr };
+ unsigned int _Nsize{ 0 };
+ unsigned int _Ksize{ 0 };
+ bool _transpose_b{ false };
+ BlockSizes _block_sizes{};
+ INEGEMMWrapperKernel::Params _params{};
+ Window _block_walker{};
+ bool _b_is_pretransposed{ false };
+ Tr _alpha{};
+ Tr _beta{};
+};
+
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_NEGEMMINTERLEAVEDMATRIXMULTIPLYWRAPPER_H__ */
diff --git a/arm_compute/core/NEON/kernels/assembly/NEGEMMInterleavedPrepareBWrapperKernel.h b/arm_compute/core/NEON/kernels/assembly/NEGEMMInterleavedPrepareBWrapperKernel.h
new file mode 100644
index 0000000000..e46c33018b
--- /dev/null
+++ b/arm_compute/core/NEON/kernels/assembly/NEGEMMInterleavedPrepareBWrapperKernel.h
@@ -0,0 +1,129 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_NEGEMMINTERLEAVEDPREPAREBWRAPPERKERNEL_H__
+#define __ARM_COMPUTE_NEGEMMINTERLEAVEDPREPAREBWRAPPERKERNEL_H__
+
+#include "arm_compute/core/NEON/INEKernel.h"
+#include "arm_compute/core/NEON/kernels/assembly/Helpers.h"
+#include "arm_compute/core/NEON/kernels/assembly/INEGEMMWrapperKernel.h"
+
+namespace arm_compute
+{
+class ITensor;
+
+/** Unit of work for @ref NEGEMMInterleavedPrepareBWrapperKernel to process */
+struct PrepareBWorkload
+{
+ /** Constructor
+ *
+ * @param[in] offset_b Offset from the start of b's allocation
+ * @param[in] offset_transformed_b Offset from the start of transformed_b's allocation.
+ * @param[in] x0 First value to process along the X dimension (N).
+ * @param[in] xmax Last value to process along the X dimension (N).
+ * @param[in] k0 First value to process along the K dimension.
+ * @param[in] kmax Last value to process along the K dimension.
+ */
+ PrepareBWorkload(unsigned int offset_b, unsigned int offset_transformed_b, unsigned int x0, unsigned int xmax, unsigned int k0, unsigned int kmax)
+ : _offset_b(offset_b), _offset_transformed_b(offset_transformed_b), _x0(x0), _xmax(xmax), _k0(k0), _kmax(kmax)
+ {
+ }
+ unsigned int _offset_b; /**< Offset from the start of b's allocation.*/
+ unsigned int _offset_transformed_b; /**< Offset from the start of transformed_b's allocation.*/
+ unsigned int _x0; /**< First value to process along the X dimension (N). */
+ unsigned int _xmax; /**< Last value to process along the X dimension (N). */
+ unsigned int _k0; /**< First value to process along the K dimension. */
+ unsigned int _kmax; /**< Last value to process along the K dimension. */
+};
+
+/** Common interface for the templated wrappers around the B reshape NEON assembly implementations */
+class NEGEMMInterleavedPrepareBWrapperKernel : public INEKernel
+{
+public:
+ /** Transform the block at the given coordinates
+ *
+ * @param[in] wl Workload to process.
+ * @param[in] info Information about the current thread.
+ */
+ virtual void transform(const PrepareBWorkload &wl, const ThreadInfo &info) = 0;
+ /** Generate an array of workloads
+ *
+ * @param[out] workloads Container to store the generated workloads.
+ */
+ virtual void create_workloads(std::vector<PrepareBWorkload> &workloads) = 0;
+ /** Return the block_sizes used to reshape B
+ *
+ * The same block sizes must be used to reshape A and for the matrix multiplication
+ *
+ * @return The block sizes used to reshape B.
+ */
+ virtual BlockSizes block_sizes() const = 0;
+
+ // Inherited methods overridden:
+ const char *name() const override
+ {
+ return "NEGEMMInterleavedPrepareBWrapperKernel";
+ }
+
+ bool is_parallelisable() const override
+ {
+ return false; // Can't run on arbitrary windows but can be parallelised using an array of workloads
+ }
+};
+
+/** Equivalent to arm_gemm::GemmInterleaved's strategy::transforms::PrepareB() but using Compute Library types.
+ */
+template <typename To, bool use_dot = false>
+class NEGEMMInterleavedPrepareBWrapperKernelTemplate : public NEGEMMInterleavedPrepareBWrapperKernel
+{
+public:
+ /** Configure the reshape B routine.
+ *
+ * @param[in] b Input matrix B.
+ * @param[out] transformed_b Reshaped matrix B.
+ * @param[in] transpose_b Also transpose B?
+ * @param[in] ci CPU information
+ * @param[in] params M, N, K sizes.
+ */
+ void configure(const ITensor *b, ITensor *transformed_b, bool transpose_b, const CPUInfo &ci, const INEGEMMWrapperKernel::Params &params);
+
+ // Inherited methods overridden:
+ void transform(const PrepareBWorkload &wl, const ThreadInfo &info) override;
+ void create_workloads(std::vector<PrepareBWorkload> &workloads) override;
+ void run(const Window &window, const ThreadInfo &info) override;
+ BlockSizes block_sizes() const override;
+
+private:
+ const ITensor *_b
+ {
+ nullptr
+ };
+ ITensor *_transformed_b{ nullptr };
+ unsigned int _Nsize{ 0 };
+ unsigned int _Ksize{ 0 };
+ bool _transpose_b{ false };
+ BlockSizes _block_sizes{};
+};
+
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_NEGEMMINTERLEAVEDPREPAREBWRAPPERKERNEL_H__ */
diff --git a/arm_compute/core/NEON/kernels/assembly/NEGEMMInterleavedTransformAWrapper.h b/arm_compute/core/NEON/kernels/assembly/NEGEMMInterleavedTransformAWrapper.h
new file mode 100644
index 0000000000..b6831e3ca9
--- /dev/null
+++ b/arm_compute/core/NEON/kernels/assembly/NEGEMMInterleavedTransformAWrapper.h
@@ -0,0 +1,110 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_NEGEMMINTERLEAVEDTRANSFORMAWRAPPER_H__
+#define __ARM_COMPUTE_NEGEMMINTERLEAVEDTRANSFORMAWRAPPER_H__
+
+#include "arm_compute/core/CPP/CPPTypes.h"
+#include "arm_compute/core/NEON/kernels/assembly/INEGEMMWrapperKernel.h"
+#include "arm_compute/core/Window.h"
+
+namespace arm_compute
+{
+class ITensor;
+
+/** Unit of work for @ref NEGEMMInterleavedTransformAWrapper to process */
+struct TransformAWorkload
+{
+ /** Constructor
+ *
+ * @param[in] k0 First value to process along the K dimension.
+ * @param[in] kmax Last value to process along the K dimension.
+ * @param[in] multi Multi index.
+ */
+ TransformAWorkload(unsigned int k0, unsigned int kmax, unsigned int multi)
+ : _k0(k0), _kmax(kmax), _multi(multi)
+ {
+ }
+ unsigned int _k0; /**< First value to process along the K dimension. */
+ unsigned int _kmax; /**< Last value to process along the K dimension. */
+ unsigned int _multi; /**< Multi index. */
+};
+
+/** Equivalent to arm_gemm::GemmInterleaved's Transform<strategy::A_interleave, strategy::A_block> but using Compute Library types.
+ *
+ * Note: Each workload converts a different slice of A and writes it to transformed_a (which can only hold one slice at a time), therefore the workloads' execution should be interleaved with other workloads that make use of their result.
+ */
+class NEGEMMInterleavedTransformAWrapper
+{
+public:
+ /** Transform the block at the given coordinates
+ *
+ * @param[in] wl Workload to process.
+ * @param[in] info Information about the current thread.
+ * @param[in] batch_window Window containing iteration information for the M and batch dimensions.
+ * @param[in] start_offset Offset relative to the beginning of batch_window to start the processing from.
+ * @param[in] end_offset Offset relative to the beginning of batch_window to stop the processing.
+ */
+ virtual void transform(const TransformAWorkload &wl, const ThreadInfo &info, const Window &batch_window, const Coordinates &start_offset, const Coordinates &end_offset) = 0;
+ /** Generate an array of workloads
+ *
+ * @param[out] workloads Container to store the generated workloads.
+ */
+ virtual void create_workloads(std::vector<TransformAWorkload> &workloads) = 0;
+ /** Default destructor */
+ virtual ~NEGEMMInterleavedTransformAWrapper() = default;
+};
+
+/** Type specialisations of @ref NEGEMMInterleavedTransformAWrapper */
+template <typename To, bool use_dot = false>
+class NEGEMMInterleavedTransformAWrapperTemplate : public NEGEMMInterleavedTransformAWrapper
+{
+public:
+ /** Configure the reshape A routine.
+ *
+ * @param[in] a Input matrix A.
+ * @param[out] transformed_a Reshaped matrix A.
+ * @param[in] transpose_a Also transpose A?
+ * @param[in] block_walker Window representing the layout of the matrix's blocks
+ * @param[in] params M, N, K sizes.
+ */
+ void configure(const ITensor *a, ITensor *transformed_a, bool transpose_a, const Window &block_walker, const INEGEMMWrapperKernel::Params &params);
+
+ // Inherited methods overridden:
+ void transform(const TransformAWorkload &wl, const ThreadInfo &info, const Window &batch_window, const Coordinates &start_offset, const Coordinates &end_offset) override;
+ void create_workloads(std::vector<TransformAWorkload> &workloads) override;
+
+private:
+ const ITensor *_a
+ {
+ nullptr
+ };
+ ITensor *_transformed_a{ nullptr };
+ unsigned int _Msize{ 0 };
+ unsigned int _Ksize{ 0 };
+ bool _transpose_a{ false };
+ Window _k_multi_window{};
+};
+
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_NEGEMMINTERLEAVEDTRANSFORMAWRAPPER_H__ */
diff --git a/arm_compute/core/WindowIterator.h b/arm_compute/core/WindowIterator.h
index 8e58d0ce1c..32d6293a5a 100644
--- a/arm_compute/core/WindowIterator.h
+++ b/arm_compute/core/WindowIterator.h
@@ -110,6 +110,24 @@ public:
return get_ptr(x, y, z, w);
}
+ /** Returns a pointer to the first element of the tensor
+ *
+ * @return Pointer to the first element.
+ */
+ inline T *first_element()
+ {
+ return reinterpret_cast<T *>(_first);
+ }
+
+ /** Returns a pointer to the first element of the tensor
+ *
+ * @return Pointer to the first element.
+ */
+ inline T *operator()()
+ {
+ return first_element();
+ }
+
private:
uint8_t *_first; /**< Pointer to the first element of the tensor.*/
Strides _strides; /**< Strides in bytes of the tensor */
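For context, a small sketch of the accessor these new shortcuts extend. TensorAccessor and its construction from an ITensor follow how the new wrappers use it later in this patch; the F32 element type is an assumption for the example.

    // Illustration only: reading the first element through the new accessors.
    #include "arm_compute/core/ITensor.h"
    #include "arm_compute/core/WindowIterator.h"

    float *peek_first(const arm_compute::ITensor &tensor)
    {
        arm_compute::TensorAccessor<float> acc(tensor); // assumes an F32 tensor
        return acc.first_element();                     // equivalent to acc()
    }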
diff --git a/arm_compute/runtime/NEON/functions/NEGEMMAssemblyDispatch.h b/arm_compute/runtime/NEON/functions/NEGEMMAssemblyDispatch.h
index 382ef1caba..2fc2cf4a99 100644
--- a/arm_compute/runtime/NEON/functions/NEGEMMAssemblyDispatch.h
+++ b/arm_compute/runtime/NEON/functions/NEGEMMAssemblyDispatch.h
@@ -77,8 +77,9 @@ private:
bool create_function(arm_gemm::GemmMethod method, const ITensor *a, const ITensor *b, ITensor *d, float alpha, float beta, bool pretranspose_hint);
/** Interface for the arm_gemm fallback */
- std::unique_ptr<IFallback> _arm_gemm;
- MemoryGroup _memory_group; /**< Function memory group */
+ std::unique_ptr<IFallback> _arm_gemm;
+ MemoryGroup _memory_group; /**< Function memory group */
+ std::shared_ptr<IMemoryManager> _memory_manager; /**< Copy of the memory manager used to create the memory group, so that it can be reused when instantiating new functions */
public:
/** If supported create an ACL function else fallback to the arm_gemm function.
*
diff --git a/arm_compute/runtime/NEON/functions/assembly/NEGEMMInterleavedWrapper.h b/arm_compute/runtime/NEON/functions/assembly/NEGEMMInterleavedWrapper.h
new file mode 100644
index 0000000000..cead71ed67
--- /dev/null
+++ b/arm_compute/runtime/NEON/functions/assembly/NEGEMMInterleavedWrapper.h
@@ -0,0 +1,99 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_NEGEMMINTERLEAVEDWRAPPER_H__
+#define __ARM_COMPUTE_NEGEMMINTERLEAVEDWRAPPER_H__
+
+#include "arm_compute/core/NEON/kernels/assembly/Helpers.h"
+#include "arm_compute/core/NEON/kernels/assembly/INEGEMMWrapperKernel.h"
+#include "arm_compute/runtime/IFunction.h"
+#include "arm_compute/runtime/IMemoryManager.h"
+#include "arm_compute/runtime/IScheduler.h"
+#include "arm_compute/runtime/MemoryGroup.h"
+#include "arm_compute/runtime/Tensor.h"
+
+#include <memory>
+
+namespace arm_compute
+{
+class ITensor;
+class NEGEMMInterleavedPrepareBWrapperKernel;
+class PrepareBWorkload;
+class TransformAWorkload;
+class MatrixMultiplyWorkload;
+class NEGEMMInterleavedTransformAWrapper;
+class NEGEMMInterleavedMatrixMultiplyWrapper;
+
+/** Equivalent to arm_gemm::GemmInterleaved but using Compute Library types.
+ */
+class NEGEMMInterleavedWrapper : public IFunction
+{
+public:
+ NEGEMMInterleavedWrapper(std::shared_ptr<IMemoryManager> memory_manager = nullptr);
+
+ NEGEMMInterleavedWrapper(const NEGEMMInterleavedWrapper &) = delete;
+ NEGEMMInterleavedWrapper &operator=(const NEGEMMInterleavedWrapper &) = delete;
+
+ /** Initialise the kernel's input and output.
+ *
+ * @note The input and output tensors must have the same dimensions
+ *
+ * @param[in] a Input tensor (Matrix A)
+ * @param[in] b Input tensor (Matrix B)
+ * @param[out] c Output tensor to store the result of matrix multiplication. Data type supported: same as @p a.
+ * @param[in] alpha Scalar multiplier to apply to AB matrix product.
+ * @param[in] beta Scalar multiplier to apply to input C matrix before adding product.
+ * @param[in] pretranspose_b If true, pretranspose B once during the prepare() stage instead of on the fly every time.
+ * @param[in] use_dot (Optional) If the input's type is U8/S8/QASYMM8 then use the dot product flavour of the matrix multiply routine (must be supported by the hardware).
+ */
+ void configure(const ITensor *a, const ITensor *b, ITensor *c, float alpha, float beta, bool pretranspose_b, bool use_dot = false);
+
+ // Inherited methods overridden:
+ void run() override;
+ void prepare() override;
+
+private:
+ MemoryGroup _memory_group;
+ bool _is_prepared{ false };
+ bool _pretranspose_b{ false };
+ Window _block_walker{};
+ Window _batch_window{};
+ const ITensor *_a{ nullptr };
+ const ITensor *_b{ nullptr };
+ ITensor *_c{ nullptr };
+ Tensor _transformed_b{};
+ Tensor _transformed_a{};
+ Tensor _tmp_c{};
+ INEGEMMWrapperKernel::Params _params{};
+ BlockSizes _block_sizes{};
+ std::unique_ptr<NEGEMMInterleavedPrepareBWrapperKernel> _prepare_b{ nullptr };
+ std::unique_ptr<NEGEMMInterleavedTransformAWrapper> _transform_a{ nullptr };
+ std::unique_ptr<NEGEMMInterleavedMatrixMultiplyWrapper> _matrix_multiply{ nullptr };
+ std::vector<TransformAWorkload> _a_workloads{};
+ std::vector<PrepareBWorkload> _b_workloads{};
+ std::vector<MatrixMultiplyWorkload> _mm_workloads{};
+ std::vector<IScheduler::Workload> _workloads{};
+};
+
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_NEGEMMINTERLEAVEDWRAPPER_H__ */
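To show how the new function is meant to be driven, a minimal sketch using only the configure()/run() interface declared above. The tensor shapes, the F32 data type and the assumption that run() performs the one-off B reshape (prepare()) on first execution are illustrative, not part of the patch.

    // Hypothetical end-to-end usage of NEGEMMInterleavedWrapper (illustration only).
    #include "arm_compute/core/TensorInfo.h"
    #include "arm_compute/runtime/Tensor.h"
    #include "arm_compute/runtime/NEON/functions/assembly/NEGEMMInterleavedWrapper.h"

    using namespace arm_compute;

    void run_interleaved_gemm(unsigned int M, unsigned int N, unsigned int K)
    {
        Tensor a, b, c;
        // Shapes use the library's (x, y) = (columns, rows) convention.
        a.allocator()->init(TensorInfo(TensorShape(K, M), 1, DataType::F32));
        b.allocator()->init(TensorInfo(TensorShape(N, K), 1, DataType::F32));
        c.allocator()->init(TensorInfo(TensorShape(N, M), 1, DataType::F32));

        NEGEMMInterleavedWrapper gemm;
        gemm.configure(&a, &b, &c, 1.f, 0.f, true /* pretranspose_b */);

        a.allocator()->allocate();
        b.allocator()->allocate();
        c.allocator()->allocate();

        // ... fill a and b ...

        // _is_prepared above suggests run() triggers the one-off prepare() (B reshape) itself.
        gemm.run();
    }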
diff --git a/src/core/NEON/kernels/assembly/NEGEMMInterleavedMatrixMultiplyWrapper.cpp b/src/core/NEON/kernels/assembly/NEGEMMInterleavedMatrixMultiplyWrapper.cpp
new file mode 100644
index 0000000000..3d42f8a51f
--- /dev/null
+++ b/src/core/NEON/kernels/assembly/NEGEMMInterleavedMatrixMultiplyWrapper.cpp
@@ -0,0 +1,142 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "arm_compute/core/NEON/kernels/assembly/NEGEMMInterleavedMatrixMultiplyWrapper.h"
+
+#include "NEGEMMInterleavedStrategies.h"
+#include "arm_compute/core/Helpers.h"
+#include "arm_compute/core/ITensor.h"
+#include "arm_compute/core/Utils.h"
+#include "arm_compute/core/Validate.h"
+#include "arm_compute/core/WindowIterator.h"
+
+namespace arm_compute
+{
+template <typename To, typename Tr, bool use_dot>
+void NEGEMMInterleavedMatrixMultiplyWrapperTemplate<To, Tr, use_dot>::configure(const ITensor *prepared_a, const ITensor *transformed_b, ITensor *tmp_c, ITensor *c, const Window &block_walker,
+ const BlockSizes &block_sizes, const INEGEMMWrapperKernel::Params &params, bool b_is_pretransposed, float alpha, float beta, unsigned int max_num_threads)
+{
+ using strategy = typename Kernel<To>::strategy;
+
+ _prepared_a = prepared_a;
+ _transformed_b = transformed_b;
+ _tmp_c = tmp_c;
+ _c = c;
+ _block_walker = block_walker;
+ _block_sizes = block_sizes;
+ _params = params;
+ _b_is_pretransposed = b_is_pretransposed;
+ _alpha = alpha;
+ _beta = beta;
+
+ auto_init_if_empty(*_tmp_c->info(), c->info()->clone()->set_tensor_shape(TensorShape{ _block_sizes.x_block * strategy::out_height(), max_num_threads }));
+}
+
+template <typename To, typename Tr, bool use_dot>
+void NEGEMMInterleavedMatrixMultiplyWrapperTemplate<To, Tr, use_dot>::transform(const MatrixMultiplyWorkload &wl, const ThreadInfo &info, const Window &batch_window, const Coordinates &start_offset,
+ const Coordinates &end_offset)
+{
+ using strategy = typename Kernel<To>::strategy;
+
+ strategy strat(info.cpu_info);
+ TensorAccessor<To> prepared_a(*_prepared_a);
+ TensorAccessor<To> transformed_b(*_transformed_b);
+ TensorAccessor<Tr> c(*_c);
+ TensorAccessor<Tr> tmp_c(*_tmp_c);
+
+ int prev_batch = -1;
+ To *a_ptr = nullptr;
+ auto window_iterator = arm_compute::create_window_iterator(batch_window, start_offset, end_offset, [&](const Coordinates & id)
+ {
+ const unsigned int y = id.x();
+ const unsigned int batch = id.y();
+ const unsigned int ymax = std::min(_params.M, y + strategy::out_height());
+
+ // If it's the first block of a new batch then reset the pointer to A.
+ if(prev_batch != static_cast<int>(batch))
+ {
+ const unsigned int first_m = id.x();
+ a_ptr = prepared_a(0, first_m, batch);
+ prev_batch = batch;
+ }
+
+ // Call matrix multiply assembly routine to process the block:
+ strat.kernel(a_ptr, transformed_b(wl._offset_transformed_b), tmp_c(0, info.thread_id), 1, wl._bblocks, wl._kern_k);
+ a_ptr += strategy::out_height() * wl._kern_k;
+
+ // Merge the result with the other blocks' results:
+ strat.transforms.Merge(c(0, 0, batch, wl._multi), tmp_c(0, info.thread_id), c.stride(1), y, ymax, wl._x0, wl._xmax, _alpha, (wl._k0 == 0 ? _beta : static_cast<Tr>(1)));
+ });
+ auto on_new_row_size = [&](unsigned int start, unsigned int end)
+ {
+ //Nothing to do
+ };
+ window_iterator.iterate_2D(on_new_row_size);
+}
+
+template <typename To, typename Tr, bool use_dot>
+void NEGEMMInterleavedMatrixMultiplyWrapperTemplate<To, Tr, use_dot>::create_workloads(std::vector<MatrixMultiplyWorkload> &workloads)
+{
+ using strategy = typename Kernel<To>::strategy;
+
+ unsigned int offset_transformed_b = 0;
+ execute_window_loop(_block_walker, [&](const Coordinates & id)
+ {
+ const unsigned int x0 = id.x();
+ const unsigned int k0 = id.y();
+ const unsigned int multi = id.z();
+
+ const unsigned int xmax = std::min(x0 + _block_walker.x().step(), _params.N);
+ const unsigned int kmax = std::min(k0 + _block_walker.y().step(), _params.K);
+
+ // Figure out how many "K" the kernel will actually process.
+ const int kern_k = ceil_to_multiple(kmax - k0, strategy::k_unroll());
+ const int bblocks = DIV_CEIL(xmax - x0, strategy::out_width());
+
+ workloads.push_back(MatrixMultiplyWorkload(offset_transformed_b, x0, xmax, k0, kmax, multi, kern_k, bblocks));
+
+ if(_b_is_pretransposed)
+ {
+ offset_transformed_b += bblocks * strategy::out_width() * kern_k;
+ }
+ else
+ {
+ ARM_COMPUTE_ERROR("Not supported");
+ }
+ });
+}
+
+//TODO: regroup somewhere ?
+template class NEGEMMInterleavedMatrixMultiplyWrapperTemplate<float, float>;
+#ifdef __aarch64__
+template class NEGEMMInterleavedMatrixMultiplyWrapperTemplate<uint8_t, uint32_t>;
+template class NEGEMMInterleavedMatrixMultiplyWrapperTemplate<int8_t, int32_t>;
+template class NEGEMMInterleavedMatrixMultiplyWrapperTemplate<uint8_t, uint32_t, true>;
+template class NEGEMMInterleavedMatrixMultiplyWrapperTemplate<int8_t, int32_t, true>;
+#endif /* __aarch64__ */
+
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+template class NEGEMMInterleavedMatrixMultiplyWrapperTemplate<float16_t, float16_t>;
+#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
+} // namespace arm_compute
diff --git a/src/core/NEON/kernels/assembly/NEGEMMInterleavedPrepareBWrapperKernel.cpp b/src/core/NEON/kernels/assembly/NEGEMMInterleavedPrepareBWrapperKernel.cpp
new file mode 100644
index 0000000000..f33a14f2af
--- /dev/null
+++ b/src/core/NEON/kernels/assembly/NEGEMMInterleavedPrepareBWrapperKernel.cpp
@@ -0,0 +1,170 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "arm_compute/core/NEON/kernels/assembly/NEGEMMInterleavedPrepareBWrapperKernel.h"
+
+#include "NEGEMMInterleavedStrategies.h"
+#include "arm_compute/core/Helpers.h"
+#include "arm_compute/core/ITensor.h"
+#include "arm_compute/core/Utils.h"
+#include "arm_compute/core/Validate.h"
+
+namespace arm_compute
+{
+namespace
+{
+// Call the lambda function for each workload generated by the passed window.
+template <typename To, bool use_dot, typename Lambda>
+void for_each_element_in_window(const Window &window, const ITensor *b, ITensor *transformed_b, unsigned int N, unsigned int K, Lambda &&lambda)
+{
+ using strategy = typename Kernel<To, use_dot>::strategy;
+
+ unsigned int offset_transformed_b = transformed_b->info()->offset_first_element_in_bytes();
+ execute_window_loop(window, [&](const Coordinates & coordinates)
+ {
+ const unsigned int x0 = coordinates.x();
+ const unsigned int k0 = coordinates.y();
+ const unsigned int multi = coordinates.z();
+
+ const unsigned int offset_b = b->info()->offset_element_in_bytes(Coordinates(0, 0, multi));
+ const unsigned int xmax = std::min(x0 + window.x().step(), N);
+ const unsigned int kmax = std::min(k0 + window.y().step(), K);
+
+ /* Figure out the size of each block. */
+ unsigned int x_size = (xmax - x0);
+ unsigned int k_size = (kmax - k0);
+
+ /* Round sizes up as needed. */
+ x_size = ceil_to_multiple(x_size, strategy::out_width());
+ k_size = ceil_to_multiple(k_size, strategy::k_unroll());
+
+ lambda(PrepareBWorkload(offset_b, offset_transformed_b, x0, xmax, k0, kmax));
+
+ //Each workload represents one block:
+ offset_transformed_b += (x_size * k_size * sizeof(To));
+ });
+}
+
+// Calculate the size of transformed_b:
+template <typename To, bool use_dot>
+unsigned int get_B_pretransposed_array_size(unsigned int N, unsigned int K, const BlockSizes &bs)
+{
+ using strategy = typename Kernel<To, use_dot>::strategy;
+
+ // How many full blocks do N / K contain ?
+ size_t num_full_k = K / bs.k_block;
+ size_t num_full_x = N / bs.x_block;
+
+ ARM_COMPUTE_ERROR_ON(bs.x_block % strategy::out_width() != 0);
+ ARM_COMPUTE_ERROR_ON(bs.k_block % strategy::k_unroll() != 0);
+
+ size_t normal_x_size = bs.x_block;
+ size_t normal_k_size = bs.k_block;
+
+ // Round up the leftovers to be a multiple of the strategy processing size:
+ size_t left_over_x_size = ceil_to_multiple(N % bs.x_block, strategy::out_width());
+ size_t left_over_k_size = ceil_to_multiple(K % bs.k_block, strategy::k_unroll());
+
+ // Calculate the total size of the buffer:
+ size_t total = num_full_k * normal_k_size * (num_full_x * normal_x_size + left_over_x_size);
+ total += left_over_k_size * (left_over_x_size + num_full_x * normal_x_size);
+ total *= sizeof(To);
+ return total;
+}
+
+} // namespace
+
+template <typename To, bool use_dot>
+BlockSizes NEGEMMInterleavedPrepareBWrapperKernelTemplate<To, use_dot>::block_sizes() const
+{
+ return _block_sizes;
+}
+
+template <typename To, bool use_dot>
+void NEGEMMInterleavedPrepareBWrapperKernelTemplate<To, use_dot>::configure(const ITensor *b, ITensor *transformed_b, bool transpose_b, const CPUInfo &ci, const INEGEMMWrapperKernel::Params &params)
+{
+ using strategy = typename Kernel<To, use_dot>::strategy;
+
+ const unsigned int multis = b->info()->tensor_shape().z();
+ _Nsize = b->info()->tensor_shape().x();
+ _Ksize = b->info()->tensor_shape().y();
+ _b = b;
+ _transformed_b = transformed_b;
+ _transpose_b = transpose_b;
+
+ _block_sizes = calculate_block_sizes<strategy>(ci, params.M, params.N, params.K);
+
+ auto_init_if_empty(*transformed_b->info(), b->info()->clone()->set_tensor_shape(TensorShape{ get_B_pretransposed_array_size<To, use_dot>(_Nsize, _Ksize, _block_sizes) }));
+
+ Window window;
+ window.set(Window::DimX, Window::Dimension(0, ceil_to_multiple(_Nsize, _block_sizes.x_block), _block_sizes.x_block));
+ window.set(Window::DimY, Window::Dimension(0, ceil_to_multiple(_Ksize, _block_sizes.k_block), _block_sizes.k_block));
+ window.set(Window::DimZ, Window::Dimension(0, multis));
+
+ INEKernel::configure(window);
+}
+
+template <typename To, bool use_dot>
+void NEGEMMInterleavedPrepareBWrapperKernelTemplate<To, use_dot>::transform(const PrepareBWorkload &wl, const ThreadInfo &info)
+{
+ using strategy = typename Kernel<To, use_dot>::strategy;
+
+ strategy strat(info.cpu_info);
+ strat.transforms.PrepareB(reinterpret_cast<To *>(_transformed_b->buffer() + wl._offset_transformed_b),
+ reinterpret_cast<To *>(_b->buffer() + wl._offset_b),
+ _b->info()->strides_in_bytes().y() / sizeof(To),
+ wl._x0, wl._xmax, wl._k0, wl._kmax, _transpose_b);
+}
+
+template <typename To, bool use_dot>
+void NEGEMMInterleavedPrepareBWrapperKernelTemplate<To, use_dot>::create_workloads(std::vector<PrepareBWorkload> &workloads)
+{
+ for_each_element_in_window<To, use_dot>(window(), _b, _transformed_b, _Nsize, _Ksize, [&workloads](PrepareBWorkload && wl)
+ {
+ workloads.push_back(std::move(wl));
+ });
+}
+
+template <typename To, bool use_dot>
+void NEGEMMInterleavedPrepareBWrapperKernelTemplate<To, use_dot>::run(const Window &window, const ThreadInfo &info)
+{
+ ARM_COMPUTE_ERROR_ON_MISMATCHING_WINDOWS(window, INEKernel::window());
+ for_each_element_in_window<To, use_dot>(window, _b, _transformed_b, _Nsize, _Ksize, [&](PrepareBWorkload && wl)
+ {
+ this->transform(wl, info);
+ });
+}
+
+template class NEGEMMInterleavedPrepareBWrapperKernelTemplate<float>;
+#ifdef __aarch64__
+template class NEGEMMInterleavedPrepareBWrapperKernelTemplate<uint8_t>;
+template class NEGEMMInterleavedPrepareBWrapperKernelTemplate<int8_t>;
+template class NEGEMMInterleavedPrepareBWrapperKernelTemplate<uint8_t, true>;
+template class NEGEMMInterleavedPrepareBWrapperKernelTemplate<int8_t, true>;
+#endif /* __aarch64__ */
+
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+template class NEGEMMInterleavedPrepareBWrapperKernelTemplate<float16_t>;
+#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
+} // namespace arm_compute
diff --git a/src/core/NEON/kernels/assembly/NEGEMMInterleavedStrategies.h b/src/core/NEON/kernels/assembly/NEGEMMInterleavedStrategies.h
new file mode 100644
index 0000000000..26a8ade461
--- /dev/null
+++ b/src/core/NEON/kernels/assembly/NEGEMMInterleavedStrategies.h
@@ -0,0 +1,95 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_NEGEMMINTERLEAVEDSTRATEGIES_H__
+#define __ARM_COMPUTE_NEGEMMINTERLEAVEDSTRATEGIES_H__
+
+#include "../arm_gemm/utils.hpp"
+#include "arm_gemm.hpp"
+
+#include "../arm_gemm/mergeresults.hpp"
+#include "../arm_gemm/transform.hpp"
+
+#include "../arm_gemm/kernels/a32_sgemm_8x6.hpp"
+#include "../arm_gemm/kernels/a64_gemm_s8_12x8.hpp"
+#include "../arm_gemm/kernels/a64_gemm_s8_4x4.hpp"
+#include "../arm_gemm/kernels/a64_gemm_u8_12x8.hpp"
+#include "../arm_gemm/kernels/a64_gemm_u8_4x4.hpp"
+#include "../arm_gemm/kernels/a64_hgemm_24x8.hpp"
+#include "../arm_gemm/kernels/a64_sgemm_12x8.hpp"
+
+namespace arm_compute
+{
+namespace
+{
+template <typename To, bool use_dot = false>
+struct Kernel
+{
+};
+
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+template <>
+struct Kernel<float16_t, false>
+{
+ using strategy = arm_gemm::hgemm_24x8;
+};
+#endif /*__ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
+#ifdef __aarch64__
+template <>
+struct Kernel<float, false>
+{
+ using strategy = arm_gemm::sgemm_12x8;
+};
+template <>
+struct Kernel<int8_t, false>
+{
+ using strategy = arm_gemm::gemm_s8_4x4;
+};
+template <>
+struct Kernel<uint8_t, false>
+{
+ using strategy = arm_gemm::gemm_u8_4x4;
+};
+
+//Use different strategies for 8bit dot product:
+template <>
+struct Kernel<int8_t, true>
+{
+ using strategy = arm_gemm::gemm_s8_12x8;
+};
+template <>
+struct Kernel<uint8_t, true>
+{
+ using strategy = arm_gemm::gemm_u8_12x8;
+};
+#else
+template <>
+struct Kernel<float, false>
+{
+ using strategy = arm_gemm::sgemm_8x6;
+};
+#endif /* __aarch64__ */
+
+} // namespace
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_NEGEMMINTERLEAVEDSTRATEGIES_H__ */
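Since these traits are only consumed through their strategy typedef, a couple of compile-time checks make the mapping explicit. This sketch assumes an AArch64 build and a translation unit that includes this header and reopens namespace arm_compute (as the wrappers' .cpp files do).

    // Illustration only: the strategy selected by Kernel<To, use_dot> on AArch64.
    #include <type_traits>

    namespace arm_compute
    {
    static_assert(std::is_same<Kernel<float>::strategy, arm_gemm::sgemm_12x8>::value,
                  "F32 maps to the 12x8 SGEMM strategy");
    static_assert(std::is_same<Kernel<uint8_t, true>::strategy, arm_gemm::gemm_u8_12x8>::value,
                  "use_dot selects the dot-product flavour for 8-bit types");
    } // namespace arm_compute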
diff --git a/src/core/NEON/kernels/assembly/NEGEMMInterleavedTransformAWrapper.cpp b/src/core/NEON/kernels/assembly/NEGEMMInterleavedTransformAWrapper.cpp
new file mode 100644
index 0000000000..3b80a1f940
--- /dev/null
+++ b/src/core/NEON/kernels/assembly/NEGEMMInterleavedTransformAWrapper.cpp
@@ -0,0 +1,118 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "arm_compute/core/NEON/kernels/assembly/NEGEMMInterleavedTransformAWrapper.h"
+
+#include "NEGEMMInterleavedStrategies.h"
+#include "arm_compute/core/Helpers.h"
+#include "arm_compute/core/ITensor.h"
+#include "arm_compute/core/Utils.h"
+#include "arm_compute/core/Validate.h"
+#include "arm_compute/core/WindowIterator.h"
+
+#include "utils/TypePrinter.h"
+
+namespace arm_compute
+{
+template <typename To, bool use_dot>
+void NEGEMMInterleavedTransformAWrapperTemplate<To, use_dot>::configure(const ITensor *a, ITensor *transformed_a, bool transpose_a, const Window &block_walker,
+ const INEGEMMWrapperKernel::Params &params)
+{
+ _a = a;
+ _transformed_a = transformed_a;
+ _transpose_a = transpose_a;
+ _Ksize = params.K;
+ _Msize = params.M;
+ _k_multi_window = block_walker.shift_dimensions(1); // block_walker contains (M,K,Multi) --> shift by 1 to get rid of the "M" dimension
+}
+
+template <typename To, bool use_dot>
+void NEGEMMInterleavedTransformAWrapperTemplate<To, use_dot>::transform(const TransformAWorkload &wl, const ThreadInfo &info, const Window &batch_window, const Coordinates &start_offset,
+ const Coordinates &end_offset)
+{
+ using strategy = typename Kernel<To, use_dot>::strategy;
+
+ strategy strat(info.cpu_info);
+ TensorAccessor<To> a(*_a);
+ TensorAccessor<To> transformed_a(*_transformed_a);
+
+ if(_a->info()->data_layout() == DataLayout::NHWC)
+ {
+ // In the case of NHWC we want to interpret the output shape as 3D. Thus, the batch stride for A is
+ // the relevant multiple of the row stride.
+ const size_t nhwc_batch_stride = _a->info()->strides_in_bytes().y() * _Msize;
+ a.set_stride(2, nhwc_batch_stride);
+ }
+
+ unsigned int last_m = 0;
+ //TODO: Create a new iterate_1D( DimY);
+ int last_y = -1;
+ auto window_iterator = arm_compute::create_window_iterator(batch_window, start_offset, end_offset, [&](const Coordinates & id)
+ {
+ if(id.y() != last_y)
+ {
+ last_y = id.y();
+ unsigned int batch = id.y();
+ unsigned int first_m = id.x();
+
+ if(first_m >= last_m)
+ return;
+
+ strat.transforms.PrepareA(transformed_a(0, first_m, batch),
+ a(0, 0, batch, wl._multi),
+ a.stride(1), first_m, last_m, wl._k0, wl._kmax, _transpose_a);
+ }
+ });
+ auto on_new_row_size = [&](unsigned int start, unsigned int end)
+ {
+ last_m = std::min(end, _Msize);
+ };
+ window_iterator.iterate_2D(on_new_row_size);
+}
+
+template <typename To, bool use_dot>
+void NEGEMMInterleavedTransformAWrapperTemplate<To, use_dot>::create_workloads(std::vector<TransformAWorkload> &workloads)
+{
+ execute_window_loop(_k_multi_window, [&](const Coordinates & id)
+ {
+ const unsigned int k0 = id.x();
+ const unsigned int multi = id.y();
+ const unsigned int kmax = std::min(k0 + _k_multi_window.x().step(), _Ksize);
+
+ workloads.push_back(TransformAWorkload(k0, kmax, multi));
+ });
+}
+
+template class NEGEMMInterleavedTransformAWrapperTemplate<float>;
+#ifdef __aarch64__
+template class NEGEMMInterleavedTransformAWrapperTemplate<uint8_t>;
+template class NEGEMMInterleavedTransformAWrapperTemplate<int8_t>;
+template class NEGEMMInterleavedTransformAWrapperTemplate<uint8_t, true>;
+template class NEGEMMInterleavedTransformAWrapperTemplate<int8_t, true>;
+#endif /* __aarch64__ */
+
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+template class NEGEMMInterleavedTransformAWrapperTemplate<float16_t>;
+#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
+} // namespace arm_compute
diff --git a/src/core/NEON/kernels/assembly/NEGEMMNativeWrapperKernel.cpp b/src/core/NEON/kernels/assembly/NEGEMMNativeWrapperKernel.cpp
index ea6a06cada..e452dfbcf2 100644
--- a/src/core/NEON/kernels/assembly/NEGEMMNativeWrapperKernel.cpp
+++ b/src/core/NEON/kernels/assembly/NEGEMMNativeWrapperKernel.cpp
@@ -42,6 +42,8 @@
namespace arm_compute
{
+namespace
+{
template <typename To, typename Tr>
struct Kernel
{
@@ -55,6 +57,8 @@ struct Kernel<float, float>
};
#endif /* __aarch64__ */
+} // namespace
+
template <typename To, typename Tr>
Window NEGEMMNativeWrapperKernel<To, Tr>::configure_internal(float alpha, float beta)
{
diff --git a/src/runtime/NEON/functions/NEGEMMAssemblyDispatch.cpp b/src/runtime/NEON/functions/NEGEMMAssemblyDispatch.cpp
index f17da7d2e4..8ba620fe51 100644
--- a/src/runtime/NEON/functions/NEGEMMAssemblyDispatch.cpp
+++ b/src/runtime/NEON/functions/NEGEMMAssemblyDispatch.cpp
@@ -24,9 +24,13 @@
#include "arm_compute/runtime/NEON/functions/NEGEMMAssemblyDispatch.h"
#include "arm_compute/core/CPP/Validate.h"
+#include "arm_compute/core/NEON/kernels/assembly/NEGEMMInterleavedMatrixMultiplyWrapper.h"
+#include "arm_compute/core/NEON/kernels/assembly/NEGEMMInterleavedPrepareBWrapperKernel.h"
+#include "arm_compute/core/NEON/kernels/assembly/NEGEMMInterleavedTransformAWrapper.h"
#include "arm_compute/core/NEON/kernels/assembly/NEGEMMNativeWrapperKernel.h"
#include "arm_compute/runtime/NEON/NEScheduler.h"
#include "arm_compute/runtime/NEON/functions/NESimpleAssemblyFunction.h"
+#include "arm_compute/runtime/NEON/functions/assembly/NEGEMMInterleavedWrapper.h"
#include <arm_neon.h>
@@ -34,8 +38,31 @@ namespace arm_compute
{
namespace
{
+std::unique_ptr<IFunction> create_function_all_types(arm_gemm::GemmMethod method, const ITensor *a, const ITensor *b, ITensor *d, float alpha, float beta, bool pretranspose_hint,
+ std::shared_ptr<IMemoryManager> memory_manager)
+
+{
+ //Note: It's safe to not check for FP16 support because this was already checked in NEGEMMAssemblyDispatch::configure()
+ switch(method)
+ {
+ case arm_gemm::GemmMethod::GEMM_INTERLEAVED:
+ {
+ if(!pretranspose_hint)
+ {
+ return nullptr;
+ }
+ auto function = support::cpp14::make_unique<NEGEMMInterleavedWrapper>(memory_manager);
+ function->configure(a, b, d, alpha, beta, pretranspose_hint);
+ return std::move(function);
+ }
+ default:
+ return nullptr;
+ }
+}
+
template <typename TypeInput, typename TypeOutput>
-std::unique_ptr<IFunction> create_function(arm_gemm::GemmMethod method, const ITensor *a, const ITensor *b, ITensor *d, float alpha, float beta, bool pretranspose_hint)
+std::unique_ptr<IFunction> create_function(arm_gemm::GemmMethod method, const ITensor *a, const ITensor *b, ITensor *d, float alpha, float beta, bool pretranspose_hint,
+ std::shared_ptr<IMemoryManager> memory_manager)
{
ARM_COMPUTE_UNUSED(method);
ARM_COMPUTE_UNUSED(a);
@@ -44,21 +71,63 @@ std::unique_ptr<IFunction> create_function(arm_gemm::GemmMethod method, const IT
ARM_COMPUTE_UNUSED(alpha);
ARM_COMPUTE_UNUSED(beta);
ARM_COMPUTE_UNUSED(pretranspose_hint);
+ ARM_COMPUTE_UNUSED(memory_manager);
return nullptr;
}
+
+#ifdef __aarch64__
template <>
-std::unique_ptr<IFunction> create_function<float, float>(arm_gemm::GemmMethod method, const ITensor *a, const ITensor *b, ITensor *d, float alpha, float beta, bool pretranspose_hint)
+std::unique_ptr<IFunction> create_function<int8_t, int32_t>(arm_gemm::GemmMethod method, const ITensor *a, const ITensor *b, ITensor *d, float alpha, float beta, bool pretranspose_hint,
+ std::shared_ptr<IMemoryManager> memory_manager)
+{
+ switch(method)
+ {
+ case arm_gemm::GemmMethod::GEMM_INTERLEAVED_DOT:
+ {
+ if(!pretranspose_hint)
+ {
+ return nullptr;
+ }
+ auto function = support::cpp14::make_unique<NEGEMMInterleavedWrapper>(memory_manager);
+ function->configure(a, b, d, alpha, beta, pretranspose_hint, true /* use_dot */);
+ return std::move(function);
+ }
+ default:
+ return nullptr;
+ }
+ return nullptr;
+}
+
+template <>
+std::unique_ptr<IFunction> create_function<uint8_t, uint32_t>(arm_gemm::GemmMethod method, const ITensor *a, const ITensor *b, ITensor *d, float alpha, float beta, bool pretranspose_hint,
+ std::shared_ptr<IMemoryManager> memory_manager)
+{
+ switch(method)
+ {
+ case arm_gemm::GemmMethod::GEMM_INTERLEAVED_DOT:
+ {
+ if(!pretranspose_hint)
+ {
+ return nullptr;
+ }
+ auto function = support::cpp14::make_unique<NEGEMMInterleavedWrapper>(memory_manager);
+ function->configure(a, b, d, alpha, beta, pretranspose_hint, true /* use_dot */);
+ return std::move(function);
+ }
+ default:
+ return nullptr;
+ }
+ return nullptr;
+}
+
+template <>
+std::unique_ptr<IFunction> create_function<float, float>(arm_gemm::GemmMethod method, const ITensor *a, const ITensor *b, ITensor *d, float alpha, float beta, bool pretranspose_hint,
+ std::shared_ptr<IMemoryManager> memory_manager)
{
- ARM_COMPUTE_UNUSED(method);
- ARM_COMPUTE_UNUSED(a);
- ARM_COMPUTE_UNUSED(b);
- ARM_COMPUTE_UNUSED(d);
- ARM_COMPUTE_UNUSED(alpha);
- ARM_COMPUTE_UNUSED(beta);
ARM_COMPUTE_UNUSED(pretranspose_hint);
+ ARM_COMPUTE_UNUSED(memory_manager);
switch(method)
{
-#ifdef __aarch64__
case arm_gemm::GemmMethod::GEMM_NATIVE:
{
auto kernel = support::cpp14::make_unique<NEGEMMNativeWrapperKernel<float, float>>();
@@ -67,11 +136,11 @@ std::unique_ptr<IFunction> create_function<float, float>(arm_gemm::GemmMethod me
function->configure(std::move(kernel));
return std::move(function);
}
-#endif /* __aarch64__ */
default:
return nullptr;
}
}
+#endif /* __aarch64__ */
/** Fallback in case ACL doesn't have a function */
template <typename TypeInput, typename TypeOutput>
@@ -173,11 +242,11 @@ void Fallback<TypeInput, TypeOutput>::prepare()
// Pretranspose B if required
if(_gemm_kernel_asm->B_pretranspose_required())
{
+ ARM_COMPUTE_ERROR_ON(_pretranspose.buffer() == nullptr);
const int ldb = _b->info()->strides_in_bytes().y() / sizeof(TypeInput);
const auto in1_ptr = reinterpret_cast<const TypeInput *>(_b->buffer() + _b->info()->offset_first_element_in_bytes());
const int multi_stride_b = _b->info()->strides_in_bytes().z() / sizeof(TypeInput);
- ARM_COMPUTE_ERROR_ON(_pretranspose.buffer() == nullptr);
_gemm_kernel_asm->pretranspose_B_array(_pretranspose.buffer(), in1_ptr, ldb, multi_stride_b);
_b->mark_as_unused();
}
@@ -260,7 +329,7 @@ void Fallback<TypeInput, TypeOutput>::run()
template <typename TypeInput, typename TypeOutput>
void create_function_or_arm_gemm(std::unique_ptr<IFunction> &acl_function, std::unique_ptr<NEGEMMAssemblyDispatch::IFallback> &arm_gemm, MemoryGroup &memory_group, const ITensor *a, const ITensor *b,
- ITensor *d, float alpha, float beta, bool pretranspose_hint)
+ ITensor *d, float alpha, float beta, bool pretranspose_hint, std::shared_ptr<IMemoryManager> memory_manager)
{
INEGEMMWrapperKernel::Params p = INEGEMMWrapperKernel::extract_parameters(a, b, d);
const CPUInfo &ci = NEScheduler::get().cpu_info();
@@ -269,7 +338,13 @@ void create_function_or_arm_gemm(std::unique_ptr<IFunction> &acl_function, std::
arm_gemm::GemmArgs<TypeOutput> args(&ci, p.M, p.N, p.K, p.batches, p.multis, false, false, alpha, beta, num_threads, pretranspose_hint);
//Try to create an ACL function:
- acl_function = create_function<TypeInput, TypeOutput>(arm_gemm::get_gemm_method<TypeInput, TypeOutput>(args), a, b, d, alpha, beta, pretranspose_hint);
+ acl_function = create_function_all_types(arm_gemm::get_gemm_method<TypeInput, TypeOutput>(args), a, b, d, alpha, beta, pretranspose_hint, memory_manager);
+    // If the type-agnostic factory failed to create an ACL function, try the specialised one:
+ if(acl_function == nullptr)
+ {
+ acl_function = create_function<TypeInput, TypeOutput>(arm_gemm::get_gemm_method<TypeInput, TypeOutput>(args), a, b, d, alpha, beta, pretranspose_hint, memory_manager);
+ }
+ //If we still don't have an ACL function:
if(acl_function == nullptr)
{
//Fallback onto arm_gemm function if ACL doesn't support this method.
@@ -282,7 +357,7 @@ void create_function_or_arm_gemm(std::unique_ptr<IFunction> &acl_function, std::
} //namespace
NEGEMMAssemblyDispatch::NEGEMMAssemblyDispatch(std::shared_ptr<IMemoryManager> memory_manager)
- : _function(nullptr), _arm_gemm(nullptr), _memory_group(std::move(memory_manager))
+ : _function(nullptr), _arm_gemm(nullptr), _memory_group(memory_manager), _memory_manager(memory_manager)
{
}
@@ -321,20 +396,20 @@ void NEGEMMAssemblyDispatch::configure(const ITensor *a, const ITensor *b, ITens
switch(a->info()->data_type())
{
case DataType::F32:
- create_function_or_arm_gemm<float, float>(_function, _arm_gemm, _memory_group, a, b, d, alpha, beta, pretranspose_hint);
+ create_function_or_arm_gemm<float, float>(_function, _arm_gemm, _memory_group, a, b, d, alpha, beta, pretranspose_hint, _memory_manager);
break;
#ifdef __aarch64__
case DataType::U8:
case DataType::QASYMM8:
- create_function_or_arm_gemm<uint8_t, uint32_t>(_function, _arm_gemm, _memory_group, a, b, d, alpha, beta, pretranspose_hint);
+ create_function_or_arm_gemm<uint8_t, uint32_t>(_function, _arm_gemm, _memory_group, a, b, d, alpha, beta, pretranspose_hint, _memory_manager);
break;
case DataType::S8:
- create_function_or_arm_gemm<int8_t, int32_t>(_function, _arm_gemm, _memory_group, a, b, d, alpha, beta, pretranspose_hint);
+ create_function_or_arm_gemm<int8_t, int32_t>(_function, _arm_gemm, _memory_group, a, b, d, alpha, beta, pretranspose_hint, _memory_manager);
break;
#endif /* __aarch64__ */
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
case DataType::F16:
- create_function_or_arm_gemm<float16_t, float16_t>(_function, _arm_gemm, _memory_group, a, b, d, alpha, beta, pretranspose_hint);
+ create_function_or_arm_gemm<float16_t, float16_t>(_function, _arm_gemm, _memory_group, a, b, d, alpha, beta, pretranspose_hint, _memory_manager);
break;
#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
default:
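With this change create_function_or_arm_gemm() tries three options in order: the new type-agnostic create_function_all_types() (GEMM_INTERLEAVED via NEGEMMInterleavedWrapper, and only when a pretranspose hint is given), then the type-specialised create_function<TypeInput, TypeOutput>() overloads (GEMM_INTERLEAVED_DOT for the 8-bit types, GEMM_NATIVE for float), and finally the arm_gemm fallback. The standalone sketch below models that selection order with stand-in types; it is illustrative only and deliberately drops the tensor, alpha/beta and memory-manager arguments.

// Standalone sketch (not ACL code): the three-stage selection performed by
// create_function_or_arm_gemm() above, reduced to the method and pretranspose hint.
#include <iostream>
#include <memory>

enum class GemmMethod { GEMM_NATIVE, GEMM_INTERLEAVED, GEMM_INTERLEAVED_DOT };

struct IFunction { virtual ~IFunction() = default; virtual const char *name() const = 0; };
struct InterleavedWrapper : IFunction { const char *name() const override { return "NEGEMMInterleavedWrapper"; } };
struct NativeWrapper : IFunction { const char *name() const override { return "NEGEMMNativeWrapperKernel"; } };
struct ArmGemmFallback : IFunction { const char *name() const override { return "arm_gemm fallback"; } };

// Stage 1: type agnostic, but only usable when B can be pretransposed.
std::unique_ptr<IFunction> create_function_all_types(GemmMethod method, bool pretranspose_hint)
{
    if(method == GemmMethod::GEMM_INTERLEAVED && pretranspose_hint)
    {
        return std::make_unique<InterleavedWrapper>();
    }
    return nullptr;
}

// Stage 2: stands in for the float/float specialisation, which accepts GEMM_NATIVE.
std::unique_ptr<IFunction> create_function_float(GemmMethod method, bool /*pretranspose_hint*/)
{
    if(method == GemmMethod::GEMM_NATIVE)
    {
        return std::make_unique<NativeWrapper>();
    }
    return nullptr;
}

std::unique_ptr<IFunction> select(GemmMethod method, bool pretranspose_hint)
{
    auto fn = create_function_all_types(method, pretranspose_hint);
    if(fn == nullptr)
    {
        fn = create_function_float(method, pretranspose_hint);
    }
    if(fn == nullptr)
    {
        // Stage 3: no ACL function supports this method, let arm_gemm handle it.
        fn = std::make_unique<ArmGemmFallback>();
    }
    return fn;
}

int main()
{
    std::cout << select(GemmMethod::GEMM_INTERLEAVED, true)->name() << "\n";  // NEGEMMInterleavedWrapper
    std::cout << select(GemmMethod::GEMM_NATIVE, false)->name() << "\n";      // NEGEMMNativeWrapperKernel
    std::cout << select(GemmMethod::GEMM_INTERLEAVED, false)->name() << "\n"; // arm_gemm fallback
    return 0;
}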
diff --git a/src/runtime/NEON/functions/assembly/NEGEMMInterleavedWrapper.cpp b/src/runtime/NEON/functions/assembly/NEGEMMInterleavedWrapper.cpp
new file mode 100644
index 0000000000..434723ca1a
--- /dev/null
+++ b/src/runtime/NEON/functions/assembly/NEGEMMInterleavedWrapper.cpp
@@ -0,0 +1,260 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "arm_compute/runtime/NEON/functions/assembly/NEGEMMInterleavedWrapper.h"
+
+#include "arm_compute/core/ITensor.h"
+#include "arm_compute/core/NEON/kernels/assembly/NEGEMMInterleavedMatrixMultiplyWrapper.h"
+#include "arm_compute/core/NEON/kernels/assembly/NEGEMMInterleavedPrepareBWrapperKernel.h"
+#include "arm_compute/core/NEON/kernels/assembly/NEGEMMInterleavedTransformAWrapper.h"
+#include "arm_compute/core/Utils.h"
+#include "arm_compute/runtime/NEON/NEScheduler.h"
+
+namespace arm_compute
+{
+NEGEMMInterleavedWrapper::NEGEMMInterleavedWrapper(std::shared_ptr<IMemoryManager> memory_manager)
+ : _memory_group(std::move(memory_manager))
+{
+}
+void NEGEMMInterleavedWrapper::run()
+{
+ prepare();
+
+ _memory_group.acquire();
+ NEScheduler::get().run_workloads(_workloads);
+ _memory_group.release();
+}
+
+void NEGEMMInterleavedWrapper::prepare()
+{
+ if(!_is_prepared)
+ {
+ if(_pretranspose_b)
+ {
+ NEScheduler::get().schedule(_prepare_b.get(), Window::DimX);
+ _b->mark_as_unused();
+ }
+ else
+ {
+ _prepare_b->create_workloads(_b_workloads);
+ }
+ _transform_a->create_workloads(_a_workloads);
+ _matrix_multiply->create_workloads(_mm_workloads);
+
+ //Maximum number of workloads to create:
+ const unsigned int num_threads = NEScheduler::get().num_threads();
+ const unsigned int max_iterations = num_threads == 1 ? 1 : num_threads * 4;
+ //Maximum number of iterations the parameters allow:
+ const unsigned int num_iterations = _batch_window.num_iterations_total();
+ // Keep the smallest of the two:
+ const unsigned int num_windows = std::min(num_iterations, max_iterations);
+ const TensorShape window_shape = _batch_window.shape();
+
+ // Create a 1D window to dynamically split the batch window:
+ Window win_1D;
+ win_1D.set(0, Window::Dimension(0, num_iterations));
+
+ // Create one workload for each sub-window:
+ for(unsigned int w = 0; w < num_windows; w++)
+ {
+ Window win = win_1D.split_window(0, w, num_windows);
+ const Coordinates start_offset = index2coords(window_shape, win.x().start());
+ const Coordinates end_offset = index2coords(window_shape, win.x().end() - 1);
+ const unsigned int num_x_blocks = _block_walker.num_iterations(Window::DimX);
+
+ auto workload = [start_offset, end_offset, num_x_blocks, this](const ThreadInfo & info)
+ {
+ //For each block of rows in "M"
+ auto workload_mm = this->_mm_workloads.begin();
+ for(auto workload_a = this->_a_workloads.begin(); workload_a != this->_a_workloads.end(); workload_a++)
+ {
+ // Transform one k_block from A:
+ this->_transform_a->transform(*workload_a, info, this->_batch_window, start_offset, end_offset);
+ // Then perform the matrix multiplication for each x block along N:
+ for(unsigned int i = 0; i < num_x_blocks; i++)
+ {
+ ARM_COMPUTE_ERROR_ON(workload_mm == this->_mm_workloads.end());
+ this->_matrix_multiply->transform(*workload_mm++, info, this->_batch_window, start_offset, end_offset);
+ }
+ }
+ };
+ _workloads.push_back(workload);
+ }
+
+ _is_prepared = true;
+ }
+}
+
+namespace
+{
+// Factory to instantiate NEGEMMInterleavedPrepareBWrapperKernel:
+template <typename InputType, bool use_dot = false>
+std::unique_ptr<NEGEMMInterleavedPrepareBWrapperKernel> instantiate_prepareB(const ITensor *b, ITensor *transformed_b, const INEGEMMWrapperKernel::Params &params)
+{
+ auto prepare_b = support::cpp14::make_unique<NEGEMMInterleavedPrepareBWrapperKernelTemplate<InputType, use_dot>>();
+ prepare_b->configure(b, transformed_b, false, NEScheduler::get().cpu_info(), params);
+ return std::move(prepare_b);
+}
+
+// Factory to instantiate NEGEMMInterleavedTransformAWrapperTemplate:
+template <typename InputType, bool use_dot = false>
+std::unique_ptr<NEGEMMInterleavedTransformAWrapper> instantiate_transformA(const ITensor *a, ITensor *transformed_a, const Window &block_walker, const INEGEMMWrapperKernel::Params &params)
+{
+ auto transform_a = support::cpp14::make_unique<NEGEMMInterleavedTransformAWrapperTemplate<InputType, use_dot>>();
+ transform_a->configure(a, transformed_a, false, block_walker, params);
+ return std::move(transform_a);
+}
+
+// Factory to instantiate NEGEMMInterleavedMatrixMultiplyWrapperTemplate:
+template <typename InputType, typename OutputType, bool use_dot = false>
+std::unique_ptr<NEGEMMInterleavedMatrixMultiplyWrapper> instantiate_matrix_multiply(const ITensor *transformed_a, const ITensor *transformed_b, ITensor *tmp_c, ITensor *c, const Window &block_walker,
+ const BlockSizes &block_sizes, const INEGEMMWrapperKernel::Params &params, bool pretranspose_b, float alpha, float beta)
+{
+ auto matrix_multiply = support::cpp14::make_unique<NEGEMMInterleavedMatrixMultiplyWrapperTemplate<InputType, OutputType, use_dot>>();
+ matrix_multiply->configure(transformed_a, transformed_b, tmp_c, c, block_walker, block_sizes, params, pretranspose_b, alpha, beta, NEScheduler::get().num_threads());
+ return std::move(matrix_multiply);
+}
+} // namespace
+
+void NEGEMMInterleavedWrapper::configure(const ITensor *a, const ITensor *b, ITensor *c, float alpha, float beta, bool pretranspose_b, bool use_dot)
+{
+ _params = INEGEMMWrapperKernel::extract_parameters(a, b, c);
+ _a = a;
+ _b = b;
+ _c = c;
+ _pretranspose_b = pretranspose_b;
+
+ DataType input_type = a->info()->data_type();
+
+ // Forcing 128-byte alignment (required by 32-bit kernels)
+ const unsigned int alignment = 128;
+ _transformed_b.allocator()->init(TensorInfo{}, alignment);
+ _tmp_c.allocator()->init(TensorInfo{}, alignment);
+ if(!_pretranspose_b)
+ {
+ // If B is transposed at every iteration then transformed_B can be managed:
+ _memory_group.manage(&_transformed_b);
+ }
+ switch(input_type)
+ {
+ case DataType::F32:
+ _prepare_b = instantiate_prepareB<float>(_b, &_transformed_b, _params);
+ break;
+#ifdef __aarch64__
+ case DataType::U8:
+ case DataType::QASYMM8:
+ if(use_dot)
+ {
+ _prepare_b = instantiate_prepareB<uint8_t, true>(_b, &_transformed_b, _params);
+ }
+ else
+ {
+ _prepare_b = instantiate_prepareB<uint8_t, false>(_b, &_transformed_b, _params);
+ }
+ break;
+ case DataType::S8:
+ if(use_dot)
+ {
+ _prepare_b = instantiate_prepareB<int8_t, true>(_b, &_transformed_b, _params);
+ }
+ else
+ {
+ _prepare_b = instantiate_prepareB<int8_t, false>(_b, &_transformed_b, _params);
+ }
+ break;
+#endif /* __aarch64__ */
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+ case DataType::F16:
+ _prepare_b = instantiate_prepareB<__fp16>(_b, &_transformed_b, _params);
+ break;
+#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
+ default:
+ ARM_COMPUTE_ERROR("DataType not supported");
+ break;
+ }
+ ARM_COMPUTE_ERROR_ON(_prepare_b == nullptr);
+
+ _block_sizes = _prepare_b->block_sizes();
+
+ _block_walker.set(Window::DimX, Window::Dimension(0, ceil_to_multiple(_params.N, _block_sizes.x_block), _block_sizes.x_block));
+ _block_walker.set(Window::DimY, Window::Dimension(0, ceil_to_multiple(_params.K, _block_sizes.k_block), _block_sizes.k_block));
+ _block_walker.set(Window::DimZ, Window::Dimension(0, _params.multis));
+
+ _batch_window.set(Window::DimX, Window::Dimension(0, ceil_to_multiple(_block_sizes.m_round, _block_sizes.strategy_out_height), _block_sizes.strategy_out_height));
+ _batch_window.set(Window::DimY, Window::Dimension(0, _params.batches));
+
+ _transformed_a.allocator()->init(TensorInfo(TensorShape{ _block_sizes.k_block, _block_sizes.m_round, _params.batches }, 1, input_type), alignment);
+ _memory_group.manage(&_transformed_a);
+ _memory_group.manage(&_tmp_c);
+
+ switch(input_type)
+ {
+ case DataType::F32:
+ _transform_a = instantiate_transformA<float>(_a, &_transformed_a, _block_walker, _params);
+ _matrix_multiply = instantiate_matrix_multiply<float, float>(&_transformed_a, &_transformed_b, &_tmp_c, c, _block_walker, _block_sizes, _params, pretranspose_b, alpha, beta);
+ break;
+#ifdef __aarch64__
+ case DataType::U8:
+ case DataType::QASYMM8:
+ if(use_dot)
+ {
+ _transform_a = instantiate_transformA<uint8_t, true>(_a, &_transformed_a, _block_walker, _params);
+ _matrix_multiply = instantiate_matrix_multiply<uint8_t, uint32_t, true>(&_transformed_a, &_transformed_b, &_tmp_c, c, _block_walker, _block_sizes, _params, pretranspose_b, alpha, beta);
+ }
+ else
+ {
+ _transform_a = instantiate_transformA<uint8_t, false>(_a, &_transformed_a, _block_walker, _params);
+ _matrix_multiply = instantiate_matrix_multiply<uint8_t, uint32_t, false>(&_transformed_a, &_transformed_b, &_tmp_c, c, _block_walker, _block_sizes, _params, pretranspose_b, alpha, beta);
+ }
+ break;
+ case DataType::S8:
+ if(use_dot)
+ {
+ _transform_a = instantiate_transformA<int8_t, true>(_a, &_transformed_a, _block_walker, _params);
+ _matrix_multiply = instantiate_matrix_multiply<int8_t, int32_t, true>(&_transformed_a, &_transformed_b, &_tmp_c, c, _block_walker, _block_sizes, _params, pretranspose_b, alpha, beta);
+ }
+ else
+ {
+ _transform_a = instantiate_transformA<int8_t, false>(_a, &_transformed_a, _block_walker, _params);
+ _matrix_multiply = instantiate_matrix_multiply<int8_t, int32_t, false>(&_transformed_a, &_transformed_b, &_tmp_c, c, _block_walker, _block_sizes, _params, pretranspose_b, alpha, beta);
+ }
+ break;
+#endif /* __aarch64__ */
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+ case DataType::F16:
+ _transform_a = instantiate_transformA<__fp16>(_a, &_transformed_a, _block_walker, _params);
+ _matrix_multiply = instantiate_matrix_multiply<__fp16, __fp16>(&_transformed_a, &_transformed_b, &_tmp_c, c, _block_walker, _block_sizes, _params, pretranspose_b, alpha, beta);
+            break;
+#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
+ default:
+ break;
+ }
+ ARM_COMPUTE_ERROR_ON(_transform_a == nullptr);
+ ARM_COMPUTE_ERROR_ON(_matrix_multiply == nullptr);
+ _transformed_a.allocator()->allocate();
+ _tmp_c.allocator()->allocate();
+ _transformed_b.allocator()->allocate();
+}
+} // namespace arm_compute
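For reference, prepare() above creates at most four workloads per scheduler thread (a single one when running single-threaded), never more than the batch window has iterations, and gives each workload a contiguous slice of a 1D window; inside its slice every workload transforms one k-block of A and then runs the matrix multiply across all x-blocks along N, exactly as in the lambda captured into _workloads. The standalone sketch below reproduces only the splitting arithmetic with illustrative numbers; the chunking is a rough stand-in for Window::split_window(), not the library's implementation.

// Standalone sketch (not ACL code): how many workloads prepare() creates and which
// contiguous slice of the 1D batch window each one receives.
#include <algorithm>
#include <iostream>

int main()
{
    // Illustrative assumptions: 4 scheduler threads, a batch window of 10 iterations.
    const unsigned int num_threads    = 4;
    const unsigned int num_iterations = 10;

    // Same policy as prepare(): one workload when single-threaded, otherwise up to
    // four per thread, capped by the number of iterations available.
    const unsigned int max_iterations = (num_threads == 1) ? 1 : num_threads * 4;
    const unsigned int num_windows    = std::min(num_iterations, max_iterations);

    // Rough stand-in for win_1D.split_window(0, w, num_windows):
    for(unsigned int w = 0; w < num_windows; ++w)
    {
        const unsigned int start = (num_iterations * w) / num_windows;
        const unsigned int end   = (num_iterations * (w + 1)) / num_windows;
        std::cout << "workload " << w << ": iterations [" << start << ", " << end << ")\n";
    }
    return 0;
}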