author     Georgios Pinitas <georgios.pinitas@arm.com>    2019-01-09 18:35:17 +0000
committer  Georgios Pinitas <georgios.pinitas@arm.com>    2019-01-18 13:41:40 +0000
commit     7cd26d4a1b14bc4bf7c61496803416ab3d84791f (patch)
tree       12cc4a27d7ecebc69a43e96b1f46c7eb05437978 /src/core/NEON/kernels/assembly
parent     3ac2f3a1d9297220d1b0ce920dd13fdd4edcc187 (diff)
download   ComputeLibrary-7cd26d4a1b14bc4bf7c61496803416ab3d84791f.tar.gz
COMPMID-1867: Add NEON/SVE GEMM Hybrid kernels.
Change-Id: Ib40a9921e7f9a6a8be6c38872d6b3a0f24ed0cd3
Reviewed-on: https://review.mlplatform.org/515
Reviewed-by: Anthony Barbier <Anthony.barbier@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Diffstat (limited to 'src/core/NEON/kernels/assembly')
-rw-r--r--  src/core/NEON/kernels/assembly/Helpers.cpp                                  100
-rw-r--r--  src/core/NEON/kernels/assembly/NEGEMMInterleavedMatrixMultiplyWrapper.cpp   152
-rw-r--r--  src/core/NEON/kernels/assembly/NEGEMMInterleavedPrepareBWrapperKernel.cpp   189
-rw-r--r--  src/core/NEON/kernels/assembly/NEGEMMInterleavedStrategies.h                239
-rw-r--r--  src/core/NEON/kernels/assembly/NEGEMMInterleavedTransformAWrapper.cpp       118
-rw-r--r--  src/core/NEON/kernels/assembly/NEGEMMNativeWrapperKernel.cpp                  6
6 files changed, 191 insertions, 613 deletions
diff --git a/src/core/NEON/kernels/assembly/Helpers.cpp b/src/core/NEON/kernels/assembly/Helpers.cpp
index 09ac08c0a4..3d8d66d7fc 100644
--- a/src/core/NEON/kernels/assembly/Helpers.cpp
+++ b/src/core/NEON/kernels/assembly/Helpers.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018 ARM Limited.
+ * Copyright (c) 2018-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -24,91 +24,47 @@
#include "arm_compute/core/NEON/kernels/assembly/Helpers.h"
-#include "NEGEMMInterleavedStrategies.h"
+#include "arm_compute/core/NEON/kernels/assembly/arm_gemm.hpp"
namespace arm_compute
{
-namespace
-{
-template <typename InputType, bool use_dot = false>
-BlockSizes calculate_block_sizes_template(const CPUInfo &ci, unsigned int M, unsigned int N, unsigned int K)
-{
- using strategy = typename Kernel<InputType, use_dot>::strategy;
- return calculate_block_sizes<strategy>(ci, M, N, K);
-}
-} // namespace
-
-const char *get_strategy_name(DataType input_type, bool use_dot)
+arm_gemm::KernelDescription get_gemm_info(DataType input_type,
+ const CPUInfo &ci,
+ const unsigned int num_threads,
+ const INEGEMMWrapperKernel::Params &p,
+ float alpha,
+ float beta,
+ bool pretranspose_hint)
{
switch(input_type)
{
- case DataType::F32:
- return Kernel<float>::name;
#ifdef __aarch64__
- case DataType::U8:
case DataType::QASYMM8:
- if(use_dot)
- {
- return Kernel<uint8_t, true>::name;
- }
- else
- {
- return Kernel<uint8_t, false>::name;
- }
- case DataType::S8:
- if(use_dot)
- {
- return Kernel<int8_t, true>::name;
- }
- else
- {
- return Kernel<int8_t, false>::name;
- }
-#endif /* __aarch64__ */
-#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
- case DataType::F16:
- return Kernel<__fp16>::name;
-#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
- default:
- ARM_COMPUTE_ERROR("DataType not supported");
- break;
- }
-}
-
-BlockSizes calculate_block_sizes_from_data_type(const CPUInfo &ci, unsigned int M, unsigned int N, unsigned int K, DataType input_type, bool use_dot)
-{
- switch(input_type)
- {
- case DataType::F32:
- return calculate_block_sizes_template<float>(ci, M, N, K);
-#ifdef __aarch64__
case DataType::U8:
- case DataType::QASYMM8:
- if(use_dot)
- {
- return calculate_block_sizes_template<uint8_t, true>(ci, M, N, K);
- }
- else
- {
- return calculate_block_sizes_template<uint8_t, false>(ci, M, N, K);
- }
+ {
+ arm_gemm::GemmArgs<uint32_t> args(&ci, p.M, p.N, p.K, p.batches, p.multis, false, false, alpha, beta, num_threads, pretranspose_hint);
+ return arm_gemm::get_gemm_method<uint8_t, uint32_t>(args);
+ }
case DataType::S8:
- if(use_dot)
- {
- return calculate_block_sizes_template<int8_t, true>(ci, M, N, K);
- }
- else
- {
- return calculate_block_sizes_template<int8_t, false>(ci, M, N, K);
- }
-#endif /* __aarch64__ */
+ {
+ arm_gemm::GemmArgs<int32_t> args(&ci, p.M, p.N, p.K, p.batches, p.multis, false, false, alpha, beta, num_threads, pretranspose_hint);
+ return arm_gemm::get_gemm_method<int8_t, int32_t>(args);
+ }
+#endif // __aarch64__
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
case DataType::F16:
- return calculate_block_sizes_template<__fp16>(ci, M, N, K);
+ {
+ arm_gemm::GemmArgs<__fp16> args(&ci, p.M, p.N, p.K, p.batches, p.multis, false, false, alpha, beta, num_threads, pretranspose_hint);
+ return arm_gemm::get_gemm_method<__fp16, __fp16>(args);
+ }
#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
+ case DataType::F32:
+ {
+ arm_gemm::GemmArgs<float> args(&ci, p.M, p.N, p.K, p.batches, p.multis, false, false, alpha, beta, num_threads, pretranspose_hint);
+ return arm_gemm::get_gemm_method<float, float>(args);
+ }
default:
- ARM_COMPUTE_ERROR("DataType not supported");
- break;
+ return arm_gemm::KernelDescription();
}
}
} // namespace arm_compute
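For reference, the reworked get_gemm_info() above just builds an arm_gemm::GemmArgs for the requested data type and asks arm_gemm which kernel it would pick, returning an empty KernelDescription for unsupported types. A minimal usage sketch follows; the pick_f32_kernel() wrapper, the problem sizes and the thread count are illustrative assumptions and not part of this patch:

// Sketch only: ask which arm_gemm kernel would be selected for an F32 problem.
#include "arm_compute/core/NEON/kernels/assembly/Helpers.h"

arm_gemm::KernelDescription pick_f32_kernel(const arm_compute::CPUInfo &ci)
{
    arm_compute::INEGEMMWrapperKernel::Params p;
    p.M       = 128; // Illustrative problem sizes
    p.N       = 128;
    p.K       = 64;
    p.batches = 1;
    p.multis  = 1;
    // alpha = 1, beta = 0 (plain C = A * B), 4 threads, no pretranspose hint.
    return arm_compute::get_gemm_info(arm_compute::DataType::F32, ci, 4, p, 1.f, 0.f, false);
}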
diff --git a/src/core/NEON/kernels/assembly/NEGEMMInterleavedMatrixMultiplyWrapper.cpp b/src/core/NEON/kernels/assembly/NEGEMMInterleavedMatrixMultiplyWrapper.cpp
deleted file mode 100644
index 3b2975dd80..0000000000
--- a/src/core/NEON/kernels/assembly/NEGEMMInterleavedMatrixMultiplyWrapper.cpp
+++ /dev/null
@@ -1,152 +0,0 @@
-/*
- * Copyright (c) 2018 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include "arm_compute/core/NEON/kernels/assembly/NEGEMMInterleavedMatrixMultiplyWrapper.h"
-
-#include "NEGEMMInterleavedStrategies.h"
-#include "arm_compute/core/Helpers.h"
-#include "arm_compute/core/ITensor.h"
-#include "arm_compute/core/Utils.h"
-#include "arm_compute/core/Validate.h"
-#include "arm_compute/core/WindowIterator.h"
-
-namespace arm_compute
-{
-template <typename To, typename Tr, bool use_dot>
-void NEGEMMInterleavedMatrixMultiplyWrapperTemplate<To, Tr, use_dot>::configure(const ITensor *prepared_a, const ITensor *transformed_b, ITensor *tmp_c, ITensor *c, const Window &block_walker,
- const BlockSizes &block_sizes, const INEGEMMWrapperKernel::Params &params, bool b_is_pretransposed, float alpha, float beta, unsigned int max_num_threads)
-{
- using strategy = typename Kernel<To, use_dot>::strategy;
-
- _prepared_a = prepared_a;
- _transformed_b = transformed_b;
- _tmp_c = tmp_c;
- _c = c;
- _block_walker = block_walker;
- _block_sizes = block_sizes;
- _params = params;
- _b_is_pretransposed = b_is_pretransposed;
- _alpha = alpha;
- _beta = beta;
-
- auto_init_if_empty(*_tmp_c->info(), c->info()->clone()->set_tensor_shape(TensorShape{ _block_sizes.x_block * strategy::out_height(), max_num_threads }));
-}
-
-template <typename To, typename Tr, bool use_dot>
-void NEGEMMInterleavedMatrixMultiplyWrapperTemplate<To, Tr, use_dot>::transform(const MatrixMultiplyWorkload &wl, const ThreadInfo &info, const Window &batch_window, const Coordinates &start_offset,
- const Coordinates &end_offset)
-{
- using strategy = typename Kernel<To, use_dot>::strategy;
-
- strategy strat(info.cpu_info);
- TensorAccessor<To> prepared_a(*_prepared_a);
- TensorAccessor<To> transformed_b(*_transformed_b);
- TensorAccessor<Tr> c(*_c);
- TensorAccessor<Tr> tmp_c(*_tmp_c);
-
- int prev_batch = -1;
- To *a_ptr = nullptr;
- auto window_iterator = arm_compute::create_window_iterator(batch_window, start_offset, end_offset, [&](const Coordinates & id)
- {
- const unsigned int y = id.x();
- const unsigned int batch = id.y();
- const unsigned int ymax = std::min(_params.M, y + strategy::out_height());
-
- // If it's the first block of a new batch then reset the pointer to A.
- if(prev_batch != static_cast<int>(batch))
- {
- const unsigned int first_m = id.x();
- a_ptr = prepared_a(0, first_m, batch);
- prev_batch = batch;
- }
-
- // Call matrix multiply assembly routine to process the block:
- strat.kernel(a_ptr, transformed_b(wl._offset_transformed_b), tmp_c(0, info.thread_id), 1, wl._bblocks, wl._kern_k);
- a_ptr += strategy::out_height() * wl._kern_k;
-
- // Merge the result with the other blocks' results:
- strat.transforms.Merge(c(0, 0, batch, wl._multi), tmp_c(0, info.thread_id), c.stride(1), y, ymax, wl._x0, wl._xmax, _alpha, (wl._k0 == 0 ? _beta : static_cast<Tr>(1)));
- });
- auto on_new_row_size = [&](unsigned int start, unsigned int end)
- {
- //Nothing to do
- };
- window_iterator.iterate_2D(on_new_row_size);
-}
-
-template <typename To, typename Tr, bool use_dot>
-void NEGEMMInterleavedMatrixMultiplyWrapperTemplate<To, Tr, use_dot>::create_workloads(std::vector<MatrixMultiplyWorkload> &workloads)
-{
- using strategy = typename Kernel<To, use_dot>::strategy;
-
- unsigned int offset_transformed_b = 0;
- unsigned int wl_index = 0;
- unsigned int num_buffers = 0, reshaped_block_size = 0;
-
- if(!_b_is_pretransposed)
- {
- num_buffers = _transformed_b->info()->tensor_shape()[1];
- reshaped_block_size = _transformed_b->info()->tensor_shape()[0];
- }
- execute_window_loop(_block_walker, [&](const Coordinates & id)
- {
- const unsigned int x0 = id.x();
- const unsigned int k0 = id.y();
- const unsigned int multi = id.z();
-
- const unsigned int xmax = std::min(x0 + _block_walker.x().step(), _params.N);
- const unsigned int kmax = std::min(k0 + _block_walker.y().step(), _params.K);
-
- // Figure out how many "K" the kernel will actually process.
- const int kern_k = ceil_to_multiple(kmax - k0, strategy::k_unroll());
- const int bblocks = DIV_CEIL(xmax - x0, strategy::out_width());
-
- workloads.push_back(MatrixMultiplyWorkload(offset_transformed_b, x0, xmax, k0, kmax, multi, kern_k, bblocks));
-
- if(_b_is_pretransposed)
- {
- offset_transformed_b += bblocks * strategy::out_width() * kern_k;
- }
- else
- {
- // Rotate through the BufferManager's buffers:
- wl_index++;
- offset_transformed_b = (wl_index % num_buffers) * reshaped_block_size;
- }
- });
-}
-
-//TODO: regroup somewhere ?
-template class NEGEMMInterleavedMatrixMultiplyWrapperTemplate<float, float>;
-#ifdef __aarch64__
-template class NEGEMMInterleavedMatrixMultiplyWrapperTemplate<uint8_t, uint32_t>;
-template class NEGEMMInterleavedMatrixMultiplyWrapperTemplate<int8_t, int32_t>;
-template class NEGEMMInterleavedMatrixMultiplyWrapperTemplate<uint8_t, uint32_t, true>;
-template class NEGEMMInterleavedMatrixMultiplyWrapperTemplate<int8_t, int32_t, true>;
-#endif /* __aarch64__ */
-
-#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
-template class NEGEMMInterleavedMatrixMultiplyWrapperTemplate<float16_t, float16_t>;
-#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
-} // namespace arm_compute
diff --git a/src/core/NEON/kernels/assembly/NEGEMMInterleavedPrepareBWrapperKernel.cpp b/src/core/NEON/kernels/assembly/NEGEMMInterleavedPrepareBWrapperKernel.cpp
deleted file mode 100644
index 7fc57f3c02..0000000000
--- a/src/core/NEON/kernels/assembly/NEGEMMInterleavedPrepareBWrapperKernel.cpp
+++ /dev/null
@@ -1,189 +0,0 @@
-/*
- * Copyright (c) 2018 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include "arm_compute/core/NEON/kernels/assembly/NEGEMMInterleavedPrepareBWrapperKernel.h"
-
-#include "NEGEMMInterleavedStrategies.h"
-#include "arm_compute/core/Helpers.h"
-#include "arm_compute/core/ITensor.h"
-#include "arm_compute/core/Utils.h"
-#include "arm_compute/core/Validate.h"
-
-namespace arm_compute
-{
-namespace
-{
-// Call the lambda function for each workload generated by the passed window.
-template <typename To, bool use_dot, bool use_buffer_manager, typename Lambda>
-void for_each_element_in_window(const Window &window, const ITensor *b, ITensor *transformed_b, unsigned int N, unsigned int K, Lambda &&lambda)
-{
- using strategy = typename Kernel<To, use_dot>::strategy;
- unsigned int wl_index = 0;
- unsigned int num_buffers = 0, reshaped_block_size = 0;
-
- if(use_buffer_manager)
- {
- num_buffers = transformed_b->info()->tensor_shape()[1];
- reshaped_block_size = transformed_b->info()->strides_in_bytes().y();
- }
-
- unsigned int offset_transformed_b = transformed_b->info()->offset_first_element_in_bytes();
- execute_window_loop(window, [&](const Coordinates & coordinates)
- {
- const unsigned int x0 = coordinates.x();
- const unsigned int k0 = coordinates.y();
- const unsigned int multi = coordinates.z();
-
- const unsigned int offset_b = b->info()->offset_element_in_bytes(Coordinates(0, 0, multi));
- const unsigned int xmax = std::min(x0 + window.x().step(), N);
- const unsigned int kmax = std::min(k0 + window.y().step(), K);
-
- /* Figure out the size of each block. */
- unsigned int x_size = (xmax - x0);
- unsigned int k_size = (kmax - k0);
-
- /* Round sizes up as needed. */
- x_size = ceil_to_multiple(x_size, strategy::out_width());
- k_size = ceil_to_multiple(k_size, strategy::k_unroll());
-
- lambda(PrepareBWorkload(offset_b, offset_transformed_b, x0, xmax, k0, kmax));
-
- //Each workload represents one block:
- if(use_buffer_manager)
- {
- // Rotate through the BufferManager's buffers:
- wl_index++;
- offset_transformed_b = (wl_index % num_buffers) * reshaped_block_size;
- }
- else
- {
- offset_transformed_b += (x_size * k_size * sizeof(To));
- }
- });
-}
-
-// Calculate the size of transformed_b:
-template <typename To, bool use_dot>
-unsigned int get_B_pretransposed_array_size(unsigned int N, unsigned int K, const BlockSizes &bs, unsigned int multis)
-{
- using strategy = typename Kernel<To, use_dot>::strategy;
-
- // How many full blocks do N / K contain ?
- size_t num_full_k = K / bs.k_block;
- size_t num_full_x = N / bs.x_block;
-
- ARM_COMPUTE_ERROR_ON(bs.x_block % strategy::out_width() != 0);
- ARM_COMPUTE_ERROR_ON(bs.k_block % strategy::k_unroll() != 0);
-
- size_t normal_x_size = bs.x_block;
- size_t normal_k_size = bs.k_block;
-
- // Round up the leftovers to be a multiple of the strategy processing size:
- size_t left_over_x_size = ceil_to_multiple(N % bs.x_block, strategy::out_width());
- size_t left_over_k_size = ceil_to_multiple(K % bs.k_block, strategy::k_unroll());
-
- // Calculate the total size of the buffer:
- size_t total = num_full_k * normal_k_size * (num_full_x * normal_x_size + left_over_x_size);
- total += left_over_k_size * (left_over_x_size + num_full_x * normal_x_size);
-
- total *= multis;
-
- return total;
-}
-
-} // namespace
-
-template <typename To, bool use_dot>
-BlockSizes NEGEMMInterleavedPrepareBWrapperKernelTemplate<To, use_dot>::block_sizes() const
-{
- return _block_sizes;
-}
-
-template <typename To, bool use_dot>
-void NEGEMMInterleavedPrepareBWrapperKernelTemplate<To, use_dot>::configure(const ITensor *b, ITensor *transformed_b, bool transpose_b, const CPUInfo &ci, const INEGEMMWrapperKernel::Params &params)
-{
- using strategy = typename Kernel<To, use_dot>::strategy;
-
- const unsigned int multis = b->info()->tensor_shape().z();
- _Nsize = b->info()->tensor_shape().x();
- _Ksize = b->info()->tensor_shape().y();
- _b = b;
- _transformed_b = transformed_b;
- _transpose_b = transpose_b;
-
- _block_sizes = calculate_block_sizes<strategy>(ci, params.M, params.N, params.K);
-
- auto_init_if_empty(*transformed_b->info(), b->info()->clone()->set_tensor_shape(TensorShape{ get_B_pretransposed_array_size<To, use_dot>(_Nsize, _Ksize, _block_sizes, multis) }));
-
- Window window;
- window.set(Window::DimX, Window::Dimension(0, ceil_to_multiple(_Nsize, _block_sizes.x_block), _block_sizes.x_block));
- window.set(Window::DimY, Window::Dimension(0, ceil_to_multiple(_Ksize, _block_sizes.k_block), _block_sizes.k_block));
- window.set(Window::DimZ, Window::Dimension(0, multis));
-
- INEKernel::configure(window);
-}
-
-template <typename To, bool use_dot>
-void NEGEMMInterleavedPrepareBWrapperKernelTemplate<To, use_dot>::transform(const PrepareBWorkload &wl, const ThreadInfo &info)
-{
- using strategy = typename Kernel<To, use_dot>::strategy;
-
- strategy strat(info.cpu_info);
- strat.transforms.PrepareB(reinterpret_cast<To *>(_transformed_b->buffer() + wl._offset_transformed_b),
- reinterpret_cast<To *>(_b->buffer() + wl._offset_b),
- _b->info()->strides_in_bytes().y() / sizeof(To),
- wl._x0, wl._xmax, wl._k0, wl._kmax, _transpose_b);
-}
-
-template <typename To, bool use_dot>
-void NEGEMMInterleavedPrepareBWrapperKernelTemplate<To, use_dot>::create_workloads(std::vector<PrepareBWorkload> &workloads)
-{
- for_each_element_in_window<To, use_dot, true>(window(), _b, _transformed_b, _Nsize, _Ksize, [&workloads](PrepareBWorkload && wl)
- {
- workloads.push_back(std::move(wl));
- });
-}
-
-template <typename To, bool use_dot>
-void NEGEMMInterleavedPrepareBWrapperKernelTemplate<To, use_dot>::run(const Window &window, const ThreadInfo &info)
-{
- ARM_COMPUTE_ERROR_ON_MISMATCHING_WINDOWS(window, INEKernel::window());
- for_each_element_in_window<To, use_dot, false>(window, _b, _transformed_b, _Nsize, _Ksize, [&](PrepareBWorkload && wl)
- {
- this->transform(wl, info);
- });
-}
-
-template class NEGEMMInterleavedPrepareBWrapperKernelTemplate<float>;
-#ifdef __aarch64__
-template class NEGEMMInterleavedPrepareBWrapperKernelTemplate<uint8_t>;
-template class NEGEMMInterleavedPrepareBWrapperKernelTemplate<int8_t>;
-template class NEGEMMInterleavedPrepareBWrapperKernelTemplate<uint8_t, true>;
-template class NEGEMMInterleavedPrepareBWrapperKernelTemplate<int8_t, true>;
-#endif /* __aarch64__ */
-
-#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
-template class NEGEMMInterleavedPrepareBWrapperKernelTemplate<float16_t>;
-#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
-} // namespace arm_compute
diff --git a/src/core/NEON/kernels/assembly/NEGEMMInterleavedStrategies.h b/src/core/NEON/kernels/assembly/NEGEMMInterleavedStrategies.h
index 69842fec80..da6ef2dea9 100644
--- a/src/core/NEON/kernels/assembly/NEGEMMInterleavedStrategies.h
+++ b/src/core/NEON/kernels/assembly/NEGEMMInterleavedStrategies.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018 ARM Limited.
+ * Copyright (c) 2018-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -44,90 +44,175 @@
namespace arm_compute
{
-namespace
+namespace detail
{
-template <typename To, bool use_dot = false>
-struct Kernel
+/** GEMM Interleaved Strategy interface */
+class IInterleavedStrategy
{
+public:
+ /** Virtual Destructor */
+ virtual ~IInterleavedStrategy() = default;
+ /** Instantiate and configure a prepareB Kernel
+ *
+ * @param[in] b Input tensor B.
+ * @param[in] transformed_b Reshaped tensor B.
+     * @param[in] params        M, N, K sizes.
+ * @param[in] ci CPUInfo to be used for kernel configuration.
+ *
+ * @return A wrapped specialized prepareB kernel
+ */
+ virtual std::unique_ptr<NEGEMMInterleavedPrepareBWrapperKernel> instantiate_prepareB(const ITensor *b,
+ ITensor *transformed_b,
+ const INEGEMMWrapperKernel::Params &params,
+ const CPUInfo &ci) = 0;
+ /** Instantiate and configure a transformA Kernel
+ *
+ * @param[in] a Input tensor A.
+ * @param[in] transformed_a Reshaped tensor A.
+ * @param[in] block_walker Window representing the layout of the matrix's blocks.
+ * @param[in] params M, N, K sizes.
+ *
+ * @return A wrapped specialized transformA kernel
+ */
+ virtual std::unique_ptr<NEGEMMInterleavedTransformAWrapper> instantiate_transformA(const ITensor *a,
+ ITensor *transformed_a,
+ const Window &block_walker,
+ const INEGEMMWrapperKernel::Params &params) = 0;
+    /** Instantiate and configure a MatrixMultiply kernel
+ *
+ * @param transformed_a Already reshaped tensor A.
+ * @param transformed_b Already reshaped tensor B.
+ * @param tmp_c Temporary buffer to be used to store intermediate results.
+ * @param c Result tensor C.
+ * @param block_walker Window containing iteration information for the M and batch dimensions.
+ * @param block_sizes Block sizes to use for the matrix multiplication (A & B must have been reshaped using these same block sizes).
+ * @param params M, N, K sizes.
+ * @param alpha Alpha value
+ * @param beta Beta value
+ * @param pretranspose_b Is B also pretransposed ?
+ * @param num_threads Maximum number of threads that might be used for the calculations.
+ *
+ * @return A wrapped specialized MatrixMultiply kernel
+ */
+ virtual std::unique_ptr<NEGEMMInterleavedMatrixMultiplyWrapper> instantiate_matrix_multiply(const ITensor *transformed_a, const ITensor *transformed_b, ITensor *tmp_c, ITensor *c,
+ const Window &block_walker, const BlockSizes &block_sizes,
+ const INEGEMMWrapperKernel::Params &params, float alpha, float beta, bool pretranspose_b,
+ unsigned int num_threads) = 0;
+ /** Calculates the block sizes of a given strategy
+ *
+ * @param[in] ci CPUInfo to be used for kernel configuration.
+ * @param[in] params M, N, K sizes.
+ *
+ * @return BlockSizes for a given strategy
+ */
+ virtual BlockSizes calculate_block_sizes_for_strategy(const CPUInfo &ci, const INEGEMMWrapperKernel::Params &params) = 0;
};
-#define DEFINE_STRATEGY_SUFFIX(strat, suffix) \
- using strategy = arm_gemm::strat; \
- static constexpr const char *name = #strat suffix;
-
-#define DEFINE_STRATEGY(strat) \
- DEFINE_STRATEGY_SUFFIX(strat, "")
-
-#ifdef __ARM_FEATURE_SVE
-template <>
-struct Kernel<float, false>
-{
- DEFINE_STRATEGY(interleaved_fp32_mla_3VLx8)
-};
-template <>
-struct Kernel<float16_t, false>
-{
- DEFINE_STRATEGY(interleaved_fp16_mla_3VLx8)
-};
-template <bool use_dot>
-struct Kernel<int8_t, use_dot>
-{
- DEFINE_STRATEGY(interleaved_s8s32_dot_3VLx8)
-};
-template <bool use_dot>
-struct Kernel<uint8_t, use_dot>
+/** Interleaved Strategy class */
+template <typename StrategyType>
+class InterleavedStrategy : public IInterleavedStrategy
{
- DEFINE_STRATEGY(interleaved_u8u32_dot_3VLx8)
-};
-#else /* __ARM_FEATURE_SVE */
+public:
+ using strategy = StrategyType;
-#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
-template <>
-struct Kernel<float16_t, false>
-{
- DEFINE_STRATEGY(hgemm_24x8)
-};
-#endif /*__ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
-#ifdef __aarch64__
-template <>
-struct Kernel<float, false>
-{
- DEFINE_STRATEGY(sgemm_12x8)
-};
-template <>
-struct Kernel<int8_t, false>
-{
- DEFINE_STRATEGY(gemm_s8_4x4)
-};
-template <>
-struct Kernel<uint8_t, false>
-{
- DEFINE_STRATEGY(gemm_u8_4x4)
-};
+public:
+ // Inherited methods overridden
+ std::unique_ptr<NEGEMMInterleavedPrepareBWrapperKernel> instantiate_prepareB(const ITensor *b,
+ ITensor *transformed_b,
+ const INEGEMMWrapperKernel::Params &params,
+ const CPUInfo &ci) override
+ {
+ auto prepare_b = support::cpp14::make_unique<NEGEMMInterleavedPrepareBWrapperKernelTemplate<strategy>>();
+ prepare_b->configure(b, transformed_b, false, ci, params);
+ return std::move(prepare_b);
+ }
+ std::unique_ptr<NEGEMMInterleavedTransformAWrapper> instantiate_transformA(const ITensor *a,
+ ITensor *transformed_a,
+ const Window &block_walker,
+ const INEGEMMWrapperKernel::Params &params) override
+ {
+ auto transform_a = support::cpp14::make_unique<NEGEMMInterleavedTransformAWrapperTemplate<strategy>>();
+ transform_a->configure(a, transformed_a, false, block_walker, params);
+ return std::move(transform_a);
+ }
+ std::unique_ptr<NEGEMMInterleavedMatrixMultiplyWrapper> instantiate_matrix_multiply(const ITensor *transformed_a, const ITensor *transformed_b, ITensor *tmp_c, ITensor *c,
+ const Window &block_walker, const BlockSizes &block_sizes,
+ const INEGEMMWrapperKernel::Params &params, float alpha, float beta, bool pretranspose_b,
+ unsigned int num_threads) override
+ {
+ auto matrix_multiply = support::cpp14::make_unique<NEGEMMInterleavedMatrixMultiplyWrapperTemplate<strategy>>();
+ matrix_multiply->configure(transformed_a, transformed_b, tmp_c, c, block_walker, block_sizes, params, pretranspose_b, alpha, beta, num_threads);
+ return std::move(matrix_multiply);
+ }
-//Use different strategies for 8bit dot product:
-template <>
-struct Kernel<int8_t, true>
-{
- DEFINE_STRATEGY_SUFFIX(gemm_s8_12x8, "_dot")
+ BlockSizes calculate_block_sizes_for_strategy(const CPUInfo &ci, const INEGEMMWrapperKernel::Params &params) override
+ {
+ return calculate_block_sizes<strategy>(ci, params.M, params.N, params.K);
+ }
};
-template <>
-struct Kernel<uint8_t, true>
-{
- DEFINE_STRATEGY_SUFFIX(gemm_u8_12x8, "_dot")
-};
-#else
-template <>
-struct Kernel<float, false>
-{
- DEFINE_STRATEGY(sgemm_8x6)
-};
-#endif /* __aarch64__ */
-#endif /* __ARM_FEATURE_SVE */
-
-#undef DEFINE_STRATEGY
-#undef DEFINE_STRATEGY_SUFFIX
-} // namespace
+/** Create the backend GEMM strategy to use given the provided kernel info
+ *
+ * @param[in] kernel_name Kernel name of the backend strategy to instantiate
+ *
+ * @return The requested kernel strategy if exists else nullptr
+ */
+std::unique_ptr<IInterleavedStrategy> create_strategy(const std::string &kernel_name)
+{
+#if defined(__arm__)
+ if(kernel_name.find("sgemm_8x6") != std::string::npos)
+ {
+ return support::cpp14::make_unique<InterleavedStrategy<arm_gemm::sgemm_8x6>>();
+ }
+#endif // defined(__arm__)
+#if defined(__aarch64__)
+ if(kernel_name.find("gemm_s8_4x4") != std::string::npos)
+ {
+ return support::cpp14::make_unique<InterleavedStrategy<arm_gemm::gemm_s8_4x4>>();
+ }
+ if(kernel_name.find("gemm_s8_12x8") != std::string::npos)
+ {
+ return support::cpp14::make_unique<InterleavedStrategy<arm_gemm::gemm_s8_12x8>>();
+ }
+ if(kernel_name.find("gemm_u8_4x4") != std::string::npos)
+ {
+ return support::cpp14::make_unique<InterleavedStrategy<arm_gemm::gemm_u8_4x4>>();
+ }
+ if(kernel_name.find("gemm_u8_12x8") != std::string::npos)
+ {
+ return support::cpp14::make_unique<InterleavedStrategy<arm_gemm::gemm_u8_12x8>>();
+ }
+#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
+ if(kernel_name.find("hgemm_24x8") != std::string::npos)
+ {
+ return support::cpp14::make_unique<InterleavedStrategy<arm_gemm::hgemm_24x8>>();
+ }
+#endif // defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
+ if(kernel_name.find("sgemm_12x8") != std::string::npos)
+ {
+ return support::cpp14::make_unique<InterleavedStrategy<arm_gemm::sgemm_12x8>>();
+ }
+#if defined(__ARM_FEATURE_SVE)
+ if(kernel_name.find("interleaved_fp16_mla_3VLx8") != std::string::npos)
+ {
+ return support::cpp14::make_unique<InterleavedStrategy<arm_gemm::interleaved_fp16_mla_3VLx8>>();
+ }
+ if(kernel_name.find("interleaved_fp32_mla_3VLx8") != std::string::npos)
+ {
+ return support::cpp14::make_unique<InterleavedStrategy<arm_gemm::interleaved_fp32_mla_3VLx8>>();
+ }
+ if(kernel_name.find("interleaved_s8s32_dot_3VLx8") != std::string::npos)
+ {
+ return support::cpp14::make_unique<InterleavedStrategy<arm_gemm::interleaved_s8s32_dot_3VLx8>>();
+ }
+ if(kernel_name.find("interleaved_u8u32_dot_3VLx8") != std::string::npos)
+ {
+ return support::cpp14::make_unique<InterleavedStrategy<arm_gemm::interleaved_u8u32_dot_3VLx8>>();
+ }
+#endif // defined(__ARM_FEATURE_SVE)
+#endif // defined(__aarch64__)
+ return nullptr;
+}
+} // namespace detail
} // namespace arm_compute
#endif /* __ARM_COMPUTE_NEGEMMINTERLEAVEDSTRATEGIES_H__ */
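To tie the two halves of this patch together: the kernel name reported by arm_gemm (via get_gemm_info() in Helpers.cpp above) is what detail::create_strategy() matches against to build the wrapped interleaved kernels. A rough sketch, assuming KernelDescription exposes the selected kernel's name as a string; the configure_interleaved_gemm() helper and its parameters are invented for illustration:

// Sketch only (hypothetical helper): wire kernel selection to the wrapped interleaved kernels.
#include <memory>
#include "NEGEMMInterleavedStrategies.h"
#include "arm_compute/core/NEON/kernels/assembly/Helpers.h"

void configure_interleaved_gemm(const arm_compute::CPUInfo &ci, const arm_compute::INEGEMMWrapperKernel::Params &params,
                                const arm_compute::ITensor *a, const arm_compute::ITensor *b, arm_compute::ITensor *c,
                                arm_compute::ITensor *transformed_a, arm_compute::ITensor *transformed_b, arm_compute::ITensor *tmp_c,
                                const arm_compute::Window &block_walker, float alpha, float beta, bool pretranspose_b, unsigned int num_threads)
{
    using namespace arm_compute;
    // Ask arm_gemm which kernel it would use, then map that name back to a wrapped strategy.
    const arm_gemm::KernelDescription kd       = get_gemm_info(DataType::F32, ci, num_threads, params, alpha, beta, pretranspose_b);
    std::unique_ptr<detail::IInterleavedStrategy> strategy = detail::create_strategy(kd.name);
    if(strategy == nullptr)
    {
        return; // No interleaved strategy wraps this kernel.
    }
    const BlockSizes bs = strategy->calculate_block_sizes_for_strategy(ci, params);
    auto prepare_b      = strategy->instantiate_prepareB(b, transformed_b, params, ci);
    auto transform_a    = strategy->instantiate_transformA(a, transformed_a, block_walker, params);
    auto mm             = strategy->instantiate_matrix_multiply(transformed_a, transformed_b, tmp_c, c, block_walker, bs,
                                                                params, alpha, beta, pretranspose_b, num_threads);
    // The wrapped kernels would then be scheduled by the caller (e.g. the runtime GEMM function).
}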
diff --git a/src/core/NEON/kernels/assembly/NEGEMMInterleavedTransformAWrapper.cpp b/src/core/NEON/kernels/assembly/NEGEMMInterleavedTransformAWrapper.cpp
deleted file mode 100644
index 3b80a1f940..0000000000
--- a/src/core/NEON/kernels/assembly/NEGEMMInterleavedTransformAWrapper.cpp
+++ /dev/null
@@ -1,118 +0,0 @@
-/*
- * Copyright (c) 2018 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include "arm_compute/core/NEON/kernels/assembly/NEGEMMInterleavedTransformAWrapper.h"
-
-#include "NEGEMMInterleavedStrategies.h"
-#include "arm_compute/core/Helpers.h"
-#include "arm_compute/core/ITensor.h"
-#include "arm_compute/core/Utils.h"
-#include "arm_compute/core/Validate.h"
-#include "arm_compute/core/WindowIterator.h"
-
-#include "utils/TypePrinter.h"
-
-namespace arm_compute
-{
-template <typename To, bool use_dot>
-void NEGEMMInterleavedTransformAWrapperTemplate<To, use_dot>::configure(const ITensor *a, ITensor *transformed_a, bool transpose_a, const Window &block_walker,
- const INEGEMMWrapperKernel::Params &params)
-{
- _a = a;
- _transformed_a = transformed_a;
- _transpose_a = transpose_a;
- _Ksize = params.K;
- _Msize = params.M;
- _k_multi_window = block_walker.shift_dimensions(1); // block_walker contains (M,K,Multi) --> shift by 1 to get rid of the "M" dimension
-}
-
-template <typename To, bool use_dot>
-void NEGEMMInterleavedTransformAWrapperTemplate<To, use_dot>::transform(const TransformAWorkload &wl, const ThreadInfo &info, const Window &batch_window, const Coordinates &start_offset,
- const Coordinates &end_offset)
-{
- using strategy = typename Kernel<To, use_dot>::strategy;
-
- strategy strat(info.cpu_info);
- TensorAccessor<To> a(*_a);
- TensorAccessor<To> transformed_a(*_transformed_a);
-
- if(_a->info()->data_layout() == DataLayout::NHWC)
- {
- // In the case of NHWC we want to interpret the output shape as 3D. Thus, the batch stride for A is
- // the relevant multiple of the row stride.
- const size_t nhwc_batch_stride = _a->info()->strides_in_bytes().y() * _Msize;
- a.set_stride(2, nhwc_batch_stride);
- }
-
- unsigned int last_m = 0;
- //TODO: Create a new iterate_1D( DimY);
- int last_y = -1;
- auto window_iterator = arm_compute::create_window_iterator(batch_window, start_offset, end_offset, [&](const Coordinates & id)
- {
- if(id.y() != last_y)
- {
- last_y = id.y();
- unsigned int batch = id.y();
- unsigned int first_m = id.x();
-
- if(first_m >= last_m)
- return;
-
- strat.transforms.PrepareA(transformed_a(0, first_m, batch),
- a(0, 0, batch, wl._multi),
- a.stride(1), first_m, last_m, wl._k0, wl._kmax, _transpose_a);
- }
- });
- auto on_new_row_size = [&](unsigned int start, unsigned int end)
- {
- last_m = std::min(end, _Msize);
- };
- window_iterator.iterate_2D(on_new_row_size);
-}
-
-template <typename To, bool use_dot>
-void NEGEMMInterleavedTransformAWrapperTemplate<To, use_dot>::create_workloads(std::vector<TransformAWorkload> &workloads)
-{
- execute_window_loop(_k_multi_window, [&](const Coordinates & id)
- {
- const unsigned int k0 = id.x();
- const unsigned int multi = id.y();
- const unsigned int kmax = std::min(k0 + _k_multi_window.x().step(), _Ksize);
-
- workloads.push_back(TransformAWorkload(k0, kmax, multi));
- });
-}
-
-template class NEGEMMInterleavedTransformAWrapperTemplate<float>;
-#ifdef __aarch64__
-template class NEGEMMInterleavedTransformAWrapperTemplate<uint8_t>;
-template class NEGEMMInterleavedTransformAWrapperTemplate<int8_t>;
-template class NEGEMMInterleavedTransformAWrapperTemplate<uint8_t, true>;
-template class NEGEMMInterleavedTransformAWrapperTemplate<int8_t, true>;
-#endif /* __aarch64__ */
-
-#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
-template class NEGEMMInterleavedTransformAWrapperTemplate<float16_t>;
-#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
-} // namespace arm_compute
diff --git a/src/core/NEON/kernels/assembly/NEGEMMNativeWrapperKernel.cpp b/src/core/NEON/kernels/assembly/NEGEMMNativeWrapperKernel.cpp
index e452dfbcf2..7b1f3e7ba0 100644
--- a/src/core/NEON/kernels/assembly/NEGEMMNativeWrapperKernel.cpp
+++ b/src/core/NEON/kernels/assembly/NEGEMMNativeWrapperKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018 ARM Limited.
+ * Copyright (c) 2018-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -34,11 +34,7 @@
#include "../arm_gemm/mergeresults.hpp"
#include "../arm_gemm/transform.hpp"
-#include "../arm_gemm/kernels/a32_sgemm_8x6.hpp"
-#include "../arm_gemm/kernels/a64_sgemm_12x8.hpp"
#include "../arm_gemm/kernels/a64_sgemm_native_16x4.hpp"
-#include "../arm_gemm/kernels/a64_sgemv_pretransposed.hpp"
-#include "../arm_gemm/kernels/a64_sgemv_trans.hpp"
namespace arm_compute
{