From 7cd26d4a1b14bc4bf7c61496803416ab3d84791f Mon Sep 17 00:00:00 2001 From: Georgios Pinitas Date: Wed, 9 Jan 2019 18:35:17 +0000 Subject: COMPMID-1867: Add NEON/SVE GEMM Hybrid kernels. Change-Id: Ib40a9921e7f9a6a8be6c38872d6b3a0f24ed0cd3 Reviewed-on: https://review.mlplatform.org/515 Reviewed-by: Anthony Barbier Tested-by: Arm Jenkins --- src/core/NEON/kernels/assembly/Helpers.cpp | 100 +++------ .../NEGEMMInterleavedMatrixMultiplyWrapper.cpp | 152 ------------- .../NEGEMMInterleavedPrepareBWrapperKernel.cpp | 189 ---------------- .../kernels/assembly/NEGEMMInterleavedStrategies.h | 239 ++++++++++++++------- .../NEGEMMInterleavedTransformAWrapper.cpp | 118 ---------- .../kernels/assembly/NEGEMMNativeWrapperKernel.cpp | 6 +- 6 files changed, 191 insertions(+), 613 deletions(-) delete mode 100644 src/core/NEON/kernels/assembly/NEGEMMInterleavedMatrixMultiplyWrapper.cpp delete mode 100644 src/core/NEON/kernels/assembly/NEGEMMInterleavedPrepareBWrapperKernel.cpp delete mode 100644 src/core/NEON/kernels/assembly/NEGEMMInterleavedTransformAWrapper.cpp (limited to 'src/core/NEON/kernels/assembly') diff --git a/src/core/NEON/kernels/assembly/Helpers.cpp b/src/core/NEON/kernels/assembly/Helpers.cpp index 09ac08c0a4..3d8d66d7fc 100644 --- a/src/core/NEON/kernels/assembly/Helpers.cpp +++ b/src/core/NEON/kernels/assembly/Helpers.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018 ARM Limited. + * Copyright (c) 2018-2019 ARM Limited. * * SPDX-License-Identifier: MIT * @@ -24,91 +24,47 @@ #include "arm_compute/core/NEON/kernels/assembly/Helpers.h" -#include "NEGEMMInterleavedStrategies.h" +#include "arm_compute/core/NEON/kernels/assembly/arm_gemm.hpp" namespace arm_compute { -namespace -{ -template -BlockSizes calculate_block_sizes_template(const CPUInfo &ci, unsigned int M, unsigned int N, unsigned int K) -{ - using strategy = typename Kernel::strategy; - return calculate_block_sizes(ci, M, N, K); -} -} // namespace - -const char *get_strategy_name(DataType input_type, bool use_dot) +arm_gemm::KernelDescription get_gemm_info(DataType input_type, + const CPUInfo &ci, + const unsigned int num_threads, + const INEGEMMWrapperKernel::Params &p, + float alpha, + float beta, + bool pretranspose_hint) { switch(input_type) { - case DataType::F32: - return Kernel::name; #ifdef __aarch64__ - case DataType::U8: case DataType::QASYMM8: - if(use_dot) - { - return Kernel::name; - } - else - { - return Kernel::name; - } - case DataType::S8: - if(use_dot) - { - return Kernel::name; - } - else - { - return Kernel::name; - } -#endif /* __aarch64__ */ -#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC - case DataType::F16: - return Kernel<__fp16>::name; -#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */ - default: - ARM_COMPUTE_ERROR("DataType not supported"); - break; - } -} - -BlockSizes calculate_block_sizes_from_data_type(const CPUInfo &ci, unsigned int M, unsigned int N, unsigned int K, DataType input_type, bool use_dot) -{ - switch(input_type) - { - case DataType::F32: - return calculate_block_sizes_template(ci, M, N, K); -#ifdef __aarch64__ case DataType::U8: - case DataType::QASYMM8: - if(use_dot) - { - return calculate_block_sizes_template(ci, M, N, K); - } - else - { - return calculate_block_sizes_template(ci, M, N, K); - } + { + arm_gemm::GemmArgs args(&ci, p.M, p.N, p.K, p.batches, p.multis, false, false, alpha, beta, num_threads, pretranspose_hint); + return arm_gemm::get_gemm_method(args); + } case DataType::S8: - if(use_dot) - { - return calculate_block_sizes_template(ci, M, N, K); - } - else - { - return 
calculate_block_sizes_template(ci, M, N, K); - } -#endif /* __aarch64__ */ + { + arm_gemm::GemmArgs args(&ci, p.M, p.N, p.K, p.batches, p.multis, false, false, alpha, beta, num_threads, pretranspose_hint); + return arm_gemm::get_gemm_method(args); + } +#endif // __aarch64__ #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC case DataType::F16: - return calculate_block_sizes_template<__fp16>(ci, M, N, K); + { + arm_gemm::GemmArgs<__fp16> args(&ci, p.M, p.N, p.K, p.batches, p.multis, false, false, alpha, beta, num_threads, pretranspose_hint); + return arm_gemm::get_gemm_method<__fp16, __fp16>(args); + } #endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */ + case DataType::F32: + { + arm_gemm::GemmArgs args(&ci, p.M, p.N, p.K, p.batches, p.multis, false, false, alpha, beta, num_threads, pretranspose_hint); + return arm_gemm::get_gemm_method(args); + } default: - ARM_COMPUTE_ERROR("DataType not supported"); - break; + return arm_gemm::KernelDescription(); } } } // namespace arm_compute diff --git a/src/core/NEON/kernels/assembly/NEGEMMInterleavedMatrixMultiplyWrapper.cpp b/src/core/NEON/kernels/assembly/NEGEMMInterleavedMatrixMultiplyWrapper.cpp deleted file mode 100644 index 3b2975dd80..0000000000 --- a/src/core/NEON/kernels/assembly/NEGEMMInterleavedMatrixMultiplyWrapper.cpp +++ /dev/null @@ -1,152 +0,0 @@ -/* - * Copyright (c) 2018 ARM Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - -#include "arm_compute/core/NEON/kernels/assembly/NEGEMMInterleavedMatrixMultiplyWrapper.h" - -#include "NEGEMMInterleavedStrategies.h" -#include "arm_compute/core/Helpers.h" -#include "arm_compute/core/ITensor.h" -#include "arm_compute/core/Utils.h" -#include "arm_compute/core/Validate.h" -#include "arm_compute/core/WindowIterator.h" - -namespace arm_compute -{ -template -void NEGEMMInterleavedMatrixMultiplyWrapperTemplate::configure(const ITensor *prepared_a, const ITensor *transformed_b, ITensor *tmp_c, ITensor *c, const Window &block_walker, - const BlockSizes &block_sizes, const INEGEMMWrapperKernel::Params ¶ms, bool b_is_pretransposed, float alpha, float beta, unsigned int max_num_threads) -{ - using strategy = typename Kernel::strategy; - - _prepared_a = prepared_a; - _transformed_b = transformed_b; - _tmp_c = tmp_c; - _c = c; - _block_walker = block_walker; - _block_sizes = block_sizes; - _params = params; - _b_is_pretransposed = b_is_pretransposed; - _alpha = alpha; - _beta = beta; - - auto_init_if_empty(*_tmp_c->info(), c->info()->clone()->set_tensor_shape(TensorShape{ _block_sizes.x_block * strategy::out_height(), max_num_threads })); -} - -template -void NEGEMMInterleavedMatrixMultiplyWrapperTemplate::transform(const MatrixMultiplyWorkload &wl, const ThreadInfo &info, const Window &batch_window, const Coordinates &start_offset, - const Coordinates &end_offset) -{ - using strategy = typename Kernel::strategy; - - strategy strat(info.cpu_info); - TensorAccessor prepared_a(*_prepared_a); - TensorAccessor transformed_b(*_transformed_b); - TensorAccessor c(*_c); - TensorAccessor tmp_c(*_tmp_c); - - int prev_batch = -1; - To *a_ptr = nullptr; - auto window_iterator = arm_compute::create_window_iterator(batch_window, start_offset, end_offset, [&](const Coordinates & id) - { - const unsigned int y = id.x(); - const unsigned int batch = id.y(); - const unsigned int ymax = std::min(_params.M, y + strategy::out_height()); - - // If it's the first block of a new batch then reset the pointer to A. - if(prev_batch != static_cast(batch)) - { - const unsigned int first_m = id.x(); - a_ptr = prepared_a(0, first_m, batch); - prev_batch = batch; - } - - // Call matrix multiply assembly routine to process the block: - strat.kernel(a_ptr, transformed_b(wl._offset_transformed_b), tmp_c(0, info.thread_id), 1, wl._bblocks, wl._kern_k); - a_ptr += strategy::out_height() * wl._kern_k; - - // Merge the result with the other blocks' results: - strat.transforms.Merge(c(0, 0, batch, wl._multi), tmp_c(0, info.thread_id), c.stride(1), y, ymax, wl._x0, wl._xmax, _alpha, (wl._k0 == 0 ? 
_beta : static_cast(1))); - }); - auto on_new_row_size = [&](unsigned int start, unsigned int end) - { - //Nothing to do - }; - window_iterator.iterate_2D(on_new_row_size); -} - -template -void NEGEMMInterleavedMatrixMultiplyWrapperTemplate::create_workloads(std::vector &workloads) -{ - using strategy = typename Kernel::strategy; - - unsigned int offset_transformed_b = 0; - unsigned int wl_index = 0; - unsigned int num_buffers = 0, reshaped_block_size = 0; - - if(!_b_is_pretransposed) - { - num_buffers = _transformed_b->info()->tensor_shape()[1]; - reshaped_block_size = _transformed_b->info()->tensor_shape()[0]; - } - execute_window_loop(_block_walker, [&](const Coordinates & id) - { - const unsigned int x0 = id.x(); - const unsigned int k0 = id.y(); - const unsigned int multi = id.z(); - - const unsigned int xmax = std::min(x0 + _block_walker.x().step(), _params.N); - const unsigned int kmax = std::min(k0 + _block_walker.y().step(), _params.K); - - // Figure out how many "K" the kernel will actually process. - const int kern_k = ceil_to_multiple(kmax - k0, strategy::k_unroll()); - const int bblocks = DIV_CEIL(xmax - x0, strategy::out_width()); - - workloads.push_back(MatrixMultiplyWorkload(offset_transformed_b, x0, xmax, k0, kmax, multi, kern_k, bblocks)); - - if(_b_is_pretransposed) - { - offset_transformed_b += bblocks * strategy::out_width() * kern_k; - } - else - { - // Rotate through the BufferManager's buffers: - wl_index++; - offset_transformed_b = (wl_index % num_buffers) * reshaped_block_size; - } - }); -} - -//TODO: regroup somewhere ? -template class NEGEMMInterleavedMatrixMultiplyWrapperTemplate; -#ifdef __aarch64__ -template class NEGEMMInterleavedMatrixMultiplyWrapperTemplate; -template class NEGEMMInterleavedMatrixMultiplyWrapperTemplate; -template class NEGEMMInterleavedMatrixMultiplyWrapperTemplate; -template class NEGEMMInterleavedMatrixMultiplyWrapperTemplate; -#endif /* __aarch64__ */ - -#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC -template class NEGEMMInterleavedMatrixMultiplyWrapperTemplate; -#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */ -} // namespace arm_compute diff --git a/src/core/NEON/kernels/assembly/NEGEMMInterleavedPrepareBWrapperKernel.cpp b/src/core/NEON/kernels/assembly/NEGEMMInterleavedPrepareBWrapperKernel.cpp deleted file mode 100644 index 7fc57f3c02..0000000000 --- a/src/core/NEON/kernels/assembly/NEGEMMInterleavedPrepareBWrapperKernel.cpp +++ /dev/null @@ -1,189 +0,0 @@ -/* - * Copyright (c) 2018 ARM Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -#include "arm_compute/core/NEON/kernels/assembly/NEGEMMInterleavedPrepareBWrapperKernel.h" - -#include "NEGEMMInterleavedStrategies.h" -#include "arm_compute/core/Helpers.h" -#include "arm_compute/core/ITensor.h" -#include "arm_compute/core/Utils.h" -#include "arm_compute/core/Validate.h" - -namespace arm_compute -{ -namespace -{ -// Call the lambda function for each workload generated by the passed window. -template -void for_each_element_in_window(const Window &window, const ITensor *b, ITensor *transformed_b, unsigned int N, unsigned int K, Lambda &&lambda) -{ - using strategy = typename Kernel::strategy; - unsigned int wl_index = 0; - unsigned int num_buffers = 0, reshaped_block_size = 0; - - if(use_buffer_manager) - { - num_buffers = transformed_b->info()->tensor_shape()[1]; - reshaped_block_size = transformed_b->info()->strides_in_bytes().y(); - } - - unsigned int offset_transformed_b = transformed_b->info()->offset_first_element_in_bytes(); - execute_window_loop(window, [&](const Coordinates & coordinates) - { - const unsigned int x0 = coordinates.x(); - const unsigned int k0 = coordinates.y(); - const unsigned int multi = coordinates.z(); - - const unsigned int offset_b = b->info()->offset_element_in_bytes(Coordinates(0, 0, multi)); - const unsigned int xmax = std::min(x0 + window.x().step(), N); - const unsigned int kmax = std::min(k0 + window.y().step(), K); - - /* Figure out the size of each block. */ - unsigned int x_size = (xmax - x0); - unsigned int k_size = (kmax - k0); - - /* Round sizes up as needed. */ - x_size = ceil_to_multiple(x_size, strategy::out_width()); - k_size = ceil_to_multiple(k_size, strategy::k_unroll()); - - lambda(PrepareBWorkload(offset_b, offset_transformed_b, x0, xmax, k0, kmax)); - - //Each workload represents one block: - if(use_buffer_manager) - { - // Rotate through the BufferManager's buffers: - wl_index++; - offset_transformed_b = (wl_index % num_buffers) * reshaped_block_size; - } - else - { - offset_transformed_b += (x_size * k_size * sizeof(To)); - } - }); -} - -// Calculate the size of transformed_b: -template -unsigned int get_B_pretransposed_array_size(unsigned int N, unsigned int K, const BlockSizes &bs, unsigned int multis) -{ - using strategy = typename Kernel::strategy; - - // How many full blocks do N / K contain ? 
- size_t num_full_k = K / bs.k_block; - size_t num_full_x = N / bs.x_block; - - ARM_COMPUTE_ERROR_ON(bs.x_block % strategy::out_width() != 0); - ARM_COMPUTE_ERROR_ON(bs.k_block % strategy::k_unroll() != 0); - - size_t normal_x_size = bs.x_block; - size_t normal_k_size = bs.k_block; - - // Round up the leftovers to be a multiple of the strategy processing size: - size_t left_over_x_size = ceil_to_multiple(N % bs.x_block, strategy::out_width()); - size_t left_over_k_size = ceil_to_multiple(K % bs.k_block, strategy::k_unroll()); - - // Calculate the total size of the buffer: - size_t total = num_full_k * normal_k_size * (num_full_x * normal_x_size + left_over_x_size); - total += left_over_k_size * (left_over_x_size + num_full_x * normal_x_size); - - total *= multis; - - return total; -} - -} // namespace - -template -BlockSizes NEGEMMInterleavedPrepareBWrapperKernelTemplate::block_sizes() const -{ - return _block_sizes; -} - -template -void NEGEMMInterleavedPrepareBWrapperKernelTemplate::configure(const ITensor *b, ITensor *transformed_b, bool transpose_b, const CPUInfo &ci, const INEGEMMWrapperKernel::Params ¶ms) -{ - using strategy = typename Kernel::strategy; - - const unsigned int multis = b->info()->tensor_shape().z(); - _Nsize = b->info()->tensor_shape().x(); - _Ksize = b->info()->tensor_shape().y(); - _b = b; - _transformed_b = transformed_b; - _transpose_b = transpose_b; - - _block_sizes = calculate_block_sizes(ci, params.M, params.N, params.K); - - auto_init_if_empty(*transformed_b->info(), b->info()->clone()->set_tensor_shape(TensorShape{ get_B_pretransposed_array_size(_Nsize, _Ksize, _block_sizes, multis) })); - - Window window; - window.set(Window::DimX, Window::Dimension(0, ceil_to_multiple(_Nsize, _block_sizes.x_block), _block_sizes.x_block)); - window.set(Window::DimY, Window::Dimension(0, ceil_to_multiple(_Ksize, _block_sizes.k_block), _block_sizes.k_block)); - window.set(Window::DimZ, Window::Dimension(0, multis)); - - INEKernel::configure(window); -} - -template -void NEGEMMInterleavedPrepareBWrapperKernelTemplate::transform(const PrepareBWorkload &wl, const ThreadInfo &info) -{ - using strategy = typename Kernel::strategy; - - strategy strat(info.cpu_info); - strat.transforms.PrepareB(reinterpret_cast(_transformed_b->buffer() + wl._offset_transformed_b), - reinterpret_cast(_b->buffer() + wl._offset_b), - _b->info()->strides_in_bytes().y() / sizeof(To), - wl._x0, wl._xmax, wl._k0, wl._kmax, _transpose_b); -} - -template -void NEGEMMInterleavedPrepareBWrapperKernelTemplate::create_workloads(std::vector &workloads) -{ - for_each_element_in_window(window(), _b, _transformed_b, _Nsize, _Ksize, [&workloads](PrepareBWorkload && wl) - { - workloads.push_back(std::move(wl)); - }); -} - -template -void NEGEMMInterleavedPrepareBWrapperKernelTemplate::run(const Window &window, const ThreadInfo &info) -{ - ARM_COMPUTE_ERROR_ON_MISMATCHING_WINDOWS(window, INEKernel::window()); - for_each_element_in_window(window, _b, _transformed_b, _Nsize, _Ksize, [&](PrepareBWorkload && wl) - { - this->transform(wl, info); - }); -} - -template class NEGEMMInterleavedPrepareBWrapperKernelTemplate; -#ifdef __aarch64__ -template class NEGEMMInterleavedPrepareBWrapperKernelTemplate; -template class NEGEMMInterleavedPrepareBWrapperKernelTemplate; -template class NEGEMMInterleavedPrepareBWrapperKernelTemplate; -template class NEGEMMInterleavedPrepareBWrapperKernelTemplate; -#endif /* __aarch64__ */ - -#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC -template class NEGEMMInterleavedPrepareBWrapperKernelTemplate; 
-#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */ -} // namespace arm_compute diff --git a/src/core/NEON/kernels/assembly/NEGEMMInterleavedStrategies.h b/src/core/NEON/kernels/assembly/NEGEMMInterleavedStrategies.h index 69842fec80..da6ef2dea9 100644 --- a/src/core/NEON/kernels/assembly/NEGEMMInterleavedStrategies.h +++ b/src/core/NEON/kernels/assembly/NEGEMMInterleavedStrategies.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018 ARM Limited. + * Copyright (c) 2018-2019 ARM Limited. * * SPDX-License-Identifier: MIT * @@ -44,90 +44,175 @@ namespace arm_compute { -namespace +namespace detail { -template -struct Kernel +/** GEMM Interleaved Strategy interface */ +class IInterleavedStrategy { +public: + /** Virtual Destructor */ + virtual ~IInterleavedStrategy() = default; + /** Instantiate and configure a prepareB Kernel + * + * @param[in] b Input tensor B. + * @param[in] transformed_b Reshaped tensor B. + * @param[in] params GM, N, K sizes. + * @param[in] ci CPUInfo to be used for kernel configuration. + * + * @return A wrapped specialized prepareB kernel + */ + virtual std::unique_ptr instantiate_prepareB(const ITensor *b, + ITensor *transformed_b, + const INEGEMMWrapperKernel::Params ¶ms, + const CPUInfo &ci) = 0; + /** Instantiate and configure a transformA Kernel + * + * @param[in] a Input tensor A. + * @param[in] transformed_a Reshaped tensor A. + * @param[in] block_walker Window representing the layout of the matrix's blocks. + * @param[in] params M, N, K sizes. + * + * @return A wrapped specialized transformA kernel + */ + virtual std::unique_ptr instantiate_transformA(const ITensor *a, + ITensor *transformed_a, + const Window &block_walker, + const INEGEMMWrapperKernel::Params ¶ms) = 0; + /** Instantiate and configure a prepareB Kernel + * + * @param transformed_a Already reshaped tensor A. + * @param transformed_b Already reshaped tensor B. + * @param tmp_c Temporary buffer to be used to store intermediate results. + * @param c Result tensor C. + * @param block_walker Window containing iteration information for the M and batch dimensions. + * @param block_sizes Block sizes to use for the matrix multiplication (A & B must have been reshaped using these same block sizes). + * @param params M, N, K sizes. + * @param alpha Alpha value + * @param beta Beta value + * @param pretranspose_b Is B also pretransposed ? + * @param num_threads Maximum number of threads that might be used for the calculations. + * + * @return A wrapped specialized MatrixMultiply kernel + */ + virtual std::unique_ptr instantiate_matrix_multiply(const ITensor *transformed_a, const ITensor *transformed_b, ITensor *tmp_c, ITensor *c, + const Window &block_walker, const BlockSizes &block_sizes, + const INEGEMMWrapperKernel::Params ¶ms, float alpha, float beta, bool pretranspose_b, + unsigned int num_threads) = 0; + /** Calculates the block sizes of a given strategy + * + * @param[in] ci CPUInfo to be used for kernel configuration. + * @param[in] params M, N, K sizes. 
+ * + * @return BlockSizes for a given strategy + */ + virtual BlockSizes calculate_block_sizes_for_strategy(const CPUInfo &ci, const INEGEMMWrapperKernel::Params ¶ms) = 0; }; -#define DEFINE_STRATEGY_SUFFIX(strat, suffix) \ - using strategy = arm_gemm::strat; \ - static constexpr const char *name = #strat suffix; - -#define DEFINE_STRATEGY(strat) \ - DEFINE_STRATEGY_SUFFIX(strat, "") - -#ifdef __ARM_FEATURE_SVE -template <> -struct Kernel -{ - DEFINE_STRATEGY(interleaved_fp32_mla_3VLx8) -}; -template <> -struct Kernel -{ - DEFINE_STRATEGY(interleaved_fp16_mla_3VLx8) -}; -template -struct Kernel -{ - DEFINE_STRATEGY(interleaved_s8s32_dot_3VLx8) -}; -template -struct Kernel +/** Interleaved Strategy class */ +template +class InterleavedStrategy : public IInterleavedStrategy { - DEFINE_STRATEGY(interleaved_u8u32_dot_3VLx8) -}; -#else /* __ARM_FEATURE_SVE */ +public: + using strategy = StrategyType; -#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC -template <> -struct Kernel -{ - DEFINE_STRATEGY(hgemm_24x8) -}; -#endif /*__ARM_FEATURE_FP16_VECTOR_ARITHMETIC */ -#ifdef __aarch64__ -template <> -struct Kernel -{ - DEFINE_STRATEGY(sgemm_12x8) -}; -template <> -struct Kernel -{ - DEFINE_STRATEGY(gemm_s8_4x4) -}; -template <> -struct Kernel -{ - DEFINE_STRATEGY(gemm_u8_4x4) -}; +public: + // Inherited methods overridden + std::unique_ptr instantiate_prepareB(const ITensor *b, + ITensor *transformed_b, + const INEGEMMWrapperKernel::Params ¶ms, + const CPUInfo &ci) override + { + auto prepare_b = support::cpp14::make_unique>(); + prepare_b->configure(b, transformed_b, false, ci, params); + return std::move(prepare_b); + } + std::unique_ptr instantiate_transformA(const ITensor *a, + ITensor *transformed_a, + const Window &block_walker, + const INEGEMMWrapperKernel::Params ¶ms) override + { + auto transform_a = support::cpp14::make_unique>(); + transform_a->configure(a, transformed_a, false, block_walker, params); + return std::move(transform_a); + } + std::unique_ptr instantiate_matrix_multiply(const ITensor *transformed_a, const ITensor *transformed_b, ITensor *tmp_c, ITensor *c, + const Window &block_walker, const BlockSizes &block_sizes, + const INEGEMMWrapperKernel::Params ¶ms, float alpha, float beta, bool pretranspose_b, + unsigned int num_threads) override + { + auto matrix_multiply = support::cpp14::make_unique>(); + matrix_multiply->configure(transformed_a, transformed_b, tmp_c, c, block_walker, block_sizes, params, pretranspose_b, alpha, beta, num_threads); + return std::move(matrix_multiply); + } -//Use different strategies for 8bit dot product: -template <> -struct Kernel -{ - DEFINE_STRATEGY_SUFFIX(gemm_s8_12x8, "_dot") + BlockSizes calculate_block_sizes_for_strategy(const CPUInfo &ci, const INEGEMMWrapperKernel::Params ¶ms) override + { + return calculate_block_sizes(ci, params.M, params.N, params.K); + } }; -template <> -struct Kernel -{ - DEFINE_STRATEGY_SUFFIX(gemm_u8_12x8, "_dot") -}; -#else -template <> -struct Kernel -{ - DEFINE_STRATEGY(sgemm_8x6) -}; -#endif /* __aarch64__ */ -#endif /* __ARM_FEATURE_SVE */ - -#undef DEFINE_STRATEGY -#undef DEFINE_STRATEGY_SUFFIX -} // namespace +/** Create the backend GEMM strategy to use given the provided kernel info + * + * @param[in] kernel_name Kernel name of the backend strategy to instantiate + * + * @return The requested kernel strategy if exists else nullptr + */ +std::unique_ptr create_strategy(const std::string &kernel_name) +{ +#if defined(__arm__) + if(kernel_name.find("sgemm_8x6") != std::string::npos) + { + return 
support::cpp14::make_unique>(); + } +#endif // defined(__arm__) +#if defined(__aarch64__) + if(kernel_name.find("gemm_s8_4x4") != std::string::npos) + { + return support::cpp14::make_unique>(); + } + if(kernel_name.find("gemm_s8_12x8") != std::string::npos) + { + return support::cpp14::make_unique>(); + } + if(kernel_name.find("gemm_u8_4x4") != std::string::npos) + { + return support::cpp14::make_unique>(); + } + if(kernel_name.find("gemm_u8_12x8") != std::string::npos) + { + return support::cpp14::make_unique>(); + } +#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) + if(kernel_name.find("hgemm_24x8") != std::string::npos) + { + return support::cpp14::make_unique>(); + } +#endif // defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) + if(kernel_name.find("sgemm_12x8") != std::string::npos) + { + return support::cpp14::make_unique>(); + } +#if defined(__ARM_FEATURE_SVE) + if(kernel_name.find("interleaved_fp16_mla_3VLx8") != std::string::npos) + { + return support::cpp14::make_unique>(); + } + if(kernel_name.find("interleaved_fp32_mla_3VLx8") != std::string::npos) + { + return support::cpp14::make_unique>(); + } + if(kernel_name.find("interleaved_s8s32_dot_3VLx8") != std::string::npos) + { + return support::cpp14::make_unique>(); + } + if(kernel_name.find("interleaved_u8u32_dot_3VLx8") != std::string::npos) + { + return support::cpp14::make_unique>(); + } +#endif // defined(__ARM_FEATURE_SVE) +#endif // defined(__aarch64__)_ + return nullptr; +} +} // namespace detail } // namespace arm_compute #endif /* __ARM_COMPUTE_NEGEMMINTERLEAVEDSTRATEGIES_H__ */ diff --git a/src/core/NEON/kernels/assembly/NEGEMMInterleavedTransformAWrapper.cpp b/src/core/NEON/kernels/assembly/NEGEMMInterleavedTransformAWrapper.cpp deleted file mode 100644 index 3b80a1f940..0000000000 --- a/src/core/NEON/kernels/assembly/NEGEMMInterleavedTransformAWrapper.cpp +++ /dev/null @@ -1,118 +0,0 @@ -/* - * Copyright (c) 2018 ARM Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - -#include "arm_compute/core/NEON/kernels/assembly/NEGEMMInterleavedTransformAWrapper.h" - -#include "NEGEMMInterleavedStrategies.h" -#include "arm_compute/core/Helpers.h" -#include "arm_compute/core/ITensor.h" -#include "arm_compute/core/Utils.h" -#include "arm_compute/core/Validate.h" -#include "arm_compute/core/WindowIterator.h" - -#include "utils/TypePrinter.h" - -namespace arm_compute -{ -template -void NEGEMMInterleavedTransformAWrapperTemplate::configure(const ITensor *a, ITensor *transformed_a, bool transpose_a, const Window &block_walker, - const INEGEMMWrapperKernel::Params ¶ms) -{ - _a = a; - _transformed_a = transformed_a; - _transpose_a = transpose_a; - _Ksize = params.K; - _Msize = params.M; - _k_multi_window = block_walker.shift_dimensions(1); // block_walker contains (M,K,Multi) --> shift by 1 to get rid of the "M" dimension -} - -template -void NEGEMMInterleavedTransformAWrapperTemplate::transform(const TransformAWorkload &wl, const ThreadInfo &info, const Window &batch_window, const Coordinates &start_offset, - const Coordinates &end_offset) -{ - using strategy = typename Kernel::strategy; - - strategy strat(info.cpu_info); - TensorAccessor a(*_a); - TensorAccessor transformed_a(*_transformed_a); - - if(_a->info()->data_layout() == DataLayout::NHWC) - { - // In the case of NHWC we want to interpret the output shape as 3D. Thus, the batch stride for A is - // the relevant multiple of the row stride. - const size_t nhwc_batch_stride = _a->info()->strides_in_bytes().y() * _Msize; - a.set_stride(2, nhwc_batch_stride); - } - - unsigned int last_m = 0; - //TODO: Create a new iterate_1D( DimY); - int last_y = -1; - auto window_iterator = arm_compute::create_window_iterator(batch_window, start_offset, end_offset, [&](const Coordinates & id) - { - if(id.y() != last_y) - { - last_y = id.y(); - unsigned int batch = id.y(); - unsigned int first_m = id.x(); - - if(first_m >= last_m) - return; - - strat.transforms.PrepareA(transformed_a(0, first_m, batch), - a(0, 0, batch, wl._multi), - a.stride(1), first_m, last_m, wl._k0, wl._kmax, _transpose_a); - } - }); - auto on_new_row_size = [&](unsigned int start, unsigned int end) - { - last_m = std::min(end, _Msize); - }; - window_iterator.iterate_2D(on_new_row_size); -} - -template -void NEGEMMInterleavedTransformAWrapperTemplate::create_workloads(std::vector &workloads) -{ - execute_window_loop(_k_multi_window, [&](const Coordinates & id) - { - const unsigned int k0 = id.x(); - const unsigned int multi = id.y(); - const unsigned int kmax = std::min(k0 + _k_multi_window.x().step(), _Ksize); - - workloads.push_back(TransformAWorkload(k0, kmax, multi)); - }); -} - -template class NEGEMMInterleavedTransformAWrapperTemplate; -#ifdef __aarch64__ -template class NEGEMMInterleavedTransformAWrapperTemplate; -template class NEGEMMInterleavedTransformAWrapperTemplate; -template class NEGEMMInterleavedTransformAWrapperTemplate; -template class NEGEMMInterleavedTransformAWrapperTemplate; -#endif /* __aarch64__ */ - -#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC -template class NEGEMMInterleavedTransformAWrapperTemplate; -#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */ -} // namespace arm_compute diff --git a/src/core/NEON/kernels/assembly/NEGEMMNativeWrapperKernel.cpp b/src/core/NEON/kernels/assembly/NEGEMMNativeWrapperKernel.cpp index e452dfbcf2..7b1f3e7ba0 100644 --- a/src/core/NEON/kernels/assembly/NEGEMMNativeWrapperKernel.cpp +++ b/src/core/NEON/kernels/assembly/NEGEMMNativeWrapperKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018 ARM 
Limited.
+ * Copyright (c) 2018-2019 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -34,11 +34,7 @@
 #include "../arm_gemm/mergeresults.hpp"
 #include "../arm_gemm/transform.hpp"

-#include "../arm_gemm/kernels/a32_sgemm_8x6.hpp"
-#include "../arm_gemm/kernels/a64_sgemm_12x8.hpp"
 #include "../arm_gemm/kernels/a64_sgemm_native_16x4.hpp"
-#include "../arm_gemm/kernels/a64_sgemv_pretransposed.hpp"
-#include "../arm_gemm/kernels/a64_sgemv_trans.hpp"

 namespace arm_compute
 {
--
cgit v1.2.1
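
The Helpers.cpp hunk replaces the per-type strategy-name and block-size lookups with a single get_gemm_info() entry point: it packs the problem description into arm_gemm::GemmArgs and lets arm_gemm::get_gemm_method() pick the kernel, while the reworked NEGEMMInterleavedStrategies.h turns the reported kernel name back into a wrapper through detail::create_strategy(). A minimal caller-side sketch of that flow follows; it assumes arm_gemm::KernelDescription carries the selected kernel's name in a std::string member called "name", that get_gemm_info() and BlockSizes are declared in the assembly Helpers.h, and that the include paths below match this revision — it is illustrative only, not part of the patch.

#include "arm_compute/core/CPP/CPPTypes.h"                               // CPUInfo
#include "arm_compute/core/NEON/kernels/assembly/Helpers.h"              // get_gemm_info(), BlockSizes (assumed location)
#include "arm_compute/core/NEON/kernels/assembly/INEGEMMWrapperKernel.h" // Params
#include "src/core/NEON/kernels/assembly/NEGEMMInterleavedStrategies.h"  // detail::create_strategy() (private header, path assumed)

#include <memory>

namespace arm_compute
{
void select_interleaved_strategy_example(const CPUInfo &ci, unsigned int num_threads)
{
    // Problem description; the sizes are arbitrary for the example.
    INEGEMMWrapperKernel::Params p{};
    p.M       = 128;
    p.N       = 96;
    p.K       = 64;
    p.batches = 1;
    p.multis  = 1;

    // Ask arm_gemm which kernel it would select for an F32 GEMM with these arguments.
    const arm_gemm::KernelDescription kd =
        get_gemm_info(DataType::F32, ci, num_threads, p, 1.f /* alpha */, 0.f /* beta */, false /* pretranspose_hint */);

    // Map the kernel name to an interleaved strategy wrapper; nullptr means the
    // selected kernel has no interleaved wrapper registered in create_strategy().
    std::unique_ptr<detail::IInterleavedStrategy> strategy = detail::create_strategy(kd.name);
    if(strategy != nullptr)
    {
        const BlockSizes bs = strategy->calculate_block_sizes_for_strategy(ci, p);
        (void)bs; // e.g. size the reshaped A/B buffers from bs before instantiating the wrapped kernels
    }
}
} // namespace arm_compute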
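
The NEGEMMInterleavedStrategies.h rewrite swaps the old specialisation-per-type Kernel traits for a small virtual interface (IInterleavedStrategy) with one templated implementation per arm_gemm strategy, so the concrete kernel can be chosen at run time from the name string that arm_gemm reports. The stand-alone sketch below (plain C++14, no Compute Library types, all names illustrative) shows the same interface-over-template pattern in isolation:

#include <iostream>
#include <memory>
#include <string>

// Non-templated interface, analogous to detail::IInterleavedStrategy.
class IStrategy
{
public:
    virtual ~IStrategy() = default;
    virtual unsigned int out_height() const = 0;
};

// Stand-ins for two arm_gemm strategy types with different tile heights.
struct sgemm_12x8
{
    static unsigned int out_height() { return 8; }
};
struct gemm_s8_4x4
{
    static unsigned int out_height() { return 4; }
};

// Thin template wrapper, analogous to detail::InterleavedStrategy<StrategyType>.
template <typename StrategyType>
class Strategy : public IStrategy
{
public:
    unsigned int out_height() const override { return StrategyType::out_height(); }
};

// Name-based factory, analogous to detail::create_strategy(): returns nullptr when the
// reported kernel has no interleaved wrapper.
std::unique_ptr<IStrategy> create_strategy(const std::string &kernel_name)
{
    if(kernel_name.find("sgemm_12x8") != std::string::npos)
    {
        return std::make_unique<Strategy<sgemm_12x8>>();
    }
    if(kernel_name.find("gemm_s8_4x4") != std::string::npos)
    {
        return std::make_unique<Strategy<gemm_s8_4x4>>();
    }
    return nullptr;
}

int main()
{
    auto strat = create_strategy("sgemm_12x8");
    if(strat != nullptr)
    {
        std::cout << "out_height = " << strat->out_height() << "\n";
    }
    return 0;
}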
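
The removed NEGEMMInterleavedPrepareBWrapperKernel.cpp sized the reshaped B buffer by counting full x/k blocks and rounding the leftover block up to the strategy's out_width() and k_unroll(). That arithmetic is easy to lose in the diff, so here it is as a stand-alone sketch (plain C++, illustrative names, single multi):

#include <cstddef>
#include <iostream>

// Round v up to the next multiple of m (same role as ceil_to_multiple() in the diff).
static std::size_t ceil_to_multiple(std::size_t v, std::size_t m)
{
    return ((v + m - 1) / m) * m;
}

// Element count of the reshaped B buffer, mirroring get_B_pretransposed_array_size() in the
// removed prepare-B wrapper: full blocks keep their nominal size, leftover blocks are rounded
// up to the kernel's processing size.
static std::size_t pretransposed_b_size(std::size_t N, std::size_t K,
                                        std::size_t x_block, std::size_t k_block,
                                        std::size_t out_width, std::size_t k_unroll)
{
    const std::size_t num_full_x = N / x_block;
    const std::size_t num_full_k = K / k_block;
    const std::size_t left_x     = ceil_to_multiple(N % x_block, out_width);
    const std::size_t left_k     = ceil_to_multiple(K % k_block, k_unroll);

    std::size_t total = num_full_k * k_block * (num_full_x * x_block + left_x);
    total += left_k * (num_full_x * x_block + left_x);
    return total;
}

int main()
{
    // Example: N=200, K=70 with 128x16 blocks and a 12-wide / 4-deep kernel.
    std::cout << pretransposed_b_size(200, 70, 128, 16, 12, 4) << "\n";
    return 0;
}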