From 3d677ccee046cd384abf2142f323f8e9e7a4834f Mon Sep 17 00:00:00 2001 From: Anthony Barbier Date: Mon, 23 Jul 2018 16:42:59 +0100 Subject: COMPMID-1406: Refactor gemm_interleaved to use our own types and scheduler - Ported PrepareB kernel from gemm_interleaved - Ported TransformA feature from gemm_interleaved - Allocated reshaped a and b buffers - Added memory_manager / memory_group - Added MatrixMultiply kernel - Interleaved the kernels' execution - Fixed a few bugs: all nightly Convolution tests passing for threads=1 and threads=4 - Added Doxygen documentation and comments in the code - Added support for all supported data types Change-Id: Iffa1c09fda0bb9c61213bb83524d5a48e7ecb03c Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/141281 Tested-by: Jenkins Reviewed-by: Georgios Pinitas --- .../NEGEMMInterleavedMatrixMultiplyWrapper.cpp | 142 +++++++++++ .../NEGEMMInterleavedPrepareBWrapperKernel.cpp | 170 ++++++++++++++ .../kernels/assembly/NEGEMMInterleavedStrategies.h | 95 ++++++++ .../NEGEMMInterleavedTransformAWrapper.cpp | 118 ++++++++++ .../kernels/assembly/NEGEMMNativeWrapperKernel.cpp | 4 + .../NEON/functions/NEGEMMAssemblyDispatch.cpp | 111 +++++++-- .../assembly/NEGEMMInterleavedWrapper.cpp | 260 +++++++++++++++++++++ 7 files changed, 882 insertions(+), 18 deletions(-) create mode 100644 src/core/NEON/kernels/assembly/NEGEMMInterleavedMatrixMultiplyWrapper.cpp create mode 100644 src/core/NEON/kernels/assembly/NEGEMMInterleavedPrepareBWrapperKernel.cpp create mode 100644 src/core/NEON/kernels/assembly/NEGEMMInterleavedStrategies.h create mode 100644 src/core/NEON/kernels/assembly/NEGEMMInterleavedTransformAWrapper.cpp create mode 100644 src/runtime/NEON/functions/assembly/NEGEMMInterleavedWrapper.cpp diff --git a/src/core/NEON/kernels/assembly/NEGEMMInterleavedMatrixMultiplyWrapper.cpp b/src/core/NEON/kernels/assembly/NEGEMMInterleavedMatrixMultiplyWrapper.cpp new file mode 100644 index 0000000000..3d42f8a51f --- /dev/null +++ b/src/core/NEON/kernels/assembly/NEGEMMInterleavedMatrixMultiplyWrapper.cpp @@ -0,0 +1,142 @@ +/* + * Copyright (c) 2018 ARM Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE.
+ */ + +#include "arm_compute/core/NEON/kernels/assembly/NEGEMMInterleavedMatrixMultiplyWrapper.h" + +#include "NEGEMMInterleavedStrategies.h" +#include "arm_compute/core/Helpers.h" +#include "arm_compute/core/ITensor.h" +#include "arm_compute/core/Utils.h" +#include "arm_compute/core/Validate.h" +#include "arm_compute/core/WindowIterator.h" + +namespace arm_compute +{ +template +void NEGEMMInterleavedMatrixMultiplyWrapperTemplate::configure(const ITensor *prepared_a, const ITensor *transformed_b, ITensor *tmp_c, ITensor *c, const Window &block_walker, + const BlockSizes &block_sizes, const INEGEMMWrapperKernel::Params ¶ms, bool b_is_pretransposed, float alpha, float beta, unsigned int max_num_threads) +{ + using strategy = typename Kernel::strategy; + + _prepared_a = prepared_a; + _transformed_b = transformed_b; + _tmp_c = tmp_c; + _c = c; + _block_walker = block_walker; + _block_sizes = block_sizes; + _params = params; + _b_is_pretransposed = b_is_pretransposed; + _alpha = alpha; + _beta = beta; + + auto_init_if_empty(*_tmp_c->info(), c->info()->clone()->set_tensor_shape(TensorShape{ _block_sizes.x_block * strategy::out_height(), max_num_threads })); +} + +template +void NEGEMMInterleavedMatrixMultiplyWrapperTemplate::transform(const MatrixMultiplyWorkload &wl, const ThreadInfo &info, const Window &batch_window, const Coordinates &start_offset, + const Coordinates &end_offset) +{ + using strategy = typename Kernel::strategy; + + strategy strat(info.cpu_info); + TensorAccessor prepared_a(*_prepared_a); + TensorAccessor transformed_b(*_transformed_b); + TensorAccessor c(*_c); + TensorAccessor tmp_c(*_tmp_c); + + int prev_batch = -1; + To *a_ptr = nullptr; + auto window_iterator = arm_compute::create_window_iterator(batch_window, start_offset, end_offset, [&](const Coordinates & id) + { + const unsigned int y = id.x(); + const unsigned int batch = id.y(); + const unsigned int ymax = std::min(_params.M, y + strategy::out_height()); + + // If it's the first block of a new batch then reset the pointer to A. + if(prev_batch != static_cast(batch)) + { + const unsigned int first_m = id.x(); + a_ptr = prepared_a(0, first_m, batch); + prev_batch = batch; + } + + // Call matrix multiply assembly routine to process the block: + strat.kernel(a_ptr, transformed_b(wl._offset_transformed_b), tmp_c(0, info.thread_id), 1, wl._bblocks, wl._kern_k); + a_ptr += strategy::out_height() * wl._kern_k; + + // Merge the result with the other blocks' results: + strat.transforms.Merge(c(0, 0, batch, wl._multi), tmp_c(0, info.thread_id), c.stride(1), y, ymax, wl._x0, wl._xmax, _alpha, (wl._k0 == 0 ? _beta : static_cast(1))); + }); + auto on_new_row_size = [&](unsigned int start, unsigned int end) + { + //Nothing to do + }; + window_iterator.iterate_2D(on_new_row_size); +} + +template +void NEGEMMInterleavedMatrixMultiplyWrapperTemplate::create_workloads(std::vector &workloads) +{ + using strategy = typename Kernel::strategy; + + unsigned int offset_transformed_b = 0; + execute_window_loop(_block_walker, [&](const Coordinates & id) + { + const unsigned int x0 = id.x(); + const unsigned int k0 = id.y(); + const unsigned int multi = id.z(); + + const unsigned int xmax = std::min(x0 + _block_walker.x().step(), _params.N); + const unsigned int kmax = std::min(k0 + _block_walker.y().step(), _params.K); + + // Figure out how many "K" the kernel will actually process. 
+ const int kern_k = ceil_to_multiple(kmax - k0, strategy::k_unroll()); + const int bblocks = DIV_CEIL(xmax - x0, strategy::out_width()); + + workloads.push_back(MatrixMultiplyWorkload(offset_transformed_b, x0, xmax, k0, kmax, multi, kern_k, bblocks)); + + if(_b_is_pretransposed) + { + offset_transformed_b += bblocks * strategy::out_width() * kern_k; + } + else + { + ARM_COMPUTE_ERROR("Not supported"); + } + }); +} + +//TODO: regroup somewhere ? +template class NEGEMMInterleavedMatrixMultiplyWrapperTemplate; +#ifdef __aarch64__ +template class NEGEMMInterleavedMatrixMultiplyWrapperTemplate; +template class NEGEMMInterleavedMatrixMultiplyWrapperTemplate; +template class NEGEMMInterleavedMatrixMultiplyWrapperTemplate; +template class NEGEMMInterleavedMatrixMultiplyWrapperTemplate; +#endif /* __aarch64__ */ + +#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC +template class NEGEMMInterleavedMatrixMultiplyWrapperTemplate; +#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */ +} // namespace arm_compute diff --git a/src/core/NEON/kernels/assembly/NEGEMMInterleavedPrepareBWrapperKernel.cpp b/src/core/NEON/kernels/assembly/NEGEMMInterleavedPrepareBWrapperKernel.cpp new file mode 100644 index 0000000000..f33a14f2af --- /dev/null +++ b/src/core/NEON/kernels/assembly/NEGEMMInterleavedPrepareBWrapperKernel.cpp @@ -0,0 +1,170 @@ +/* + * Copyright (c) 2018 ARM Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include "arm_compute/core/NEON/kernels/assembly/NEGEMMInterleavedPrepareBWrapperKernel.h" + +#include "NEGEMMInterleavedStrategies.h" +#include "arm_compute/core/Helpers.h" +#include "arm_compute/core/ITensor.h" +#include "arm_compute/core/Utils.h" +#include "arm_compute/core/Validate.h" + +namespace arm_compute +{ +namespace +{ +// Call the lambda function for each workload generated by the passed window. 
+template +void for_each_element_in_window(const Window &window, const ITensor *b, ITensor *transformed_b, unsigned int N, unsigned int K, Lambda &&lambda) +{ + using strategy = typename Kernel::strategy; + + unsigned int offset_transformed_b = transformed_b->info()->offset_first_element_in_bytes(); + execute_window_loop(window, [&](const Coordinates & coordinates) + { + const unsigned int x0 = coordinates.x(); + const unsigned int k0 = coordinates.y(); + const unsigned int multi = coordinates.z(); + + const unsigned int offset_b = b->info()->offset_element_in_bytes(Coordinates(0, 0, multi)); + const unsigned int xmax = std::min(x0 + window.x().step(), N); + const unsigned int kmax = std::min(k0 + window.y().step(), K); + + /* Figure out the size of each block. */ + unsigned int x_size = (xmax - x0); + unsigned int k_size = (kmax - k0); + + /* Round sizes up as needed. */ + x_size = ceil_to_multiple(x_size, strategy::out_width()); + k_size = ceil_to_multiple(k_size, strategy::k_unroll()); + + lambda(PrepareBWorkload(offset_b, offset_transformed_b, x0, xmax, k0, kmax)); + + //Each workload represents one block: + offset_transformed_b += (x_size * k_size * sizeof(To)); + }); +} + +// Calculate the size of transformed_b: +template +unsigned int get_B_pretransposed_array_size(unsigned int N, unsigned int K, const BlockSizes &bs) +{ + using strategy = typename Kernel::strategy; + + // How many full blocks do N / K contain ? + size_t num_full_k = K / bs.k_block; + size_t num_full_x = N / bs.x_block; + + ARM_COMPUTE_ERROR_ON(bs.x_block % strategy::out_width() != 0); + ARM_COMPUTE_ERROR_ON(bs.k_block % strategy::k_unroll() != 0); + + size_t normal_x_size = bs.x_block; + size_t normal_k_size = bs.k_block; + + // Round up the leftovers to be a multiple of the strategy processing size: + size_t left_over_x_size = ceil_to_multiple(N % bs.x_block, strategy::out_width()); + size_t left_over_k_size = ceil_to_multiple(K % bs.k_block, strategy::k_unroll()); + + // Calculate the total size of the buffer: + size_t total = num_full_k * normal_k_size * (num_full_x * normal_x_size + left_over_x_size); + total += left_over_k_size * (left_over_x_size + num_full_x * normal_x_size); + total *= sizeof(To); + return total; +} + +} // namespace + +template +BlockSizes NEGEMMInterleavedPrepareBWrapperKernelTemplate::block_sizes() const +{ + return _block_sizes; +} + +template +void NEGEMMInterleavedPrepareBWrapperKernelTemplate::configure(const ITensor *b, ITensor *transformed_b, bool transpose_b, const CPUInfo &ci, const INEGEMMWrapperKernel::Params ¶ms) +{ + using strategy = typename Kernel::strategy; + + const unsigned int multis = b->info()->tensor_shape().z(); + _Nsize = b->info()->tensor_shape().x(); + _Ksize = b->info()->tensor_shape().y(); + _b = b; + _transformed_b = transformed_b; + _transpose_b = transpose_b; + + _block_sizes = calculate_block_sizes(ci, params.M, params.N, params.K); + + auto_init_if_empty(*transformed_b->info(), b->info()->clone()->set_tensor_shape(TensorShape{ get_B_pretransposed_array_size(_Nsize, _Ksize, _block_sizes) })); + + Window window; + window.set(Window::DimX, Window::Dimension(0, ceil_to_multiple(_Nsize, _block_sizes.x_block), _block_sizes.x_block)); + window.set(Window::DimY, Window::Dimension(0, ceil_to_multiple(_Ksize, _block_sizes.k_block), _block_sizes.k_block)); + window.set(Window::DimZ, Window::Dimension(0, multis)); + + INEKernel::configure(window); +} + +template +void NEGEMMInterleavedPrepareBWrapperKernelTemplate::transform(const PrepareBWorkload &wl, const 
ThreadInfo &info) +{ + using strategy = typename Kernel::strategy; + + strategy strat(info.cpu_info); + strat.transforms.PrepareB(reinterpret_cast(_transformed_b->buffer() + wl._offset_transformed_b), + reinterpret_cast(_b->buffer() + wl._offset_b), + _b->info()->strides_in_bytes().y() / sizeof(To), + wl._x0, wl._xmax, wl._k0, wl._kmax, _transpose_b); +} + +template +void NEGEMMInterleavedPrepareBWrapperKernelTemplate::create_workloads(std::vector &workloads) +{ + for_each_element_in_window(window(), _b, _transformed_b, _Nsize, _Ksize, [&workloads](PrepareBWorkload && wl) + { + workloads.push_back(std::move(wl)); + }); +} + +template +void NEGEMMInterleavedPrepareBWrapperKernelTemplate::run(const Window &window, const ThreadInfo &info) +{ + ARM_COMPUTE_ERROR_ON_MISMATCHING_WINDOWS(window, INEKernel::window()); + for_each_element_in_window(window, _b, _transformed_b, _Nsize, _Ksize, [&](PrepareBWorkload && wl) + { + this->transform(wl, info); + }); +} + +template class NEGEMMInterleavedPrepareBWrapperKernelTemplate; +#ifdef __aarch64__ +template class NEGEMMInterleavedPrepareBWrapperKernelTemplate; +template class NEGEMMInterleavedPrepareBWrapperKernelTemplate; +template class NEGEMMInterleavedPrepareBWrapperKernelTemplate; +template class NEGEMMInterleavedPrepareBWrapperKernelTemplate; +#endif /* __aarch64__ */ + +#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC +template class NEGEMMInterleavedPrepareBWrapperKernelTemplate; +#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */ +} // namespace arm_compute diff --git a/src/core/NEON/kernels/assembly/NEGEMMInterleavedStrategies.h b/src/core/NEON/kernels/assembly/NEGEMMInterleavedStrategies.h new file mode 100644 index 0000000000..26a8ade461 --- /dev/null +++ b/src/core/NEON/kernels/assembly/NEGEMMInterleavedStrategies.h @@ -0,0 +1,95 @@ +/* + * Copyright (c) 2018 ARM Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +#ifndef __ARM_COMPUTE_NEGEMMINTERLEAVEDSTRATEGIES_H__ +#define __ARM_COMPUTE_NEGEMMINTERLEAVEDSTRATEGIES_H__ + +#include "../arm_gemm/utils.hpp" +#include "arm_gemm.hpp" + +#include "../arm_gemm/mergeresults.hpp" +#include "../arm_gemm/transform.hpp" + +#include "../arm_gemm/kernels/a32_sgemm_8x6.hpp" +#include "../arm_gemm/kernels/a64_gemm_s8_12x8.hpp" +#include "../arm_gemm/kernels/a64_gemm_s8_4x4.hpp" +#include "../arm_gemm/kernels/a64_gemm_u8_12x8.hpp" +#include "../arm_gemm/kernels/a64_gemm_u8_4x4.hpp" +#include "../arm_gemm/kernels/a64_hgemm_24x8.hpp" +#include "../arm_gemm/kernels/a64_sgemm_12x8.hpp" + +namespace arm_compute +{ +namespace +{ +template +struct Kernel +{ +}; + +#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC +template <> +struct Kernel +{ + using strategy = arm_gemm::hgemm_24x8; +}; +#endif /*__ARM_FEATURE_FP16_VECTOR_ARITHMETIC */ +#ifdef __aarch64__ +template <> +struct Kernel +{ + using strategy = arm_gemm::sgemm_12x8; +}; +template <> +struct Kernel +{ + using strategy = arm_gemm::gemm_s8_4x4; +}; +template <> +struct Kernel +{ + using strategy = arm_gemm::gemm_u8_4x4; +}; + +//Use different strategies for 8bit dot product: +template <> +struct Kernel +{ + using strategy = arm_gemm::gemm_s8_12x8; +}; +template <> +struct Kernel +{ + using strategy = arm_gemm::gemm_u8_12x8; +}; +#else +template <> +struct Kernel +{ + using strategy = arm_gemm::sgemm_8x6; +}; +#endif /* __aarch64__ */ + +} // namespace +} // namespace arm_compute +#endif /* __ARM_COMPUTE_NEGEMMINTERLEAVEDSTRATEGIES_H__ */ diff --git a/src/core/NEON/kernels/assembly/NEGEMMInterleavedTransformAWrapper.cpp b/src/core/NEON/kernels/assembly/NEGEMMInterleavedTransformAWrapper.cpp new file mode 100644 index 0000000000..3b80a1f940 --- /dev/null +++ b/src/core/NEON/kernels/assembly/NEGEMMInterleavedTransformAWrapper.cpp @@ -0,0 +1,118 @@ +/* + * Copyright (c) 2018 ARM Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include "arm_compute/core/NEON/kernels/assembly/NEGEMMInterleavedTransformAWrapper.h" + +#include "NEGEMMInterleavedStrategies.h" +#include "arm_compute/core/Helpers.h" +#include "arm_compute/core/ITensor.h" +#include "arm_compute/core/Utils.h" +#include "arm_compute/core/Validate.h" +#include "arm_compute/core/WindowIterator.h" + +#include "utils/TypePrinter.h" + +namespace arm_compute +{ +template +void NEGEMMInterleavedTransformAWrapperTemplate::configure(const ITensor *a, ITensor *transformed_a, bool transpose_a, const Window &block_walker, + const INEGEMMWrapperKernel::Params ¶ms) +{ + _a = a; + _transformed_a = transformed_a; + _transpose_a = transpose_a; + _Ksize = params.K; + _Msize = params.M; + _k_multi_window = block_walker.shift_dimensions(1); // block_walker contains (M,K,Multi) --> shift by 1 to get rid of the "M" dimension +} + +template +void NEGEMMInterleavedTransformAWrapperTemplate::transform(const TransformAWorkload &wl, const ThreadInfo &info, const Window &batch_window, const Coordinates &start_offset, + const Coordinates &end_offset) +{ + using strategy = typename Kernel::strategy; + + strategy strat(info.cpu_info); + TensorAccessor a(*_a); + TensorAccessor transformed_a(*_transformed_a); + + if(_a->info()->data_layout() == DataLayout::NHWC) + { + // In the case of NHWC we want to interpret the output shape as 3D. Thus, the batch stride for A is + // the relevant multiple of the row stride. + const size_t nhwc_batch_stride = _a->info()->strides_in_bytes().y() * _Msize; + a.set_stride(2, nhwc_batch_stride); + } + + unsigned int last_m = 0; + //TODO: Create a new iterate_1D( DimY); + int last_y = -1; + auto window_iterator = arm_compute::create_window_iterator(batch_window, start_offset, end_offset, [&](const Coordinates & id) + { + if(id.y() != last_y) + { + last_y = id.y(); + unsigned int batch = id.y(); + unsigned int first_m = id.x(); + + if(first_m >= last_m) + return; + + strat.transforms.PrepareA(transformed_a(0, first_m, batch), + a(0, 0, batch, wl._multi), + a.stride(1), first_m, last_m, wl._k0, wl._kmax, _transpose_a); + } + }); + auto on_new_row_size = [&](unsigned int start, unsigned int end) + { + last_m = std::min(end, _Msize); + }; + window_iterator.iterate_2D(on_new_row_size); +} + +template +void NEGEMMInterleavedTransformAWrapperTemplate::create_workloads(std::vector &workloads) +{ + execute_window_loop(_k_multi_window, [&](const Coordinates & id) + { + const unsigned int k0 = id.x(); + const unsigned int multi = id.y(); + const unsigned int kmax = std::min(k0 + _k_multi_window.x().step(), _Ksize); + + workloads.push_back(TransformAWorkload(k0, kmax, multi)); + }); +} + +template class NEGEMMInterleavedTransformAWrapperTemplate; +#ifdef __aarch64__ +template class NEGEMMInterleavedTransformAWrapperTemplate; +template class NEGEMMInterleavedTransformAWrapperTemplate; +template class NEGEMMInterleavedTransformAWrapperTemplate; +template class NEGEMMInterleavedTransformAWrapperTemplate; +#endif /* __aarch64__ */ + +#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC +template class NEGEMMInterleavedTransformAWrapperTemplate; +#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */ +} // namespace arm_compute diff --git a/src/core/NEON/kernels/assembly/NEGEMMNativeWrapperKernel.cpp b/src/core/NEON/kernels/assembly/NEGEMMNativeWrapperKernel.cpp index ea6a06cada..e452dfbcf2 100644 --- a/src/core/NEON/kernels/assembly/NEGEMMNativeWrapperKernel.cpp +++ b/src/core/NEON/kernels/assembly/NEGEMMNativeWrapperKernel.cpp @@ -42,6 +42,8 @@ namespace arm_compute { 
+namespace +{ template struct Kernel { @@ -55,6 +57,8 @@ struct Kernel }; #endif /* __aarch64__ */ +} // namespace + template Window NEGEMMNativeWrapperKernel::configure_internal(float alpha, float beta) { diff --git a/src/runtime/NEON/functions/NEGEMMAssemblyDispatch.cpp b/src/runtime/NEON/functions/NEGEMMAssemblyDispatch.cpp index f17da7d2e4..8ba620fe51 100644 --- a/src/runtime/NEON/functions/NEGEMMAssemblyDispatch.cpp +++ b/src/runtime/NEON/functions/NEGEMMAssemblyDispatch.cpp @@ -24,9 +24,13 @@ #include "arm_compute/runtime/NEON/functions/NEGEMMAssemblyDispatch.h" #include "arm_compute/core/CPP/Validate.h" +#include "arm_compute/core/NEON/kernels/assembly/NEGEMMInterleavedMatrixMultiplyWrapper.h" +#include "arm_compute/core/NEON/kernels/assembly/NEGEMMInterleavedPrepareBWrapperKernel.h" +#include "arm_compute/core/NEON/kernels/assembly/NEGEMMInterleavedTransformAWrapper.h" #include "arm_compute/core/NEON/kernels/assembly/NEGEMMNativeWrapperKernel.h" #include "arm_compute/runtime/NEON/NEScheduler.h" #include "arm_compute/runtime/NEON/functions/NESimpleAssemblyFunction.h" +#include "arm_compute/runtime/NEON/functions/assembly/NEGEMMInterleavedWrapper.h" #include @@ -34,8 +38,31 @@ namespace arm_compute { namespace { +std::unique_ptr create_function_all_types(arm_gemm::GemmMethod method, const ITensor *a, const ITensor *b, ITensor *d, float alpha, float beta, bool pretranspose_hint, + std::shared_ptr memory_manager) + +{ + //Note: It's safe to not check for FP16 support because this was already checked in NEGEMMAssemblyDispatch::configure() + switch(method) + { + case arm_gemm::GemmMethod::GEMM_INTERLEAVED: + { + if(!pretranspose_hint) + { + return nullptr; + } + auto function = support::cpp14::make_unique(memory_manager); + function->configure(a, b, d, alpha, beta, pretranspose_hint); + return std::move(function); + } + default: + return nullptr; + } +} + template -std::unique_ptr create_function(arm_gemm::GemmMethod method, const ITensor *a, const ITensor *b, ITensor *d, float alpha, float beta, bool pretranspose_hint) +std::unique_ptr create_function(arm_gemm::GemmMethod method, const ITensor *a, const ITensor *b, ITensor *d, float alpha, float beta, bool pretranspose_hint, + std::shared_ptr memory_manager) { ARM_COMPUTE_UNUSED(method); ARM_COMPUTE_UNUSED(a); @@ -44,21 +71,63 @@ std::unique_ptr create_function(arm_gemm::GemmMethod method, const IT ARM_COMPUTE_UNUSED(alpha); ARM_COMPUTE_UNUSED(beta); ARM_COMPUTE_UNUSED(pretranspose_hint); + ARM_COMPUTE_UNUSED(memory_manager); return nullptr; } + +#ifdef __aarch64__ template <> -std::unique_ptr create_function(arm_gemm::GemmMethod method, const ITensor *a, const ITensor *b, ITensor *d, float alpha, float beta, bool pretranspose_hint) +std::unique_ptr create_function(arm_gemm::GemmMethod method, const ITensor *a, const ITensor *b, ITensor *d, float alpha, float beta, bool pretranspose_hint, + std::shared_ptr memory_manager) +{ + switch(method) + { + case arm_gemm::GemmMethod::GEMM_INTERLEAVED_DOT: + { + if(!pretranspose_hint) + { + return nullptr; + } + auto function = support::cpp14::make_unique(memory_manager); + function->configure(a, b, d, alpha, beta, pretranspose_hint, true /* use_dot */); + return std::move(function); + } + default: + return nullptr; + } + return nullptr; +} + +template <> +std::unique_ptr create_function(arm_gemm::GemmMethod method, const ITensor *a, const ITensor *b, ITensor *d, float alpha, float beta, bool pretranspose_hint, + std::shared_ptr memory_manager) +{ + switch(method) + { + case 
arm_gemm::GemmMethod::GEMM_INTERLEAVED_DOT: + { + if(!pretranspose_hint) + { + return nullptr; + } + auto function = support::cpp14::make_unique(memory_manager); + function->configure(a, b, d, alpha, beta, pretranspose_hint, true /* use_dot */); + return std::move(function); + } + default: + return nullptr; + } + return nullptr; +} + +template <> +std::unique_ptr create_function(arm_gemm::GemmMethod method, const ITensor *a, const ITensor *b, ITensor *d, float alpha, float beta, bool pretranspose_hint, + std::shared_ptr memory_manager) { - ARM_COMPUTE_UNUSED(method); - ARM_COMPUTE_UNUSED(a); - ARM_COMPUTE_UNUSED(b); - ARM_COMPUTE_UNUSED(d); - ARM_COMPUTE_UNUSED(alpha); - ARM_COMPUTE_UNUSED(beta); ARM_COMPUTE_UNUSED(pretranspose_hint); + ARM_COMPUTE_UNUSED(memory_manager); switch(method) { -#ifdef __aarch64__ case arm_gemm::GemmMethod::GEMM_NATIVE: { auto kernel = support::cpp14::make_unique>(); @@ -67,11 +136,11 @@ std::unique_ptr create_function(arm_gemm::GemmMethod me function->configure(std::move(kernel)); return std::move(function); } -#endif /* __aarch64__ */ default: return nullptr; } } +#endif /* __aarch64__ */ /** Fallback in case ACL doesn't have a function */ template @@ -173,11 +242,11 @@ void Fallback::prepare() // Pretranspose B if required if(_gemm_kernel_asm->B_pretranspose_required()) { + ARM_COMPUTE_ERROR_ON(_pretranspose.buffer() == nullptr); const int ldb = _b->info()->strides_in_bytes().y() / sizeof(TypeInput); const auto in1_ptr = reinterpret_cast(_b->buffer() + _b->info()->offset_first_element_in_bytes()); const int multi_stride_b = _b->info()->strides_in_bytes().z() / sizeof(TypeInput); - ARM_COMPUTE_ERROR_ON(_pretranspose.buffer() == nullptr); _gemm_kernel_asm->pretranspose_B_array(_pretranspose.buffer(), in1_ptr, ldb, multi_stride_b); _b->mark_as_unused(); } @@ -260,7 +329,7 @@ void Fallback::run() template void create_function_or_arm_gemm(std::unique_ptr &acl_function, std::unique_ptr &arm_gemm, MemoryGroup &memory_group, const ITensor *a, const ITensor *b, - ITensor *d, float alpha, float beta, bool pretranspose_hint) + ITensor *d, float alpha, float beta, bool pretranspose_hint, std::shared_ptr memory_manager) { INEGEMMWrapperKernel::Params p = INEGEMMWrapperKernel::extract_parameters(a, b, d); const CPUInfo &ci = NEScheduler::get().cpu_info(); @@ -269,7 +338,13 @@ void create_function_or_arm_gemm(std::unique_ptr &acl_function, std:: arm_gemm::GemmArgs args(&ci, p.M, p.N, p.K, p.batches, p.multis, false, false, alpha, beta, num_threads, pretranspose_hint); //Try to create an ACL function: - acl_function = create_function(arm_gemm::get_gemm_method(args), a, b, d, alpha, beta, pretranspose_hint); + acl_function = create_function_all_types(arm_gemm::get_gemm_method(args), a, b, d, alpha, beta, pretranspose_hint, memory_manager); + // If the type agnostic factory failed to create an ACL function, try the specialised one: + if(acl_function == nullptr) + { + acl_function = create_function(arm_gemm::get_gemm_method(args), a, b, d, alpha, beta, pretranspose_hint, memory_manager); + } + //If we still don't have an ACL function: if(acl_function == nullptr) { //Fallback onto arm_gemm function if ACL doesn't support this method. 
@@ -282,7 +357,7 @@ void create_function_or_arm_gemm(std::unique_ptr &acl_function, std:: } //namespace NEGEMMAssemblyDispatch::NEGEMMAssemblyDispatch(std::shared_ptr memory_manager) - : _function(nullptr), _arm_gemm(nullptr), _memory_group(std::move(memory_manager)) + : _function(nullptr), _arm_gemm(nullptr), _memory_group(memory_manager), _memory_manager(memory_manager) { } @@ -321,20 +396,20 @@ void NEGEMMAssemblyDispatch::configure(const ITensor *a, const ITensor *b, ITens switch(a->info()->data_type()) { case DataType::F32: - create_function_or_arm_gemm(_function, _arm_gemm, _memory_group, a, b, d, alpha, beta, pretranspose_hint); + create_function_or_arm_gemm(_function, _arm_gemm, _memory_group, a, b, d, alpha, beta, pretranspose_hint, _memory_manager); break; #ifdef __aarch64__ case DataType::U8: case DataType::QASYMM8: - create_function_or_arm_gemm(_function, _arm_gemm, _memory_group, a, b, d, alpha, beta, pretranspose_hint); + create_function_or_arm_gemm(_function, _arm_gemm, _memory_group, a, b, d, alpha, beta, pretranspose_hint, _memory_manager); break; case DataType::S8: - create_function_or_arm_gemm(_function, _arm_gemm, _memory_group, a, b, d, alpha, beta, pretranspose_hint); + create_function_or_arm_gemm(_function, _arm_gemm, _memory_group, a, b, d, alpha, beta, pretranspose_hint, _memory_manager); break; #endif /* __aarch64__ */ #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC case DataType::F16: - create_function_or_arm_gemm(_function, _arm_gemm, _memory_group, a, b, d, alpha, beta, pretranspose_hint); + create_function_or_arm_gemm(_function, _arm_gemm, _memory_group, a, b, d, alpha, beta, pretranspose_hint, _memory_manager); break; #endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */ default: diff --git a/src/runtime/NEON/functions/assembly/NEGEMMInterleavedWrapper.cpp b/src/runtime/NEON/functions/assembly/NEGEMMInterleavedWrapper.cpp new file mode 100644 index 0000000000..434723ca1a --- /dev/null +++ b/src/runtime/NEON/functions/assembly/NEGEMMInterleavedWrapper.cpp @@ -0,0 +1,260 @@ +/* + * Copyright (c) 2018 ARM Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include "arm_compute/runtime/NEON/functions/assembly/NEGEMMInterleavedWrapper.h" + +#include "arm_compute/core/ITensor.h" +#include "arm_compute/core/NEON/kernels/assembly/NEGEMMInterleavedMatrixMultiplyWrapper.h" +#include "arm_compute/core/NEON/kernels/assembly/NEGEMMInterleavedPrepareBWrapperKernel.h" +#include "arm_compute/core/NEON/kernels/assembly/NEGEMMInterleavedTransformAWrapper.h" +#include "arm_compute/core/Utils.h" +#include "arm_compute/runtime/NEON/NEScheduler.h" + +namespace arm_compute +{ +NEGEMMInterleavedWrapper::NEGEMMInterleavedWrapper(std::shared_ptr memory_manager) + : _memory_group(std::move(memory_manager)) +{ +} +void NEGEMMInterleavedWrapper::run() +{ + prepare(); + + _memory_group.acquire(); + NEScheduler::get().run_workloads(_workloads); + _memory_group.release(); +} + +void NEGEMMInterleavedWrapper::prepare() +{ + if(!_is_prepared) + { + if(_pretranspose_b) + { + NEScheduler::get().schedule(_prepare_b.get(), Window::DimX); + _b->mark_as_unused(); + } + else + { + _prepare_b->create_workloads(_b_workloads); + } + _transform_a->create_workloads(_a_workloads); + _matrix_multiply->create_workloads(_mm_workloads); + + //Maximum number of workloads to create: + const unsigned int num_threads = NEScheduler::get().num_threads(); + const unsigned int max_iterations = num_threads == 1 ? 1 : num_threads * 4; + //Maximum number of iterations the parameters allow: + const unsigned int num_iterations = _batch_window.num_iterations_total(); + // Keep the smallest of the two: + const unsigned int num_windows = std::min(num_iterations, max_iterations); + const TensorShape window_shape = _batch_window.shape(); + + // Create a 1D window to dynamically split the batch window: + Window win_1D; + win_1D.set(0, Window::Dimension(0, num_iterations)); + + // Create one workload for each sub-window: + for(unsigned int w = 0; w < num_windows; w++) + { + Window win = win_1D.split_window(0, w, num_windows); + const Coordinates start_offset = index2coords(window_shape, win.x().start()); + const Coordinates end_offset = index2coords(window_shape, win.x().end() - 1); + const unsigned int num_x_blocks = _block_walker.num_iterations(Window::DimX); + + auto workload = [start_offset, end_offset, num_x_blocks, this](const ThreadInfo & info) + { + //For each block of rows in "M" + auto workload_mm = this->_mm_workloads.begin(); + for(auto workload_a = this->_a_workloads.begin(); workload_a != this->_a_workloads.end(); workload_a++) + { + // Transform one k_block from A: + this->_transform_a->transform(*workload_a, info, this->_batch_window, start_offset, end_offset); + // Then perform the matrix multiplication for each x block along N: + for(unsigned int i = 0; i < num_x_blocks; i++) + { + ARM_COMPUTE_ERROR_ON(workload_mm == this->_mm_workloads.end()); + this->_matrix_multiply->transform(*workload_mm++, info, this->_batch_window, start_offset, end_offset); + } + } + }; + _workloads.push_back(workload); + } + + _is_prepared = true; + } +} + +namespace +{ +// Factory to instantiate NEGEMMInterleavedPrepareBWrapperKernel: +template +std::unique_ptr instantiate_prepareB(const ITensor *b, ITensor *transformed_b, const INEGEMMWrapperKernel::Params ¶ms) +{ + auto prepare_b = support::cpp14::make_unique>(); + prepare_b->configure(b, transformed_b, false, NEScheduler::get().cpu_info(), params); + return std::move(prepare_b); +} + +// Factory to instantiate NEGEMMInterleavedTransformAWrapperTemplate: +template +std::unique_ptr instantiate_transformA(const ITensor *a, ITensor *transformed_a, const 
Window &block_walker, const INEGEMMWrapperKernel::Params ¶ms) +{ + auto transform_a = support::cpp14::make_unique>(); + transform_a->configure(a, transformed_a, false, block_walker, params); + return std::move(transform_a); +} + +// Factory to instantiate NEGEMMInterleavedTransformAWrapperTemplate: +template +std::unique_ptr instantiate_matrix_multiply(const ITensor *transformed_a, const ITensor *transformed_b, ITensor *tmp_c, ITensor *c, const Window &block_walker, + const BlockSizes &block_sizes, const INEGEMMWrapperKernel::Params ¶ms, bool pretranspose_b, float alpha, float beta) +{ + auto matrix_multiply = support::cpp14::make_unique>(); + matrix_multiply->configure(transformed_a, transformed_b, tmp_c, c, block_walker, block_sizes, params, pretranspose_b, alpha, beta, NEScheduler::get().num_threads()); + return std::move(matrix_multiply); +} +} // namespace + +void NEGEMMInterleavedWrapper::configure(const ITensor *a, const ITensor *b, ITensor *c, float alpha, float beta, bool pretranspose_b, bool use_dot) +{ + _params = INEGEMMWrapperKernel::extract_parameters(a, b, c); + _a = a; + _b = b; + _c = c; + _pretranspose_b = pretranspose_b; + + DataType input_type = a->info()->data_type(); + + // Forcing 128-byte alignment (required by 32-bit kernels) + const unsigned int alignment = 128; + _transformed_b.allocator()->init(TensorInfo{}, alignment); + _tmp_c.allocator()->init(TensorInfo{}, alignment); + if(!_pretranspose_b) + { + // If B is transposed at every iteration then transformed_B can be managed: + _memory_group.manage(&_transformed_b); + } + switch(input_type) + { + case DataType::F32: + _prepare_b = instantiate_prepareB(_b, &_transformed_b, _params); + break; +#ifdef __aarch64__ + case DataType::U8: + case DataType::QASYMM8: + if(use_dot) + { + _prepare_b = instantiate_prepareB(_b, &_transformed_b, _params); + } + else + { + _prepare_b = instantiate_prepareB(_b, &_transformed_b, _params); + } + break; + case DataType::S8: + if(use_dot) + { + _prepare_b = instantiate_prepareB(_b, &_transformed_b, _params); + } + else + { + _prepare_b = instantiate_prepareB(_b, &_transformed_b, _params); + } + break; +#endif /* __aarch64__ */ +#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC + case DataType::F16: + _prepare_b = instantiate_prepareB<__fp16>(_b, &_transformed_b, _params); + break; +#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */ + default: + ARM_COMPUTE_ERROR("DataType not supported"); + break; + } + ARM_COMPUTE_ERROR_ON(_prepare_b == nullptr); + + _block_sizes = _prepare_b->block_sizes(); + + _block_walker.set(Window::DimX, Window::Dimension(0, ceil_to_multiple(_params.N, _block_sizes.x_block), _block_sizes.x_block)); + _block_walker.set(Window::DimY, Window::Dimension(0, ceil_to_multiple(_params.K, _block_sizes.k_block), _block_sizes.k_block)); + _block_walker.set(Window::DimZ, Window::Dimension(0, _params.multis)); + + _batch_window.set(Window::DimX, Window::Dimension(0, ceil_to_multiple(_block_sizes.m_round, _block_sizes.strategy_out_height), _block_sizes.strategy_out_height)); + _batch_window.set(Window::DimY, Window::Dimension(0, _params.batches)); + + _transformed_a.allocator()->init(TensorInfo(TensorShape{ _block_sizes.k_block, _block_sizes.m_round, _params.batches }, 1, input_type), alignment); + _memory_group.manage(&_transformed_a); + _memory_group.manage(&_tmp_c); + + switch(input_type) + { + case DataType::F32: + _transform_a = instantiate_transformA(_a, &_transformed_a, _block_walker, _params); + _matrix_multiply = instantiate_matrix_multiply(&_transformed_a, 
&_transformed_b, &_tmp_c, c, _block_walker, _block_sizes, _params, pretranspose_b, alpha, beta); + break; +#ifdef __aarch64__ + case DataType::U8: + case DataType::QASYMM8: + if(use_dot) + { + _transform_a = instantiate_transformA(_a, &_transformed_a, _block_walker, _params); + _matrix_multiply = instantiate_matrix_multiply(&_transformed_a, &_transformed_b, &_tmp_c, c, _block_walker, _block_sizes, _params, pretranspose_b, alpha, beta); + } + else + { + _transform_a = instantiate_transformA(_a, &_transformed_a, _block_walker, _params); + _matrix_multiply = instantiate_matrix_multiply(&_transformed_a, &_transformed_b, &_tmp_c, c, _block_walker, _block_sizes, _params, pretranspose_b, alpha, beta); + } + break; + case DataType::S8: + if(use_dot) + { + _transform_a = instantiate_transformA(_a, &_transformed_a, _block_walker, _params); + _matrix_multiply = instantiate_matrix_multiply(&_transformed_a, &_transformed_b, &_tmp_c, c, _block_walker, _block_sizes, _params, pretranspose_b, alpha, beta); + } + else + { + _transform_a = instantiate_transformA(_a, &_transformed_a, _block_walker, _params); + _matrix_multiply = instantiate_matrix_multiply(&_transformed_a, &_transformed_b, &_tmp_c, c, _block_walker, _block_sizes, _params, pretranspose_b, alpha, beta); + } + break; +#endif /* __aarch64__ */ +#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC + case DataType::F16: + _transform_a = instantiate_transformA<__fp16>(_a, &_transformed_a, _block_walker, _params); + _matrix_multiply = instantiate_matrix_multiply<__fp16, __fp16>(&_transformed_a, &_transformed_b, &_tmp_c, c, _block_walker, _block_sizes, _params, pretranspose_b, alpha, beta); + break; +#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */ + default: + break; + } + ARM_COMPUTE_ERROR_ON(_transform_a == nullptr); + ARM_COMPUTE_ERROR_ON(_matrix_multiply == nullptr); + _transformed_a.allocator()->allocate(); + _tmp_c.allocator()->allocate(); + _transformed_b.allocator()->allocate(); +} +} // namespace arm_compute
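
For reference, here is a minimal usage sketch of the NEGEMMInterleavedWrapper introduced by this patch. It is an illustration, not part of the patch: the problem sizes (M=128, N=96, K=64), the nullptr memory manager and the omitted buffer filling are hypothetical, and it assumes the use_dot parameter of configure() has a default value in the header (not shown here). The constructor, configure() and run() calls follow the code above; tensor shapes follow INEGEMMWrapperKernel::extract_parameters(), i.e. A is (K, M), B is (N, K) and the output is (N, M).

```cpp
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/TensorShape.h"
#include "arm_compute/runtime/NEON/functions/assembly/NEGEMMInterleavedWrapper.h"
#include "arm_compute/runtime/Tensor.h"

using namespace arm_compute;

int main()
{
    // Hypothetical sizes: D[M x N] = alpha * A[M x K] * B[K x N] + beta * D
    const unsigned int M = 128, N = 96, K = 64;

    Tensor a, b, d;
    a.allocator()->init(TensorInfo(TensorShape(K, M), 1, DataType::F32));
    b.allocator()->init(TensorInfo(TensorShape(N, K), 1, DataType::F32));
    d.allocator()->init(TensorInfo(TensorShape(N, M), 1, DataType::F32));

    // No memory manager: the reshaped A, reshaped B and tmp C buffers are
    // allocated directly at the end of configure().
    NEGEMMInterleavedWrapper gemm(nullptr);
    gemm.configure(&a, &b, &d, 1.0f /* alpha */, 0.0f /* beta */, true /* pretranspose_b */);

    a.allocator()->allocate();
    b.allocator()->allocate();
    d.allocator()->allocate();

    // ... fill a and b with input data ...

    // The first run() calls prepare(): B is pretransposed once and the
    // per-thread workloads are created, then the workloads are executed.
    gemm.run();
    return 0;
}
```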
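The blocking scheme driven by the workload lambda in NEGEMMInterleavedWrapper::prepare() (TransformA once per K block, then MatrixMultiply for every X block, with beta only applied when merging the first K block, mirroring the `wl._k0 == 0 ? _beta : 1` argument passed to Merge) can be summarised with the scalar reference below. All names here are illustrative; the real kernels operate on interleaved/pretransposed panels, not plain row-major arrays.

```cpp
#include <algorithm>
#include <vector>

// Scalar reference of the K-block / X-block decomposition used by the
// interleaved GEMM: C[M x N] = alpha * A[M x K] * B[K x N] + beta * C
void blocked_gemm_reference(const std::vector<float> &a, const std::vector<float> &b, std::vector<float> &c,
                            unsigned int M, unsigned int N, unsigned int K,
                            float alpha, float beta,
                            unsigned int k_block, unsigned int x_block)
{
    for(unsigned int k0 = 0; k0 < K; k0 += k_block) // One "TransformA" per K block
    {
        const unsigned int kmax = std::min(k0 + k_block, K);
        for(unsigned int x0 = 0; x0 < N; x0 += x_block) // One "MatrixMultiply" per X block
        {
            const unsigned int xmax = std::min(x0 + x_block, N);
            for(unsigned int m = 0; m < M; ++m)
            {
                for(unsigned int x = x0; x < xmax; ++x)
                {
                    float acc = 0.f;
                    for(unsigned int k = k0; k < kmax; ++k)
                    {
                        acc += a[m * K + k] * b[k * N + x];
                    }
                    // Merge: beta is only applied for the first K block (k0 == 0);
                    // later K blocks accumulate into the partial result.
                    c[m * N + x] = alpha * acc + (k0 == 0 ? beta : 1.f) * c[m * N + x];
                }
            }
        }
    }
}
```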