From 3d677ccee046cd384abf2142f323f8e9e7a4834f Mon Sep 17 00:00:00 2001
From: Anthony Barbier
Date: Mon, 23 Jul 2018 16:42:59 +0100
Subject: COMPMID-1406: Refactor gemm_interleaved to use our own types and
 scheduler

- Ported the PrepareB kernel from gemm_interleaved
- Ported the TransformA feature from gemm_interleaved
- Allocated the reshaped A and B buffers
- Added memory_manager / memory_group
- Added the MatrixMultiply kernel
- Interleaved the kernels' execution
- Fixed a few bugs: all nightly Convolution tests pass for threads=1 and threads=4
- Added Doxygen documentation and comments in the code
- Added support for all supported data types

Change-Id: Iffa1c09fda0bb9c61213bb83524d5a48e7ecb03c
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/141281
Tested-by: Jenkins
Reviewed-by: Georgios Pinitas
---
 .../NEGEMMInterleavedMatrixMultiplyWrapper.cpp     | 142 +++++++++++++++++
 .../NEGEMMInterleavedPrepareBWrapperKernel.cpp     | 170 +++++++++++++++++++++
 .../kernels/assembly/NEGEMMInterleavedStrategies.h |  95 ++++++++++++
 .../NEGEMMInterleavedTransformAWrapper.cpp         | 118 ++++++++++++++
 .../kernels/assembly/NEGEMMNativeWrapperKernel.cpp |   4 +
 5 files changed, 529 insertions(+)
 create mode 100644 src/core/NEON/kernels/assembly/NEGEMMInterleavedMatrixMultiplyWrapper.cpp
 create mode 100644 src/core/NEON/kernels/assembly/NEGEMMInterleavedPrepareBWrapperKernel.cpp
 create mode 100644 src/core/NEON/kernels/assembly/NEGEMMInterleavedStrategies.h
 create mode 100644 src/core/NEON/kernels/assembly/NEGEMMInterleavedTransformAWrapper.cpp

diff --git a/src/core/NEON/kernels/assembly/NEGEMMInterleavedMatrixMultiplyWrapper.cpp b/src/core/NEON/kernels/assembly/NEGEMMInterleavedMatrixMultiplyWrapper.cpp
new file mode 100644
index 0000000000..3d42f8a51f
--- /dev/null
+++ b/src/core/NEON/kernels/assembly/NEGEMMInterleavedMatrixMultiplyWrapper.cpp
@@ -0,0 +1,142 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */ + +#include "arm_compute/core/NEON/kernels/assembly/NEGEMMInterleavedMatrixMultiplyWrapper.h" + +#include "NEGEMMInterleavedStrategies.h" +#include "arm_compute/core/Helpers.h" +#include "arm_compute/core/ITensor.h" +#include "arm_compute/core/Utils.h" +#include "arm_compute/core/Validate.h" +#include "arm_compute/core/WindowIterator.h" + +namespace arm_compute +{ +template +void NEGEMMInterleavedMatrixMultiplyWrapperTemplate::configure(const ITensor *prepared_a, const ITensor *transformed_b, ITensor *tmp_c, ITensor *c, const Window &block_walker, + const BlockSizes &block_sizes, const INEGEMMWrapperKernel::Params ¶ms, bool b_is_pretransposed, float alpha, float beta, unsigned int max_num_threads) +{ + using strategy = typename Kernel::strategy; + + _prepared_a = prepared_a; + _transformed_b = transformed_b; + _tmp_c = tmp_c; + _c = c; + _block_walker = block_walker; + _block_sizes = block_sizes; + _params = params; + _b_is_pretransposed = b_is_pretransposed; + _alpha = alpha; + _beta = beta; + + auto_init_if_empty(*_tmp_c->info(), c->info()->clone()->set_tensor_shape(TensorShape{ _block_sizes.x_block * strategy::out_height(), max_num_threads })); +} + +template +void NEGEMMInterleavedMatrixMultiplyWrapperTemplate::transform(const MatrixMultiplyWorkload &wl, const ThreadInfo &info, const Window &batch_window, const Coordinates &start_offset, + const Coordinates &end_offset) +{ + using strategy = typename Kernel::strategy; + + strategy strat(info.cpu_info); + TensorAccessor prepared_a(*_prepared_a); + TensorAccessor transformed_b(*_transformed_b); + TensorAccessor c(*_c); + TensorAccessor tmp_c(*_tmp_c); + + int prev_batch = -1; + To *a_ptr = nullptr; + auto window_iterator = arm_compute::create_window_iterator(batch_window, start_offset, end_offset, [&](const Coordinates & id) + { + const unsigned int y = id.x(); + const unsigned int batch = id.y(); + const unsigned int ymax = std::min(_params.M, y + strategy::out_height()); + + // If it's the first block of a new batch then reset the pointer to A. + if(prev_batch != static_cast(batch)) + { + const unsigned int first_m = id.x(); + a_ptr = prepared_a(0, first_m, batch); + prev_batch = batch; + } + + // Call matrix multiply assembly routine to process the block: + strat.kernel(a_ptr, transformed_b(wl._offset_transformed_b), tmp_c(0, info.thread_id), 1, wl._bblocks, wl._kern_k); + a_ptr += strategy::out_height() * wl._kern_k; + + // Merge the result with the other blocks' results: + strat.transforms.Merge(c(0, 0, batch, wl._multi), tmp_c(0, info.thread_id), c.stride(1), y, ymax, wl._x0, wl._xmax, _alpha, (wl._k0 == 0 ? _beta : static_cast(1))); + }); + auto on_new_row_size = [&](unsigned int start, unsigned int end) + { + //Nothing to do + }; + window_iterator.iterate_2D(on_new_row_size); +} + +template +void NEGEMMInterleavedMatrixMultiplyWrapperTemplate::create_workloads(std::vector &workloads) +{ + using strategy = typename Kernel::strategy; + + unsigned int offset_transformed_b = 0; + execute_window_loop(_block_walker, [&](const Coordinates & id) + { + const unsigned int x0 = id.x(); + const unsigned int k0 = id.y(); + const unsigned int multi = id.z(); + + const unsigned int xmax = std::min(x0 + _block_walker.x().step(), _params.N); + const unsigned int kmax = std::min(k0 + _block_walker.y().step(), _params.K); + + // Figure out how many "K" the kernel will actually process. 
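+ // kern_k is the block depth (kmax - k0) rounded up to a multiple of the strategy's k_unroll(),
+ // and bblocks is the number of out_width()-wide column blocks needed to cover [x0, xmax).
+ // The same two values determine how far offset_transformed_b advances for the next workload.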
+ const int kern_k = ceil_to_multiple(kmax - k0, strategy::k_unroll()); + const int bblocks = DIV_CEIL(xmax - x0, strategy::out_width()); + + workloads.push_back(MatrixMultiplyWorkload(offset_transformed_b, x0, xmax, k0, kmax, multi, kern_k, bblocks)); + + if(_b_is_pretransposed) + { + offset_transformed_b += bblocks * strategy::out_width() * kern_k; + } + else + { + ARM_COMPUTE_ERROR("Not supported"); + } + }); +} + +//TODO: regroup somewhere ? +template class NEGEMMInterleavedMatrixMultiplyWrapperTemplate; +#ifdef __aarch64__ +template class NEGEMMInterleavedMatrixMultiplyWrapperTemplate; +template class NEGEMMInterleavedMatrixMultiplyWrapperTemplate; +template class NEGEMMInterleavedMatrixMultiplyWrapperTemplate; +template class NEGEMMInterleavedMatrixMultiplyWrapperTemplate; +#endif /* __aarch64__ */ + +#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC +template class NEGEMMInterleavedMatrixMultiplyWrapperTemplate; +#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */ +} // namespace arm_compute diff --git a/src/core/NEON/kernels/assembly/NEGEMMInterleavedPrepareBWrapperKernel.cpp b/src/core/NEON/kernels/assembly/NEGEMMInterleavedPrepareBWrapperKernel.cpp new file mode 100644 index 0000000000..f33a14f2af --- /dev/null +++ b/src/core/NEON/kernels/assembly/NEGEMMInterleavedPrepareBWrapperKernel.cpp @@ -0,0 +1,170 @@ +/* + * Copyright (c) 2018 ARM Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include "arm_compute/core/NEON/kernels/assembly/NEGEMMInterleavedPrepareBWrapperKernel.h" + +#include "NEGEMMInterleavedStrategies.h" +#include "arm_compute/core/Helpers.h" +#include "arm_compute/core/ITensor.h" +#include "arm_compute/core/Utils.h" +#include "arm_compute/core/Validate.h" + +namespace arm_compute +{ +namespace +{ +// Call the lambda function for each workload generated by the passed window. 
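+// Each workload covers one (x_block, k_block, multi) block of B. The block sizes are rounded
+// up to multiples of out_width() / k_unroll() so that offset_transformed_b advances in step
+// with the buffer layout assumed by get_B_pretransposed_array_size() below.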
+template +void for_each_element_in_window(const Window &window, const ITensor *b, ITensor *transformed_b, unsigned int N, unsigned int K, Lambda &&lambda) +{ + using strategy = typename Kernel::strategy; + + unsigned int offset_transformed_b = transformed_b->info()->offset_first_element_in_bytes(); + execute_window_loop(window, [&](const Coordinates & coordinates) + { + const unsigned int x0 = coordinates.x(); + const unsigned int k0 = coordinates.y(); + const unsigned int multi = coordinates.z(); + + const unsigned int offset_b = b->info()->offset_element_in_bytes(Coordinates(0, 0, multi)); + const unsigned int xmax = std::min(x0 + window.x().step(), N); + const unsigned int kmax = std::min(k0 + window.y().step(), K); + + /* Figure out the size of each block. */ + unsigned int x_size = (xmax - x0); + unsigned int k_size = (kmax - k0); + + /* Round sizes up as needed. */ + x_size = ceil_to_multiple(x_size, strategy::out_width()); + k_size = ceil_to_multiple(k_size, strategy::k_unroll()); + + lambda(PrepareBWorkload(offset_b, offset_transformed_b, x0, xmax, k0, kmax)); + + //Each workload represents one block: + offset_transformed_b += (x_size * k_size * sizeof(To)); + }); +} + +// Calculate the size of transformed_b: +template +unsigned int get_B_pretransposed_array_size(unsigned int N, unsigned int K, const BlockSizes &bs) +{ + using strategy = typename Kernel::strategy; + + // How many full blocks do N / K contain ? + size_t num_full_k = K / bs.k_block; + size_t num_full_x = N / bs.x_block; + + ARM_COMPUTE_ERROR_ON(bs.x_block % strategy::out_width() != 0); + ARM_COMPUTE_ERROR_ON(bs.k_block % strategy::k_unroll() != 0); + + size_t normal_x_size = bs.x_block; + size_t normal_k_size = bs.k_block; + + // Round up the leftovers to be a multiple of the strategy processing size: + size_t left_over_x_size = ceil_to_multiple(N % bs.x_block, strategy::out_width()); + size_t left_over_k_size = ceil_to_multiple(K % bs.k_block, strategy::k_unroll()); + + // Calculate the total size of the buffer: + size_t total = num_full_k * normal_k_size * (num_full_x * normal_x_size + left_over_x_size); + total += left_over_k_size * (left_over_x_size + num_full_x * normal_x_size); + total *= sizeof(To); + return total; +} + +} // namespace + +template +BlockSizes NEGEMMInterleavedPrepareBWrapperKernelTemplate::block_sizes() const +{ + return _block_sizes; +} + +template +void NEGEMMInterleavedPrepareBWrapperKernelTemplate::configure(const ITensor *b, ITensor *transformed_b, bool transpose_b, const CPUInfo &ci, const INEGEMMWrapperKernel::Params ¶ms) +{ + using strategy = typename Kernel::strategy; + + const unsigned int multis = b->info()->tensor_shape().z(); + _Nsize = b->info()->tensor_shape().x(); + _Ksize = b->info()->tensor_shape().y(); + _b = b; + _transformed_b = transformed_b; + _transpose_b = transpose_b; + + _block_sizes = calculate_block_sizes(ci, params.M, params.N, params.K); + + auto_init_if_empty(*transformed_b->info(), b->info()->clone()->set_tensor_shape(TensorShape{ get_B_pretransposed_array_size(_Nsize, _Ksize, _block_sizes) })); + + Window window; + window.set(Window::DimX, Window::Dimension(0, ceil_to_multiple(_Nsize, _block_sizes.x_block), _block_sizes.x_block)); + window.set(Window::DimY, Window::Dimension(0, ceil_to_multiple(_Ksize, _block_sizes.k_block), _block_sizes.k_block)); + window.set(Window::DimZ, Window::Dimension(0, multis)); + + INEKernel::configure(window); +} + +template +void NEGEMMInterleavedPrepareBWrapperKernelTemplate::transform(const PrepareBWorkload &wl, const 
ThreadInfo &info) +{ + using strategy = typename Kernel::strategy; + + strategy strat(info.cpu_info); + strat.transforms.PrepareB(reinterpret_cast(_transformed_b->buffer() + wl._offset_transformed_b), + reinterpret_cast(_b->buffer() + wl._offset_b), + _b->info()->strides_in_bytes().y() / sizeof(To), + wl._x0, wl._xmax, wl._k0, wl._kmax, _transpose_b); +} + +template +void NEGEMMInterleavedPrepareBWrapperKernelTemplate::create_workloads(std::vector &workloads) +{ + for_each_element_in_window(window(), _b, _transformed_b, _Nsize, _Ksize, [&workloads](PrepareBWorkload && wl) + { + workloads.push_back(std::move(wl)); + }); +} + +template +void NEGEMMInterleavedPrepareBWrapperKernelTemplate::run(const Window &window, const ThreadInfo &info) +{ + ARM_COMPUTE_ERROR_ON_MISMATCHING_WINDOWS(window, INEKernel::window()); + for_each_element_in_window(window, _b, _transformed_b, _Nsize, _Ksize, [&](PrepareBWorkload && wl) + { + this->transform(wl, info); + }); +} + +template class NEGEMMInterleavedPrepareBWrapperKernelTemplate; +#ifdef __aarch64__ +template class NEGEMMInterleavedPrepareBWrapperKernelTemplate; +template class NEGEMMInterleavedPrepareBWrapperKernelTemplate; +template class NEGEMMInterleavedPrepareBWrapperKernelTemplate; +template class NEGEMMInterleavedPrepareBWrapperKernelTemplate; +#endif /* __aarch64__ */ + +#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC +template class NEGEMMInterleavedPrepareBWrapperKernelTemplate; +#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */ +} // namespace arm_compute diff --git a/src/core/NEON/kernels/assembly/NEGEMMInterleavedStrategies.h b/src/core/NEON/kernels/assembly/NEGEMMInterleavedStrategies.h new file mode 100644 index 0000000000..26a8ade461 --- /dev/null +++ b/src/core/NEON/kernels/assembly/NEGEMMInterleavedStrategies.h @@ -0,0 +1,95 @@ +/* + * Copyright (c) 2018 ARM Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +#ifndef __ARM_COMPUTE_NEGEMMINTERLEAVEDSTRATEGIES_H__ +#define __ARM_COMPUTE_NEGEMMINTERLEAVEDSTRATEGIES_H__ + +#include "../arm_gemm/utils.hpp" +#include "arm_gemm.hpp" + +#include "../arm_gemm/mergeresults.hpp" +#include "../arm_gemm/transform.hpp" + +#include "../arm_gemm/kernels/a32_sgemm_8x6.hpp" +#include "../arm_gemm/kernels/a64_gemm_s8_12x8.hpp" +#include "../arm_gemm/kernels/a64_gemm_s8_4x4.hpp" +#include "../arm_gemm/kernels/a64_gemm_u8_12x8.hpp" +#include "../arm_gemm/kernels/a64_gemm_u8_4x4.hpp" +#include "../arm_gemm/kernels/a64_hgemm_24x8.hpp" +#include "../arm_gemm/kernels/a64_sgemm_12x8.hpp" + +namespace arm_compute +{ +namespace +{ +template +struct Kernel +{ +}; + +#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC +template <> +struct Kernel +{ + using strategy = arm_gemm::hgemm_24x8; +}; +#endif /*__ARM_FEATURE_FP16_VECTOR_ARITHMETIC */ +#ifdef __aarch64__ +template <> +struct Kernel +{ + using strategy = arm_gemm::sgemm_12x8; +}; +template <> +struct Kernel +{ + using strategy = arm_gemm::gemm_s8_4x4; +}; +template <> +struct Kernel +{ + using strategy = arm_gemm::gemm_u8_4x4; +}; + +//Use different strategies for 8bit dot product: +template <> +struct Kernel +{ + using strategy = arm_gemm::gemm_s8_12x8; +}; +template <> +struct Kernel +{ + using strategy = arm_gemm::gemm_u8_12x8; +}; +#else +template <> +struct Kernel +{ + using strategy = arm_gemm::sgemm_8x6; +}; +#endif /* __aarch64__ */ + +} // namespace +} // namespace arm_compute +#endif /* __ARM_COMPUTE_NEGEMMINTERLEAVEDSTRATEGIES_H__ */ diff --git a/src/core/NEON/kernels/assembly/NEGEMMInterleavedTransformAWrapper.cpp b/src/core/NEON/kernels/assembly/NEGEMMInterleavedTransformAWrapper.cpp new file mode 100644 index 0000000000..3b80a1f940 --- /dev/null +++ b/src/core/NEON/kernels/assembly/NEGEMMInterleavedTransformAWrapper.cpp @@ -0,0 +1,118 @@ +/* + * Copyright (c) 2018 ARM Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include "arm_compute/core/NEON/kernels/assembly/NEGEMMInterleavedTransformAWrapper.h" + +#include "NEGEMMInterleavedStrategies.h" +#include "arm_compute/core/Helpers.h" +#include "arm_compute/core/ITensor.h" +#include "arm_compute/core/Utils.h" +#include "arm_compute/core/Validate.h" +#include "arm_compute/core/WindowIterator.h" + +#include "utils/TypePrinter.h" + +namespace arm_compute +{ +template +void NEGEMMInterleavedTransformAWrapperTemplate::configure(const ITensor *a, ITensor *transformed_a, bool transpose_a, const Window &block_walker, + const INEGEMMWrapperKernel::Params ¶ms) +{ + _a = a; + _transformed_a = transformed_a; + _transpose_a = transpose_a; + _Ksize = params.K; + _Msize = params.M; + _k_multi_window = block_walker.shift_dimensions(1); // block_walker contains (M,K,Multi) --> shift by 1 to get rid of the "M" dimension +} + +template +void NEGEMMInterleavedTransformAWrapperTemplate::transform(const TransformAWorkload &wl, const ThreadInfo &info, const Window &batch_window, const Coordinates &start_offset, + const Coordinates &end_offset) +{ + using strategy = typename Kernel::strategy; + + strategy strat(info.cpu_info); + TensorAccessor a(*_a); + TensorAccessor transformed_a(*_transformed_a); + + if(_a->info()->data_layout() == DataLayout::NHWC) + { + // In the case of NHWC we want to interpret the output shape as 3D. Thus, the batch stride for A is + // the relevant multiple of the row stride. + const size_t nhwc_batch_stride = _a->info()->strides_in_bytes().y() * _Msize; + a.set_stride(2, nhwc_batch_stride); + } + + unsigned int last_m = 0; + //TODO: Create a new iterate_1D( DimY); + int last_y = -1; + auto window_iterator = arm_compute::create_window_iterator(batch_window, start_offset, end_offset, [&](const Coordinates & id) + { + if(id.y() != last_y) + { + last_y = id.y(); + unsigned int batch = id.y(); + unsigned int first_m = id.x(); + + if(first_m >= last_m) + return; + + strat.transforms.PrepareA(transformed_a(0, first_m, batch), + a(0, 0, batch, wl._multi), + a.stride(1), first_m, last_m, wl._k0, wl._kmax, _transpose_a); + } + }); + auto on_new_row_size = [&](unsigned int start, unsigned int end) + { + last_m = std::min(end, _Msize); + }; + window_iterator.iterate_2D(on_new_row_size); +} + +template +void NEGEMMInterleavedTransformAWrapperTemplate::create_workloads(std::vector &workloads) +{ + execute_window_loop(_k_multi_window, [&](const Coordinates & id) + { + const unsigned int k0 = id.x(); + const unsigned int multi = id.y(); + const unsigned int kmax = std::min(k0 + _k_multi_window.x().step(), _Ksize); + + workloads.push_back(TransformAWorkload(k0, kmax, multi)); + }); +} + +template class NEGEMMInterleavedTransformAWrapperTemplate; +#ifdef __aarch64__ +template class NEGEMMInterleavedTransformAWrapperTemplate; +template class NEGEMMInterleavedTransformAWrapperTemplate; +template class NEGEMMInterleavedTransformAWrapperTemplate; +template class NEGEMMInterleavedTransformAWrapperTemplate; +#endif /* __aarch64__ */ + +#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC +template class NEGEMMInterleavedTransformAWrapperTemplate; +#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */ +} // namespace arm_compute diff --git a/src/core/NEON/kernels/assembly/NEGEMMNativeWrapperKernel.cpp b/src/core/NEON/kernels/assembly/NEGEMMNativeWrapperKernel.cpp index ea6a06cada..e452dfbcf2 100644 --- a/src/core/NEON/kernels/assembly/NEGEMMNativeWrapperKernel.cpp +++ b/src/core/NEON/kernels/assembly/NEGEMMNativeWrapperKernel.cpp @@ -42,6 +42,8 @@ namespace arm_compute { 
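+// Wrap the per-type Kernel strategy selection in an anonymous namespace so that it stays
+// local to this translation unit and does not clash with the Kernel traits introduced in
+// NEGEMMInterleavedStrategies.h.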
+namespace
+{
 template <typename To, typename Tr>
 struct Kernel
 {
@@ -55,6 +57,8 @@ struct Kernel
 };
 #endif /* __aarch64__ */
 
+} // namespace
+
 template <typename To, typename Tr>
 Window NEGEMMNativeWrapperKernel<To, Tr>::configure_internal(float alpha, float beta)
 {
-- cgit v1.2.1
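
Note on the transformed-B sizing (illustration only, not part of the patch): the standalone program below mirrors the arithmetic of get_B_pretransposed_array_size() so the buffer layout can be sanity-checked in isolation. The problem, block and strategy sizes are made-up examples, chosen to respect the x_block % out_width() == 0 and k_block % k_unroll() == 0 assertions in the kernel; real values come from the selected arm_gemm strategy and calculate_block_sizes(). Everything else is plain C++ with no Compute Library dependency.

#include <cstddef>
#include <cstdio>

// Round 'v' up to the next multiple of 'm' (same semantics as ceil_to_multiple in the patch).
static std::size_t ceil_to_multiple(std::size_t v, std::size_t m)
{
    return ((v + m - 1) / m) * m;
}

int main()
{
    // Hypothetical sizes: an N x K panel of B, blocked into x_block x k_block tiles,
    // processed by a strategy with out_width = 12 and k_unroll = 1.
    const std::size_t N = 1000, K = 500;
    const std::size_t x_block = 240, k_block = 128; // 240 is a multiple of out_width
    const std::size_t out_width = 12, k_unroll = 1;

    const std::size_t num_full_x = N / x_block;
    const std::size_t num_full_k = K / k_block;

    // Leftover edges are rounded up so every stored block spans a whole number of
    // out_width / k_unroll steps, as in get_B_pretransposed_array_size().
    const std::size_t left_over_x = ceil_to_multiple(N % x_block, out_width);
    const std::size_t left_over_k = ceil_to_multiple(K % k_block, k_unroll);

    std::size_t total = num_full_k * k_block * (num_full_x * x_block + left_over_x);
    total += left_over_k * (num_full_x * x_block + left_over_x);

    // 'total' counts elements; the kernel multiplies by sizeof(To) to get bytes.
    std::printf("transformed B elements per multi: %zu\n", total);
    return 0;
}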