From 48b3ef89de5f21a0169d8416e3d54081f82c7bf8 Mon Sep 17 00:00:00 2001
From: Georgios Pinitas <georgios.pinitas@arm.com>
Date: Mon, 14 Oct 2019 19:03:09 +0100
Subject: COMPMID-2577: Fuse bias addition and activation in gemm assembly
 kernels

Change-Id: I7f52112d2d05b1ea3d3f3d4b19b8eafab05d6c44
Signed-off-by: Georgios Pinitas <georgios.pinitas@arm.com>
Reviewed-on: https://review.mlplatform.org/c/2141
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Pablo Marquez <pablo.tello@arm.com>
---
 arm_compute/core/NEON/kernels/assembly/Helpers.h   |   6 +-
 .../NEGEMMInterleavedMatrixMultiplyWrapper.h       | 233 -----------------
 .../NEGEMMInterleavedPrepareBWrapperKernel.h       | 251 --------------------
 .../assembly/NEGEMMInterleavedTransformAWrapper.h  | 173 --------------
 .../kernels/assembly/NEGEMMNativeWrapperKernel.h   |  52 -----
 .../core/NEON/kernels/assembly/arm_gemm.hpp        |  34 ++-
 .../core/NEON/kernels/assembly/gemm_common.hpp     |  23 +-
 7 files changed, 43 insertions(+), 729 deletions(-)
 delete mode 100644 arm_compute/core/NEON/kernels/assembly/NEGEMMInterleavedMatrixMultiplyWrapper.h
 delete mode 100644 arm_compute/core/NEON/kernels/assembly/NEGEMMInterleavedPrepareBWrapperKernel.h
 delete mode 100644 arm_compute/core/NEON/kernels/assembly/NEGEMMInterleavedTransformAWrapper.h
 delete mode 100644 arm_compute/core/NEON/kernels/assembly/NEGEMMNativeWrapperKernel.h

diff --git a/arm_compute/core/NEON/kernels/assembly/Helpers.h b/arm_compute/core/NEON/kernels/assembly/Helpers.h
index e2a46e96a3..092ce400d1 100644
--- a/arm_compute/core/NEON/kernels/assembly/Helpers.h
+++ b/arm_compute/core/NEON/kernels/assembly/Helpers.h
@@ -47,8 +47,7 @@ struct BlockSizes
  * @param[in] ci                CPU information.
  * @param[in] num_threads       Maximum number of threads that might be used for the calculations.
  * @param[in] p                 M, N, K sizes.
- * @param[in] alpha             Alpha value.
- * @param[in] beta              Beta value.
+ * @param[in] activation        Activation to be fused with the GEMM.
  * @param[in] pretranspose_hint Is B also pretransposed?
  *
  * @return Kernel description that the assembly heuristics picked for the given configuration
@@ -57,8 +56,7 @@ arm_gemm::KernelDescription get_gemm_info(DataType input_type,
                                           const CPUInfo                      &ci,
                                           const unsigned int                  num_threads,
                                           const INEGEMMWrapperKernel::Params &p,
-                                          float                               alpha,
-                                          float                               beta,
+                                          arm_gemm::Activation                activation,
                                           bool                                pretranspose_hint);
 
 /** Calculate the recommended block sizes to use based on the CPU cache sizes and the strategy which will be used
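An illustrative aside, not part of the patch: with alpha/beta replaced by an activation descriptor, a caller now queries the heuristics roughly as below. This is a minimal sketch; the data type, thread count and pretranspose hint are invented for illustration.

    // Ask the assembly heuristics which kernel they would pick for an F32 GEMM
    // with a fused bounded ReLU capped at 6.0f.
    arm_gemm::KernelDescription pick_kernel(const arm_compute::CPUInfo                      &ci,
                                            const arm_compute::INEGEMMWrapperKernel::Params &p)
    {
        const arm_gemm::Activation act(arm_gemm::Activation::Type::BoundedReLU, 6.0f, 0.0f);
        return arm_compute::get_gemm_info(arm_compute::DataType::F32, ci, /* num_threads */ 4, p, act, /* pretranspose_hint */ true);
    }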
diff --git a/arm_compute/core/NEON/kernels/assembly/NEGEMMInterleavedMatrixMultiplyWrapper.h b/arm_compute/core/NEON/kernels/assembly/NEGEMMInterleavedMatrixMultiplyWrapper.h
deleted file mode 100644
index 641f88ee5f..0000000000
--- a/arm_compute/core/NEON/kernels/assembly/NEGEMMInterleavedMatrixMultiplyWrapper.h
+++ /dev/null
@@ -1,233 +0,0 @@
-/*
- * Copyright (c) 2018-2019 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef __ARM_COMPUTE_NEGEMMINTERLEAVEDMATRIXMULTIPLYWRAPPER_H__
-#define __ARM_COMPUTE_NEGEMMINTERLEAVEDMATRIXMULTIPLYWRAPPER_H__
-
-#include "arm_compute/core/NEON/kernels/assembly/Helpers.h"
-
-#include "arm_compute/core/Helpers.h"
-#include "arm_compute/core/ITensor.h"
-#include "arm_compute/core/NEON/kernels/assembly/INEGEMMWrapperKernel.h"
-#include "arm_compute/core/Utils.h"
-#include "arm_compute/core/Validate.h"
-#include "arm_compute/core/Window.h"
-#include "arm_compute/core/WindowIterator.h"
-
-namespace arm_compute
-{
-class ITensor;
-
-/** Unit of work for @ref NEGEMMInterleavedMatrixMultiplyWrapper to process */
-struct MatrixMultiplyWorkload
-{
-    /** Constructor
-     *
-     * @param[in] offset_transformed_b Offset from the start of transformed_b's allocation.
-     * @param[in] x0                   First value to process along the X dimension (N).
-     * @param[in] xmax                 Last value to process along the X dimension (N).
-     * @param[in] k0                   First value to process along the K dimension.
-     * @param[in] kmax                 Last value to process along the K dimension.
-     * @param[in] multi                Multi index.
-     * @param[in] kern_k               Number of elements along K actually processed by the kernel.
-     * @param[in] bblocks              Number of x_block processed by the kernel.
-     */
-    MatrixMultiplyWorkload(unsigned int offset_transformed_b, unsigned int x0, unsigned int xmax, unsigned int k0, unsigned int kmax, unsigned int multi, int kern_k, int bblocks)
-        : _offset_transformed_b(offset_transformed_b), _x0(x0), _xmax(xmax), _k0(k0), _kmax(kmax), _multi(multi), _kern_k(kern_k), _bblocks(bblocks)
-    {
-    }
-    unsigned int _offset_transformed_b; /**< Offset from the start of transformed_b's allocation.*/
-    unsigned int _x0;                   /**< First value to process along the X dimension (N). */
-    unsigned int _xmax;                 /**< Last value to process along the X dimension (N). */
-    unsigned int _k0;                   /**< First value to process along the K dimension. */
-    unsigned int _kmax;                 /**< Last value to process along the K dimension. */
-    unsigned int _multi;                /**< Multi index. */
-    int          _kern_k;               /**< Number of elements along K actually processed by the kernel. */
-    int          _bblocks;              /**< Number of x_block processed by the kernel. */
-};
-
-/** Common interface for the templated wrappers around the matrix multiply NEON assembly implementations */
-class NEGEMMInterleavedMatrixMultiplyWrapper
-{
-public:
-    /** Transform the block at the given coordinates
-     *
-     * @param[in] wl           Workload to process.
-     * @param[in] info         Information about the current thread.
-     * @param[in] batch_window Window containing iteration information for the M and batch dimensions.
-     * @param[in] start_offset Offset relative to the beginning of batch_window to start the processing from.
-     * @param[in] end_offset   Offset relative to the beginning of batch_window to stop the processing.
-     */
-    virtual void transform(const MatrixMultiplyWorkload &wl, const ThreadInfo &info, const Window &batch_window, const Coordinates &start_offset, const Coordinates &end_offset) = 0;
-    /** Generate an array of workloads
-     *
-     * @param[out] workloads Container to store the generated workloads.
-     */
-    virtual void create_workloads(std::vector<MatrixMultiplyWorkload> &workloads) = 0;
-    /** Default destructor */
-    virtual ~NEGEMMInterleavedMatrixMultiplyWrapper() = default;
-};
-
-/** Equivalent to arm_gemm::GemmInterleaved's strategy::kernel() but using Compute Library types. */
-template <typename strategy>
-class NEGEMMInterleavedMatrixMultiplyWrapperTemplate : public NEGEMMInterleavedMatrixMultiplyWrapper
-{
-public:
-    /** Configure the matrix multiplication: C = alpha * A * B + beta * C
-     *
-     * @param[in]     prepared_a      Already reshaped matrix A.
-     * @param[in]     transformed_b   Already reshaped matrix B.
-     * @param[out]    tmp_c           Temporary buffer to be used to store intermediate results.
-     * @param[in,out] c               Result matrix C.
-     * @param[in]     block_walker    Window containing iteration information for the M and batch dimensions.
-     * @param[in]     block_sizes     Block sizes to use for the matrix multiplication (A & B must have been reshaped using these same block sizes).
-     * @param[in]     params          M, N, K sizes.
-     * @param[in]     gemm_info       GEMM meta-data.
-     * @param[in]     alpha           Alpha value.
-     * @param[in]     beta            Beta value.
-     * @param[in]     max_num_threads Maximum number of threads that might be used for the calculations.
-     */
-    void configure(const ITensor *prepared_a, const ITensor *transformed_b, ITensor *tmp_c, ITensor *c, const Window &block_walker, const BlockSizes &block_sizes,
-                   const INEGEMMWrapperKernel::Params &params, const GEMMInfo &gemm_info, float alpha, float beta, unsigned int max_num_threads)
-    {
-        _prepared_a          = prepared_a;
-        _transformed_b       = transformed_b;
-        _tmp_c               = tmp_c;
-        _c                   = c;
-        _block_walker        = block_walker;
-        _block_sizes         = block_sizes;
-        _params              = params;
-        _b_is_pretransposed  = gemm_info.pretranpose_B();
-        _reinterpret_c_as_3d = gemm_info.depth_output_gemm3d() != 0;
-        _alpha               = alpha;
-        _beta                = beta;
-
-        auto_init_if_empty(*_tmp_c->info(), c->info()->clone()->set_tensor_shape(TensorShape{ _block_sizes.x_block * strategy::out_height(), max_num_threads }));
-    }
-
-    // Inherited methods overridden:
-    void transform(const MatrixMultiplyWorkload &wl, const ThreadInfo &info, const Window &batch_window, const Coordinates &start_offset, const Coordinates &end_offset) override
-    {
-        strategy strat(info.cpu_info);
-        TensorAccessor<typename strategy::operand_type> prepared_a(*_prepared_a);
-        TensorAccessor<typename strategy::operand_type> transformed_b(*_transformed_b);
-        TensorAccessor<typename strategy::result_type>  c(*_c);
-        TensorAccessor<typename strategy::result_type>  tmp_c(*_tmp_c);
-
-        // Handle 3d output re-interpretation
-        if(_reinterpret_c_as_3d)
-        {
-            Strides c_strides_as_3d = _c->info()->strides_in_bytes();
-            c_strides_as_3d.remove(Window::DimZ);
-            c.set_strides(c_strides_as_3d);
-        }
-
-        int                              prev_batch = -1;
-        typename strategy::operand_type *a_ptr      = nullptr;
-        auto window_iterator = arm_compute::create_window_iterator(batch_window, start_offset, end_offset, [&](const Coordinates & id)
-        {
-            const unsigned int y     = id.x();
-            const unsigned int batch = id.y();
-            const unsigned int ymax  = std::min(_params.M, y + strategy::out_height());
-
-            // If it's the first block of a new batch then reset the pointer to A.
-            if(prev_batch != static_cast<int>(batch))
-            {
-                const unsigned int first_m = id.x();
-                a_ptr                      = prepared_a(0, first_m, batch);
-                prev_batch                 = batch;
-            }
-
-            // Call matrix multiply assembly routine to process the block:
-            strat.kernel(a_ptr, transformed_b(wl._offset_transformed_b), tmp_c(0, info.thread_id), 1, wl._bblocks, wl._kern_k);
-            a_ptr += strategy::out_height() * wl._kern_k;
-
-            // Merge the result with the other blocks' results:
-            strat.transforms.Merge(c(0, 0, batch, wl._multi), tmp_c(0, info.thread_id), c.stride(1), y, ymax, wl._x0, wl._xmax, _alpha, (wl._k0 == 0 ? _beta : static_cast<typename strategy::result_type>(1)));
-        });
-        auto on_new_row_size = [&](unsigned int, unsigned int)
-        {
-            //Nothing to do
-        };
-        window_iterator.iterate_2D(on_new_row_size);
-    }
-    void create_workloads(std::vector<MatrixMultiplyWorkload> &workloads) override
-    {
-        unsigned int offset_transformed_b = 0;
-        unsigned int wl_index             = 0;
-        unsigned int num_buffers = 0, reshaped_block_size = 0;
-
-        if(!_b_is_pretransposed)
-        {
-            num_buffers         = _transformed_b->info()->tensor_shape()[1];
-            reshaped_block_size = _transformed_b->info()->tensor_shape()[0];
-        }
-        execute_window_loop(_block_walker, [&](const Coordinates & id)
-        {
-            const unsigned int x0    = id.x();
-            const unsigned int k0    = id.y();
-            const unsigned int multi = id.z();
-
-            const unsigned int xmax = std::min(x0 + _block_walker.x().step(), _params.N);
-            const unsigned int kmax = std::min(k0 + _block_walker.y().step(), _params.K);
-
-            // Figure out how many "K" the kernel will actually process.
-            const int kern_k  = ceil_to_multiple(kmax - k0, strategy::k_unroll());
-            const int bblocks = DIV_CEIL(xmax - x0, strategy::out_width());
-
-            workloads.push_back(MatrixMultiplyWorkload(offset_transformed_b, x0, xmax, k0, kmax, multi, kern_k, bblocks));
-
-            if(_b_is_pretransposed)
-            {
-                offset_transformed_b += bblocks * strategy::out_width() * kern_k;
-            }
-            else
-            {
-                // Rotate through the BufferManager's buffers:
-                wl_index++;
-                offset_transformed_b = (wl_index % num_buffers) * reshaped_block_size;
-            }
-        });
-    }
-
-private:
-    const ITensor *_prepared_a
-    {
-        nullptr
-    };
-    const ITensor               *_transformed_b{ nullptr };
-    ITensor                     *_tmp_c{ nullptr };
-    ITensor                     *_c{ nullptr };
-    unsigned int                 _Nsize{ 0 };
-    unsigned int                 _Ksize{ 0 };
-    bool                         _transpose_b{ false };
-    BlockSizes                   _block_sizes{};
-    INEGEMMWrapperKernel::Params _params{};
-    Window                       _block_walker{};
-    bool                         _b_is_pretransposed{ false };
-    bool                         _reinterpret_c_as_3d{ false };
-    typename strategy::result_type _alpha{};
-    typename strategy::result_type _beta{};
-};
-} // namespace arm_compute
-#endif /* __ARM_COMPUTE_NEGEMMINTERLEAVEDMATRIXMULTIPLYWRAPPER_H__ */
diff --git a/arm_compute/core/NEON/kernels/assembly/NEGEMMInterleavedPrepareBWrapperKernel.h b/arm_compute/core/NEON/kernels/assembly/NEGEMMInterleavedPrepareBWrapperKernel.h
deleted file mode 100644
index ba3223f66d..0000000000
--- a/arm_compute/core/NEON/kernels/assembly/NEGEMMInterleavedPrepareBWrapperKernel.h
+++ /dev/null
@@ -1,251 +0,0 @@
-/*
- * Copyright (c) 2018-2019 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef __ARM_COMPUTE_NEGEMMINTERLEAVEDPREPAREBWRAPPERKERNEL_H__
-#define __ARM_COMPUTE_NEGEMMINTERLEAVEDPREPAREBWRAPPERKERNEL_H__
-
-#include "arm_compute/core/Helpers.h"
-#include "arm_compute/core/ITensor.h"
-#include "arm_compute/core/NEON/INEKernel.h"
-#include "arm_compute/core/NEON/kernels/assembly/Helpers.h"
-#include "arm_compute/core/NEON/kernels/assembly/INEGEMMWrapperKernel.h"
-#include "arm_compute/core/Utils.h"
-#include "arm_compute/core/Validate.h"
-
-namespace arm_compute
-{
-/** Unit of work for @ref NEGEMMInterleavedPrepareBWrapperKernel to process */
-struct PrepareBWorkload
-{
-    /** Constructor
-     *
-     * @param[in] offset_b             Offset from the start of b's allocation.
-     * @param[in] offset_transformed_b Offset from the start of transformed_b's allocation.
-     * @param[in] x0                   First value to process along the X dimension (N).
-     * @param[in] xmax                 Last value to process along the X dimension (N).
-     * @param[in] k0                   First value to process along the K dimension.
-     * @param[in] kmax                 Last value to process along the K dimension.
-     */
-    PrepareBWorkload(unsigned int offset_b, unsigned int offset_transformed_b, unsigned int x0, unsigned int xmax, unsigned int k0, unsigned int kmax)
-        : _offset_b(offset_b), _offset_transformed_b(offset_transformed_b), _x0(x0), _xmax(xmax), _k0(k0), _kmax(kmax)
-    {
-    }
-    unsigned int _offset_b;             /**< Offset from the start of b's allocation.*/
-    unsigned int _offset_transformed_b; /**< Offset from the start of transformed_b's allocation.*/
-    unsigned int _x0;                   /**< First value to process along the X dimension (N). */
-    unsigned int _xmax;                 /**< Last value to process along the X dimension (N). */
-    unsigned int _k0;                   /**< First value to process along the K dimension. */
-    unsigned int _kmax;                 /**< Last value to process along the K dimension. */
-};
-
-namespace detail
-{
-// Call the lambda function for each workload generated by the passed window.
-template <typename strategy, bool use_buffer_manager, typename Lambda>
-void for_each_element_in_window(const Window &window, const ITensor *b, ITensor *transformed_b, unsigned int N, unsigned int K, Lambda &&lambda)
-{
-    unsigned int wl_index    = 0;
-    unsigned int num_buffers = 0, reshaped_block_size = 0;
-
-    if(use_buffer_manager)
-    {
-        num_buffers         = transformed_b->info()->tensor_shape()[1];
-        reshaped_block_size = transformed_b->info()->strides_in_bytes().y();
-    }
-
-    unsigned int offset_transformed_b = transformed_b->info()->offset_first_element_in_bytes();
-    execute_window_loop(window, [&](const Coordinates & coordinates)
-    {
-        const unsigned int x0    = coordinates.x();
-        const unsigned int k0    = coordinates.y();
-        const unsigned int multi = coordinates.z();
-
-        const unsigned int offset_b = b->info()->offset_element_in_bytes(Coordinates(0, 0, multi));
-        const unsigned int xmax     = std::min(x0 + window.x().step(), N);
-        const unsigned int kmax     = std::min(k0 + window.y().step(), K);
-
-        /* Figure out the size of each block. */
-        unsigned int x_size = (xmax - x0);
-        unsigned int k_size = (kmax - k0);
-
-        /* Round sizes up as needed. */
-        x_size = ceil_to_multiple(x_size, strategy::out_width());
-        k_size = ceil_to_multiple(k_size, strategy::k_unroll());
-
-        lambda(PrepareBWorkload(offset_b, offset_transformed_b, x0, xmax, k0, kmax));
-
-        //Each workload represents one block:
-        if(use_buffer_manager)
-        {
-            // Rotate through the BufferManager's buffers:
-            wl_index++;
-            offset_transformed_b = (wl_index % num_buffers) * reshaped_block_size;
-        }
-        else
-        {
-            offset_transformed_b += (x_size * k_size * sizeof(typename strategy::operand_type));
-        }
-    });
-}
-
-// Calculate the size of transformed_b:
-template <typename strategy>
-unsigned int get_B_pretransposed_array_size(unsigned int N, unsigned int K, const BlockSizes &bs, unsigned int multis)
-{
-    // How many full blocks do N / K contain ?
-    size_t num_full_k = K / bs.k_block;
-    size_t num_full_x = N / bs.x_block;
-
-    ARM_COMPUTE_ERROR_ON(bs.x_block % strategy::out_width() != 0);
-    ARM_COMPUTE_ERROR_ON(bs.k_block % strategy::k_unroll() != 0);
-
-    size_t normal_x_size = bs.x_block;
-    size_t normal_k_size = bs.k_block;
-
-    // Round up the leftovers to be a multiple of the strategy processing size:
-    size_t left_over_x_size = ceil_to_multiple(N % bs.x_block, strategy::out_width());
-    size_t left_over_k_size = ceil_to_multiple(K % bs.k_block, strategy::k_unroll());
-
-    // Calculate the total size of the buffer:
-    size_t total = num_full_k * normal_k_size * (num_full_x * normal_x_size + left_over_x_size);
-    total += left_over_k_size * (left_over_x_size + num_full_x * normal_x_size);
-
-    total *= multis;
-
-    return total;
-}
-} // namespace detail
-
-/** Common interface for the templated wrappers around the B reshape NEON assembly implementations */
-class NEGEMMInterleavedPrepareBWrapperKernel : public INEKernel
-{
-public:
-    /** Transform the block at the given coordinates
-     *
-     * @param[in] wl   Workload to process.
-     * @param[in] info Information about the current thread.
-     */
-    virtual void transform(const PrepareBWorkload &wl, const ThreadInfo &info) = 0;
-    /** Generate an array of workloads
-     *
-     * @param[out] workloads Container to store the generated workloads.
-     */
-    virtual void create_workloads(std::vector<PrepareBWorkload> &workloads) = 0;
-    /** Return the block_sizes used to reshape B
-     *
-     * The same block sizes must be used to reshape A and for the matrix multiplication
-     *
-     * @return The block sizes used to reshape B.
-     */
-    virtual BlockSizes block_sizes() const = 0;
-
-    // Inherited methods overridden:
-    const char *name() const override
-    {
-        return "NEGEMMInterleavedPrepareBWrapperKernel";
-    }
-
-    bool is_parallelisable() const override
-    {
-        return false; // Can't run on arbitrary windows but can be parallelised using an array of workloads
-    }
-};
-
-/** Equivalent to arm_gemm::GemmInterleaved's strategy::transforms::PrepareB() but using Compute Library types.
- */
-template <typename strategy>
-class NEGEMMInterleavedPrepareBWrapperKernelTemplate : public NEGEMMInterleavedPrepareBWrapperKernel
-{
-public:
-    /** Configure the reshape B routine.
-     *
-     * @param[in]  b             Input matrix B.
-     * @param[out] transformed_b Reshaped matrix B.
-     * @param[in]  transpose_b   Also transpose B ?
-     * @param[in]  ci            CPU information.
-     * @param[in]  params        M, N, K sizes.
-     */
-    void configure(const ITensor *b, ITensor *transformed_b, bool transpose_b, const CPUInfo &ci, const INEGEMMWrapperKernel::Params &params)
-    {
-        const unsigned int multis = b->info()->tensor_shape().z();
-        _Nsize                    = b->info()->tensor_shape().x();
-        _Ksize                    = b->info()->tensor_shape().y();
-        _b                        = b;
-        _transformed_b            = transformed_b;
-        _transpose_b              = transpose_b;
-
-        _block_sizes = calculate_block_sizes<strategy>(ci, params.M, params.N, params.K);
-
-        auto_init_if_empty(*transformed_b->info(), b->info()->clone()->set_tensor_shape(TensorShape{ detail::get_B_pretransposed_array_size<strategy>(_Nsize, _Ksize, _block_sizes, multis) }));
-
-        Window window;
-        window.set(Window::DimX, Window::Dimension(0, ceil_to_multiple(_Nsize, _block_sizes.x_block), _block_sizes.x_block));
-        window.set(Window::DimY, Window::Dimension(0, ceil_to_multiple(_Ksize, _block_sizes.k_block), _block_sizes.k_block));
-        window.set(Window::DimZ, Window::Dimension(0, multis));
-
-        INEKernel::configure(window);
-    }
-
-    // Inherited methods overridden:
-    void transform(const PrepareBWorkload &wl, const ThreadInfo &info) override
-    {
-        strategy strat(info.cpu_info);
-        strat.transforms.PrepareB(reinterpret_cast<typename strategy::operand_type *>(_transformed_b->buffer() + wl._offset_transformed_b),
-                                  reinterpret_cast<typename strategy::operand_type *>(_b->buffer() + wl._offset_b),
-                                  _b->info()->strides_in_bytes().y() / sizeof(typename strategy::operand_type),
-                                  wl._x0, wl._xmax, wl._k0, wl._kmax, _transpose_b);
-    }
-    void create_workloads(std::vector<PrepareBWorkload> &workloads) override
-    {
-        detail::for_each_element_in_window<strategy, true>(window(), _b, _transformed_b, _Nsize, _Ksize, [&workloads](PrepareBWorkload && wl)
-        {
-            workloads.push_back(std::move(wl));
-        });
-    }
-    void run(const Window &window, const ThreadInfo &info) override
-    {
-        ARM_COMPUTE_ERROR_ON_MISMATCHING_WINDOWS(window, INEKernel::window());
-        detail::for_each_element_in_window<strategy, false>(window, _b, _transformed_b, _Nsize, _Ksize, [&](PrepareBWorkload && wl)
-        {
-            this->transform(wl, info);
-        });
-    }
-    BlockSizes block_sizes() const override
-    {
-        return _block_sizes;
-    }
-
-private:
-    const ITensor *_b
-    {
-        nullptr
-    };
-    ITensor     *_transformed_b{ nullptr };
-    unsigned int _Nsize{ 0 };
-    unsigned int _Ksize{ 0 };
-    bool         _transpose_b{ false };
-    BlockSizes   _block_sizes{};
-};
-
-} // namespace arm_compute
-#endif /* __ARM_COMPUTE_NEGEMMINTERLEAVEDPREPAREBWRAPPERKERNEL_H__ */
diff --git a/arm_compute/core/NEON/kernels/assembly/NEGEMMInterleavedTransformAWrapper.h b/arm_compute/core/NEON/kernels/assembly/NEGEMMInterleavedTransformAWrapper.h
deleted file mode 100644
index c1fd86e453..0000000000
--- a/arm_compute/core/NEON/kernels/assembly/NEGEMMInterleavedTransformAWrapper.h
+++ /dev/null
@@ -1,173 +0,0 @@
-/*
- * Copyright (c) 2018-2019 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef __ARM_COMPUTE_NEGEMMINTERLEAVEDTRANSFORMAWRAPPER_H__
-#define __ARM_COMPUTE_NEGEMMINTERLEAVEDTRANSFORMAWRAPPER_H__
-
-#include "arm_compute/core/CPP/CPPTypes.h"
-#include "arm_compute/core/Helpers.h"
-#include "arm_compute/core/ITensor.h"
-#include "arm_compute/core/NEON/kernels/assembly/INEGEMMWrapperKernel.h"
-#include "arm_compute/core/Utils.h"
-#include "arm_compute/core/Validate.h"
-#include "arm_compute/core/Window.h"
-#include "arm_compute/core/WindowIterator.h"
-
-namespace arm_compute
-{
-class ITensor;
-
-/** Unit of work for @ref NEGEMMInterleavedTransformAWrapper to process */
-struct TransformAWorkload
-{
-    /** Constructor
-     *
-     * @param[in] k0    First value to process along the K dimension.
-     * @param[in] kmax  Last value to process along the K dimension.
-     * @param[in] multi Multi index.
-     */
-    TransformAWorkload(unsigned int k0, unsigned int kmax, unsigned int multi)
-        : _k0(k0), _kmax(kmax), _multi(multi)
-    {
-    }
-    unsigned int _k0;    /**< First value to process along the K dimension. */
-    unsigned int _kmax;  /**< Last value to process along the K dimension. */
-    unsigned int _multi; /**< Multi index. */
-};
-
-/** Equivalent to arm_gemm::GemmInterleaved's A transform but using Compute Library types. */
-class NEGEMMInterleavedTransformAWrapper
-{
-public:
-    /** Transform the block at the given coordinates
-     *
-     * @param[in] wl           Workload to process.
-     * @param[in] info         Information about the current thread.
-     * @param[in] batch_window Window containing iteration information for the M and batch dimensions.
-     * @param[in] start_offset Offset relative to the beginning of batch_window to start the processing from.
-     * @param[in] end_offset   Offset relative to the beginning of batch_window to stop the processing.
-     */
-    virtual void transform(const TransformAWorkload &wl, const ThreadInfo &info, const Window &batch_window, const Coordinates &start_offset, const Coordinates &end_offset) = 0;
-    /** Generate an array of workloads
-     *
-     * @param[out] workloads Container to store the generated workloads.
-     */
-    virtual void create_workloads(std::vector<TransformAWorkload> &workloads) = 0;
-    /** Default destructor */
-    virtual ~NEGEMMInterleavedTransformAWrapper() = default;
-};
-
-/** Type specialisations of @ref NEGEMMInterleavedTransformAWrapper */
-template <typename strategy>
-class NEGEMMInterleavedTransformAWrapperTemplate : public NEGEMMInterleavedTransformAWrapper
-{
-public:
-    /** Configure the reshape A routine.
-     *
-     * @param[in]  a                   Input matrix A.
-     * @param[out] transformed_a       Reshaped matrix A.
-     * @param[in]  transpose_a         Also transpose A ?
-     * @param[in]  reinterpret_a_as_3d Re-interpret as 3D ?
-     * @param[in]  block_walker        Window representing the layout of the matrix's blocks.
-     * @param[in]  params              M, N, K sizes.
-     */
-    void configure(const ITensor *a, ITensor *transformed_a, bool transpose_a, bool reinterpret_a_as_3d, const Window &block_walker, const INEGEMMWrapperKernel::Params &params)
-    {
-        _a                   = a;
-        _transformed_a       = transformed_a;
-        _transpose_a         = transpose_a;
-        _reinterpret_a_as_3d = reinterpret_a_as_3d;
-        _Ksize               = params.K;
-        _Msize               = params.M;
-        _k_multi_window      = block_walker.shift_dimensions(1); // block_walker contains (M,K,Multi) --> shift by 1 to get rid of the "M" dimension
-    }
-
-    // Inherited methods overridden:
-    void transform(const TransformAWorkload &wl, const ThreadInfo &info, const Window &batch_window, const Coordinates &start_offset, const Coordinates &end_offset) override
-    {
-        strategy strat(info.cpu_info);
-        TensorAccessor<typename strategy::operand_type> a(*_a);
-        TensorAccessor<typename strategy::operand_type> transformed_a(*_transformed_a);
-
-        // Handle 3d input re-interpretation
-        if(_reinterpret_a_as_3d)
-        {
-            Strides a_strides_as_3d = _a->info()->strides_in_bytes();
-            a_strides_as_3d.remove(Window::DimZ);
-            a.set_strides(a_strides_as_3d);
-        }
-
-        unsigned int last_m = 0;
-        //TODO: Create a new iterate_1D( DimY);
-        int last_y = -1;
-        auto window_iterator = arm_compute::create_window_iterator(batch_window, start_offset, end_offset, [&](const Coordinates & id)
-        {
-            if(id.y() != last_y)
-            {
-                last_y                   = id.y();
-                unsigned int batch       = id.y();
-                unsigned int first_m     = id.x();
-
-                if(first_m >= last_m)
-                    return;
-
-                strat.transforms.PrepareA(transformed_a(0, first_m, batch),
-                                          a(0, 0, batch, wl._multi),
-                                          a.stride(1), first_m, last_m, wl._k0, wl._kmax, _transpose_a);
-            }
-        });
-        auto on_new_row_size = [&](unsigned int, unsigned int end)
-        {
-            last_m = std::min(end, _Msize);
-        };
-        window_iterator.iterate_2D(on_new_row_size);
-    }
-    void create_workloads(std::vector<TransformAWorkload> &workloads) override
-    {
-        execute_window_loop(_k_multi_window, [&](const Coordinates & id)
-        {
-            const unsigned int k0    = id.x();
-            const unsigned int multi = id.y();
-            const unsigned int kmax  = std::min(k0 + _k_multi_window.x().step(), _Ksize);
-
-            workloads.push_back(TransformAWorkload(k0, kmax, multi));
-        });
-    }
-
-private:
-    const ITensor *_a
-    {
-        nullptr
-    };
-    ITensor     *_transformed_a{ nullptr };
-    unsigned int _Msize{ 0 };
-    unsigned int _Ksize{ 0 };
-    bool         _transpose_a{ false };
-    bool         _reinterpret_a_as_3d{ false };
-    Window       _k_multi_window{};
-};
-} // namespace arm_compute
-#endif /* __ARM_COMPUTE_NEGEMMINTERLEAVEDTRANSFORMAWRAPPER_H__ */
diff --git a/arm_compute/core/NEON/kernels/assembly/NEGEMMNativeWrapperKernel.h b/arm_compute/core/NEON/kernels/assembly/NEGEMMNativeWrapperKernel.h
deleted file mode 100644
index 73a0d7f05f..0000000000
--- a/arm_compute/core/NEON/kernels/assembly/NEGEMMNativeWrapperKernel.h
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * Copyright (c) 2018 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef __ARM_COMPUTE_NEGEMMNATIVEWRAPPERKERNEL_H__
-#define __ARM_COMPUTE_NEGEMMNATIVEWRAPPERKERNEL_H__
-
-#include "INEGEMMWrapperKernel.h"
-
-namespace arm_compute
-{
-/** Equivalent to arm_gemm::GemmNative but using Compute Library types.
- */
-template <typename To, typename Tr>
-class NEGEMMNativeWrapperKernel : public INEGEMMWrapperKernel
-{
-public:
-    const char *name() const override
-    {
-        return "NEGEMMNativeWrapperKernel";
-    }
-
-protected:
-    // Inherited methods overridden:
-    Window configure_internal(float alpha, float beta) override;
-    void run_internal(const Window &window, const Coordinates &start_offset, const Coordinates &end_offset, const ThreadInfo &info) override;
-
-private:
-    Tr _beta{};
-};
-
-} // namespace arm_compute
-#endif /* __ARM_COMPUTE_NEGEMMNATIVEWRAPPERKERNEL_H__ */
diff --git a/arm_compute/core/NEON/kernels/assembly/arm_gemm.hpp b/arm_compute/core/NEON/kernels/assembly/arm_gemm.hpp
index 828b0f20a7..17faab18fd 100644
--- a/arm_compute/core/NEON/kernels/assembly/arm_gemm.hpp
+++ b/arm_compute/core/NEON/kernels/assembly/arm_gemm.hpp
@@ -65,7 +65,21 @@ struct GemmConfig
     GemmConfig() { }
 };
 
-template<typename T>
+struct Activation
+{
+    enum class Type {
+        None,
+        ReLU,
+        BoundedReLU
+    };
+
+    Type  type;
+    float param1;
+    float param2;
+
+    Activation(Type type=Type::None, float p1=0.0f, float p2=0.0f) : type(type), param1(p1), param2(p2) { }
+};
+
 struct GemmArgs
 {
 public:
@@ -77,8 +91,7 @@ public:
     unsigned int _nmulti;
     bool _trA;
     bool _trB;
-    T _alpha;
-    T _beta;
+    Activation _act;
     int _maxthreads;
     bool _pretransposed_hint;
     const GemmConfig *_cfg;
@@ -86,10 +99,10 @@ public:
     GemmArgs(const CPUInfo *ci, const unsigned int M, const unsigned int N,
              const unsigned int K, const unsigned int nbatches,
              const unsigned int nmulti, const bool trA, const bool trB,
-             const T alpha, const T beta, const int maxthreads,
+             Activation act, const int maxthreads,
              const bool pretransposed_hint, const GemmConfig *cfg=nullptr ) :
              _ci(ci), _Msize(M), _Nsize(N), _Ksize(K), _nbatches(nbatches), _nmulti(nmulti),
-             _trA(trA), _trB(trB), _alpha(alpha), _beta(beta), _maxthreads(maxthreads),
+             _trA(trA), _trB(trB), _act(act), _maxthreads(maxthreads),
              _pretransposed_hint(pretransposed_hint), _cfg(cfg)
     {
     }
@@ -99,6 +112,7 @@ struct ARequantizeLayer32
 {
 public:
     const int32_t *bias;
+    size_t bias_multi_stride;
     int32_t a_offset;
     int32_t b_offset;
     int32_t c_offset;
@@ -109,8 +123,8 @@ public:
     ARequantizeLayer32() = default;
 
-    ARequantizeLayer32(int32_t *b, int32_t ao, int32_t bo, int32_t co, int32_t rs, int32_t rm, int32_t minv, int32_t maxv) :
-        bias(b), a_offset(ao), b_offset(bo), c_offset(co), requant_shift(rs), requant_mul(rm), minval(minv), maxval(maxv)
+    ARequantizeLayer32(const int32_t *b, size_t bms, int32_t ao, int32_t bo, int32_t co, int32_t rs, int32_t rm, int32_t minv, int32_t maxv) :
+        bias(b), bias_multi_stride(bms), a_offset(ao), b_offset(bo), c_offset(co), requant_shift(rs), requant_mul(rm), minval(minv), maxval(maxv)
     {
     }
 };
@@ -128,13 +142,13 @@ using UniqueGemmCommon = std::unique_ptr<GemmCommon<Top, Tret> >;
 /* get_gemm_method(): Given the templated types and provided parameters,
  * which is the preferred method to implement this GEMM? */
 template<typename Top, typename Tret, class OutputStage = Nothing>
-KernelDescription get_gemm_method(const GemmArgs<Tret> &args, const OutputStage & ={});
+KernelDescription get_gemm_method(const GemmArgs &args, const OutputStage & ={});
 
 template<typename Top, typename Tret, class OutputStage = Nothing>
-UniqueGemmCommon<Top, Tret> gemm(const GemmArgs<Tret> &args, const OutputStage & ={});
+UniqueGemmCommon<Top, Tret> gemm(const GemmArgs &args, const OutputStage & ={});
 
 template<typename Top, typename Tret, class OutputStage = Nothing>
-std::vector<KernelDescription> get_compatible_kernels(const GemmArgs<Tret> &args, const OutputStage & ={});
+std::vector<KernelDescription> get_compatible_kernels(const GemmArgs &args, const OutputStage & ={});
 
 } // namespace arm_gemm
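As an illustrative aside rather than part of the diff: the net effect of the GemmArgs change on callers is that the fused activation is handed over at construction time instead of the old alpha/beta pair. A sketch, with invented sizes and flags:

    // Request a GEMM with a fused ReLU; alpha/beta no longer exist on GemmArgs.
    arm_gemm::Activation relu(arm_gemm::Activation::Type::ReLU);
    arm_gemm::GemmArgs   args(&ci, /* M */ 128, /* N */ 128, /* K */ 64,
                              /* nbatches */ 1, /* nmulti */ 1,
                              /* trA */ false, /* trB */ false, relu,
                              /* maxthreads */ 4, /* pretransposed_hint */ true);
    auto gemm = arm_gemm::gemm<float, float>(args);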
diff --git a/arm_compute/core/NEON/kernels/assembly/gemm_common.hpp b/arm_compute/core/NEON/kernels/assembly/gemm_common.hpp
index 1ae503cddb..d17fd5fe97 100644
--- a/arm_compute/core/NEON/kernels/assembly/gemm_common.hpp
+++ b/arm_compute/core/NEON/kernels/assembly/gemm_common.hpp
@@ -48,7 +48,8 @@ public:
      */
     virtual void set_arrays_generic(const void *A, const int lda, const int A_batch_stride, const int A_multi_stride,
                                     const void *B, const int ldb, /* batches share B */ const int B_multi_stride,
-                                    void *C, const int ldc, const int C_batch_stride, const int C_multi_stride) = 0;
+                                    void *C, const int ldc, const int C_batch_stride, const int C_multi_stride,
+                                    const void *bias, /* no row or batch stride needed */ const int bias_multi_stride) = 0;
 
     /* For threading, we divide the work into some number of units and work
      * out internally what unit corresponds to what work.  This returns the
@@ -97,7 +98,11 @@ public:
 
     /*** "Quantized bias" interface (optional) ***/
     /* Set the bias vector for quantized GEMMs */
-    virtual void set_quantized_bias(const int32_t *bias) { UNUSED(bias); }
+    virtual void set_quantized_bias(const int32_t *bias, size_t bias_multi_stride)
+    {
+        UNUSED(bias);
+        UNUSED(bias_multi_stride);
+    }
 
     // Destructor
     virtual ~IGemmCommon() { }
@@ -125,13 +130,16 @@ protected:
     int _ldc=0;
     int _C_batch_stride=0;
     int _C_multi_stride=0;
+    const Tr *_bias=nullptr;
+    int _bias_multi_stride=0;
 
 public:
     /* Pass in the pointers to the arrays to be operated on and their
      * strides (templated version with appropriate types). */
     virtual void set_arrays(const To *A, const int lda, const int A_batch_stride, const int A_multi_stride,
                             const To *B, const int ldb, /* batches share B */ const int B_multi_stride,
-                            Tr *C, const int ldc, const int C_batch_stride, const int C_multi_stride) {
+                            Tr *C, const int ldc, const int C_batch_stride, const int C_multi_stride,
+                            const Tr *bias, /* no row or batch stride needed */ const int bias_multi_stride) {
         _Aptr = A;
         _lda = lda;
         _A_batch_stride = A_batch_stride;
@@ -143,15 +151,19 @@ public:
         _ldc = ldc;
         _C_batch_stride = C_batch_stride;
         _C_multi_stride = C_multi_stride;
+        _bias = bias;
+        _bias_multi_stride = bias_multi_stride;
     }
 
     /* Implementation of the void * overload which casts its arguments to the appropriate type. */
     void set_arrays_generic(const void *A, const int lda, const int A_batch_stride, const int A_multi_stride,
                             const void *B, const int ldb, /* batches share B */ const int B_multi_stride,
-                            void *C, const int ldc, const int C_batch_stride, const int C_multi_stride) override {
+                            void *C, const int ldc, const int C_batch_stride, const int C_multi_stride,
+                            const void *bias, /* no row or batch stride needed */ const int bias_multi_stride) override {
         set_arrays(static_cast<const To *>(A), lda, A_batch_stride, A_multi_stride,
                    static_cast<const To *>(B), ldb, B_multi_stride,
-                   static_cast<Tr *>(C), ldc, C_batch_stride, C_multi_stride);
+                   static_cast<Tr *>(C), ldc, C_batch_stride, C_multi_stride,
+                   static_cast<const Tr *>(bias), bias_multi_stride);
     }
 
     /*** "Pretransposed" interface ***/
@@ -164,7 +176,6 @@ public:
     void pretranspose_B_array_generic(void *out, const void *in, const int row_stride, const int multi_stride) override {
         pretranspose_B_array(out, static_cast<const To *>(in), row_stride, multi_stride);
     }
-
 };
 
 } // namespace arm_gemm
-- 
cgit v1.2.1
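To round the change off, an illustrative sketch (pointer and stride names are placeholders, not from the patch) of how the widened set_arrays() contract feeds the fused bias into the assembly kernel:

    // The bias rides along with the usual operand descriptions; per the new
    // parameter's comment it needs only a multi stride, no row or batch stride.
    gemm->set_arrays(a_ptr, lda, a_batch_stride, a_multi_stride,
                     b_ptr, ldb, b_multi_stride,
                     c_ptr, ldc, c_batch_stride, c_multi_stride,
                     bias_ptr, /* bias_multi_stride */ 0);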