author    Georgios Pinitas <georgios.pinitas@arm.com>  2019-10-14 19:03:09 +0100
committer Georgios Pinitas <georgios.pinitas@arm.com>  2019-10-23 12:08:12 +0000
commit    48b3ef89de5f21a0169d8416e3d54081f82c7bf8 (patch)
tree      f857d733ccf446c704823dc7ac796a96eb55095e /arm_compute
parent    1dce3101ef8d77c8cf0af7dfd4af6595a0136b91 (diff)
download  ComputeLibrary-48b3ef89de5f21a0169d8416e3d54081f82c7bf8.tar.gz
COMPMID-2577: Fuse bias addition and activation in gemm assembly kernels
Change-Id: I7f52112d2d05b1ea3d3f3d4b19b8eafab05d6c44
Signed-off-by: Georgios Pinitas <georgios.pinitas@arm.com>
Reviewed-on: https://review.mlplatform.org/c/2141
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Pablo Marquez <pablo.tello@arm.com>
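
In effect, this patch moves the bias addition and the activation into the merge step of the assembly GEMM, so a single pass produces D = act(A*B + bias) instead of a GEMM followed by separate bias-add and activation kernels. The following is a minimal reference sketch of that fused semantics in plain C++; it is illustrative only and is not the kernel code touched by this patch.

// Reference semantics of the fused epilogue: d = act(a * b + bias).
// Illustrative only; in the library the work is done by the arm_gemm assembly kernels.
#include <algorithm>
#include <cstddef>

enum class ActType { None, ReLU, BoundedReLU };

static float apply_act(float v, ActType type, float upper)
{
    switch(type)
    {
        case ActType::ReLU:        return std::max(v, 0.0f);
        case ActType::BoundedReLU: return std::min(std::max(v, 0.0f), upper);
        default:                   return v;
    }
}

// d[m][n] = act(sum_k a[m][k] * b[k][n] + bias[n]), row-major buffers.
void gemm_bias_act_reference(const float *a, const float *b, const float *bias, float *d,
                             std::size_t M, std::size_t N, std::size_t K,
                             ActType type, float upper)
{
    for(std::size_t m = 0; m < M; ++m)
    {
        for(std::size_t n = 0; n < N; ++n)
        {
            float acc = (bias != nullptr) ? bias[n] : 0.0f;
            for(std::size_t k = 0; k < K; ++k)
            {
                acc += a[m * K + k] * b[k * N + n];
            }
            d[m * N + n] = apply_act(acc, type, upper);
        }
    }
}
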
Diffstat (limited to 'arm_compute')
-rw-r--r--  arm_compute/core/NEON/kernels/assembly/Helpers.h                                    6
-rw-r--r--  arm_compute/core/NEON/kernels/assembly/NEGEMMInterleavedMatrixMultiplyWrapper.h  233
-rw-r--r--  arm_compute/core/NEON/kernels/assembly/NEGEMMInterleavedPrepareBWrapperKernel.h  251
-rw-r--r--  arm_compute/core/NEON/kernels/assembly/NEGEMMInterleavedTransformAWrapper.h      173
-rw-r--r--  arm_compute/core/NEON/kernels/assembly/NEGEMMNativeWrapperKernel.h                52
-rw-r--r--  arm_compute/core/NEON/kernels/assembly/arm_gemm.hpp                                34
-rw-r--r--  arm_compute/core/NEON/kernels/assembly/gemm_common.hpp                             23
-rw-r--r--  arm_compute/runtime/NEON/functions/NEGEMM.h                                        36
-rw-r--r--  arm_compute/runtime/NEON/functions/NEGEMMAssemblyDispatch.h                        40
-rw-r--r--  arm_compute/runtime/NEON/functions/NEGEMMConvolutionLayer.h                        6
-rw-r--r--  arm_compute/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.h                  4
-rw-r--r--  arm_compute/runtime/NEON/functions/assembly/NEGEMMInterleavedWrapper.h           147
12 files changed, 85 insertions, 920 deletions
diff --git a/arm_compute/core/NEON/kernels/assembly/Helpers.h b/arm_compute/core/NEON/kernels/assembly/Helpers.h
index e2a46e96a3..092ce400d1 100644
--- a/arm_compute/core/NEON/kernels/assembly/Helpers.h
+++ b/arm_compute/core/NEON/kernels/assembly/Helpers.h
@@ -47,8 +47,7 @@ struct BlockSizes
* @param[in] ci CPU information.
* @param[in] num_threads Maximum number of threads that might be used for the calculations.
* @param[in] p M, N, K sizes.
- * @param[in] alpha Alpha value.
- * @param[in] beta Beta value.
+ * @param[in] activation Activation struct
* @param[in] pretranspose_hint Is B also pretransposed ?
*
* @return Kernel description that the assembly heuristics picked for the given configuration
@@ -57,8 +56,7 @@ arm_gemm::KernelDescription get_gemm_info(DataType in
const CPUInfo &ci,
const unsigned int num_threads,
const INEGEMMWrapperKernel::Params &p,
- float alpha,
- float beta,
+ arm_gemm::Activation activation,
bool pretranspose_hint);
/** Calculate the recommended block sizes to use based on the CPU cache sizes and the strategy which will be used
diff --git a/arm_compute/core/NEON/kernels/assembly/NEGEMMInterleavedMatrixMultiplyWrapper.h b/arm_compute/core/NEON/kernels/assembly/NEGEMMInterleavedMatrixMultiplyWrapper.h
deleted file mode 100644
index 641f88ee5f..0000000000
--- a/arm_compute/core/NEON/kernels/assembly/NEGEMMInterleavedMatrixMultiplyWrapper.h
+++ /dev/null
@@ -1,233 +0,0 @@
-/*
- * Copyright (c) 2018-2019 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef __ARM_COMPUTE_NEGEMMINTERLEAVEDMATRIXMULTIPLYWRAPPER_H__
-#define __ARM_COMPUTE_NEGEMMINTERLEAVEDMATRIXMULTIPLYWRAPPER_H__
-
-#include "arm_compute/core/NEON/kernels/assembly/Helpers.h"
-
-#include "arm_compute/core/Helpers.h"
-#include "arm_compute/core/ITensor.h"
-#include "arm_compute/core/NEON/kernels/assembly/INEGEMMWrapperKernel.h"
-#include "arm_compute/core/Utils.h"
-#include "arm_compute/core/Validate.h"
-#include "arm_compute/core/Window.h"
-#include "arm_compute/core/WindowIterator.h"
-
-namespace arm_compute
-{
-class ITensor;
-
-/** Unit of work for @ref NEGEMMInterleavedMatrixMultiplyWrapper to process */
-struct MatrixMultiplyWorkload
-{
- /** Constructor
- *
- * @param[in] offset_transformed_b Offset from the start of transformed_b's allocation.
- * @param[in] x0 First value to process along the X dimension (N).
- * @param[in] xmax Last value to process along the X dimension (N).
- * @param[in] k0 First value to process along the K dimension.
- * @param[in] kmax Last value to process along the K dimension.
- * @param[in] multi Multi index.
- * @param[in] kern_k Number of elements along K actually processed by the kernel.
- * @param[in] bblocks Number of x_block processed by the kernel.
- */
- MatrixMultiplyWorkload(unsigned int offset_transformed_b, unsigned int x0, unsigned int xmax, unsigned int k0, unsigned int kmax, unsigned int multi, int kern_k, int bblocks)
- : _offset_transformed_b(offset_transformed_b), _x0(x0), _xmax(xmax), _k0(k0), _kmax(kmax), _multi(multi), _kern_k(kern_k), _bblocks(bblocks)
- {
- }
- unsigned int _offset_transformed_b; /**< Offset from the start of transformed_b's allocation.*/
- unsigned int _x0; /**< First value to process along the X dimension (N). */
- unsigned int _xmax; /**< Last value to process along the X dimension (N). */
- unsigned int _k0; /**< First value to process along the K dimension. */
- unsigned int _kmax; /**< Last value to process along the K dimension. */
- unsigned int _multi; /**< Multi index. */
- int _kern_k; /**< Number of elements along K actually processed by the kernel. */
- int _bblocks; /**< Number of x_block processed by the kernel. */
-};
-
-/** Common interface for the templated wrappers around the matrix multiply NEON assembly implementations */
-class NEGEMMInterleavedMatrixMultiplyWrapper
-{
-public:
- /** Transform the block at the given coordinates
- *
- * @param[in] wl Workload to process.
- * @param[in] info Information about the current thread.
- * @param[in] batch_window Window containing iteration information for the M and batch dimensions.
- * @param[in] start_offset Offset relative to the beginning of batch_window to start the processing from.
- * @param[in] end_offset Offset relative to the beginning of batch_window to stop the processing.
- */
- virtual void transform(const MatrixMultiplyWorkload &wl, const ThreadInfo &info, const Window &batch_window, const Coordinates &start_offset, const Coordinates &end_offset) = 0;
- /** Generate an array of workloads
- *
- * @param[out] workloads Container to store the generated workloads.
- */
- virtual void create_workloads(std::vector<MatrixMultiplyWorkload> &workloads) = 0;
- /** Default destructor */
- virtual ~NEGEMMInterleavedMatrixMultiplyWrapper() = default;
-};
-
-/** Equivalent to arm_gemm::GemmInterleaved's strategy::kernel() but using Compute Library types. */
-template <typename strategy>
-class NEGEMMInterleavedMatrixMultiplyWrapperTemplate : public NEGEMMInterleavedMatrixMultiplyWrapper
-{
-public:
- /** Configure the matrix multiplication: C = alpha * A * B + beta * C
- *
- * @param[in] prepared_a Already reshaped matrix A.
- * @param[in] transformed_b Already reshaped matrix B.
- * @param[out] tmp_c Temporary buffer to be used to store intermediate results.
- * @param[in,out] c Result matrix C.
- * @param[in] block_walker Window containing iteration information for the M and batch dimensions.
- * @param[in] block_sizes Block sizes to use for the matrix multiplication (A & B must have been reshaped using these same block sizes).
- * @param[in] params M, N, K sizes.
- * @param[in] gemm_info GEMM meta-data
- * @param[in] alpha Alpha value
- * @param[in] beta Beta value
- * @param[in] max_num_threads Maximum number of threads that might be used for the calculations.
- */
- void configure(const ITensor *prepared_a, const ITensor *transformed_b, ITensor *tmp_c, ITensor *c, const Window &block_walker, const BlockSizes &block_sizes,
- const INEGEMMWrapperKernel::Params &params, const GEMMInfo &gemm_info, float alpha, float beta, unsigned int max_num_threads)
- {
- _prepared_a = prepared_a;
- _transformed_b = transformed_b;
- _tmp_c = tmp_c;
- _c = c;
- _block_walker = block_walker;
- _block_sizes = block_sizes;
- _params = params;
- _b_is_pretransposed = gemm_info.pretranpose_B();
- _reinterpret_c_as_3d = gemm_info.depth_output_gemm3d() != 0;
- _alpha = alpha;
- _beta = beta;
-
- auto_init_if_empty(*_tmp_c->info(), c->info()->clone()->set_tensor_shape(TensorShape{ _block_sizes.x_block * strategy::out_height(), max_num_threads }));
- }
-
- // Inherited methods overridden:
- void transform(const MatrixMultiplyWorkload &wl, const ThreadInfo &info, const Window &batch_window, const Coordinates &start_offset, const Coordinates &end_offset) override
- {
- strategy strat(info.cpu_info);
- TensorAccessor<typename strategy::operand_type> prepared_a(*_prepared_a);
- TensorAccessor<typename strategy::operand_type> transformed_b(*_transformed_b);
- TensorAccessor<typename strategy::result_type> c(*_c);
- TensorAccessor<typename strategy::result_type> tmp_c(*_tmp_c);
-
- // Handle 3d output re-interpretation
- if(_reinterpret_c_as_3d)
- {
- Strides c_strides_as_3d = _c->info()->strides_in_bytes();
- c_strides_as_3d.remove(Window::DimZ);
- c.set_strides(c_strides_as_3d);
- }
-
- int prev_batch = -1;
- typename strategy::operand_type *a_ptr = nullptr;
- auto window_iterator = arm_compute::create_window_iterator(batch_window, start_offset, end_offset, [&](const Coordinates & id)
- {
- const unsigned int y = id.x();
- const unsigned int batch = id.y();
- const unsigned int ymax = std::min(_params.M, y + strategy::out_height());
-
- // If it's the first block of a new batch then reset the pointer to A.
- if(prev_batch != static_cast<int>(batch))
- {
- const unsigned int first_m = id.x();
- a_ptr = prepared_a(0, first_m, batch);
- prev_batch = batch;
- }
-
- // Call matrix multiply assembly routine to process the block:
- strat.kernel(a_ptr, transformed_b(wl._offset_transformed_b), tmp_c(0, info.thread_id), 1, wl._bblocks, wl._kern_k);
- a_ptr += strategy::out_height() * wl._kern_k;
-
- // Merge the result with the other blocks' results:
- strat.transforms.Merge(c(0, 0, batch, wl._multi), tmp_c(0, info.thread_id), c.stride(1), y, ymax, wl._x0, wl._xmax, _alpha, (wl._k0 == 0 ? _beta : static_cast<typename strategy::result_type>(1)));
- });
- auto on_new_row_size = [&](unsigned int, unsigned int)
- {
- //Nothing to do
- };
- window_iterator.iterate_2D(on_new_row_size);
- }
- void create_workloads(std::vector<MatrixMultiplyWorkload> &workloads) override
- {
- unsigned int offset_transformed_b = 0;
- unsigned int wl_index = 0;
- unsigned int num_buffers = 0, reshaped_block_size = 0;
-
- if(!_b_is_pretransposed)
- {
- num_buffers = _transformed_b->info()->tensor_shape()[1];
- reshaped_block_size = _transformed_b->info()->tensor_shape()[0];
- }
- execute_window_loop(_block_walker, [&](const Coordinates & id)
- {
- const unsigned int x0 = id.x();
- const unsigned int k0 = id.y();
- const unsigned int multi = id.z();
-
- const unsigned int xmax = std::min(x0 + _block_walker.x().step(), _params.N);
- const unsigned int kmax = std::min(k0 + _block_walker.y().step(), _params.K);
-
- // Figure out how many "K" the kernel will actually process.
- const int kern_k = ceil_to_multiple(kmax - k0, strategy::k_unroll());
- const int bblocks = DIV_CEIL(xmax - x0, strategy::out_width());
-
- workloads.push_back(MatrixMultiplyWorkload(offset_transformed_b, x0, xmax, k0, kmax, multi, kern_k, bblocks));
-
- if(_b_is_pretransposed)
- {
- offset_transformed_b += bblocks * strategy::out_width() * kern_k;
- }
- else
- {
- // Rotate through the BufferManager's buffers:
- wl_index++;
- offset_transformed_b = (wl_index % num_buffers) * reshaped_block_size;
- }
- });
- }
-
-private:
- const ITensor *_prepared_a
- {
- nullptr
- };
- const ITensor *_transformed_b{ nullptr };
- ITensor *_tmp_c{ nullptr };
- ITensor *_c{ nullptr };
- unsigned int _Nsize{ 0 };
- unsigned int _Ksize{ 0 };
- bool _transpose_b{ false };
- BlockSizes _block_sizes{};
- INEGEMMWrapperKernel::Params _params{};
- Window _block_walker{};
- bool _b_is_pretransposed{ false };
- bool _reinterpret_c_as_3d{ false };
- typename strategy::result_type _alpha{};
- typename strategy::result_type _beta{};
-};
-} // namespace arm_compute
-#endif /* __ARM_COMPUTE_NEGEMMINTERLEAVEDMATRIXMULTIPLYWRAPPER_H__ */
diff --git a/arm_compute/core/NEON/kernels/assembly/NEGEMMInterleavedPrepareBWrapperKernel.h b/arm_compute/core/NEON/kernels/assembly/NEGEMMInterleavedPrepareBWrapperKernel.h
deleted file mode 100644
index ba3223f66d..0000000000
--- a/arm_compute/core/NEON/kernels/assembly/NEGEMMInterleavedPrepareBWrapperKernel.h
+++ /dev/null
@@ -1,251 +0,0 @@
-/*
- * Copyright (c) 2018-2019 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef __ARM_COMPUTE_NEGEMMINTERLEAVEDPREPAREBWRAPPERKERNEL_H__
-#define __ARM_COMPUTE_NEGEMMINTERLEAVEDPREPAREBWRAPPERKERNEL_H__
-
-#include "arm_compute/core/Helpers.h"
-#include "arm_compute/core/ITensor.h"
-#include "arm_compute/core/NEON/INEKernel.h"
-#include "arm_compute/core/NEON/kernels/assembly/Helpers.h"
-#include "arm_compute/core/NEON/kernels/assembly/INEGEMMWrapperKernel.h"
-#include "arm_compute/core/Utils.h"
-#include "arm_compute/core/Validate.h"
-
-namespace arm_compute
-{
-/** Unit of work for @ref NEGEMMInterleavedPrepareBWrapperKernel to process */
-struct PrepareBWorkload
-{
- /** Constructor
- *
- * @param[in] offset_b Offset from the start of b's allocation
- * @param[in] offset_transformed_b Offset from the start of transformed_b's allocation.
- * @param[in] x0 First value to process along the X dimension (N).
- * @param[in] xmax Last value to process along the X dimension (N).
- * @param[in] k0 First value to process along the K dimension.
- * @param[in] kmax Last value to process along the K dimension.
- */
- PrepareBWorkload(unsigned int offset_b, unsigned int offset_transformed_b, unsigned int x0, unsigned int xmax, unsigned int k0, unsigned int kmax)
- : _offset_b(offset_b), _offset_transformed_b(offset_transformed_b), _x0(x0), _xmax(xmax), _k0(k0), _kmax(kmax)
- {
- }
- unsigned int _offset_b; /**< Offset from the start of b's allocation.*/
- unsigned int _offset_transformed_b; /**< Offset from the start of transformed_b's allocation.*/
- unsigned int _x0; /**< First value to process along the X dimension (N). */
- unsigned int _xmax; /**< Last value to process along the X dimension (N). */
- unsigned int _k0; /**< First value to process along the K dimension. */
- unsigned int _kmax; /**< Last value to process along the K dimension. */
-};
-
-namespace detail
-{
-// Call the lambda function for each workload generated by the passed window.
-template <typename strategy, bool use_buffer_manager, typename Lambda>
-void for_each_element_in_window(const Window &window, const ITensor *b, ITensor *transformed_b, unsigned int N, unsigned int K, Lambda &&lambda)
-{
- unsigned int wl_index = 0;
- unsigned int num_buffers = 0, reshaped_block_size = 0;
-
- if(use_buffer_manager)
- {
- num_buffers = transformed_b->info()->tensor_shape()[1];
- reshaped_block_size = transformed_b->info()->strides_in_bytes().y();
- }
-
- unsigned int offset_transformed_b = transformed_b->info()->offset_first_element_in_bytes();
- execute_window_loop(window, [&](const Coordinates & coordinates)
- {
- const unsigned int x0 = coordinates.x();
- const unsigned int k0 = coordinates.y();
- const unsigned int multi = coordinates.z();
-
- const unsigned int offset_b = b->info()->offset_element_in_bytes(Coordinates(0, 0, multi));
- const unsigned int xmax = std::min(x0 + window.x().step(), N);
- const unsigned int kmax = std::min(k0 + window.y().step(), K);
-
- /* Figure out the size of each block. */
- unsigned int x_size = (xmax - x0);
- unsigned int k_size = (kmax - k0);
-
- /* Round sizes up as needed. */
- x_size = ceil_to_multiple(x_size, strategy::out_width());
- k_size = ceil_to_multiple(k_size, strategy::k_unroll());
-
- lambda(PrepareBWorkload(offset_b, offset_transformed_b, x0, xmax, k0, kmax));
-
- //Each workload represents one block:
- if(use_buffer_manager)
- {
- // Rotate through the BufferManager's buffers:
- wl_index++;
- offset_transformed_b = (wl_index % num_buffers) * reshaped_block_size;
- }
- else
- {
- offset_transformed_b += (x_size * k_size * sizeof(typename strategy::operand_type));
- }
- });
-}
-
-// Calculate the size of transformed_b:
-template <typename strategy>
-unsigned int get_B_pretransposed_array_size(unsigned int N, unsigned int K, const BlockSizes &bs, unsigned int multis)
-{
- // How many full blocks do N / K contain ?
- size_t num_full_k = K / bs.k_block;
- size_t num_full_x = N / bs.x_block;
-
- ARM_COMPUTE_ERROR_ON(bs.x_block % strategy::out_width() != 0);
- ARM_COMPUTE_ERROR_ON(bs.k_block % strategy::k_unroll() != 0);
-
- size_t normal_x_size = bs.x_block;
- size_t normal_k_size = bs.k_block;
-
- // Round up the leftovers to be a multiple of the strategy processing size:
- size_t left_over_x_size = ceil_to_multiple(N % bs.x_block, strategy::out_width());
- size_t left_over_k_size = ceil_to_multiple(K % bs.k_block, strategy::k_unroll());
-
- // Calculate the total size of the buffer:
- size_t total = num_full_k * normal_k_size * (num_full_x * normal_x_size + left_over_x_size);
- total += left_over_k_size * (left_over_x_size + num_full_x * normal_x_size);
-
- total *= multis;
-
- return total;
-}
-} // namespace detail
-
-/** Common interface for the templated wrappers around the B reshape NEON assembly implementations */
-class NEGEMMInterleavedPrepareBWrapperKernel : public INEKernel
-{
-public:
- /** Transform the block at the given coordinates
- *
- * @param[in] wl Workload to process.
- * @param[in] info Information about the current thread.
- */
- virtual void transform(const PrepareBWorkload &wl, const ThreadInfo &info) = 0;
- /** Generate an array of workloads
- *
- * @param[out] workloads Container to store the generated workloads.
- */
- virtual void create_workloads(std::vector<PrepareBWorkload> &workloads) = 0;
- /** Return the block_sizes used to resape B
- *
- * The same block sizes must be used to reshape A and for the matrix multiplication
- *
- * @return The block sizes used to reshape B.
- */
- virtual BlockSizes block_sizes() const = 0;
-
- // Inherited methods overridden:
- const char *name() const override
- {
- return "NEGEMMInterleavedPrepareBWrapperKernel";
- }
-
- bool is_parallelisable() const override
- {
- return false; // Can't run on arbitrary windows but can be parallelised using an array of workloads
- }
-};
-
-/** Equivalent to arm_gemm::GemmInterleaved's strategy::transforms::PrepareB() but using Compute Library types.
- */
-template <typename strategy>
-class NEGEMMInterleavedPrepareBWrapperKernelTemplate : public NEGEMMInterleavedPrepareBWrapperKernel
-{
-public:
- /** Configure the reshape B routine.
- *
- * @param[in] b Input matrix B.
- * @param[out] transformed_b Reshaped matrix B.
- * @param[in] transpose_b Also transpose B ?
- * @param[in] ci CPU information
- * @param[in] params M, N, K sizes.
- */
- void configure(const ITensor *b, ITensor *transformed_b, bool transpose_b, const CPUInfo &ci, const INEGEMMWrapperKernel::Params &params)
- {
- const unsigned int multis = b->info()->tensor_shape().z();
- _Nsize = b->info()->tensor_shape().x();
- _Ksize = b->info()->tensor_shape().y();
- _b = b;
- _transformed_b = transformed_b;
- _transpose_b = transpose_b;
-
- _block_sizes = calculate_block_sizes<strategy>(ci, params.M, params.N, params.K);
-
- auto_init_if_empty(*transformed_b->info(), b->info()->clone()->set_tensor_shape(TensorShape{ detail::get_B_pretransposed_array_size<strategy>(_Nsize, _Ksize, _block_sizes, multis) }));
-
- Window window;
- window.set(Window::DimX, Window::Dimension(0, ceil_to_multiple(_Nsize, _block_sizes.x_block), _block_sizes.x_block));
- window.set(Window::DimY, Window::Dimension(0, ceil_to_multiple(_Ksize, _block_sizes.k_block), _block_sizes.k_block));
- window.set(Window::DimZ, Window::Dimension(0, multis));
-
- INEKernel::configure(window);
- }
-
- // Inherited methods overridden:
- void transform(const PrepareBWorkload &wl, const ThreadInfo &info) override
- {
- strategy strat(info.cpu_info);
- strat.transforms.PrepareB(reinterpret_cast<typename strategy::operand_type *>(_transformed_b->buffer() + wl._offset_transformed_b),
- reinterpret_cast<typename strategy::operand_type *>(_b->buffer() + wl._offset_b),
- _b->info()->strides_in_bytes().y() / sizeof(typename strategy::operand_type),
- wl._x0, wl._xmax, wl._k0, wl._kmax, _transpose_b);
- }
- void create_workloads(std::vector<PrepareBWorkload> &workloads) override
- {
- detail::for_each_element_in_window<strategy, true>(window(), _b, _transformed_b, _Nsize, _Ksize, [&workloads](PrepareBWorkload && wl)
- {
- workloads.push_back(std::move(wl));
- });
- }
- void run(const Window &window, const ThreadInfo &info) override
- {
- ARM_COMPUTE_ERROR_ON_MISMATCHING_WINDOWS(window, INEKernel::window());
- detail::for_each_element_in_window<strategy, false>(window, _b, _transformed_b, _Nsize, _Ksize, [&](PrepareBWorkload && wl)
- {
- this->transform(wl, info);
- });
- }
- BlockSizes block_sizes() const override
- {
- return _block_sizes;
- }
-
-private:
- const ITensor *_b
- {
- nullptr
- };
- ITensor *_transformed_b{ nullptr };
- unsigned int _Nsize{ 0 };
- unsigned int _Ksize{ 0 };
- bool _transpose_b{ false };
- BlockSizes _block_sizes{};
-};
-
-} // namespace arm_compute
-#endif /* __ARM_COMPUTE_NEGEMMINTERLEAVEDPREPAREBWRAPPERKERNEL_H__ */
diff --git a/arm_compute/core/NEON/kernels/assembly/NEGEMMInterleavedTransformAWrapper.h b/arm_compute/core/NEON/kernels/assembly/NEGEMMInterleavedTransformAWrapper.h
deleted file mode 100644
index c1fd86e453..0000000000
--- a/arm_compute/core/NEON/kernels/assembly/NEGEMMInterleavedTransformAWrapper.h
+++ /dev/null
@@ -1,173 +0,0 @@
-/*
- * Copyright (c) 2018-2019 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef __ARM_COMPUTE_NEGEMMINTERLEAVEDTRANSFORMAWRAPPER_H__
-#define __ARM_COMPUTE_NEGEMMINTERLEAVEDTRANSFORMAWRAPPER_H__
-
-#include "arm_compute/core/CPP/CPPTypes.h"
-#include "arm_compute/core/Helpers.h"
-#include "arm_compute/core/ITensor.h"
-#include "arm_compute/core/NEON/kernels/assembly/INEGEMMWrapperKernel.h"
-#include "arm_compute/core/Utils.h"
-#include "arm_compute/core/Validate.h"
-#include "arm_compute/core/Window.h"
-#include "arm_compute/core/WindowIterator.h"
-
-namespace arm_compute
-{
-class ITensor;
-
-/** Unit of work for @ref NEGEMMInterleavedTransformAWrapper to process */
-struct TransformAWorkload
-{
- /** Constructor
- *
- * @param[in] k0 First value to process along the K dimension.
- * @param[in] kmax Last value to process along the K dimension.
- * @param[in] multi Multi index.
- */
- TransformAWorkload(unsigned int k0, unsigned int kmax, unsigned int multi)
- : _k0(k0), _kmax(kmax), _multi(multi)
- {
- }
- unsigned int _k0; /**< First value to process along the K dimension. */
- unsigned int _kmax; /**< Last value to process along the K dimension. */
- unsigned int _multi; /**< Multi index. */
-};
-
-/** Equivalent to arm_gemm::GemmInterleaved's Transform<strategy::A_interleave, strategy::A_block but using Compute Library types.
- *
- * Note: Each workload converts a different slice of a and writes it to transformed_a (Which can store only one slice at the time), therefore the workloads' execution should be interleaved with other workloads that make use of their result.
- */
-class NEGEMMInterleavedTransformAWrapper
-{
-public:
- /** Transform the block at the given coordinates
- *
- * @param[in] wl Workload to process.
- * @param[in] info Information about the current thread.
- * @param[in] batch_window Window containing iteration information for the M and batch dimensions.
- * @param[in] start_offset Offset relative to the beginning of batch_window to start the processing from.
- * @param[in] end_offset Offset relative to the beginning of batch_window to stop the processing.
- */
- virtual void transform(const TransformAWorkload &wl, const ThreadInfo &info, const Window &batch_window, const Coordinates &start_offset, const Coordinates &end_offset) = 0;
- /** Generate an array of workloads
- *
- * @param[out] workloads Container to store the generated workloads.
- */
- virtual void create_workloads(std::vector<TransformAWorkload> &workloads) = 0;
- /** Default destructor */
- virtual ~NEGEMMInterleavedTransformAWrapper() = default;
-};
-
-/** Type specialisations of @ref NEGEMMInterleavedTransformAWrapper */
-template <typename strategy>
-class NEGEMMInterleavedTransformAWrapperTemplate : public NEGEMMInterleavedTransformAWrapper
-{
-public:
- /** Configure the reshape A routine.
- *
- * @param[in] a Input matrix A.
- * @param[out] transformed_a Reshaped matrix A.
- * @param[in] transpose_a Also transpose A ?
- * @param[in] reinterpret_a_as_3d Re-interpret as 3D ?
- * @param[in] block_walker Window representing the layout of the matrix's blocks
- * @param[in] params M, N, K sizes.
- */
- void configure(const ITensor *a, ITensor *transformed_a, bool transpose_a, bool reinterpret_a_as_3d, const Window &block_walker, const INEGEMMWrapperKernel::Params &params)
- {
- _a = a;
- _transformed_a = transformed_a;
- _transpose_a = transpose_a;
- _reinterpret_a_as_3d = reinterpret_a_as_3d;
- _Ksize = params.K;
- _Msize = params.M;
- _k_multi_window = block_walker.shift_dimensions(1); // block_walker contains (M,K,Multi) --> shift by 1 to get rid of the "M" dimension
- }
-
- // Inherited methods overridden:
- void transform(const TransformAWorkload &wl, const ThreadInfo &info, const Window &batch_window, const Coordinates &start_offset, const Coordinates &end_offset) override
- {
- strategy strat(info.cpu_info);
- TensorAccessor<typename strategy::operand_type> a(*_a);
- TensorAccessor<typename strategy::operand_type> transformed_a(*_transformed_a);
-
- // Handle 3d input re-interpretation
- if(_reinterpret_a_as_3d)
- {
- Strides a_strides_as_3d = _a->info()->strides_in_bytes();
- a_strides_as_3d.remove(Window::DimZ);
- a.set_strides(a_strides_as_3d);
- }
-
- unsigned int last_m = 0;
- //TODO: Create a new iterate_1D( DimY);
- int last_y = -1;
- auto window_iterator = arm_compute::create_window_iterator(batch_window, start_offset, end_offset, [&](const Coordinates & id)
- {
- if(id.y() != last_y)
- {
- last_y = id.y();
- unsigned int batch = id.y();
- unsigned int first_m = id.x();
-
- if(first_m >= last_m)
- return;
-
- strat.transforms.PrepareA(transformed_a(0, first_m, batch),
- a(0, 0, batch, wl._multi),
- a.stride(1), first_m, last_m, wl._k0, wl._kmax, _transpose_a);
- }
- });
- auto on_new_row_size = [&](unsigned int, unsigned int end)
- {
- last_m = std::min(end, _Msize);
- };
- window_iterator.iterate_2D(on_new_row_size);
- }
- void create_workloads(std::vector<TransformAWorkload> &workloads) override
- {
- execute_window_loop(_k_multi_window, [&](const Coordinates & id)
- {
- const unsigned int k0 = id.x();
- const unsigned int multi = id.y();
- const unsigned int kmax = std::min(k0 + _k_multi_window.x().step(), _Ksize);
-
- workloads.push_back(TransformAWorkload(k0, kmax, multi));
- });
- }
-
-private:
- const ITensor *_a
- {
- nullptr
- };
- ITensor *_transformed_a{ nullptr };
- unsigned int _Msize{ 0 };
- unsigned int _Ksize{ 0 };
- bool _transpose_a{ false };
- bool _reinterpret_a_as_3d{ false };
- Window _k_multi_window{};
-};
-} // namespace arm_compute
-#endif /* __ARM_COMPUTE_NEGEMMINTERLEAVEDTRANSFORMAWRAPPER_H__ */
diff --git a/arm_compute/core/NEON/kernels/assembly/NEGEMMNativeWrapperKernel.h b/arm_compute/core/NEON/kernels/assembly/NEGEMMNativeWrapperKernel.h
deleted file mode 100644
index 73a0d7f05f..0000000000
--- a/arm_compute/core/NEON/kernels/assembly/NEGEMMNativeWrapperKernel.h
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * Copyright (c) 2018 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef __ARM_COMPUTE_NEGEMMNATIVEWRAPPERKERNEL_H__
-#define __ARM_COMPUTE_NEGEMMNATIVEWRAPPERKERNEL_H__
-
-#include "INEGEMMWrapperKernel.h"
-
-namespace arm_compute
-{
-/** Equivalent to arm_gemm::GemmNative but using Compute Library types.
- */
-template <typename To, typename Tr>
-class NEGEMMNativeWrapperKernel : public INEGEMMWrapperKernel
-{
-public:
- const char *name() const override
- {
- return "NEGEMMNativeWrapperKernel";
- }
-
-protected:
- // Inherited methods overridden:
- Window configure_internal(float alpha, float beta) override;
- void run_internal(const Window &window, const Coordinates &start_offset, const Coordinates &end_offset, const ThreadInfo &info) override;
-
-private:
- Tr _beta{};
-};
-
-} // namespace arm_compute
-#endif /* __ARM_COMPUTE_NEGEMMNATIVEWRAPPERKERNEL_H__ */
diff --git a/arm_compute/core/NEON/kernels/assembly/arm_gemm.hpp b/arm_compute/core/NEON/kernels/assembly/arm_gemm.hpp
index 828b0f20a7..17faab18fd 100644
--- a/arm_compute/core/NEON/kernels/assembly/arm_gemm.hpp
+++ b/arm_compute/core/NEON/kernels/assembly/arm_gemm.hpp
@@ -65,7 +65,21 @@ struct GemmConfig
GemmConfig() { }
};
-template<typename T>
+struct Activation
+{
+ enum class Type {
+ None,
+ ReLU,
+ BoundedReLU
+ };
+
+ Type type;
+ float param1;
+ float param2;
+
+ Activation(Type type=Type::None, float p1=0.0f, float p2=0.0f) : type(type), param1(p1), param2(p2) { }
+};
+
struct GemmArgs
{
public:
@@ -77,8 +91,7 @@ public:
unsigned int _nmulti;
bool _trA;
bool _trB;
- T _alpha;
- T _beta;
+ Activation _act;
int _maxthreads;
bool _pretransposed_hint;
const GemmConfig *_cfg;
@@ -86,10 +99,10 @@ public:
GemmArgs(const CPUInfo *ci, const unsigned int M, const unsigned int N,
const unsigned int K, const unsigned int nbatches,
const unsigned int nmulti, const bool trA, const bool trB,
- const T alpha, const T beta, const int maxthreads,
+ Activation act, const int maxthreads,
const bool pretransposed_hint, const GemmConfig *cfg=nullptr ) :
_ci(ci), _Msize(M), _Nsize(N), _Ksize(K), _nbatches(nbatches), _nmulti(nmulti),
- _trA(trA), _trB(trB), _alpha(alpha), _beta(beta), _maxthreads(maxthreads),
+ _trA(trA), _trB(trB), _act(act), _maxthreads(maxthreads),
_pretransposed_hint(pretransposed_hint), _cfg(cfg)
{
}
@@ -99,6 +112,7 @@ struct ARequantizeLayer32
{
public:
const int32_t *bias;
+ size_t bias_multi_stride;
int32_t a_offset;
int32_t b_offset;
int32_t c_offset;
@@ -109,8 +123,8 @@ public:
ARequantizeLayer32() = default;
- ARequantizeLayer32(int32_t *b, int32_t ao, int32_t bo, int32_t co, int32_t rs, int32_t rm, int32_t minv, int32_t maxv) :
- bias(b), a_offset(ao), b_offset(bo), c_offset(co), requant_shift(rs), requant_mul(rm), minval(minv), maxval(maxv)
+ ARequantizeLayer32(const int32_t *b, size_t bms, int32_t ao, int32_t bo, int32_t co, int32_t rs, int32_t rm, int32_t minv, int32_t maxv) :
+ bias(b), bias_multi_stride(bms), a_offset(ao), b_offset(bo), c_offset(co), requant_shift(rs), requant_mul(rm), minval(minv), maxval(maxv)
{
}
};
@@ -128,12 +142,12 @@ using UniqueGemmCommon = std::unique_ptr<GemmCommon<Top, Tret> >;
/* get_gemm_method(): Given the templated types and provided parameters,
* which is the preferred method to implement this GEMM? */
template<typename Top, typename Tret, class OutputStage = Nothing>
-KernelDescription get_gemm_method(const GemmArgs<Tret> &args, const OutputStage & ={});
+KernelDescription get_gemm_method(const GemmArgs &args, const OutputStage & ={});
template<typename Top, typename Tret, class OutputStage = Nothing>
-UniqueGemmCommon<Top, Tret> gemm(const GemmArgs<Tret> &args, const OutputStage & ={});
+UniqueGemmCommon<Top, Tret> gemm(const GemmArgs &args, const OutputStage & ={});
template<typename Top, typename Tret, class OutputStage = Nothing>
-std::vector<KernelDescription> get_compatible_kernels(const GemmArgs<Tret> &args, const OutputStage & ={});
+std::vector<KernelDescription> get_compatible_kernels(const GemmArgs &args, const OutputStage & ={});
} // namespace arm_gemm
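
The hunks above replace the templated GemmArgs<T> and its alpha/beta pair with a plain GemmArgs carrying an Activation descriptor. A hypothetical construction, using only the Activation and GemmArgs constructors shown in this file's diff; the CPUInfo pointer, problem sizes and thread count are placeholders, and it is assumed the header brings CPUInfo into scope as in the declarations above.

#include "arm_compute/core/NEON/kernels/assembly/arm_gemm.hpp"

arm_gemm::GemmArgs make_relu6_args(const CPUInfo *ci, unsigned int M, unsigned int N, unsigned int K)
{
    // BoundedReLU: param1 is taken here as the upper clamp, giving a ReLU6-style
    // activation (assumption about the parameter's meaning).
    arm_gemm::Activation act(arm_gemm::Activation::Type::BoundedReLU, 6.0f, 0.0f);

    // Single batch and single multi, no transposes, one thread, no pretranspose hint.
    return arm_gemm::GemmArgs(ci, M, N, K, /*nbatches=*/1, /*nmulti=*/1,
                              /*trA=*/false, /*trB=*/false, act,
                              /*maxthreads=*/1, /*pretransposed_hint=*/false);
}
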
diff --git a/arm_compute/core/NEON/kernels/assembly/gemm_common.hpp b/arm_compute/core/NEON/kernels/assembly/gemm_common.hpp
index 1ae503cddb..d17fd5fe97 100644
--- a/arm_compute/core/NEON/kernels/assembly/gemm_common.hpp
+++ b/arm_compute/core/NEON/kernels/assembly/gemm_common.hpp
@@ -48,7 +48,8 @@ public:
*/
virtual void set_arrays_generic(const void *A, const int lda, const int A_batch_stride, const int A_multi_stride,
const void *B, const int ldb, /* batches share B */ const int B_multi_stride,
- void *C, const int ldc, const int C_batch_stride, const int C_multi_stride) = 0;
+ void *C, const int ldc, const int C_batch_stride, const int C_multi_stride,
+ const void *bias, /* no row or batch stride needed */ const int bias_multi_stride) = 0;
/* For threading, we divide the work into some number of units and work
* out internally what unit corresponds to what work. This returns the
@@ -97,7 +98,11 @@ public:
/*** "Quantized bias" interface (optional) ***/
/* Set the bias vector for quantized GEMMs */
- virtual void set_quantized_bias(const int32_t *bias) { UNUSED(bias); }
+ virtual void set_quantized_bias(const int32_t *bias, size_t bias_multi_stride)
+ {
+ UNUSED(bias);
+ UNUSED(bias_multi_stride);
+ }
// Destructor
virtual ~IGemmCommon() { }
@@ -125,13 +130,16 @@ protected:
int _ldc=0;
int _C_batch_stride=0;
int _C_multi_stride=0;
+ const Tr *_bias=nullptr;
+ int _bias_multi_stride=0;
public:
/* Pass in the pointers to the arrays to be operated on and their
* strides (templated version with appropriate types). */
virtual void set_arrays(const To *A, const int lda, const int A_batch_stride, const int A_multi_stride,
const To *B, const int ldb, /* batches share B */ const int B_multi_stride,
- Tr *C, const int ldc, const int C_batch_stride, const int C_multi_stride) {
+ Tr *C, const int ldc, const int C_batch_stride, const int C_multi_stride,
+ const Tr *bias, /* no row or batch stride needed */ const int bias_multi_stride) {
_Aptr = A;
_lda = lda;
_A_batch_stride = A_batch_stride;
@@ -143,15 +151,19 @@ public:
_ldc = ldc;
_C_batch_stride = C_batch_stride;
_C_multi_stride = C_multi_stride;
+ _bias = bias;
+ _bias_multi_stride = bias_multi_stride;
}
/* Implementation of the void * overload which casts its arguments to the appropriate type. */
void set_arrays_generic(const void *A, const int lda, const int A_batch_stride, const int A_multi_stride,
const void *B, const int ldb, /* batches share B */ const int B_multi_stride,
- void *C, const int ldc, const int C_batch_stride, const int C_multi_stride) override {
+ void *C, const int ldc, const int C_batch_stride, const int C_multi_stride,
+ const void *bias, /* no row or batch stride needed */ const int bias_multi_stride) override {
set_arrays(static_cast<const To *>(A), lda, A_batch_stride, A_multi_stride,
static_cast<const To *>(B), ldb, B_multi_stride,
- static_cast<Tr *>(C), ldc, C_batch_stride, C_multi_stride);
+ static_cast<Tr *>(C), ldc, C_batch_stride, C_multi_stride,
+ static_cast<const Tr *>(bias), bias_multi_stride);
}
/*** "Pretransposed" interface ***/
@@ -164,7 +176,6 @@ public:
void pretranspose_B_array_generic(void *out, const void *in, const int row_stride, const int multi_stride) override {
pretranspose_B_array(out, static_cast<const To *>(in), row_stride, multi_stride);
}
-
};
} // namespace arm_gemm
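
The widened set_arrays() interface above now also takes the bias pointer and its multi stride, so the bias can be added inside the kernel's merge step. Below is a sketch of a call against the templated signature shown in this hunk; it assumes tightly packed row-major A (M x K), B (K x N) and C (M x N) with a single batch and a single multi, from which the stride values follow.

#include "arm_compute/core/NEON/kernels/assembly/gemm_common.hpp"

void wire_up_buffers(arm_gemm::GemmCommon<float, float> &gemm,
                     const float *A, const float *B, float *C, const float *bias,
                     int M, int N, int K)
{
    gemm.set_arrays(A, /*lda=*/K, /*A_batch_stride=*/M * K, /*A_multi_stride=*/M * K,
                    B, /*ldb=*/N, /*B_multi_stride=*/K * N,
                    C, /*ldc=*/N, /*C_batch_stride=*/M * N, /*C_multi_stride=*/M * N,
                    bias, /*bias_multi_stride=*/N); // the bias has no row or batch stride
}
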
diff --git a/arm_compute/runtime/NEON/functions/NEGEMM.h b/arm_compute/runtime/NEON/functions/NEGEMM.h
index d947be1ef9..e4d69eb93d 100644
--- a/arm_compute/runtime/NEON/functions/NEGEMM.h
+++ b/arm_compute/runtime/NEON/functions/NEGEMM.h
@@ -24,6 +24,7 @@
#ifndef __ARM_COMPUTE_NEGEMM_H__
#define __ARM_COMPUTE_NEGEMM_H__
+#include "arm_compute/core/NEON/kernels/NEArithmeticAdditionKernel.h"
#include "arm_compute/core/NEON/kernels/NEFillBorderKernel.h"
#include "arm_compute/core/NEON/kernels/NEGEMMInterleave4x4Kernel.h"
#include "arm_compute/core/NEON/kernels/NEGEMMMatrixAdditionKernel.h"
@@ -33,20 +34,27 @@
#include "arm_compute/runtime/IMemoryManager.h"
#include "arm_compute/runtime/IWeightsManager.h"
#include "arm_compute/runtime/MemoryGroup.h"
+#include "arm_compute/runtime/NEON/functions/NEActivationLayer.h"
#include "arm_compute/runtime/NEON/functions/NEGEMMAssemblyDispatch.h"
#include "arm_compute/runtime/Tensor.h"
-#include <memory>
-
namespace arm_compute
{
/** Basic function to execute GEMM on NEON. This function calls the following NEON kernels:
*
+ * If optimized assembly is available:
+ * -# @ref NEGEMMAssemblyDispatch
+ * -# @ref NEActivationLayer (if alpha != 1.0)
+ * Else:
* -# @ref NEGEMMInterleave4x4Kernel (if the output tensor is a matrix)
* -# @ref NEGEMMTranspose1xWKernel (if the output tensor is a matrix)
* -# @ref NEGEMMMatrixMultiplyKernel
- * -# @ref NEGEMMMatrixAdditionKernel (if c != nullptr and beta != 0.0)
+ * In both cases:
+ * -# @ref NEGEMMMatrixAdditionKernel (if c != nullptr and beta != 0.0 and is not reshaped once)
+ * Else:
+ * -# @ref NEArithmeticAdditionKernel (if c != nullptr and is reshaped once and not optimized assembly in place)
*
+ * -# @ref NEActivationLayer (if activation is specified in GEMMInfo)
*/
class NEGEMM : public IFunction
{
@@ -103,13 +111,21 @@ private:
NEGEMMMatrixMultiplyKernel _mm_kernel;
NEGEMMAssemblyDispatch _asm_glue;
NEGEMMMatrixAdditionKernel _ma_kernel;
- Tensor _tmp_a;
- Tensor _tmp_b;
- const ITensor *_original_b;
- bool _run_vector_matrix_multiplication;
- bool _run_addition;
- bool _reshape_b_only_on_first_run;
- bool _is_prepared;
+ NEActivationLayer _alpha_scale_func;
+ NEArithmeticAdditionKernel _add_bias_kernel;
+ NEActivationLayer _activation_func;
+
+ Tensor _tmp_a;
+ Tensor _tmp_b;
+ Tensor _tmp_d;
+ const ITensor *_original_b;
+ bool _run_vector_matrix_multiplication;
+ bool _run_alpha_scale;
+ bool _run_addition;
+ bool _run_bias_addition;
+ bool _run_activation;
+ bool _reshape_b_only_on_first_run;
+ bool _is_prepared;
};
} // namespace arm_compute
#endif /*__ARM_COMPUTE_NEGEMM_H__ */
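
With the new NEArithmeticAdditionKernel and NEActivationLayer members above, NEGEMM itself decides whether bias addition and activation are fused into the assembly path or run as separate steps. A hypothetical usage sketch follows: the NEGEMM::configure() signature with alpha/beta is not part of this patch and is assumed here, and gemm_info is taken as given (carrying the desired ActivationLayerInfo), since its construction is not shown in these hunks.

#include "arm_compute/runtime/NEON/functions/NEGEMM.h"
#include "arm_compute/runtime/Tensor.h"

void run_gemm_with_bias(arm_compute::Tensor &a, arm_compute::Tensor &b,
                        arm_compute::Tensor &bias, arm_compute::Tensor &dst,
                        const arm_compute::GEMMInfo &gemm_info)
{
    // Whether the activation in gemm_info is fused into the assembly kernel or
    // executed by the NEActivationLayer member is handled internally by NEGEMM.
    arm_compute::NEGEMM gemm;
    gemm.configure(&a, &b, &bias, &dst, /*alpha=*/1.0f, /*beta=*/1.0f, gemm_info);
    gemm.run();
}
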
diff --git a/arm_compute/runtime/NEON/functions/NEGEMMAssemblyDispatch.h b/arm_compute/runtime/NEON/functions/NEGEMMAssemblyDispatch.h
index 83e495e695..20d189e76b 100644
--- a/arm_compute/runtime/NEON/functions/NEGEMMAssemblyDispatch.h
+++ b/arm_compute/runtime/NEON/functions/NEGEMMAssemblyDispatch.h
@@ -59,29 +59,10 @@ public:
};
private:
- /** ACL Function */
- std::unique_ptr<IFunction> _function;
-
- /** If supported create the ACL function corresponding to the GemmMethod provided to process the other passed parameters
- *
- * @param[in] method GemmMethod to use to perform the matrix multiplication.
- * @param[in] a Input tensor (Matrix A).
- * @param[in] b Input tensor (Matrix B).
- * @param[in] c Input tensor (Matrix C) used to pass the bias for quantized calculations
- * @param[out] d Output tensor to store the result of matrix multiplication. Data type supported: same as @p input0.
- * @param[in] alpha Scalar multiplier to apply to AB matrix product.
- * @param[in] beta Scalar multiplier to apply to input D matrix before adding product.
- * @param[in] gemm_info GEMM meta-data
- *
- * @return True if the method is supported and the function was successfully created, false otherwise.
- */
- bool create_function(arm_gemm::GemmMethod method, const ITensor *a, const ITensor *b, const ITensor *c, ITensor *d, float alpha, float beta, const GEMMInfo &gemm_info);
-
/** Interface for the arm_gemm fallback */
- std::unique_ptr<IFallback> _arm_gemm;
- MemoryGroup _memory_group; /**< Function memory group */
- std::shared_ptr<IMemoryManager> _memory_manager; /**< Copy of the memory manager used to create the memory group to be used when instantiating new functions */
- IWeightsManager *_weights_manager; /**< Pointer to the weights manager */
+ std::unique_ptr<IFallback> _arm_gemm;
+ MemoryGroup _memory_group; /**< Function memory group */
+ IWeightsManager *_weights_manager; /**< Pointer to the weights manager */
public:
/** If supported create an ACL function else fallback to the arm_gemm function.
*
@@ -89,11 +70,9 @@ public:
* @param[in] b Input tensor (Matrix B)
* @param[in] c Input tensor (Matrix C) used to pass the bias for quantized calculations
* @param[out] d Output tensor to store the result of matrix multiplication. Data type supported: same as @p input0.
- * @param[in] alpha Scalar multiplier to apply to AB matrix product.
- * @param[in] beta Scalar multiplier to apply to input D matrix before adding product.
* @param[in] gemm_info GEMM meta-data
*/
- void configure(const ITensor *a, const ITensor *b, const ITensor *c, ITensor *d, float alpha, float beta, const GEMMInfo &gemm_info);
+ void configure(const ITensor *a, const ITensor *b, const ITensor *c, ITensor *d, const GEMMInfo &gemm_info);
/** Indicates whether or not this function can be used to process the given parameters.
*
@@ -101,13 +80,18 @@ public:
* @param[in] b Input tensor info (Matrix B)
* @param[in] c Input tensor info (Matrix C) used to pass the bias for quantized calculations
* @param[in] d Output tensor to store the result of matrix multiplication. Data type supported: same as @p input0.
- * @param[in] alpha Scalar multiplier to apply to AB matrix product.
- * @param[in] beta Scalar multiplier to apply to input D matrix before adding product.
* @param[in] gemm_info GEMM meta-data
*
* @return a status.
*/
- static Status validate(const ITensorInfo *a, const ITensorInfo *b, const ITensorInfo *c, const ITensorInfo *d, float alpha, float beta, const GEMMInfo &gemm_info);
+ static Status validate(const ITensorInfo *a, const ITensorInfo *b, const ITensorInfo *c, const ITensorInfo *d, const GEMMInfo &gemm_info);
+ /** Checks if activation is supported by the gemm assembly dispatcher
+ *
+ * @param[in] activation Activation to check
+ *
+ * @return True if activation is supported else false
+ */
+ static bool is_activation_supported(const ActivationLayerInfo &activation);
/** Was the function successfully configured ?
*
* @return True if the function is configured and ready to run
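
With alpha and beta removed, the dispatcher is configured from the GEMMInfo alone, and the new is_activation_supported() lets callers check whether a given activation can be fused by the assembly backend. A hypothetical usage sketch, relying only on the configure(), is_activation_supported() and is_configured() members documented above; the tensors, gemm_info and activation are assumed to be set up elsewhere.

#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/NEON/functions/NEGEMMAssemblyDispatch.h"

void dispatch_assembly_gemm(const arm_compute::ITensor *a, const arm_compute::ITensor *b,
                            const arm_compute::ITensor *bias, arm_compute::ITensor *d,
                            const arm_compute::GEMMInfo &gemm_info,
                            const arm_compute::ActivationLayerInfo &act)
{
    // If the activation cannot be fused, the caller (e.g. NEGEMM) is expected to
    // run it as a separate NEActivationLayer after the GEMM.
    const bool can_fuse = arm_compute::NEGEMMAssemblyDispatch::is_activation_supported(act);

    arm_compute::NEGEMMAssemblyDispatch asm_gemm;
    asm_gemm.configure(a, b, bias, d, gemm_info);
    if(asm_gemm.is_configured())
    {
        asm_gemm.run();
    }
    (void)can_fuse;
}
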
diff --git a/arm_compute/runtime/NEON/functions/NEGEMMConvolutionLayer.h b/arm_compute/runtime/NEON/functions/NEGEMMConvolutionLayer.h
index dccc35f0af..3e551abf5a 100644
--- a/arm_compute/runtime/NEON/functions/NEGEMMConvolutionLayer.h
+++ b/arm_compute/runtime/NEON/functions/NEGEMMConvolutionLayer.h
@@ -26,7 +26,6 @@
#include "arm_compute/runtime/IFunction.h"
-#include "arm_compute/core/NEON/kernels/NEArithmeticAdditionKernel.h"
#include "arm_compute/core/NEON/kernels/NECol2ImKernel.h"
#include "arm_compute/core/NEON/kernels/NEGEMMTranspose1xWKernel.h"
#include "arm_compute/core/NEON/kernels/NEIm2ColKernel.h"
@@ -34,7 +33,6 @@
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/IWeightsManager.h"
#include "arm_compute/runtime/MemoryGroup.h"
-#include "arm_compute/runtime/NEON/functions/NEActivationLayer.h"
#include "arm_compute/runtime/NEON/functions/NEGEMM.h"
#include "arm_compute/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.h"
#include "arm_compute/runtime/NEON/functions/NEGEMMLowpOutputStage.h"
@@ -250,8 +248,6 @@ private:
NEGEMM _mm_gemm;
NEGEMMLowpMatrixMultiplyCore _mm_gemmlowp;
NECol2ImKernel _col2im_kernel;
- NEActivationLayer _activationlayer_function;
- NEArithmeticAdditionKernel _add_bias_kernel;
NEReshapeLayer _reshape_layer;
const ITensor *_original_weights;
@@ -263,11 +259,9 @@ private:
DataLayout _data_layout;
- bool _append_bias;
bool _skip_im2col;
bool _skip_col2im;
bool _is_quantized;
- bool _is_activationlayer_enabled;
bool _is_prepared;
};
} // namespace arm_compute
diff --git a/arm_compute/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.h b/arm_compute/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.h
index 5b6a0dd943..12c120934e 100644
--- a/arm_compute/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.h
+++ b/arm_compute/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.h
@@ -24,6 +24,7 @@
#ifndef __ARM_COMPUTE_NEGEMMLOWPMATRIXMULTIPLYCORE_H__
#define __ARM_COMPUTE_NEGEMMLOWPMATRIXMULTIPLYCORE_H__
+#include "NEActivationLayer.h"
#include "arm_compute/core/NEON/INEKernel.h"
#include "arm_compute/core/NEON/kernels/NEGEMMLowpOffsetContributionKernel.h"
#include "arm_compute/core/NEON/kernels/NEGEMMLowpOffsetContributionOutputStageKernel.h"
@@ -46,6 +47,7 @@ class ITensor;
* -# @ref NEGEMMTranspose1xWKernel
* -# @ref NEGEMMLowpMatrixMultiplyKernel
* -# @ref NEGEMMLowpOffsetContributionKernel
+ * -# @ref NEActivationLayer
*
* otherwise if the DOT product instruction is available:
*
@@ -113,6 +115,7 @@ private:
NEGEMMLowpMatrixBReductionKernel _mtx_b_reduction_kernel;
NEGEMMLowpOffsetContributionKernel _offset_contribution_kernel;
NEGEMMLowpOffsetContributionOutputStageKernel _offset_contribution_output_stage_kernel;
+ NEActivationLayer _activation_func;
Tensor _vector_sum_col;
Tensor _vector_sum_row;
Tensor _tmp_a;
@@ -127,6 +130,7 @@ private:
bool _reshape_b_only_on_first_run;
bool _is_prepared;
bool _fuse_output_stage;
+ bool _run_activation;
};
} // namespace arm_compute
#endif /*__ARM_COMPUTE_NEGEMMLOWPMATRIXMULTIPLYCORE_H__ */
diff --git a/arm_compute/runtime/NEON/functions/assembly/NEGEMMInterleavedWrapper.h b/arm_compute/runtime/NEON/functions/assembly/NEGEMMInterleavedWrapper.h
deleted file mode 100644
index 695dcd5b6e..0000000000
--- a/arm_compute/runtime/NEON/functions/assembly/NEGEMMInterleavedWrapper.h
+++ /dev/null
@@ -1,147 +0,0 @@
-/*
- * Copyright (c) 2018-2019 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef __ARM_COMPUTE_NEGEMMINTERLEAVEDWRAPPER_H__
-#define __ARM_COMPUTE_NEGEMMINTERLEAVEDWRAPPER_H__
-
-#include "arm_compute/core/NEON/kernels/assembly/Helpers.h"
-#include "arm_compute/core/NEON/kernels/assembly/INEGEMMWrapperKernel.h"
-#include "arm_compute/core/NEON/kernels/assembly/NEGEMMInterleavedMatrixMultiplyWrapper.h"
-#include "arm_compute/core/NEON/kernels/assembly/NEGEMMInterleavedPrepareBWrapperKernel.h"
-#include "arm_compute/core/NEON/kernels/assembly/NEGEMMInterleavedTransformAWrapper.h"
-#include "arm_compute/runtime/IFunction.h"
-#include "arm_compute/runtime/IMemoryManager.h"
-#include "arm_compute/runtime/IScheduler.h"
-#include "arm_compute/runtime/IWeightsManager.h"
-#include "arm_compute/runtime/MemoryGroup.h"
-#include "arm_compute/runtime/Tensor.h"
-
-#include <memory>
-
-namespace arm_compute
-{
-// Forward declarations
-class ITensor;
-
-/** Buffer manager used when reshaping B on the fly
- *
- * The typical workflow is:
- * - lock_to_reshape_if_needed()
- * - If the previous lock was successful: mark_as_reshaped()
- * - wait_for_reshaping() wait for the reshaping to be complete
- * - mark_as_unused() once the thread is done using this given buffer.
- *
- * Calls for different indices might be interleaved, however the calls for a given index must always be in that order.
- */
-class IBufferManager
-{
-public:
- /** Lock a buffer for the given index if it's available else return
- *
- * @param[in] index Index of the buffer to lock
- *
- * @return True if the buffer has been successfully locked, false if it's already reshaped / being reshaped.
- */
- virtual bool lock_to_reshape_if_needed(unsigned int index) = 0;
- /** Mark a buffer previously locked as reshaped
- *
- * @pre The thread calling this function must have locked the given buffer through lock_to_reshape_if_needed()
- *
- * @param[in] index Index of the buffer to mark as reshaped
- */
- virtual void mark_as_reshaped(unsigned int index) = 0;
- /** Block until the given buffer is marked as reshaped
- *
- * @param[in] index Index of the buffer
- */
- virtual void wait_for_reshaping(unsigned int index) = 0;
- /** Mark a reshaped buffer as unused
- *
- * Once all the users have marked a buffer as unused then it goes back to being free
- */
- virtual void mark_as_unused(unsigned int index) = 0;
-
- /** Number of buffers used internally
- *
- * @return The number of buffers used by the manager.
- */
- virtual unsigned int num_buffers() const = 0;
- /** Default destructor */
- virtual ~IBufferManager() = default;
-};
-
-/** Equivalent to arm_gemm::GemmInterleaved but using Compute Library types.
- */
-class NEGEMMInterleavedWrapper : public IFunction
-{
-public:
- NEGEMMInterleavedWrapper(std::shared_ptr<IMemoryManager> memory_manager = nullptr, IWeightsManager *weights_manager = nullptr);
- ~NEGEMMInterleavedWrapper() = default;
-
- NEGEMMInterleavedWrapper(const NEGEMMInterleavedWrapper &) = delete;
- NEGEMMInterleavedWrapper &operator=(const NEGEMMInterleavedWrapper &) = delete;
-
- /** Initialise the kernel's input and output.
- *
- * @note The input and output tensor must have the same dimensions
- *
- * @param[in] a Input tensor (Matrix A)
- * @param[in] b Input tensor (Matrix B)
- * @param[out] c Output tensor to store the result of matrix multiplication. Data type supported: same as @p input0.
- * @param[in] alpha Scalar multiplier to apply to AB matrix product.
- * @param[in] beta Scalar multiplier to apply to input C matrix before adding product.
- * @param[in] gemm_info GEMM meta-data
- */
- void configure(const ITensor *a, const ITensor *b, ITensor *c, float alpha, float beta, const GEMMInfo &gemm_info);
-
- // Inherited methods overridden:
- void run() override;
- void prepare() override;
-
-private:
- MemoryGroup _memory_group;
- IWeightsManager *_weights_manager;
- bool _is_prepared{ false };
- bool _pretranspose_b{ false };
- Window _block_walker{};
- Window _batch_window{};
- const ITensor *_a{ nullptr };
- const ITensor *_b{ nullptr };
- ITensor *_c{ nullptr };
- Tensor _transformed_b{};
- Tensor _transformed_a{};
- Tensor _tmp_c{};
- INEGEMMWrapperKernel::Params _params{};
- BlockSizes _block_sizes{};
- std::unique_ptr<NEGEMMInterleavedPrepareBWrapperKernel> _prepare_b{ nullptr };
- std::unique_ptr<NEGEMMInterleavedTransformAWrapper> _transform_a{ nullptr };
- std::unique_ptr<NEGEMMInterleavedMatrixMultiplyWrapper> _matrix_multiply{ nullptr };
- std::unique_ptr<IBufferManager> _buffer_manager{ nullptr };
- std::vector<TransformAWorkload> _a_workloads{};
- std::vector<PrepareBWorkload> _b_workloads{};
- std::vector<MatrixMultiplyWorkload> _mm_workloads{};
- std::vector<IScheduler::Workload> _workloads{};
- std::string _tag{};
-};
-} // namespace arm_compute
-#endif /* __ARM_COMPUTE_NEGEMMINTERLEAVEDWRAPPER_H__ */