From 4f7693d8757cf12c33f049c61c63bc689379ab84 Mon Sep 17 00:00:00 2001 From: Sang-Hoon Park Date: Wed, 12 May 2021 13:59:10 +0100 Subject: Rename NEGEMMAssembly to CpuGemmAssembly - Dispatch, WrapperKernel has been renamed and moved - Header files for assembly kernels have been moved Partially Resolves: COMPMID-4506 Change-Id: I6c2f391bb95ba1ce7ca195d0efa57b9c3225570f Signed-off-by: Sang-Hoon Park Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/5637 Reviewed-by: Michele Di Giorgio Reviewed-by: Georgios Pinitas Comments-Addressed: Arm Jenkins Tested-by: Arm Jenkins --- Android.bp | 2 +- SConscript | 6 +- arm_compute/runtime/NEON/functions/NEGEMM.h | 27 +- arm_compute/runtime/NEON/functions/NEGEMMConv2d.h | 21 +- .../NEON/functions/NEGEMMLowpMatrixMultiplyCore.h | 7 +- .../NEON/functions/NEWinogradConvolutionLayer.h | 2 +- .../kernels/assembly/NEGEMMAssemblyWrapperKernel.h | 120 --- src/core/NEON/kernels/assembly/arm_gemm.hpp | 189 ----- .../kernels/assembly/arm_gemm_compute_iface.hpp | 122 --- src/core/NEON/kernels/assembly/arm_gemm_local.hpp | 31 - .../kernels/assembly/convolution_parameters.hpp | 65 -- src/core/NEON/kernels/assembly/gemm_common.hpp | 229 ------ src/core/NEON/kernels/assembly/ndrange.hpp | 199 ----- .../assembly/CpuGemmAssemblyWrapperKernel.h | 126 +++ src/core/cpu/kernels/assembly/arm_gemm.hpp | 189 +++++ .../kernels/assembly/arm_gemm_compute_iface.hpp | 130 ++++ src/core/cpu/kernels/assembly/arm_gemm_local.hpp | 31 + .../kernels/assembly/convolution_parameters.hpp | 65 ++ src/core/cpu/kernels/assembly/gemm_common.hpp | 229 ++++++ src/core/cpu/kernels/assembly/ndrange.hpp | 199 +++++ src/runtime/NEON/functions/NEGEMM.cpp | 24 +- .../NEON/functions/NEGEMMAssemblyDispatch.cpp | 860 -------------------- .../NEON/functions/NEGEMMAssemblyDispatch.h | 125 --- src/runtime/NEON/functions/NEGEMMConv2d.cpp | 16 +- .../functions/NEGEMMLowpMatrixMultiplyCore.cpp | 22 +- .../NEON/functions/NEWinogradConvolutionLayer.cpp | 2 +- .../operators/internal/CpuGemmAssemblyDispatch.cpp | 863 +++++++++++++++++++++ .../operators/internal/CpuGemmAssemblyDispatch.h | 131 ++++ 28 files changed, 2033 insertions(+), 1999 deletions(-) delete mode 100644 src/core/NEON/kernels/assembly/NEGEMMAssemblyWrapperKernel.h delete mode 100644 src/core/NEON/kernels/assembly/arm_gemm.hpp delete mode 100644 src/core/NEON/kernels/assembly/arm_gemm_compute_iface.hpp delete mode 100644 src/core/NEON/kernels/assembly/arm_gemm_local.hpp delete mode 100644 src/core/NEON/kernels/assembly/convolution_parameters.hpp delete mode 100644 src/core/NEON/kernels/assembly/gemm_common.hpp delete mode 100644 src/core/NEON/kernels/assembly/ndrange.hpp create mode 100644 src/core/cpu/kernels/assembly/CpuGemmAssemblyWrapperKernel.h create mode 100644 src/core/cpu/kernels/assembly/arm_gemm.hpp create mode 100644 src/core/cpu/kernels/assembly/arm_gemm_compute_iface.hpp create mode 100644 src/core/cpu/kernels/assembly/arm_gemm_local.hpp create mode 100644 src/core/cpu/kernels/assembly/convolution_parameters.hpp create mode 100644 src/core/cpu/kernels/assembly/gemm_common.hpp create mode 100644 src/core/cpu/kernels/assembly/ndrange.hpp delete mode 100644 src/runtime/NEON/functions/NEGEMMAssemblyDispatch.cpp delete mode 100644 src/runtime/NEON/functions/NEGEMMAssemblyDispatch.h create mode 100644 src/runtime/cpu/operators/internal/CpuGemmAssemblyDispatch.cpp create mode 100644 src/runtime/cpu/operators/internal/CpuGemmAssemblyDispatch.h diff --git a/Android.bp b/Android.bp index 6ece3f8636..f88ddc47f8 100644 --- 
a/Android.bp +++ b/Android.bp @@ -568,7 +568,6 @@ cc_library_static { "src/runtime/NEON/functions/NEFullyConnectedLayer.cpp", "src/runtime/NEON/functions/NEFuseBatchNormalization.cpp", "src/runtime/NEON/functions/NEGEMM.cpp", - "src/runtime/NEON/functions/NEGEMMAssemblyDispatch.cpp", "src/runtime/NEON/functions/NEGEMMConv2d.cpp", "src/runtime/NEON/functions/NEGEMMConvolutionLayer.cpp", "src/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.cpp", @@ -650,6 +649,7 @@ cc_library_static { "src/runtime/cpu/operators/CpuSoftmax.cpp", "src/runtime/cpu/operators/CpuSub.cpp", "src/runtime/cpu/operators/CpuTranspose.cpp", + "src/runtime/cpu/operators/internal/CpuGemmAssemblyDispatch.cpp", "src/runtime/gpu/cl/operators/ClActivation.cpp", "src/runtime/gpu/cl/operators/ClAdd.cpp", "src/runtime/gpu/cl/operators/ClConcatenate.cpp", diff --git a/SConscript b/SConscript index d5b2221204..83c5a7da84 100644 --- a/SConscript +++ b/SConscript @@ -260,7 +260,8 @@ if env['neon']: "src/core/NEON/kernels/convolution/winograd/", "src/core/NEON/kernels/convolution/depthwise/", "src/core/NEON/kernels/assembly/", - "arm_compute/core/NEON/kernels/assembly/"]) + "arm_compute/core/NEON/kernels/assembly/", + "src/core/cpu/kernels/assembly/",]) graph_files += Glob('src/graph/backends/NEON/*.cpp') @@ -367,7 +368,8 @@ if env['neon']: 'src/runtime/cpu/operators/CpuSub.cpp', 'src/runtime/cpu/operators/CpuTranspose.cpp', ] - runtime_files += [ cpu_rt_files, cpu_operator_hp_files, cpu_operator_files ] + cpu_internal_operator_files = ['src/runtime/cpu/operators/internal/CpuGemmAssemblyDispatch.cpp',] + runtime_files += [ cpu_rt_files, cpu_operator_hp_files, cpu_operator_files, cpu_internal_operator_files ] bootcode_o = [] if env['os'] == 'bare_metal': diff --git a/arm_compute/runtime/NEON/functions/NEGEMM.h b/arm_compute/runtime/NEON/functions/NEGEMM.h index d4a9f68beb..9df2e08956 100644 --- a/arm_compute/runtime/NEON/functions/NEGEMM.h +++ b/arm_compute/runtime/NEON/functions/NEGEMM.h @@ -41,12 +41,15 @@ class NEGEMMInterleave4x4Kernel; class NEGEMMMatrixAdditionKernel; class NEGEMMMatrixMultiplyKernel; class NEGEMMTranspose1xWKernel; -class NEGEMMAssemblyDispatch; +namespace cpu +{ +class CpuGemmAssemblyDispatch; +} /** Basic function to execute GEMM. 
This function calls the following kernels:
 *
 * If optimized assembly is available:
- * -# @ref NEGEMMAssemblyDispatch
+ * -# @ref cpu::CpuGemmAssemblyDispatch
 * -# @ref NEActivationLayer (if alpha != 1.0)
 * Else:
 * -# @ref NEGEMMInterleave4x4Kernel (if the output tensor is a matrix)
@@ -119,16 +122,16 @@ public:
 void prepare() override;
 private:
- MemoryGroup _memory_group;
- IWeightsManager *_weights_manager;
- std::unique_ptr<NEGEMMInterleave4x4Kernel> _interleave_kernel;
- std::unique_ptr<NEGEMMTranspose1xWKernel> _transpose_kernel;
- std::unique_ptr<NEGEMMMatrixMultiplyKernel> _mm_kernel;
- std::unique_ptr<NEGEMMAssemblyDispatch> _asm_glue;
- std::unique_ptr<NEGEMMMatrixAdditionKernel> _ma_kernel;
- NEActivationLayer _alpha_scale_func;
- NEArithmeticAddition _add_bias;
- NEActivationLayer _activation_func;
+ MemoryGroup _memory_group;
+ IWeightsManager *_weights_manager;
+ std::unique_ptr<NEGEMMInterleave4x4Kernel> _interleave_kernel;
+ std::unique_ptr<NEGEMMTranspose1xWKernel> _transpose_kernel;
+ std::unique_ptr<NEGEMMMatrixMultiplyKernel> _mm_kernel;
+ std::unique_ptr<cpu::CpuGemmAssemblyDispatch> _asm_glue;
+ std::unique_ptr<NEGEMMMatrixAdditionKernel> _ma_kernel;
+ NEActivationLayer _alpha_scale_func;
+ NEArithmeticAddition _add_bias;
+ NEActivationLayer _activation_func;
 Tensor _tmp_a;
 Tensor _tmp_b;
diff --git a/arm_compute/runtime/NEON/functions/NEGEMMConv2d.h b/arm_compute/runtime/NEON/functions/NEGEMMConv2d.h
index b2ffd038de..6c71f0e188 100644
--- a/arm_compute/runtime/NEON/functions/NEGEMMConv2d.h
+++ b/arm_compute/runtime/NEON/functions/NEGEMMConv2d.h
@@ -36,13 +36,16 @@ namespace arm_compute
{
// Forward declarations
class ITensor;
-class NEGEMMAssemblyDispatch;
+namespace cpu
+{
+class CpuGemmAssemblyDispatch;
+}
/** Basic function to compute the convolution layer. This function calls the following kernels/functions:
 *
 * Supports only NHWC data layout
 *
- * -# @ref NEGEMMAssemblyDispatch
+ * -# @ref cpu::CpuGemmAssemblyDispatch
 * -# @ref NEActivationLayer, in case activation cannot be fused in the assembly dispatch
 *
 * Weights are transformed from OHWI to HWIO format using the following kernels:
@@ -111,13 +114,13 @@ public:
 void prepare() override;
 private:
- std::unique_ptr<NEGEMMAssemblyDispatch> _gemm_asm_func;
- NEActivationLayer _activation_func;
- NEPermute _weights_permute_func;
- const ITensor *_original_weights;
- Tensor _permuted_weights;
- bool _is_prepared;
- bool _run_activation;
+ std::unique_ptr<cpu::CpuGemmAssemblyDispatch> _gemm_asm_func;
+ NEActivationLayer _activation_func;
+ NEPermute _weights_permute_func;
+ const ITensor *_original_weights;
+ Tensor _permuted_weights;
+ bool _is_prepared;
+ bool _run_activation;
};
} // namespace arm_compute
#endif /* ARM_COMPUTE_NEGEMMCONV2D_H */
diff --git a/arm_compute/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.h b/arm_compute/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.h
index 780723e752..a292712bd7 100644
--- a/arm_compute/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.h
+++ b/arm_compute/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.h
@@ -44,7 +44,10 @@ class NEGEMMLowpOffsetContributionOutputStageKernel;
class NEGEMMLowpMatrixAReductionKernel;
class NEGEMMLowpMatrixBReductionKernel;
class NEGEMMTranspose1xWKernel;
-class NEGEMMAssemblyDispatch;
+namespace cpu
+{
+class CpuGemmAssemblyDispatch;
+}
/** Basic function to execute GEMMLowpMatrixMultiplyCore.
This function calls the following kernels if the DOT product instruction is not available: * @@ -135,7 +138,7 @@ public: private: MemoryGroup _memory_group; IWeightsManager *_weights_manager; - std::unique_ptr _asm_glue; + std::unique_ptr _asm_glue; std::unique_ptr _mm_kernel; std::unique_ptr _mtx_a_reshape_kernel; std::unique_ptr _mtx_b_reshape_kernel; diff --git a/arm_compute/runtime/NEON/functions/NEWinogradConvolutionLayer.h b/arm_compute/runtime/NEON/functions/NEWinogradConvolutionLayer.h index 77f9093ed4..f9ebf608cb 100644 --- a/arm_compute/runtime/NEON/functions/NEWinogradConvolutionLayer.h +++ b/arm_compute/runtime/NEON/functions/NEWinogradConvolutionLayer.h @@ -47,7 +47,7 @@ class ICPPKernel; * -# @ref NEWinogradLayerTransformWeightsKernel (executed only once in the first call to the run() method ) * -# @ref NEWinogradLayerTransformInputKernel * -# @ref NEWinogradLayerTransformOutputKernel - * -# @ref NEGEMMAssemblyDispatch + * -# @ref cpu::CpuGemmAssemblyDispatch * -# @ref CPPPermute (three times: weights, input and output) * * @note Some Winograd configurations (i.e. F(2x2, 5x5), F(4x4, 5x5)) are supported only with enable_fast_math = true diff --git a/src/core/NEON/kernels/assembly/NEGEMMAssemblyWrapperKernel.h b/src/core/NEON/kernels/assembly/NEGEMMAssemblyWrapperKernel.h deleted file mode 100644 index 7fcf2b1e4d..0000000000 --- a/src/core/NEON/kernels/assembly/NEGEMMAssemblyWrapperKernel.h +++ /dev/null @@ -1,120 +0,0 @@ -/* - * Copyright (c) 2018-2020 Arm Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ -#ifndef ARM_COMPUTE_ASSEMBLY_GEMM_KERNEL_WRAPPER_KERNEL_H -#define ARM_COMPUTE_ASSEMBLY_GEMM_KERNEL_WRAPPER_KERNEL_H - -#include "arm_compute/core/Utils.h" -#include "arm_compute/core/Validate.h" -#include "arm_gemm_compute_iface.hpp" -#include "src/core/NEON/INEKernel.h" - -#include "gemm_common.hpp" - -namespace arm_compute -{ -class ITensor; - -/** This class is a wrapper for the assembly kernels. - * - * Some kernels were written in assembly and highly optimised for specific CPUs like A53 or A55. - * This class works as a wrapper for these assembly kernels. The arm compute library creates an instance - * of NEGEMMAssemblyWrapperKernel and other auxiliary data structures to execute a single assembly kernel - * in the context of an NEFunctions. 
- * - * The type T is the type of the actual kernel implemented in assembly which is of type - * template class GemmCommon - * - * - */ -template -class NEGEMMAssemblyWrapperKernel final : public INEKernel -{ -public: - /** Constructor - */ - NEGEMMAssemblyWrapperKernel() - : _kernel(nullptr), _name("NEGEMMAssemblyWrapperKernel") - { - } - - NEGEMMAssemblyWrapperKernel(NEGEMMAssemblyWrapperKernel &) = delete; - NEGEMMAssemblyWrapperKernel(NEGEMMAssemblyWrapperKernel &&) = default; - NEGEMMAssemblyWrapperKernel &operator=(NEGEMMAssemblyWrapperKernel &) = delete; - - const char *name() const override - { - return _name.c_str(); - } - - void run(const Window &window, const ThreadInfo &info) override - { - ARM_COMPUTE_ERROR_ON_NULLPTR((reinterpret_cast(_kernel))); - ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this); - - auto win = arm_gemm::to_ndcoord(window); - - arm_gemm::ndcoord_t thread_locator{}; - - _kernel->execute(win, thread_locator, info.thread_id); - } - - // Inherited methods overridden: - void run_nd(const Window &window, const ThreadInfo &info, const Window &thread_locator) override - { - ARM_COMPUTE_ERROR_ON_NULLPTR((reinterpret_cast(_kernel))); - ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this); - - //convert between arm_compute and arm_gemm types - auto ndc_win = arm_gemm::to_ndcoord(window); - auto ndc_tlc = arm_gemm::to_ndcoord(thread_locator); - - _kernel->execute(ndc_win, ndc_tlc, info.thread_id); - } - - /** Initialise the kernel's input and output. - * - * @param[in] kernel Pointer to an assembly kernel implementation. - * @param[in] num_threads Number of concurrent threads which will execute the kernel. - */ - void configure(arm_gemm::GemmCommon *kernel, std::string kernel_name_tag) - { - ARM_COMPUTE_ERROR_ON_NULLPTR((reinterpret_cast(kernel))); - _kernel = kernel; - - Window win = to_window(kernel->get_window_size()); - - INEKernel::configure(win); - - if(!kernel_name_tag.empty()) - { - _name += "/" + kernel_name_tag; - } - } - -private: - arm_gemm::GemmCommon *_kernel; - std::string _name; -}; -} // namespace arm_compute -#endif /* ARM_COMPUTE_ASSEMBLY_GEMM_KERNEL_WRAPPER_KERNEL_H */ diff --git a/src/core/NEON/kernels/assembly/arm_gemm.hpp b/src/core/NEON/kernels/assembly/arm_gemm.hpp deleted file mode 100644 index 3088b080d6..0000000000 --- a/src/core/NEON/kernels/assembly/arm_gemm.hpp +++ /dev/null @@ -1,189 +0,0 @@ -/* - * Copyright (c) 2018-2020 Arm Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ -#pragma once - -#include -#include - -#include "arm_gemm_local.hpp" -#include "gemm_common.hpp" - -namespace arm_gemm -{ -enum class GemmMethod -{ - DEFAULT, - GEMV_BATCHED, - GEMV_PRETRANSPOSED, - GEMV_NATIVE_TRANSPOSED, - GEMM_NATIVE, - GEMM_HYBRID, - GEMM_INTERLEAVED, - GEMM_INTERLEAVED_2D, - QUANTIZE_WRAPPER, - QUANTIZE_WRAPPER_2D, - GEMM_HYBRID_QUANTIZED, - INDIRECT_GEMM, - CONVOLUTION_GEMM -}; - -struct KernelDescription -{ - GemmMethod method = GemmMethod::DEFAULT; - std::string name = ""; - bool is_default = false; - uint64_t cycle_estimate = 0; - - KernelDescription(GemmMethod m, std::string n, bool d = false, uint64_t c = 0) - : method(m), name(n), is_default(d), cycle_estimate(c) - { - } - KernelDescription() noexcept - { - } -}; - -struct GemmConfig -{ - GemmMethod method = GemmMethod::DEFAULT; - std::string filter = ""; - unsigned int inner_block_size = 0; - unsigned int outer_block_size = 0; - - GemmConfig(GemmMethod method) - : method(method) - { - } - GemmConfig() - { - } -}; - -struct Activation -{ - enum class Type - { - None, - ReLU, - BoundedReLU - }; - - Type type; - float param1; - float param2; - - Activation(Type type = Type::None, float p1 = 0.0f, float p2 = 0.0f) - : type(type), param1(p1), param2(p2) - { - } -}; - -struct GemmArgs -{ -public: - const CPUInfo *_ci; - unsigned int _Msize; - unsigned int _Nsize; - unsigned int _Ksize; - unsigned int _Ksections; - unsigned int _nbatches; - unsigned int _nmulti; - bool _indirect_input; - Activation _act; - int _maxthreads; - const GemmConfig *_cfg; - - GemmArgs(const CPUInfo *ci, unsigned int M, unsigned int N, - unsigned int K, unsigned int Ksections, unsigned int nbatches, - unsigned int nmulti, bool indirect_input, Activation act, const int maxthreads, - const GemmConfig *cfg = nullptr) - : _ci(ci), _Msize(M), _Nsize(N), _Ksize(K), _Ksections(Ksections), _nbatches(nbatches), _nmulti(nmulti), _indirect_input(indirect_input), _act(act), _maxthreads(maxthreads), _cfg(cfg) - { - } -}; - -struct Requantize32 -{ -public: - const int32_t *bias = nullptr; - size_t bias_multi_stride = 0; - int32_t a_offset = 0; - int32_t b_offset = 0; - int32_t c_offset = 0; - bool per_channel_requant = false; - int32_t per_layer_left_shift = 0; - int32_t per_layer_right_shift = 0; - int32_t per_layer_mul = 0; - const int32_t *per_channel_left_shifts = nullptr; - const int32_t *per_channel_right_shifts = nullptr; - const int32_t *per_channel_muls = nullptr; - int32_t minval = 0; - int32_t maxval = 0; - - Requantize32() = default; - - // Constructor for per-tensor quantization - Requantize32(const int32_t *bias, size_t bias_multi_stride, - int32_t a_offset, int32_t b_offset, int32_t c_offset, - int32_t requant_shift, int32_t requant_mul, int32_t minv, int32_t maxv) - : bias(bias), bias_multi_stride(bias_multi_stride), a_offset(a_offset), b_offset(b_offset), c_offset(c_offset), per_channel_requant(false), per_layer_left_shift(std::max(requant_shift, 0)), - per_layer_right_shift(std::min(requant_shift, 0)), per_layer_mul(requant_mul), minval(minv), maxval(maxv) - { - } - - // Constructor for per-channel quantization - Requantize32(const int32_t *bias, size_t bias_multi_stride, - int32_t a_offset, int32_t b_offset, int32_t c_offset, - const int32_t *requant_left_shifts, - const int32_t *requant_right_shifts, - const int32_t *requant_muls, - int32_t minv, int32_t maxv) - : bias(bias), bias_multi_stride(bias_multi_stride), a_offset(a_offset), b_offset(b_offset), c_offset(c_offset), per_channel_requant(true), 
per_channel_left_shifts(requant_left_shifts), - per_channel_right_shifts(requant_right_shifts), per_channel_muls(requant_muls), minval(minv), maxval(maxv) - { - } -}; - -struct Nothing -{ -}; - -template -using UniqueGemmCommon = std::unique_ptr>; - -/* Low level API calls. - * These are implemented as 'GemmArgs' versions, or with the arguments explicitly listed. */ - -/* get_gemm_method(): Given the templated types and provided parameters, - * which is the preferred method to implement this GEMM? */ -template -KernelDescription get_gemm_method(const GemmArgs &args, const OutputStage & = {}); - -template -UniqueGemmCommon gemm(const GemmArgs &args, const OutputStage & = {}); - -template -std::vector get_compatible_kernels(const GemmArgs &args, const OutputStage & = {}); - -} // namespace arm_gemm diff --git a/src/core/NEON/kernels/assembly/arm_gemm_compute_iface.hpp b/src/core/NEON/kernels/assembly/arm_gemm_compute_iface.hpp deleted file mode 100644 index d62047797f..0000000000 --- a/src/core/NEON/kernels/assembly/arm_gemm_compute_iface.hpp +++ /dev/null @@ -1,122 +0,0 @@ -/* - * Copyright (c) 2020 Arm Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ -#pragma once - -#include "arm_compute/core/Window.h" -#include "arm_compute/core/Dimensions.h" - -#include "ndrange.hpp" - -#include - -/* This file contains mapping between integral types used in arm_compute and arm_gemm - * These two codebases both require a degree of separation for the sake of modularity - * so maintain their own types which represent similar information. 
- */ - -namespace arm_gemm { - -//we want to unify the maximum number of dimensions used beween arm_gemm and arm compute library -constexpr std::size_t ndrange_max = - arm_compute::Dimensions::num_max_dimensions; - -using ndrange_t=NDRange; -using ndcoord_t=NDCoordinate; - -/* Converts an `arm_gemm::ndrange_t` to a `arm_compute::Window` - * - * As `NDRange` does not not encode start positions, we specify - * the start to be zero in the produced `arm_compute::Window` - * - * @param [ndr] the `arm_gemm::ndrange_t` we wish to convert into a `arm_compute::Window` - * @returns an `arm_compute::Window` representing the same dimensional ranges as `ndr` - */ -inline arm_compute::Window to_window(const ndrange_t& ndr) { - arm_compute::Window win; - - for(unsigned int i = 0; i!=ndrange_max; ++i) { - //populate the window with the dimensions of the NDRange - win.set(i, arm_compute::Window::Dimension(0, ndr.get_size(i))); - } - - return win; -} - -/* - * Converts an `arm_gemm::ndcoord_t` to a `arm_compute::Window` - * - * @param [ndc] the `arm_gemm::ndcoord_t` we wish to convert into a `arm_compute::Window` - * @returns an `arm_compute::Window` representing the same dimensional ranges as `ndc` - */ -inline arm_compute::Window to_window(const ndcoord_t& ndc) { - arm_compute::Window win; - - for(unsigned int i = 0; i!=ndrange_max; ++i) { - const auto start = ndc.get_position(i); - const auto size = ndc.get_size(i); - const auto stop = start + size; - - //populate the window with the dimensions of the NDRange - win.set(i, arm_compute::Window::Dimension(start, stop)); - } - - return win; -} - -/** Convert an `arm_compute::Window` to an `arm_gemm::NDRange` of the same max dimensions - * - * It should be noted that `arm_compute::Window` specifies a `start()` and an `end()` - * where as `arm_gemm::ndrange_t` only has a size, as a result we store the delta between the range - * - * @param [win] the `arm_compute::Window` we want to convert to `arm_gemm::ndrange_t` - * @return the resultant ndrange_t - */ -inline ndrange_t to_ndrange(const arm_compute::Window& win) { - return { - static_cast(win[0].end() - win[0].start()), - static_cast(win[1].end() - win[1].start()), - static_cast(win[2].end() - win[2].start()), - static_cast(win[3].end() - win[3].start()), - static_cast(win[4].end() - win[4].start()), - static_cast(win[5].end() - win[5].start()) - }; -} - -/** Convert an `arm_compute::Window` to an `arm_gemm::NDCoord` of the same max dimensions - * - * @param [win] the `arm_compute::Window` we want to convert to `arm_gemm::ndcoord_t` - * @return the resultant ndcoord_t - */ -inline ndcoord_t to_ndcoord(const arm_compute::Window& win) { - return { - { static_cast(win[0].start()), static_cast(win[0].end() - win[0].start()) }, - { static_cast(win[1].start()), static_cast(win[1].end() - win[1].start()) }, - { static_cast(win[2].start()), static_cast(win[2].end() - win[2].start()) }, - { static_cast(win[3].start()), static_cast(win[3].end() - win[3].start()) }, - { static_cast(win[4].start()), static_cast(win[4].end() - win[4].start()) }, - { static_cast(win[5].start()), static_cast(win[5].end() - win[5].start()) } - }; -} - -} //namespace arm_gemm diff --git a/src/core/NEON/kernels/assembly/arm_gemm_local.hpp b/src/core/NEON/kernels/assembly/arm_gemm_local.hpp deleted file mode 100644 index c08ed2d5e3..0000000000 --- a/src/core/NEON/kernels/assembly/arm_gemm_local.hpp +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Copyright (c) 2018-2020 Arm Limited. 
- * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ -#pragma once - -/* This file is used to configure integration-specific aspects of arm_gemm into ACL */ - -#include "arm_compute/core/CPP/CPPTypes.h" - -using CPUModel = arm_compute::CPUModel; -using CPUInfo = arm_compute::CPUInfo; diff --git a/src/core/NEON/kernels/assembly/convolution_parameters.hpp b/src/core/NEON/kernels/assembly/convolution_parameters.hpp deleted file mode 100644 index d0ef5b539f..0000000000 --- a/src/core/NEON/kernels/assembly/convolution_parameters.hpp +++ /dev/null @@ -1,65 +0,0 @@ -/* - * Copyright (c) 2018-2020 Arm Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ -#pragma once - -#include - -namespace arm_gemm -{ -/* - * Parameter set for "convolution" type GEMM. - * - * For a "convolution" GEMM, the GEMM parameters (M, K) are specified as if - * an im2row had been performed on the input tensor to generate the operand - * matrix, but instead this structure describes the convolution parameters - * such that this can be done on the fly. - * - * The parameters describe the convolution details - the notional shape of - * the input and output tensors, whether padding is to be applied, the size - * of the kernel and a constant value to be used for padding (needed for - * quantized tensors). 
- * - * The second part describes the layout of the input tensor in memory, which - * is assumed to be in NHWC format. This consists of a base pointer and - * strides for columns, rows and batches. 'multis' are not supported for - * convolution type GEMMs. - */ -struct ConvolutionParameters -{ - int64_t input_width; - int64_t input_height; - int64_t input_channels; - int64_t kernel_width; - int64_t kernel_height; - int64_t output_width; - int64_t output_height; - int64_t output_stride_w; - int64_t output_stride_h; - // output_channels not included as they do not affect the input. - int64_t padding_top; - int64_t padding_left; - float padding_value; -}; - -} // namespace arm_gemm diff --git a/src/core/NEON/kernels/assembly/gemm_common.hpp b/src/core/NEON/kernels/assembly/gemm_common.hpp deleted file mode 100644 index e1fb7a45a8..0000000000 --- a/src/core/NEON/kernels/assembly/gemm_common.hpp +++ /dev/null @@ -1,229 +0,0 @@ -/* - * Copyright (c) 2017-2020 Arm Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ -#pragma once - -#include "convolution_parameters.hpp" -#include "ndrange.hpp" - -#include - -namespace arm_gemm -{ -// Abstract class for the GEMM/GEMV functions. -// -// GEMM implementations may be "native" (never require any input -// permutation), "pretransposed" (require permutation up-front) or require -// working space (permute as they go along). This interface should support -// all of them. - -// The real GemmCommon class is templated based on the operand and return -// type. This is an interface class which is independent of those types. -class IGemmCommon -{ -public: - /* Pass in the pointers to the arrays to be operated on and their - * strides. This "generic" version uses void *s, the preferred version - * is the one provided by templated GemmCommon (below) which takes - * appropriately typed pointers. If B is pretransposed (see below) then - * the settings for B here are ignored. 
- */ - virtual void set_arrays_generic(const void *A, const int lda, const int A_batch_stride, const int A_multi_stride, - const void *B, const int ldb, /* batches share B */ const int B_multi_stride, - void *C, const int ldc, const int C_batch_stride, const int C_multi_stride, - const void *bias, /* no row or batch stride needed */ const int bias_multi_stride) = 0; - - /** @returns an ndrange containing ranges of the compute space which can be - * broken up and parallelised over - */ - virtual ndrange_t get_window_size() const = 0; - - /* The maximum thread count is specified when the GEMM is created. Some - * implementations need to know how many threads will actually run in - * order to work properly. - * - * In some cases, after creating the GEMM the number of threads needs to - * be reduced (e.g. not enough work to split across threads). This - * method allows the number of actual threads to be run to be set (must - * be equal or lower). - * - * This has an empty default implementation, as GEMMs which don't care - * about thread count can safely ignore this. - */ - virtual void set_nthreads(int) {}; - - /* Whether this GEMM can be dynamically scheduled or not. */ - virtual bool supports_dynamic_scheduling() const - { - return false; - } - - /** Main execute member fucntion - * @param [in] work_range specifies the range of work we want to be computed, total range defined by get_window_size() - * @param [in] thread_locator where are we inside of the thread space - * @naram [in] threadid a unique threadid - */ - virtual void execute(const ndcoord_t &work_range, const ndcoord_t &thread_locator, int threadid) = 0; - - /*** Working space interface (optional) ***/ - /* Total number of bytes of temporary working space needed. If zero, it's not necessary to call set_working_space(). */ - virtual size_t get_working_size() const - { - return 0; - } - /* Provide working space buffer - the void * passed in must remain allocated for the duration of any execute calls. */ - virtual void set_working_space(void *) {}; - - /*** "Pretransposed" interface (optional) ***/ - /* Is this object set up for pretranspose? If so, pretranspose_array() needs to be called before execute(); */ - virtual bool B_is_pretransposed() const - { - return false; - } - /* Does pretranspose still need to be done? */ - virtual bool B_pretranspose_required() const - { - return false; - } - /* Total number of bytes of space needed for pretransposed arrays. */ - virtual size_t get_B_pretransposed_array_size() const - { - return 0; - } - /* Perform pretranspose - arguments are output, input, input row stride and input multi stride. */ - /* The "real" version of this depends on the templated operand type (see below). */ - virtual void pretranspose_B_array_generic(void *, const void *, const int, const int) = 0; - /* Set pretransposed data - the void * passed in must previously have been passed to pretranspose_B_array() for the same or a similar GEMM. */ - virtual void set_pretransposed_B_data(void *) - { - } - - /*** "Quantized bias" interface (optional) ***/ - /* Set the bias vector for quantized GEMMs */ - virtual void set_quantized_bias(const int32_t *, size_t) - { - } - - /*** Indirect interface (optional) ***/ - /* Set the indirect table. 
This comprises a number of values per kernel point, and a densely packed array of pointers, - * multis * batches * kernel_points */ - virtual void set_indirect_parameters_generic(size_t, const void *const *const *) - { - } - - /*** Convolution interface (optional) ***/ - /* Set the convolution parameters. */ - virtual void set_convolution_parameters(ConvolutionParameters) - { - } - - // Destructor - virtual ~IGemmCommon() - { - } -}; - -/* "Real" GemmCommon class which is templated on the operand and return types. - * - * In addition to correctly typed versions of the functions that operate on - * operand and return data, this class provides a default implementation of - * 'set_arrays' to capture the provided arguments in protected class - * members, as essentially any implementation will need these. - */ -template -class GemmCommon : public IGemmCommon -{ -protected: - const To *_Aptr = nullptr; - int _lda = 0; - int _A_batch_stride = 0; - int _A_multi_stride = 0; - const To *_Bptr = nullptr; - int _ldb = 0; - int _B_multi_stride = 0; - Tr *_Cptr = nullptr; - int _ldc = 0; - int _C_batch_stride = 0; - int _C_multi_stride = 0; - const Tr *_bias = nullptr; - int _bias_multi_stride = 0; - -public: - /* Pass in the pointers to the arrays to be operated on and their - * strides (templated version with appropriate types). */ - virtual void set_arrays(const To *A, const int lda, const int A_batch_stride, const int A_multi_stride, - const To *B, const int ldb, /* batches share B */ const int B_multi_stride, - Tr *C, const int ldc, const int C_batch_stride, const int C_multi_stride, - const Tr *bias, /* no row or batch stride needed */ const int bias_multi_stride) - { - _Aptr = A; - _lda = lda; - _A_batch_stride = A_batch_stride; - _A_multi_stride = A_multi_stride; - _Bptr = B; - _ldb = ldb; - _B_multi_stride = B_multi_stride; - _Cptr = C; - _ldc = ldc; - _C_batch_stride = C_batch_stride; - _C_multi_stride = C_multi_stride; - _bias = bias; - _bias_multi_stride = bias_multi_stride; - } - - /* Implementation of the void * overload which casts its arguments to the appropriate type. */ - void set_arrays_generic(const void *A, const int lda, const int A_batch_stride, const int A_multi_stride, - const void *B, const int ldb, /* batches share B */ const int B_multi_stride, - void *C, const int ldc, const int C_batch_stride, const int C_multi_stride, - const void *bias, /* no row or batch stride needed */ const int bias_multi_stride) override - { - set_arrays(static_cast(A), lda, A_batch_stride, A_multi_stride, - static_cast(B), ldb, B_multi_stride, - static_cast(C), ldc, C_batch_stride, C_multi_stride, - static_cast(bias), bias_multi_stride); - } - - /*** "Pretransposed" interface ***/ - - /* Perform pretranspose - the void * passed in must remain allocated for the duration of any execute calls. */ - /* Arguments are: output buffer pointer, source pointer, source row stride, source multi stride */ - virtual void pretranspose_B_array(void *, const To *, const int, const int) {}; - - /* Implementation of the void * overload which casts its arguments to the appropriate type. 
*/ - void pretranspose_B_array_generic(void *out, const void *in, const int row_stride, const int multi_stride) override - { - pretranspose_B_array(out, static_cast(in), row_stride, multi_stride); - } - - /*** Indirect interface ***/ - virtual void set_indirect_parameters(size_t, const To *const *const *) - { - } - - void set_indirect_parameters_generic(size_t sz, const void *const *const *ptr) override - { - set_indirect_parameters(sz, reinterpret_cast(ptr)); - } -}; - -} // namespace arm_gemm diff --git a/src/core/NEON/kernels/assembly/ndrange.hpp b/src/core/NEON/kernels/assembly/ndrange.hpp deleted file mode 100644 index a2bb60f687..0000000000 --- a/src/core/NEON/kernels/assembly/ndrange.hpp +++ /dev/null @@ -1,199 +0,0 @@ -/* - * Copyright (c) 2019-2020 Arm Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ -#pragma once - -#include -#include -#include -#include - -namespace arm_gemm -{ -template -class NDRange -{ -private: - std::array m_sizes{}; - std::array m_totalsizes{}; - - class NDRangeIterator - { - private: - const NDRange &m_parent; - unsigned int m_pos = 0; - unsigned int m_end = 0; - - public: - NDRangeIterator(const NDRange &p, unsigned int s, unsigned int e) - : m_parent(p), m_pos(s), m_end(e) - { - } - - bool done() const - { - return (m_pos >= m_end); - } - - unsigned int dim(unsigned int d) const - { - unsigned int r = m_pos; - - if(d < (D - 1)) - { - r %= m_parent.m_totalsizes[d]; - } - - if(d > 0) - { - r /= m_parent.m_totalsizes[d - 1]; - } - - return r; - } - - bool next_dim0() - { - m_pos++; - - return !done(); - } - - bool next_dim1() - { - m_pos += m_parent.m_sizes[0] - dim(0); - - return !done(); - } - - unsigned int dim0_max() const - { - unsigned int offset = std::min(m_end - m_pos, m_parent.m_sizes[0] - dim(0)); - - return dim(0) + offset; - } - }; - - void set_totalsizes() - { - unsigned int t = 1; - - for(unsigned int i = 0; i < D; i++) - { - if(m_sizes[i] == 0) - { - m_sizes[i] = 1; - } - - t *= m_sizes[i]; - - m_totalsizes[i] = t; - } - } - -public: - NDRange &operator=(const NDRange &rhs) = default; - NDRange(const NDRange &rhs) = default; - - template - NDRange(T... ts) - : m_sizes{ ts... 
} - { - set_totalsizes(); - } - - NDRange(const std::array &n) - : m_sizes(n) - { - set_totalsizes(); - } - - NDRangeIterator iterator(unsigned int start, unsigned int end) const - { - return NDRangeIterator(*this, start, end); - } - - unsigned int total_size() const - { - return m_totalsizes[D - 1]; - } - - unsigned int get_size(unsigned int v) const - { - return m_sizes[v]; - } -}; - -/** NDCoordinate builds upon a range, but specifies a starting position - * in addition to a size which it inherits from NDRange - */ -template -class NDCoordinate : public NDRange -{ - using int_t = unsigned int; - using ndrange_t = NDRange; - - std::array m_positions{}; - -public: - NDCoordinate &operator=(const NDCoordinate &rhs) = default; - NDCoordinate(const NDCoordinate &rhs) = default; - NDCoordinate(const std::initializer_list> &list) - { - std::array sizes{}; - - std::size_t i = 0; - for(auto &p : list) - { - m_positions[i] = p.first; - sizes[i++] = p.second; - } - - //update the parents sizes - static_cast(*this) = ndrange_t(sizes); - } - - int_t get_position(int_t d) const - { - assert(d < N); - - return m_positions[d]; - } - - void set_position(int_t d, int_t v) - { - assert(d < N); - - m_positions[d] = v; - } - - int_t get_position_end(int_t d) const - { - return get_position(d) + ndrange_t::get_size(d); - } -}; //class NDCoordinate - -using ndrange_t = NDRange<6>; -using ndcoord_t = NDCoordinate<6>; - -} // namespace arm_gemm diff --git a/src/core/cpu/kernels/assembly/CpuGemmAssemblyWrapperKernel.h b/src/core/cpu/kernels/assembly/CpuGemmAssemblyWrapperKernel.h new file mode 100644 index 0000000000..4b7b092d01 --- /dev/null +++ b/src/core/cpu/kernels/assembly/CpuGemmAssemblyWrapperKernel.h @@ -0,0 +1,126 @@ +/* + * Copyright (c) 2018-2021 Arm Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#ifndef ARM_COMPUTE_ASSEMBLY_GEMM_KERNEL_WRAPPER_KERNEL_H +#define ARM_COMPUTE_ASSEMBLY_GEMM_KERNEL_WRAPPER_KERNEL_H + +#include "arm_compute/core/Utils.h" +#include "arm_compute/core/Validate.h" +#include "src/core/NEON/INEKernel.h" +#include "src/core/cpu/kernels/assembly/arm_gemm_compute_iface.hpp" + +#include "gemm_common.hpp" + +namespace arm_compute +{ +class ITensor; + +namespace cpu +{ +namespace kernel +{ +/** This class is a wrapper for the assembly kernels. + * + * Some kernels were written in assembly and highly optimised for specific CPUs like A53 or A55. + * This class works as a wrapper for these assembly kernels. 
The arm compute library creates an instance
+ * of CpuGemmAssemblyWrapperKernel and other auxiliary data structures to execute a single assembly kernel
+ * in the context of an NEFunctions.
+ *
+ * The type T is the type of the actual kernel implemented in assembly which is of type
+ *         template<typename To, typename Tr> class GemmCommon
+ *
+ *
+ */
+template <typename TypeInput, typename TypeOutput>
+class CpuGemmAssemblyWrapperKernel final : public INEKernel
+{
+public:
+    /** Constructor
+     */
+    CpuGemmAssemblyWrapperKernel()
+        : _kernel(nullptr), _name("CpuGemmAssemblyWrapperKernel")
+    {
+    }
+
+    CpuGemmAssemblyWrapperKernel(CpuGemmAssemblyWrapperKernel &) = delete;
+    CpuGemmAssemblyWrapperKernel(CpuGemmAssemblyWrapperKernel &&) = default;
+    CpuGemmAssemblyWrapperKernel &operator=(CpuGemmAssemblyWrapperKernel &) = delete;
+
+    const char *name() const override
+    {
+        return _name.c_str();
+    }
+
+    void run(const Window &window, const ThreadInfo &info) override
+    {
+        ARM_COMPUTE_ERROR_ON_NULLPTR((reinterpret_cast<void *>(_kernel)));
+        ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
+
+        auto win = arm_gemm::to_ndcoord(window);
+
+        arm_gemm::ndcoord_t thread_locator{};
+
+        _kernel->execute(win, thread_locator, info.thread_id);
+    }
+
+    // Inherited methods overridden:
+    void run_nd(const Window &window, const ThreadInfo &info, const Window &thread_locator) override
+    {
+        ARM_COMPUTE_ERROR_ON_NULLPTR((reinterpret_cast<void *>(_kernel)));
+        ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
+
+        //convert between arm_compute and arm_gemm types
+        auto ndc_win = arm_gemm::to_ndcoord(window);
+        auto ndc_tlc = arm_gemm::to_ndcoord(thread_locator);
+
+        _kernel->execute(ndc_win, ndc_tlc, info.thread_id);
+    }
+
+    /** Initialise the kernel's input and output.
+     *
+     * @param[in] kernel          Pointer to an assembly kernel implementation.
+     * @param[in] kernel_name_tag Tag to be attached to the kernel's name.
+     */
+    void configure(arm_gemm::GemmCommon<TypeInput, TypeOutput> *kernel, std::string kernel_name_tag)
+    {
+        ARM_COMPUTE_ERROR_ON_NULLPTR((reinterpret_cast<void *>(kernel)));
+        _kernel = kernel;
+
+        Window win = to_window(kernel->get_window_size());
+
+        INEKernel::configure(win);
+
+        if(!kernel_name_tag.empty())
+        {
+            _name += "/" + kernel_name_tag;
+        }
+    }
+
+private:
+    arm_gemm::GemmCommon<TypeInput, TypeOutput> *_kernel;
+    std::string                                  _name;
+};
+} // namespace kernel
+} // namespace cpu
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_ASSEMBLY_GEMM_KERNEL_WRAPPER_KERNEL_H */
diff --git a/src/core/cpu/kernels/assembly/arm_gemm.hpp b/src/core/cpu/kernels/assembly/arm_gemm.hpp
new file mode 100644
index 0000000000..624e9e94dc
--- /dev/null
+++ b/src/core/cpu/kernels/assembly/arm_gemm.hpp
@@ -0,0 +1,189 @@
+/*
+ * Copyright (c) 2018-2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#pragma once + +#include +#include + +#include "arm_gemm_local.hpp" +#include "gemm_common.hpp" + +namespace arm_gemm +{ +enum class GemmMethod +{ + DEFAULT, + GEMV_BATCHED, + GEMV_PRETRANSPOSED, + GEMV_NATIVE_TRANSPOSED, + GEMM_NATIVE, + GEMM_HYBRID, + GEMM_INTERLEAVED, + GEMM_INTERLEAVED_2D, + QUANTIZE_WRAPPER, + QUANTIZE_WRAPPER_2D, + GEMM_HYBRID_QUANTIZED, + INDIRECT_GEMM, + CONVOLUTION_GEMM +}; + +struct KernelDescription +{ + GemmMethod method = GemmMethod::DEFAULT; + std::string name = ""; + bool is_default = false; + uint64_t cycle_estimate = 0; + + KernelDescription(GemmMethod m, std::string n, bool d = false, uint64_t c = 0) + : method(m), name(n), is_default(d), cycle_estimate(c) + { + } + KernelDescription() noexcept + { + } +}; + +struct GemmConfig +{ + GemmMethod method = GemmMethod::DEFAULT; + std::string filter = ""; + unsigned int inner_block_size = 0; + unsigned int outer_block_size = 0; + + GemmConfig(GemmMethod method) + : method(method) + { + } + GemmConfig() + { + } +}; + +struct Activation +{ + enum class Type + { + None, + ReLU, + BoundedReLU + }; + + Type type; + float param1; + float param2; + + Activation(Type type = Type::None, float p1 = 0.0f, float p2 = 0.0f) + : type(type), param1(p1), param2(p2) + { + } +}; + +struct GemmArgs +{ +public: + const CPUInfo *_ci; + unsigned int _Msize; + unsigned int _Nsize; + unsigned int _Ksize; + unsigned int _Ksections; + unsigned int _nbatches; + unsigned int _nmulti; + bool _indirect_input; + Activation _act; + int _maxthreads; + const GemmConfig *_cfg; + + GemmArgs(const CPUInfo *ci, unsigned int M, unsigned int N, + unsigned int K, unsigned int Ksections, unsigned int nbatches, + unsigned int nmulti, bool indirect_input, Activation act, const int maxthreads, + const GemmConfig *cfg = nullptr) + : _ci(ci), _Msize(M), _Nsize(N), _Ksize(K), _Ksections(Ksections), _nbatches(nbatches), _nmulti(nmulti), _indirect_input(indirect_input), _act(act), _maxthreads(maxthreads), _cfg(cfg) + { + } +}; + +struct Requantize32 +{ +public: + const int32_t *bias = nullptr; + size_t bias_multi_stride = 0; + int32_t a_offset = 0; + int32_t b_offset = 0; + int32_t c_offset = 0; + bool per_channel_requant = false; + int32_t per_layer_left_shift = 0; + int32_t per_layer_right_shift = 0; + int32_t per_layer_mul = 0; + const int32_t *per_channel_left_shifts = nullptr; + const int32_t *per_channel_right_shifts = nullptr; + const int32_t *per_channel_muls = nullptr; + int32_t minval = 0; + int32_t maxval = 0; + + Requantize32() = default; + + // Constructor for per-tensor quantization + Requantize32(const int32_t *bias, size_t bias_multi_stride, + int32_t a_offset, int32_t b_offset, int32_t c_offset, + int32_t requant_shift, int32_t requant_mul, int32_t minv, int32_t maxv) + : bias(bias), bias_multi_stride(bias_multi_stride), a_offset(a_offset), b_offset(b_offset), c_offset(c_offset), per_channel_requant(false), per_layer_left_shift(std::max(requant_shift, 0)), + per_layer_right_shift(std::min(requant_shift, 0)), per_layer_mul(requant_mul), minval(minv), maxval(maxv) + { + } + + // Constructor for per-channel quantization + Requantize32(const int32_t *bias, size_t bias_multi_stride, + int32_t a_offset, int32_t b_offset, int32_t c_offset, + const int32_t 
*requant_left_shifts,
+                 const int32_t *requant_right_shifts,
+                 const int32_t *requant_muls,
+                 int32_t minv, int32_t maxv)
+        : bias(bias), bias_multi_stride(bias_multi_stride), a_offset(a_offset), b_offset(b_offset), c_offset(c_offset), per_channel_requant(true), per_channel_left_shifts(requant_left_shifts),
+          per_channel_right_shifts(requant_right_shifts), per_channel_muls(requant_muls), minval(minv), maxval(maxv)
+    {
+    }
+};
+
+struct Nothing
+{
+};
+
+template <typename Top, typename Tret>
+using UniqueGemmCommon = std::unique_ptr<GemmCommon<Top, Tret>>;
+
+/* Low level API calls.
+ * These are implemented as 'GemmArgs' versions, or with the arguments explicitly listed. */
+
+/* get_gemm_method(): Given the templated types and provided parameters,
+ * which is the preferred method to implement this GEMM? */
+template <typename Top, typename Tret, class OutputStage = Nothing>
+KernelDescription get_gemm_method(const GemmArgs &args, const OutputStage & = {});
+
+template <typename Top, typename Tret, class OutputStage = Nothing>
+UniqueGemmCommon<Top, Tret> gemm(const GemmArgs &args, const OutputStage & = {});
+
+template <typename Top, typename Tret, class OutputStage = Nothing>
+std::vector<KernelDescription> get_compatible_kernels(const GemmArgs &args, const OutputStage & = {});
+
+} // namespace arm_gemm
diff --git a/src/core/cpu/kernels/assembly/arm_gemm_compute_iface.hpp b/src/core/cpu/kernels/assembly/arm_gemm_compute_iface.hpp
new file mode 100644
index 0000000000..718fcd1fb4
--- /dev/null
+++ b/src/core/cpu/kernels/assembly/arm_gemm_compute_iface.hpp
@@ -0,0 +1,130 @@
+/*
+ * Copyright (c) 2020-2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#pragma once
+
+#include "arm_compute/core/Dimensions.h"
+#include "arm_compute/core/Window.h"
+
+#include "ndrange.hpp"
+
+#include <cstddef>
+
+/* This file contains mapping between integral types used in arm_compute and arm_gemm
+ * These two codebases both require a degree of separation for the sake of modularity
+ * so maintain their own types which represent similar information.
+ */
+
+namespace arm_gemm
+{
+//we want to unify the maximum number of dimensions used between arm_gemm and arm compute library
+constexpr std::size_t ndrange_max =
+    arm_compute::Dimensions<unsigned int>::num_max_dimensions;
+
+using ndrange_t = NDRange<ndrange_max>;
+using ndcoord_t = NDCoordinate<ndrange_max>;
+
+/* Converts an `arm_gemm::ndrange_t` to a `arm_compute::Window`
+ *
+ * As `NDRange` does not encode start positions, we specify
+ * the start to be zero in the produced `arm_compute::Window`
+ *
+ * @param [ndr] the `arm_gemm::ndrange_t` we wish to convert into a `arm_compute::Window`
+ * @returns an `arm_compute::Window` representing the same dimensional ranges as `ndr`
+ */
+inline arm_compute::Window to_window(const ndrange_t &ndr)
+{
+    arm_compute::Window win;
+
+    for(unsigned int i = 0; i != ndrange_max; ++i)
+    {
+        //populate the window with the dimensions of the NDRange
+        win.set(i, arm_compute::Window::Dimension(0, ndr.get_size(i)));
+    }
+
+    return win;
+}
+
+/*
+ * Converts an `arm_gemm::ndcoord_t` to a `arm_compute::Window`
+ *
+ * @param [ndc] the `arm_gemm::ndcoord_t` we wish to convert into a `arm_compute::Window`
+ * @returns an `arm_compute::Window` representing the same dimensional ranges as `ndc`
+ */
+inline arm_compute::Window to_window(const ndcoord_t &ndc)
+{
+    arm_compute::Window win;
+
+    for(unsigned int i = 0; i != ndrange_max; ++i)
+    {
+        const auto start = ndc.get_position(i);
+        const auto size  = ndc.get_size(i);
+        const auto stop  = start + size;
+
+        //populate the window with the dimensions of the NDRange
+        win.set(i, arm_compute::Window::Dimension(start, stop));
+    }
+
+    return win;
+}
+
+/** Convert an `arm_compute::Window` to an `arm_gemm::NDRange` of the same max dimensions
+ *
+ * It should be noted that `arm_compute::Window` specifies a `start()` and an `end()`
+ * whereas `arm_gemm::ndrange_t` only has a size, as a result we store the delta between the range
+ *
+ * @param [win] the `arm_compute::Window` we want to convert to `arm_gemm::ndrange_t`
+ * @return the resultant ndrange_t
+ */
+inline ndrange_t to_ndrange(const arm_compute::Window &win)
+{
+    return
+    {
+        static_cast<unsigned int>(win[0].end() - win[0].start()),
+        static_cast<unsigned int>(win[1].end() - win[1].start()),
+        static_cast<unsigned int>(win[2].end() - win[2].start()),
+        static_cast<unsigned int>(win[3].end() - win[3].start()),
+        static_cast<unsigned int>(win[4].end() - win[4].start()),
+        static_cast<unsigned int>(win[5].end() - win[5].start())
+    };
+}
+
+/** Convert an `arm_compute::Window` to an `arm_gemm::NDCoord` of the same max dimensions
+ *
+ * @param [win] the `arm_compute::Window` we want to convert to `arm_gemm::ndcoord_t`
+ * @return the resultant ndcoord_t
+ */
+inline ndcoord_t to_ndcoord(const arm_compute::Window &win)
+{
+    return
+    {
+        { static_cast<unsigned int>(win[0].start()), static_cast<unsigned int>(win[0].end() - win[0].start()) },
+        { static_cast<unsigned int>(win[1].start()), static_cast<unsigned int>(win[1].end() - win[1].start()) },
+        { static_cast<unsigned int>(win[2].start()), static_cast<unsigned int>(win[2].end() - win[2].start()) },
+        { static_cast<unsigned int>(win[3].start()), static_cast<unsigned int>(win[3].end() - win[3].start()) },
+        { static_cast<unsigned int>(win[4].start()), static_cast<unsigned int>(win[4].end() - win[4].start()) },
+        { static_cast<unsigned int>(win[5].start()), static_cast<unsigned int>(win[5].end() - win[5].start()) }
+    };
+}
+
+} //namespace arm_gemm
diff --git a/src/core/cpu/kernels/assembly/arm_gemm_local.hpp b/src/core/cpu/kernels/assembly/arm_gemm_local.hpp
new file mode 100644
index 0000000000..78e0adf31f
--- /dev/null
+++ b/src/core/cpu/kernels/assembly/arm_gemm_local.hpp
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2018-2021 Arm Limited.
+ * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#pragma once + +/* This file is used to configure integration-specific aspects of arm_gemm into ACL */ + +#include "arm_compute/core/CPP/CPPTypes.h" + +using CPUModel = arm_compute::CPUModel; +using CPUInfo = arm_compute::CPUInfo; diff --git a/src/core/cpu/kernels/assembly/convolution_parameters.hpp b/src/core/cpu/kernels/assembly/convolution_parameters.hpp new file mode 100644 index 0000000000..0c1ae58902 --- /dev/null +++ b/src/core/cpu/kernels/assembly/convolution_parameters.hpp @@ -0,0 +1,65 @@ +/* + * Copyright (c) 2018-2021 Arm Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#pragma once + +#include + +namespace arm_gemm +{ +/* + * Parameter set for "convolution" type GEMM. + * + * For a "convolution" GEMM, the GEMM parameters (M, K) are specified as if + * an im2row had been performed on the input tensor to generate the operand + * matrix, but instead this structure describes the convolution parameters + * such that this can be done on the fly. + * + * The parameters describe the convolution details - the notional shape of + * the input and output tensors, whether padding is to be applied, the size + * of the kernel and a constant value to be used for padding (needed for + * quantized tensors). 
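[Editorial aside - illustration only, not part of this patch] A short sketch of the NDRange/NDCoordinate types that the conversion helpers above rely on; their definitions are added in ndrange.hpp further down in this patch. Unspecified sizes are treated as 1, and an NDCoordinate adds a start position per dimension. The function name and values are hypothetical.

#include "ndrange.hpp"

void ndrange_basics()
{
    // 6-dimensional range of 8 x 4 x 2; unspecified trailing sizes default to 1.
    arm_gemm::ndrange_t range(8u, 4u, 2u);
    const unsigned int total = range.total_size(); // 8 * 4 * 2 = 64

    // Walk the first half of the flattened space, dimension 0 moving fastest.
    for(auto it = range.iterator(0, total / 2); !it.done(); it.next_dim0())
    {
        const unsigned int x = it.dim(0); // position in dimension 0
        const unsigned int y = it.dim(1); // position in dimension 1
        (void)x;
        (void)y;
    }

    // Coordinate starting at (2, 1) with sizes (4, 3): position + extent per dimension.
    arm_gemm::ndcoord_t coord{ { 2u, 4u }, { 1u, 3u } };
    const unsigned int end0 = coord.get_position_end(0); // 2 + 4 = 6
    (void)end0;
}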
+ * + * The second part describes the layout of the input tensor in memory, which + * is assumed to be in NHWC format. This consists of a base pointer and + * strides for columns, rows and batches. 'multis' are not supported for + * convolution type GEMMs. + */ +struct ConvolutionParameters +{ + int64_t input_width; + int64_t input_height; + int64_t input_channels; + int64_t kernel_width; + int64_t kernel_height; + int64_t output_width; + int64_t output_height; + int64_t output_stride_w; + int64_t output_stride_h; + // output_channels not included as they do not affect the input. + int64_t padding_top; + int64_t padding_left; + float padding_value; +}; + +} // namespace arm_gemm diff --git a/src/core/cpu/kernels/assembly/gemm_common.hpp b/src/core/cpu/kernels/assembly/gemm_common.hpp new file mode 100644 index 0000000000..4af85ed663 --- /dev/null +++ b/src/core/cpu/kernels/assembly/gemm_common.hpp @@ -0,0 +1,229 @@ +/* + * Copyright (c) 2017-2021 Arm Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#pragma once + +#include "convolution_parameters.hpp" +#include "ndrange.hpp" + +#include + +namespace arm_gemm +{ +// Abstract class for the GEMM/GEMV functions. +// +// GEMM implementations may be "native" (never require any input +// permutation), "pretransposed" (require permutation up-front) or require +// working space (permute as they go along). This interface should support +// all of them. + +// The real GemmCommon class is templated based on the operand and return +// type. This is an interface class which is independent of those types. +class IGemmCommon +{ +public: + /* Pass in the pointers to the arrays to be operated on and their + * strides. This "generic" version uses void *s, the preferred version + * is the one provided by templated GemmCommon (below) which takes + * appropriately typed pointers. If B is pretransposed (see below) then + * the settings for B here are ignored. 
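[Editorial aside - illustration only, not part of this patch] The ConvolutionParameters struct above describes the convolution so the assembly GEMM can gather input patches on the fly instead of materialising an im2row buffer. A hypothetical fill for a 3x3, stride-1, 'same'-padded convolution on a 224x224x64 NHWC input; the sizes are invented for illustration.

#include "convolution_parameters.hpp"

arm_gemm::ConvolutionParameters example_conv_params()
{
    arm_gemm::ConvolutionParameters cp{};
    cp.input_width     = 224;
    cp.input_height    = 224;
    cp.input_channels  = 64;
    cp.kernel_width    = 3;
    cp.kernel_height   = 3;
    cp.output_width    = 224;
    cp.output_height   = 224;
    cp.output_stride_w = 1;   // convolution stride along width
    cp.output_stride_h = 1;   // convolution stride along height
    cp.padding_top     = 1;
    cp.padding_left    = 1;
    cp.padding_value   = 0.f; // zero-point value used for padded elements (quantized inputs)
    return cp;
}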
+     */
+    virtual void set_arrays_generic(const void *A, const int lda, const int A_batch_stride, const int A_multi_stride,
+                                    const void *B, const int ldb, /* batches share B */ const int B_multi_stride,
+                                    void *C, const int ldc, const int C_batch_stride, const int C_multi_stride,
+                                    const void *bias, /* no row or batch stride needed */ const int bias_multi_stride) = 0;
+
+    /** @returns an ndrange containing ranges of the compute space which can be
+     * broken up and parallelised over
+     */
+    virtual ndrange_t get_window_size() const = 0;
+
+    /* The maximum thread count is specified when the GEMM is created. Some
+     * implementations need to know how many threads will actually run in
+     * order to work properly.
+     *
+     * In some cases, after creating the GEMM the number of threads needs to
+     * be reduced (e.g. not enough work to split across threads). This
+     * method allows the number of actual threads to be run to be set (must
+     * be equal or lower).
+     *
+     * This has an empty default implementation, as GEMMs which don't care
+     * about thread count can safely ignore this.
+     */
+    virtual void set_nthreads(int) {};
+
+    /* Whether this GEMM can be dynamically scheduled or not. */
+    virtual bool supports_dynamic_scheduling() const
+    {
+        return false;
+    }
+
+    /** Main execute member function
+     * @param [in] work_range specifies the range of work we want to be computed, total range defined by get_window_size()
+     * @param [in] thread_locator where are we inside of the thread space
+     * @param [in] threadid a unique threadid
+     */
+    virtual void execute(const ndcoord_t &work_range, const ndcoord_t &thread_locator, int threadid) = 0;
+
+    /*** Working space interface (optional) ***/
+    /* Total number of bytes of temporary working space needed. If zero, it's not necessary to call set_working_space(). */
+    virtual size_t get_working_size() const
+    {
+        return 0;
+    }
+    /* Provide working space buffer - the void * passed in must remain allocated for the duration of any execute calls. */
+    virtual void set_working_space(void *) {};
+
+    /*** "Pretransposed" interface (optional) ***/
+    /* Is this object set up for pretranspose? If so, pretranspose_array() needs to be called before execute(); */
+    virtual bool B_is_pretransposed() const
+    {
+        return false;
+    }
+    /* Does pretranspose still need to be done? */
+    virtual bool B_pretranspose_required() const
+    {
+        return false;
+    }
+    /* Total number of bytes of space needed for pretransposed arrays. */
+    virtual size_t get_B_pretransposed_array_size() const
+    {
+        return 0;
+    }
+    /* Perform pretranspose - arguments are output, input, input row stride and input multi stride. */
+    /* The "real" version of this depends on the templated operand type (see below). */
+    virtual void pretranspose_B_array_generic(void *, const void *, const int, const int) = 0;
+    /* Set pretransposed data - the void * passed in must previously have been passed to pretranspose_B_array() for the same or a similar GEMM. */
+    virtual void set_pretransposed_B_data(void *)
+    {
+    }
+
+    /*** "Quantized bias" interface (optional) ***/
+    /* Set the bias vector for quantized GEMMs */
+    virtual void set_quantized_bias(const int32_t *, size_t)
+    {
+    }
+
+    /*** Indirect interface (optional) ***/
+    /* Set the indirect table.
This comprises a number of values per kernel point, and a densely packed array of pointers,
+     * multis * batches * kernel_points */
+    virtual void set_indirect_parameters_generic(size_t, const void *const *const *)
+    {
+    }
+
+    /*** Convolution interface (optional) ***/
+    /* Set the convolution parameters. */
+    virtual void set_convolution_parameters(ConvolutionParameters)
+    {
+    }
+
+    // Destructor
+    virtual ~IGemmCommon()
+    {
+    }
+};
+
+/* "Real" GemmCommon class which is templated on the operand and return types.
+ *
+ * In addition to correctly typed versions of the functions that operate on
+ * operand and return data, this class provides a default implementation of
+ * 'set_arrays' to capture the provided arguments in protected class
+ * members, as essentially any implementation will need these.
+ */
+template <typename To, typename Tr>
+class GemmCommon : public IGemmCommon
+{
+protected:
+    const To *_Aptr = nullptr;
+    int _lda = 0;
+    int _A_batch_stride = 0;
+    int _A_multi_stride = 0;
+    const To *_Bptr = nullptr;
+    int _ldb = 0;
+    int _B_multi_stride = 0;
+    Tr *_Cptr = nullptr;
+    int _ldc = 0;
+    int _C_batch_stride = 0;
+    int _C_multi_stride = 0;
+    const Tr *_bias = nullptr;
+    int _bias_multi_stride = 0;
+
+public:
+    /* Pass in the pointers to the arrays to be operated on and their
+     * strides (templated version with appropriate types). */
+    virtual void set_arrays(const To *A, const int lda, const int A_batch_stride, const int A_multi_stride,
+                            const To *B, const int ldb, /* batches share B */ const int B_multi_stride,
+                            Tr *C, const int ldc, const int C_batch_stride, const int C_multi_stride,
+                            const Tr *bias, /* no row or batch stride needed */ const int bias_multi_stride)
+    {
+        _Aptr = A;
+        _lda = lda;
+        _A_batch_stride = A_batch_stride;
+        _A_multi_stride = A_multi_stride;
+        _Bptr = B;
+        _ldb = ldb;
+        _B_multi_stride = B_multi_stride;
+        _Cptr = C;
+        _ldc = ldc;
+        _C_batch_stride = C_batch_stride;
+        _C_multi_stride = C_multi_stride;
+        _bias = bias;
+        _bias_multi_stride = bias_multi_stride;
+    }
+
+    /* Implementation of the void * overload which casts its arguments to the appropriate type. */
+    void set_arrays_generic(const void *A, const int lda, const int A_batch_stride, const int A_multi_stride,
+                            const void *B, const int ldb, /* batches share B */ const int B_multi_stride,
+                            void *C, const int ldc, const int C_batch_stride, const int C_multi_stride,
+                            const void *bias, /* no row or batch stride needed */ const int bias_multi_stride) override
+    {
+        set_arrays(static_cast<const To *>(A), lda, A_batch_stride, A_multi_stride,
+                   static_cast<const To *>(B), ldb, B_multi_stride,
+                   static_cast<Tr *>(C), ldc, C_batch_stride, C_multi_stride,
+                   static_cast<const Tr *>(bias), bias_multi_stride);
+    }
+
+    /*** "Pretransposed" interface ***/
+
+    /* Perform pretranspose - the void * passed in must remain allocated for the duration of any execute calls. */
+    /* Arguments are: output buffer pointer, source pointer, source row stride, source multi stride */
+    virtual void pretranspose_B_array(void *, const To *, const int, const int) {};
+
+    /* Implementation of the void * overload which casts its arguments to the appropriate type.
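[Editorial aside - illustration only, not part of this patch] A condensed sketch of how a typed GemmCommon (defined above and completed just below) is driven once created: operand pointers and element strides go in via set_arrays(), the pretranspose contract is honoured if the kernel requires it (the dispatch code in this patch forces a 128-byte alignment on that buffer for 32-bit kernels), and execute() then runs over the advertised window. Buffer management and scheduling are simplified; the function name is hypothetical.

#include "arm_gemm_compute_iface.hpp"
#include "gemm_common.hpp"

#include <cstdint>
#include <vector>

void drive_gemm(arm_gemm::GemmCommon<float, float> &gemm_fn,
                const float *a, const float *b, float *c,
                int lda, int ldb, int ldc)
{
    // Single batch / single multi: the corresponding strides can stay 0.
    gemm_fn.set_arrays(a, lda, 0, 0,
                       b, ldb, 0,
                       c, ldc, 0, 0,
                       nullptr, 0); // no bias

    // Pretranspose B once if this kernel wants it (unaligned buffer for brevity).
    std::vector<uint8_t> b_pretransposed(gemm_fn.get_B_pretransposed_array_size());
    if(gemm_fn.B_pretranspose_required())
    {
        gemm_fn.pretranspose_B_array(b_pretransposed.data(), b, ldb, 0);
    }

    // Run the whole compute space on a single thread.
    const arm_gemm::ndcoord_t work = arm_gemm::to_ndcoord(arm_gemm::to_window(gemm_fn.get_window_size()));
    gemm_fn.execute(work, work, 0);
}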
*/
+    void pretranspose_B_array_generic(void *out, const void *in, const int row_stride, const int multi_stride) override
+    {
+        pretranspose_B_array(out, static_cast<const To *>(in), row_stride, multi_stride);
+    }
+
+    /*** Indirect interface ***/
+    virtual void set_indirect_parameters(size_t, const To *const *const *)
+    {
+    }
+
+    void set_indirect_parameters_generic(size_t sz, const void *const *const *ptr) override
+    {
+        set_indirect_parameters(sz, reinterpret_cast<const To *const *const *>(ptr));
+    }
+};
+
+} // namespace arm_gemm
diff --git a/src/core/cpu/kernels/assembly/ndrange.hpp b/src/core/cpu/kernels/assembly/ndrange.hpp
new file mode 100644
index 0000000000..1c8261aef7
--- /dev/null
+++ b/src/core/cpu/kernels/assembly/ndrange.hpp
@@ -0,0 +1,199 @@
+/*
+ * Copyright (c) 2019-2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#pragma once
+
+#include <algorithm>
+#include <array>
+#include <cassert>
+#include <initializer_list>
+
+namespace arm_gemm
+{
+template <unsigned int D>
+class NDRange
+{
+private:
+    std::array<unsigned int, D> m_sizes{};
+    std::array<unsigned int, D> m_totalsizes{};
+
+    class NDRangeIterator
+    {
+    private:
+        const NDRange &m_parent;
+        unsigned int m_pos = 0;
+        unsigned int m_end = 0;
+
+    public:
+        NDRangeIterator(const NDRange &p, unsigned int s, unsigned int e)
+            : m_parent(p), m_pos(s), m_end(e)
+        {
+        }
+
+        bool done() const
+        {
+            return (m_pos >= m_end);
+        }
+
+        unsigned int dim(unsigned int d) const
+        {
+            unsigned int r = m_pos;
+
+            if(d < (D - 1))
+            {
+                r %= m_parent.m_totalsizes[d];
+            }
+
+            if(d > 0)
+            {
+                r /= m_parent.m_totalsizes[d - 1];
+            }
+
+            return r;
+        }
+
+        bool next_dim0()
+        {
+            m_pos++;
+
+            return !done();
+        }
+
+        bool next_dim1()
+        {
+            m_pos += m_parent.m_sizes[0] - dim(0);
+
+            return !done();
+        }
+
+        unsigned int dim0_max() const
+        {
+            unsigned int offset = std::min(m_end - m_pos, m_parent.m_sizes[0] - dim(0));
+
+            return dim(0) + offset;
+        }
+    };
+
+    void set_totalsizes()
+    {
+        unsigned int t = 1;
+
+        for(unsigned int i = 0; i < D; i++)
+        {
+            if(m_sizes[i] == 0)
+            {
+                m_sizes[i] = 1;
+            }
+
+            t *= m_sizes[i];
+
+            m_totalsizes[i] = t;
+        }
+    }
+
+public:
+    NDRange &operator=(const NDRange &rhs) = default;
+    NDRange(const NDRange &rhs) = default;
+
+    template <typename... T>
+    NDRange(T... ts)
+        : m_sizes{ ts...
}
+    {
+        set_totalsizes();
+    }
+
+    NDRange(const std::array<unsigned int, D> &n)
+        : m_sizes(n)
+    {
+        set_totalsizes();
+    }
+
+    NDRangeIterator iterator(unsigned int start, unsigned int end) const
+    {
+        return NDRangeIterator(*this, start, end);
+    }
+
+    unsigned int total_size() const
+    {
+        return m_totalsizes[D - 1];
+    }
+
+    unsigned int get_size(unsigned int v) const
+    {
+        return m_sizes[v];
+    }
+};
+
+/** NDCoordinate builds upon a range, but specifies a starting position
+ * in addition to a size which it inherits from NDRange
+ */
+template <unsigned int N>
+class NDCoordinate : public NDRange<N>
+{
+    using int_t = unsigned int;
+    using ndrange_t = NDRange<N>;
+
+    std::array<int_t, N> m_positions{};
+
+public:
+    NDCoordinate &operator=(const NDCoordinate &rhs) = default;
+    NDCoordinate(const NDCoordinate &rhs) = default;
+    NDCoordinate(const std::initializer_list<std::pair<int_t, int_t>> &list)
+    {
+        std::array<int_t, N> sizes{};
+
+        std::size_t i = 0;
+        for(auto &p : list)
+        {
+            m_positions[i] = p.first;
+            sizes[i++]     = p.second;
+        }
+
+        //update the parent's sizes
+        static_cast<ndrange_t &>(*this) = ndrange_t(sizes);
+    }
+
+    int_t get_position(int_t d) const
+    {
+        assert(d < N);
+
+        return m_positions[d];
+    }
+
+    void set_position(int_t d, int_t v)
+    {
+        assert(d < N);
+
+        m_positions[d] = v;
+    }
+
+    int_t get_position_end(int_t d) const
+    {
+        return get_position(d) + ndrange_t::get_size(d);
+    }
+}; //class NDCoordinate
+
+using ndrange_t = NDRange<6>;
+using ndcoord_t = NDCoordinate<6>;
+
+} // namespace arm_gemm
diff --git a/src/runtime/NEON/functions/NEGEMM.cpp b/src/runtime/NEON/functions/NEGEMM.cpp
index 6d83480cb9..b84128e6c0 100644
--- a/src/runtime/NEON/functions/NEGEMM.cpp
+++ b/src/runtime/NEON/functions/NEGEMM.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2020 Arm Limited.
+ * Copyright (c) 2017-2021 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
@@ -38,7 +38,7 @@
 #include "src/core/NEON/kernels/NEGEMMMatrixMultiplyKernel.h"
 #include "src/core/NEON/kernels/NEGEMMTranspose1xWKernel.h"
 #include "src/core/helpers/AutoConfiguration.h"
-#include "src/runtime/NEON/functions/NEGEMMAssemblyDispatch.h"
+#include "src/runtime/cpu/operators/internal/CpuGemmAssemblyDispatch.h"
 #include <cmath>
@@ -48,10 +48,10 @@ namespace arm_compute
 {
 namespace
 {
-AsmGemmInfo init_assembly_metadata(const GEMMInfo &info)
+cpu::AsmGemmInfo init_assembly_metadata(const GEMMInfo &info)
 {
-    AsmGemmInfo asm_info;
-    asm_info.method = AsmConvMethod::Im2Col;
+    cpu::AsmGemmInfo asm_info;
+    asm_info.method = cpu::AsmConvMethod::Im2Col;
     asm_info.reinterpret_input_as_3d = info.reinterpret_input_as_3d();
     asm_info.depth_output_gemm3d = info.depth_output_gemm3d();
     asm_info.activation_info = info.activation_info();
@@ -61,7 +61,7 @@ AsmGemmInfo init_assembly_metadata(const GEMMInfo &info)
 } // namespace
 NEGEMM::NEGEMM(std::shared_ptr<IMemoryManager> memory_manager, IWeightsManager *weights_manager)
-    : _memory_group(memory_manager), _weights_manager(weights_manager), _interleave_kernel(), _transpose_kernel(), _mm_kernel(), _asm_glue(std::make_unique<NEGEMMAssemblyDispatch>()), _ma_kernel(),
+    : _memory_group(memory_manager), _weights_manager(weights_manager), _interleave_kernel(), _transpose_kernel(), _mm_kernel(), _asm_glue(std::make_unique<cpu::CpuGemmAssemblyDispatch>()), _ma_kernel(),
      _alpha_scale_func(nullptr), _add_bias(), _activation_func(), _tmp_a(), _tmp_b(), _tmp_d(), _original_b(nullptr), _run_vector_matrix_multiplication(false), _run_alpha_scale(false),
      _run_addition(false), _run_bias_addition(false), _run_activation(false), _reshape_b_only_on_first_run(false), _is_prepared(false)
 {
@@ -73,9 +73,9 @@ void NEGEMM::configure(const ITensor *a, const ITensor *b, const ITensor *c,
ITe { ARM_COMPUTE_ERROR_THROW_ON(NEGEMM::validate(a->info(), b->info(), (c != nullptr) ? c->info() : nullptr, d->info(), alpha, beta, gemm_info)); - const AsmGemmInfo asm_info = init_assembly_metadata(gemm_info); - const bool is_c_bias = gemm_info.reshape_b_only_on_first_run(); - bool run_optimised = bool(NEGEMMAssemblyDispatch::validate(a->info(), b->info(), (is_c_bias && c != nullptr) ? c->info() : nullptr, d->info(), asm_info)); + const cpu::AsmGemmInfo asm_info = init_assembly_metadata(gemm_info); + const bool is_c_bias = gemm_info.reshape_b_only_on_first_run(); + bool run_optimised = bool(cpu::CpuGemmAssemblyDispatch::validate(a->info(), b->info(), (is_c_bias && c != nullptr) ? c->info() : nullptr, d->info(), asm_info)); // Check if we need to reshape the matrix B only on the first run _is_prepared = false; @@ -85,7 +85,7 @@ void NEGEMM::configure(const ITensor *a, const ITensor *b, const ITensor *c, ITe _run_alpha_scale = alpha != 1.f; _run_bias_addition = c != nullptr && gemm_info.reshape_b_only_on_first_run(); _run_addition = beta != 0 && c != nullptr && !gemm_info.reshape_b_only_on_first_run(); - _run_activation = gemm_info.activation_info().enabled() && (!run_optimised || (run_optimised && !NEGEMMAssemblyDispatch::is_activation_supported(gemm_info.activation_info()))); + _run_activation = gemm_info.activation_info().enabled() && (!run_optimised || (run_optimised && !cpu::CpuGemmAssemblyDispatch::is_activation_supported(gemm_info.activation_info()))); if(run_optimised) { @@ -235,8 +235,8 @@ Status NEGEMM::validate(const ITensorInfo *a, const ITensorInfo *b, const ITenso } // Check if we need to run the optimized assembly kernel - AsmGemmInfo asm_info = init_assembly_metadata(gemm_info); - const bool run_optimised = bool(NEGEMMAssemblyDispatch::validate(a, b, is_c_bias ? c : nullptr, output, asm_info)); + cpu::AsmGemmInfo asm_info = init_assembly_metadata(gemm_info); + const bool run_optimised = bool(cpu::CpuGemmAssemblyDispatch::validate(a, b, is_c_bias ? c : nullptr, output, asm_info)); if(!run_optimised) { diff --git a/src/runtime/NEON/functions/NEGEMMAssemblyDispatch.cpp b/src/runtime/NEON/functions/NEGEMMAssemblyDispatch.cpp deleted file mode 100644 index c58a662f10..0000000000 --- a/src/runtime/NEON/functions/NEGEMMAssemblyDispatch.cpp +++ /dev/null @@ -1,860 +0,0 @@ -/* - * Copyright (c) 2018-2021 Arm Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
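[Editorial aside - illustration only, not part of this patch] Condensing the NEGEMM changes above: the only functional difference is that the assembly path is now validated and configured through the renamed cpu::CpuGemmAssemblyDispatch, and the activation is fused only when that dispatcher supports it. A hypothetical helper mirroring the logic in NEGEMM::configure()/validate(), assuming it sits in the same arm_compute namespace next to the file-local init_assembly_metadata() helper shown above:

bool would_use_assembly_path(const ITensorInfo *a, const ITensorInfo *b, const ITensorInfo *c,
                             const ITensorInfo *d, const GEMMInfo &gemm_info)
{
    const cpu::AsmGemmInfo asm_info = init_assembly_metadata(gemm_info); // same helper as above
    const bool run_optimised = bool(cpu::CpuGemmAssemblyDispatch::validate(a, b, c, d, asm_info));

    // When the assembly path runs but cannot fuse the requested activation,
    // NEGEMM keeps a separate activation function, exactly as before the rename.
    const bool needs_separate_activation = gemm_info.activation_info().enabled()
                                           && (!run_optimised || !cpu::CpuGemmAssemblyDispatch::is_activation_supported(gemm_info.activation_info()));
    (void)needs_separate_activation;
    return run_optimised;
}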
- */ -#include "src/runtime/NEON/functions/NEGEMMAssemblyDispatch.h" - -#include "arm_compute/runtime/NEON/NEScheduler.h" -#include "src/core/CPP/Validate.h" -#include "src/core/NEON/kernels/assembly/NEGEMMAssemblyWrapperKernel.h" -#include "src/core/NEON/kernels/assembly/arm_gemm.hpp" - -#include -#include - -namespace arm_compute -{ -namespace -{ -struct free_delete -{ - void operator()(void *x) - { - free(x); - } -}; - -struct Params -{ - unsigned int M; - unsigned int N; - unsigned int K; - unsigned int batches; - unsigned int multis; - unsigned int sections; - bool indirect; -}; - -Params extract_parameters(const ITensor *a, const ITensor *b, const ITensor *d, const AsmGemmInfo &info) -{ - ARM_COMPUTE_ERROR_ON_NULLPTR(a, b, d); - - Params p; - p.M = d->info()->tensor_shape().y(); - p.K = a->info()->tensor_shape().x(); - p.N = d->info()->tensor_shape().x(); - p.batches = 1; - p.multis = 1; - p.sections = 1; - p.indirect = false; - - if(info.method == AsmConvMethod::Conv || info.method == AsmConvMethod::Indirect) - { - p.indirect = true; - p.sections = b->info()->tensor_shape()[2] * b->info()->tensor_shape()[3]; - } - else - { - p.multis = b->info()->tensor_shape().z(); - p.batches = d->info()->tensor_shape().total_size_upper(2) / p.multis; - } - - // Update M in case of GEMM3D for output - if(info.depth_output_gemm3d != 0) - { - p.M = d->info()->tensor_shape().y() * d->info()->tensor_shape().z(); - p.batches = d->info()->tensor_shape().total_size_upper(3) / p.multis; - } - - return p; -} - -arm_gemm::Activation map_to_arm_gemm_activation(const ActivationLayerInfo &act) -{ - arm_gemm::Activation gemm_act; - - // Early exit in case lower bound is other than 0, as it's not yet supported - if(act.b() != 0.f) - { - return gemm_act; - } - - switch(act.activation()) - { - case ActivationLayerInfo::ActivationFunction::RELU: - gemm_act.type = arm_gemm::Activation::Type::ReLU; - break; - case ActivationLayerInfo::ActivationFunction::BOUNDED_RELU: - gemm_act.type = arm_gemm::Activation::Type::BoundedReLU; - gemm_act.param1 = act.a(); - gemm_act.param2 = 0.f; - break; - case ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU: - gemm_act.type = arm_gemm::Activation::Type::BoundedReLU; - gemm_act.param1 = act.a(); - gemm_act.param2 = act.b(); - break; - default: - gemm_act.type = arm_gemm::Activation::Type::None; - } - - return gemm_act; -} - -IScheduler::Hints scheduling_hint_heuristic(arm_gemm::GemmMethod method, DataType data_type) -{ - // Schedule assembly kernel - const int granule_threshold = 200; - IScheduler::Hints scheduling_hint = IScheduler::Hints(Window::DimX); - if(method == arm_gemm::GemmMethod::GEMM_INTERLEAVED && data_type == DataType::F32) - { - scheduling_hint = IScheduler::Hints(Window::DimX, IScheduler::StrategyHint::DYNAMIC, granule_threshold); - } - else if(method == arm_gemm::GemmMethod::GEMM_INTERLEAVED_2D && (data_type == DataType::F32 || data_type == DataType::F16 || data_type == DataType::U8 || data_type == DataType::S8)) - { - //GEMM_INTERLEAVED supports 2D parallelism, IScheduler::split_dimensions_all signals to parallelise over all window dimensions - scheduling_hint = IScheduler::Hints(IScheduler::split_dimensions_all, IScheduler::StrategyHint::STATIC, granule_threshold); - } - else if(method == arm_gemm::GemmMethod::QUANTIZE_WRAPPER_2D && (data_type == DataType::QASYMM8 || data_type == DataType::QASYMM8_SIGNED)) - { - //special case for QASYMM8 to support 2D parallelism, scheduler here may be tweaked differently compared to FP32 case - scheduling_hint = 
IScheduler::Hints(IScheduler::split_dimensions_all, IScheduler::StrategyHint::STATIC, granule_threshold); - } - - return scheduling_hint; -} - -template -class FallbackTransform : public ITransformWeights -{ -public: - FallbackTransform() noexcept {}; - /** Prevent instances of this class from being copied (As this class contains pointers) */ - FallbackTransform(const FallbackTransform &) = delete; - /** Default move constructor */ - FallbackTransform(FallbackTransform &&) = default; - /** Prevent instances of this class from being copied (As this class contains pointers) */ - FallbackTransform &operator=(const FallbackTransform &) = delete; - /** Default move assignment operator */ - FallbackTransform &operator=(FallbackTransform &&) = default; - void run() override - { - _output.allocator()->allocate(); - ARM_COMPUTE_ERROR_ON(_output.buffer() == nullptr); - _gemm_kernel_asm->pretranspose_B_array(_output.buffer(), _in1_ptr, _ldb, _multi_stride_b); - _reshape_run = true; - } - - void release() override - { - _output.allocator()->free(); - } - - ITensor *get_weights() override - { - return &_output; - } - - uint32_t uid() override - { - uint32_t id = (_B_pretranspose_size | 0x80000000); - return id; - } - - void configure(size_t B_pretranspose_size, unsigned int alignment) - { - _output.allocator()->init(TensorInfo(TensorShape{ (B_pretranspose_size + alignment) }, 1, DataType::S8), alignment); - _B_pretranspose_size = B_pretranspose_size; - } - - void set_pretranspose(ITensor *tensor) - { - if(!_reshape_run) - { - _gemm_kernel_asm->set_pretransposed_B_data(tensor->buffer()); - } - } - - void set_args(const int ldb, const TypeInput *in1_ptr, const int multi_stride_b, std::shared_ptr> gemm_kernel_asm) - { - _ldb = ldb; - _in1_ptr = in1_ptr; - _multi_stride_b = multi_stride_b; - _gemm_kernel_asm = gemm_kernel_asm; - } - -private: - Tensor _output{}; - int _ldb{}; - const TypeInput *_in1_ptr{}; - int _multi_stride_b{}; - size_t _B_pretranspose_size{}; - std::shared_ptr> _gemm_kernel_asm{ nullptr }; -}; - -/** Fallback in case ACL doesn't have a function */ -template -class Fallback : public NEGEMMAssemblyDispatch::IFallback -{ -public: - /** Destructor */ - ~Fallback() - { - // Release memory if we have allocated the memory ourselves - if(_pretranspose && !(_weights_manager && _weights_manager->are_weights_managed(_b))) - { - delete _pretranspose; - } - } - - /** Initialise the functions's input and output. - * - * @param[in] a Input tensor containing the Matrix A. - * @param[in] b Input tensor containing the Matrix B. - * @param[in] c Input tensor containing the Matrix C. - * @param[out] d Output tensor to store the result of matrix multiplication. - * @param[in] args Matrix multiplication information. - * @param[in] gemm_info GEMM meta-data - * @param[in] memory_group Memory group to be used by the function. - * @param[in] weights_manager Weights manager to be used by the function. - * @param[in] os Output stage meta-data. 
- */ - void configure(const ITensor *a, const ITensor *b, const ITensor *c, ITensor *d, - arm_gemm::GemmArgs args, const AsmGemmInfo &gemm_info, - MemoryGroup &memory_group, IWeightsManager *weights_manager, const OutputStage &os = {}); - - /** Set requantization shifts to be used - * - * @param[in] shifts Requantization shifts - * - * @return Pointer to the shift data - */ - /** Set requantization data to be used - * - * - * @param shifts Requantization shifts - * @param multipliers Requantization multipliers - * - * @return A tuple with the pointers to the shift and multiplier data respectively - */ - std::tuple set_requantize_data(const std::vector &shifts, - const std::vector &multipliers); - - // Inherited methods overridden: - void run() override; - void prepare() override; - bool is_configured() const override; - -private: - /** Allocate a workspace tensor. - * - * @param[in] workspace_size Size to allocate. - * @param[in] memory_group Tensor memory group. - * @param[in] alignment Workspace memory alignment. - */ - void allocate_workspace(size_t workspace_size, MemoryGroup &memory_group, size_t alignment); - /** Configure the indirect buffer - * - * @param[in] a Input tensor containing the Matrix A. - * @param[in] b Input tensor containing the Matrix B. - * @param[out] d Output tensor to store the result of matrix multiplication. - * @param[in] info GEMM meta-data - */ - void configure_indirect(const ITensorInfo *a, const ITensorInfo *b, const ITensorInfo *d, const AsmGemmInfo &info); - /** Prepare the indirect buffer */ - void prepare_indirect_buffer(); - - /** Assembly Gemm kernel */ - std::shared_ptr> _gemm_kernel_asm{ nullptr }; - /** Optimised Arm® Neon™ kernel */ - std::unique_ptr _optimised_kernel{ nullptr }; - /** Input A */ - const ITensor *_a - { - nullptr - }; - /** Input B */ - const ITensor *_b - { - nullptr - }; - const ITensor *_c - { - nullptr - }; - /** Output */ - ITensor *_d{ nullptr }; - /** GEMM workspace */ - Tensor _workspace{}; - /** Pre-transpose tensor */ - ITensor *_pretranspose{ nullptr }; - /** Prepared flag */ - bool _is_prepared{ false }; - /** GEMM meta-data */ - AsmGemmInfo _gemm_info{}; - /** Weights manager */ - IWeightsManager *_weights_manager{ nullptr }; - /** Weights transform object */ - FallbackTransform _weights_transform{}; - /** GEMM kernel description */ - arm_gemm::KernelDescription _kernel_info{}; - /** Per channel quantization shifts */ - std::vector _shifts{}; - std::vector right_shifts{}; - std::vector left_shifts{}; - /** Per channel quantization multipliers */ - std::vector _multipliers{}; - /** Indirect buffer */ - std::unique_ptr _indirect_arg{}; - std::unique_ptr _indirect_buf{}; - std::vector _indirect_pad{}; - arm_gemm::ConvolutionParameters _cp{}; -}; - -template -std::tuple -Fallback::set_requantize_data(const std::vector &shifts, const std::vector &multipliers) -{ - _multipliers = multipliers; - _shifts = shifts; - bool need_left = false; - for(const auto s : _shifts) - { - left_shifts.push_back(std::max(-s, int32_t(0))); - right_shifts.push_back(std::min(-s, int32_t(0))); - if(s < 0 && !need_left) - { - need_left = true; - } - } - return std::make_tuple(need_left, left_shifts.data(), right_shifts.data(), _multipliers.data()); -} - -template -void Fallback::prepare_indirect_buffer() -{ - const TypeInput *A_ptr = reinterpret_cast(_a->buffer()); - const int multis = 1; - const int batches = _a->info()->tensor_shape().total_size_upper(3); - const size_t stride_A = _a->info()->strides_in_bytes().y() / sizeof(TypeInput); - const 
size_t batch_stride_A = _a->info()->strides_in_bytes()[3] / sizeof(TypeInput); - const size_t multi_stride_A = _a->info()->strides_in_bytes()[4] / sizeof(TypeInput); - - const size_t output_hw = _cp.output_height * _cp.output_width; - const int batch_size = _cp.kernel_height * _cp.kernel_width * output_hw * sizeof(TypeInput); - const size_t batch_stride = batch_size / sizeof(TypeInput); - const int multi_size = batch_size * batches; - const size_t multi_stride = multi_size / sizeof(TypeInput); - - for(int64_t m = 0; m < multis; m++) - { - for(int64_t b = 0; b < batches; b++) - { - for(int64_t output_y = 0; output_y < _cp.output_height; output_y++) - { - for(int64_t output_x = 0; output_x < _cp.output_width; output_x++) - { - int64_t output_xy = (output_y * _cp.output_width) + output_x; - - for(int64_t kernel_y = 0; kernel_y < _cp.kernel_height; kernel_y++) - { - for(int64_t kernel_x = 0; kernel_x < _cp.kernel_width; kernel_x++) - { - int64_t input_x = (output_x * _cp.output_stride_w) + kernel_x - _cp.padding_left; - int64_t input_y = (output_y * _cp.output_stride_h) + kernel_y - _cp.padding_top; - int64_t kernel_xy = (kernel_y * _cp.kernel_width) + kernel_x; - int64_t input_xy = (input_y * _cp.input_width) + input_x; - - if(input_x < 0 || input_x >= _cp.input_width || input_y < 0 || input_y >= _cp.input_height) - { - _indirect_buf.get()[m * multi_stride + b * batch_stride + kernel_xy * output_hw + output_xy] = _indirect_pad.data(); - } - else - { - _indirect_buf.get()[m * multi_stride + b * batch_stride + kernel_xy * output_hw + output_xy] = - A_ptr + (m * multi_stride_A + b * batch_stride_A + input_xy * stride_A); - } - } - } - } - } - } - } -} - -template -void Fallback::configure_indirect(const ITensorInfo *a, const ITensorInfo *b, const ITensorInfo *d, const AsmGemmInfo &info) -{ - ARM_COMPUTE_ERROR_ON(!(info.method == AsmConvMethod::Conv || info.method == AsmConvMethod::Indirect)); - - float zeropad = 0.f; - if(is_data_type_quantized(a->data_type())) - { - zeropad = a->quantization_info().uniform().offset; - } - - const int64_t input_width = static_cast(a->tensor_shape()[1]); - const int64_t input_height = static_cast(a->tensor_shape()[2]); - const int64_t input_channels = static_cast(a->tensor_shape()[0]); - const int64_t kernel_width = static_cast(b->tensor_shape()[2]); - const int64_t kernel_height = static_cast(b->tensor_shape()[3]); - const int64_t output_width = static_cast(d->tensor_shape()[1]); - const int64_t output_height = static_cast(d->tensor_shape()[2]); - - _cp = { input_width, input_height, input_channels, kernel_width, kernel_height, output_width, output_height, - info.ps_info.stride().first, info.ps_info.stride().second, info.padding_top, info.padding_left, zeropad - }; - - if(info.method == AsmConvMethod::Conv) - { - _gemm_kernel_asm->set_convolution_parameters(_cp); - } - - if(info.method == AsmConvMethod::Indirect) - { - const unsigned int multis = 1; - const unsigned int batches = a->tensor_shape().total_size_upper(3); - const unsigned int kernel_hw = _cp.kernel_width * _cp.kernel_height; - const unsigned int output_hw = _cp.output_width * _cp.output_height; - - using TypeInputPtr = TypeInput *; - const int batch_size = kernel_hw * output_hw * sizeof(TypeInputPtr); - const size_t batch_stride = batch_size / sizeof(TypeInputPtr); - const int multi_size = batch_size * batches; - const size_t multi_stride = multi_size / sizeof(TypeInputPtr); - - _indirect_buf = std::unique_ptr(reinterpret_cast(malloc(multi_size * multis))); - _indirect_arg = 
std::unique_ptr(reinterpret_cast(malloc(sizeof(TypeInput **) * kernel_hw * multis * batches))); - _indirect_pad = std::vector(_cp.input_channels, TypeInput(zeropad)); - - // Set indirect argument - int64_t pos = 0; - for(int64_t m = 0; m < multis; m++) - { - for(int64_t b = 0; b < batches; b++) - { - for(int64_t kernel_xy = 0; kernel_xy < kernel_hw; kernel_xy++) - { - (_indirect_arg.get())[pos++] = _indirect_buf.get() + m * multi_stride + b * batch_stride + kernel_xy * output_hw; - } - } - } - - _gemm_kernel_asm->set_indirect_parameters(a->tensor_shape()[0], _indirect_arg.get()); - } -} - -template -void Fallback::configure(const ITensor *a, const ITensor *b, const ITensor *c, ITensor *d, - arm_gemm::GemmArgs args, const AsmGemmInfo &gemm_info, - MemoryGroup &memory_group, IWeightsManager *weights_manager, const OutputStage &os) -{ - arm_gemm::GemmConfig gemm_cfg; - _kernel_info = arm_gemm::get_gemm_method(args, os); - _weights_manager = weights_manager; - if(_kernel_info.method != arm_gemm::GemmMethod::GEMV_BATCHED) - { - gemm_cfg.filter = _kernel_info.name; - args._cfg = &gemm_cfg; - } - _gemm_kernel_asm = arm_gemm::gemm(args, os); - if(_gemm_kernel_asm == nullptr) - { - //configuration not supported: Leave function unconfigured: - return; - } - - // arm_compute wrapper for the Gemm object (see above) - std::unique_ptr> acl_gemm_wrapper = std::make_unique>(); - ARM_COMPUTE_ERROR_ON(acl_gemm_wrapper == nullptr); - acl_gemm_wrapper->configure(_gemm_kernel_asm.get(), gemm_cfg.filter); - const size_t workspace_size = _gemm_kernel_asm->get_working_size(); - if(workspace_size > 0) - { - // Allocate workspace - const unsigned int alignment = 4096; - allocate_workspace(workspace_size, memory_group, alignment); - } - - //if we disable this code below in brackets then ConvLayer deadlocks when threads > 1 and - //the shapes are In=1x1x1024 Weights=1x1x1024x1001 Biases=1001 Out=1x1x1001 - { - const unsigned int window_size = _gemm_kernel_asm->get_window_size().total_size(); - if(window_size < static_cast(args._maxthreads)) - { - _gemm_kernel_asm->set_nthreads(window_size); - } - } - - _optimised_kernel = std::move(acl_gemm_wrapper); - _a = a; - _b = b; - _c = c; - _d = d; - _gemm_info = gemm_info; - // Check for pre-transposed support - if(_gemm_kernel_asm->B_pretranspose_required()) - { - // Forcing 128-byte alignment (required by 32-bit kernels) - const unsigned int alignment = 128; - const size_t B_pretranspose_size = _gemm_kernel_asm->get_B_pretransposed_array_size(); - if(weights_manager && _weights_manager->are_weights_managed(b)) - { - _weights_transform.configure(B_pretranspose_size, alignment); - _pretranspose = _weights_manager->acquire(b, &_weights_transform); - } - else - { - _pretranspose = new Tensor(); - static_cast(_pretranspose)->allocator()->init(TensorInfo(TensorShape{ (B_pretranspose_size + alignment) }, 1, DataType::S8), alignment); - } - } - - // Handle indirect GEMM convolution - if(gemm_info.method == AsmConvMethod::Conv || gemm_info.method == AsmConvMethod::Indirect) - { - configure_indirect(a->info(), b->info(), d->info(), gemm_info); - } -} - -template -void Fallback::prepare() -{ - if(!_is_prepared) - { - // Setup up matrix bias in the assembly kernel, it's just a pointer to matrix C. 
- if(_c && _c->info()->data_type() == DataType::S32) - { - _gemm_kernel_asm->set_quantized_bias(reinterpret_cast(_c->buffer() + _c->info()->offset_first_element_in_bytes()), 0); - } - - // Pretranspose B if required - if(_gemm_kernel_asm->B_pretranspose_required()) - { - const int ldb = _b->info()->strides_in_bytes().y() / sizeof(TypeInput); - const auto in1_ptr = reinterpret_cast(_b->buffer() + _b->info()->offset_first_element_in_bytes()); - const int multi_stride_b = _b->info()->strides_in_bytes().z() / sizeof(TypeInput); - - if(_weights_manager && _weights_manager->are_weights_managed(_b)) - { - _weights_transform.set_args(ldb, in1_ptr, multi_stride_b, _gemm_kernel_asm); - _weights_manager->run(_b, &_weights_transform); - - // If we didn't run the reshape function, set the pretransposed buffer - if(!_weights_transform.is_reshape_run()) - { - _weights_transform.set_pretranspose(_pretranspose); - } - } - else - { - static_cast(_pretranspose)->allocator()->allocate(); - ARM_COMPUTE_ERROR_ON(_pretranspose->buffer() == nullptr); - _gemm_kernel_asm->pretranspose_B_array(_pretranspose->buffer(), in1_ptr, ldb, multi_stride_b); - _b->mark_as_unused(); - } - } - - if(_gemm_info.method == AsmConvMethod::Indirect) - { - prepare_indirect_buffer(); - } - - _is_prepared = true; - } -} - -template -void Fallback::allocate_workspace(size_t workspace_size, MemoryGroup &memory_group, size_t alignment) -{ - ARM_COMPUTE_ERROR_ON_MSG(workspace_size == 0, "size cannot be 0"); - _workspace.allocator()->init(TensorInfo(TensorShape{ (workspace_size + alignment) }, 1, DataType::S8), alignment); - memory_group.manage(&_workspace); - _workspace.allocator()->allocate(); -} - -template -bool Fallback::is_configured() const -{ - return _optimised_kernel != nullptr; -} - -template -void Fallback::run() -{ - int lda = _a->info()->strides_in_bytes().y() / sizeof(TypeInput); - int ldb = 0; - const int ldd = _d->info()->strides_in_bytes().y() / sizeof(TypeOutput); - - const size_t a_batch_idx = _gemm_info.reinterpret_input_as_3d != 0 ? 3 : 2; - const size_t a_multi_idx = a_batch_idx + 1; - const size_t d_batch_idx = _gemm_info.depth_output_gemm3d != 0 ? 
3 : 2; - const size_t d_multi_idx = d_batch_idx + 1; - - int batch_stride_a = _a->info()->strides_in_bytes()[a_batch_idx] / sizeof(TypeInput); - const int batch_stride_d = _d->info()->strides_in_bytes()[d_batch_idx] / sizeof(TypeOutput); - - int multi_stride_a = _a->info()->strides_in_bytes()[a_multi_idx] / sizeof(TypeInput); - int multi_stride_b = 0; - const int multi_stride_d = _d->info()->strides_in_bytes()[d_multi_idx] / sizeof(TypeOutput); - - auto in0_ptr = reinterpret_cast(_a->buffer() + _a->info()->offset_first_element_in_bytes()); - const TypeInput *in1_ptr = nullptr; - auto out_ptr = reinterpret_cast(_d->buffer() + _d->info()->offset_first_element_in_bytes()); - - // Check if B is pre-tranposed and de-reference if not - if(!_gemm_kernel_asm->B_is_pretransposed()) - { - ldb = _b->info()->strides_in_bytes().y() / sizeof(TypeInput); - multi_stride_b = _b->info()->strides_in_bytes().z() / sizeof(TypeInput); - in1_ptr = reinterpret_cast(_b->buffer() + _b->info()->offset_first_element_in_bytes()); - } - - const auto scheduling_hint = scheduling_hint_heuristic(_kernel_info.method, _d->info()->data_type()); - - // Set workspace if needed and reset number of threads as buffer manager gets re-created with max_threads - if(_workspace.buffer() != nullptr) - { - _gemm_kernel_asm->set_working_space(reinterpret_cast(_workspace.buffer())); - const unsigned int split_dim = scheduling_hint.split_dimension(); - const unsigned int window_size = _gemm_kernel_asm->get_window_size().total_size(); - unsigned int num_threads = NEScheduler::get().num_threads(); - if(window_size < num_threads) - { - num_threads = window_size; - } - if(split_dim != IScheduler::split_dimensions_all) - { - // Make sure the kernel does not expect more threads than we can actually spawn - const unsigned int num_iterations = _optimised_kernel.get()->window().num_iterations(split_dim); - num_threads = std::min(num_iterations, num_threads); - } - _gemm_kernel_asm->set_nthreads(num_threads); - } - - // Prepare assembly kernel - prepare(); - - // Setup up matrix bias in the assembly kernel, it's just a pointer to matrix C. 
- TypeOutput *bias = nullptr; - if(_c && _c->info()->data_type() != DataType::S32) - { - bias = reinterpret_cast(_c->buffer() + _c->info()->offset_first_element_in_bytes()); - } - - if(_gemm_info.method == AsmConvMethod::Indirect) - { - in0_ptr = nullptr; - lda = 0; - batch_stride_a = 0; - multi_stride_a = 0; - } - - // Set gemm parameters - _gemm_kernel_asm->set_arrays(in0_ptr, lda, batch_stride_a, multi_stride_a, - in1_ptr, ldb, multi_stride_b, - out_ptr, ldd, batch_stride_d, multi_stride_d, - bias, 0); - // Schedule - NEScheduler::get().schedule(_optimised_kernel.get(), scheduling_hint); -} - -template -void create_arm_gemm(std::unique_ptr &arm_gemm, MemoryGroup &memory_group, - const ITensor *a, const ITensor *b, const ITensor *c, ITensor *d, arm_gemm::Activation activation, const AsmGemmInfo &info, - IWeightsManager *weights_manager) -{ - Params p = extract_parameters(a, b, d, info); - const CPUInfo &ci = NEScheduler::get().cpu_info(); - unsigned int num_threads = NEScheduler::get().num_threads(); - - arm_gemm::GemmArgs args(&ci, p.M, p.N, p.K, p.sections, p.batches, p.multis, p.indirect, activation, num_threads); - - // Create arm_gemm fallback - auto fallback = std::make_unique>(); - fallback->configure(a, b, c, d, args, info, memory_group, weights_manager); - arm_gemm = std::move(fallback); -} - -template -void create_arm_gemm_quant(std::unique_ptr &arm_gemm, MemoryGroup &memory_group, - const ITensor *a, const ITensor *b, const ITensor *c, ITensor *d, arm_gemm::Activation activation, const AsmGemmInfo &info, - IWeightsManager *weights_manager) -{ - ARM_COMPUTE_UNUSED(activation); - Params p = extract_parameters(a, b, d, info); - const CPUInfo &ci = NEScheduler::get().cpu_info(); - unsigned int num_threads = NEScheduler::get().num_threads(); - - arm_gemm::GemmArgs args(&ci, p.M, p.N, p.K, p.sections, p.batches, p.multis, p.indirect, activation, num_threads); - - // Create arm_gemm fallback - auto fallback = std::make_unique>(); - - // Configure requantization info - const int32_t negation = info.negated_offsets ? 1 : -1; - const int32_t a_offset = -a->info()->quantization_info().uniform().offset * negation; - const int32_t b_offset = -b->info()->quantization_info().uniform().offset * negation; - const GEMMLowpOutputStageInfo os_info = info.output_stage; - - arm_gemm::Requantize32 gemm_requant_info{}; - if(os_info.gemmlowp_shifts.size() > 1) - { - const auto requantize_data = fallback->set_requantize_data(os_info.gemmlowp_shifts, os_info.gemmlowp_multipliers); - gemm_requant_info = arm_gemm::Requantize32(nullptr, 0, - a_offset, b_offset, os_info.gemmlowp_offset, - (std::get<0>(requantize_data)) ? 
std::get<1>(requantize_data) : nullptr, - std::get<2>(requantize_data), - std::get<3>(requantize_data), - os_info.gemmlowp_min_bound, os_info.gemmlowp_max_bound); - } - else - { - gemm_requant_info = arm_gemm::Requantize32(nullptr, 0, - a_offset, b_offset, os_info.gemmlowp_offset, - -os_info.gemmlowp_shift, os_info.gemmlowp_multiplier, - os_info.gemmlowp_min_bound, os_info.gemmlowp_max_bound); - } - - // Configure fallback - fallback->configure(a, b, c, d, args, info, memory_group, weights_manager, gemm_requant_info); - arm_gemm = std::move(fallback); -} - -} //namespace - -NEGEMMAssemblyDispatch::NEGEMMAssemblyDispatch(std::shared_ptr memory_manager, IWeightsManager *weights_manager) - : _arm_gemm(nullptr), _memory_group(std::move(memory_manager)), _weights_manager(weights_manager) -{ -} - -Status NEGEMMAssemblyDispatch::validate(const ITensorInfo *a, const ITensorInfo *b, const ITensorInfo *c, const ITensorInfo *d, const AsmGemmInfo &info) -{ - ARM_COMPUTE_UNUSED(c, info); - ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(a, b, d); - ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(a); - ARM_COMPUTE_RETURN_ERROR_ON_CPU_BF16_UNSUPPORTED(a); - -#ifndef __aarch64__ - ARM_COMPUTE_RETURN_ERROR_ON_MSG(a->element_size() == 1, "8bit integer types only supported for aarch64"); -#endif /* __aarch64__ */ - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(a, 1, DataType::U8, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::S8, - DataType::BFLOAT16, DataType::F16, DataType::F32); - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(b, 1, DataType::U8, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::QSYMM8_PER_CHANNEL, DataType::S8, - DataType::BFLOAT16, DataType::F16, DataType::F32); - if(is_data_type_quantized_per_channel(b->data_type())) - { - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(a, 1, DataType::QASYMM8_SIGNED, DataType::S8); - } - else - { - ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(a, b); - } - ARM_COMPUTE_RETURN_ERROR_ON_MSG(a->data_type() == DataType::F32 && d->data_type() != DataType::F32, "Only F32 output supported for F32 input"); - ARM_COMPUTE_RETURN_ERROR_ON_MSG(a->data_type() == DataType::F16 && d->data_type() != DataType::F16, "Only F16 output supported for F16 input"); - ARM_COMPUTE_RETURN_ERROR_ON_MSG(a->data_type() == DataType::BFLOAT16 && d->data_type() != DataType::F32, "Only F32 output supported for BFLOAT16 input"); - ARM_COMPUTE_RETURN_ERROR_ON_MSG(a->data_type() == DataType::U8 && d->data_type() != DataType::U32, "Only U32 output supported for U8 input"); - ARM_COMPUTE_RETURN_ERROR_ON_MSG(a->data_type() == DataType::S8 && d->data_type() != DataType::S32, "Only S32 output supported for S8 input"); - ARM_COMPUTE_RETURN_ERROR_ON_MSG(a->data_type() == DataType::QASYMM8 && d->data_type() != DataType::QASYMM8, "Only QASYMM8 output supported for QASYMM8 input"); - return Status{}; -} - -bool NEGEMMAssemblyDispatch::is_activation_supported(const ActivationLayerInfo &activation) -{ - arm_gemm::Activation act = map_to_arm_gemm_activation(activation); - return act.type != arm_gemm::Activation::Type::None; -} - -void NEGEMMAssemblyDispatch::configure(const ITensor *a, const ITensor *b, const ITensor *c, ITensor *d, const AsmGemmInfo &info) -{ - ARM_COMPUTE_ERROR_ON_NULLPTR(a, b, d); - arm_gemm::Activation act = map_to_arm_gemm_activation(info.activation_info); - - //If we don't support a combination of data types, silently return: it is the caller's responsibility to check if configure() was successful via is_configured() - 
if(!NEGEMMAssemblyDispatch::validate(a->info(), b->info(), c != nullptr ? c->info() : nullptr, d->info(), info)) - { - return; - } - - switch(a->info()->data_type()) - { - case DataType::F32: - create_arm_gemm(_arm_gemm, _memory_group, a, b, c, d, act, info, _weights_manager); - break; -#ifdef __aarch64__ - case DataType::U8: - case DataType::QASYMM8: - if(d->info()->data_type() == DataType::S32) - { - create_arm_gemm(_arm_gemm, _memory_group, a, b, c, d, act, info, _weights_manager); - } - else - { - create_arm_gemm_quant(_arm_gemm, _memory_group, a, b, c, d, act, info, _weights_manager); - } - break; - case DataType::S8: - case DataType::QASYMM8_SIGNED: - if(d->info()->data_type() == DataType::S32) - { - create_arm_gemm(_arm_gemm, _memory_group, a, b, c, d, act, info, _weights_manager); - } - else - { - create_arm_gemm_quant(_arm_gemm, _memory_group, a, b, c, d, act, info, _weights_manager); - } - break; -#endif /* __aarch64__ */ -#if defined(__ARM_FEATURE_BF16_VECTOR_ARITHMETIC) || defined(ARM_COMPUTE_FORCE_BF16) - case DataType::BFLOAT16: - create_arm_gemm(_arm_gemm, _memory_group, a, b, c, d, act, info, _weights_manager); - break; -#endif /* defined(__ARM_FEATURE_BF16_VECTOR_ARITHMETIC) || defined(ARM_COMPUTE_FORCE_BF16) */ -#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC - case DataType::F16: - create_arm_gemm(_arm_gemm, _memory_group, a, b, c, d, act, info, _weights_manager); - break; -#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */ - default: - break; - } -} - -void NEGEMMAssemblyDispatch::prepare() -{ - ARM_COMPUTE_ERROR_ON(_arm_gemm == nullptr); - _arm_gemm->prepare(); -} - -bool NEGEMMAssemblyDispatch::is_configured() const -{ - return _arm_gemm != nullptr && _arm_gemm->is_configured(); -} - -void NEGEMMAssemblyDispatch::run() -{ - MemoryGroupResourceScope scope_mg(_memory_group); - - ARM_COMPUTE_ERROR_ON(_arm_gemm == nullptr); - _arm_gemm->run(); -} -} //namespace arm_compute diff --git a/src/runtime/NEON/functions/NEGEMMAssemblyDispatch.h b/src/runtime/NEON/functions/NEGEMMAssemblyDispatch.h deleted file mode 100644 index 381fa4de31..0000000000 --- a/src/runtime/NEON/functions/NEGEMMAssemblyDispatch.h +++ /dev/null @@ -1,125 +0,0 @@ -/* - * Copyright (c) 2018-2021 Arm Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ -#ifndef SRC_NEGEMMASSEMBLYDISPATCH_H -#define SRC_NEGEMMASSEMBLYDISPATCH_H - -#include "arm_compute/runtime/IFunction.h" -#include "arm_compute/runtime/IMemoryManager.h" -#include "arm_compute/runtime/IWeightsManager.h" -#include "arm_compute/runtime/MemoryGroup.h" -#include "arm_compute/runtime/Tensor.h" - -namespace arm_compute -{ -/* Convolution method supported by the assembly gemm interface */ -enum class AsmConvMethod -{ - Im2Col, - Indirect, - Conv -}; - -struct AsmGemmInfo -{ - AsmConvMethod method{ AsmConvMethod::Im2Col }; - PadStrideInfo ps_info{}; - ActivationLayerInfo activation_info{}; - GEMMLowpOutputStageInfo output_stage{}; - bool negated_offsets{ true }; - bool reinterpret_input_as_3d{ false }; - bool depth_output_gemm3d{ false }; - int64_t padding_top{ 0 }; - int64_t padding_left{ 0 }; - float padding_value{ 0.f }; -}; - -/** Assembly kernel glue */ -class NEGEMMAssemblyDispatch : public IFunction -{ -public: - /** Constructor */ - NEGEMMAssemblyDispatch(std::shared_ptr memory_manager = nullptr, IWeightsManager *weights_manager = nullptr); - /** Prevent instances of this class from being copy constructed */ - NEGEMMAssemblyDispatch(const NEGEMMAssemblyDispatch &) = delete; - /** Prevent instances of this class from being copied */ - NEGEMMAssemblyDispatch &operator=(const NEGEMMAssemblyDispatch &) = delete; - NEGEMMAssemblyDispatch(NEGEMMAssemblyDispatch &&) = default; - NEGEMMAssemblyDispatch &operator=(NEGEMMAssemblyDispatch &&) = default; - ~NEGEMMAssemblyDispatch() = default; - - class IFallback - { - public: - virtual void run() = 0; - virtual void prepare() = 0; - virtual bool is_configured() const = 0; - virtual ~IFallback() = default; - }; - -public: - /** If supported create a Compute Library function else fallback to the arm_gemm function. - * - * @param[in] a Input tensor (Matrix A) - * @param[in] b Input tensor (Matrix B) - * @param[in] c Input tensor (Matrix C) used to pass the bias for quantized calculations - * @param[out] d Output tensor to store the result of matrix multiplication. Data type supported: same as @p input0. - * @param[in] info GEMM meta-data - */ - void configure(const ITensor *a, const ITensor *b, const ITensor *c, ITensor *d, const AsmGemmInfo &info); - - /** Indicates whether or not this function can be used to process the given parameters. - * - * @param[in] a Input tensor info (Matrix A) - * @param[in] b Input tensor info (Matrix B) - * @param[in] c Input tensor info (Matrix C) used to pass the bias for quantized calculations - * @param[in] d Output tensor to store the result of matrix multiplication. Data type supported: same as @p input0. - * @param[in] info GEMM meta-data - * - * @return a status. - */ - static Status validate(const ITensorInfo *a, const ITensorInfo *b, const ITensorInfo *c, const ITensorInfo *d, const AsmGemmInfo &info); - /** Checks if activation is supported by the gemm assembly dispatcher - * - * @param[in] activation Activation to check - * - * @return True if activation is supported else false - */ - static bool is_activation_supported(const ActivationLayerInfo &activation); - /** Was the function successfully configured ? 
- * - * @return True if the function is configured and ready to run - */ - bool is_configured() const; - - // Inherited methods overridden: - void prepare() override; - void run() override; - -private: - std::unique_ptr _arm_gemm; /**< Interface for the arm_gemm fallback */ - MemoryGroup _memory_group; /**< Function memory group */ - IWeightsManager *_weights_manager; /**< Pointer to the weights manager */ -}; -} // namespace arm_compute -#endif /* SRC_NEGEMMASSEMBLYDISPATCH_H */ diff --git a/src/runtime/NEON/functions/NEGEMMConv2d.cpp b/src/runtime/NEON/functions/NEGEMMConv2d.cpp index 0f6f93060d..ddeacc85f5 100644 --- a/src/runtime/NEON/functions/NEGEMMConv2d.cpp +++ b/src/runtime/NEON/functions/NEGEMMConv2d.cpp @@ -26,7 +26,7 @@ #include "arm_compute/core/utils/misc/ShapeCalculator.h" #include "arm_compute/core/utils/quantization/AsymmHelpers.h" #include "arm_compute/runtime/NEON/NEScheduler.h" -#include "src/runtime/NEON/functions/NEGEMMAssemblyDispatch.h" +#include "src/runtime/cpu/operators/internal/CpuGemmAssemblyDispatch.h" #include @@ -66,10 +66,10 @@ GEMMLowpOutputStageInfo calculate_output_stage_metadata(const ITensorInfo *input quantization::calculate_quantized_multipliers(iqinfo, wqinfo, oqinfo, os_info); return os_info; } -AsmGemmInfo init_assembly_metadata(const Conv2dInfo &info, bool is_indirect) +cpu::AsmGemmInfo init_assembly_metadata(const Conv2dInfo &info, bool is_indirect) { - AsmGemmInfo asm_info; - asm_info.method = is_indirect ? AsmConvMethod::Indirect : AsmConvMethod::Conv; + cpu::AsmGemmInfo asm_info; + asm_info.method = is_indirect ? cpu::AsmConvMethod::Indirect : cpu::AsmConvMethod::Conv; asm_info.ps_info = info.conv_info; asm_info.activation_info = info.act_info; asm_info.depth_output_gemm3d = true; @@ -83,7 +83,7 @@ AsmGemmInfo init_assembly_metadata(const Conv2dInfo &info, bool is_indirect) } // namespace NEGEMMConv2d::NEGEMMConv2d(const std::shared_ptr &memory_manager) - : _gemm_asm_func(std::make_unique(memory_manager)), _activation_func(), _weights_permute_func(), _original_weights(nullptr), _permuted_weights(), _is_prepared(false), + : _gemm_asm_func(std::make_unique(memory_manager)), _activation_func(), _weights_permute_func(), _original_weights(nullptr), _permuted_weights(), _is_prepared(false), _run_activation(false) { } @@ -102,7 +102,7 @@ void NEGEMMConv2d::configure(ITensor *input, const ITensor *weights, const ITens _weights_permute_func.configure(weights, &_permuted_weights, PermutationVector{ 3, 0, 1, 2 }); // Configure assembly dispatch - AsmGemmInfo asm_info = init_assembly_metadata(info, false); + cpu::AsmGemmInfo asm_info = init_assembly_metadata(info, false); if(is_data_type_quantized(input->info()->data_type())) { asm_info.output_stage = calculate_output_stage_metadata(input->info(), weights->info(), output->info(), info.act_info); @@ -149,8 +149,8 @@ Status NEGEMMConv2d::validate(const ITensorInfo *input, const ITensorInfo *weigh ARM_COMPUTE_RETURN_ERROR_ON(biases->num_dimensions() > 1); } - AsmGemmInfo asm_info = init_assembly_metadata(info, false); - ARM_COMPUTE_RETURN_ON_ERROR(NEGEMMAssemblyDispatch::validate(input, weights, biases, output, asm_info)); + cpu::AsmGemmInfo asm_info = init_assembly_metadata(info, false); + ARM_COMPUTE_RETURN_ON_ERROR(cpu::CpuGemmAssemblyDispatch::validate(input, weights, biases, output, asm_info)); return Status{}; } void NEGEMMConv2d::run() diff --git a/src/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.cpp b/src/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.cpp index 921626f0fe..53dd39e549 
100644 --- a/src/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.cpp +++ b/src/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2020 Arm Limited. + * Copyright (c) 2017-2021 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -42,16 +42,16 @@ #include "src/core/NEON/kernels/NEGEMMLowpOffsetContributionOutputStageKernel.h" #include "src/core/NEON/kernels/NEGEMMLowpReductionKernel.h" #include "src/core/NEON/kernels/NEGEMMTranspose1xWKernel.h" -#include "src/runtime/NEON/functions/NEGEMMAssemblyDispatch.h" +#include "src/runtime/cpu/operators/internal/CpuGemmAssemblyDispatch.h" namespace arm_compute { namespace { -AsmGemmInfo init_assembly_metadata(const GEMMInfo &info) +cpu::AsmGemmInfo init_assembly_metadata(const GEMMInfo &info) { - AsmGemmInfo asm_info; - asm_info.method = AsmConvMethod::Im2Col; + cpu::AsmGemmInfo asm_info; + asm_info.method = cpu::AsmConvMethod::Im2Col; asm_info.reinterpret_input_as_3d = info.reinterpret_input_as_3d(); asm_info.depth_output_gemm3d = info.depth_output_gemm3d(); asm_info.activation_info = info.activation_info(); @@ -66,7 +66,7 @@ using namespace arm_compute::misc::shape_calculator; NEGEMMLowpMatrixMultiplyCore::~NEGEMMLowpMatrixMultiplyCore() = default; NEGEMMLowpMatrixMultiplyCore::NEGEMMLowpMatrixMultiplyCore(std::shared_ptr memory_manager, IWeightsManager *weights_manager) - : _memory_group(memory_manager), _weights_manager(weights_manager), _asm_glue(std::make_unique(memory_manager, weights_manager)), _mm_kernel(), _mtx_a_reshape_kernel(), + : _memory_group(memory_manager), _weights_manager(weights_manager), _asm_glue(std::make_unique(memory_manager, weights_manager)), _mm_kernel(), _mtx_a_reshape_kernel(), _mtx_b_reshape_kernel(), _mtx_a_reduction_kernel(), _mtx_b_reduction_kernel(), _offset_contribution_kernel(), _offset_contribution_output_stage_kernel(), _activation_func(), _convert_to_signed_asymm(), _convert_from_signed_asymm(), _vector_sum_col(), _vector_sum_row(), _tmp_a(), _tmp_b(), _mm_result_s32(), _signed_a(), _signed_output(), _original_b(nullptr), _a_offset(0), _b_offset(0), _run_vector_matrix_multiplication(false), _assembly_path(false), _fused_assembly_path(false), _reshape_b_only_on_first_run(false), _is_prepared(false), _fuse_output_stage(false), @@ -135,7 +135,7 @@ void NEGEMMLowpMatrixMultiplyCore::configure(const ITensor *a, const ITensor *b, } // Initialize assembly kernel meta-data - const AsmGemmInfo asm_info = init_assembly_metadata(gemm_info); + const cpu::AsmGemmInfo asm_info = init_assembly_metadata(gemm_info); #ifdef __aarch64__ switch(a->info()->data_type()) { @@ -261,7 +261,7 @@ void NEGEMMLowpMatrixMultiplyCore::configure(const ITensor *a, const ITensor *b, } // Configure activation const ActivationLayerInfo &activation = gemm_info.activation_info(); - _run_activation = activation.enabled() && (!_assembly_path || !NEGEMMAssemblyDispatch::is_activation_supported(activation)); + _run_activation = activation.enabled() && (!_assembly_path || !cpu::CpuGemmAssemblyDispatch::is_activation_supported(activation)); if(_run_activation) { _activation_func.configure(output, nullptr, activation); @@ -362,19 +362,19 @@ Status NEGEMMLowpMatrixMultiplyCore::validate(const ITensorInfo *a, const ITenso } // Initialize assembly kernel meta-data - const AsmGemmInfo asm_info = init_assembly_metadata(info); + const cpu::AsmGemmInfo asm_info = init_assembly_metadata(info); // Check if we need to run the optimized assembly kernel bool run_optimised = false; bool run_optimised_requantized = 
false; if(is_data_type_quantized_asymmetric(a_to_use->data_type()) && info.gemmlowp_output_stage().type == GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT) { - run_optimised = bool(NEGEMMAssemblyDispatch::validate(a_to_use, b, c, output, asm_info)); + run_optimised = bool(cpu::CpuGemmAssemblyDispatch::validate(a_to_use, b, c, output, asm_info)); run_optimised_requantized = run_optimised; } else { - run_optimised = bool(NEGEMMAssemblyDispatch::validate(a_to_use, b, nullptr, fuse_output_stage ? &mm_result_s32_info : output, asm_info)); + run_optimised = bool(cpu::CpuGemmAssemblyDispatch::validate(a_to_use, b, nullptr, fuse_output_stage ? &mm_result_s32_info : output, asm_info)); } if(run_optimised) diff --git a/src/runtime/NEON/functions/NEWinogradConvolutionLayer.cpp b/src/runtime/NEON/functions/NEWinogradConvolutionLayer.cpp index 941cb21e5e..0bf1738bec 100644 --- a/src/runtime/NEON/functions/NEWinogradConvolutionLayer.cpp +++ b/src/runtime/NEON/functions/NEWinogradConvolutionLayer.cpp @@ -34,7 +34,7 @@ #include "src/core/NEON/kernels/NEGEMMMatrixMultiplyKernel.h" #include "src/core/NEON/kernels/NEGEMMTranspose1xWKernel.h" #include "src/core/NEON/kernels/NEWinogradConvolutionLayerKernel.h" -#include "src/runtime/NEON/functions/NEGEMMAssemblyDispatch.h" +#include "src/runtime/cpu/operators/internal/CpuGemmAssemblyDispatch.h" #include "src/core/NEON/kernels/convolution/common/utils.hpp" #include "src/core/NEON/kernels/convolution/winograd/winograd.hpp" diff --git a/src/runtime/cpu/operators/internal/CpuGemmAssemblyDispatch.cpp b/src/runtime/cpu/operators/internal/CpuGemmAssemblyDispatch.cpp new file mode 100644 index 0000000000..36c1bbb1b3 --- /dev/null +++ b/src/runtime/cpu/operators/internal/CpuGemmAssemblyDispatch.cpp @@ -0,0 +1,863 @@ +/* + * Copyright (c) 2018-2021 Arm Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +#include "src/runtime/cpu/operators/internal/CpuGemmAssemblyDispatch.h" + +#include "arm_compute/runtime/NEON/NEScheduler.h" +#include "src/core/CPP/Validate.h" +#include "src/core/cpu/kernels/assembly/CpuGemmAssemblyWrapperKernel.h" +#include "src/core/cpu/kernels/assembly/arm_gemm.hpp" + +#include +#include + +namespace arm_compute +{ +namespace cpu +{ +namespace +{ +struct free_delete +{ + void operator()(void *x) + { + free(x); + } +}; + +struct Params +{ + unsigned int M; + unsigned int N; + unsigned int K; + unsigned int batches; + unsigned int multis; + unsigned int sections; + bool indirect; +}; + +Params extract_parameters(const ITensor *a, const ITensor *b, const ITensor *d, const AsmGemmInfo &info) +{ + ARM_COMPUTE_ERROR_ON_NULLPTR(a, b, d); + + Params p; + p.M = d->info()->tensor_shape().y(); + p.K = a->info()->tensor_shape().x(); + p.N = d->info()->tensor_shape().x(); + p.batches = 1; + p.multis = 1; + p.sections = 1; + p.indirect = false; + + if(info.method == AsmConvMethod::Conv || info.method == AsmConvMethod::Indirect) + { + p.indirect = true; + p.sections = b->info()->tensor_shape()[2] * b->info()->tensor_shape()[3]; + } + else + { + p.multis = b->info()->tensor_shape().z(); + p.batches = d->info()->tensor_shape().total_size_upper(2) / p.multis; + } + + // Update M in case of GEMM3D for output + if(info.depth_output_gemm3d != 0) + { + p.M = d->info()->tensor_shape().y() * d->info()->tensor_shape().z(); + p.batches = d->info()->tensor_shape().total_size_upper(3) / p.multis; + } + + return p; +} + +arm_gemm::Activation map_to_arm_gemm_activation(const ActivationLayerInfo &act) +{ + arm_gemm::Activation gemm_act; + + // Early exit in case lower bound is other than 0, as it's not yet supported + if(act.b() != 0.f) + { + return gemm_act; + } + + switch(act.activation()) + { + case ActivationLayerInfo::ActivationFunction::RELU: + gemm_act.type = arm_gemm::Activation::Type::ReLU; + break; + case ActivationLayerInfo::ActivationFunction::BOUNDED_RELU: + gemm_act.type = arm_gemm::Activation::Type::BoundedReLU; + gemm_act.param1 = act.a(); + gemm_act.param2 = 0.f; + break; + case ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU: + gemm_act.type = arm_gemm::Activation::Type::BoundedReLU; + gemm_act.param1 = act.a(); + gemm_act.param2 = act.b(); + break; + default: + gemm_act.type = arm_gemm::Activation::Type::None; + } + + return gemm_act; +} + +IScheduler::Hints scheduling_hint_heuristic(arm_gemm::GemmMethod method, DataType data_type) +{ + // Schedule assembly kernel + const int granule_threshold = 200; + IScheduler::Hints scheduling_hint = IScheduler::Hints(Window::DimX); + if(method == arm_gemm::GemmMethod::GEMM_INTERLEAVED && data_type == DataType::F32) + { + scheduling_hint = IScheduler::Hints(Window::DimX, IScheduler::StrategyHint::DYNAMIC, granule_threshold); + } + else if(method == arm_gemm::GemmMethod::GEMM_INTERLEAVED_2D && (data_type == DataType::F32 || data_type == DataType::F16 || data_type == DataType::U8 || data_type == DataType::S8)) + { + //GEMM_INTERLEAVED supports 2D parallelism, IScheduler::split_dimensions_all signals to parallelise over all window dimensions + scheduling_hint = IScheduler::Hints(IScheduler::split_dimensions_all, IScheduler::StrategyHint::STATIC, granule_threshold); + } + else if(method == arm_gemm::GemmMethod::QUANTIZE_WRAPPER_2D && (data_type == DataType::QASYMM8 || data_type == DataType::QASYMM8_SIGNED)) + { + //special case for QASYMM8 to support 2D parallelism, scheduler here may be tweaked differently compared to FP32 case + 
scheduling_hint = IScheduler::Hints(IScheduler::split_dimensions_all, IScheduler::StrategyHint::STATIC, granule_threshold); + } + + return scheduling_hint; +} + +template +class FallbackTransform : public ITransformWeights +{ +public: + FallbackTransform() noexcept {}; + /** Prevent instances of this class from being copied (As this class contains pointers) */ + FallbackTransform(const FallbackTransform &) = delete; + /** Default move constructor */ + FallbackTransform(FallbackTransform &&) = default; + /** Prevent instances of this class from being copied (As this class contains pointers) */ + FallbackTransform &operator=(const FallbackTransform &) = delete; + /** Default move assignment operator */ + FallbackTransform &operator=(FallbackTransform &&) = default; + void run() override + { + _output.allocator()->allocate(); + ARM_COMPUTE_ERROR_ON(_output.buffer() == nullptr); + _gemm_kernel_asm->pretranspose_B_array(_output.buffer(), _in1_ptr, _ldb, _multi_stride_b); + _reshape_run = true; + } + + void release() override + { + _output.allocator()->free(); + } + + ITensor *get_weights() override + { + return &_output; + } + + uint32_t uid() override + { + uint32_t id = (_B_pretranspose_size | 0x80000000); + return id; + } + + void configure(size_t B_pretranspose_size, unsigned int alignment) + { + _output.allocator()->init(TensorInfo(TensorShape{ (B_pretranspose_size + alignment) }, 1, DataType::S8), alignment); + _B_pretranspose_size = B_pretranspose_size; + } + + void set_pretranspose(ITensor *tensor) + { + if(!_reshape_run) + { + _gemm_kernel_asm->set_pretransposed_B_data(tensor->buffer()); + } + } + + void set_args(const int ldb, const TypeInput *in1_ptr, const int multi_stride_b, std::shared_ptr> gemm_kernel_asm) + { + _ldb = ldb; + _in1_ptr = in1_ptr; + _multi_stride_b = multi_stride_b; + _gemm_kernel_asm = gemm_kernel_asm; + } + +private: + Tensor _output{}; + int _ldb{}; + const TypeInput *_in1_ptr{}; + int _multi_stride_b{}; + size_t _B_pretranspose_size{}; + std::shared_ptr> _gemm_kernel_asm{ nullptr }; +}; + +/** Fallback in case ACL doesn't have a function */ +template +class Fallback : public CpuGemmAssemblyDispatch::IFallback +{ +public: + /** Destructor */ + ~Fallback() + { + // Release memory if we have allocated the memory ourselves + if(_pretranspose && !(_weights_manager && _weights_manager->are_weights_managed(_b))) + { + delete _pretranspose; + } + } + + /** Initialise the functions's input and output. + * + * @param[in] a Input tensor containing the Matrix A. + * @param[in] b Input tensor containing the Matrix B. + * @param[in] c Input tensor containing the Matrix C. + * @param[out] d Output tensor to store the result of matrix multiplication. + * @param[in] args Matrix multiplication information. + * @param[in] gemm_info GEMM meta-data + * @param[in] memory_group Memory group to be used by the function. + * @param[in] weights_manager Weights manager to be used by the function. + * @param[in] os Output stage meta-data. 
+ */ + void configure(const ITensor *a, const ITensor *b, const ITensor *c, ITensor *d, + arm_gemm::GemmArgs args, const AsmGemmInfo &gemm_info, + MemoryGroup &memory_group, IWeightsManager *weights_manager, const OutputStage &os = {}); + + /** Set requantization shifts to be used + * + * @param[in] shifts Requantization shifts + * + * @return Pointer to the shift data + */ + /** Set requantization data to be used + * + * + * @param shifts Requantization shifts + * @param multipliers Requantization multipliers + * + * @return A tuple with the pointers to the shift and multiplier data respectively + */ + std::tuple set_requantize_data(const std::vector &shifts, + const std::vector &multipliers); + + // Inherited methods overridden: + void run() override; + void prepare() override; + bool is_configured() const override; + +private: + /** Allocate a workspace tensor. + * + * @param[in] workspace_size Size to allocate. + * @param[in] memory_group Tensor memory group. + * @param[in] alignment Workspace memory alignment. + */ + void allocate_workspace(size_t workspace_size, MemoryGroup &memory_group, size_t alignment); + /** Configure the indirect buffer + * + * @param[in] a Input tensor containing the Matrix A. + * @param[in] b Input tensor containing the Matrix B. + * @param[out] d Output tensor to store the result of matrix multiplication. + * @param[in] info GEMM meta-data + */ + void configure_indirect(const ITensorInfo *a, const ITensorInfo *b, const ITensorInfo *d, const AsmGemmInfo &info); + /** Prepare the indirect buffer */ + void prepare_indirect_buffer(); + + /** Assembly Gemm kernel */ + std::shared_ptr> _gemm_kernel_asm{ nullptr }; + /** Optimised Arm® Neon™ kernel */ + std::unique_ptr _optimised_kernel{ nullptr }; + /** Input A */ + const ITensor *_a + { + nullptr + }; + /** Input B */ + const ITensor *_b + { + nullptr + }; + const ITensor *_c + { + nullptr + }; + /** Output */ + ITensor *_d{ nullptr }; + /** GEMM workspace */ + Tensor _workspace{}; + /** Pre-transpose tensor */ + ITensor *_pretranspose{ nullptr }; + /** Prepared flag */ + bool _is_prepared{ false }; + /** GEMM meta-data */ + AsmGemmInfo _gemm_info{}; + /** Weights manager */ + IWeightsManager *_weights_manager{ nullptr }; + /** Weights transform object */ + FallbackTransform _weights_transform{}; + /** GEMM kernel description */ + arm_gemm::KernelDescription _kernel_info{}; + /** Per channel quantization shifts */ + std::vector _shifts{}; + std::vector right_shifts{}; + std::vector left_shifts{}; + /** Per channel quantization multipliers */ + std::vector _multipliers{}; + /** Indirect buffer */ + std::unique_ptr _indirect_arg{}; + std::unique_ptr _indirect_buf{}; + std::vector _indirect_pad{}; + arm_gemm::ConvolutionParameters _cp{}; +}; + +template +std::tuple +Fallback::set_requantize_data(const std::vector &shifts, const std::vector &multipliers) +{ + _multipliers = multipliers; + _shifts = shifts; + bool need_left = false; + for(const auto s : _shifts) + { + left_shifts.push_back(std::max(-s, int32_t(0))); + right_shifts.push_back(std::min(-s, int32_t(0))); + if(s < 0 && !need_left) + { + need_left = true; + } + } + return std::make_tuple(need_left, left_shifts.data(), right_shifts.data(), _multipliers.data()); +} + +template +void Fallback::prepare_indirect_buffer() +{ + const TypeInput *A_ptr = reinterpret_cast(_a->buffer()); + const int multis = 1; + const int batches = _a->info()->tensor_shape().total_size_upper(3); + const size_t stride_A = _a->info()->strides_in_bytes().y() / sizeof(TypeInput); + const 
size_t batch_stride_A = _a->info()->strides_in_bytes()[3] / sizeof(TypeInput); + const size_t multi_stride_A = _a->info()->strides_in_bytes()[4] / sizeof(TypeInput); + + const size_t output_hw = _cp.output_height * _cp.output_width; + const int batch_size = _cp.kernel_height * _cp.kernel_width * output_hw * sizeof(TypeInput); + const size_t batch_stride = batch_size / sizeof(TypeInput); + const int multi_size = batch_size * batches; + const size_t multi_stride = multi_size / sizeof(TypeInput); + + for(int64_t m = 0; m < multis; m++) + { + for(int64_t b = 0; b < batches; b++) + { + for(int64_t output_y = 0; output_y < _cp.output_height; output_y++) + { + for(int64_t output_x = 0; output_x < _cp.output_width; output_x++) + { + int64_t output_xy = (output_y * _cp.output_width) + output_x; + + for(int64_t kernel_y = 0; kernel_y < _cp.kernel_height; kernel_y++) + { + for(int64_t kernel_x = 0; kernel_x < _cp.kernel_width; kernel_x++) + { + int64_t input_x = (output_x * _cp.output_stride_w) + kernel_x - _cp.padding_left; + int64_t input_y = (output_y * _cp.output_stride_h) + kernel_y - _cp.padding_top; + int64_t kernel_xy = (kernel_y * _cp.kernel_width) + kernel_x; + int64_t input_xy = (input_y * _cp.input_width) + input_x; + + if(input_x < 0 || input_x >= _cp.input_width || input_y < 0 || input_y >= _cp.input_height) + { + _indirect_buf.get()[m * multi_stride + b * batch_stride + kernel_xy * output_hw + output_xy] = _indirect_pad.data(); + } + else + { + _indirect_buf.get()[m * multi_stride + b * batch_stride + kernel_xy * output_hw + output_xy] = + A_ptr + (m * multi_stride_A + b * batch_stride_A + input_xy * stride_A); + } + } + } + } + } + } + } +} + +template +void Fallback::configure_indirect(const ITensorInfo *a, const ITensorInfo *b, const ITensorInfo *d, const AsmGemmInfo &info) +{ + ARM_COMPUTE_ERROR_ON(!(info.method == AsmConvMethod::Conv || info.method == AsmConvMethod::Indirect)); + + float zeropad = 0.f; + if(is_data_type_quantized(a->data_type())) + { + zeropad = a->quantization_info().uniform().offset; + } + + const int64_t input_width = static_cast(a->tensor_shape()[1]); + const int64_t input_height = static_cast(a->tensor_shape()[2]); + const int64_t input_channels = static_cast(a->tensor_shape()[0]); + const int64_t kernel_width = static_cast(b->tensor_shape()[2]); + const int64_t kernel_height = static_cast(b->tensor_shape()[3]); + const int64_t output_width = static_cast(d->tensor_shape()[1]); + const int64_t output_height = static_cast(d->tensor_shape()[2]); + + _cp = { input_width, input_height, input_channels, kernel_width, kernel_height, output_width, output_height, + info.ps_info.stride().first, info.ps_info.stride().second, info.padding_top, info.padding_left, zeropad + }; + + if(info.method == AsmConvMethod::Conv) + { + _gemm_kernel_asm->set_convolution_parameters(_cp); + } + + if(info.method == AsmConvMethod::Indirect) + { + const unsigned int multis = 1; + const unsigned int batches = a->tensor_shape().total_size_upper(3); + const unsigned int kernel_hw = _cp.kernel_width * _cp.kernel_height; + const unsigned int output_hw = _cp.output_width * _cp.output_height; + + using TypeInputPtr = TypeInput *; + const int batch_size = kernel_hw * output_hw * sizeof(TypeInputPtr); + const size_t batch_stride = batch_size / sizeof(TypeInputPtr); + const int multi_size = batch_size * batches; + const size_t multi_stride = multi_size / sizeof(TypeInputPtr); + + _indirect_buf = std::unique_ptr(reinterpret_cast(malloc(multi_size * multis))); + _indirect_arg = 
std::unique_ptr(reinterpret_cast(malloc(sizeof(TypeInput **) * kernel_hw * multis * batches))); + _indirect_pad = std::vector(_cp.input_channels, TypeInput(zeropad)); + + // Set indirect argument + int64_t pos = 0; + for(int64_t m = 0; m < multis; m++) + { + for(int64_t b = 0; b < batches; b++) + { + for(int64_t kernel_xy = 0; kernel_xy < kernel_hw; kernel_xy++) + { + (_indirect_arg.get())[pos++] = _indirect_buf.get() + m * multi_stride + b * batch_stride + kernel_xy * output_hw; + } + } + } + + _gemm_kernel_asm->set_indirect_parameters(a->tensor_shape()[0], _indirect_arg.get()); + } +} + +template +void Fallback::configure(const ITensor *a, const ITensor *b, const ITensor *c, ITensor *d, + arm_gemm::GemmArgs args, const AsmGemmInfo &gemm_info, + MemoryGroup &memory_group, IWeightsManager *weights_manager, const OutputStage &os) +{ + arm_gemm::GemmConfig gemm_cfg; + _kernel_info = arm_gemm::get_gemm_method(args, os); + _weights_manager = weights_manager; + if(_kernel_info.method != arm_gemm::GemmMethod::GEMV_BATCHED) + { + gemm_cfg.filter = _kernel_info.name; + args._cfg = &gemm_cfg; + } + _gemm_kernel_asm = arm_gemm::gemm(args, os); + if(_gemm_kernel_asm == nullptr) + { + //configuration not supported: Leave function unconfigured: + return; + } + + // arm_compute wrapper for the Gemm object (see above) + auto acl_gemm_wrapper = std::make_unique>(); + ARM_COMPUTE_ERROR_ON(acl_gemm_wrapper == nullptr); + acl_gemm_wrapper->configure(_gemm_kernel_asm.get(), gemm_cfg.filter); + const size_t workspace_size = _gemm_kernel_asm->get_working_size(); + if(workspace_size > 0) + { + // Allocate workspace + const unsigned int alignment = 4096; + allocate_workspace(workspace_size, memory_group, alignment); + } + + //if we disable this code below in brackets then ConvLayer deadlocks when threads > 1 and + //the shapes are In=1x1x1024 Weights=1x1x1024x1001 Biases=1001 Out=1x1x1001 + { + const unsigned int window_size = _gemm_kernel_asm->get_window_size().total_size(); + if(window_size < static_cast(args._maxthreads)) + { + _gemm_kernel_asm->set_nthreads(window_size); + } + } + + _optimised_kernel = std::move(acl_gemm_wrapper); + _a = a; + _b = b; + _c = c; + _d = d; + _gemm_info = gemm_info; + // Check for pre-transposed support + if(_gemm_kernel_asm->B_pretranspose_required()) + { + // Forcing 128-byte alignment (required by 32-bit kernels) + const unsigned int alignment = 128; + const size_t B_pretranspose_size = _gemm_kernel_asm->get_B_pretransposed_array_size(); + if(weights_manager && _weights_manager->are_weights_managed(b)) + { + _weights_transform.configure(B_pretranspose_size, alignment); + _pretranspose = _weights_manager->acquire(b, &_weights_transform); + } + else + { + _pretranspose = new Tensor(); + static_cast(_pretranspose)->allocator()->init(TensorInfo(TensorShape{ (B_pretranspose_size + alignment) }, 1, DataType::S8), alignment); + } + } + + // Handle indirect GEMM convolution + if(gemm_info.method == AsmConvMethod::Conv || gemm_info.method == AsmConvMethod::Indirect) + { + configure_indirect(a->info(), b->info(), d->info(), gemm_info); + } +} + +template +void Fallback::prepare() +{ + if(!_is_prepared) + { + // Setup up matrix bias in the assembly kernel, it's just a pointer to matrix C. 
+ if(_c && _c->info()->data_type() == DataType::S32) + { + _gemm_kernel_asm->set_quantized_bias(reinterpret_cast(_c->buffer() + _c->info()->offset_first_element_in_bytes()), 0); + } + + // Pretranspose B if required + if(_gemm_kernel_asm->B_pretranspose_required()) + { + const int ldb = _b->info()->strides_in_bytes().y() / sizeof(TypeInput); + const auto in1_ptr = reinterpret_cast(_b->buffer() + _b->info()->offset_first_element_in_bytes()); + const int multi_stride_b = _b->info()->strides_in_bytes().z() / sizeof(TypeInput); + + if(_weights_manager && _weights_manager->are_weights_managed(_b)) + { + _weights_transform.set_args(ldb, in1_ptr, multi_stride_b, _gemm_kernel_asm); + _weights_manager->run(_b, &_weights_transform); + + // If we didn't run the reshape function, set the pretransposed buffer + if(!_weights_transform.is_reshape_run()) + { + _weights_transform.set_pretranspose(_pretranspose); + } + } + else + { + static_cast(_pretranspose)->allocator()->allocate(); + ARM_COMPUTE_ERROR_ON(_pretranspose->buffer() == nullptr); + _gemm_kernel_asm->pretranspose_B_array(_pretranspose->buffer(), in1_ptr, ldb, multi_stride_b); + _b->mark_as_unused(); + } + } + + if(_gemm_info.method == AsmConvMethod::Indirect) + { + prepare_indirect_buffer(); + } + + _is_prepared = true; + } +} + +template +void Fallback::allocate_workspace(size_t workspace_size, MemoryGroup &memory_group, size_t alignment) +{ + ARM_COMPUTE_ERROR_ON_MSG(workspace_size == 0, "size cannot be 0"); + _workspace.allocator()->init(TensorInfo(TensorShape{ (workspace_size + alignment) }, 1, DataType::S8), alignment); + memory_group.manage(&_workspace); + _workspace.allocator()->allocate(); +} + +template +bool Fallback::is_configured() const +{ + return _optimised_kernel != nullptr; +} + +template +void Fallback::run() +{ + int lda = _a->info()->strides_in_bytes().y() / sizeof(TypeInput); + int ldb = 0; + const int ldd = _d->info()->strides_in_bytes().y() / sizeof(TypeOutput); + + const size_t a_batch_idx = _gemm_info.reinterpret_input_as_3d != 0 ? 3 : 2; + const size_t a_multi_idx = a_batch_idx + 1; + const size_t d_batch_idx = _gemm_info.depth_output_gemm3d != 0 ? 
3 : 2; + const size_t d_multi_idx = d_batch_idx + 1; + + int batch_stride_a = _a->info()->strides_in_bytes()[a_batch_idx] / sizeof(TypeInput); + const int batch_stride_d = _d->info()->strides_in_bytes()[d_batch_idx] / sizeof(TypeOutput); + + int multi_stride_a = _a->info()->strides_in_bytes()[a_multi_idx] / sizeof(TypeInput); + int multi_stride_b = 0; + const int multi_stride_d = _d->info()->strides_in_bytes()[d_multi_idx] / sizeof(TypeOutput); + + auto in0_ptr = reinterpret_cast(_a->buffer() + _a->info()->offset_first_element_in_bytes()); + const TypeInput *in1_ptr = nullptr; + auto out_ptr = reinterpret_cast(_d->buffer() + _d->info()->offset_first_element_in_bytes()); + + // Check if B is pre-tranposed and de-reference if not + if(!_gemm_kernel_asm->B_is_pretransposed()) + { + ldb = _b->info()->strides_in_bytes().y() / sizeof(TypeInput); + multi_stride_b = _b->info()->strides_in_bytes().z() / sizeof(TypeInput); + in1_ptr = reinterpret_cast(_b->buffer() + _b->info()->offset_first_element_in_bytes()); + } + + const auto scheduling_hint = scheduling_hint_heuristic(_kernel_info.method, _d->info()->data_type()); + + // Set workspace if needed and reset number of threads as buffer manager gets re-created with max_threads + if(_workspace.buffer() != nullptr) + { + _gemm_kernel_asm->set_working_space(reinterpret_cast(_workspace.buffer())); + const unsigned int split_dim = scheduling_hint.split_dimension(); + const unsigned int window_size = _gemm_kernel_asm->get_window_size().total_size(); + unsigned int num_threads = NEScheduler::get().num_threads(); + if(window_size < num_threads) + { + num_threads = window_size; + } + if(split_dim != IScheduler::split_dimensions_all) + { + // Make sure the kernel does not expect more threads than we can actually spawn + const unsigned int num_iterations = _optimised_kernel.get()->window().num_iterations(split_dim); + num_threads = std::min(num_iterations, num_threads); + } + _gemm_kernel_asm->set_nthreads(num_threads); + } + + // Prepare assembly kernel + prepare(); + + // Setup up matrix bias in the assembly kernel, it's just a pointer to matrix C. 
+ TypeOutput *bias = nullptr; + if(_c && _c->info()->data_type() != DataType::S32) + { + bias = reinterpret_cast(_c->buffer() + _c->info()->offset_first_element_in_bytes()); + } + + if(_gemm_info.method == AsmConvMethod::Indirect) + { + in0_ptr = nullptr; + lda = 0; + batch_stride_a = 0; + multi_stride_a = 0; + } + + // Set gemm parameters + _gemm_kernel_asm->set_arrays(in0_ptr, lda, batch_stride_a, multi_stride_a, + in1_ptr, ldb, multi_stride_b, + out_ptr, ldd, batch_stride_d, multi_stride_d, + bias, 0); + // Schedule + NEScheduler::get().schedule(_optimised_kernel.get(), scheduling_hint); +} + +template +void create_arm_gemm(std::unique_ptr &arm_gemm, MemoryGroup &memory_group, + const ITensor *a, const ITensor *b, const ITensor *c, ITensor *d, arm_gemm::Activation activation, const AsmGemmInfo &info, + IWeightsManager *weights_manager) +{ + Params p = extract_parameters(a, b, d, info); + const CPUInfo &ci = NEScheduler::get().cpu_info(); + unsigned int num_threads = NEScheduler::get().num_threads(); + + arm_gemm::GemmArgs args(&ci, p.M, p.N, p.K, p.sections, p.batches, p.multis, p.indirect, activation, num_threads); + + // Create arm_gemm fallback + auto fallback = std::make_unique>(); + fallback->configure(a, b, c, d, args, info, memory_group, weights_manager); + arm_gemm = std::move(fallback); +} + +template +void create_arm_gemm_quant(std::unique_ptr &arm_gemm, MemoryGroup &memory_group, + const ITensor *a, const ITensor *b, const ITensor *c, ITensor *d, arm_gemm::Activation activation, const AsmGemmInfo &info, + IWeightsManager *weights_manager) +{ + ARM_COMPUTE_UNUSED(activation); + Params p = extract_parameters(a, b, d, info); + const CPUInfo &ci = NEScheduler::get().cpu_info(); + unsigned int num_threads = NEScheduler::get().num_threads(); + + arm_gemm::GemmArgs args(&ci, p.M, p.N, p.K, p.sections, p.batches, p.multis, p.indirect, activation, num_threads); + + // Create arm_gemm fallback + auto fallback = std::make_unique>(); + + // Configure requantization info + const int32_t negation = info.negated_offsets ? 1 : -1; + const int32_t a_offset = -a->info()->quantization_info().uniform().offset * negation; + const int32_t b_offset = -b->info()->quantization_info().uniform().offset * negation; + const GEMMLowpOutputStageInfo os_info = info.output_stage; + + arm_gemm::Requantize32 gemm_requant_info{}; + if(os_info.gemmlowp_shifts.size() > 1) + { + const auto requantize_data = fallback->set_requantize_data(os_info.gemmlowp_shifts, os_info.gemmlowp_multipliers); + gemm_requant_info = arm_gemm::Requantize32(nullptr, 0, + a_offset, b_offset, os_info.gemmlowp_offset, + (std::get<0>(requantize_data)) ? 
std::get<1>(requantize_data) : nullptr, + std::get<2>(requantize_data), + std::get<3>(requantize_data), + os_info.gemmlowp_min_bound, os_info.gemmlowp_max_bound); + } + else + { + gemm_requant_info = arm_gemm::Requantize32(nullptr, 0, + a_offset, b_offset, os_info.gemmlowp_offset, + -os_info.gemmlowp_shift, os_info.gemmlowp_multiplier, + os_info.gemmlowp_min_bound, os_info.gemmlowp_max_bound); + } + + // Configure fallback + fallback->configure(a, b, c, d, args, info, memory_group, weights_manager, gemm_requant_info); + arm_gemm = std::move(fallback); +} + +} //namespace + +CpuGemmAssemblyDispatch::CpuGemmAssemblyDispatch(std::shared_ptr memory_manager, IWeightsManager *weights_manager) + : _arm_gemm(nullptr), _memory_group(std::move(memory_manager)), _weights_manager(weights_manager) +{ +} + +Status CpuGemmAssemblyDispatch::validate(const ITensorInfo *a, const ITensorInfo *b, const ITensorInfo *c, const ITensorInfo *d, const AsmGemmInfo &info) +{ + ARM_COMPUTE_UNUSED(c, info); + ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(a, b, d); + ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(a); + ARM_COMPUTE_RETURN_ERROR_ON_CPU_BF16_UNSUPPORTED(a); + +#ifndef __aarch64__ + ARM_COMPUTE_RETURN_ERROR_ON_MSG(a->element_size() == 1, "8bit integer types only supported for aarch64"); +#endif /* __aarch64__ */ + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(a, 1, DataType::U8, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::S8, + DataType::BFLOAT16, DataType::F16, DataType::F32); + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(b, 1, DataType::U8, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::QSYMM8_PER_CHANNEL, DataType::S8, + DataType::BFLOAT16, DataType::F16, DataType::F32); + if(is_data_type_quantized_per_channel(b->data_type())) + { + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(a, 1, DataType::QASYMM8_SIGNED, DataType::S8); + } + else + { + ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(a, b); + } + ARM_COMPUTE_RETURN_ERROR_ON_MSG(a->data_type() == DataType::F32 && d->data_type() != DataType::F32, "Only F32 output supported for F32 input"); + ARM_COMPUTE_RETURN_ERROR_ON_MSG(a->data_type() == DataType::F16 && d->data_type() != DataType::F16, "Only F16 output supported for F16 input"); + ARM_COMPUTE_RETURN_ERROR_ON_MSG(a->data_type() == DataType::BFLOAT16 && d->data_type() != DataType::F32, "Only F32 output supported for BFLOAT16 input"); + ARM_COMPUTE_RETURN_ERROR_ON_MSG(a->data_type() == DataType::U8 && d->data_type() != DataType::U32, "Only U32 output supported for U8 input"); + ARM_COMPUTE_RETURN_ERROR_ON_MSG(a->data_type() == DataType::S8 && d->data_type() != DataType::S32, "Only S32 output supported for S8 input"); + ARM_COMPUTE_RETURN_ERROR_ON_MSG(a->data_type() == DataType::QASYMM8 && d->data_type() != DataType::QASYMM8, "Only QASYMM8 output supported for QASYMM8 input"); + return Status{}; +} + +bool CpuGemmAssemblyDispatch::is_activation_supported(const ActivationLayerInfo &activation) +{ + arm_gemm::Activation act = map_to_arm_gemm_activation(activation); + return act.type != arm_gemm::Activation::Type::None; +} + +void CpuGemmAssemblyDispatch::configure(const ITensor *a, const ITensor *b, const ITensor *c, ITensor *d, const AsmGemmInfo &info) +{ + ARM_COMPUTE_ERROR_ON_NULLPTR(a, b, d); + arm_gemm::Activation act = map_to_arm_gemm_activation(info.activation_info); + + //If we don't support a combination of data types, silently return: it is the caller's responsibility to check if configure() was successful via is_configured() + 
if(!CpuGemmAssemblyDispatch::validate(a->info(), b->info(), c != nullptr ? c->info() : nullptr, d->info(), info)) + { + return; + } + + switch(a->info()->data_type()) + { + case DataType::F32: + create_arm_gemm<float, float>(_arm_gemm, _memory_group, a, b, c, d, act, info, _weights_manager); + break; +#ifdef __aarch64__ + case DataType::U8: + case DataType::QASYMM8: + if(d->info()->data_type() == DataType::S32) + { + create_arm_gemm<uint8_t, uint32_t>(_arm_gemm, _memory_group, a, b, c, d, act, info, _weights_manager); + } + else + { + create_arm_gemm_quant<uint8_t, uint8_t>(_arm_gemm, _memory_group, a, b, c, d, act, info, _weights_manager); + } + break; + case DataType::S8: + case DataType::QASYMM8_SIGNED: + if(d->info()->data_type() == DataType::S32) + { + create_arm_gemm<int8_t, int32_t>(_arm_gemm, _memory_group, a, b, c, d, act, info, _weights_manager); + } + else + { + create_arm_gemm_quant<int8_t, int8_t>(_arm_gemm, _memory_group, a, b, c, d, act, info, _weights_manager); + } + break; +#endif /* __aarch64__ */ +#if defined(__ARM_FEATURE_BF16_VECTOR_ARITHMETIC) || defined(ARM_COMPUTE_FORCE_BF16) + case DataType::BFLOAT16: + create_arm_gemm<bfloat16, float>(_arm_gemm, _memory_group, a, b, c, d, act, info, _weights_manager); + break; +#endif /* defined(__ARM_FEATURE_BF16_VECTOR_ARITHMETIC) || defined(ARM_COMPUTE_FORCE_BF16) */ +#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC + case DataType::F16: + create_arm_gemm<float16_t, float16_t>(_arm_gemm, _memory_group, a, b, c, d, act, info, _weights_manager); + break; +#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */ + default: + break; + } +} + +void CpuGemmAssemblyDispatch::prepare() +{ + ARM_COMPUTE_ERROR_ON(_arm_gemm == nullptr); + _arm_gemm->prepare(); +} + +bool CpuGemmAssemblyDispatch::is_configured() const +{ + return _arm_gemm != nullptr && _arm_gemm->is_configured(); +} + +void CpuGemmAssemblyDispatch::run() +{ + MemoryGroupResourceScope scope_mg(_memory_group); + + ARM_COMPUTE_ERROR_ON(_arm_gemm == nullptr); + _arm_gemm->run(); +} +} // namespace cpu +} // namespace arm_compute diff --git a/src/runtime/cpu/operators/internal/CpuGemmAssemblyDispatch.h b/src/runtime/cpu/operators/internal/CpuGemmAssemblyDispatch.h new file mode 100644 index 0000000000..0bbae49a7e --- /dev/null +++ b/src/runtime/cpu/operators/internal/CpuGemmAssemblyDispatch.h @@ -0,0 +1,131 @@ +/* + * Copyright (c) 2018-2021 Arm Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE.
+ */ +#ifndef SRC_NEGEMMASSEMBLYDISPATCH_H +#define SRC_NEGEMMASSEMBLYDISPATCH_H + +#include "arm_compute/runtime/IFunction.h" +#include "arm_compute/runtime/IMemoryManager.h" +#include "arm_compute/runtime/IWeightsManager.h" +#include "arm_compute/runtime/MemoryGroup.h" +#include "arm_compute/runtime/Tensor.h" + +namespace arm_compute +{ +namespace cpu +{ +/* Convolution method supported by the assembly gemm interface */ +enum class AsmConvMethod +{ + Im2Col, + Indirect, + Conv +}; + +struct AsmGemmInfo +{ + AsmConvMethod method{ AsmConvMethod::Im2Col }; + PadStrideInfo ps_info{}; + ActivationLayerInfo activation_info{}; + GEMMLowpOutputStageInfo output_stage{}; + bool negated_offsets{ true }; + bool reinterpret_input_as_3d{ false }; + bool depth_output_gemm3d{ false }; + int64_t padding_top{ 0 }; + int64_t padding_left{ 0 }; + float padding_value{ 0.f }; +}; + +/** Assembly kernel glue */ +class CpuGemmAssemblyDispatch : public IFunction +{ +public: + /** Constructor */ + CpuGemmAssemblyDispatch(std::shared_ptr<IMemoryManager> memory_manager = nullptr, IWeightsManager *weights_manager = nullptr); + /** Prevent instances of this class from being copy constructed */ + CpuGemmAssemblyDispatch(const CpuGemmAssemblyDispatch &) = delete; + /** Prevent instances of this class from being copied */ + CpuGemmAssemblyDispatch &operator=(const CpuGemmAssemblyDispatch &) = delete; + /** Default move constructor */ + CpuGemmAssemblyDispatch(CpuGemmAssemblyDispatch &&) = default; + /** Default move assignment operator */ + CpuGemmAssemblyDispatch &operator=(CpuGemmAssemblyDispatch &&) = default; + /** Default destructor */ + ~CpuGemmAssemblyDispatch() = default; + + class IFallback + { + public: + virtual void run() = 0; + virtual void prepare() = 0; + virtual bool is_configured() const = 0; + virtual ~IFallback() = default; + }; + +public: + /** If supported create a Compute Library function else fallback to the arm_gemm function. + * + * @param[in] a Input tensor (Matrix A) + * @param[in] b Input tensor (Matrix B) + * @param[in] c Input tensor (Matrix C) used to pass the bias for quantized calculations + * @param[out] d Output tensor to store the result of matrix multiplication. Data type supported: same as @p input0. + * @param[in] info GEMM meta-data + */ + void configure(const ITensor *a, const ITensor *b, const ITensor *c, ITensor *d, const AsmGemmInfo &info); + + /** Indicates whether or not this function can be used to process the given parameters. + * + * @param[in] a Input tensor info (Matrix A) + * @param[in] b Input tensor info (Matrix B) + * @param[in] c Input tensor info (Matrix C) used to pass the bias for quantized calculations + * @param[in] d Output tensor to store the result of matrix multiplication. Data type supported: same as @p input0. + * @param[in] info GEMM meta-data + * + * @return a status. + */ + static Status validate(const ITensorInfo *a, const ITensorInfo *b, const ITensorInfo *c, const ITensorInfo *d, const AsmGemmInfo &info); + /** Checks if activation is supported by the gemm assembly dispatcher + * + * @param[in] activation Activation to check + * + * @return True if activation is supported else false + */ + static bool is_activation_supported(const ActivationLayerInfo &activation); + /** Was the function successfully configured ?
+ * + * @return True if the function is configured and ready to run + */ + bool is_configured() const; + + // Inherited methods overridden: + void prepare() override; + void run() override; + +private: + std::unique_ptr<IFallback> _arm_gemm; /**< Interface for the arm_gemm fallback */ + MemoryGroup _memory_group; /**< Function memory group */ + IWeightsManager *_weights_manager; /**< Pointer to the weights manager */ +}; +} // namespace cpu +} // namespace arm_compute +#endif /* SRC_NEGEMMASSEMBLYDISPATCH_H */ -- cgit v1.2.1
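
For orientation, the sketch below shows how the relocated dispatcher might be driven end to end through the API declared in the new header (AsmGemmInfo, configure(), validate(), is_configured(), prepare(), run()). It is a minimal, hypothetical example and is not part of this change: the function name, tensor shapes, F32 data type and fused ReLU are illustrative assumptions, and since the header is internal the snippet only builds inside the library source tree.

// Hypothetical usage sketch (not part of the patch): exercises the renamed
// cpu::CpuGemmAssemblyDispatch with arbitrary F32 shapes.
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/Tensor.h"
#include "src/runtime/cpu/operators/internal/CpuGemmAssemblyDispatch.h"

using namespace arm_compute;

void example_cpu_gemm_assembly_dispatch()
{
    // D = A * B with A: M x K, B: K x N, D: M x N (all F32 in this example)
    Tensor a{}, b{}, d{};
    a.allocator()->init(TensorInfo(TensorShape(64U, 32U), 1, DataType::F32));  // K = 64, M = 32
    b.allocator()->init(TensorInfo(TensorShape(128U, 64U), 1, DataType::F32)); // N = 128, K = 64
    d.allocator()->init(TensorInfo(TensorShape(128U, 32U), 1, DataType::F32)); // N = 128, M = 32

    // GEMM meta-data: the defaults select the plain (Im2Col) GEMM path; fuse a ReLU
    cpu::AsmGemmInfo asm_info{};
    asm_info.activation_info = ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU);

    cpu::CpuGemmAssemblyDispatch asm_gemm{}; // default memory/weights managers for brevity
    if(bool(cpu::CpuGemmAssemblyDispatch::validate(a.info(), b.info(), nullptr, d.info(), asm_info)))
    {
        asm_gemm.configure(&a, &b, nullptr, &d, asm_info);
    }

    a.allocator()->allocate();
    b.allocator()->allocate();
    d.allocator()->allocate();
    // ... fill a and b with data ...

    if(asm_gemm.is_configured())
    {
        asm_gemm.prepare(); // one-off work, e.g. pre-transposing B
        asm_gemm.run();     // schedules the selected assembly kernel
    }
}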