author    | Michele Di Giorgio <michele.digiorgio@arm.com> | 2021-06-21 12:00:43 +0100
committer | Michele Di Giorgio <michele.digiorgio@arm.com> | 2021-06-29 13:29:01 +0000
commit    | 93b75e0c072c3cc5654fcdf6aed1068b40012081 (patch)
tree      | 08acbf1bcafaa326bea1d8e472ad66b955c7c17f /src
parent    | 5fdde99f4271891a40c02cd1e89f1344aa84583a (diff)
download  | ComputeLibrary-93b75e0c072c3cc5654fcdf6aed1068b40012081.tar.gz
Port NEGEMM to memory injecting interface (Part 1)
- Start porting NEGEMM to the new API
- Port NEGEMMInterleave4x4Kernel to the new API
- Port NEGEMMMatrixAdditionKernel to the new API
- Port NEGEMMTranspose1xWKernel to the new API
- Remove padding from NEGEMMMatrixAdditionKernel
- Remove unused INESimpleKernel and ICPPSimpleKernel
Partially resolves: COMPMID-4402
Change-Id: I63edadddfe00a54586e5384d6a0211db25ae9042
Signed-off-by: Michele Di Giorgio <michele.digiorgio@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/5857
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Diffstat (limited to 'src')
22 files changed, 647 insertions, 788 deletions
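For orientation: the "memory injecting interface" of the commit title is the stateless kernel API used throughout this patch. The legacy kernels bind their tensors when configure() is called and carry _input/_output pointers; the ported Cpu* kernels are configured from ITensorInfo descriptors only, and the backing memory is injected at each execution through an ITensorPack. A minimal sketch of the before/after signatures, distilled from the hunks below (the pairing of the two snippets is illustrative, not part of the commit):

    // Before: stateful kernel; tensors are captured at configure() time.
    void NEGEMMMatrixAdditionKernel::configure(const ITensor *input, ITensor *output, float beta);
    void NEGEMMMatrixAdditionKernel::run(const Window &window, const ThreadInfo &info); // uses stored _input/_output

    // After: stateless kernel; metadata at configure() time, memory at run time.
    void CpuGemmMatrixAdditionKernel::configure(const ITensorInfo *src, ITensorInfo *dst, float beta);
    void CpuGemmMatrixAdditionKernel::run_op(ITensorPack &tensors, const Window &window, const ThreadInfo &info);

    // Inside run_op(), operands are looked up by id:
    //   const ITensor *src = tensors.get_const_tensor(TensorType::ACL_SRC);
    //   ITensor       *dst = tensors.get_tensor(TensorType::ACL_DST);

Because a stateless kernel instance no longer owns its buffers, the same configured kernel can, in principle, be reused with different memory per run.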
diff --git a/src/core/CPP/ICPPSimpleKernel.cpp b/src/core/CPP/ICPPSimpleKernel.cpp
deleted file mode 100644
index 9e4df5ec8a..0000000000
--- a/src/core/CPP/ICPPSimpleKernel.cpp
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
- * Copyright (c) 2016-2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "arm_compute/core/CPP/ICPPSimpleKernel.h"
-
-#include "arm_compute/core/Helpers.h"
-#include "arm_compute/core/IAccessWindow.h"
-#include "arm_compute/core/ITensor.h"
-#include "src/core/helpers/WindowHelpers.h"
-
-namespace arm_compute
-{
-namespace
-{
-std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, ITensorInfo *output, unsigned int num_elems_processed_per_iteration,
-                                                        bool border_undefined, const arm_compute::BorderSize &border_size)
-{
-    // Configure kernel window
-    Window win = calculate_max_window(*input, Steps(num_elems_processed_per_iteration), border_undefined, border_size);
-    AccessWindowHorizontal input_access(input, 0, num_elems_processed_per_iteration);
-    AccessWindowHorizontal output_access(output, 0, num_elems_processed_per_iteration);
-
-    bool window_changed = update_window_and_padding(win, input_access, output_access);
-
-    output_access.set_valid_region(win, input->valid_region(), border_undefined, border_size);
-
-    Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
-    return std::make_pair(err, win);
-}
-} // namespace
-
-ICPPSimpleKernel::ICPPSimpleKernel()
-    : _input{ nullptr }, _output{ nullptr }
-{
-}
-
-void ICPPSimpleKernel::configure(const ITensor *input, ITensor *output, unsigned int num_elems_processed_per_iteration, bool border_undefined, const BorderSize &border_size)
-{
-    _input  = input;
-    _output = output;
-
-    // Configure kernel window
-    auto win_config = validate_and_configure_window(input->info(), output->info(), num_elems_processed_per_iteration, border_undefined, border_size);
-    ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
-    ICPPKernel::configure(win_config.second);
-}
-
-Status ICPPSimpleKernel::validate(const ITensorInfo *input, const ITensorInfo *output, unsigned int num_elems_processed_per_iteration,
-                                  bool border_undefined, const arm_compute::BorderSize &border_size)
-{
-    ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(input->clone().get(), output->clone().get(), num_elems_processed_per_iteration, border_undefined, border_size).first);
-    return Status{};
-}
-
-} // namespace arm_compute
diff --git a/src/core/NEON/INESimpleKernel.h b/src/core/NEON/INESimpleKernel.h
deleted file mode 100644
index 2986e7b5c9..0000000000
--- a/src/core/NEON/INESimpleKernel.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * Copyright (c) 2016-2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_INESIMPLEKERNEL_H
-#define ARM_COMPUTE_INESIMPLEKERNEL_H
-
-#include "arm_compute/core/CPP/ICPPSimpleKernel.h"
-
-namespace arm_compute
-{
-/** Interface for simple CPU kernels having 1 tensor input and 1 tensor output */
-using INESimpleKernel = ICPPSimpleKernel;
-} // namespace arm_compute
-#endif /*ARM_COMPUTE_INESIMPLEKERNEL_H */
diff --git a/src/core/NEON/NEKernels.h b/src/core/NEON/NEKernels.h
index 268871a4e8..0f7475c0b5 100644
--- a/src/core/NEON/NEKernels.h
+++ b/src/core/NEON/NEKernels.h
@@ -41,14 +41,11 @@
 #include "src/core/NEON/kernels/NEFFTScaleKernel.h"
 #include "src/core/NEON/kernels/NEFillBorderKernel.h"
 #include "src/core/NEON/kernels/NEFuseBatchNormalizationKernel.h"
-#include "src/core/NEON/kernels/NEGEMMInterleave4x4Kernel.h"
 #include "src/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.h"
 #include "src/core/NEON/kernels/NEGEMMLowpOffsetContributionKernel.h"
 #include "src/core/NEON/kernels/NEGEMMLowpOffsetContributionOutputStageKernel.h"
 #include "src/core/NEON/kernels/NEGEMMLowpReductionKernel.h"
-#include "src/core/NEON/kernels/NEGEMMMatrixAdditionKernel.h"
 #include "src/core/NEON/kernels/NEGEMMMatrixMultiplyKernel.h"
-#include "src/core/NEON/kernels/NEGEMMTranspose1xWKernel.h"
 #include "src/core/NEON/kernels/NEGatherKernel.h"
 #include "src/core/NEON/kernels/NEGenerateProposalsLayerKernel.h"
 #include "src/core/NEON/kernels/NEIm2ColKernel.h"
diff --git a/src/core/NEON/kernels/NEGEMMInterleave4x4Kernel.h b/src/core/NEON/kernels/NEGEMMInterleave4x4Kernel.h
deleted file mode 100644
index e592d5ef6e..0000000000
--- a/src/core/NEON/kernels/NEGEMMInterleave4x4Kernel.h
+++ /dev/null
@@ -1,112 +0,0 @@
-/*
- * Copyright (c) 2016-2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_NEGEMMINTERLEAVE4x4KERNEL_H
-#define ARM_COMPUTE_NEGEMMINTERLEAVE4x4KERNEL_H
-
-#include "src/core/NEON/INESimpleKernel.h"
-
-namespace arm_compute
-{
-class ITensor;
-
-/** Kernel to interleave the elements of a matrix
- *
- * This function puts the values in a 4x4 block of Matrix A on the same row (Interleaved values)
- *
- * @f[
- * \left( \begin{array}{cccc}
- * a00 & a01 & a02 & a03 \\
- * a10 & a11 & a12 & a13 \\
- * a20 & a21 & a22 & a23 \\
- * a30 & a31 & a32 & a33 \\
- * \end{array} \right)
- * \rightarrow
- * \left( \begin{array}{ccccccccccccccccc}
- * a00 & a10 & a20 & a30 & a01 & a11 & a21 & a31 & a02 & a12 & a22 & a32 & a03 & a13 & a23 & a33 \\
- * \end{array} \right)
- * @f]
- *
- * After this operation, the output matrix will have the following shape: [ height * 4, ceil(width / 4.0f) ]
- */
-class NEGEMMInterleave4x4Kernel : public INESimpleKernel
-{
-public:
-    const char *name() const override
-    {
-        return "NEGEMMInterleave4x4Kernel";
-    }
-    /** Constructor */
-    NEGEMMInterleave4x4Kernel();
-    /** Prevent instances of this class from being copied (As this class contains pointers) */
-    NEGEMMInterleave4x4Kernel(const NEGEMMInterleave4x4Kernel &) = delete;
-    /** Prevent instances of this class from being copied (As this class contains pointers) */
-    NEGEMMInterleave4x4Kernel &operator=(const NEGEMMInterleave4x4Kernel &) = delete;
-    /** Allow instances of this class to be moved */
-    NEGEMMInterleave4x4Kernel(NEGEMMInterleave4x4Kernel &&) = default;
-    /** Allow instances of this class to be moved */
-    NEGEMMInterleave4x4Kernel &operator=(NEGEMMInterleave4x4Kernel &&) = default;
-    /** Default destructor */
-    ~NEGEMMInterleave4x4Kernel() = default;
-    /** Initialise the kernel's input and output.
-     *
-     * @param[in]  input  Input tensor. Data types supported: All
-     * @param[out] output Output tensor which stores the interleaved matrix. Data type supported: same as @p input.
-     */
-    void configure(const ITensor *input, ITensor *output);
-    /** Static function to check if given info will lead to a valid configuration of @ref NEGEMMInterleave4x4Kernel
-     *
-     * @param[in] input  Input tensor info. Data types supported: All
-     * @param[in] output Output tensor info which stores the interleaved matrix. Data type supported: same as @p input.
-     *
-     * @return a status
-     */
-    static Status validate(const ITensorInfo *input, const ITensorInfo *output);
-
-    // Inherited methods overridden:
-    void run(const Window &window, const ThreadInfo &info) override;
-
-private:
-    /** Template function to run gemm interleave 4x4
-     *
-     * @tparam ScalarType Scalar datatype
-     *
-     * @param[in]  input  Input tensor. Data types supported: uint32_t, uint16_t and uint8_t
-     * @param[out] output Output tensor. Data types supported: uint32_t, uint16_t and uint8_t
-     * @param[in]  window Region on which to execute the kernel. (Must be a valid region of the window returned by window()).
-     */
-    template <typename ScalarType>
-    void gemm_interleave4x4(const ITensor *input, ITensor *output, const Window &window);
-
-    /** Common signature for all the specialised gemm interleave 4x4 functions
-     *
-     * @param[in]  input  Input tensor. Data types supported: uint32_t, uint16_t and uint8_t
-     * @param[out] output Output tensor. Data types supported: uint32_t, uint16_t and uint8_t
-     * @param[in]  window Region on which to execute the kernel. (Must be a valid region of the window returned by window()).
-     */
-    using GEMMInterleaveFunctionFuncPtr = void (NEGEMMInterleave4x4Kernel::*)(const ITensor *input, ITensor *output, const Window &window);
-
-    GEMMInterleaveFunctionFuncPtr _func;
-};
-} // namespace arm_compute
-#endif /*ARM_COMPUTE_NEGEMMINTERLEAVE4x4KERNEL_H*/
diff --git a/src/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.cpp b/src/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.cpp
index b95bdd4ca5..6bcf59ee96 100644
--- a/src/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.cpp
+++ b/src/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.cpp
@@ -686,7 +686,7 @@ void inline matrix_multiply_s8(Iterator &ina, Iterator &inb, Iterator &out, int
     const auto   width_out  = static_cast<int>(out_info.dimension(0));
     const auto   height_out = static_cast<int>(out_info.dimension(1));
     const size_t out_stride = out_info.strides_in_bytes()[1] / out_info.element_size();
-    // The implementation assumes that the matrix A and Matrix B have been reshaped respectively with NEGEMMInterleave4x4 and NEGEMMTranspose1xW
+    // The implementation assumes that the matrix A and Matrix B have been reshaped respectively with CpuGemmInterleave4x4 and CpuGemmTranspose1xW
     // The reshaping of the matrices helps to have a cache friendly implementation and helps to avoid the data re-arrangements needed for computing 16x4 elements per iteration
     // All the values needed for computing a single 4x4 block will be read from consecutive memory positions
     execute_window_loop(window, [&](const Coordinates & id)
diff --git a/src/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.h b/src/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.h
index acfb79edeb..b9a1b5e840 100644
--- a/src/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.h
+++ b/src/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.h
@@ -61,7 +61,7 @@ public:
     ~NEGEMMLowpMatrixMultiplyKernel() = default;
     /** Initialise the kernel's input and output.
      *
-     * The input matrices @p input0 and @p input1 must be the output of the kernels: @ref NEGEMMInterleave4x4Kernel and @ref NEGEMMTranspose1xWKernel. These two
+     * The input matrices @p input0 and @p input1 must be the output of the kernels: cpu::kernels::CpuGemmInterleave4x4Kernel and @ref cpu::kernels::CpuGemmTranspose1xWKernel. These two
      * kernels change the layout of the original matrices to be more cache-friendly.
      *
      * @param[in]  input0 Input tensor containing the interleaved Matrix A. Data type supported: U8/QASYMM8/S8/QASYMM8_SIGNED
diff --git a/src/core/NEON/kernels/NEGEMMMatrixAdditionKernel.cpp b/src/core/NEON/kernels/NEGEMMMatrixAdditionKernel.cpp
deleted file mode 100644
index 6a2802a991..0000000000
--- a/src/core/NEON/kernels/NEGEMMMatrixAdditionKernel.cpp
+++ /dev/null
@@ -1,164 +0,0 @@
-/*
- * Copyright (c) 2016-2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "src/core/NEON/kernels/NEGEMMMatrixAdditionKernel.h"
-
-#include "arm_compute/core/Error.h"
-#include "arm_compute/core/Helpers.h"
-#include "arm_compute/core/Types.h"
-#include "arm_compute/core/Validate.h"
-#include "src/core/CPP/Validate.h"
-#include "src/core/NEON/NEFixedPoint.h"
-#include "src/core/helpers/AutoConfiguration.h"
-#include "src/core/helpers/WindowHelpers.h"
-
-#include <arm_neon.h>
-
-namespace arm_compute
-{
-namespace
-{
-constexpr unsigned int num_elems_processed_per_iteration = 16;
-
-Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, float beta)
-{
-    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output);
-    ARM_COMPUTE_UNUSED(beta);
-
-    ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(input);
-    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F16, DataType::F32);
-
-    if(output->total_size() > 0)
-    {
-        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
-        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input, output);
-    }
-
-    return Status{};
-}
-
-void matrix_addition_f32(const ITensor *input, ITensor *output, const Window &window, float beta)
-{
-    const float32x4_t beta_f32 = vdupq_n_f32(beta);
-
-    Iterator in(input, window);
-    Iterator out(output, window);
-
-    execute_window_loop(window, [&](const Coordinates &)
-    {
-        const auto in_ptr  = reinterpret_cast<const float *>(in.ptr());
-        const auto out_ptr = reinterpret_cast<float *>(out.ptr());
-
-        float32x4x4_t       alpha_ab = vld4q_f32(out_ptr);
-        const float32x4x4_t c        = vld4q_f32(in_ptr);
-
-        // Multiply matrix C by its weight and accumulate
-        alpha_ab.val[0] = vmlaq_f32(alpha_ab.val[0], c.val[0], beta_f32);
-        alpha_ab.val[1] = vmlaq_f32(alpha_ab.val[1], c.val[1], beta_f32);
-        alpha_ab.val[2] = vmlaq_f32(alpha_ab.val[2], c.val[2], beta_f32);
-        alpha_ab.val[3] = vmlaq_f32(alpha_ab.val[3], c.val[3], beta_f32);
-
-        vst4q_f32(out_ptr, alpha_ab);
-    },
-    in, out);
-}
-
-#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
-void matrix_addition_f16(const ITensor *input, ITensor *output, const Window &window, float beta)
-{
-    const float16x8_t beta_f16 = vdupq_n_f16(beta);
-
-    Iterator in(input, window);
-    Iterator out(output, window);
-
-    execute_window_loop(window, [&](const Coordinates &)
-    {
-        const auto in_ptr  = reinterpret_cast<const float16_t *>(in.ptr());
-        const auto out_ptr = reinterpret_cast<float16_t *>(out.ptr());
-
-        float16x8x2_t       alpha_ab = vld2q_f16(out_ptr);
-        const float16x8x2_t c        = vld2q_f16(in_ptr);
-        // Multiply matrix C by its weight and accumulate
-        alpha_ab.val[0] = vaddq_f16(alpha_ab.val[0], vmulq_f16(c.val[0], beta_f16));
-        alpha_ab.val[1] = vaddq_f16(alpha_ab.val[1], vmulq_f16(c.val[1], beta_f16));
-
-        vst2q_f16(out_ptr + 0, alpha_ab);
-    },
-    in, out);
-}
-#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
-
-} // namespace
-
-NEGEMMMatrixAdditionKernel::NEGEMMMatrixAdditionKernel()
-    : INESimpleKernel(), _func(nullptr), _beta(0.0f)
-{
-}
-
-void NEGEMMMatrixAdditionKernel::configure(const ITensor *input, ITensor *output, float beta)
-{
-    ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
-
-    // Perform validation step
-    ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), beta));
-
-    switch(input->info()->data_type())
-    {
-        case DataType::F32:
-            _func = &matrix_addition_f32;
-            break;
-        case DataType::F16:
-#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
-            _func = &matrix_addition_f16;
-            break;
-#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
-        default:
-            ARM_COMPUTE_ERROR("Data type not supported");
-            break;
-    }
-
-    // Configure kernel window
-    INESimpleKernel::configure(input, output, num_elems_processed_per_iteration);
-
-    _beta = beta;
-}
-
-Status NEGEMMMatrixAdditionKernel::validate(const ITensorInfo *input, const ITensorInfo *output, float beta)
-{
-    ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output, beta));
-    ARM_COMPUTE_RETURN_ON_ERROR(INESimpleKernel::validate(input->clone().get(), output->clone().get(), num_elems_processed_per_iteration));
-    return Status{};
-}
-
-void NEGEMMMatrixAdditionKernel::run(const Window &window, const ThreadInfo &info)
-{
-    ARM_COMPUTE_UNUSED(info);
-    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
-    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(INESimpleKernel::window(), window);
-
-    if(_beta != 0.0f)
-    {
-        (*_func)(_input, _output, window, _beta);
-    }
-}
-} // namespace arm_compute
diff --git a/src/core/NEON/kernels/NEGEMMMatrixMultiplyKernel.h b/src/core/NEON/kernels/NEGEMMMatrixMultiplyKernel.h
index 3bc162a1b4..4341ff00df 100644
--- a/src/core/NEON/kernels/NEGEMMMatrixMultiplyKernel.h
+++ b/src/core/NEON/kernels/NEGEMMMatrixMultiplyKernel.h
@@ -32,7 +32,7 @@ class ITensor;
 /** Kernel to multiply two input matrices "A" and "B". All elements of the output matrix/vector will be multiplied by alpha after the matrix multiplication
  *
- * @note If the output tensor is a matrix, the implementation assumes that the input tensors @p input0 and @p input1 are both matrices and reshaped respectively with @ref NEGEMMInterleave4x4Kernel" and @ref NEGEMMTranspose1xWKernel
+ * @note If the output tensor is a matrix, the implementation assumes that the input tensors @p input0 and @p input1 are both matrices and reshaped respectively with @ref cpu::kernels::CpuGemmInterleave4x4Kernel and @ref cpu::kernels::CpuGemmTranspose1xWKernel
  * @note If the output tensor is a vector and the data type is F32, the implementation assumes that the first input tensor @p input0 is a vector and the second input tensor @p input1 a matrix. The implementation also assumes that both tensors have not been reshaped
  *
  */
@@ -55,7 +55,7 @@ public:
     NEGEMMMatrixMultiplyKernel &operator=(NEGEMMMatrixMultiplyKernel &&) = default;
     /** Initialise the kernel's input and output.
      *
-     * @note If the output tensor is a matrix, the input matrices @p input0 and @p input1 should be the output of the kernels: @ref NEGEMMInterleave4x4Kernel and @ref NEGEMMTranspose1xWKernel
+     * @note If the output tensor is a matrix, the input matrices @p input0 and @p input1 should be the output of the kernels: @ref cpu::kernels::CpuGemmInterleave4x4Kernel and @ref cpu::kernels::CpuGemmTranspose1xWKernel
      * These two kernels change the layout of the original matrices to be more cache-friendly.
      *
      * @param[in]  input0         Input tensor containing the interleaved Matrix A or the vector A. Data types supported: F16/F32
@@ -63,7 +63,7 @@ public:
      *                            If the output tensor is a vector, input1 must contain the matrix B not reshaped. Data type supported: same as @p input0
      * @param[out] output         Output tensor to store the result of matrix multiplication. Data type supported: same as @p input0.
      * @param[in]  alpha          Weight of the matrix product
-     * @param[in]  is_interleaved (Optional) True if input0 and input1 have been reshaped respectively using @ref NEGEMMInterleave4x4Kernel and @ref NEGEMMTranspose1xWKernel
+     * @param[in]  is_interleaved (Optional) True if input0 and input1 have been reshaped respectively using @ref cpu::kernels::CpuGemmInterleave4x4Kernel and @ref cpu::kernels::CpuGemmTranspose1xWKernel
      * @param[in]  reshape_info   (Optional) GEMM reshape info. If is_interleaved_transposed = true, this object must contain the information to understand how the matrix A and matrix B have been reshaped
      */
     void configure(const ITensor *input0, const ITensor *input1, ITensor *output, float alpha, bool is_interleaved, const GEMMReshapeInfo &reshape_info = GEMMReshapeInfo());
@@ -74,7 +74,7 @@ public:
      *                            If the output tensor is a vector, input1 must contain the matrix B not reshaped. Data type supported: same as @p input0
      * @param[in]  output         Output tensor to store the result of matrix multiplication. Data type supported: same as @p input0.
      * @param[in]  alpha          Weight of the matrix product
-     * @param[in]  is_interleaved (Optional) True if input0 and input1 have been reshaped respectively using @ref NEGEMMInterleave4x4Kernel and @ref NEGEMMTranspose1xWKernel
+     * @param[in]  is_interleaved (Optional) True if input0 and input1 have been reshaped respectively using @ref cpu::kernels::CpuGemmInterleave4x4Kernel and @ref cpu::kernels::CpuGemmTranspose1xWKernel
      * @param[in]  reshape_info   (Optional) GEMM reshape info. If is_interleaved_transposed = true, this object must contain the information to understand how the matrix A and matrix B have been reshaped
      *
      * @return a status
diff --git a/src/core/NEON/kernels/NEGEMMInterleave4x4Kernel.cpp b/src/core/cpu/kernels/CpuGemmInterleave4x4Kernel.cpp
index 9011680c9b..67f2a490cd 100644
--- a/src/core/NEON/kernels/NEGEMMInterleave4x4Kernel.cpp
+++ b/src/core/cpu/kernels/CpuGemmInterleave4x4Kernel.cpp
@@ -21,118 +21,49 @@
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  */
-#include "src/core/NEON/kernels/NEGEMMInterleave4x4Kernel.h"
+#include "src/core/cpu/kernels/CpuGemmInterleave4x4Kernel.h"
 
-#include "arm_compute/core/Error.h"
-#include "arm_compute/core/Helpers.h"
 #include "arm_compute/core/ITensor.h"
-#include "arm_compute/core/Types.h"
 #include "arm_compute/core/Validate.h"
 #include "arm_compute/core/Window.h"
 #include "arm_compute/core/utils/misc/ShapeCalculator.h"
-#include "src/core/NEON/INEKernel.h"
 #include "src/core/helpers/AutoConfiguration.h"
 #include "src/core/helpers/WindowHelpers.h"
 
 #include <arm_neon.h>
-#include <cstddef>
-#include <cstdint>
-#include <tuple>
 
-using namespace arm_compute;
-using namespace arm_compute::misc::shape_calculator;
-
-namespace
+namespace arm_compute
 {
-Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output)
-{
-    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input);
-    //Note: ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(input) is not needed here as this kernel doesn't use CPU FP16 instructions.
-    ARM_COMPUTE_RETURN_ERROR_ON(input->data_type() == DataType::UNKNOWN);
-    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
-
-    if(output->total_size() != 0)
-    {
-        TensorShape output_shape = input->tensor_shape();
-        output_shape.set(0, input->dimension(0) * 4);
-        output_shape.set(1, std::ceil(input->dimension(1) / 4.0f));
-        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(output->tensor_shape(), output_shape);
-        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
-        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_QUANTIZATION_INFO(input, output);
-    }
-
-    return Status{};
-}
-} // namespace
-
-NEGEMMInterleave4x4Kernel::NEGEMMInterleave4x4Kernel()
-    : _func(nullptr)
+namespace cpu
 {
-}
-
-void NEGEMMInterleave4x4Kernel::configure(const ITensor *input, ITensor *output)
+namespace kernels
 {
-    ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
-
-    // Output auto inizialitation if not yet initialized
-    auto_init_if_empty(*output->info(), input->info()->clone()->set_tensor_shape(compute_interleaved_shape(*input->info())));
-
-    // Perform validate step
-    ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info()));
-
-    _input  = input;
-    _output = output;
-
-    switch(input->info()->element_size())
-    {
-        case 1:
-            _func = &NEGEMMInterleave4x4Kernel::gemm_interleave4x4<uint8_t>;
-            break;
-        case 2:
-            _func = &NEGEMMInterleave4x4Kernel::gemm_interleave4x4<uint16_t>;
-            break;
-        case 4:
-            _func = &NEGEMMInterleave4x4Kernel::gemm_interleave4x4<uint32_t>;
-            break;
-        default:
-            ARM_COMPUTE_ERROR_ON("Element size not supported");
-            break;
-    }
-
-    Window win = calculate_max_window(*input->info(), Steps(1, 4));
-
-    INEKernel::configure(win);
-}
+using namespace arm_compute::misc::shape_calculator;
 
-Status NEGEMMInterleave4x4Kernel::validate(const ITensorInfo *input, const ITensorInfo *output)
+namespace
 {
-    ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output));
-
-    return Status{};
-}
-
 template <typename ScalarType>
-void NEGEMMInterleave4x4Kernel::gemm_interleave4x4(const ITensor *input, ITensor *output, const Window &window)
+void gemm_interleave4x4(const ITensor *src, ITensor *dst, const Window &window)
 {
     const size_t window_start_x = window.x().start();
     const size_t window_end_x   = window.x().end();
 
-    const size_t in_height = input->info()->dimension(1);
-    const size_t in_stride = input->info()->strides_in_bytes()[1];
+    const size_t in_height = src->info()->dimension(1);
+    const size_t in_stride = src->info()->strides_in_bytes()[1];
 
     const size_t partial_y = in_height % 4;
 
-    // Set window for the input tensor
+    // Set window for the src tensor
     Window win = window;
     win.set(Window::DimX, Window::Dimension(0, 1, 1));
 
-    // Set window for the output tensor
+    // Set window for the dst tensor
     Window win_out(window);
     win_out.set(Window::DimX, Window::Dimension(0, 1, 1));
     win_out.scale(Window::DimY, 0.25f);
 
-    Iterator in(input, win);
-    Iterator out(output, win_out);
+    Iterator in(src, win);
+    Iterator out(dst, win_out);
 
     execute_window_loop(win, [&](const Coordinates & id)
     {
@@ -167,13 +98,62 @@ void NEGEMMInterleave4x4Kernel::gemm_interleave4x4(const ITensor *input, ITensor
     },
     in, out);
 }
+} // namespace
 
-void NEGEMMInterleave4x4Kernel::run(const Window &window, const ThreadInfo &info)
+void CpuGemmInterleave4x4Kernel::configure(const ITensorInfo *src, ITensorInfo *dst)
+{
+    ARM_COMPUTE_ERROR_ON_NULLPTR(src, dst);
+
+    // dst auto inizialitation if not yet initialized
+    auto_init_if_empty(*dst, src->clone()->set_tensor_shape(compute_interleaved_shape(*src)));
+
+    // Perform validate step
+    ARM_COMPUTE_ERROR_THROW_ON(CpuGemmInterleave4x4Kernel::validate(src, dst));
+
+    switch(src->element_size())
+    {
+        case 1:
+            _func = &gemm_interleave4x4<uint8_t>;
+            break;
+        case 2:
+            _func = &gemm_interleave4x4<uint16_t>;
+            break;
+        case 4:
+            _func = &gemm_interleave4x4<uint32_t>;
+            break;
+        default:
+            ARM_COMPUTE_ERROR_ON("Element size not supported");
+            break;
+    }
+
+    Window win = calculate_max_window(*src, Steps(1, 4));
+    ICPPKernel::configure(win);
+}
+
+Status CpuGemmInterleave4x4Kernel::validate(const ITensorInfo *src, const ITensorInfo *dst)
+{
+    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, dst);
+    //Note: ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(src) is not needed here as this kernel doesn't use CPU FP16 instructions.
+    ARM_COMPUTE_RETURN_ERROR_ON(src->data_type() == DataType::UNKNOWN);
+
+    if(dst->total_size() != 0)
+    {
+        const TensorShape dst_shape = compute_interleaved_shape(*src);
+        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(dst->tensor_shape(), dst_shape);
+        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src, dst);
+        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_QUANTIZATION_INFO(src, dst);
+    }
+
+    return Status{};
+}
+
+void CpuGemmInterleave4x4Kernel::run_op(ITensorPack &tensors, const Window &window, const ThreadInfo &info)
 {
     ARM_COMPUTE_UNUSED(info);
     ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
-    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(INEKernel::window(), window);
+    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(IKernel::window(), window);
     ARM_COMPUTE_ERROR_ON(_func == nullptr);
+    ARM_COMPUTE_ERROR_ON(tensors.empty());
     /*
      * This kernel puts the values in a 4x4 block of Matrix A on the same row (Interleaved values)
      *         |a00 a01 a02 a03|
@@ -181,7 +161,18 @@ void NEGEMMInterleave4x4Kernel::run(const Window &window, const ThreadInfo &info
     *          |a20 a21 a22 a23| = | a00 a10 a20 a30 || a01 a11 a21 a31 || a02 a12 a22 a32 || a03 a13 a23 a33 |
     *          |a30 a31 a32 a33|
     *
-     * After this operation, the output matrix will have the following shape: [ height * 4, ceil(width / 4.0f) ]
+     * After this operation, the dst matrix will have the following shape: [ height * 4, ceil(width / 4.0f) ]
     */
-    (this->*_func)(_input, _output, window);
+    const ITensor *src = tensors.get_const_tensor(TensorType::ACL_SRC);
+    ITensor       *dst = tensors.get_tensor(TensorType::ACL_DST);
+
+    (*_func)(src, dst, window);
+}
+
+const char *CpuGemmInterleave4x4Kernel::name() const
+{
+    return "CpuGemmInterleave4x4Kernel";
 }
+} // namespace kernels
+} // namespace cpu
+} // namespace arm_compute
diff --git a/src/core/cpu/kernels/CpuGemmInterleave4x4Kernel.h b/src/core/cpu/kernels/CpuGemmInterleave4x4Kernel.h
new file mode 100644
index 0000000000..94d88c7001
--- /dev/null
+++ b/src/core/cpu/kernels/CpuGemmInterleave4x4Kernel.h
@@ -0,0 +1,93 @@
+/*
+ * Copyright (c) 2016-2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_CPU_GEMM_INTERLEAVE4x4_KERNEL_H
+#define ARM_COMPUTE_CPU_GEMM_INTERLEAVE4x4_KERNEL_H
+
+#include "src/core/common/Macros.h"
+#include "src/core/cpu/ICpuKernel.h"
+
+namespace arm_compute
+{
+namespace cpu
+{
+namespace kernels
+{
+/** Kernel to interleave the elements of a matrix
+ *
+ * This function puts the values in a 4x4 block of Matrix A on the same row (Interleaved values)
+ *
+ * @f[
+ * \left( \begin{array}{cccc}
+ * a00 & a01 & a02 & a03 \\
+ * a10 & a11 & a12 & a13 \\
+ * a20 & a21 & a22 & a23 \\
+ * a30 & a31 & a32 & a33 \\
+ * \end{array} \right)
+ * \rightarrow
+ * \left( \begin{array}{ccccccccccccccccc}
+ * a00 & a10 & a20 & a30 & a01 & a11 & a21 & a31 & a02 & a12 & a22 & a32 & a03 & a13 & a23 & a33 \\
+ * \end{array} \right)
+ * @f]
+ *
+ * After this operation, the dst matrix will have the following shape: [ height * 4, ceil(width / 4.0f) ]
+ */
+class CpuGemmInterleave4x4Kernel : public ICpuKernel
+{
+public:
+    /** Default Constructor */
+    CpuGemmInterleave4x4Kernel() = default;
+    ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(CpuGemmInterleave4x4Kernel);
+    /** Initialise the kernel's src and dst.
+     *
+     * @param[in]  src Input tensor info. Data types supported: All
+     * @param[out] dst Output tensor info which stores the interleaved matrix. Data type supported: same as @p src.
+     */
+    void configure(const ITensorInfo *src, ITensorInfo *dst);
+    /** Static function to check if given info will lead to a valid configuration of @ref CpuGemmInterleave4x4Kernel
+     *
+     * Similar to @ref CpuGemmInterleave4x4Kernel::configure()
+     *
+     * @return a status
+     */
+    static Status validate(const ITensorInfo *src, const ITensorInfo *dst);
+
+    // Inherited methods overridden:
+    void run_op(ITensorPack &tensors, const Window &window, const ThreadInfo &info) override;
+    const char *name() const override;
+
+private:
+    /** Common signature for all the specialised gemm interleave 4x4 functions
+     *
+     * @param[in]  src    Input tensor. Data types supported: uint32_t, uint16_t and uint8_t
+     * @param[out] dst    Output tensor. Data types supported: uint32_t, uint16_t and uint8_t
+     * @param[in]  window Region on which to execute the kernel. (Must be a valid region of the window returned by window()).
+     */
+    using GEMMInterleaveFunctionPtr = void (*)(const ITensor *src, ITensor *dst, const Window &window);
+
+    GEMMInterleaveFunctionPtr _func{ nullptr };
+};
+} // namespace kernels
+} // namespace cpu
+} // namespace arm_compute
+#endif /*ARM_COMPUTE_CPU_GEMM_INTERLEAVE4x4_KERNEL_H*/
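A hypothetical standalone use of the new interleave kernel, following the ITensorPack pattern sketched above (the tensor names are invented, and real callers dispatch through the scheduler rather than invoking run_op() directly, as NEGEMM.cpp does further down):

    #include "arm_compute/core/ITensorPack.h"
    #include "arm_compute/runtime/Tensor.h"
    #include "src/core/cpu/kernels/CpuGemmInterleave4x4Kernel.h"

    using namespace arm_compute;

    void interleave_matrix_a(Tensor &a, Tensor &a_interleaved)
    {
        // configure() sees only metadata; the dst info is auto-initialised to
        // the interleaved shape computed by compute_interleaved_shape().
        cpu::kernels::CpuGemmInterleave4x4Kernel kernel;
        kernel.configure(a.info(), a_interleaved.info());
        a_interleaved.allocator()->allocate();

        // The actual buffers are injected only at execution time.
        ITensorPack pack;
        pack.add_const_tensor(TensorType::ACL_SRC, &a);
        pack.add_tensor(TensorType::ACL_DST, &a_interleaved);
        kernel.run_op(pack, kernel.window(), ThreadInfo{});
    }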
diff --git a/src/core/cpu/kernels/CpuGemmMatrixAdditionKernel.cpp b/src/core/cpu/kernels/CpuGemmMatrixAdditionKernel.cpp
new file mode 100644
index 0000000000..cc39cdfe83
--- /dev/null
+++ b/src/core/cpu/kernels/CpuGemmMatrixAdditionKernel.cpp
@@ -0,0 +1,200 @@
+/*
+ * Copyright (c) 2016-2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "src/core/cpu/kernels/CpuGemmMatrixAdditionKernel.h"
+
+#include "arm_compute/core/Helpers.h"
+#include "arm_compute/core/Types.h"
+#include "arm_compute/core/Validate.h"
+#include "src/core/CPP/Validate.h"
+#include "src/core/NEON/NEFixedPoint.h"
+#include "src/core/helpers/AutoConfiguration.h"
+#include "src/core/helpers/WindowHelpers.h"
+
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace cpu
+{
+namespace kernels
+{
+namespace
+{
+void matrix_addition_f32(const ITensor *src, ITensor *dst, const Window &window, float beta)
+{
+    ARM_COMPUTE_ERROR_ON_NULLPTR(src, dst);
+    const float32x4_t beta_f32 = vdupq_n_f32(beta);
+
+    constexpr int window_step_x  = 16;
+    const auto    window_start_x = static_cast<int>(window.x().start());
+    const auto    window_end_x   = static_cast<int>(window.x().end());
+
+    Window win = window.collapse_if_possible(window, Window::DimZ);
+    win.set(Window::DimX, Window::Dimension(0, 1, 1));
+
+    Iterator in(src, win);
+    Iterator out(dst, win);
+
+    execute_window_loop(win, [&](const Coordinates &)
+    {
+        const auto in_ptr  = reinterpret_cast<const float *>(in.ptr());
+        const auto out_ptr = reinterpret_cast<float *>(out.ptr());
+
+        int x = window_start_x;
+        for(; x < (window_end_x - window_step_x); x += window_step_x)
+        {
+            float32x4x4_t       alpha_ab = vld4q_f32(out_ptr + x);
+            const float32x4x4_t c        = vld4q_f32(in_ptr + x);
+
+            // Multiply matrix C by its weight and accumulate
+            alpha_ab.val[0] = vmlaq_f32(alpha_ab.val[0], c.val[0], beta_f32);
+            alpha_ab.val[1] = vmlaq_f32(alpha_ab.val[1], c.val[1], beta_f32);
+            alpha_ab.val[2] = vmlaq_f32(alpha_ab.val[2], c.val[2], beta_f32);
+            alpha_ab.val[3] = vmlaq_f32(alpha_ab.val[3], c.val[3], beta_f32);
+
+            vst4q_f32(out_ptr + x, alpha_ab);
+        }
+
+        // Left-over loop
+        for(; x < window_end_x; ++x)
+        {
+            *(out_ptr + x) += *(in_ptr + x) * beta;
+        }
+    },
+    in, out);
+}
+
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+void matrix_addition_f16(const ITensor *src, ITensor *dst, const Window &window, float beta)
+{
+    ARM_COMPUTE_ERROR_ON_NULLPTR(src, dst);
+    const float16x8_t beta_f16 = vdupq_n_f16(beta);
+
+    constexpr int window_step_x  = 16;
+    const auto    window_start_x = static_cast<int>(window.x().start());
+    const auto    window_end_x   = static_cast<int>(window.x().end());
+
+    Window win = window.collapse_if_possible(window, Window::DimZ);
+    win.set(Window::DimX, Window::Dimension(0, 1, 1));
+
+    Iterator in(src, win);
+    Iterator out(dst, win);
+
+    execute_window_loop(window, [&](const Coordinates &)
+    {
+        const auto in_ptr  = reinterpret_cast<const float16_t *>(in.ptr());
+        const auto out_ptr = reinterpret_cast<float16_t *>(out.ptr());
+
+        int x = window_start_x;
+        for(; x < (window_end_x - window_step_x); x += window_step_x)
+        {
+            float16x8x2_t       alpha_ab = vld2q_f16(out_ptr + x);
+            const float16x8x2_t c        = vld2q_f16(in_ptr + x);
+            // Multiply matrix C by its weight and accumulate
+            alpha_ab.val[0] = vaddq_f16(alpha_ab.val[0], vmulq_f16(c.val[0], beta_f16));
+            alpha_ab.val[1] = vaddq_f16(alpha_ab.val[1], vmulq_f16(c.val[1], beta_f16));
+
+            vst2q_f16(out_ptr + x, alpha_ab);
+        }
+
+        // Left-over loop
+        for(; x < window_end_x; ++x)
+        {
+            *(out_ptr + x) += *(in_ptr + x) * static_cast<float16_t>(beta);
+        }
+    },
+    in, out);
+}
+#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
+
+} // namespace
+
+void CpuGemmMatrixAdditionKernel::configure(const ITensorInfo *src, ITensorInfo *dst, float beta)
+{
+    ARM_COMPUTE_UNUSED(dst);
+    ARM_COMPUTE_ERROR_ON_NULLPTR(src, dst);
+
+    // Perform validation step
+    ARM_COMPUTE_ERROR_THROW_ON(CpuGemmMatrixAdditionKernel::validate(src, dst, beta));
+
+    _beta = beta;
+    switch(src->data_type())
+    {
+        case DataType::F32:
+            _func = &matrix_addition_f32;
+            break;
+        case DataType::F16:
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+            _func = &matrix_addition_f16;
+            break;
+#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
+        default:
+            ARM_COMPUTE_ERROR("Data type not supported");
+            break;
+    }
+
+    // Configure kernel window
+    Window win = calculate_max_window(*src, Steps());
+    ICPPKernel::configure(win);
+}
+
+Status CpuGemmMatrixAdditionKernel::validate(const ITensorInfo *src, const ITensorInfo *dst, float beta)
+{
+    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, dst);
+    ARM_COMPUTE_UNUSED(beta);
+
+    ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(src);
+    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src, 1, DataType::F16, DataType::F32);
+
+    if(dst->total_size() > 0)
+    {
+        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src, dst);
+        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(src, dst);
+    }
+    return Status{};
+}
+
+void CpuGemmMatrixAdditionKernel::run_op(ITensorPack &tensors, const Window &window, const ThreadInfo &info)
+{
+    ARM_COMPUTE_UNUSED(info);
+    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
+    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(IKernel::window(), window);
+    ARM_COMPUTE_ERROR_ON(tensors.empty());
+
+    const ITensor *src = tensors.get_const_tensor(TensorType::ACL_SRC);
+    ITensor       *dst = tensors.get_tensor(TensorType::ACL_DST);
+
+    if(_beta != 0.0f)
+    {
+        (*_func)(src, dst, window, _beta);
+    }
+}
+
+const char *CpuGemmMatrixAdditionKernel::name() const
+{
+    return "CpuGemmMatrixAdditionKernel";
+}
+} // namespace kernels
+} // namespace cpu
+} // namespace arm_compute
diff --git a/src/core/NEON/kernels/NEGEMMMatrixAdditionKernel.h b/src/core/cpu/kernels/CpuGemmMatrixAdditionKernel.h
index c896cabc6a..216e61b5d5 100644
--- a/src/core/NEON/kernels/NEGEMMMatrixAdditionKernel.h
+++ b/src/core/cpu/kernels/CpuGemmMatrixAdditionKernel.h
@@ -21,15 +21,18 @@
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  */
-#ifndef ARM_COMPUTE_NEGEMMMATRIXADDITIONKERNEL_H
-#define ARM_COMPUTE_NEGEMMMATRIXADDITIONKERNEL_H
+#ifndef ARM_COMPUTE_CPU_GEMM_MATRIX_ADDITION_KERNEL_H
+#define ARM_COMPUTE_CPU_GEMM_MATRIX_ADDITION_KERNEL_H
 
-#include "src/core/NEON/INESimpleKernel.h"
+#include "src/core/common/Macros.h"
+#include "src/core/cpu/ICpuKernel.h"
 
 namespace arm_compute
 {
-class ITensor;
-
+namespace cpu
+{
+namespace kernels
+{
 /** Kernel to perform the in-place matrix addition between 2 matrices taking into account that the second matrix might be weighted by a scalar value beta:
  *
  * @note [ MTX_OUT = MTX_0 + beta * MTX_1 ] with MTX_0 and MTX_1 of the same size
  *
 * @note This stage is used to finalize the GEMM result and it is computed if and only if beta != 0.0. In case this kernel is used for finalizing GEMM result, we have:
 *         - MTX_0 = A * B * alpha, where MTX_0 is the output of @ref NEGEMMMatrixMultiplyKernel
 *         - MTX_1 = C
 */
-class NEGEMMMatrixAdditionKernel : public INESimpleKernel
+class CpuGemmMatrixAdditionKernel : public ICpuKernel
 {
 public:
-    const char *name() const override
-    {
-        return "NEGEMMMatrixAdditionKernel";
-    }
     /** Constructor */
-    NEGEMMMatrixAdditionKernel();
-    /** Prevent instances of this class from being copied */
-    NEGEMMMatrixAdditionKernel(const NEGEMMMatrixAdditionKernel &) = delete;
-    /** Prevent instances of this class from being copied */
-    NEGEMMMatrixAdditionKernel &operator=(const NEGEMMMatrixAdditionKernel &) = delete;
-    /** Allow instances of this class to be moved */
-    NEGEMMMatrixAdditionKernel(NEGEMMMatrixAdditionKernel &&) = default;
-    /** Allow instances of this class to be moved */
-    NEGEMMMatrixAdditionKernel &operator=(NEGEMMMatrixAdditionKernel &&) = default;
-    /** Default destructor */
-    ~NEGEMMMatrixAdditionKernel() = default;
+    CpuGemmMatrixAdditionKernel() = default;
+    ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(CpuGemmMatrixAdditionKernel);
     /** Initialise the kernel's input and output.
      *
      * @note The input and output tensor must have the same dimensions
      *
-     * @param[in]      input  Input tensor (Matrix C). Data types supported: F16/F32
-     * @param[in, out] output Output tensor. If this kernel is used to finalize the GEMM result, output contains the result obtained by the kernel @ref NEGEMMMatrixMultiplyKernel. Data type supported: the same as @p input.
-     * @param[in]      beta   Weight of matrix C
+     * @param[in]      src  Input tensor info (Matrix C). Data types supported: F16/F32
+     * @param[in, out] dst  Output tensor info. If this kernel is used to finalize the GEMM result, output contains the result obtained by the kernel @ref NEGEMMMatrixMultiplyKernel. Data type supported: the same as @p src.
+     * @param[in]      beta Weight of matrix C
      */
-    void configure(const ITensor *input, ITensor *output, float beta);
-    /** Static function to check if given info will lead to a valid configuration of @ref NEGEMMMatrixAdditionKernel.
+    void configure(const ITensorInfo *src, ITensorInfo *dst, float beta);
+    /** Static function to check if given info will lead to a valid configuration of @ref CpuGemmMatrixAdditionKernel.
      *
      * @note The input and output tensor must have the same dimensions
      *
-     * @param[in] input  Input tensor info (Matrix C). Data types supported: F16/F32
-     * @param[in] output Output tensor info. If this kernel is used to finalize the GEMM result, output contains the result obtained by the kernel @ref NEGEMMMatrixMultiplyKernel. Data type supported: the same as @p input.
-     * @param[in] beta   Weight of matrix C
+     * Similar to @ref CpuGemmMatrixAdditionKernel::configure()
      *
      * @return a status
      */
-    static Status validate(const ITensorInfo *input, const ITensorInfo *output, float beta);
+    static Status validate(const ITensorInfo *src, const ITensorInfo *dst, float beta);
 
     // Inherited methods overridden:
-    void run(const Window &window, const ThreadInfo &info) override;
+    void run_op(ITensorPack &tensors, const Window &window, const ThreadInfo &info) override;
+    const char *name() const override;
 
 private:
     /** Common signature for all the matrix addition functions
      *
-     * @param[in]  input  An input tensor. Data types supported: F16/F32
-     * @param[out] output The output tensor. Data type supported: same as @p input
+     * @param[in]  src    An input tensor. Data types supported: F16/F32
+     * @param[out] dst    The output tensor. Data type supported: same as @p src
      * @param[in]  window Region on which to execute the kernel.
      * @param[in]  beta   Weight of matrix C
      */
-    using MatrixAdditionFunction = void(const ITensor *input, ITensor *output, const Window &window, float beta);
+    using MatrixAdditionFunctionPtr = void (*)(const ITensor *src, ITensor *dst, const Window &window, float beta);
     /** Matrix addition function to use for the particular tensor types passed to configure() */
-    MatrixAdditionFunction *_func;
-    float                   _beta;
+    MatrixAdditionFunctionPtr _func{ nullptr };
+    float                     _beta{ 0.f };
 };
+} // namespace kernels
+} // namespace cpu
 } // namespace arm_compute
-#endif /* ARM_COMPUTE_NEGEMMMATRIXADDITIONKERNEL_H */
+#endif /* ARM_COMPUTE_CPU_GEMM_MATRIX_ADDITION_KERNEL_H */
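In scalar terms, the kernel defined in the two files above performs the in-place accumulation dst += beta * src, where dst already holds alpha * A * B and src holds matrix C; as run_op() shows, the whole pass is skipped when beta == 0. A reference loop for the F32 case, illustrative only:

    #include <cstddef>

    // What one contiguous span of the tensor sees; the NEON paths above do the
    // same arithmetic 16 elements at a time plus a scalar left-over loop.
    void gemm_matrix_addition_ref(const float *src, float *dst, std::size_t n, float beta)
    {
        for(std::size_t i = 0; i < n; ++i)
        {
            dst[i] += src[i] * beta;
        }
    }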
diff --git a/src/core/NEON/kernels/NEGEMMTranspose1xWKernel.cpp b/src/core/cpu/kernels/CpuGemmTranspose1xWKernel.cpp
index 20b0cabd1f..4b059f57cb 100644
--- a/src/core/NEON/kernels/NEGEMMTranspose1xWKernel.cpp
+++ b/src/core/cpu/kernels/CpuGemmTranspose1xWKernel.cpp
@@ -21,13 +21,12 @@
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  */
-#include "src/core/NEON/kernels/NEGEMMTranspose1xWKernel.h"
+#include "src/core/cpu/kernels/CpuGemmTranspose1xWKernel.h"
 
 #include "arm_compute/core/ITensor.h"
-#include "arm_compute/core/TensorInfo.h"
 #include "arm_compute/core/Validate.h"
 #include "arm_compute/core/Window.h"
-#include "src/core/NEON/INEKernel.h"
+#include "arm_compute/core/utils/misc/ShapeCalculator.h"
 #include "src/core/helpers/AutoConfiguration.h"
 #include "src/core/helpers/WindowHelpers.h"
 
@@ -35,90 +34,77 @@
 namespace arm_compute
 {
-namespace
+namespace cpu
 {
-TensorShape get_output_shape(const ITensorInfo *input)
+namespace kernels
 {
-    TensorShape  output_shape{ input->tensor_shape() };
-    const size_t transpose_w = 16 / input->element_size();
-    output_shape.set(0, input->dimension(1) * transpose_w);
-    output_shape.set(1, static_cast<size_t>(std::ceil((input->dimension(0) / static_cast<float>(transpose_w)))));
-    return output_shape;
-}
-
-Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output)
-{
-    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input);
-    ARM_COMPUTE_RETURN_ERROR_ON(input->data_type() == DataType::UNKNOWN);
-    //Note: ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(input) is not needed here as this kernel doesn't use CPU FP16 instructions.
-
-    if(output->total_size() != 0)
-    {
-        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(output->tensor_shape(), get_output_shape(input));
-        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
-        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_QUANTIZATION_INFO(input, output);
-    }
-
-    return Status{};
-}
-} // namespace
+using namespace arm_compute::misc::shape_calculator;
 
-void NEGEMMTranspose1xWKernel::configure(const ITensor *input, ITensor *output)
+void CpuGemmTranspose1xWKernel::configure(const ITensorInfo *src, ITensorInfo *dst)
 {
-    ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
+    ARM_COMPUTE_ERROR_ON_NULLPTR(src, dst);
 
     // Output tensor auto inizialitation if not yet initialized
-    auto_init_if_empty(*output->info(), get_output_shape(input->info()), 1, input->info()->data_type());
+    auto_init_if_empty(*dst, src->clone()->set_tensor_shape(compute_transpose1xW_with_element_size_shape(*src)));
 
     // Perform validate step
-    ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info()));
+    ARM_COMPUTE_ERROR_THROW_ON(CpuGemmTranspose1xWKernel::validate(src, dst));
 
-    _input  = input;
-    _output = output;
-
-    const size_t vector_size = 16 / input->info()->element_size();
+    const size_t vector_size = 16 / src->element_size();
 
     // Configure kernel window
-    Window win = calculate_max_window(*input->info(), Steps(vector_size));
-
-    INEKernel::configure(win);
+    Window win = calculate_max_window(*src, Steps(vector_size));
+    ICPPKernel::configure(win);
 }
 
-Status NEGEMMTranspose1xWKernel::validate(const ITensorInfo *input, const ITensorInfo *output)
+Status CpuGemmTranspose1xWKernel::validate(const ITensorInfo *src, const ITensorInfo *dst)
 {
-    ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output));
+    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src);
+    ARM_COMPUTE_RETURN_ERROR_ON(src->data_type() == DataType::UNKNOWN);
+    //Note: ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(src) is not needed here as this kernel doesn't use CPU FP16 instructions.
+
+    if(dst->total_size() != 0)
+    {
+        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(dst->tensor_shape(), compute_transpose1xW_with_element_size_shape(*src));
+        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src, dst);
+        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_QUANTIZATION_INFO(src, dst);
+    }
 
     return Status{};
 }
 
-void NEGEMMTranspose1xWKernel::run(const Window &window, const ThreadInfo &info)
+void CpuGemmTranspose1xWKernel::run_op(ITensorPack &tensors, const Window &window, const ThreadInfo &info)
 {
     ARM_COMPUTE_UNUSED(info);
     ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
-    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(INESimpleKernel::window(), window);
+    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(IKernel::window(), window);
+    ARM_COMPUTE_ERROR_ON(tensors.empty());
 
     /*
-     * Following an example of how the transposition1xW works when the input data type is F32
+     * Following an example of how the transposition1xW works when the src data type is F32
     *
     *         |a00 a01 a02 a03|
     *         |a10 a11 a12 a13|
     *         |a20 a21 a22 a23| = | a00 a01 a02 a03 || a10 a11 a12 a13 || a20 a21 a22 a23 || a30 a31 a32 a33 |
     *         |a30 a31 a32 a33|
     *
-     * The output matrix will have the following shape: [ height * W, ceil(width / W) ], where W = (16 / element size of the tensor)
+     * The dst matrix will have the following shape: [ height * W, ceil(width / W) ], where W = (16 / element size of the tensor)
     */
-    // Set window for output tensor. Set to 0 the X and Y dimensions in order to allow multi-threading implementation and future batched matrix multiplications
+    // Set window for dst tensor. Set to 0 the X and Y dimensions in order to allow multi-threading implementation and future batched matrix multiplications
     Window win_out(window);
     win_out.set(Window::DimX, Window::Dimension(0, 0, 0));
     win_out.set(Window::DimY, Window::Dimension(0, 0, 0));
 
-    Iterator in(_input, window);
-    Iterator out(_output, win_out);
+    const ITensor *src = tensors.get_const_tensor(TensorType::ACL_SRC);
+    ITensor       *dst = tensors.get_tensor(TensorType::ACL_DST);
 
-    const size_t in_width     = _input->info()->dimension(0);
-    const size_t element_size = _input->info()->element_size();
-    const size_t out_stride   = _output->info()->strides_in_bytes()[1];
+    Iterator in(src, window);
+    Iterator out(dst, win_out);
+
+    const size_t in_width     = src->info()->dimension(0);
+    const size_t element_size = src->info()->element_size();
+    const size_t out_stride   = dst->info()->strides_in_bytes()[1];
     const size_t vector_size  = 16 / element_size;
 
     execute_window_loop(window, [&](const Coordinates & id)
     {
@@ -128,7 +114,7 @@ void NEGEMMTranspose1xWKernel::run(const Window &window, const ThreadInfo &info)
 
         for(size_t k = 0; k < vector_size; ++k)
         {
-            // If the input width is not multiple of W, we fill the reference with 0s
+            // If the src width is not multiple of W, we fill the reference with 0s
             if((id.x() + k) >= in_width)
             {
                 std::memset(out_ptr + k * element_size, 0, element_size);
@@ -141,4 +127,11 @@ void NEGEMMTranspose1xWKernel::run(const Window &window, const ThreadInfo &info)
     },
     in, out);
 }
+
+const char *CpuGemmTranspose1xWKernel::name() const
+{
+    return "CpuGemmTranspose1xWKernel";
+}
+} // namespace kernels
+} // namespace cpu
 } // namespace arm_compute
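The 1xW chunk width, and therefore the reshaped output shape, depends only on the element size, since W = 16 / element_size. A sketch of the shape arithmetic (the in-tree helper is compute_transpose1xW_with_element_size_shape(); this standalone function is illustrative):

    #include <cmath>
    #include <cstddef>
    #include <utility>

    // [width, height] -> [height * W, ceil(width / W)], with W elements of a row packed per chunk.
    std::pair<std::size_t, std::size_t> transpose1xW_shape(std::size_t width, std::size_t height, std::size_t element_size)
    {
        const std::size_t w = 16 / element_size; // F32 -> 4, F16 -> 8, U8 -> 16
        return { height * w, static_cast<std::size_t>(std::ceil(width / static_cast<float>(w))) };
    }

    // Example: a 12x4 F32 matrix becomes [4 * 4, ceil(12 / 4)] = [16, 3].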
*/ -#ifndef ARM_COMPUTE_NEGEMMTRANSPOSE1xWKERNEL_H -#define ARM_COMPUTE_NEGEMMTRANSPOSE1xWKERNEL_H +#ifndef ARM_COMPUTE_CPU_GEMM_TRANSPOSE1xW_KERNEL_H +#define ARM_COMPUTE_CPU_GEMM_TRANSPOSE1xW_KERNEL_H -#include "src/core/NEON/INESimpleKernel.h" +#include "src/core/common/Macros.h" +#include "src/core/cpu/ICpuKernel.h" namespace arm_compute { -// Forward declarations -class ITensor; - +namespace cpu +{ +namespace kernels +{ /** Kernel which transposes the elements of a matrix in chunks of 1xW, where W is equal to (16 / element size of the tensor) * * Following an example of how the transposition1xW works when the input data is F32 @@ -66,42 +68,31 @@ class ITensor; * @note The output matrix will have the following shape: [ height * W, ceil(width / W) ], where W = (16 / element size of the tensor) * */ -class NEGEMMTranspose1xWKernel : public INESimpleKernel +class CpuGemmTranspose1xWKernel : public ICpuKernel { public: - const char *name() const override - { - return "NEGEMMTranspose1xWKernel"; - } /** Constructor */ - NEGEMMTranspose1xWKernel() = default; - /** Prevent instances of this class from being copied (As this class contains pointers) */ - NEGEMMTranspose1xWKernel(const NEGEMMTranspose1xWKernel &) = delete; - /** Prevent instances of this class from being copied (As this class contains pointers) */ - NEGEMMTranspose1xWKernel &operator=(const NEGEMMTranspose1xWKernel &) = delete; - /** Allow instances of this class to be moved */ - NEGEMMTranspose1xWKernel(NEGEMMTranspose1xWKernel &&) = default; - /** Allow instances of this class to be moved */ - NEGEMMTranspose1xWKernel &operator=(NEGEMMTranspose1xWKernel &&) = default; - /** Default destructor */ - ~NEGEMMTranspose1xWKernel() = default; - /** Initialise the kernel's input and output. + CpuGemmTranspose1xWKernel() = default; + ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(CpuGemmTranspose1xWKernel); + /** Configure kernel for a given list of arguments * - * @param[in] input Input tensor. Data types supported: All - * @param[out] output Output tensor. Data type supported: same as @p input. + * @param[in] src Input tensor info. Data types supported: All + * @param[out] dst Output tensor info. Data type supported: same as @p src. */ - void configure(const ITensor *input, ITensor *output); - /** Static function to check if given info will lead to a valid configuration of @ref NEGEMMTranspose1xWKernel + void configure(const ITensorInfo *src, ITensorInfo *dst); + /** Static function to check if given info will lead to a valid configuration of @ref CpuGemmTranspose1xWKernel * - * @param[in] input Input tensor info. Data types supported: All - * @param[in] output Output tensor info. Data type supported: same as @p input. 
diff --git a/src/runtime/NEON/functions/NEFullyConnectedLayer.cpp b/src/runtime/NEON/functions/NEFullyConnectedLayer.cpp
index f469a0bdab..daa14b1b3a 100644
--- a/src/runtime/NEON/functions/NEFullyConnectedLayer.cpp
+++ b/src/runtime/NEON/functions/NEFullyConnectedLayer.cpp
@@ -30,15 +30,6 @@
 #include "arm_compute/core/utils/misc/ShapeCalculator.h"
 #include "arm_compute/core/utils/quantization/AsymmHelpers.h"
 #include "arm_compute/runtime/NEON/NEScheduler.h"
-#include "src/core/NEON/kernels/NEConvertQuantizedSignednessKernel.h"
-#include "src/core/NEON/kernels/NEGEMMInterleave4x4Kernel.h"
-#include "src/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.h"
-#include "src/core/NEON/kernels/NEGEMMLowpOffsetContributionKernel.h"
-#include "src/core/NEON/kernels/NEGEMMLowpOffsetContributionOutputStageKernel.h"
-#include "src/core/NEON/kernels/NEGEMMLowpReductionKernel.h"
-#include "src/core/NEON/kernels/NEGEMMMatrixAdditionKernel.h"
-#include "src/core/NEON/kernels/NEGEMMMatrixMultiplyKernel.h"
-#include "src/core/NEON/kernels/NEGEMMTranspose1xWKernel.h"
 #include "src/core/cpu/kernels/CpuTransposeKernel.h"
 
 #include <cmath>
diff --git a/src/runtime/NEON/functions/NEGEMM.cpp b/src/runtime/NEON/functions/NEGEMM.cpp
index 9b14052c75..99a7db72b6 100644
--- a/src/runtime/NEON/functions/NEGEMM.cpp
+++ b/src/runtime/NEON/functions/NEGEMM.cpp
@@ -31,18 +31,19 @@
 #include "arm_compute/core/Validate.h"
 #include "arm_compute/core/utils/misc/ShapeCalculator.h"
 #include "arm_compute/runtime/NEON/NEScheduler.h"
+#include "arm_compute/runtime/Tensor.h"
 #include "arm_compute/runtime/TensorAllocator.h"
 #include "src/core/CPP/Validate.h"
-#include "src/core/NEON/kernels/NEGEMMInterleave4x4Kernel.h"
-#include "src/core/NEON/kernels/NEGEMMMatrixAdditionKernel.h"
 #include "src/core/NEON/kernels/NEGEMMMatrixMultiplyKernel.h"
-#include "src/core/NEON/kernels/NEGEMMTranspose1xWKernel.h"
+#include "src/core/cpu/kernels/CpuGemmInterleave4x4Kernel.h"
+#include "src/core/cpu/kernels/CpuGemmMatrixAdditionKernel.h"
+#include "src/core/cpu/kernels/CpuGemmTranspose1xWKernel.h"
 #include "src/core/helpers/AutoConfiguration.h"
 #include "src/core/helpers/MemoryHelpers.h"
+#include "src/runtime/cpu/operators/CpuActivation.h"
+#include "src/runtime/cpu/operators/CpuAdd.h"
 #include "src/runtime/cpu/operators/internal/CpuGemmAssemblyDispatch.h"
 
-#include <cmath>
-
 using namespace arm_compute::experimental;
 using namespace arm_compute::misc::shape_calculator;
 
@@ -62,96 +63,117 @@ cpu::AsmGemmInfo init_assembly_metadata(const GEMMInfo &info)
 }
 } // namespace
 
+struct NEGEMM::Impl
+{
+    MemoryGroup      memory_group{};
+    IWeightsManager *weights_manager{ nullptr };
+
+    std::unique_ptr<cpu::kernels::CpuGemmInterleave4x4Kernel>  interleave_kernel{ nullptr };
+    std::unique_ptr<cpu::kernels::CpuGemmTranspose1xWKernel>   transpose_kernel{ nullptr };
+    std::unique_ptr<NEGEMMMatrixMultiplyKernel>                mm_kernel{ nullptr };
+    std::unique_ptr<cpu::CpuGemmAssemblyDispatch>              asm_glue{ nullptr };
+    std::unique_ptr<cpu::kernels::CpuGemmMatrixAdditionKernel> ma_kernel{ nullptr };
+    std::unique_ptr<cpu::CpuActivation>                        alpha_scale_func{ nullptr };
+    std::unique_ptr<cpu::CpuAdd>                               add_bias{ nullptr };
+    std::unique_ptr<cpu::CpuActivation>                        activation_func{ nullptr };
+
+    const ITensor *a{ nullptr };
+    const ITensor *c{ nullptr };
+    ITensor       *d{ nullptr };
+    ITensor       *gemm_output_to_use{ nullptr };
+    Tensor         tmp_a{};
+    Tensor         tmp_b{};
+    Tensor         tmp_d{};
+    const ITensor *original_b{ nullptr };
+    bool           run_vector_matrix_multiplication{ false };
+    bool           run_alpha_scale{ false };
+    bool           run_addition{ false };
+    bool           run_bias_addition{ false };
+    bool           run_activation{ false };
+    bool           reshape_b_only_on_first_run{ false };
+    bool           is_prepared{ false };
+
+    ITensorPack                      asm_glue_run_pack{};
+    ITensorPack                      asm_glue_prep_pack{};
+    WorkspaceData<Tensor>            asm_glue_workspace{};
+    experimental::MemoryRequirements aux_mem_req{};
+};
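Moving all member state into a private Impl struct (the pimpl idiom) keeps kernel and operator types out of the public NEGEMM header, so internal kernels can keep changing without touching the API surface. A minimal sketch of the pattern in isolation (class and field names illustrative):

    #include <memory>

    // Header: only a forward declaration and a unique_ptr are exposed.
    class Gemm
    {
    public:
        Gemm();
        ~Gemm(); // must be defined where Impl is a complete type

    private:
        struct Impl; // defined in the .cpp file
        std::unique_ptr<Impl> _impl;
    };

    // Source file: the full state lives here and can change freely.
    struct Gemm::Impl
    {
        bool is_prepared{ false };
    };
    Gemm::Gemm() : _impl(std::make_unique<Impl>()) {}
    Gemm::~Gemm() = default;

The out-of-line destructor matters: std::unique_ptr<Impl> needs Impl complete where the destructor is instantiated, which is why NEGEMM::~NEGEMM() = default; lives in the .cpp file below.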
 NEGEMM::NEGEMM(std::shared_ptr<IMemoryManager> memory_manager, IWeightsManager *weights_manager)
-    : _memory_group(memory_manager),
-      _weights_manager(weights_manager),
-      _interleave_kernel(),
-      _transpose_kernel(),
-      _mm_kernel(),
-      _asm_glue(std::make_unique<cpu::CpuGemmAssemblyDispatch>()),
-      _ma_kernel(),
-      _alpha_scale_func(nullptr),
-      _add_bias(),
-      _activation_func(),
-      _tmp_a(),
-      _tmp_b(),
-      _tmp_d(),
-      _original_b(nullptr),
-      _run_vector_matrix_multiplication(false),
-      _run_alpha_scale(false),
-      _run_addition(false),
-      _run_bias_addition(false),
-      _run_activation(false),
-      _reshape_b_only_on_first_run(false),
-      _is_prepared(false),
-      _asm_glue_run_pack(),
-      _asm_glue_prep_pack(),
-      _asm_glue_workspace(),
-      _aux_mem_req()
+    : _impl(std::make_unique<Impl>())
 {
+    _impl->memory_group    = MemoryGroup(std::move(memory_manager));
+    _impl->weights_manager = weights_manager;
 }
 
 NEGEMM::~NEGEMM() = default;
 
 void NEGEMM::configure(const ITensor *a, const ITensor *b, const ITensor *c, ITensor *d, float alpha, float beta, const GEMMInfo &gemm_info)
 {
+    ARM_COMPUTE_ERROR_ON_NULLPTR(a, b, d);
     ARM_COMPUTE_ERROR_THROW_ON(NEGEMM::validate(a->info(), b->info(), (c != nullptr) ? c->info() : nullptr, d->info(), alpha, beta, gemm_info));
 
     const cpu::AsmGemmInfo asm_info      = init_assembly_metadata(gemm_info);
     const bool             is_c_bias     = gemm_info.reshape_b_only_on_first_run();
     bool                   run_optimised = bool(cpu::CpuGemmAssemblyDispatch::validate(a->info(), b->info(), (is_c_bias && c != nullptr) ? c->info() : nullptr, d->info(), asm_info));
 
+    _impl->a                  = a;
+    _impl->c                  = c;
+    _impl->d                  = d;
+    _impl->gemm_output_to_use = d;
     // Check if we need to reshape the matrix B only on the first run
-    _is_prepared                      = false;
-    _reshape_b_only_on_first_run      = gemm_info.reshape_b_only_on_first_run();
-    _run_vector_matrix_multiplication = a->info()->dimension(1) < 2;
-    _original_b                       = b;
-    _run_alpha_scale                  = alpha != 1.f;
-    _run_bias_addition                = c != nullptr && gemm_info.reshape_b_only_on_first_run();
-    _run_addition                     = beta != 0 && c != nullptr && !gemm_info.reshape_b_only_on_first_run();
-    _run_activation                   = gemm_info.activation_info().enabled() && (!run_optimised || (run_optimised && !cpu::CpuGemmAssemblyDispatch::is_activation_supported(gemm_info.activation_info())));
+    _impl->is_prepared                      = false;
+    _impl->reshape_b_only_on_first_run      = gemm_info.reshape_b_only_on_first_run();
+    _impl->run_vector_matrix_multiplication = a->info()->dimension(1) < 2;
+    _impl->original_b                       = b;
+    _impl->run_alpha_scale                  = alpha != 1.f;
+    _impl->run_bias_addition                = c != nullptr && gemm_info.reshape_b_only_on_first_run();
+    _impl->run_addition                     = beta != 0 && c != nullptr && !gemm_info.reshape_b_only_on_first_run();
+    _impl->run_activation                   = gemm_info.activation_info().enabled() && (!run_optimised || (run_optimised
                                                                                                            && !cpu::CpuGemmAssemblyDispatch::is_activation_supported(gemm_info.activation_info())));
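These flags decompose d = alpha * A * B + beta * C into stages. Read concretely (values illustrative): with alpha = 1, beta = 1, c provided and reshape_b_only_on_first_run() false, only run_addition is set and beta * C is applied by the matrix addition kernel; with reshape_b_only_on_first_run() true, C is instead treated as a bias and added through CpuAdd (run_bias_addition). run_alpha_scale only matters on the assembly path below, where alpha is folded in afterwards as a LINEAR activation:

    // A LINEAR activation computes act_a * x + act_b; configured with
    // act_a = alpha and act_b = 0 it rescales the assembly GEMM result in place.
    ActivationLayerInfo alpha_scale_info(ActivationLayerInfo::ActivationFunction::LINEAR, alpha, 0.f);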
     if(run_optimised)
     {
         const ITensor     *c_to_use      = is_c_bias ? c : nullptr;
         const ITensorInfo *c_info_to_use = c_to_use != nullptr ? c_to_use->info() : nullptr;
-        _asm_glue->configure(a->info(), b->info(), c_info_to_use, d->info(), asm_info);
-        ARM_COMPUTE_ERROR_ON(!_asm_glue->is_configured());
+        _impl->asm_glue = std::make_unique<cpu::CpuGemmAssemblyDispatch>();
+        _impl->asm_glue->configure(a->info(), b->info(), c_info_to_use, d->info(), asm_info);
+        ARM_COMPUTE_ERROR_ON(!_impl->asm_glue->is_configured());
 
-        _aux_mem_req       = _asm_glue->workspace();
-        _asm_glue_run_pack =
+        _impl->aux_mem_req       = _impl->asm_glue->workspace();
+        _impl->asm_glue_run_pack =
         {
             { ACL_SRC_0, a },
             { ACL_SRC_1, b },
             { ACL_SRC_2, c_to_use },
             { ACL_DST, d },
         };
-        _asm_glue_prep_pack = { { ACL_SRC_1, b }, { ACL_SRC_2, c_to_use } };
-        _asm_glue_workspace = manage_workspace<Tensor>(_aux_mem_req, _memory_group, _asm_glue_run_pack, _asm_glue_prep_pack);
+        _impl->asm_glue_prep_pack = { { ACL_SRC_1, b }, { ACL_SRC_2, c_to_use } };
+        _impl->asm_glue_workspace = manage_workspace<Tensor>(_impl->aux_mem_req, _impl->memory_group, _impl->asm_glue_run_pack, _impl->asm_glue_prep_pack);
 
         // Scale product by alpha
-        if(_run_alpha_scale)
+        if(_impl->run_alpha_scale)
         {
-            _alpha_scale_func.configure(d, nullptr, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LINEAR, alpha, 0.f));
+            _impl->alpha_scale_func = std::make_unique<cpu::CpuActivation>();
+            _impl->alpha_scale_func->configure(d->info(), nullptr, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LINEAR, alpha, 0.f));
         }
     }
     else
     {
         // Pick output tensor in case bias addition should be performed
-        ITensor *gemm_output_to_use = d;
-        if(_run_bias_addition)
+        if(_impl->run_bias_addition)
         {
-            gemm_output_to_use = &_tmp_d;
-            _memory_group.manage(&_tmp_d);
+            _impl->gemm_output_to_use = &_impl->tmp_d;
+            _impl->memory_group.manage(&_impl->tmp_d);
         }
 
-        _mm_kernel = std::make_unique<NEGEMMMatrixMultiplyKernel>();
+        _impl->mm_kernel = std::make_unique<NEGEMMMatrixMultiplyKernel>();
 
         // Select between GEMV and GEMM
-        if(_run_vector_matrix_multiplication)
+        if(_impl->run_vector_matrix_multiplication)
         {
             // Configure the matrix multiply kernel
-            _mm_kernel->configure(a, b, gemm_output_to_use, alpha, false);
+            _impl->mm_kernel->configure(a, b, _impl->gemm_output_to_use, alpha, false);
         }
         else
         {
@@ -168,14 +190,14 @@ void NEGEMM::configure(const ITensor *a, const ITensor *b, const ITensor *c, ITe
             TensorInfo info_a = a->info()->clone()->set_tensor_shape(shape_tmp_a).set_is_resizable(true);
             TensorInfo info_b = b->info()->clone()->set_tensor_shape(shape_tmp_b).set_is_resizable(true);
 
-            _tmp_a.allocator()->init(info_a);
-            _tmp_b.allocator()->init(info_b);
+            _impl->tmp_a.allocator()->init(info_a);
+            _impl->tmp_b.allocator()->init(info_b);
 
             // Manage intermediate buffers
-            _memory_group.manage(&_tmp_a);
-            if(!_reshape_b_only_on_first_run)
+            _impl->memory_group.manage(&_impl->tmp_a);
+            if(!_impl->reshape_b_only_on_first_run)
             {
-                _memory_group.manage(&_tmp_b);
+                _impl->memory_group.manage(&_impl->tmp_b);
             }
 
             int m = a->info()->dimension(1);
@@ -183,43 +205,45 @@ void NEGEMM::configure(const ITensor *a, const ITensor *b, const ITensor *c, ITe
             int k = a->info()->dimension(0);
 
             // Configure interleave kernel
-            _interleave_kernel = std::make_unique<NEGEMMInterleave4x4Kernel>();
-            _interleave_kernel->configure(a, &_tmp_a);
+            _impl->interleave_kernel = std::make_unique<cpu::kernels::CpuGemmInterleave4x4Kernel>();
+            _impl->interleave_kernel->configure(a->info(), &info_a);
 
             // Configure transpose kernel
-            _transpose_kernel = std::make_unique<NEGEMMTranspose1xWKernel>();
-            _transpose_kernel->configure(b, &_tmp_b);
+            _impl->transpose_kernel = std::make_unique<cpu::kernels::CpuGemmTranspose1xWKernel>();
+            _impl->transpose_kernel->configure(b->info(), _impl->tmp_b.info());
 
             // Configure matrix multiplication kernel
-            _mm_kernel->configure(&_tmp_a, &_tmp_b, gemm_output_to_use, alpha, true, GEMMReshapeInfo(m, n, k));
+            _impl->mm_kernel->configure(&_impl->tmp_a, &_impl->tmp_b, _impl->gemm_output_to_use, alpha, true, GEMMReshapeInfo(m, n, k));
 
             // Allocate once the all configure methods have been called
-            _tmp_a.allocator()->allocate();
-            if(!_reshape_b_only_on_first_run)
+            _impl->tmp_a.allocator()->allocate();
+            if(!_impl->reshape_b_only_on_first_run)
             {
-                _tmp_b.allocator()->allocate();
+                _impl->tmp_b.allocator()->allocate();
             }
         }
 
-        if(_run_bias_addition)
+        if(_impl->run_bias_addition)
        {
-            _add_bias.configure(gemm_output_to_use, c, d, ConvertPolicy::SATURATE);
-            _tmp_d.allocator()->allocate();
+            _impl->add_bias = std::make_unique<cpu::CpuAdd>();
+            _impl->add_bias->configure(_impl->gemm_output_to_use->info(), c->info(), d->info(), ConvertPolicy::SATURATE);
+            _impl->tmp_d.allocator()->allocate();
         }
     }
 
     // Configure matrix addition kernel
-    if(_run_addition)
+    if(_impl->run_addition)
     {
-        _ma_kernel = std::make_unique<NEGEMMMatrixAdditionKernel>();
-        _ma_kernel->configure(c, d, beta);
+        _impl->ma_kernel = std::make_unique<cpu::kernels::CpuGemmMatrixAdditionKernel>();
+        _impl->ma_kernel->configure(c->info(), d->info(), beta);
     }
 
     // Configure activation
     const ActivationLayerInfo &activation = gemm_info.activation_info();
-    if(_run_activation)
+    if(_impl->run_activation)
     {
-        _activation_func.configure(d, nullptr, activation);
+        _impl->activation_func = std::make_unique<cpu::CpuActivation>();
+        _impl->activation_func->configure(d->info(), nullptr, activation);
     }
 }
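Note the pattern for the new operators (CpuAdd, CpuActivation): like the ported kernels, they are configured on ITensorInfo and executed later with run(ITensorPack). A sketch of the bias-addition call that run() issues further down (tensor names illustrative):

    // dst = saturate(gemm_result + bias), matching the SATURATE policy above
    ITensorPack bias_pack{ { TensorType::ACL_SRC_0, &gemm_result },
                           { TensorType::ACL_SRC_1, &bias },
                           { TensorType::ACL_DST, &dst } };
    add_bias->run(bias_pack);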
@@ -285,7 +309,7 @@ Status NEGEMM::validate(const ITensorInfo *a, const ITensorInfo *b, const ITenso
     const bool run_interleave_transpose = !run_vector_matrix_multiplication && !(gemm_info.reshape_b_only_on_first_run());
 
     // Arguments used by GEMMReshapeInfo
-    // If we pass the matrix A and matrix B reshaped to NEGEMMMatrixMultiplyKernel, we need to pass m, n, k, mult_transpose1xW_width and mult_interleave4x4_height to NEGEMMReshapeInfo
+    // If we pass the matrix A and matrix B reshaped to NEGEMMMatrixMultiplyKernel, we need to pass m, n, k, mult_transpose1xW_width and mult_interleave4x4_height to GEMMReshapeInfo
     // in order to know how the matrices have been reshaped
     const int m = a->dimension(1);
     const int n = b->dimension(0);
@@ -309,11 +333,11 @@ Status NEGEMM::validate(const ITensorInfo *a, const ITensorInfo *b, const ITenso
         // Validate interleave kernel
         auto_init_if_empty(tmp_a_info, a->clone()->set_tensor_shape(compute_interleaved_shape(*a, mult_interleave4x4_height, gemm_info.reinterpret_input_as_3d())));
-        ARM_COMPUTE_RETURN_ON_ERROR(NEGEMMInterleave4x4Kernel::validate(a, &tmp_a_info));
+        ARM_COMPUTE_RETURN_ON_ERROR(cpu::kernels::CpuGemmInterleave4x4Kernel::validate(a, &tmp_a_info));
 
         // Validate transpose kernel
         auto_init_if_empty(tmp_b_info, b->clone()->set_tensor_shape(compute_transpose1xW_with_element_size_shape(*b, mult_transpose1xW_width)));
-        ARM_COMPUTE_RETURN_ON_ERROR(NEGEMMTranspose1xWKernel::validate(b, &tmp_b_info));
+        ARM_COMPUTE_RETURN_ON_ERROR(cpu::kernels::CpuGemmTranspose1xWKernel::validate(b, &tmp_b_info));
     }
 
     // Validate matrix multiply
@@ -322,21 +346,21 @@ Status NEGEMM::validate(const ITensorInfo *a, const ITensorInfo *b, const ITenso
 
         if(c != nullptr && gemm_info.reshape_b_only_on_first_run())
         {
-            ARM_COMPUTE_RETURN_ON_ERROR(NEArithmeticAddition::validate(&tmp_output_info, c, output, ConvertPolicy::SATURATE));
+            ARM_COMPUTE_RETURN_ON_ERROR(cpu::CpuAdd::validate(&tmp_output_info, c, output, ConvertPolicy::SATURATE));
         }
     }
 
     // Validate matrix addition kernel
     if(beta != 0 && c != nullptr && !is_c_bias)
     {
-        ARM_COMPUTE_RETURN_ON_ERROR(NEGEMMMatrixAdditionKernel::validate(c, output, beta));
+        ARM_COMPUTE_RETURN_ON_ERROR(cpu::kernels::CpuGemmMatrixAdditionKernel::validate(c, output, beta));
     }
 
     // Validate activation
     const ActivationLayerInfo &activation = gemm_info.activation_info();
     if(activation.enabled())
     {
-        ARM_COMPUTE_RETURN_ON_ERROR(NEActivationLayer::validate(output, nullptr, activation));
+        ARM_COMPUTE_RETURN_ON_ERROR(cpu::CpuActivation::validate(output, nullptr, activation));
     }
 
     return Status{};
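The validate() path mirrors configure() without allocating anything: each stage's static validate is chained with ARM_COMPUTE_RETURN_ON_ERROR, so the first failing Status is returned to the caller and later stages never see inconsistent inputs. A reduced sketch of the pattern (function name and stage selection illustrative):

    Status validate_reshape_stage(const ITensorInfo *a, const ITensorInfo *b,
                                  const ITensorInfo *tmp_a, const ITensorInfo *tmp_b)
    {
        // Each static validate() is a cheap shape/type check; the macro
        // short-circuits on the first error.
        ARM_COMPUTE_RETURN_ON_ERROR(cpu::kernels::CpuGemmInterleave4x4Kernel::validate(a, tmp_a));
        ARM_COMPUTE_RETURN_ON_ERROR(cpu::kernels::CpuGemmTranspose1xWKernel::validate(b, tmp_b));
        return Status{};
    }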
@@ -346,90 +370,97 @@ void NEGEMM::run()
 {
     prepare();
 
-    MemoryGroupResourceScope scope_mg(_memory_group);
+    MemoryGroupResourceScope scope_mg(_impl->memory_group);
 
-    if(_asm_glue->is_configured())
+    if(_impl->asm_glue->is_configured())
     {
-        _asm_glue->run(_asm_glue_run_pack);
-        if(_run_alpha_scale)
+        _impl->asm_glue->run(_impl->asm_glue_run_pack);
+        if(_impl->run_alpha_scale)
         {
-            _alpha_scale_func.run();
+            ITensorPack pack{ { ACL_SRC, _impl->d }, { ACL_DST, _impl->d } };
+            _impl->alpha_scale_func->run(pack);
         }
     }
     else
     {
-        if(!_run_vector_matrix_multiplication)
+        if(!_impl->run_vector_matrix_multiplication)
         {
             // Run interleave kernel
-            NEScheduler::get().schedule(_interleave_kernel.get(), Window::DimY);
+            ITensorPack interleave_pack{ { ACL_SRC, _impl->a }, { ACL_DST, &_impl->tmp_a } };
+            NEScheduler::get().schedule_op(_impl->interleave_kernel.get(), Window::DimY, _impl->interleave_kernel->window(), interleave_pack);
 
-            if(!_reshape_b_only_on_first_run)
+            if(!_impl->reshape_b_only_on_first_run)
             {
                 // Run transpose kernel
-                NEScheduler::get().schedule(_transpose_kernel.get(), Window::DimY);
+                ITensorPack transpose_pack{ { ACL_SRC, _impl->original_b }, { ACL_DST, &_impl->tmp_b } };
+                NEScheduler::get().schedule_op(_impl->transpose_kernel.get(), Window::DimY, _impl->transpose_kernel->window(), transpose_pack);
             }
         }
 
-        NEScheduler::get().schedule(_mm_kernel.get(), _run_vector_matrix_multiplication ? Window::DimX : Window::DimY);
+        NEScheduler::get().schedule(_impl->mm_kernel.get(), _impl->run_vector_matrix_multiplication ? Window::DimX : Window::DimY);
 
         // Run bias addition kernel
-        if(_run_bias_addition)
+        if(_impl->run_bias_addition)
         {
-            _add_bias.run();
+            ITensorPack pack{ { ACL_SRC_0, _impl->gemm_output_to_use }, { ACL_SRC_1, _impl->c }, { ACL_DST, _impl->d } };
+            _impl->add_bias->run(pack);
         }
     }
 
     // Run matrix addition kernel
-    if(_run_addition)
+    if(_impl->run_addition)
     {
-        NEScheduler::get().schedule(_ma_kernel.get(), Window::DimY);
+        ITensorPack c_add_pack{ { ACL_SRC, _impl->c }, { ACL_DST, _impl->d } };
+        NEScheduler::get().schedule_op(_impl->ma_kernel.get(), Window::DimY, _impl->ma_kernel->window(), c_add_pack);
     }
 
     // Run activation function
-    if(_run_activation)
+    if(_impl->run_activation)
     {
-        _activation_func.run();
+        ITensorPack pack{ { ACL_SRC, _impl->d }, { ACL_DST, _impl->d } };
+        _impl->activation_func->run(pack);
     }
 }
 
 void NEGEMM::prepare()
 {
-    if(!_is_prepared)
+    if(!_impl->is_prepared)
     {
-        const bool original_b_managed_by_weights_manager = _weights_manager && _weights_manager->are_weights_managed(_original_b);
-        if(_asm_glue->is_configured())
+        const bool original_b_managed_by_weights_manager = _impl->weights_manager && _impl->weights_manager->are_weights_managed(_impl->original_b);
+        if(_impl->asm_glue->is_configured())
         {
-            _asm_glue->prepare(_asm_glue_prep_pack);
+            _impl->asm_glue->prepare(_impl->asm_glue_prep_pack);
 
-            auto has_reshape = std::find_if(_aux_mem_req.begin(),
-                                            _aux_mem_req.end(),
+            auto has_reshape = std::find_if(_impl->aux_mem_req.begin(),
+                                            _impl->aux_mem_req.end(),
                                             [](const MemoryInfo & m) -> bool { return m.lifetime == MemoryLifetime::Persistent; });
 
-            if(has_reshape != std::end(_aux_mem_req))
+            if(has_reshape != std::end(_impl->aux_mem_req))
             {
-                _original_b->mark_as_unused();
+                _impl->original_b->mark_as_unused();
             }
             else
             {
-                _asm_glue_run_pack.add_const_tensor(ACL_SRC_1, _original_b);
+                _impl->asm_glue_run_pack.add_const_tensor(ACL_SRC_1, _impl->original_b);
             }
         }
-        else if(_reshape_b_only_on_first_run && !_run_vector_matrix_multiplication && !_asm_glue->is_configured())
+        else if(_impl->reshape_b_only_on_first_run && !_impl->run_vector_matrix_multiplication && !_impl->asm_glue->is_configured())
        {
             if(!original_b_managed_by_weights_manager)
             {
-                ARM_COMPUTE_ERROR_ON(!_original_b->is_used());
+                ARM_COMPUTE_ERROR_ON(!_impl->original_b->is_used());
             }
 
-            _tmp_b.allocator()->allocate();
-            NEScheduler::get().schedule(_transpose_kernel.get(), Window::DimY);
+            _impl->tmp_b.allocator()->allocate();
+            ITensorPack transpose_pack{ { ACL_SRC, _impl->original_b }, { ACL_DST, &_impl->tmp_b } };
+            NEScheduler::get().schedule_op(_impl->transpose_kernel.get(), Window::DimY, _impl->transpose_kernel->window(), transpose_pack);
 
             if(!original_b_managed_by_weights_manager)
             {
-                _original_b->mark_as_unused();
+                _impl->original_b->mark_as_unused();
             }
         }
 
-        _is_prepared = true;
+        _impl->is_prepared = true;
     }
 }
 } // namespace arm_compute
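prepare() implements the run-once weight reshaping seen above: on the first call it either lets the assembly path pack B (marking the original tensor unused when the packed copy is persistent) or runs the transpose kernel itself, then latches is_prepared. A condensed sketch of the contract (function and parameter names illustrative):

    void prepare_once(bool &is_prepared, ITensor *original_b /* weights */)
    {
        if(is_prepared)
        {
            return; // subsequent run() calls skip all reshaping work
        }
        // ... reshape/pack original_b into a persistent buffer here ...
        original_b->mark_as_unused(); // lets the framework reclaim the raw weights
        is_prepared = true;
    }

Since run() calls prepare() as its first statement, callers never need to invoke prepare() explicitly.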
"src/core/NEON/kernels/NEConvertQuantizedSignednessKernel.h" -#include "src/core/NEON/kernels/NEGEMMInterleave4x4Kernel.h" -#include "src/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.h" -#include "src/core/NEON/kernels/NEGEMMLowpOffsetContributionKernel.h" -#include "src/core/NEON/kernels/NEGEMMLowpOffsetContributionOutputStageKernel.h" -#include "src/core/NEON/kernels/NEGEMMLowpReductionKernel.h" -#include "src/core/NEON/kernels/NEGEMMMatrixAdditionKernel.h" -#include "src/core/NEON/kernels/NEGEMMMatrixMultiplyKernel.h" -#include "src/core/NEON/kernels/NEGEMMTranspose1xWKernel.h" -#include "src/core/NEON/kernels/NEGEMMTranspose1xWKernel.h" #include "src/core/NEON/kernels/NEIm2ColKernel.h" #include "src/core/NEON/kernels/NEWeightsReshapeKernel.h" diff --git a/src/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.cpp b/src/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.cpp index 224fb1eb56..0aba3c03ec 100644 --- a/src/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.cpp +++ b/src/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.cpp @@ -41,12 +41,12 @@ #include "arm_compute/runtime/MemoryGroup.h" #include "arm_compute/runtime/NEON/functions/NEActivationLayer.h" #include "src/core/NEON/kernels/NEConvertQuantizedSignednessKernel.h" -#include "src/core/NEON/kernels/NEGEMMInterleave4x4Kernel.h" #include "src/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.h" #include "src/core/NEON/kernels/NEGEMMLowpOffsetContributionKernel.h" #include "src/core/NEON/kernels/NEGEMMLowpOffsetContributionOutputStageKernel.h" #include "src/core/NEON/kernels/NEGEMMLowpReductionKernel.h" -#include "src/core/NEON/kernels/NEGEMMTranspose1xWKernel.h" +#include "src/core/cpu/kernels/CpuGemmInterleave4x4Kernel.h" +#include "src/core/cpu/kernels/CpuGemmTranspose1xWKernel.h" #include "src/runtime/cpu/operators/internal/CpuGemmAssemblyDispatch.h" namespace arm_compute @@ -72,8 +72,8 @@ struct NEGEMMLowpMatrixMultiplyCore::Impl IWeightsManager *weights_manager{ nullptr }; std::unique_ptr<cpu::CpuGemmAssemblyDispatch> asm_glue{ nullptr }; std::unique_ptr<NEGEMMLowpMatrixMultiplyKernel> mm_kernel{ nullptr }; - std::unique_ptr<NEGEMMInterleave4x4Kernel> mtx_a_reshape_kernel{ nullptr }; - std::unique_ptr<NEGEMMTranspose1xWKernel> mtx_b_reshape_kernel{ nullptr }; + std::unique_ptr<cpu::kernels::CpuGemmInterleave4x4Kernel> mtx_a_reshape_kernel{ nullptr }; + std::unique_ptr<cpu::kernels::CpuGemmTranspose1xWKernel> mtx_b_reshape_kernel{ nullptr }; std::unique_ptr<NEGEMMLowpMatrixAReductionKernel> mtx_a_reduction_kernel{ nullptr }; std::unique_ptr<NEGEMMLowpMatrixBReductionKernel> mtx_b_reduction_kernel{ nullptr }; std::unique_ptr<NEGEMMLowpOffsetContributionKernel> offset_contribution_kernel{ nullptr }; @@ -82,6 +82,7 @@ struct NEGEMMLowpMatrixMultiplyCore::Impl std::unique_ptr<NEConvertQuantizedSignednessKernel> convert_to_signed_asymm{ nullptr }; std::unique_ptr<NEConvertQuantizedSignednessKernel> convert_from_signed_asymm{ nullptr }; + const ITensor *a_to_use{ nullptr }; Tensor vector_sum_col{}; Tensor vector_sum_row{}; Tensor tmp_a{}; @@ -142,20 +143,20 @@ void NEGEMMLowpMatrixMultiplyCore::configure(const ITensor *a, const ITensor *b, _impl->asm_glue = std::make_unique<cpu::CpuGemmAssemblyDispatch>(); - const ITensor *a_to_use = a; + _impl->a_to_use = a; // Convert to QASYMM8 -> QASYMM8_SIGNED and back if(_impl->flip_signedness) { const int32_t offset_correction = 128; const DataType dt = DataType::QASYMM8_SIGNED; - const UniformQuantizationInfo iqinfo = a_to_use->info()->quantization_info().uniform(); + const 
@@ -142,20 +143,20 @@ void NEGEMMLowpMatrixMultiplyCore::configure(const ITensor *a, const ITensor *b,
 
     _impl->asm_glue = std::make_unique<cpu::CpuGemmAssemblyDispatch>();
 
-    const ITensor *a_to_use = a;
+    _impl->a_to_use = a;
 
     // Convert to QASYMM8 -> QASYMM8_SIGNED and back
     if(_impl->flip_signedness)
     {
         const int32_t                 offset_correction = 128;
         const DataType                dt                = DataType::QASYMM8_SIGNED;
-        const UniformQuantizationInfo iqinfo            = a_to_use->info()->quantization_info().uniform();
+        const UniformQuantizationInfo iqinfo            = _impl->a_to_use->info()->quantization_info().uniform();
 
-        _impl->signed_a.allocator()->init(a_to_use->info()->clone()->set_data_type(dt).set_quantization_info(QuantizationInfo(iqinfo.scale, iqinfo.offset + offset_correction)));
+        _impl->signed_a.allocator()->init(_impl->a_to_use->info()->clone()->set_data_type(dt).set_quantization_info(QuantizationInfo(iqinfo.scale, iqinfo.offset + offset_correction)));
         _impl->memory_group.manage(&_impl->signed_a);
         _impl->convert_to_signed_asymm = std::make_unique<NEConvertQuantizedSignednessKernel>();
-        _impl->convert_to_signed_asymm->configure(a_to_use, &_impl->signed_a);
-        a_to_use          = &_impl->signed_a;
+        _impl->convert_to_signed_asymm->configure(_impl->a_to_use, &_impl->signed_a);
+        _impl->a_to_use   = &_impl->signed_a;
         _impl->a_offset   = _impl->signed_a.info()->quantization_info().uniform().offset;
 
         const UniformQuantizationInfo oqinfo = output->info()->quantization_info().uniform();
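Background on the signedness flip (pre-existing logic; only the state moved into Impl here): a quantized value represents real = scale * (q - offset), so applying the same correction (here 128) to both the stored values and the zero point when reinterpreting QASYMM8 data as QASYMM8_SIGNED leaves the represented real values unchanged:

    // The real value is invariant under a common shift k of value and zero point:
    // scale * (q - offset) == scale * ((q + k) - (offset + k))
    inline float dequantize(float scale, int32_t q, int32_t offset)
    {
        return scale * static_cast<float>(q - offset);
    }

Note also why a_to_use becomes an Impl member rather than a local: run() now builds its ITensorPacks at execution time and must reference the (possibly converted) A tensor, so the pointer has to outlive configure().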
@@ -192,10 +193,10 @@ void NEGEMMLowpMatrixMultiplyCore::configure(const ITensor *a, const ITensor *b,
             case DataType::U8:
             case DataType::S8:
             {
-                if(is_data_type_quantized_asymmetric(a_to_use->info()->data_type()) && info.gemmlowp_output_stage().type == GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT)
+                if(is_data_type_quantized_asymmetric(_impl->a_to_use->info()->data_type()) && info.gemmlowp_output_stage().type == GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT)
                 {
                     auto c_info_to_use = c == nullptr ? nullptr : c->info();
-                    _impl->asm_glue->configure(a_to_use->info(), b->info(), c_info_to_use, output->info(), asm_info);
+                    _impl->asm_glue->configure(_impl->a_to_use->info(), b->info(), c_info_to_use, output->info(), asm_info);
                     _impl->fused_assembly_path = _impl->asm_glue->is_configured();
                     _impl->asm_glue_run_pack.add_const_tensor(TensorType::ACL_SRC_2, c);
                     _impl->asm_glue_run_pack.add_tensor(TensorType::ACL_DST, output);
@@ -203,14 +204,14 @@ void NEGEMMLowpMatrixMultiplyCore::configure(const ITensor *a, const ITensor *b,
                 else
                 {
                     auto output_to_use = (_impl->fuse_output_stage ? &_impl->mm_result_s32 : output);
-                    _impl->asm_glue->configure(a_to_use->info(), b->info(), nullptr, output_to_use->info(), asm_info);
+                    _impl->asm_glue->configure(_impl->a_to_use->info(), b->info(), nullptr, output_to_use->info(), asm_info);
                     _impl->asm_glue_run_pack.add_tensor(TensorType::ACL_DST, output_to_use);
                 }
                 _impl->assembly_path = _impl->asm_glue->is_configured();
 
                 if(_impl->assembly_path)
                 {
-                    _impl->asm_glue_run_pack.add_const_tensor(TensorType::ACL_SRC_0, a_to_use);
+                    _impl->asm_glue_run_pack.add_const_tensor(TensorType::ACL_SRC_0, _impl->a_to_use);
 
                     _impl->aux_mem_req        = _impl->asm_glue->workspace();
                     _impl->asm_glue_prep_pack = { { TensorType::ACL_SRC_1, b }, { TensorType::ACL_SRC_2, c } };
@@ -232,7 +233,7 @@ void NEGEMMLowpMatrixMultiplyCore::configure(const ITensor *a, const ITensor *b,
             matrix_b = &_impl->tmp_b;
 
             // The interleaved output matrix will have the following shape: [ a_height * 4, ceil(a_width / 4.0f) ]
-            TensorInfo a_info(compute_interleaved_shape(*a_to_use->info()), 1, a_to_use->info()->data_type(), a_to_use->info()->quantization_info());
+            TensorInfo a_info(compute_interleaved_shape(*_impl->a_to_use->info()), 1, _impl->a_to_use->info()->data_type(), _impl->a_to_use->info()->quantization_info());
             // The transpose1xW output matrix will have the following shape: [ b_height * 16, ceil(b_width / 16.0f) ]
             TensorInfo b_info(compute_transpose1xW_shape(*b->info()), 1, b->info()->data_type(), b->info()->quantization_info());
             _impl->tmp_a.allocator()->init(a_info);
@@ -244,18 +245,18 @@ void NEGEMMLowpMatrixMultiplyCore::configure(const ITensor *a, const ITensor *b,
             }
 
             // Configure interleave kernel
-            _impl->mtx_a_reshape_kernel = std::make_unique<NEGEMMInterleave4x4Kernel>();
-            _impl->mtx_a_reshape_kernel->configure(a_to_use, &_impl->tmp_a);
+            _impl->mtx_a_reshape_kernel = std::make_unique<cpu::kernels::CpuGemmInterleave4x4Kernel>();
+            _impl->mtx_a_reshape_kernel->configure(_impl->a_to_use->info(), _impl->tmp_a.info());
 
             // Configure transpose kernel
-            _impl->mtx_b_reshape_kernel = std::make_unique<NEGEMMTranspose1xWKernel>();
-            _impl->mtx_b_reshape_kernel->configure(b, &_impl->tmp_b);
+            _impl->mtx_b_reshape_kernel = std::make_unique<cpu::kernels::CpuGemmTranspose1xWKernel>();
+            _impl->mtx_b_reshape_kernel->configure(b->info(), _impl->tmp_b.info());
         }
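Working through the shape comments above with illustrative sizes: the lowp path interleaves A in 4x4 blocks and transposes B in 1x16 chunks (8-bit data, so W = 16 / 1 = 16). An A of width 64 and height 100 becomes [ 100 * 4, ceil(64 / 4.0) ] = [ 400, 16 ], and a B of width 50 and height 64 becomes [ 64 * 16, ceil(50 / 16.0) ] = [ 1024, 4 ]. The element count is preserved in each case; only the layout changes to make the matrix multiply kernel's accesses contiguous.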
         if(!_impl->fused_assembly_path)
         {
             // Build reduction info
-            const GEMMLowpReductionKernelInfo reduction_info(a_to_use->info()->dimension(0), false, 0, false);
+            const GEMMLowpReductionKernelInfo reduction_info(_impl->a_to_use->info()->dimension(0), false, 0, false);
 
             // Initialize matrix B reduction kernel only if _impl->a_offset is not equal to 0
             if(_impl->a_offset != 0)
@@ -276,14 +277,14 @@ void NEGEMMLowpMatrixMultiplyCore::configure(const ITensor *a, const ITensor *b,
             // Initialize Matrix A reduction kernel only if _impl->b_offset is not equal to 0
             if(_impl->b_offset != 0)
             {
-                TensorInfo info_vector_sum_row(compute_reductionB_shape(*a_to_use->info()), 1, DataType::S32);
+                TensorInfo info_vector_sum_row(compute_reductionB_shape(*_impl->a_to_use->info()), 1, DataType::S32);
                 _impl->vector_sum_row.allocator()->init(info_vector_sum_row);
                 _impl->memory_group.manage(&_impl->vector_sum_row);
 
                 // Configure matrix A reduction kernel
                 _impl->mtx_a_reduction_kernel = std::make_unique<NEGEMMLowpMatrixAReductionKernel>();
-                _impl->mtx_a_reduction_kernel->configure(a_to_use, &_impl->vector_sum_row, reduction_info);
+                _impl->mtx_a_reduction_kernel->configure(_impl->a_to_use, &_impl->vector_sum_row, reduction_info);
             }
 
             if(_impl->fuse_output_stage)
@@ -319,7 +320,8 @@ void NEGEMMLowpMatrixMultiplyCore::configure(const ITensor *a, const ITensor *b,
                 }
                 // Configure offset contribution kernel
                 _impl->offset_contribution_kernel = std::make_unique<NEGEMMLowpOffsetContributionKernel>();
-                _impl->offset_contribution_kernel->configure(output, _impl->a_offset == 0 ? nullptr : &_impl->vector_sum_col, _impl->b_offset == 0 ? nullptr : &_impl->vector_sum_row, a_to_use->info()->dimension(0),
+                _impl->offset_contribution_kernel->configure(output, _impl->a_offset == 0 ? nullptr : &_impl->vector_sum_col, _impl->b_offset == 0 ? nullptr : &_impl->vector_sum_row,
+                                                             _impl->a_to_use->info()->dimension(0),
                                                              _impl->a_offset, _impl->b_offset);
             }
         }
@@ -487,8 +489,8 @@ Status NEGEMMLowpMatrixMultiplyCore::validate(const ITensorInfo *a, const ITenso
             auto_init_if_empty(tmp_a_info, a_to_use->clone()->set_tensor_shape(shape_tmp_a));
             auto_init_if_empty(tmp_b_info, b->clone()->set_tensor_shape(shape_tmp_b));
 
-            ARM_COMPUTE_RETURN_ON_ERROR(NEGEMMInterleave4x4Kernel::validate(a_to_use, &tmp_a_info));
-            ARM_COMPUTE_RETURN_ON_ERROR(NEGEMMTranspose1xWKernel::validate(b, &tmp_b_info));
+            ARM_COMPUTE_RETURN_ON_ERROR(cpu::kernels::CpuGemmInterleave4x4Kernel::validate(a_to_use, &tmp_a_info));
+            ARM_COMPUTE_RETURN_ON_ERROR(cpu::kernels::CpuGemmTranspose1xWKernel::validate(b, &tmp_b_info));
         }
     }
@@ -585,12 +587,14 @@ void NEGEMMLowpMatrixMultiplyCore::run()
         if(!_impl->run_vector_matrix_multiplication)
         {
             // Run interleave kernel
-            NEScheduler::get().schedule(_impl->mtx_a_reshape_kernel.get(), Window::DimY);
+            ITensorPack interleave_pack{ { ACL_SRC, _impl->a_to_use }, { ACL_DST, &_impl->tmp_a } };
+            NEScheduler::get().schedule_op(_impl->mtx_a_reshape_kernel.get(), Window::DimY, _impl->mtx_a_reshape_kernel->window(), interleave_pack);
 
             if(!_impl->reshape_b_only_on_first_run)
             {
                 // Run transpose kernel
-                NEScheduler::get().schedule(_impl->mtx_b_reshape_kernel.get(), Window::DimY);
+                ITensorPack reshape_b_pack{ { ACL_SRC, _impl->original_b }, { ACL_DST, &_impl->tmp_b } };
+                NEScheduler::get().schedule_op(_impl->mtx_b_reshape_kernel.get(), Window::DimY, _impl->mtx_b_reshape_kernel->window(), reshape_b_pack);
             }
         }
         NEScheduler::get().schedule(_impl->mm_kernel.get(), Window::DimY);
@@ -662,7 +666,8 @@ void NEGEMMLowpMatrixMultiplyCore::prepare()
         {
             // Run reshape kernel and mark original weights tensor as unused
             _impl->tmp_b.allocator()->allocate();
-            NEScheduler::get().schedule(_impl->mtx_b_reshape_kernel.get(), Window::DimY);
+            ITensorPack reshape_b_pack{ { ACL_SRC, _impl->original_b }, { ACL_DST, &_impl->tmp_b } };
+            NEScheduler::get().schedule_op(_impl->mtx_b_reshape_kernel.get(), Window::DimY, _impl->mtx_b_reshape_kernel->window(), reshape_b_pack);
         }
 
         // Run matrix B reduction kernel only if _impl->a_offset is not equal to 0
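For context on why the reduction kernels above are gated on the opposite operand's offset: with zero points a_offset and b_offset, the standard gemmlowp expansion of sum_j (a_q[y][j] - a_offset) * (b_q[j][x] - b_offset) means the raw integer GEMM result must be corrected as follows (background identity, not introduced by this patch; k is the depth, a->info()->dimension(0)):

    s32_dst[y][x] = s32_mm[y][x]
                  - a_offset * vector_sum_col[x]   // column sums of B, needed iff a_offset != 0
                  - b_offset * vector_sum_row[y]   // row sums of A, needed iff b_offset != 0
                  + a_offset * b_offset * k

This is exactly the set of inputs the offset contribution kernel is configured with above, and why either reduction can be skipped when the corresponding offset is zero.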
-#include "src/core/NEON/kernels/NEGEMMLowpReductionKernel.h" -#include "src/core/NEON/kernels/NEGEMMMatrixAdditionKernel.h" -#include "src/core/NEON/kernels/NEGEMMMatrixMultiplyKernel.h" -#include "src/core/NEON/kernels/NEGEMMTranspose1xWKernel.h" namespace arm_compute { diff --git a/src/runtime/NEON/functions/NELSTMLayerQuantized.cpp b/src/runtime/NEON/functions/NELSTMLayerQuantized.cpp index a56e16864a..eb7d584da2 100644 --- a/src/runtime/NEON/functions/NELSTMLayerQuantized.cpp +++ b/src/runtime/NEON/functions/NELSTMLayerQuantized.cpp @@ -26,15 +26,6 @@ #include "arm_compute/core/Utils.h" #include "arm_compute/core/Validate.h" #include "arm_compute/core/utils/quantization/AsymmHelpers.h" -#include "src/core/NEON/kernels/NEConvertQuantizedSignednessKernel.h" -#include "src/core/NEON/kernels/NEGEMMInterleave4x4Kernel.h" -#include "src/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.h" -#include "src/core/NEON/kernels/NEGEMMLowpOffsetContributionKernel.h" -#include "src/core/NEON/kernels/NEGEMMLowpOffsetContributionOutputStageKernel.h" -#include "src/core/NEON/kernels/NEGEMMLowpReductionKernel.h" -#include "src/core/NEON/kernels/NEGEMMMatrixAdditionKernel.h" -#include "src/core/NEON/kernels/NEGEMMMatrixMultiplyKernel.h" -#include "src/core/NEON/kernels/NEGEMMTranspose1xWKernel.h" #include "src/core/helpers/AutoConfiguration.h" #include <cmath> diff --git a/src/runtime/NEON/functions/NEQLSTMLayer.cpp b/src/runtime/NEON/functions/NEQLSTMLayer.cpp index 85d62ac058..f3a3d23256 100644 --- a/src/runtime/NEON/functions/NEQLSTMLayer.cpp +++ b/src/runtime/NEON/functions/NEQLSTMLayer.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020 Arm Limited. + * Copyright (c) 2020-2021 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -31,12 +31,10 @@ #include "arm_compute/core/utils/quantization/AsymmHelpers.h" #include "arm_compute/runtime/NEON/NEScheduler.h" #include "src/core/NEON/kernels/NEConvertQuantizedSignednessKernel.h" -#include "src/core/NEON/kernels/NEGEMMInterleave4x4Kernel.h" #include "src/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.h" #include "src/core/NEON/kernels/NEGEMMLowpOffsetContributionKernel.h" #include "src/core/NEON/kernels/NEGEMMLowpOffsetContributionOutputStageKernel.h" #include "src/core/NEON/kernels/NEGEMMLowpReductionKernel.h" -#include "src/core/NEON/kernels/NEGEMMTranspose1xWKernel.h" #include "src/core/NEON/kernels/NEQLSTMLayerNormalizationKernel.h" #include "src/core/helpers/WindowHelpers.h" diff --git a/src/runtime/NEON/functions/NERNNLayer.cpp b/src/runtime/NEON/functions/NERNNLayer.cpp index d59f7da0dd..6f6d4d942d 100644 --- a/src/runtime/NEON/functions/NERNNLayer.cpp +++ b/src/runtime/NEON/functions/NERNNLayer.cpp @@ -30,15 +30,6 @@ #include "arm_compute/core/Validate.h" #include "arm_compute/core/utils/misc/ShapeCalculator.h" #include "arm_compute/runtime/NEON/NEScheduler.h" -#include "src/core/NEON/kernels/NEConvertQuantizedSignednessKernel.h" -#include "src/core/NEON/kernels/NEGEMMInterleave4x4Kernel.h" -#include "src/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.h" -#include "src/core/NEON/kernels/NEGEMMLowpOffsetContributionKernel.h" -#include "src/core/NEON/kernels/NEGEMMLowpOffsetContributionOutputStageKernel.h" -#include "src/core/NEON/kernels/NEGEMMLowpReductionKernel.h" -#include "src/core/NEON/kernels/NEGEMMMatrixAdditionKernel.h" -#include "src/core/NEON/kernels/NEGEMMMatrixMultiplyKernel.h" -#include "src/core/NEON/kernels/NEGEMMTranspose1xWKernel.h" namespace arm_compute { |