From 93b75e0c072c3cc5654fcdf6aed1068b40012081 Mon Sep 17 00:00:00 2001
From: Michele Di Giorgio
Date: Mon, 21 Jun 2021 12:00:43 +0100
Subject: Port NEGEMM to memory injecting interface (Part 1)

- Start porting NEGEMM to the new API
- Port NEGEMMInterleave4x4Kernel to the new API
- Port NEGEMMMatrixAdditionKernel to the new API
- Port NEGEMMTranspose1xWKernel to the new API
- Remove padding from NEGEMMMatrixAdditionKernel
- Remove unused INESimpleKernel and ICPPSimpleKernel

Partially resolves: COMPMID-4402

Change-Id: I63edadddfe00a54586e5384d6a0211db25ae9042
Signed-off-by: Michele Di Giorgio
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/5857
Reviewed-by: Georgios Pinitas
Tested-by: Arm Jenkins
Comments-Addressed: Arm Jenkins
---
 src/core/NEON/INESimpleKernel.h                    |  34 ----
 src/core/NEON/NEKernels.h                          |   3 -
 .../NEON/kernels/NEGEMMInterleave4x4Kernel.cpp     | 187 ---------------------
 src/core/NEON/kernels/NEGEMMInterleave4x4Kernel.h  | 112 ------------
 .../kernels/NEGEMMLowpMatrixMultiplyKernel.cpp     |   2 +-
 .../NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.h  |   2 +-
 .../NEON/kernels/NEGEMMMatrixAdditionKernel.cpp    | 164 ------------------
 src/core/NEON/kernels/NEGEMMMatrixAdditionKernel.h |  98 -----------
 src/core/NEON/kernels/NEGEMMMatrixMultiplyKernel.h |   8 +-
 src/core/NEON/kernels/NEGEMMTranspose1xWKernel.cpp | 144 ----------------
 src/core/NEON/kernels/NEGEMMTranspose1xWKernel.h   | 107 ------------
 11 files changed, 6 insertions(+), 855 deletions(-)
 delete mode 100644 src/core/NEON/INESimpleKernel.h
 delete mode 100644 src/core/NEON/kernels/NEGEMMInterleave4x4Kernel.cpp
 delete mode 100644 src/core/NEON/kernels/NEGEMMInterleave4x4Kernel.h
 delete mode 100644 src/core/NEON/kernels/NEGEMMMatrixAdditionKernel.cpp
 delete mode 100644 src/core/NEON/kernels/NEGEMMMatrixAdditionKernel.h
 delete mode 100644 src/core/NEON/kernels/NEGEMMTranspose1xWKernel.cpp
 delete mode 100644 src/core/NEON/kernels/NEGEMMTranspose1xWKernel.h
(limited to 'src/core/NEON')

diff --git a/src/core/NEON/INESimpleKernel.h b/src/core/NEON/INESimpleKernel.h
deleted file mode 100644
index 2986e7b5c9..0000000000
--- a/src/core/NEON/INESimpleKernel.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * Copyright (c) 2016-2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */ -#ifndef ARM_COMPUTE_INESIMPLEKERNEL_H -#define ARM_COMPUTE_INESIMPLEKERNEL_H - -#include "arm_compute/core/CPP/ICPPSimpleKernel.h" - -namespace arm_compute -{ -/** Interface for simple CPU kernels having 1 tensor input and 1 tensor output */ -using INESimpleKernel = ICPPSimpleKernel; -} // namespace arm_compute -#endif /*ARM_COMPUTE_INESIMPLEKERNEL_H */ diff --git a/src/core/NEON/NEKernels.h b/src/core/NEON/NEKernels.h index 268871a4e8..0f7475c0b5 100644 --- a/src/core/NEON/NEKernels.h +++ b/src/core/NEON/NEKernels.h @@ -41,14 +41,11 @@ #include "src/core/NEON/kernels/NEFFTScaleKernel.h" #include "src/core/NEON/kernels/NEFillBorderKernel.h" #include "src/core/NEON/kernels/NEFuseBatchNormalizationKernel.h" -#include "src/core/NEON/kernels/NEGEMMInterleave4x4Kernel.h" #include "src/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.h" #include "src/core/NEON/kernels/NEGEMMLowpOffsetContributionKernel.h" #include "src/core/NEON/kernels/NEGEMMLowpOffsetContributionOutputStageKernel.h" #include "src/core/NEON/kernels/NEGEMMLowpReductionKernel.h" -#include "src/core/NEON/kernels/NEGEMMMatrixAdditionKernel.h" #include "src/core/NEON/kernels/NEGEMMMatrixMultiplyKernel.h" -#include "src/core/NEON/kernels/NEGEMMTranspose1xWKernel.h" #include "src/core/NEON/kernels/NEGatherKernel.h" #include "src/core/NEON/kernels/NEGenerateProposalsLayerKernel.h" #include "src/core/NEON/kernels/NEIm2ColKernel.h" diff --git a/src/core/NEON/kernels/NEGEMMInterleave4x4Kernel.cpp b/src/core/NEON/kernels/NEGEMMInterleave4x4Kernel.cpp deleted file mode 100644 index 9011680c9b..0000000000 --- a/src/core/NEON/kernels/NEGEMMInterleave4x4Kernel.cpp +++ /dev/null @@ -1,187 +0,0 @@ -/* - * Copyright (c) 2016-2021 Arm Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ -#include "src/core/NEON/kernels/NEGEMMInterleave4x4Kernel.h" - -#include "arm_compute/core/Error.h" -#include "arm_compute/core/Helpers.h" -#include "arm_compute/core/ITensor.h" -#include "arm_compute/core/Types.h" -#include "arm_compute/core/Validate.h" -#include "arm_compute/core/Window.h" -#include "arm_compute/core/utils/misc/ShapeCalculator.h" -#include "src/core/NEON/INEKernel.h" -#include "src/core/helpers/AutoConfiguration.h" -#include "src/core/helpers/WindowHelpers.h" - -#include -#include -#include -#include - -using namespace arm_compute; -using namespace arm_compute::misc::shape_calculator; - -namespace -{ -Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output) -{ - ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input); - //Note: ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(input) is not needed here as this kernel doesn't use CPU FP16 instructions. - ARM_COMPUTE_RETURN_ERROR_ON(input->data_type() == DataType::UNKNOWN); - ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output); - - if(output->total_size() != 0) - { - TensorShape output_shape = input->tensor_shape(); - output_shape.set(0, input->dimension(0) * 4); - output_shape.set(1, std::ceil(input->dimension(1) / 4.0f)); - ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(output->tensor_shape(), output_shape); - ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output); - ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_QUANTIZATION_INFO(input, output); - } - - return Status{}; -} -} // namespace - -NEGEMMInterleave4x4Kernel::NEGEMMInterleave4x4Kernel() - : _func(nullptr) -{ -} - -void NEGEMMInterleave4x4Kernel::configure(const ITensor *input, ITensor *output) -{ - ARM_COMPUTE_ERROR_ON_NULLPTR(input, output); - - // Output auto inizialitation if not yet initialized - auto_init_if_empty(*output->info(), input->info()->clone()->set_tensor_shape(compute_interleaved_shape(*input->info()))); - - // Perform validate step - ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info())); - - _input = input; - _output = output; - - switch(input->info()->element_size()) - { - case 1: - _func = &NEGEMMInterleave4x4Kernel::gemm_interleave4x4; - break; - case 2: - _func = &NEGEMMInterleave4x4Kernel::gemm_interleave4x4; - break; - case 4: - _func = &NEGEMMInterleave4x4Kernel::gemm_interleave4x4; - break; - default: - ARM_COMPUTE_ERROR_ON("Element size not supported"); - break; - } - - Window win = calculate_max_window(*input->info(), Steps(1, 4)); - - INEKernel::configure(win); -} - -Status NEGEMMInterleave4x4Kernel::validate(const ITensorInfo *input, const ITensorInfo *output) -{ - ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output)); - - return Status{}; -} - -template -void NEGEMMInterleave4x4Kernel::gemm_interleave4x4(const ITensor *input, ITensor *output, const Window &window) -{ - const size_t window_start_x = window.x().start(); - const size_t window_end_x = window.x().end(); - - const size_t in_height = input->info()->dimension(1); - const size_t in_stride = input->info()->strides_in_bytes()[1]; - - const size_t partial_y = in_height % 4; - - // Set window for the input tensor - Window win = window; - win.set(Window::DimX, Window::Dimension(0, 1, 1)); - - // Set window for the output tensor - Window win_out(window); - win_out.set(Window::DimX, Window::Dimension(0, 1, 1)); - win_out.scale(Window::DimY, 0.25f); - - Iterator in(input, win); - Iterator out(output, win_out); - - execute_window_loop(win, [&](const Coordinates & id) - { - if(id.y() + 4 <= static_cast(in_height)) - 
{ - for(size_t x = window_start_x; x < window_end_x; ++x) - { - const ScalarType data[4] = - { - *(reinterpret_cast(in.ptr() + 0 * in_stride) + x), - *(reinterpret_cast(in.ptr() + 1 * in_stride) + x), - *(reinterpret_cast(in.ptr() + 2 * in_stride) + x), - *(reinterpret_cast(in.ptr() + 3 * in_stride) + x), - }; - std::memcpy(out.ptr() + x * 4 * sizeof(ScalarType), data, 4 * sizeof(ScalarType)); - } - } - else - { - for(size_t x = window_start_x; x < window_end_x; ++x) - { - ScalarType data[4] = { 0, 0, 0, 0 }; - - for(size_t y = 0; y < partial_y; ++y) - { - data[y] = *(reinterpret_cast(in.ptr() + y * in_stride) + x); - } - - std::memcpy(out.ptr() + x * 4 * sizeof(ScalarType), data, 4 * sizeof(ScalarType)); - } - } - }, - in, out); -} - -void NEGEMMInterleave4x4Kernel::run(const Window &window, const ThreadInfo &info) -{ - ARM_COMPUTE_UNUSED(info); - ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this); - ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(INEKernel::window(), window); - ARM_COMPUTE_ERROR_ON(_func == nullptr); - /* - * This kernel puts the values in a 4x4 block of Matrix A on the same row (Interleaved values) - * |a00 a01 a02 a03| - * |a10 a11 a12 a13| - * |a20 a21 a22 a23| = | a00 a10 a20 a30 || a01 a11 a21 a31 || a02 a12 a22 a32 || a03 a13 a23 a33 | - * |a30 a31 a32 a33| - * - * After this operation, the output matrix will have the following shape: [ height * 4, ceil(width / 4.0f) ] - */ - (this->*_func)(_input, _output, window); -} diff --git a/src/core/NEON/kernels/NEGEMMInterleave4x4Kernel.h b/src/core/NEON/kernels/NEGEMMInterleave4x4Kernel.h deleted file mode 100644 index e592d5ef6e..0000000000 --- a/src/core/NEON/kernels/NEGEMMInterleave4x4Kernel.h +++ /dev/null @@ -1,112 +0,0 @@ -/* - * Copyright (c) 2016-2021 Arm Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ -#ifndef ARM_COMPUTE_NEGEMMINTERLEAVE4x4KERNEL_H -#define ARM_COMPUTE_NEGEMMINTERLEAVE4x4KERNEL_H - -#include "src/core/NEON/INESimpleKernel.h" - -namespace arm_compute -{ -class ITensor; - -/** Kernel to interleave the elements of a matrix - * - * This function puts the values in a 4x4 block of Matrix A on the same row (Interleaved values) - * - * @f[ - * \left( \begin{array}{cccc} - * a00 & a01 & a02 & a03 \\ - * a10 & a11 & a12 & a13 \\ - * a20 & a21 & a22 & a23 \\ - * a30 & a31 & a32 & a33 \\ - * \end{array} \right) - * \rightarrow - * \left( \begin{array}{ccccccccccccccccc} - * a00 & a10 & a20 & a30 & a01 & a11 & a21 & a31 & a02 & a12 & a22 & a32 & a03 & a13 & a23 & a33 \\ - * \end{array} \right) - * @f] - * - * After this operation, the output matrix will have the following shape: [ height * 4, ceil(width / 4.0f) ] - */ -class NEGEMMInterleave4x4Kernel : public INESimpleKernel -{ -public: - const char *name() const override - { - return "NEGEMMInterleave4x4Kernel"; - } - /** Constructor */ - NEGEMMInterleave4x4Kernel(); - /** Prevent instances of this class from being copied (As this class contains pointers) */ - NEGEMMInterleave4x4Kernel(const NEGEMMInterleave4x4Kernel &) = delete; - /** Prevent instances of this class from being copied (As this class contains pointers) */ - NEGEMMInterleave4x4Kernel &operator=(const NEGEMMInterleave4x4Kernel &) = delete; - /** Allow instances of this class to be moved */ - NEGEMMInterleave4x4Kernel(NEGEMMInterleave4x4Kernel &&) = default; - /** Allow instances of this class to be moved */ - NEGEMMInterleave4x4Kernel &operator=(NEGEMMInterleave4x4Kernel &&) = default; - /** Default destructor */ - ~NEGEMMInterleave4x4Kernel() = default; - /** Initialise the kernel's input and output. - * - * @param[in] input Input tensor. Data types supported: All - * @param[out] output Output tensor which stores the interleaved matrix. Data type supported: same as @p input. - */ - void configure(const ITensor *input, ITensor *output); - /** Static function to check if given info will lead to a valid configuration of @ref NEGEMMInterleave4x4Kernel - * - * @param[in] input Input tensor info. Data types supported: All - * @param[in] output Output tensor info which stores the interleaved matrix. Data type supported: same as @p input. - * - * @return a status - */ - static Status validate(const ITensorInfo *input, const ITensorInfo *output); - - // Inherited methods overridden: - void run(const Window &window, const ThreadInfo &info) override; - -private: - /** Template function to run gemm interleave 4x4 - * - * @tparam ScalarType Scalar datatype - * - * @param[in] input Input tensor. Data types supported: uint32_t, uint16_t and uint8_t - * @param[out] output Output tensor. Data types supported: uint32_t, uint16_t and uint8_t - * @param[in] window Region on which to execute the kernel. (Must be a valid region of the window returned by window()). - */ - template - void gemm_interleave4x4(const ITensor *input, ITensor *output, const Window &window); - - /** Common signature for all the specialised gemm interleave 4x4 functions - * - * @param[in] input Input tensor. Data types supported: uint32_t, uint16_t and uint8_t - * @param[out] output Output tensor. Data types supported: uint32_t, uint16_t and uint8_t - * @param[in] window Region on which to execute the kernel. (Must be a valid region of the window returned by window()). 
- */ - using GEMMInterleaveFunctionFuncPtr = void (NEGEMMInterleave4x4Kernel::*)(const ITensor *input, ITensor *output, const Window &window); - - GEMMInterleaveFunctionFuncPtr _func; -}; -} // namespace arm_compute -#endif /*ARM_COMPUTE_NEGEMMINTERLEAVE4x4KERNEL_H*/ diff --git a/src/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.cpp b/src/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.cpp index b95bdd4ca5..6bcf59ee96 100644 --- a/src/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.cpp +++ b/src/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.cpp @@ -686,7 +686,7 @@ void inline matrix_multiply_s8(Iterator &ina, Iterator &inb, Iterator &out, int const auto width_out = static_cast(out_info.dimension(0)); const auto height_out = static_cast(out_info.dimension(1)); const size_t out_stride = out_info.strides_in_bytes()[1] / out_info.element_size(); - // The implementation assumes that the matrix A and Matrix B have been reshaped respectively with NEGEMMInterleave4x4 and NEGEMMTranspose1xW + // The implementation assumes that the matrix A and Matrix B have been reshaped respectively with CpuGemmInterleave4x4 and CpuGemmTranspose1xW // The reshaping of the matrices helps to have a cache friendly implementation and helps to avoid the data re-arrangements needed for computing 16x4 elements per iteration // All the values needed for computing a single 4x4 block will be read from consecutive memory positions execute_window_loop(window, [&](const Coordinates & id) diff --git a/src/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.h b/src/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.h index acfb79edeb..b9a1b5e840 100644 --- a/src/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.h +++ b/src/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.h @@ -61,7 +61,7 @@ public: ~NEGEMMLowpMatrixMultiplyKernel() = default; /** Initialise the kernel's input and output. * - * The input matrices @p input0 and @p input1 must be the output of the kernels: @ref NEGEMMInterleave4x4Kernel and @ref NEGEMMTranspose1xWKernel. These two + * The input matrices @p input0 and @p input1 must be the output of the kernels: cpu::kernels::CpuGemmInterleave4x4Kernel and @ref cpu::kernels::CpuGemmTranspose1xWKernel. These two * kernels change the layout of the original matrices to be more cache-friendly. * * @param[in] input0 Input tensor containing the interleaved Matrix A. Data type supported: U8/QASYMM8/S8/QASYMM8_SIGNED diff --git a/src/core/NEON/kernels/NEGEMMMatrixAdditionKernel.cpp b/src/core/NEON/kernels/NEGEMMMatrixAdditionKernel.cpp deleted file mode 100644 index 6a2802a991..0000000000 --- a/src/core/NEON/kernels/NEGEMMMatrixAdditionKernel.cpp +++ /dev/null @@ -1,164 +0,0 @@ -/* - * Copyright (c) 2016-2020 Arm Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ -#include "src/core/NEON/kernels/NEGEMMMatrixAdditionKernel.h" - -#include "arm_compute/core/Error.h" -#include "arm_compute/core/Helpers.h" -#include "arm_compute/core/Types.h" -#include "arm_compute/core/Validate.h" -#include "src/core/CPP/Validate.h" -#include "src/core/NEON/NEFixedPoint.h" -#include "src/core/helpers/AutoConfiguration.h" -#include "src/core/helpers/WindowHelpers.h" - -#include - -namespace arm_compute -{ -namespace -{ -constexpr unsigned int num_elems_processed_per_iteration = 16; - -Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, float beta) -{ - ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output); - ARM_COMPUTE_UNUSED(beta); - - ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(input); - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F16, DataType::F32); - - if(output->total_size() > 0) - { - ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output); - ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input, output); - } - - return Status{}; -} - -void matrix_addition_f32(const ITensor *input, ITensor *output, const Window &window, float beta) -{ - const float32x4_t beta_f32 = vdupq_n_f32(beta); - - Iterator in(input, window); - Iterator out(output, window); - - execute_window_loop(window, [&](const Coordinates &) - { - const auto in_ptr = reinterpret_cast(in.ptr()); - const auto out_ptr = reinterpret_cast(out.ptr()); - - float32x4x4_t alpha_ab = vld4q_f32(out_ptr); - const float32x4x4_t c = vld4q_f32(in_ptr); - - // Multiply matrix C by its weight and accumulate - alpha_ab.val[0] = vmlaq_f32(alpha_ab.val[0], c.val[0], beta_f32); - alpha_ab.val[1] = vmlaq_f32(alpha_ab.val[1], c.val[1], beta_f32); - alpha_ab.val[2] = vmlaq_f32(alpha_ab.val[2], c.val[2], beta_f32); - alpha_ab.val[3] = vmlaq_f32(alpha_ab.val[3], c.val[3], beta_f32); - - vst4q_f32(out_ptr, alpha_ab); - }, - in, out); -} - -#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC -void matrix_addition_f16(const ITensor *input, ITensor *output, const Window &window, float beta) -{ - const float16x8_t beta_f16 = vdupq_n_f16(beta); - - Iterator in(input, window); - Iterator out(output, window); - - execute_window_loop(window, [&](const Coordinates &) - { - const auto in_ptr = reinterpret_cast(in.ptr()); - const auto out_ptr = reinterpret_cast(out.ptr()); - - float16x8x2_t alpha_ab = vld2q_f16(out_ptr); - const float16x8x2_t c = vld2q_f16(in_ptr); - // Multiply matrix C by its weight and accumulate - alpha_ab.val[0] = vaddq_f16(alpha_ab.val[0], vmulq_f16(c.val[0], beta_f16)); - alpha_ab.val[1] = vaddq_f16(alpha_ab.val[1], vmulq_f16(c.val[1], beta_f16)); - - vst2q_f16(out_ptr + 0, alpha_ab); - }, - in, out); -} -#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */ - -} // namespace - -NEGEMMMatrixAdditionKernel::NEGEMMMatrixAdditionKernel() - : INESimpleKernel(), _func(nullptr), _beta(0.0f) -{ -} - -void NEGEMMMatrixAdditionKernel::configure(const ITensor *input, ITensor *output, float beta) -{ - ARM_COMPUTE_ERROR_ON_NULLPTR(input, output); - - // Perform validation step - 
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), beta)); - - switch(input->info()->data_type()) - { - case DataType::F32: - _func = &matrix_addition_f32; - break; - case DataType::F16: -#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC - _func = &matrix_addition_f16; - break; -#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */ - default: - ARM_COMPUTE_ERROR("Data type not supported"); - break; - } - - // Configure kernel window - INESimpleKernel::configure(input, output, num_elems_processed_per_iteration); - - _beta = beta; -} - -Status NEGEMMMatrixAdditionKernel::validate(const ITensorInfo *input, const ITensorInfo *output, float beta) -{ - ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output, beta)); - ARM_COMPUTE_RETURN_ON_ERROR(INESimpleKernel::validate(input->clone().get(), output->clone().get(), num_elems_processed_per_iteration)); - return Status{}; -} - -void NEGEMMMatrixAdditionKernel::run(const Window &window, const ThreadInfo &info) -{ - ARM_COMPUTE_UNUSED(info); - ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this); - ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(INESimpleKernel::window(), window); - - if(_beta != 0.0f) - { - (*_func)(_input, _output, window, _beta); - } -} -} // namespace arm_compute diff --git a/src/core/NEON/kernels/NEGEMMMatrixAdditionKernel.h b/src/core/NEON/kernels/NEGEMMMatrixAdditionKernel.h deleted file mode 100644 index c896cabc6a..0000000000 --- a/src/core/NEON/kernels/NEGEMMMatrixAdditionKernel.h +++ /dev/null @@ -1,98 +0,0 @@ -/* - * Copyright (c) 2016-2021 Arm Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ -#ifndef ARM_COMPUTE_NEGEMMMATRIXADDITIONKERNEL_H -#define ARM_COMPUTE_NEGEMMMATRIXADDITIONKERNEL_H - -#include "src/core/NEON/INESimpleKernel.h" - -namespace arm_compute -{ -class ITensor; - -/** Kernel to perform the in-place matrix addition between 2 matrices taking into account that the second matrix might be weighted by a scalar value beta: - * - * @note [ MTX_OUT = MTX_0 + beta * MTX_1 ] with MTX_0 and MTX_1 of the same size - * - * @note This stage is used to finalize the GEMM result and it is computed if and only if beta != 0.0. 
In case this kernel is used for finalizing GEMM result, we have: - * - MTX_0 = A * B * alpha, where MTX_0 is the output of @ref NEGEMMMatrixMultiplyKernel - * - MTX_1 = C - */ -class NEGEMMMatrixAdditionKernel : public INESimpleKernel -{ -public: - const char *name() const override - { - return "NEGEMMMatrixAdditionKernel"; - } - /** Constructor */ - NEGEMMMatrixAdditionKernel(); - /** Prevent instances of this class from being copied */ - NEGEMMMatrixAdditionKernel(const NEGEMMMatrixAdditionKernel &) = delete; - /** Prevent instances of this class from being copied */ - NEGEMMMatrixAdditionKernel &operator=(const NEGEMMMatrixAdditionKernel &) = delete; - /** Allow instances of this class to be moved */ - NEGEMMMatrixAdditionKernel(NEGEMMMatrixAdditionKernel &&) = default; - /** Allow instances of this class to be moved */ - NEGEMMMatrixAdditionKernel &operator=(NEGEMMMatrixAdditionKernel &&) = default; - /** Default destructor */ - ~NEGEMMMatrixAdditionKernel() = default; - /** Initialise the kernel's input and output. - * - * @note The input and output tensor must have the same dimensions - * - * @param[in] input Input tensor (Matrix C). Data types supported: F16/F32 - * @param[in, out] output Output tensor. If this kernel is used to finalize the GEMM result, output contains the result obtained by the kernel @ref NEGEMMMatrixMultiplyKernel. Data type supported: the same as @p input. - * @param[in] beta Weight of matrix C - */ - void configure(const ITensor *input, ITensor *output, float beta); - /** Static function to check if given info will lead to a valid configuration of @ref NEGEMMMatrixAdditionKernel. - * - * @note The input and output tensor must have the same dimensions - * - * @param[in] input Input tensor info (Matrix C). Data types supported: F16/F32 - * @param[in] output Output tensor info. If this kernel is used to finalize the GEMM result, output contains the result obtained by the kernel @ref NEGEMMMatrixMultiplyKernel. Data type supported: the same as @p input. - * @param[in] beta Weight of matrix C - * - * @return a status - */ - static Status validate(const ITensorInfo *input, const ITensorInfo *output, float beta); - - // Inherited methods overridden: - void run(const Window &window, const ThreadInfo &info) override; - -private: - /** Common signature for all the matrix addition functions - * - * @param[in] input An input tensor. Data types supported: F16/F32 - * @param[out] output The output tensor. Data type supported: same as @p input - * @param[in] window Region on which to execute the kernel. - * @param[in] beta Weight of matrix C - */ - using MatrixAdditionFunction = void(const ITensor *input, ITensor *output, const Window &window, float beta); - /** Matrix addition function to use for the particular tensor types passed to configure() */ - MatrixAdditionFunction *_func; - float _beta; -}; -} // namespace arm_compute -#endif /* ARM_COMPUTE_NEGEMMMATRIXADDITIONKERNEL_H */ diff --git a/src/core/NEON/kernels/NEGEMMMatrixMultiplyKernel.h b/src/core/NEON/kernels/NEGEMMMatrixMultiplyKernel.h index 3bc162a1b4..4341ff00df 100644 --- a/src/core/NEON/kernels/NEGEMMMatrixMultiplyKernel.h +++ b/src/core/NEON/kernels/NEGEMMMatrixMultiplyKernel.h @@ -32,7 +32,7 @@ class ITensor; /** Kernel to multiply two input matrices "A" and "B". 
All elements of the output matrix/vector will be multiplied by alpha after the matrix multiplication * - * @note If the output tensor is a matrix, the implementation assumes that the input tensors @p input0 and @p input1 are both matrices and reshaped respectively with @ref NEGEMMInterleave4x4Kernel" and @ref NEGEMMTranspose1xWKernel + * @note If the output tensor is a matrix, the implementation assumes that the input tensors @p input0 and @p input1 are both matrices and reshaped respectively with @ref cpu::kernels::CpuGemmInterleave4x4Kernel and @ref cpu::kernels::CpuGemmTranspose1xWKernel * @note If the output tensor is a vector and the data type is F32, the implementation assumes that the first input tensor @p input0 is a vector and the second input tensor @p input1 a matrix. The implementation also assumes that both tensors have not been reshaped * */ @@ -55,7 +55,7 @@ public: NEGEMMMatrixMultiplyKernel &operator=(NEGEMMMatrixMultiplyKernel &&) = default; /** Initialise the kernel's input and output. * - * @note If the output tensor is a matrix, the input matrices @p input0 and @p input1 should be the output of the kernels: @ref NEGEMMInterleave4x4Kernel and @ref NEGEMMTranspose1xWKernel + * @note If the output tensor is a matrix, the input matrices @p input0 and @p input1 should be the output of the kernels: @ref cpu::kernels::CpuGemmInterleave4x4Kernel and @ref cpu::kernels::CpuGemmTranspose1xWKernel * These two kernels change the layout of the original matrices to be more cache-friendly. * * @param[in] input0 Input tensor containing the interleaved Matrix A or the vector A. Data types supported: F16/F32 @@ -63,7 +63,7 @@ public: * If the output tensor is a vector, input1 must contain the matrix B not reshaped. Data type supported: same as @p input0 * @param[out] output Output tensor to store the result of matrix multiplication. Data type supported: same as @p input0. * @param[in] alpha Weight of the matrix product - * @param[in] is_interleaved (Optional) True if input0 and input1 have been reshaped respectively using @ref NEGEMMInterleave4x4Kernel and @ref NEGEMMTranspose1xWKernel + * @param[in] is_interleaved (Optional) True if input0 and input1 have been reshaped respectively using @ref cpu::kernels::CpuGemmInterleave4x4Kernel and @ref cpu::kernels::CpuGemmTranspose1xWKernel * @param[in] reshape_info (Optional) GEMM reshape info. If is_interleaved_transposed = true, this object must contain the information to understand how the matrix A and matrix B have been reshaped */ void configure(const ITensor *input0, const ITensor *input1, ITensor *output, float alpha, bool is_interleaved, const GEMMReshapeInfo &reshape_info = GEMMReshapeInfo()); @@ -74,7 +74,7 @@ public: * If the output tensor is a vector, input1 must contain the matrix B not reshaped. Data type supported: same as @p input0 * @param[in] output Output tensor to store the result of matrix multiplication. Data type supported: same as @p input0. * @param[in] alpha Weight of the matrix product - * @param[in] is_interleaved (Optional) True if input0 and input1 have been reshaped respectively using @ref NEGEMMInterleave4x4Kernel and @ref NEGEMMTranspose1xWKernel + * @param[in] is_interleaved (Optional) True if input0 and input1 have been reshaped respectively using @ref cpu::kernels::CpuGemmInterleave4x4Kernel and @ref cpu::kernels::CpuGemmTranspose1xWKernel * @param[in] reshape_info (Optional) GEMM reshape info. 
If is_interleaved_transposed = true, this object must contain the information to understand how the matrix A and matrix B have been reshaped * * @return a status diff --git a/src/core/NEON/kernels/NEGEMMTranspose1xWKernel.cpp b/src/core/NEON/kernels/NEGEMMTranspose1xWKernel.cpp deleted file mode 100644 index 20b0cabd1f..0000000000 --- a/src/core/NEON/kernels/NEGEMMTranspose1xWKernel.cpp +++ /dev/null @@ -1,144 +0,0 @@ -/* - * Copyright (c) 2016-2021 Arm Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ -#include "src/core/NEON/kernels/NEGEMMTranspose1xWKernel.h" - -#include "arm_compute/core/ITensor.h" -#include "arm_compute/core/TensorInfo.h" -#include "arm_compute/core/Validate.h" -#include "arm_compute/core/Window.h" -#include "src/core/NEON/INEKernel.h" -#include "src/core/helpers/AutoConfiguration.h" -#include "src/core/helpers/WindowHelpers.h" - -#include - -namespace arm_compute -{ -namespace -{ -TensorShape get_output_shape(const ITensorInfo *input) -{ - TensorShape output_shape{ input->tensor_shape() }; - const size_t transpose_w = 16 / input->element_size(); - output_shape.set(0, input->dimension(1) * transpose_w); - output_shape.set(1, static_cast(std::ceil((input->dimension(0) / static_cast(transpose_w))))); - return output_shape; -} - -Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output) -{ - ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input); - ARM_COMPUTE_RETURN_ERROR_ON(input->data_type() == DataType::UNKNOWN); - //Note: ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(input) is not needed here as this kernel doesn't use CPU FP16 instructions. 
- - if(output->total_size() != 0) - { - ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(output->tensor_shape(), get_output_shape(input)); - ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output); - ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_QUANTIZATION_INFO(input, output); - } - - return Status{}; -} -} // namespace - -void NEGEMMTranspose1xWKernel::configure(const ITensor *input, ITensor *output) -{ - ARM_COMPUTE_ERROR_ON_NULLPTR(input, output); - - // Output tensor auto inizialitation if not yet initialized - auto_init_if_empty(*output->info(), get_output_shape(input->info()), 1, input->info()->data_type()); - - // Perform validate step - ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info())); - - _input = input; - _output = output; - - const size_t vector_size = 16 / input->info()->element_size(); - - // Configure kernel window - Window win = calculate_max_window(*input->info(), Steps(vector_size)); - - INEKernel::configure(win); -} - -Status NEGEMMTranspose1xWKernel::validate(const ITensorInfo *input, const ITensorInfo *output) -{ - ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output)); - - return Status{}; -} - -void NEGEMMTranspose1xWKernel::run(const Window &window, const ThreadInfo &info) -{ - ARM_COMPUTE_UNUSED(info); - ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this); - ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(INESimpleKernel::window(), window); - - /* - * Following an example of how the transposition1xW works when the input data type is F32 - * - * |a00 a01 a02 a03| - * |a10 a11 a12 a13| - * |a20 a21 a22 a23| = | a00 a01 a02 a03 || a10 a11 a12 a13 || a20 a21 a22 a23 || a30 a31 a32 a33 | - * |a30 a31 a32 a33| - * - * The output matrix will have the following shape: [ height * W, ceil(width / W) ], where W = (16 / element size of the tensor) - */ - - // Set window for output tensor. Set to 0 the X and Y dimensions in order to allow multi-threading implementation and future batched matrix multiplications - Window win_out(window); - win_out.set(Window::DimX, Window::Dimension(0, 0, 0)); - win_out.set(Window::DimY, Window::Dimension(0, 0, 0)); - - Iterator in(_input, window); - Iterator out(_output, win_out); - - const size_t in_width = _input->info()->dimension(0); - const size_t element_size = _input->info()->element_size(); - const size_t out_stride = _output->info()->strides_in_bytes()[1]; - const size_t vector_size = 16 / element_size; - - execute_window_loop(window, [&](const Coordinates & id) - { - const uint8_t *in_ptr = in.ptr(); - uint8_t *const out_ptr = out.ptr() + (id.y() * vector_size) * element_size + (id.x() / vector_size) * out_stride; - - for(size_t k = 0; k < vector_size; ++k) - { - // If the input width is not multiple of W, we fill the reference with 0s - if((id.x() + k) >= in_width) - { - std::memset(out_ptr + k * element_size, 0, element_size); - } - else - { - std::memcpy(out_ptr + k * element_size, in_ptr + k * element_size, element_size); - } - } - }, - in, out); -} -} // namespace arm_compute diff --git a/src/core/NEON/kernels/NEGEMMTranspose1xWKernel.h b/src/core/NEON/kernels/NEGEMMTranspose1xWKernel.h deleted file mode 100644 index 7ca71cf414..0000000000 --- a/src/core/NEON/kernels/NEGEMMTranspose1xWKernel.h +++ /dev/null @@ -1,107 +0,0 @@ -/* - * Copyright (c) 2016-2021 Arm Limited. 
- * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ -#ifndef ARM_COMPUTE_NEGEMMTRANSPOSE1xWKERNEL_H -#define ARM_COMPUTE_NEGEMMTRANSPOSE1xWKERNEL_H - -#include "src/core/NEON/INESimpleKernel.h" - -namespace arm_compute -{ -// Forward declarations -class ITensor; - -/** Kernel which transposes the elements of a matrix in chunks of 1xW, where W is equal to (16 / element size of the tensor) - * - * Following an example of how the transposition1xW works when the input data is F32 - * - * @f[ - * \left( \begin{array}{cccc} - * a00 & a01 & a02 & a03 \\ - * a10 & a11 & a12 & a13 \\ - * a20 & a21 & a22 & a23 \\ - * a30 & a31 & a32 & a33 \\ - * \end{array} \right) - * \rightarrow - * \left( \begin{array}{ccccccccccccccccc} - * a00 & a01 & a02 & a03 & a10 & a11 & a12 & a13 & a20 & a21 & a22 & a23 & a30 & a31 & a32 & a33 \\ - * \end{array} \right) - * @f] - * - * Following an example of how the transposition1xW works when the input data type is F16 - * - * @f[ - * \left( \begin{array}{cccccccc} - * a00 & a01 & a02 & a03 & a04 & a05 & a06 & a07 \\ - * a10 & a11 & a12 & a13 & a14 & a15 & a16 & a17 \\ - * a20 & a21 & a22 & a23 & a24 & a25 & a26 & a27 \\ - * a30 & a31 & a32 & a33 & a34 & a35 & a36 & a37 \\ - * \end{array} \right) - * \rightarrow - * \left( \begin{array}{cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc} - * a00 & a01 & a02 & a03 & a04 & a05 & a06 & a07 & a10 & a11 & a12 & a13 & a14 & a15 & a16 & a17 & a20 & a21 & a22 & a23 & a24 & a25 & a26 & a27 & a30 & a31 & a32 & a33 & a34 & a35 & a36 & a37\\ - * \end{array} \right) - * @f] - * - * @note The output matrix will have the following shape: [ height * W, ceil(width / W) ], where W = (16 / element size of the tensor) - * - */ -class NEGEMMTranspose1xWKernel : public INESimpleKernel -{ -public: - const char *name() const override - { - return "NEGEMMTranspose1xWKernel"; - } - /** Constructor */ - NEGEMMTranspose1xWKernel() = default; - /** Prevent instances of this class from being copied (As this class contains pointers) */ - NEGEMMTranspose1xWKernel(const NEGEMMTranspose1xWKernel &) = delete; - /** Prevent instances of this class from being copied (As this class contains pointers) */ - NEGEMMTranspose1xWKernel &operator=(const NEGEMMTranspose1xWKernel &) = delete; - /** Allow instances of this class to be moved */ - NEGEMMTranspose1xWKernel(NEGEMMTranspose1xWKernel &&) = default; - /** Allow instances of this class to be moved */ - NEGEMMTranspose1xWKernel 
&operator=(NEGEMMTranspose1xWKernel &&) = default;
-    /** Default destructor */
-    ~NEGEMMTranspose1xWKernel() = default;
-    /** Initialise the kernel's input and output.
-     *
-     * @param[in]  input  Input tensor. Data types supported: All
-     * @param[out] output Output tensor. Data type supported: same as @p input.
-     */
-    void configure(const ITensor *input, ITensor *output);
-    /** Static function to check if given info will lead to a valid configuration of @ref NEGEMMTranspose1xWKernel
-     *
-     * @param[in] input  Input tensor info. Data types supported: All
-     * @param[in] output Output tensor info. Data type supported: same as @p input.
-     *
-     * @return a status
-     */
-    static Status validate(const ITensorInfo *input, const ITensorInfo *output);
-
-    // Inherited methods overridden:
-    void run(const Window &window, const ThreadInfo &info) override;
-};
-} // namespace arm_compute
-#endif /*ARM_COMPUTE_NEGEMMTRANSPOSE1xWKERNEL_H */
-- cgit v1.2.1
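
For reference, a minimal standalone sketch of the two layout transforms performed by the removed NEGEMMInterleave4x4Kernel and NEGEMMTranspose1xWKernel (and, per the updated comments above, by their cpu::kernels::CpuGemmInterleave4x4Kernel / CpuGemmTranspose1xWKernel successors). It assumes plain row-major std::vector buffers and scalar code; the function names and signatures are illustrative only and are not part of the library.

#include <cstddef>
#include <vector>

// Interleave 4x4: groups of 4 consecutive rows are merged into one output row,
// element by element, so the output is 4x wider and has ceil(height / 4) rows.
// Partial groups are zero padded, matching the shape checked in validate_arguments().
template <typename T>
std::vector<T> interleave_4x4(const std::vector<T> &in, std::size_t width, std::size_t height)
{
    const std::size_t out_rows = (height + 3) / 4;
    std::vector<T>    out(out_rows * width * 4, T(0));
    for(std::size_t y = 0; y < height; ++y)
    {
        for(std::size_t x = 0; x < width; ++x)
        {
            out[(y / 4) * width * 4 + x * 4 + (y % 4)] = in[y * width + x];
        }
    }
    return out;
}

// Transpose 1xW: each row is cut into chunks of W = 16 / sizeof(T) elements and chunk i of
// every row is written contiguously into output row i, giving [ height * W, ceil(width / W) ]
// with zero padding of the last chunk, as described in the kernel documentation.
template <typename T>
std::vector<T> transpose_1xW(const std::vector<T> &in, std::size_t width, std::size_t height)
{
    const std::size_t W        = 16 / sizeof(T);
    const std::size_t out_cols = height * W;
    const std::size_t out_rows = (width + W - 1) / W;
    std::vector<T>    out(out_rows * out_cols, T(0));
    for(std::size_t y = 0; y < height; ++y)
    {
        for(std::size_t x = 0; x < width; ++x)
        {
            out[(x / W) * out_cols + y * W + (x % W)] = in[y * width + x];
        }
    }
    return out;
}

For a row-major MxK matrix a and KxN matrix b of float, interleave_4x4<float>(a, K, M) and transpose_1xW<float>(b, N, K) produce roughly the layouts that NEGEMMMatrixMultiplyKernel documents for input0 and input1 when is_interleaved is true.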
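
Similarly, the finalisation step that the removed NEGEMMMatrixAdditionKernel implemented with NEON intrinsics (MTX_OUT = MTX_0 + beta * MTX_1) reduces to the scalar sketch below: dst holds alpha * A * B and is updated in place with beta * C, and the whole step is skipped when beta is zero, as in the kernel's run() method. The function name and signature are illustrative, not library code.

#include <cstddef>

// In-place dst += beta * src over 'count' elements; dst holds alpha * A * B, src holds matrix C.
void gemm_matrix_addition_ref(const float *src, float *dst, std::size_t count, float beta)
{
    if(beta == 0.0f)
    {
        return; // mirrors run(), which only invokes the addition function when _beta != 0
    }
    for(std::size_t i = 0; i < count; ++i)
    {
        dst[i] += beta * src[i];
    }
}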