path: root/src/core
author    Michele Di Giorgio <michele.digiorgio@arm.com>    2021-06-21 12:00:43 +0100
committer Michele Di Giorgio <michele.digiorgio@arm.com>    2021-06-29 13:29:01 +0000
commit    93b75e0c072c3cc5654fcdf6aed1068b40012081 (patch)
tree      08acbf1bcafaa326bea1d8e472ad66b955c7c17f /src/core
parent    5fdde99f4271891a40c02cd1e89f1344aa84583a (diff)
download  ComputeLibrary-93b75e0c072c3cc5654fcdf6aed1068b40012081.tar.gz
Port NEGEMM to memory injecting interface (Part 1)
- Start porting NEGEMM to the new API
- Port NEGEMMInterleave4x4Kernel to the new API
- Port NEGEMMMatrixAdditionKernel to the new API
- Port NEGEMMTranspose1xWKernel to the new API
- Remove padding from NEGEMMMatrixAdditionKernel
- Remove unused INESimpleKernel and ICPPSimpleKernel

Partially resolves: COMPMID-4402

Change-Id: I63edadddfe00a54586e5384d6a0211db25ae9042
Signed-off-by: Michele Di Giorgio <michele.digiorgio@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/5857
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Diffstat (limited to 'src/core')
-rw-r--r-- | src/core/CPP/ICPPSimpleKernel.cpp | 75
-rw-r--r-- | src/core/NEON/INESimpleKernel.h | 34
-rw-r--r-- | src/core/NEON/NEKernels.h | 3
-rw-r--r-- | src/core/NEON/kernels/NEGEMMInterleave4x4Kernel.h | 112
-rw-r--r-- | src/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.cpp | 2
-rw-r--r-- | src/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.h | 2
-rw-r--r-- | src/core/NEON/kernels/NEGEMMMatrixAdditionKernel.cpp | 164
-rw-r--r-- | src/core/NEON/kernels/NEGEMMMatrixMultiplyKernel.h | 8
-rw-r--r-- | src/core/cpu/kernels/CpuGemmInterleave4x4Kernel.cpp (renamed from src/core/NEON/kernels/NEGEMMInterleave4x4Kernel.cpp) | 163
-rw-r--r-- | src/core/cpu/kernels/CpuGemmInterleave4x4Kernel.h | 93
-rw-r--r-- | src/core/cpu/kernels/CpuGemmMatrixAdditionKernel.cpp | 200
-rw-r--r-- | src/core/cpu/kernels/CpuGemmMatrixAdditionKernel.h (renamed from src/core/NEON/kernels/NEGEMMMatrixAdditionKernel.h) | 65
-rw-r--r-- | src/core/cpu/kernels/CpuGemmTranspose1xWKernel.cpp (renamed from src/core/NEON/kernels/NEGEMMTranspose1xWKernel.cpp) | 97
-rw-r--r-- | src/core/cpu/kernels/CpuGemmTranspose1xWKernel.h (renamed from src/core/NEON/kernels/NEGEMMTranspose1xWKernel.h) | 55
14 files changed, 472 insertions(+), 601 deletions(-)
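Context for the per-file diffs below: the "memory injecting" interface configures kernels from ITensorInfo descriptors only, and hands them the actual tensor memory at run time through an ITensorPack. A minimal sketch of the call sequence this patch moves NEGEMM towards; the wrapper function and variable names are illustrative, not part of the change:

```cpp
#include "arm_compute/core/ITensor.h"
#include "arm_compute/core/ITensorPack.h"
#include "arm_compute/core/Window.h"
#include "arm_compute/core/experimental/Types.h" // TensorType
#include "arm_compute/runtime/NEON/NEScheduler.h"
#include "src/core/cpu/kernels/CpuGemmInterleave4x4Kernel.h"

using namespace arm_compute;

// Illustrative helper: configure once from metadata, inject memory per run.
void run_interleave(const ITensor *a, ITensor *a_interleaved)
{
    cpu::kernels::CpuGemmInterleave4x4Kernel kernel;
    // configure() now takes ITensorInfo*: no ITensor pointers are captured,
    // so the kernel carries no state tied to a particular allocation.
    kernel.configure(a->info(), a_interleaved->info());

    // The backing memory is "injected" at run time through a tensor pack.
    ITensorPack pack;
    pack.add_const_tensor(TensorType::ACL_SRC, a);
    pack.add_tensor(TensorType::ACL_DST, a_interleaved);
    NEScheduler::get().schedule_op(&kernel, Window::DimY, kernel.window(), pack);
}
```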
diff --git a/src/core/CPP/ICPPSimpleKernel.cpp b/src/core/CPP/ICPPSimpleKernel.cpp
deleted file mode 100644
index 9e4df5ec8a..0000000000
--- a/src/core/CPP/ICPPSimpleKernel.cpp
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
- * Copyright (c) 2016-2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "arm_compute/core/CPP/ICPPSimpleKernel.h"
-
-#include "arm_compute/core/Helpers.h"
-#include "arm_compute/core/IAccessWindow.h"
-#include "arm_compute/core/ITensor.h"
-#include "src/core/helpers/WindowHelpers.h"
-
-namespace arm_compute
-{
-namespace
-{
-std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, ITensorInfo *output, unsigned int num_elems_processed_per_iteration,
- bool border_undefined, const arm_compute::BorderSize &border_size)
-{
- // Configure kernel window
- Window win = calculate_max_window(*input, Steps(num_elems_processed_per_iteration), border_undefined, border_size);
- AccessWindowHorizontal input_access(input, 0, num_elems_processed_per_iteration);
- AccessWindowHorizontal output_access(output, 0, num_elems_processed_per_iteration);
-
- bool window_changed = update_window_and_padding(win, input_access, output_access);
-
- output_access.set_valid_region(win, input->valid_region(), border_undefined, border_size);
-
- Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
- return std::make_pair(err, win);
-}
-} // namespace
-
-ICPPSimpleKernel::ICPPSimpleKernel()
- : _input{ nullptr }, _output{ nullptr }
-{
-}
-
-void ICPPSimpleKernel::configure(const ITensor *input, ITensor *output, unsigned int num_elems_processed_per_iteration, bool border_undefined, const BorderSize &border_size)
-{
- _input = input;
- _output = output;
-
- // Configure kernel window
- auto win_config = validate_and_configure_window(input->info(), output->info(), num_elems_processed_per_iteration, border_undefined, border_size);
- ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
- ICPPKernel::configure(win_config.second);
-}
-
-Status ICPPSimpleKernel::validate(const ITensorInfo *input, const ITensorInfo *output, unsigned int num_elems_processed_per_iteration,
- bool border_undefined, const arm_compute::BorderSize &border_size)
-{
- ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(input->clone().get(), output->clone().get(), num_elems_processed_per_iteration, border_undefined, border_size).first);
- return Status{};
-}
-
-} // namespace arm_compute
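The class deleted above existed mainly to run update_window_and_padding() and fail with "Insufficient Padding!". The ported kernels drop that machinery: they ask for an exact window and handle row tails in a scalar leftover loop at run time (visible in CpuGemmMatrixAdditionKernel.cpp further down). A standalone sketch of that tail-safe idiom, with an illustrative function name:

```cpp
#include <arm_neon.h>

// Tail-safe scale-accumulate: out[i] += in[i] * beta for i in [0, len),
// with no requirement that the buffers be padded to a multiple of 4.
void scale_accumulate_f32(const float *in, float *out, int len, float beta)
{
    const float32x4_t beta_v = vdupq_n_f32(beta);
    int x = 0;
    for(; x <= len - 4; x += 4) // full 4-float vectors
    {
        float32x4_t acc = vld1q_f32(out + x);
        acc = vmlaq_f32(acc, vld1q_f32(in + x), beta_v); // acc += c * beta
        vst1q_f32(out + x, acc);
    }
    for(; x < len; ++x) // leftover 0-3 elements, handled scalar instead of via padding
    {
        out[x] += in[x] * beta;
    }
}
```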
diff --git a/src/core/NEON/INESimpleKernel.h b/src/core/NEON/INESimpleKernel.h
deleted file mode 100644
index 2986e7b5c9..0000000000
--- a/src/core/NEON/INESimpleKernel.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * Copyright (c) 2016-2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_INESIMPLEKERNEL_H
-#define ARM_COMPUTE_INESIMPLEKERNEL_H
-
-#include "arm_compute/core/CPP/ICPPSimpleKernel.h"
-
-namespace arm_compute
-{
-/** Interface for simple CPU kernels having 1 tensor input and 1 tensor output */
-using INESimpleKernel = ICPPSimpleKernel;
-} // namespace arm_compute
-#endif /*ARM_COMPUTE_INESIMPLEKERNEL_H */
diff --git a/src/core/NEON/NEKernels.h b/src/core/NEON/NEKernels.h
index 268871a4e8..0f7475c0b5 100644
--- a/src/core/NEON/NEKernels.h
+++ b/src/core/NEON/NEKernels.h
@@ -41,14 +41,11 @@
#include "src/core/NEON/kernels/NEFFTScaleKernel.h"
#include "src/core/NEON/kernels/NEFillBorderKernel.h"
#include "src/core/NEON/kernels/NEFuseBatchNormalizationKernel.h"
-#include "src/core/NEON/kernels/NEGEMMInterleave4x4Kernel.h"
#include "src/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.h"
#include "src/core/NEON/kernels/NEGEMMLowpOffsetContributionKernel.h"
#include "src/core/NEON/kernels/NEGEMMLowpOffsetContributionOutputStageKernel.h"
#include "src/core/NEON/kernels/NEGEMMLowpReductionKernel.h"
-#include "src/core/NEON/kernels/NEGEMMMatrixAdditionKernel.h"
#include "src/core/NEON/kernels/NEGEMMMatrixMultiplyKernel.h"
-#include "src/core/NEON/kernels/NEGEMMTranspose1xWKernel.h"
#include "src/core/NEON/kernels/NEGatherKernel.h"
#include "src/core/NEON/kernels/NEGenerateProposalsLayerKernel.h"
#include "src/core/NEON/kernels/NEIm2ColKernel.h"
diff --git a/src/core/NEON/kernels/NEGEMMInterleave4x4Kernel.h b/src/core/NEON/kernels/NEGEMMInterleave4x4Kernel.h
deleted file mode 100644
index e592d5ef6e..0000000000
--- a/src/core/NEON/kernels/NEGEMMInterleave4x4Kernel.h
+++ /dev/null
@@ -1,112 +0,0 @@
-/*
- * Copyright (c) 2016-2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_NEGEMMINTERLEAVE4x4KERNEL_H
-#define ARM_COMPUTE_NEGEMMINTERLEAVE4x4KERNEL_H
-
-#include "src/core/NEON/INESimpleKernel.h"
-
-namespace arm_compute
-{
-class ITensor;
-
-/** Kernel to interleave the elements of a matrix
- *
- * This function puts the values in a 4x4 block of Matrix A on the same row (Interleaved values)
- *
- * @f[
- * \left( \begin{array}{cccc}
- * a00 & a01 & a02 & a03 \\
- * a10 & a11 & a12 & a13 \\
- * a20 & a21 & a22 & a23 \\
- * a30 & a31 & a32 & a33 \\
- * \end{array} \right)
- * \rightarrow
- * \left( \begin{array}{ccccccccccccccccc}
- * a00 & a10 & a20 & a30 & a01 & a11 & a21 & a31 & a02 & a12 & a22 & a32 & a03 & a13 & a23 & a33 \\
- * \end{array} \right)
- * @f]
- *
- * After this operation, the output matrix will have the following shape: [ height * 4, ceil(width / 4.0f) ]
- */
-class NEGEMMInterleave4x4Kernel : public INESimpleKernel
-{
-public:
- const char *name() const override
- {
- return "NEGEMMInterleave4x4Kernel";
- }
- /** Constructor */
- NEGEMMInterleave4x4Kernel();
- /** Prevent instances of this class from being copied (As this class contains pointers) */
- NEGEMMInterleave4x4Kernel(const NEGEMMInterleave4x4Kernel &) = delete;
- /** Prevent instances of this class from being copied (As this class contains pointers) */
- NEGEMMInterleave4x4Kernel &operator=(const NEGEMMInterleave4x4Kernel &) = delete;
- /** Allow instances of this class to be moved */
- NEGEMMInterleave4x4Kernel(NEGEMMInterleave4x4Kernel &&) = default;
- /** Allow instances of this class to be moved */
- NEGEMMInterleave4x4Kernel &operator=(NEGEMMInterleave4x4Kernel &&) = default;
- /** Default destructor */
- ~NEGEMMInterleave4x4Kernel() = default;
- /** Initialise the kernel's input and output.
- *
- * @param[in] input Input tensor. Data types supported: All
- * @param[out] output Output tensor which stores the interleaved matrix. Data type supported: same as @p input.
- */
- void configure(const ITensor *input, ITensor *output);
- /** Static function to check if given info will lead to a valid configuration of @ref NEGEMMInterleave4x4Kernel
- *
- * @param[in] input Input tensor info. Data types supported: All
- * @param[in] output Output tensor info which stores the interleaved matrix. Data type supported: same as @p input.
- *
- * @return a status
- */
- static Status validate(const ITensorInfo *input, const ITensorInfo *output);
-
- // Inherited methods overridden:
- void run(const Window &window, const ThreadInfo &info) override;
-
-private:
- /** Template function to run gemm interleave 4x4
- *
- * @tparam ScalarType Scalar datatype
- *
- * @param[in] input Input tensor. Data types supported: uint32_t, uint16_t and uint8_t
- * @param[out] output Output tensor. Data types supported: uint32_t, uint16_t and uint8_t
- * @param[in] window Region on which to execute the kernel. (Must be a valid region of the window returned by window()).
- */
- template <typename ScalarType>
- void gemm_interleave4x4(const ITensor *input, ITensor *output, const Window &window);
-
- /** Common signature for all the specialised gemm interleave 4x4 functions
- *
- * @param[in] input Input tensor. Data types supported: uint32_t, uint16_t and uint8_t
- * @param[out] output Output tensor. Data types supported: uint32_t, uint16_t and uint8_t
- * @param[in] window Region on which to execute the kernel. (Must be a valid region of the window returned by window()).
- */
- using GEMMInterleaveFunctionFuncPtr = void (NEGEMMInterleave4x4Kernel::*)(const ITensor *input, ITensor *output, const Window &window);
-
- GEMMInterleaveFunctionFuncPtr _func;
-};
-} // namespace arm_compute
-#endif /*ARM_COMPUTE_NEGEMMINTERLEAVE4x4KERNEL_H*/
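To make the shape rule in the removed header concrete: a [width, height] = [8, 6] F32 input interleaves to [8 * 4, ceil(6 / 4)] = [32, 2]. A hypothetical check against the ported kernel, which auto-initializes an empty dst (see its .cpp below); the function name is illustrative:

```cpp
#include "arm_compute/core/Error.h"
#include "arm_compute/core/TensorInfo.h"
#include "src/core/cpu/kernels/CpuGemmInterleave4x4Kernel.h"

using namespace arm_compute;

void interleave_shape_example()
{
    TensorInfo src(TensorShape(8U, 6U), 1, DataType::F32); // 8 columns, 6 rows
    TensorInfo dst;                                        // deliberately left empty

    cpu::kernels::CpuGemmInterleave4x4Kernel kernel;
    kernel.configure(&src, &dst); // auto-initializes dst from src

    // [8, 6] -> [8 * 4, ceil(6 / 4)] = [32, 2]
    ARM_COMPUTE_ERROR_ON(dst.tensor_shape() != TensorShape(32U, 2U));
}
```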
diff --git a/src/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.cpp b/src/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.cpp
index b95bdd4ca5..6bcf59ee96 100644
--- a/src/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.cpp
+++ b/src/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.cpp
@@ -686,7 +686,7 @@ void inline matrix_multiply_s8(Iterator &ina, Iterator &inb, Iterator &out, int
const auto width_out = static_cast<int>(out_info.dimension(0));
const auto height_out = static_cast<int>(out_info.dimension(1));
const size_t out_stride = out_info.strides_in_bytes()[1] / out_info.element_size();
- // The implementation assumes that the matrix A and Matrix B have been reshaped respectively with NEGEMMInterleave4x4 and NEGEMMTranspose1xW
+ // The implementation assumes that the matrix A and Matrix B have been reshaped respectively with CpuGemmInterleave4x4 and CpuGemmTranspose1xW
// The reshaping of the matrices helps to have a cache friendly implementation and helps to avoid the data re-arrangements needed for computing 16x4 elements per iteration
// All the values needed for computing a single 4x4 block will be read from consecutive memory positions
execute_window_loop(window, [&](const Coordinates & id)
diff --git a/src/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.h b/src/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.h
index acfb79edeb..b9a1b5e840 100644
--- a/src/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.h
+++ b/src/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.h
@@ -61,7 +61,7 @@ public:
~NEGEMMLowpMatrixMultiplyKernel() = default;
/** Initialise the kernel's input and output.
*
- * The input matrices @p input0 and @p input1 must be the output of the kernels: @ref NEGEMMInterleave4x4Kernel and @ref NEGEMMTranspose1xWKernel. These two
+ * The input matrices @p input0 and @p input1 must be the output of the kernels: @ref cpu::kernels::CpuGemmInterleave4x4Kernel and @ref cpu::kernels::CpuGemmTranspose1xWKernel. These two
* kernels change the layout of the original matrices to be more cache-friendly.
*
* @param[in] input0 Input tensor containing the interleaved Matrix A. Data type supported: U8/QASYMM8/S8/QASYMM8_SIGNED
diff --git a/src/core/NEON/kernels/NEGEMMMatrixAdditionKernel.cpp b/src/core/NEON/kernels/NEGEMMMatrixAdditionKernel.cpp
deleted file mode 100644
index 6a2802a991..0000000000
--- a/src/core/NEON/kernels/NEGEMMMatrixAdditionKernel.cpp
+++ /dev/null
@@ -1,164 +0,0 @@
-/*
- * Copyright (c) 2016-2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "src/core/NEON/kernels/NEGEMMMatrixAdditionKernel.h"
-
-#include "arm_compute/core/Error.h"
-#include "arm_compute/core/Helpers.h"
-#include "arm_compute/core/Types.h"
-#include "arm_compute/core/Validate.h"
-#include "src/core/CPP/Validate.h"
-#include "src/core/NEON/NEFixedPoint.h"
-#include "src/core/helpers/AutoConfiguration.h"
-#include "src/core/helpers/WindowHelpers.h"
-
-#include <arm_neon.h>
-
-namespace arm_compute
-{
-namespace
-{
-constexpr unsigned int num_elems_processed_per_iteration = 16;
-
-Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, float beta)
-{
- ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output);
- ARM_COMPUTE_UNUSED(beta);
-
- ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(input);
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F16, DataType::F32);
-
- if(output->total_size() > 0)
- {
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input, output);
- }
-
- return Status{};
-}
-
-void matrix_addition_f32(const ITensor *input, ITensor *output, const Window &window, float beta)
-{
- const float32x4_t beta_f32 = vdupq_n_f32(beta);
-
- Iterator in(input, window);
- Iterator out(output, window);
-
- execute_window_loop(window, [&](const Coordinates &)
- {
- const auto in_ptr = reinterpret_cast<const float *>(in.ptr());
- const auto out_ptr = reinterpret_cast<float *>(out.ptr());
-
- float32x4x4_t alpha_ab = vld4q_f32(out_ptr);
- const float32x4x4_t c = vld4q_f32(in_ptr);
-
- // Multiply matrix C by its weight and accumulate
- alpha_ab.val[0] = vmlaq_f32(alpha_ab.val[0], c.val[0], beta_f32);
- alpha_ab.val[1] = vmlaq_f32(alpha_ab.val[1], c.val[1], beta_f32);
- alpha_ab.val[2] = vmlaq_f32(alpha_ab.val[2], c.val[2], beta_f32);
- alpha_ab.val[3] = vmlaq_f32(alpha_ab.val[3], c.val[3], beta_f32);
-
- vst4q_f32(out_ptr, alpha_ab);
- },
- in, out);
-}
-
-#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
-void matrix_addition_f16(const ITensor *input, ITensor *output, const Window &window, float beta)
-{
- const float16x8_t beta_f16 = vdupq_n_f16(beta);
-
- Iterator in(input, window);
- Iterator out(output, window);
-
- execute_window_loop(window, [&](const Coordinates &)
- {
- const auto in_ptr = reinterpret_cast<const float16_t *>(in.ptr());
- const auto out_ptr = reinterpret_cast<float16_t *>(out.ptr());
-
- float16x8x2_t alpha_ab = vld2q_f16(out_ptr);
- const float16x8x2_t c = vld2q_f16(in_ptr);
- // Multiply matrix C by its weight and accumulate
- alpha_ab.val[0] = vaddq_f16(alpha_ab.val[0], vmulq_f16(c.val[0], beta_f16));
- alpha_ab.val[1] = vaddq_f16(alpha_ab.val[1], vmulq_f16(c.val[1], beta_f16));
-
- vst2q_f16(out_ptr + 0, alpha_ab);
- },
- in, out);
-}
-#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
-
-} // namespace
-
-NEGEMMMatrixAdditionKernel::NEGEMMMatrixAdditionKernel()
- : INESimpleKernel(), _func(nullptr), _beta(0.0f)
-{
-}
-
-void NEGEMMMatrixAdditionKernel::configure(const ITensor *input, ITensor *output, float beta)
-{
- ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
-
- // Perform validation step
- ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), beta));
-
- switch(input->info()->data_type())
- {
- case DataType::F32:
- _func = &matrix_addition_f32;
- break;
- case DataType::F16:
-#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
- _func = &matrix_addition_f16;
- break;
-#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
- default:
- ARM_COMPUTE_ERROR("Data type not supported");
- break;
- }
-
- // Configure kernel window
- INESimpleKernel::configure(input, output, num_elems_processed_per_iteration);
-
- _beta = beta;
-}
-
-Status NEGEMMMatrixAdditionKernel::validate(const ITensorInfo *input, const ITensorInfo *output, float beta)
-{
- ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output, beta));
- ARM_COMPUTE_RETURN_ON_ERROR(INESimpleKernel::validate(input->clone().get(), output->clone().get(), num_elems_processed_per_iteration));
- return Status{};
-}
-
-void NEGEMMMatrixAdditionKernel::run(const Window &window, const ThreadInfo &info)
-{
- ARM_COMPUTE_UNUSED(info);
- ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
- ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(INESimpleKernel::window(), window);
-
- if(_beta != 0.0f)
- {
- (*_func)(_input, _output, window, _beta);
- }
-}
-} // namespace arm_compute
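In the GEMM decomposition this kernel is the beta term of dst = alpha * A * B + beta * C: dst already holds alpha * (A * B) and is updated in place. A scalar reference of the same contract, including the beta == 0 early-out that run() performs above (names illustrative):

```cpp
#include <cstddef>

// Reference semantics: dst[i] += beta * c[i]; skipped entirely when beta == 0,
// exactly as the kernel's run() does above.
void gemm_beta_reference(const float *c, float *dst, std::size_t elements, float beta)
{
    if(beta == 0.0f)
    {
        return;
    }
    for(std::size_t i = 0; i < elements; ++i)
    {
        dst[i] += beta * c[i];
    }
}
```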
diff --git a/src/core/NEON/kernels/NEGEMMMatrixMultiplyKernel.h b/src/core/NEON/kernels/NEGEMMMatrixMultiplyKernel.h
index 3bc162a1b4..4341ff00df 100644
--- a/src/core/NEON/kernels/NEGEMMMatrixMultiplyKernel.h
+++ b/src/core/NEON/kernels/NEGEMMMatrixMultiplyKernel.h
@@ -32,7 +32,7 @@ class ITensor;
/** Kernel to multiply two input matrices "A" and "B". All elements of the output matrix/vector will be multiplied by alpha after the matrix multiplication
*
- * @note If the output tensor is a matrix, the implementation assumes that the input tensors @p input0 and @p input1 are both matrices and reshaped respectively with @ref NEGEMMInterleave4x4Kernel" and @ref NEGEMMTranspose1xWKernel
+ * @note If the output tensor is a matrix, the implementation assumes that the input tensors @p input0 and @p input1 are both matrices and reshaped respectively with @ref cpu::kernels::CpuGemmInterleave4x4Kernel and @ref cpu::kernels::CpuGemmTranspose1xWKernel
* @note If the output tensor is a vector and the data type is F32, the implementation assumes that the first input tensor @p input0 is a vector and the second input tensor @p input1 a matrix. The implementation also assumes that both tensors have not been reshaped
*
*/
@@ -55,7 +55,7 @@ public:
NEGEMMMatrixMultiplyKernel &operator=(NEGEMMMatrixMultiplyKernel &&) = default;
/** Initialise the kernel's input and output.
*
- * @note If the output tensor is a matrix, the input matrices @p input0 and @p input1 should be the output of the kernels: @ref NEGEMMInterleave4x4Kernel and @ref NEGEMMTranspose1xWKernel
+ * @note If the output tensor is a matrix, the input matrices @p input0 and @p input1 should be the output of the kernels: @ref cpu::kernels::CpuGemmInterleave4x4Kernel and @ref cpu::kernels::CpuGemmTranspose1xWKernel
* These two kernels change the layout of the original matrices to be more cache-friendly.
*
* @param[in] input0 Input tensor containing the interleaved Matrix A or the vector A. Data types supported: F16/F32
@@ -63,7 +63,7 @@ public:
* If the output tensor is a vector, input1 must contain the matrix B not reshaped. Data type supported: same as @p input0
* @param[out] output Output tensor to store the result of matrix multiplication. Data type supported: same as @p input0.
* @param[in] alpha Weight of the matrix product
- * @param[in] is_interleaved (Optional) True if input0 and input1 have been reshaped respectively using @ref NEGEMMInterleave4x4Kernel and @ref NEGEMMTranspose1xWKernel
+ * @param[in] is_interleaved (Optional) True if input0 and input1 have been reshaped respectively using @ref cpu::kernels::CpuGemmInterleave4x4Kernel and @ref cpu::kernels::CpuGemmTranspose1xWKernel
* @param[in] reshape_info (Optional) GEMM reshape info. If is_interleaved_transposed = true, this object must contain the information to understand how the matrix A and matrix B have been reshaped
*/
void configure(const ITensor *input0, const ITensor *input1, ITensor *output, float alpha, bool is_interleaved, const GEMMReshapeInfo &reshape_info = GEMMReshapeInfo());
@@ -74,7 +74,7 @@ public:
* If the output tensor is a vector, input1 must contain the matrix B not reshaped. Data type supported: same as @p input0
* @param[in] output Output tensor to store the result of matrix multiplication. Data type supported: same as @p input0.
* @param[in] alpha Weight of the matrix product
- * @param[in] is_interleaved (Optional) True if input0 and input1 have been reshaped respectively using @ref NEGEMMInterleave4x4Kernel and @ref NEGEMMTranspose1xWKernel
+ * @param[in] is_interleaved (Optional) True if input0 and input1 have been reshaped respectively using @ref cpu::kernels::CpuGemmInterleave4x4Kernel and @ref cpu::kernels::CpuGemmTranspose1xWKernel
* @param[in] reshape_info (Optional) GEMM reshape info. If is_interleaved_transposed = true, this object must contain the information to understand how the matrix A and matrix B have been reshaped
*
* @return a status
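A sketch of how the updated documentation reads in practice for the reshaped path, using hypothetical F32 shapes for C = A(4x8) * B(8x12) chosen to follow the reshape rules in this patch (W = 4 for the 1xW transpose of F32); whether these exact shapes validate is an assumption here:

```cpp
#include "arm_compute/core/Error.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h" // GEMMReshapeInfo
#include "src/core/NEON/kernels/NEGEMMMatrixMultiplyKernel.h"

using namespace arm_compute;

Status validate_reshaped_mm()
{
    const TensorInfo a(TensorShape(32U, 1U), 1, DataType::F32); // A [8, 4] interleaved 4x4
    const TensorInfo b(TensorShape(32U, 3U), 1, DataType::F32); // B [12, 8] transposed 1x4
    const TensorInfo c(TensorShape(12U, 4U), 1, DataType::F32); // output, N x M
    // is_interleaved = true: both inputs have been reshaped as documented above.
    return NEGEMMMatrixMultiplyKernel::validate(&a, &b, &c, 1.f, true,
                                                GEMMReshapeInfo(4 /* m */, 12 /* n */, 8 /* k */));
}
```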
diff --git a/src/core/NEON/kernels/NEGEMMInterleave4x4Kernel.cpp b/src/core/cpu/kernels/CpuGemmInterleave4x4Kernel.cpp
index 9011680c9b..67f2a490cd 100644
--- a/src/core/NEON/kernels/NEGEMMInterleave4x4Kernel.cpp
+++ b/src/core/cpu/kernels/CpuGemmInterleave4x4Kernel.cpp
@@ -21,118 +21,49 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#include "src/core/NEON/kernels/NEGEMMInterleave4x4Kernel.h"
+#include "src/core/cpu/kernels/CpuGemmInterleave4x4Kernel.h"
-#include "arm_compute/core/Error.h"
-#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/ITensor.h"
-#include "arm_compute/core/Types.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/core/Window.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
-#include "src/core/NEON/INEKernel.h"
#include "src/core/helpers/AutoConfiguration.h"
#include "src/core/helpers/WindowHelpers.h"
#include <arm_neon.h>
-#include <cstddef>
-#include <cstdint>
-#include <tuple>
-using namespace arm_compute;
-using namespace arm_compute::misc::shape_calculator;
-
-namespace
+namespace arm_compute
{
-Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output)
-{
- ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input);
- //Note: ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(input) is not needed here as this kernel doesn't use CPU FP16 instructions.
- ARM_COMPUTE_RETURN_ERROR_ON(input->data_type() == DataType::UNKNOWN);
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
-
- if(output->total_size() != 0)
- {
- TensorShape output_shape = input->tensor_shape();
- output_shape.set(0, input->dimension(0) * 4);
- output_shape.set(1, std::ceil(input->dimension(1) / 4.0f));
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(output->tensor_shape(), output_shape);
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_QUANTIZATION_INFO(input, output);
- }
-
- return Status{};
-}
-} // namespace
-
-NEGEMMInterleave4x4Kernel::NEGEMMInterleave4x4Kernel()
- : _func(nullptr)
+namespace cpu
{
-}
-
-void NEGEMMInterleave4x4Kernel::configure(const ITensor *input, ITensor *output)
+namespace kernels
{
- ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
-
- // Output auto inizialitation if not yet initialized
- auto_init_if_empty(*output->info(), input->info()->clone()->set_tensor_shape(compute_interleaved_shape(*input->info())));
-
- // Perform validate step
- ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info()));
-
- _input = input;
- _output = output;
-
- switch(input->info()->element_size())
- {
- case 1:
- _func = &NEGEMMInterleave4x4Kernel::gemm_interleave4x4<uint8_t>;
- break;
- case 2:
- _func = &NEGEMMInterleave4x4Kernel::gemm_interleave4x4<uint16_t>;
- break;
- case 4:
- _func = &NEGEMMInterleave4x4Kernel::gemm_interleave4x4<uint32_t>;
- break;
- default:
- ARM_COMPUTE_ERROR_ON("Element size not supported");
- break;
- }
-
- Window win = calculate_max_window(*input->info(), Steps(1, 4));
-
- INEKernel::configure(win);
-}
+using namespace arm_compute::misc::shape_calculator;
-Status NEGEMMInterleave4x4Kernel::validate(const ITensorInfo *input, const ITensorInfo *output)
+namespace
{
- ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output));
-
- return Status{};
-}
-
template <typename ScalarType>
-void NEGEMMInterleave4x4Kernel::gemm_interleave4x4(const ITensor *input, ITensor *output, const Window &window)
+void gemm_interleave4x4(const ITensor *src, ITensor *dst, const Window &window)
{
const size_t window_start_x = window.x().start();
const size_t window_end_x = window.x().end();
- const size_t in_height = input->info()->dimension(1);
- const size_t in_stride = input->info()->strides_in_bytes()[1];
+ const size_t in_height = src->info()->dimension(1);
+ const size_t in_stride = src->info()->strides_in_bytes()[1];
const size_t partial_y = in_height % 4;
- // Set window for the input tensor
+ // Set window for the src tensor
Window win = window;
win.set(Window::DimX, Window::Dimension(0, 1, 1));
- // Set window for the output tensor
+ // Set window for the dst tensor
Window win_out(window);
win_out.set(Window::DimX, Window::Dimension(0, 1, 1));
win_out.scale(Window::DimY, 0.25f);
- Iterator in(input, win);
- Iterator out(output, win_out);
+ Iterator in(src, win);
+ Iterator out(dst, win_out);
execute_window_loop(win, [&](const Coordinates & id)
{
@@ -167,13 +98,62 @@ void NEGEMMInterleave4x4Kernel::gemm_interleave4x4(const ITensor *input, ITensor
},
in, out);
}
+} // namespace
-void NEGEMMInterleave4x4Kernel::run(const Window &window, const ThreadInfo &info)
+void CpuGemmInterleave4x4Kernel::configure(const ITensorInfo *src, ITensorInfo *dst)
+{
+ ARM_COMPUTE_ERROR_ON_NULLPTR(src, dst);
+
+ // dst auto initialization if not yet initialized
+ auto_init_if_empty(*dst, src->clone()->set_tensor_shape(compute_interleaved_shape(*src)));
+
+ // Perform validate step
+ ARM_COMPUTE_ERROR_THROW_ON(CpuGemmInterleave4x4Kernel::validate(src, dst));
+
+ switch(src->element_size())
+ {
+ case 1:
+ _func = &gemm_interleave4x4<uint8_t>;
+ break;
+ case 2:
+ _func = &gemm_interleave4x4<uint16_t>;
+ break;
+ case 4:
+ _func = &gemm_interleave4x4<uint32_t>;
+ break;
+ default:
+ ARM_COMPUTE_ERROR_ON("Element size not supported");
+ break;
+ }
+
+ Window win = calculate_max_window(*src, Steps(1, 4));
+ ICPPKernel::configure(win);
+}
+
+Status CpuGemmInterleave4x4Kernel::validate(const ITensorInfo *src, const ITensorInfo *dst)
+{
+ ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, dst);
+ //Note: ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(src) is not needed here as this kernel doesn't use CPU FP16 instructions.
+ ARM_COMPUTE_RETURN_ERROR_ON(src->data_type() == DataType::UNKNOWN);
+
+ if(dst->total_size() != 0)
+ {
+ const TensorShape dst_shape = compute_interleaved_shape(*src);
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(dst->tensor_shape(), dst_shape);
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src, dst);
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_QUANTIZATION_INFO(src, dst);
+ }
+
+ return Status{};
+}
+
+void CpuGemmInterleave4x4Kernel::run_op(ITensorPack &tensors, const Window &window, const ThreadInfo &info)
{
ARM_COMPUTE_UNUSED(info);
ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
- ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(INEKernel::window(), window);
+ ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(IKernel::window(), window);
ARM_COMPUTE_ERROR_ON(_func == nullptr);
+ ARM_COMPUTE_ERROR_ON(tensors.empty());
/*
* This kernel puts the values in a 4x4 block of Matrix A on the same row (Interleaved values)
* |a00 a01 a02 a03|
@@ -181,7 +161,18 @@ void NEGEMMInterleave4x4Kernel::run(const Window &window, const ThreadInfo &info
* |a20 a21 a22 a23| = | a00 a10 a20 a30 || a01 a11 a21 a31 || a02 a12 a22 a32 || a03 a13 a23 a33 |
* |a30 a31 a32 a33|
*
- * After this operation, the output matrix will have the following shape: [ height * 4, ceil(width / 4.0f) ]
+ * After this operation, the dst matrix will have the following shape: [ height * 4, ceil(width / 4.0f) ]
*/
- (this->*_func)(_input, _output, window);
+ const ITensor *src = tensors.get_const_tensor(TensorType::ACL_SRC);
+ ITensor *dst = tensors.get_tensor(TensorType::ACL_DST);
+
+ (*_func)(src, dst, window);
+}
+
+const char *CpuGemmInterleave4x4Kernel::name() const
+{
+ return "CpuGemmInterleave4x4Kernel";
}
+} // namespace kernels
+} // namespace cpu
+} // namespace arm_compute
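run_op() above is what each scheduler worker invokes; outside a scheduler it can also be driven directly. A minimal single-threaded invocation sketch (the helper function is illustrative, not part of the patch):

```cpp
#include "arm_compute/core/CPP/CPPTypes.h" // ThreadInfo
#include "arm_compute/core/ITensorPack.h"
#include "arm_compute/core/experimental/Types.h"
#include "arm_compute/runtime/NEON/NEScheduler.h"
#include "src/core/cpu/kernels/CpuGemmInterleave4x4Kernel.h"

using namespace arm_compute;

// Single-threaded dispatch over the kernel's full window.
void run_interleave_inline(cpu::kernels::CpuGemmInterleave4x4Kernel &kernel,
                           const ITensor *src, ITensor *dst)
{
    ITensorPack pack;
    pack.add_const_tensor(TensorType::ACL_SRC, src);
    pack.add_tensor(TensorType::ACL_DST, dst);

    ThreadInfo info;
    info.cpu_info = &NEScheduler::get().cpu_info();
    kernel.run_op(pack, kernel.window(), info); // one thread, whole window
}
```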
diff --git a/src/core/cpu/kernels/CpuGemmInterleave4x4Kernel.h b/src/core/cpu/kernels/CpuGemmInterleave4x4Kernel.h
new file mode 100644
index 0000000000..94d88c7001
--- /dev/null
+++ b/src/core/cpu/kernels/CpuGemmInterleave4x4Kernel.h
@@ -0,0 +1,93 @@
+/*
+ * Copyright (c) 2016-2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_CPU_GEMM_INTERLEAVE4x4_KERNEL_H
+#define ARM_COMPUTE_CPU_GEMM_INTERLEAVE4x4_KERNEL_H
+
+#include "src/core/common/Macros.h"
+#include "src/core/cpu/ICpuKernel.h"
+
+namespace arm_compute
+{
+namespace cpu
+{
+namespace kernels
+{
+/** Kernel to interleave the elements of a matrix
+ *
+ * This function puts the values in a 4x4 block of Matrix A on the same row (Interleaved values)
+ *
+ * @f[
+ * \left( \begin{array}{cccc}
+ * a00 & a01 & a02 & a03 \\
+ * a10 & a11 & a12 & a13 \\
+ * a20 & a21 & a22 & a23 \\
+ * a30 & a31 & a32 & a33 \\
+ * \end{array} \right)
+ * \rightarrow
+ * \left( \begin{array}{ccccccccccccccccc}
+ * a00 & a10 & a20 & a30 & a01 & a11 & a21 & a31 & a02 & a12 & a22 & a32 & a03 & a13 & a23 & a33 \\
+ * \end{array} \right)
+ * @f]
+ *
+ * After this operation, the dst matrix will have the following shape: [ height * 4, ceil(width / 4.0f) ]
+ */
+class CpuGemmInterleave4x4Kernel : public ICpuKernel
+{
+public:
+ /** Default Constructor */
+ CpuGemmInterleave4x4Kernel() = default;
+ ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(CpuGemmInterleave4x4Kernel);
+ /** Initialise the kernel's src and dst.
+ *
+ * @param[in] src Input tensor info. Data types supported: All
+ * @param[out] dst Output tensor info which stores the interleaved matrix. Data type supported: same as @p src.
+ */
+ void configure(const ITensorInfo *src, ITensorInfo *dst);
+ /** Static function to check if given info will lead to a valid configuration of @ref CpuGemmInterleave4x4Kernel
+ *
+ * Similar to @ref CpuGemmInterleave4x4Kernel::configure()
+ *
+ * @return a status
+ */
+ static Status validate(const ITensorInfo *src, const ITensorInfo *dst);
+
+ // Inherited methods overridden:
+ void run_op(ITensorPack &tensors, const Window &window, const ThreadInfo &info) override;
+ const char *name() const override;
+
+private:
+ /** Common signature for all the specialised gemm interleave 4x4 functions
+ *
+ * @param[in] src Input tensor. Data types supported: uint32_t, uint16_t and uint8_t
+ * @param[out] dst Output tensor. Data types supported: uint32_t, uint16_t and uint8_t
+ * @param[in] window Region on which to execute the kernel. (Must be a valid region of the window returned by window()).
+ */
+ using GEMMInterleaveFunctionPtr = void (*)(const ITensor *src, ITensor *dst, const Window &window);
+
+ GEMMInterleaveFunctionPtr _func{ nullptr };
+};
+} // namespace kernels
+} // namespace cpu
+} // namespace arm_compute
+#endif /*ARM_COMPUTE_CPU_GEMM_INTERLEAVE4x4_KERNEL_H*/
diff --git a/src/core/cpu/kernels/CpuGemmMatrixAdditionKernel.cpp b/src/core/cpu/kernels/CpuGemmMatrixAdditionKernel.cpp
new file mode 100644
index 0000000000..cc39cdfe83
--- /dev/null
+++ b/src/core/cpu/kernels/CpuGemmMatrixAdditionKernel.cpp
@@ -0,0 +1,200 @@
+/*
+ * Copyright (c) 2016-2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "src/core/cpu/kernels/CpuGemmMatrixAdditionKernel.h"
+
+#include "arm_compute/core/Helpers.h"
+#include "arm_compute/core/Types.h"
+#include "arm_compute/core/Validate.h"
+#include "src/core/CPP/Validate.h"
+#include "src/core/NEON/NEFixedPoint.h"
+#include "src/core/helpers/AutoConfiguration.h"
+#include "src/core/helpers/WindowHelpers.h"
+
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace cpu
+{
+namespace kernels
+{
+namespace
+{
+void matrix_addition_f32(const ITensor *src, ITensor *dst, const Window &window, float beta)
+{
+ ARM_COMPUTE_ERROR_ON_NULLPTR(src, dst);
+ const float32x4_t beta_f32 = vdupq_n_f32(beta);
+
+ constexpr int window_step_x = 16;
+ const auto window_start_x = static_cast<int>(window.x().start());
+ const auto window_end_x = static_cast<int>(window.x().end());
+
+ Window win = window.collapse_if_possible(window, Window::DimZ);
+ win.set(Window::DimX, Window::Dimension(0, 1, 1));
+
+ Iterator in(src, win);
+ Iterator out(dst, win);
+
+ execute_window_loop(win, [&](const Coordinates &)
+ {
+ const auto in_ptr = reinterpret_cast<const float *>(in.ptr());
+ const auto out_ptr = reinterpret_cast<float *>(out.ptr());
+
+ int x = window_start_x;
+ for(; x < (window_end_x - window_step_x); x += window_step_x)
+ {
+ float32x4x4_t alpha_ab = vld4q_f32(out_ptr + x);
+ const float32x4x4_t c = vld4q_f32(in_ptr + x);
+
+ // Multiply matrix C by its weight and accumulate
+ alpha_ab.val[0] = vmlaq_f32(alpha_ab.val[0], c.val[0], beta_f32);
+ alpha_ab.val[1] = vmlaq_f32(alpha_ab.val[1], c.val[1], beta_f32);
+ alpha_ab.val[2] = vmlaq_f32(alpha_ab.val[2], c.val[2], beta_f32);
+ alpha_ab.val[3] = vmlaq_f32(alpha_ab.val[3], c.val[3], beta_f32);
+
+ vst4q_f32(out_ptr + x, alpha_ab);
+ }
+
+ // Left-over loop
+ for(; x < window_end_x; ++x)
+ {
+ *(out_ptr + x) += *(in_ptr + x) * beta;
+ }
+ },
+ in, out);
+}
+
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+void matrix_addition_f16(const ITensor *src, ITensor *dst, const Window &window, float beta)
+{
+ ARM_COMPUTE_ERROR_ON_NULLPTR(src, dst);
+ const float16x8_t beta_f16 = vdupq_n_f16(beta);
+
+ constexpr int window_step_x = 16;
+ const auto window_start_x = static_cast<int>(window.x().start());
+ const auto window_end_x = static_cast<int>(window.x().end());
+
+ Window win = window.collapse_if_possible(window, Window::DimZ);
+ win.set(Window::DimX, Window::Dimension(0, 1, 1));
+
+ Iterator in(src, win);
+ Iterator out(dst, win);
+
+ execute_window_loop(win, [&](const Coordinates &)
+ {
+ const auto in_ptr = reinterpret_cast<const float16_t *>(in.ptr());
+ const auto out_ptr = reinterpret_cast<float16_t *>(out.ptr());
+
+ int x = window_start_x;
+ for(; x < (window_end_x - window_step_x); x += window_step_x)
+ {
+ float16x8x2_t alpha_ab = vld2q_f16(out_ptr + x);
+ const float16x8x2_t c = vld2q_f16(in_ptr + x);
+ // Multiply matrix C by its weight and accumulate
+ alpha_ab.val[0] = vaddq_f16(alpha_ab.val[0], vmulq_f16(c.val[0], beta_f16));
+ alpha_ab.val[1] = vaddq_f16(alpha_ab.val[1], vmulq_f16(c.val[1], beta_f16));
+
+ vst2q_f16(out_ptr + x, alpha_ab);
+ }
+
+ // Left-over loop
+ for(; x < window_end_x; ++x)
+ {
+ *(out_ptr + x) += *(in_ptr + x) * static_cast<float16_t>(beta);
+ }
+ },
+ in, out);
+}
+#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
+
+} // namespace
+
+void CpuGemmMatrixAdditionKernel::configure(const ITensorInfo *src, ITensorInfo *dst, float beta)
+{
+ ARM_COMPUTE_UNUSED(dst);
+ ARM_COMPUTE_ERROR_ON_NULLPTR(src, dst);
+
+ // Perform validation step
+ ARM_COMPUTE_ERROR_THROW_ON(CpuGemmMatrixAdditionKernel::validate(src, dst, beta));
+
+ _beta = beta;
+ switch(src->data_type())
+ {
+ case DataType::F32:
+ _func = &matrix_addition_f32;
+ break;
+ case DataType::F16:
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+ _func = &matrix_addition_f16;
+ break;
+#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
+ default:
+ ARM_COMPUTE_ERROR("Data type not supported");
+ break;
+ }
+
+ // Configure kernel window
+ Window win = calculate_max_window(*src, Steps());
+ ICPPKernel::configure(win);
+}
+
+Status CpuGemmMatrixAdditionKernel::validate(const ITensorInfo *src, const ITensorInfo *dst, float beta)
+{
+ ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, dst);
+ ARM_COMPUTE_UNUSED(beta);
+
+ ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(src);
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src, 1, DataType::F16, DataType::F32);
+
+ if(dst->total_size() > 0)
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src, dst);
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(src, dst);
+ }
+ return Status{};
+}
+
+void CpuGemmMatrixAdditionKernel::run_op(ITensorPack &tensors, const Window &window, const ThreadInfo &info)
+{
+ ARM_COMPUTE_UNUSED(info);
+ ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
+ ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(IKernel::window(), window);
+ ARM_COMPUTE_ERROR_ON(tensors.empty());
+
+ const ITensor *src = tensors.get_const_tensor(TensorType::ACL_SRC);
+ ITensor *dst = tensors.get_tensor(TensorType::ACL_DST);
+
+ if(_beta != 0.0f)
+ {
+ (*_func)(src, dst, window, _beta);
+ }
+}
+
+const char *CpuGemmMatrixAdditionKernel::name() const
+{
+ return "CpuGemmMatrixAdditionKernel";
+}
+} // namespace kernels
+} // namespace cpu
+} // namespace arm_compute
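A note on the intrinsics used above: vld4q_f32 de-interleaves 16 consecutive floats across four registers (val[0] = {p[0], p[4], p[8], p[12]}, and so on), and vst4q_f32 re-interleaves them on the way out, so for this purely element-wise update the pair simply behaves as a 16-float load/modify/store. Condensed into one block of the inner loop (illustrative function name):

```cpp
#include <arm_neon.h>

// One 16-float block of the kernel's inner loop: out[0..15] += in[0..15] * beta.
void beta_update_block16(const float *in, float *out, float beta)
{
    const float32x4_t beta_v = vdupq_n_f32(beta);
    float32x4x4_t acc = vld4q_f32(out);       // de-interleaving load of 16 floats
    const float32x4x4_t c = vld4q_f32(in);
    for(int i = 0; i < 4; ++i)
    {
        acc.val[i] = vmlaq_f32(acc.val[i], c.val[i], beta_v); // acc += c * beta
    }
    vst4q_f32(out, acc);                      // matching re-interleaving store
}
```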
diff --git a/src/core/NEON/kernels/NEGEMMMatrixAdditionKernel.h b/src/core/cpu/kernels/CpuGemmMatrixAdditionKernel.h
index c896cabc6a..216e61b5d5 100644
--- a/src/core/NEON/kernels/NEGEMMMatrixAdditionKernel.h
+++ b/src/core/cpu/kernels/CpuGemmMatrixAdditionKernel.h
@@ -21,15 +21,18 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef ARM_COMPUTE_NEGEMMMATRIXADDITIONKERNEL_H
-#define ARM_COMPUTE_NEGEMMMATRIXADDITIONKERNEL_H
+#ifndef ARM_COMPUTE_CPU_GEMM_MATRIX_ADDITION_KERNEL_H
+#define ARM_COMPUTE_CPU_GEMM_MATRIX_ADDITION_KERNEL_H
-#include "src/core/NEON/INESimpleKernel.h"
+#include "src/core/common/Macros.h"
+#include "src/core/cpu/ICpuKernel.h"
namespace arm_compute
{
-class ITensor;
-
+namespace cpu
+{
+namespace kernels
+{
/** Kernel to perform the in-place matrix addition between 2 matrices taking into account that the second matrix might be weighted by a scalar value beta:
*
* @note [ MTX_OUT = MTX_0 + beta * MTX_1 ] with MTX_0 and MTX_1 of the same size
@@ -38,61 +41,49 @@ class ITensor;
* - MTX_0 = A * B * alpha, where MTX_0 is the output of @ref NEGEMMMatrixMultiplyKernel
* - MTX_1 = C
*/
-class NEGEMMMatrixAdditionKernel : public INESimpleKernel
+class CpuGemmMatrixAdditionKernel : public ICpuKernel
{
public:
- const char *name() const override
- {
- return "NEGEMMMatrixAdditionKernel";
- }
/** Constructor */
- NEGEMMMatrixAdditionKernel();
- /** Prevent instances of this class from being copied */
- NEGEMMMatrixAdditionKernel(const NEGEMMMatrixAdditionKernel &) = delete;
- /** Prevent instances of this class from being copied */
- NEGEMMMatrixAdditionKernel &operator=(const NEGEMMMatrixAdditionKernel &) = delete;
- /** Allow instances of this class to be moved */
- NEGEMMMatrixAdditionKernel(NEGEMMMatrixAdditionKernel &&) = default;
- /** Allow instances of this class to be moved */
- NEGEMMMatrixAdditionKernel &operator=(NEGEMMMatrixAdditionKernel &&) = default;
- /** Default destructor */
- ~NEGEMMMatrixAdditionKernel() = default;
+ CpuGemmMatrixAdditionKernel() = default;
+ ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(CpuGemmMatrixAdditionKernel);
/** Initialise the kernel's input and output.
*
* @note The input and output tensor must have the same dimensions
*
- * @param[in] input Input tensor (Matrix C). Data types supported: F16/F32
- * @param[in, out] output Output tensor. If this kernel is used to finalize the GEMM result, output contains the result obtained by the kernel @ref NEGEMMMatrixMultiplyKernel. Data type supported: the same as @p input.
- * @param[in] beta Weight of matrix C
+ * @param[in] src Input tensor info (Matrix C). Data types supported: F16/F32
+ * @param[in, out] dst Output tensor info. If this kernel is used to finalize the GEMM result, dst contains the result obtained by the kernel @ref NEGEMMMatrixMultiplyKernel. Data type supported: the same as @p src.
+ * @param[in] beta Weight of matrix C
*/
- void configure(const ITensor *input, ITensor *output, float beta);
- /** Static function to check if given info will lead to a valid configuration of @ref NEGEMMMatrixAdditionKernel.
+ void configure(const ITensorInfo *src, ITensorInfo *dst, float beta);
+ /** Static function to check if given info will lead to a valid configuration of @ref CpuGemmMatrixAdditionKernel.
*
* @note The input and output tensor must have the same dimensions
*
- * @param[in] input Input tensor info (Matrix C). Data types supported: F16/F32
- * @param[in] output Output tensor info. If this kernel is used to finalize the GEMM result, output contains the result obtained by the kernel @ref NEGEMMMatrixMultiplyKernel. Data type supported: the same as @p input.
- * @param[in] beta Weight of matrix C
+ * Similar to @ref CpuGemmMatrixAdditionKernel::configure()
*
* @return a status
*/
- static Status validate(const ITensorInfo *input, const ITensorInfo *output, float beta);
+ static Status validate(const ITensorInfo *src, const ITensorInfo *dst, float beta);
// Inherited methods overridden:
- void run(const Window &window, const ThreadInfo &info) override;
+ void run_op(ITensorPack &tensors, const Window &window, const ThreadInfo &info) override;
+ const char *name() const override;
private:
/** Common signature for all the matrix addition functions
*
- * @param[in] input An input tensor. Data types supported: F16/F32
- * @param[out] output The output tensor. Data type supported: same as @p input
+ * @param[in] src An input tensor. Data types supported: F16/F32
+ * @param[out] dst The output tensor. Data type supported: same as @p src
* @param[in] window Region on which to execute the kernel.
* @param[in] beta Weight of matrix C
*/
- using MatrixAdditionFunction = void(const ITensor *input, ITensor *output, const Window &window, float beta);
+ using MatrixAdditionFunctionPtr = void (*)(const ITensor *src, ITensor *dst, const Window &window, float beta);
/** Matrix addition function to use for the particular tensor types passed to configure() */
- MatrixAdditionFunction *_func;
- float _beta;
+ MatrixAdditionFunctionPtr _func{ nullptr };
+ float _beta{ 0.f };
};
+} // namespace kernels
+} // namespace cpu
} // namespace arm_compute
-#endif /* ARM_COMPUTE_NEGEMMMATRIXADDITIONKERNEL_H */
+#endif /* ARM_COMPUTE_CPU_GEMM_MATRIX_ADDITION_KERNEL_H */
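Under the new interface the in-place nature of this kernel is expressed through the pack: ACL_DST enters holding alpha * (A * B) and leaves holding alpha * (A * B) + beta * C, while ACL_SRC (matrix C) is read-only. A hedged usage sketch with illustrative names:

```cpp
#include "arm_compute/core/ITensorPack.h"
#include "arm_compute/core/Window.h"
#include "arm_compute/core/experimental/Types.h"
#include "arm_compute/runtime/NEON/NEScheduler.h"
#include "src/core/cpu/kernels/CpuGemmMatrixAdditionKernel.h"

using namespace arm_compute;

void add_beta_c(cpu::kernels::CpuGemmMatrixAdditionKernel &ma_kernel,
                const ITensor *c, ITensor *alpha_ab)
{
    ITensorPack pack;
    pack.add_const_tensor(TensorType::ACL_SRC, c);  // matrix C, read-only
    pack.add_tensor(TensorType::ACL_DST, alpha_ab); // read-modify-write accumulator
    NEScheduler::get().schedule_op(&ma_kernel, Window::DimY, ma_kernel.window(), pack);
}
```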
diff --git a/src/core/NEON/kernels/NEGEMMTranspose1xWKernel.cpp b/src/core/cpu/kernels/CpuGemmTranspose1xWKernel.cpp
index 20b0cabd1f..4b059f57cb 100644
--- a/src/core/NEON/kernels/NEGEMMTranspose1xWKernel.cpp
+++ b/src/core/cpu/kernels/CpuGemmTranspose1xWKernel.cpp
@@ -21,13 +21,12 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#include "src/core/NEON/kernels/NEGEMMTranspose1xWKernel.h"
+#include "src/core/cpu/kernels/CpuGemmTranspose1xWKernel.h"
#include "arm_compute/core/ITensor.h"
-#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/core/Window.h"
-#include "src/core/NEON/INEKernel.h"
+#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "src/core/helpers/AutoConfiguration.h"
#include "src/core/helpers/WindowHelpers.h"
@@ -35,90 +34,77 @@
namespace arm_compute
{
-namespace
+namespace cpu
{
-TensorShape get_output_shape(const ITensorInfo *input)
+namespace kernels
{
- TensorShape output_shape{ input->tensor_shape() };
- const size_t transpose_w = 16 / input->element_size();
- output_shape.set(0, input->dimension(1) * transpose_w);
- output_shape.set(1, static_cast<size_t>(std::ceil((input->dimension(0) / static_cast<float>(transpose_w)))));
- return output_shape;
-}
-
-Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output)
-{
- ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input);
- ARM_COMPUTE_RETURN_ERROR_ON(input->data_type() == DataType::UNKNOWN);
- //Note: ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(input) is not needed here as this kernel doesn't use CPU FP16 instructions.
-
- if(output->total_size() != 0)
- {
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(output->tensor_shape(), get_output_shape(input));
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_QUANTIZATION_INFO(input, output);
- }
-
- return Status{};
-}
-} // namespace
+using namespace arm_compute::misc::shape_calculator;
-void NEGEMMTranspose1xWKernel::configure(const ITensor *input, ITensor *output)
+void CpuGemmTranspose1xWKernel::configure(const ITensorInfo *src, ITensorInfo *dst)
{
- ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
+ ARM_COMPUTE_ERROR_ON_NULLPTR(src, dst);
// Output tensor auto initialization if not yet initialized
- auto_init_if_empty(*output->info(), get_output_shape(input->info()), 1, input->info()->data_type());
+ auto_init_if_empty(*dst, src->clone()->set_tensor_shape(compute_transpose1xW_with_element_size_shape(*src)));
// Perform validate step
- ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info()));
+ ARM_COMPUTE_ERROR_THROW_ON(CpuGemmTranspose1xWKernel::validate(src, dst));
- _input = input;
- _output = output;
-
- const size_t vector_size = 16 / input->info()->element_size();
+ const size_t vector_size = 16 / src->element_size();
// Configure kernel window
- Window win = calculate_max_window(*input->info(), Steps(vector_size));
-
- INEKernel::configure(win);
+ Window win = calculate_max_window(*src, Steps(vector_size));
+ ICPPKernel::configure(win);
}
-Status NEGEMMTranspose1xWKernel::validate(const ITensorInfo *input, const ITensorInfo *output)
+Status CpuGemmTranspose1xWKernel::validate(const ITensorInfo *src, const ITensorInfo *dst)
{
- ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output));
+ ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, dst);
+ ARM_COMPUTE_RETURN_ERROR_ON(src->data_type() == DataType::UNKNOWN);
+ //Note: ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(src) is not needed here as this kernel doesn't use CPU FP16 instructions.
+
+ if(dst->total_size() != 0)
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(dst->tensor_shape(), compute_transpose1xW_with_element_size_shape(*src));
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src, dst);
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_QUANTIZATION_INFO(src, dst);
+ }
return Status{};
}
-void NEGEMMTranspose1xWKernel::run(const Window &window, const ThreadInfo &info)
+void CpuGemmTranspose1xWKernel::run_op(ITensorPack &tensors, const Window &window, const ThreadInfo &info)
{
ARM_COMPUTE_UNUSED(info);
ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
- ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(INESimpleKernel::window(), window);
+ ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(IKernel::window(), window);
+ ARM_COMPUTE_ERROR_ON(tensors.empty());
/*
- * Following an example of how the transposition1xW works when the input data type is F32
+ * The following is an example of how the transposition1xW works when the src data type is F32
*
* |a00 a01 a02 a03|
* |a10 a11 a12 a13|
* |a20 a21 a22 a23| = | a00 a01 a02 a03 || a10 a11 a12 a13 || a20 a21 a22 a23 || a30 a31 a32 a33 |
* |a30 a31 a32 a33|
*
- * The output matrix will have the following shape: [ height * W, ceil(width / W) ], where W = (16 / element size of the tensor)
+ * The dst matrix will have the following shape: [ height * W, ceil(width / W) ], where W = (16 / element size of the tensor)
*/
- // Set window for output tensor. Set to 0 the X and Y dimensions in order to allow multi-threading implementation and future batched matrix multiplications
+ // Set window for dst tensor. Set to 0 the X and Y dimensions in order to allow multi-threading implementation and future batched matrix multiplications
Window win_out(window);
win_out.set(Window::DimX, Window::Dimension(0, 0, 0));
win_out.set(Window::DimY, Window::Dimension(0, 0, 0));
- Iterator in(_input, window);
- Iterator out(_output, win_out);
+ const ITensor *src = tensors.get_const_tensor(TensorType::ACL_SRC);
+ ITensor *dst = tensors.get_tensor(TensorType::ACL_DST);
- const size_t in_width = _input->info()->dimension(0);
- const size_t element_size = _input->info()->element_size();
- const size_t out_stride = _output->info()->strides_in_bytes()[1];
+ Iterator in(src, window);
+ Iterator out(dst, win_out);
+
+ const size_t in_width = src->info()->dimension(0);
+ const size_t element_size = src->info()->element_size();
+ const size_t out_stride = dst->info()->strides_in_bytes()[1];
const size_t vector_size = 16 / element_size;
execute_window_loop(window, [&](const Coordinates & id)
@@ -128,7 +114,7 @@ void NEGEMMTranspose1xWKernel::run(const Window &window, const ThreadInfo &info)
for(size_t k = 0; k < vector_size; ++k)
{
- // If the input width is not multiple of W, we fill the reference with 0s
+ // If the src width is not multiple of W, we fill the reference with 0s
if((id.x() + k) >= in_width)
{
std::memset(out_ptr + k * element_size, 0, element_size);
@@ -141,4 +127,11 @@ void NEGEMMTranspose1xWKernel::run(const Window &window, const ThreadInfo &info)
},
in, out);
}
+
+const char *CpuGemmTranspose1xWKernel::name() const
+{
+ return "CpuGemmTranspose1xWKernel";
+}
+} // namespace kernels
+} // namespace cpu
} // namespace arm_compute
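Worked shape examples for the rule W = 16 / element_size used above, via the shape calculator the ported kernel now relies on (expected values shown in the comments; the function name is illustrative):

```cpp
#include "arm_compute/core/Error.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"

using namespace arm_compute;
using namespace arm_compute::misc::shape_calculator;

void transpose1xw_shape_examples()
{
    const TensorInfo f32_src(TensorShape(12U, 8U), 1, DataType::F32);    // W = 16 / 4 = 4
    const TensorInfo u8_src(TensorShape(20U, 5U), 1, DataType::QASYMM8); // W = 16 / 1 = 16

    // F32: [12, 8] -> [8 * 4, ceil(12 / 4)]   = [32, 3]
    const TensorShape s0 = compute_transpose1xW_with_element_size_shape(f32_src);
    // U8:  [20, 5] -> [5 * 16, ceil(20 / 16)] = [80, 2]
    const TensorShape s1 = compute_transpose1xW_with_element_size_shape(u8_src);
    ARM_COMPUTE_UNUSED(s0, s1);
}
```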
diff --git a/src/core/NEON/kernels/NEGEMMTranspose1xWKernel.h b/src/core/cpu/kernels/CpuGemmTranspose1xWKernel.h
index 7ca71cf414..c9c22bd27b 100644
--- a/src/core/NEON/kernels/NEGEMMTranspose1xWKernel.h
+++ b/src/core/cpu/kernels/CpuGemmTranspose1xWKernel.h
@@ -21,16 +21,18 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef ARM_COMPUTE_NEGEMMTRANSPOSE1xWKERNEL_H
-#define ARM_COMPUTE_NEGEMMTRANSPOSE1xWKERNEL_H
+#ifndef ARM_COMPUTE_CPU_GEMM_TRANSPOSE1xW_KERNEL_H
+#define ARM_COMPUTE_CPU_GEMM_TRANSPOSE1xW_KERNEL_H
-#include "src/core/NEON/INESimpleKernel.h"
+#include "src/core/common/Macros.h"
+#include "src/core/cpu/ICpuKernel.h"
namespace arm_compute
{
-// Forward declarations
-class ITensor;
-
+namespace cpu
+{
+namespace kernels
+{
/** Kernel which transposes the elements of a matrix in chunks of 1xW, where W is equal to (16 / element size of the tensor)
*
* Following an example of how the transposition1xW works when the input data is F32
@@ -66,42 +68,31 @@ class ITensor;
* @note The output matrix will have the following shape: [ height * W, ceil(width / W) ], where W = (16 / element size of the tensor)
*
*/
-class NEGEMMTranspose1xWKernel : public INESimpleKernel
+class CpuGemmTranspose1xWKernel : public ICpuKernel
{
public:
- const char *name() const override
- {
- return "NEGEMMTranspose1xWKernel";
- }
/** Constructor */
- NEGEMMTranspose1xWKernel() = default;
- /** Prevent instances of this class from being copied (As this class contains pointers) */
- NEGEMMTranspose1xWKernel(const NEGEMMTranspose1xWKernel &) = delete;
- /** Prevent instances of this class from being copied (As this class contains pointers) */
- NEGEMMTranspose1xWKernel &operator=(const NEGEMMTranspose1xWKernel &) = delete;
- /** Allow instances of this class to be moved */
- NEGEMMTranspose1xWKernel(NEGEMMTranspose1xWKernel &&) = default;
- /** Allow instances of this class to be moved */
- NEGEMMTranspose1xWKernel &operator=(NEGEMMTranspose1xWKernel &&) = default;
- /** Default destructor */
- ~NEGEMMTranspose1xWKernel() = default;
- /** Initialise the kernel's input and output.
+ CpuGemmTranspose1xWKernel() = default;
+ ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(CpuGemmTranspose1xWKernel);
+ /** Configure kernel for a given list of arguments
*
- * @param[in] input Input tensor. Data types supported: All
- * @param[out] output Output tensor. Data type supported: same as @p input.
+ * @param[in] src Input tensor info. Data types supported: All
+ * @param[out] dst Output tensor info. Data type supported: same as @p src.
*/
- void configure(const ITensor *input, ITensor *output);
- /** Static function to check if given info will lead to a valid configuration of @ref NEGEMMTranspose1xWKernel
+ void configure(const ITensorInfo *src, ITensorInfo *dst);
+ /** Static function to check if given info will lead to a valid configuration of @ref CpuGemmTranspose1xWKernel
*
- * @param[in] input Input tensor info. Data types supported: All
- * @param[in] output Output tensor info. Data type supported: same as @p input.
+ * Similar to @ref CpuGemmTranspose1xWKernel::configure()
*
* @return a status
*/
- static Status validate(const ITensorInfo *input, const ITensorInfo *output);
+ static Status validate(const ITensorInfo *src, const ITensorInfo *dst);
// Inherited methods overridden:
- void run(const Window &window, const ThreadInfo &info) override;
+ void run_op(ITensorPack &tensors, const Window &window, const ThreadInfo &info) override;
+ const char *name() const override;
};
+} // namespace kernels
+} // namespace cpu
} // namespace arm_compute
-#endif /*ARM_COMPUTE_NEGEMMTRANSPOSE1xWKERNEL_H */
+#endif /*ARM_COMPUTE_CPU_GEMM_TRANSPOSE1xW_KERNEL_H */